/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
/* remove after component merge */
extern int vnode_pager_workaround;

#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <advisory_pageout.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/mach_host_server.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>

#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/vm_tuning.h>
#include <kern/misc_protos.h>

extern ipc_port_t memory_manager_default;
#ifndef	VM_PAGE_LAUNDRY_MAX
#define	VM_PAGE_LAUNDRY_MAX	10	/* outstanding DMM page cleans */
#endif	/* VM_PAGE_LAUNDRY_MAX */

#ifndef	VM_PAGEOUT_BURST_MAX
#define	VM_PAGEOUT_BURST_MAX	32	/* simultaneous EMM page cleans */
#endif	/* VM_PAGEOUT_BURST_MAX */

#ifndef	VM_PAGEOUT_DISCARD_MAX
#define	VM_PAGEOUT_DISCARD_MAX	68	/* simultaneous EMM page cleans */
#endif	/* VM_PAGEOUT_DISCARD_MAX */

#ifndef	VM_PAGEOUT_BURST_WAIT
#define	VM_PAGEOUT_BURST_WAIT	30	/* milliseconds per page */
#endif	/* VM_PAGEOUT_BURST_WAIT */

#ifndef	VM_PAGEOUT_EMPTY_WAIT
#define	VM_PAGEOUT_EMPTY_WAIT	200	/* milliseconds */
#endif	/* VM_PAGEOUT_EMPTY_WAIT */
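
/*
 * For a rough sense of scale (illustrative only, using the defaults
 * above): vm_pageout_scan pauses for burst_count * VM_PAGEOUT_BURST_WAIT
 * milliseconds, so a full burst of VM_PAGEOUT_BURST_MAX (32) pages gives
 * a pause of 32 * 30 = 960 ms, and an empty inactive queue forces a wait
 * of at least VM_PAGEOUT_EMPTY_WAIT (200 ms).
 */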
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef	VM_PAGE_INACTIVE_TARGET
#define	VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 3)
#endif	/* VM_PAGE_INACTIVE_TARGET */
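
/*
 * Worked example (illustrative numbers): with 30000 active+inactive
 * pages, VM_PAGE_INACTIVE_TARGET(30000) = 10000, so roughly a third of
 * those pages are kept on the inactive queue.
 */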
/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef	VM_PAGE_FREE_TARGET
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 80)
#endif	/* VM_PAGE_FREE_TARGET */

/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef	VM_PAGE_FREE_MIN
#define	VM_PAGE_FREE_MIN(free)	(10 + (free) / 100)
#endif	/* VM_PAGE_FREE_MIN */
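
/*
 * Worked example (illustrative numbers): for 4000 free pages after the
 * reservation, VM_PAGE_FREE_TARGET(4000) = 15 + 50 = 65 pages and
 * VM_PAGE_FREE_MIN(4000) = 10 + 40 = 50 pages; scanning is forced at
 * 50 free pages and continues until 65 are free.
 */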
/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef	VM_PAGE_FREE_RESERVED
#define	VM_PAGE_FREE_RESERVED \
		((8 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
#endif	/* VM_PAGE_FREE_RESERVED */
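
/*
 * Worked example (illustrative, assuming the default VM_PAGE_LAUNDRY_MAX
 * of 10 on a single-CPU configuration, NCPUS == 1): VM_PAGE_FREE_RESERVED
 * is (8 * 10) + 1 = 81 pages held back for vm-privileged threads.
 */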
/*
 * Forward declarations for internal routines.
 */
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
extern void vm_pageout_throttle(vm_page_t m);
extern vm_page_t vm_pageout_cluster_page(
			vm_object_t		object,
			vm_object_offset_t	offset,
			boolean_t		precious_clean);
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;

unsigned int vm_page_laundry_max = 0;		/* # of clusters outstanding */
unsigned int vm_page_laundry_min = 0;
unsigned int vm_pageout_burst_max = 0;
unsigned int vm_pageout_burst_wait = 0;		/* milliseconds per page */
unsigned int vm_pageout_empty_wait = 0;		/* milliseconds */
unsigned int vm_pageout_burst_min = 0;
unsigned int vm_pageout_pause_count = 0;
unsigned int vm_pageout_pause_max = 0;
unsigned int vm_free_page_pause = 100;		/* milliseconds */
/*
 * These variables record the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the variables.
 */

unsigned int vm_pageout_active = 0;		/* debugging */
unsigned int vm_pageout_inactive = 0;		/* debugging */
unsigned int vm_pageout_inactive_throttled = 0;	/* debugging */
unsigned int vm_pageout_inactive_forced = 0;	/* debugging */
unsigned int vm_pageout_inactive_nolock = 0;	/* debugging */
unsigned int vm_pageout_inactive_avoid = 0;	/* debugging */
unsigned int vm_pageout_inactive_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive_absent = 0;	/* debugging */
unsigned int vm_pageout_inactive_used = 0;	/* debugging */
unsigned int vm_pageout_inactive_clean = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty = 0;	/* debugging */
unsigned int vm_pageout_dirty_no_pager = 0;	/* debugging */
unsigned int vm_pageout_inactive_pinned = 0;	/* debugging */
unsigned int vm_pageout_inactive_limbo = 0;	/* debugging */
unsigned int vm_pageout_setup_limbo = 0;	/* debugging */
unsigned int vm_pageout_setup_unprepped = 0;	/* debugging */
unsigned int vm_stat_discard = 0;		/* debugging */
unsigned int vm_stat_discard_sent = 0;		/* debugging */
unsigned int vm_stat_discard_failure = 0;	/* debugging */
unsigned int vm_stat_discard_throttle = 0;	/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle_success = 0;		/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle_failure = 0;		/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0;		/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0;		/* debugging */

unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;
/*
 *	Routine:	vm_pageout_object_allocate
 *	Purpose:
 *		Allocate an object for use as out-of-line memory in a
 *		data_return/data_initialize message.
 *		The page must be in an unlocked object.
 *
 *		If the page belongs to a trusted pager, cleaning in place
 *		will be used, which utilizes a special "pageout object"
 *		containing private alias pages for the real page frames.
 *		Untrusted pagers use normal out-of-line memory.
 */
vm_object_t
vm_pageout_object_allocate(
	vm_page_t		m,
	vm_size_t		size,
	vm_object_offset_t	offset)
{
	vm_object_t	object = m->object;
	vm_object_t	new_object;

	assert(object->pager_ready);

	if (object->pager_trusted || object->internal)
		vm_pageout_throttle(m);

	new_object = vm_object_allocate(size);

	if (object->pager_trusted) {
		assert(offset < object->size);

		vm_object_lock(new_object);
		new_object->pageout = TRUE;
		new_object->shadow = object;
		new_object->can_persist = FALSE;
		new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		new_object->shadow_offset = offset;
		vm_object_unlock(new_object);

		/*
		 * Take a paging reference on the object. This will be dropped
		 * in vm_pageout_object_terminate()
		 */
		vm_object_lock(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		vm_pageout_in_place++;
	} else {
		vm_pageout_out_of_line++;
	}
	return(new_object);
}
#if	MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
unsigned long vm_pageout_target_page_pinned = 0;
unsigned long vm_pageout_target_page_limbo = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
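
/*
 * CLUSTER_STAT() lets the statistics above be maintained without
 * sprinkling #if blocks through the code: a use such as
 * CLUSTER_STAT(vm_pageout_cluster_clusters++;) expands to the increment
 * when MACH_CLUSTER_STATS is configured and to nothing otherwise.
 */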
/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object allocated by
 *		vm_pageout_object_allocate(), and perform all of the
 *		required cleanup actions.
 *
 *		The object must be locked, and will be returned locked.
 */
298 vm_pageout_object_terminate(
301 vm_object_t shadow_object
;
304 * Deal with the deallocation (last reference) of a pageout object
305 * (used for cleaning-in-place) by dropping the paging references/
306 * freeing pages in the original object.
309 assert(object
->pageout
);
310 shadow_object
= object
->shadow
;
311 vm_object_lock(shadow_object
);
313 while (!queue_empty(&object
->memq
)) {
315 vm_object_offset_t offset
;
317 p
= (vm_page_t
) queue_first(&object
->memq
);
322 assert(!p
->cleaning
);
328 m
= vm_page_lookup(shadow_object
,
329 offset
+ object
->shadow_offset
);
331 if(m
== VM_PAGE_NULL
)
336 * Account for the paging reference taken when
337 * m->cleaning was set on this page.
339 vm_object_paging_end(shadow_object
);
340 assert((m
->dirty
) || (m
->precious
) ||
341 (m
->busy
&& m
->cleaning
));
344 * Handle the trusted pager throttle.
346 vm_page_lock_queues();
348 vm_page_laundry_count
--;
350 if (vm_page_laundry_count
< vm_page_laundry_min
) {
351 vm_page_laundry_min
= 0;
352 thread_wakeup((event_t
) &vm_page_laundry_count
);
357 * Handle the "target" page(s). These pages are to be freed if
358 * successfully cleaned. Target pages are always busy, and are
359 * wired exactly once. The initial target pages are not mapped,
360 * (so cannot be referenced or modified) but converted target
361 * pages may have been modified between the selection as an
362 * adjacent page and conversion to a target.
366 assert(m
->wire_count
== 1);
369 #if MACH_CLUSTER_STATS
370 if (m
->wanted
) vm_pageout_target_collisions
++;
373 * Revoke all access to the page. Since the object is
374 * locked, and the page is busy, this prevents the page
375 * from being dirtied after the pmap_is_modified() call
378 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
381 * Since the page is left "dirty" but "not modifed", we
382 * can detect whether the page was redirtied during
383 * pageout by checking the modify state.
385 m
->dirty
= pmap_is_modified(m
->phys_addr
);
388 CLUSTER_STAT(vm_pageout_target_page_dirtied
++;)
389 vm_page_unwire(m
);/* reactivates */
390 VM_STAT(reactivations
++);
392 } else if (m
->prep_pin_count
!= 0) {
394 if (m
->pin_count
!= 0) {
395 /* page is pinned; reactivate */
397 vm_pageout_target_page_pinned
++;)
398 vm_page_unwire(m
);/* reactivates */
399 VM_STAT(reactivations
++);
403 * page is prepped but not pinned; send
404 * it into limbo. Note that
405 * vm_page_free (which will be called
406 * after releasing the pin lock) knows
407 * how to handle a page with limbo set.
411 vm_pageout_target_page_limbo
++;)
413 vm_page_pin_unlock();
417 CLUSTER_STAT(vm_pageout_target_page_freed
++;)
418 vm_page_free(m
);/* clears busy, etc. */
420 vm_page_unlock_queues();
424 * Handle the "adjacent" pages. These pages were cleaned in
425 * place, and should be left alone.
426 * If prep_pin_count is nonzero, then someone is using the
427 * page, so make it active.
429 if (!m
->active
&& !m
->inactive
) {
430 if (m
->reference
|| m
->prep_pin_count
!= 0)
433 vm_page_deactivate(m
);
435 if((m
->busy
) && (m
->cleaning
)) {
437 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
440 /* We do not re-set m->dirty ! */
441 /* The page was busy so no extraneous activity */
442 /* could have occured. COPY_INTO is a read into the */
443 /* new pages. CLEAN_IN_PLACE does actually write */
444 /* out the pages but handling outside of this code */
445 /* will take care of resetting dirty. We clear the */
446 /* modify however for the Programmed I/O case. */
447 pmap_clear_modify(m
->phys_addr
);
450 if(shadow_object
->absent_count
== 1)
451 vm_object_absent_release(shadow_object
);
453 shadow_object
->absent_count
--;
455 m
->overwriting
= FALSE
;
456 } else if (m
->overwriting
) {
457 /* alternate request page list, write to page_list */
458 /* case. Occurs when the original page was wired */
459 /* at the time of the list request */
460 assert(m
->wire_count
!= 0);
461 vm_page_unwire(m
);/* reactivates */
462 m
->overwriting
= FALSE
;
465 * Set the dirty state according to whether or not the page was
466 * modified during the pageout. Note that we purposefully do
467 * NOT call pmap_clear_modify since the page is still mapped.
468 * If the page were to be dirtied between the 2 calls, this
469 * this fact would be lost. This code is only necessary to
470 * maintain statistics, since the pmap module is always
471 * consulted if m->dirty is false.
473 #if MACH_CLUSTER_STATS
474 m
->dirty
= pmap_is_modified(m
->phys_addr
);
476 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
477 else vm_pageout_cluster_cleaned
++;
478 if (m
->wanted
) vm_pageout_cluster_collisions
++;
487 * Wakeup any thread waiting for the page to be un-cleaning.
490 vm_page_unlock_queues();
493 * Account for the paging reference taken in vm_paging_object_allocate.
495 vm_object_paging_end(shadow_object
);
496 vm_object_unlock(shadow_object
);
498 assert(object
->ref_count
== 0);
499 assert(object
->paging_in_progress
== 0);
500 assert(object
->resident_page_count
== 0);
/*
 *	Routine:	vm_pageout_setup
 *	Purpose:
 *		Set up a page for pageout (clean & flush).
 *
 *		Move the page to a new object, as part of which it will be
 *		sent to its memory manager in a memory_object_data_write or
 *		memory_object_initialize message.
 *
 *		The "new_object" and "new_offset" arguments
 *		indicate where the page should be moved.
 *
 *		The page in question must not be on any pageout queues,
 *		and must be busy.  The object to which it belongs
 *		must be unlocked, and the caller must hold a paging
 *		reference to it.  The new_object must not be locked.
 *
 *		This routine returns a pointer to a place-holder page,
 *		inserted at the same offset, to block out-of-order
 *		requests for the page.  The place-holder page must
 *		be freed after the data_write or initialize message
 *		has been sent.
 *
 *		The original page is put on a paging queue and marked busy.
 */
533 register vm_page_t m
,
534 register vm_object_t new_object
,
535 vm_object_offset_t new_offset
)
537 register vm_object_t old_object
= m
->object
;
538 vm_object_offset_t paging_offset
;
539 vm_object_offset_t offset
;
540 register vm_page_t holding_page
;
541 register vm_page_t new_m
;
542 register vm_page_t new_page
;
543 boolean_t need_to_wire
= FALSE
;
547 "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
548 (integer_t
)m
->object
, (integer_t
)m
->offset
,
549 (integer_t
)m
, (integer_t
)new_object
,
550 (integer_t
)new_offset
);
551 assert(m
&& m
->busy
&& !m
->absent
&& !m
->fictitious
&& !m
->error
&&
554 assert(m
->dirty
|| m
->precious
);
557 * Create a place-holder page where the old one was, to prevent
558 * attempted pageins of this page while we're unlocked.
559 * If the pageout daemon put this page in limbo and we're not
560 * going to clean in place, get another fictitious page to
561 * exchange for it now.
563 VM_PAGE_GRAB_FICTITIOUS(holding_page
);
566 VM_PAGE_GRAB_FICTITIOUS(new_page
);
568 vm_object_lock(old_object
);
571 paging_offset
= offset
+ old_object
->paging_offset
;
573 if (old_object
->pager_trusted
) {
575 * This pager is trusted, so we can clean this page
576 * in place. Leave it in the old object, and mark it
577 * cleaning & pageout.
579 new_m
= holding_page
;
580 holding_page
= VM_PAGE_NULL
;
583 * If the pageout daemon put this page in limbo, exchange the
584 * identities of the limbo page and the new fictitious page,
585 * and continue with the new page, unless the prep count has
586 * gone to zero in the meantime (which means no one is
587 * interested in the page any more). In that case, just clear
588 * the limbo bit and free the extra fictitious page.
591 if (m
->prep_pin_count
== 0) {
592 /* page doesn't have to be in limbo any more */
594 vm_page_lock_queues();
595 vm_page_free(new_page
);
596 vm_page_unlock_queues();
597 vm_pageout_setup_unprepped
++;
599 vm_page_lock_queues();
600 VM_PAGE_QUEUES_REMOVE(m
);
602 vm_page_limbo_exchange(m
, new_page
);
603 vm_pageout_setup_limbo
++;
604 vm_page_release_limbo(m
);
606 vm_page_insert(m
, old_object
, offset
);
607 vm_page_unlock_queues();
612 * Set up new page to be private shadow of real page.
614 new_m
->phys_addr
= m
->phys_addr
;
615 new_m
->fictitious
= FALSE
;
616 new_m
->private = TRUE
;
617 new_m
->pageout
= TRUE
;
620 * Mark real page as cleaning (indicating that we hold a
621 * paging reference to be released via m_o_d_r_c) and
622 * pageout (indicating that the page should be freed
623 * when the pageout completes).
625 pmap_clear_modify(m
->phys_addr
);
626 vm_page_lock_queues();
632 assert(m
->wire_count
== 1);
633 vm_page_unlock_queues();
637 m
->page_lock
= VM_PROT_NONE
;
639 m
->unlock_request
= VM_PROT_NONE
;
642 * Cannot clean in place, so rip the old page out of the
643 * object, and stick the holding page in. Set new_m to the
644 * page in the new object.
646 vm_page_lock_queues();
647 VM_PAGE_QUEUES_REMOVE(m
);
651 * If the pageout daemon put this page in limbo, exchange the
652 * identities of the limbo page and the new fictitious page,
653 * and continue with the new page, unless the prep count has
654 * gone to zero in the meantime (which means no one is
655 * interested in the page any more). In that case, just clear
656 * the limbo bit and free the extra fictitious page.
659 if (m
->prep_pin_count
== 0) {
660 /* page doesn't have to be in limbo any more */
662 vm_page_free(new_page
);
663 vm_pageout_setup_unprepped
++;
665 vm_page_limbo_exchange(m
, new_page
);
666 vm_pageout_setup_limbo
++;
667 vm_page_release_limbo(m
);
672 vm_page_insert(holding_page
, old_object
, offset
);
673 vm_page_unlock_queues();
678 new_m
->page_lock
= VM_PROT_NONE
;
679 new_m
->unlock_request
= VM_PROT_NONE
;
681 if (old_object
->internal
)
685 * Record that this page has been written out
688 vm_external_state_set(old_object
->existence_map
, offset
);
689 #endif /* MACH_PAGEMAP */
691 vm_object_unlock(old_object
);
693 vm_object_lock(new_object
);
696 * Put the page into the new object. If it is a not wired
697 * (if it's the real page) it will be activated.
700 vm_page_lock_queues();
701 vm_page_insert(new_m
, new_object
, new_offset
);
705 vm_page_activate(new_m
);
706 PAGE_WAKEUP_DONE(new_m
);
707 vm_page_unlock_queues();
709 vm_object_unlock(new_object
);
712 * Return the placeholder page to simplify cleanup.
714 return (holding_page
);
/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *	The page must not be busy, and the object and page
 *	queues must be locked.
 */
732 vm_object_t new_object
,
733 vm_object_offset_t new_offset
)
735 vm_object_t old_object
= m
->object
;
737 assert(!m
->cleaning
);
740 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
741 (integer_t
)old_object
, m
->offset
, (integer_t
)m
,
742 (integer_t
)new_m
, new_offset
);
744 pmap_clear_modify(m
->phys_addr
);
745 vm_object_paging_begin(old_object
);
748 * Record that this page has been written out
751 vm_external_state_set(old_object
->existence_map
, m
->offset
);
752 #endif /*MACH_PAGEMAP*/
755 * Mark original page as cleaning in place.
762 * Convert the fictitious page to a private shadow of
765 assert(new_m
->fictitious
);
766 new_m
->fictitious
= FALSE
;
767 new_m
->private = TRUE
;
768 new_m
->pageout
= TRUE
;
769 new_m
->phys_addr
= m
->phys_addr
;
772 vm_page_insert(new_m
, new_object
, new_offset
);
773 assert(!new_m
->wanted
);
781 vm_object_t new_object
,
782 vm_object_offset_t new_offset
)
785 "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
786 m
, new_m
, new_object
, new_offset
, 0);
788 assert((!m
->busy
) && (!m
->cleaning
));
790 assert(!new_m
->private && !new_m
->fictitious
);
792 pmap_clear_modify(m
->phys_addr
);
795 vm_object_paging_begin(m
->object
);
796 vm_page_unlock_queues();
797 vm_object_unlock(m
->object
);
800 * Copy the original page to the new page.
802 vm_page_copy(m
, new_m
);
805 * Mark the old page as clean. A request to pmap_is_modified
806 * will get the right answer.
808 vm_object_lock(m
->object
);
811 vm_object_paging_end(m
->object
);
813 vm_page_lock_queues();
814 if (!m
->active
&& !m
->inactive
)
818 vm_page_insert(new_m
, new_object
, new_offset
);
819 vm_page_activate(new_m
);
820 new_m
->busy
= FALSE
; /* No other thread can be waiting */
/*
 *	Routine:	vm_pageout_initialize_page
 *	Purpose:
 *		Causes the specified page to be initialized in
 *		the appropriate memory object.  This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		permanent object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *		Move this page to a completely new object.
 */
843 vm_pageout_initialize_page(
847 vm_object_t new_object
;
849 vm_object_offset_t paging_offset
;
850 vm_page_t holding_page
;
854 "vm_pageout_initialize_page, page 0x%X\n",
855 (integer_t
)m
, 0, 0, 0, 0);
859 * Verify that we really want to clean this page
866 * Create a paging reference to let us play with the object.
869 paging_offset
= m
->offset
+ object
->paging_offset
;
870 vm_object_paging_begin(object
);
871 vm_object_unlock(object
);
872 if (m
->absent
|| m
->error
|| m
->restart
||
873 (!m
->dirty
&& !m
->precious
)) {
875 panic("reservation without pageout?"); /* alan */
879 /* set the page for future call to vm_fault_list_request */
881 vm_object_lock(m
->object
);
882 vm_page_lock_queues();
883 pmap_clear_modify(m
->phys_addr
);
886 m
->list_req_pending
= TRUE
;
890 vm_page_unlock_queues();
891 vm_object_unlock(m
->object
);
892 vm_pageout_throttle(m
);
896 /* VM_STAT(pages_pagedout++); */
899 * Write the data to its pager.
900 * Note that the data is passed by naming the new object,
901 * not a virtual address; the pager interface has been
902 * manipulated to use the "internal memory" data type.
903 * [The object reference from its allocation is donated
904 * to the eventual recipient.]
906 memory_object_data_initialize(object
->pager
,
907 object
->pager_request
,
912 vm_object_lock(object
);
#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif	/* MACH_CLUSTER_STATS */

boolean_t allow_clustered_pageouts = FALSE;
/*
 * vm_pageout_cluster:
 *
 * Given a page, page it out, and attempt to clean adjacent pages
 * in the same operation.
 *
 * The page must be busy, and the object unlocked w/ paging reference
 * to prevent deallocation or collapse.  The page must not be on any
 * pageout queue.
 */
940 vm_object_t object
= m
->object
;
941 vm_object_offset_t offset
= m
->offset
; /* from vm_object start */
942 vm_object_offset_t paging_offset
= m
->offset
+ object
->paging_offset
;
943 vm_object_t new_object
;
944 vm_object_offset_t new_offset
;
945 vm_size_t cluster_size
;
946 vm_object_offset_t cluster_offset
; /* from memory_object start */
947 vm_object_offset_t cluster_lower_bound
; /* from vm_object_start */
948 vm_object_offset_t cluster_upper_bound
; /* from vm_object_start */
949 vm_object_offset_t cluster_start
, cluster_end
;/* from vm_object start */
950 vm_object_offset_t offset_within_cluster
;
951 vm_size_t length_of_data
;
952 vm_page_t
friend, holding_page
;
955 boolean_t precious_clean
= TRUE
;
956 int pages_in_cluster
;
958 CLUSTER_STAT(int pages_at_higher_offsets
= 0;)
959 CLUSTER_STAT(int pages_at_lower_offsets
= 0;)
962 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
963 (integer_t
)object
, offset
, (integer_t
)m
, 0, 0);
965 CLUSTER_STAT(vm_pageout_cluster_clusters
++;)
967 * Only a certain kind of page is appreciated here.
969 assert(m
->busy
&& (m
->dirty
|| m
->precious
) && (m
->wire_count
== 0));
970 assert(!m
->cleaning
&& !m
->pageout
&& !m
->inactive
&& !m
->active
);
972 vm_object_lock(object
);
973 cluster_size
= object
->cluster_size
;
975 assert(cluster_size
>= PAGE_SIZE
);
976 if (cluster_size
< PAGE_SIZE
) cluster_size
= PAGE_SIZE
;
977 assert(object
->pager_created
&& object
->pager_initialized
);
978 assert(object
->internal
|| object
->pager_ready
);
980 if (m
->precious
&& !m
->dirty
)
981 precious_clean
= TRUE
;
983 if (!object
->pager_trusted
|| !allow_clustered_pageouts
)
984 cluster_size
= PAGE_SIZE
;
985 vm_object_unlock(object
);
987 cluster_offset
= paging_offset
& (vm_object_offset_t
)(cluster_size
- 1);
988 /* bytes from beginning of cluster */
990 * Due to unaligned mappings, we have to be careful
991 * of negative offsets into the VM object. Clip the cluster
992 * boundary to the VM object, not the memory object.
994 if (offset
> cluster_offset
) {
995 cluster_lower_bound
= offset
- cluster_offset
;
998 cluster_lower_bound
= 0;
1000 cluster_upper_bound
= (offset
- cluster_offset
) +
1001 (vm_object_offset_t
)cluster_size
;
1003 /* set the page for future call to vm_fault_list_request */
1004 holding_page
= NULL
;
1005 vm_object_lock(m
->object
);
1006 vm_page_lock_queues();
1008 m
->list_req_pending
= TRUE
;
1012 vm_page_unlock_queues();
1013 vm_object_unlock(m
->object
);
1014 vm_pageout_throttle(m
);
1017 * Search backward for adjacent eligible pages to clean in
1021 cluster_start
= offset
;
1022 if (offset
) { /* avoid wrap-around at zero */
1023 for (cluster_start
= offset
- PAGE_SIZE_64
;
1024 cluster_start
>= cluster_lower_bound
;
1025 cluster_start
-= PAGE_SIZE_64
) {
1026 assert(cluster_size
> PAGE_SIZE
);
1028 vm_object_lock(object
);
1029 vm_page_lock_queues();
1031 if ((friend = vm_pageout_cluster_page(object
, cluster_start
,
1032 precious_clean
)) == VM_PAGE_NULL
) {
1033 vm_page_unlock_queues();
1034 vm_object_unlock(object
);
1037 new_offset
= (cluster_start
+ object
->paging_offset
)
1038 & (cluster_size
- 1);
1040 assert(new_offset
< cluster_offset
);
1041 m
->list_req_pending
= TRUE
;
1043 /* do nothing except advance the write request, all we really need to */
1044 /* do is push the target page and let the code at the other end decide */
1045 /* what is really the right size */
1046 if (vm_page_free_count
<= vm_page_free_reserved
) {
1052 vm_page_unlock_queues();
1053 vm_object_unlock(object
);
1054 if(m
->dirty
|| m
->object
->internal
) {
1055 CLUSTER_STAT(pages_at_lower_offsets
++;)
1059 cluster_start
+= PAGE_SIZE_64
;
1061 assert(cluster_start
>= cluster_lower_bound
);
1062 assert(cluster_start
<= offset
);
1064 * Search forward for adjacent eligible pages to clean in
1067 for (cluster_end
= offset
+ PAGE_SIZE_64
;
1068 cluster_end
< cluster_upper_bound
;
1069 cluster_end
+= PAGE_SIZE_64
) {
1070 assert(cluster_size
> PAGE_SIZE
);
1072 vm_object_lock(object
);
1073 vm_page_lock_queues();
1075 if ((friend = vm_pageout_cluster_page(object
, cluster_end
,
1076 precious_clean
)) == VM_PAGE_NULL
) {
1077 vm_page_unlock_queues();
1078 vm_object_unlock(object
);
1081 new_offset
= (cluster_end
+ object
->paging_offset
)
1082 & (cluster_size
- 1);
1084 assert(new_offset
< cluster_size
);
1085 m
->list_req_pending
= TRUE
;
1087 /* do nothing except advance the write request, all we really need to */
1088 /* do is push the target page and let the code at the other end decide */
1089 /* what is really the right size */
1090 if (vm_page_free_count
<= vm_page_free_reserved
) {
1096 vm_page_unlock_queues();
1097 vm_object_unlock(object
);
1099 if(m
->dirty
|| m
->object
->internal
) {
1100 CLUSTER_STAT(pages_at_higher_offsets
++;)
1103 assert(cluster_end
<= cluster_upper_bound
);
1104 assert(cluster_end
>= offset
+ PAGE_SIZE
);
1107 * (offset - cluster_offset) is beginning of cluster_object
1108 * relative to vm_object start.
1110 offset_within_cluster
= cluster_start
- (offset
- cluster_offset
);
1111 length_of_data
= cluster_end
- cluster_start
;
1113 assert(offset_within_cluster
< cluster_size
);
1114 assert((offset_within_cluster
+ length_of_data
) <= cluster_size
);
1117 assert(rc
== KERN_SUCCESS
);
1119 pages_in_cluster
= length_of_data
/PAGE_SIZE
;
1120 if(m
->dirty
|| m
->object
->internal
) {
1121 VM_STAT(pageouts
++);
1123 /* VM_STAT(pages_pagedout += pages_in_cluster); */
1125 #if MACH_CLUSTER_STATS
1126 (cluster_stats
[pages_at_lower_offsets
].pages_at_lower_offsets
)++;
1127 (cluster_stats
[pages_at_higher_offsets
].pages_at_higher_offsets
)++;
1128 (cluster_stats
[pages_in_cluster
].pages_in_cluster
)++;
1129 #endif /* MACH_CLUSTER_STATS */
1132 * Send the data to the pager.
1134 paging_offset
= cluster_start
+ object
->paging_offset
;
1136 if(((rpc_subsystem_t
)pager_mux_hash_lookup(object
->pager
)) ==
1137 ((rpc_subsystem_t
) &vnode_pager_workaround
)) {
1138 rc
= vnode_pager_data_return(object
->pager
,
1139 object
->pager_request
,
1146 rc
= memory_object_data_return(object
->pager
,
1147 object
->pager_request
,
1155 rc
= memory_object_data_return(object
->pager
,
1156 object
->pager_request
,
1163 vm_object_lock(object
);
1164 vm_object_paging_end(object
);
1167 assert(!object
->pager_trusted
);
1168 VM_PAGE_FREE(holding_page
);
1169 vm_object_paging_end(object
);
1172 vm_object_unlock(object
);
1176 * vm_pageout_return_write_pages
1177 * Recover pages from an aborted write attempt
1181 vm_pageout_return_write_pages(
1182 ipc_port_t control_port
,
1183 vm_object_offset_t object_offset
,
1195 object
= copy
->cpy_object
;
1196 copy_offset
= copy
->offset
;
1199 if((copy
->type
!= VM_MAP_COPY_OBJECT
) || (object
->shadow
== 0)) {
1200 object
= (vm_object_t
)control_port
->ip_kobject
;
1201 shadow_offset
= (object_offset
- object
->paging_offset
)
1204 /* get the offset from the copy object */
1205 shadow_offset
= object
->shadow_offset
;
1206 /* find the backing object */
1207 object
= object
->shadow
;
1209 vm_object_lock(object
);
1211 for(offset
= 0, j
=0; offset
< size
; offset
+=page_size
, j
++) {
1212 m
= vm_page_lookup(object
,
1213 offset
+ shadow_offset
+ copy_offset
);
1214 if((m
== VM_PAGE_NULL
) || m
->fictitious
) {
1218 vm_object_t copy_object
;
1220 /* m might be fictitious if the original page */
1221 /* was found to be in limbo at the time of */
1222 /* vm_pageout_setup */
1224 if((m
!= VM_PAGE_NULL
) && m
->fictitious
) {
1225 m
->cleaning
= FALSE
;
1227 /* if object is not pager trusted then */
1228 /* this fictitious page will be removed */
1229 /* as the holding page in vm_pageout_cluster */
1230 if (object
->pager_trusted
)
1232 if(vm_page_laundry_count
)
1233 vm_page_laundry_count
--;
1234 if (vm_page_laundry_count
1235 < vm_page_laundry_min
) {
1236 vm_page_laundry_min
= 0;
1237 thread_wakeup((event_t
)
1238 &vm_page_laundry_count
);
1241 else if ((object
->pager_trusted
) &&
1242 (copy
->type
== VM_MAP_COPY_OBJECT
)) {
1243 vm_object_paging_end(object
);
1246 copy_object
= copy
->cpy_object
;
1248 if(copy
->type
== VM_MAP_COPY_OBJECT
) {
1249 p
= (vm_page_t
) queue_first(©_object
->memq
);
1252 i
< copy_object
->resident_page_count
;
1254 if(p
->offset
== (offset
+ copy_offset
))
1256 p
= (vm_page_t
) queue_next(&p
->listq
);
1261 p
= copy
->cpy_page_list
[j
];
1262 copy
->cpy_page_list
[j
] = 0;
1266 vm_page_insert(p
, object
,
1267 offset
+ shadow_offset
+ copy_offset
);
1272 p
->pageout
= FALSE
; /*dont throw away target*/
1273 vm_page_unwire(p
);/* reactivates */
1275 } else if(m
->pageout
) {
1276 m
->pageout
= FALSE
; /* dont throw away target pages */
1277 vm_page_unwire(m
);/* reactivates */
1281 vm_object_unlock(object
);
1282 vm_map_copy_discard(copy
);
1283 vm_object_lock(object
);
1285 for(offset
= 0; offset
< size
; offset
+=page_size
) {
1286 m
= vm_page_lookup(object
,
1287 offset
+ shadow_offset
+ copy_offset
);
1288 m
->dirty
= TRUE
; /* we'll send the pages home later */
1289 m
->busy
= FALSE
; /* allow system access again */
1292 vm_object_unlock(object
);
/*
 * Trusted pager throttle.
 * Object must be unlocked, page queues must be unlocked.
 */
void
vm_pageout_throttle(
	register vm_page_t m)
{
	vm_page_lock_queues();
	assert(!m->laundry);
	while (vm_page_laundry_count >= vm_page_laundry_max) {
		/*
		 * Set the threshold for when vm_page_free()
		 * should wake us up.
		 */
		vm_page_laundry_min = vm_page_laundry_max/2;
		assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
		vm_page_unlock_queues();

		/*
		 * Pause to let the default pager catch up.
		 */
		thread_block((void (*)(void)) 0);
		vm_page_lock_queues();
	}
	vm_page_laundry_count++;
	vm_page_unlock_queues();
}
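
/*
 * For illustration (assuming the default vm_page_laundry_max of 10):
 * a thread entering vm_pageout_throttle while 10 cleans are already
 * outstanding sets vm_page_laundry_min to 5 and blocks; it is woken
 * once enough cleans complete to drop the laundry count below that
 * threshold.
 */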
/*
 * The global variable vm_pageout_clean_active_pages controls whether
 * active pages are considered valid to be cleaned in place during a
 * clustered pageout.  Performance measurements are necessary to determine
 * the best setting.
 */
int vm_pageout_clean_active_pages = 1;
/*
 * vm_pageout_cluster_page:	[Internal]
 *
 * return a vm_page_t to the page at (object,offset) if it is appropriate
 * to clean in place.  Pages that are non-existent, busy, absent, already
 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
 * page in a cluster.
 *
 * The object must be locked on entry, and remains locked throughout.
 */
vm_page_t
vm_pageout_cluster_page(
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		precious_clean)
{
	vm_page_t m;

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
	    (integer_t)object, offset, 0, 0, 0);

	if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
		return(VM_PAGE_NULL);

	if (m->busy || m->absent || m->cleaning ||
	    m->prep_pin_count != 0 ||
	    (m->wire_count != 0) || m->error)
		return(VM_PAGE_NULL);

	if (vm_pageout_clean_active_pages) {
		if (!m->active && !m->inactive) return(VM_PAGE_NULL);
	} else {
		if (!m->inactive) return(VM_PAGE_NULL);
	}

	assert(!m->private);
	assert(!m->fictitious);

	if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr);

	if (precious_clean) {
		if (!m->precious || !m->dirty)
			return(VM_PAGE_NULL);
	} else {
		if (!m->dirty)
			return(VM_PAGE_NULL);
	}

	return(m);
}
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *	It returns with vm_page_queue_free_lock held and
 *	vm_page_free_wanted == 0.
 */
extern void vm_pageout_scan_continue(void);	/* forward; */
void
vm_pageout_scan(void)
{
	unsigned int burst_count;
	boolean_t now = FALSE;
	unsigned int laundry_pages;
	boolean_t need_more_inactive_pages;
	unsigned int loop_detect;

	XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);

	/*
	 *	We want to gradually dribble pages from the active queue
	 *	to the inactive queue.  If we let the inactive queue get
	 *	very small, and then suddenly dump many pages into it,
	 *	those pages won't get a sufficient chance to be referenced
	 *	before we start taking them from the inactive queue.
	 *
	 *	We must limit the rate at which we send pages to the pagers.
	 *	data_write messages consume memory, for message buffers and
	 *	for map-copy objects.  If we get too far ahead of the pagers,
	 *	we can potentially run out of memory.
	 *
	 *	We can use the laundry count to limit directly the number
	 *	of pages outstanding to the default pager.  A similar
	 *	strategy for external pagers doesn't work, because
	 *	external pagers don't have to deallocate the pages sent them,
	 *	and because we might have to send pages to external pagers
	 *	even if they aren't processing writes.  So we also
	 *	use a burst count to limit writes to external pagers.
	 *
	 *	When memory is very tight, we can't rely on external pagers to
	 *	clean pages.  They probably aren't running, because they
	 *	aren't vm-privileged.  If we kept sending dirty pages to them,
	 *	we could exhaust the free list.  However, we can't just ignore
	 *	pages belonging to external objects, because there might be no
	 *	pages belonging to internal objects.  Hence, we get the page
	 *	into an internal object and then immediately double-page it,
	 *	sending it to the default pager.
	 *
	 *	consider_zone_gc should be last, because the other operations
	 *	might return memory to zones.
	 */
1438 mutex_lock(&vm_page_queue_free_lock
);
1439 now
= (vm_page_free_count
< vm_page_free_min
);
1440 mutex_unlock(&vm_page_queue_free_lock
);
1442 swapout_threads(now
);
1443 #endif /* THREAD_SWAPPER */
1446 consider_task_collect();
1447 consider_thread_collect();
1448 cleanup_limbo_queue();
1450 consider_machine_collect();
1452 loop_detect
= vm_page_active_count
+ vm_page_inactive_count
;
1454 if (vm_page_free_count
<= vm_page_free_reserved
) {
1455 need_more_inactive_pages
= TRUE
;
1457 need_more_inactive_pages
= FALSE
;
1460 need_more_inactive_pages
= FALSE
;
1463 for (burst_count
= 0;;) {
1464 register vm_page_t m
;
1465 register vm_object_t object
;
1466 unsigned int free_count
;
1469 * Recalculate vm_page_inactivate_target.
1472 vm_page_lock_queues();
1473 vm_page_inactive_target
=
1474 VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1475 vm_page_inactive_count
);
1478 * Move pages from active to inactive.
1481 while ((vm_page_inactive_count
< vm_page_inactive_target
||
1482 need_more_inactive_pages
) &&
1483 !queue_empty(&vm_page_queue_active
)) {
1484 register vm_object_t object
;
1486 vm_pageout_active
++;
1487 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1490 * If we're getting really low on memory,
1491 * try selecting a page that will go
1492 * directly to the default_pager.
1493 * If there are no such pages, we have to
1494 * page out a page backed by an EMM,
1495 * so that the default_pager can recover
1498 if (need_more_inactive_pages
&&
1499 (IP_VALID(memory_manager_default
))) {
1500 vm_pageout_scan_active_emm_throttle
++;
1502 assert(m
->active
&& !m
->inactive
);
1505 if (vm_object_lock_try(object
)) {
1507 if (object
->pager_trusted
||
1510 vm_pageout_scan_active_emm_throttle_success
++;
1511 goto object_locked_active
;
1514 vm_pageout_scan_active_emm_throttle_success
++;
1515 goto object_locked_active
;
1517 vm_object_unlock(object
);
1519 m
= (vm_page_t
) queue_next(&m
->pageq
);
1520 } while (!queue_end(&vm_page_queue_active
,
1521 (queue_entry_t
) m
));
1522 if (queue_end(&vm_page_queue_active
,
1523 (queue_entry_t
) m
)) {
1524 vm_pageout_scan_active_emm_throttle_failure
++;
1526 queue_first(&vm_page_queue_active
);
1530 assert(m
->active
&& !m
->inactive
);
1533 if (!vm_object_lock_try(object
)) {
1535 * Move page to end and continue.
1538 queue_remove(&vm_page_queue_active
, m
,
1540 queue_enter(&vm_page_queue_active
, m
,
1542 vm_page_unlock_queues();
1544 vm_page_lock_queues();
1548 object_locked_active
:
1550 * If the page is busy, then we pull it
1551 * off the active queue and leave it alone.
1555 vm_object_unlock(object
);
1556 queue_remove(&vm_page_queue_active
, m
,
1560 vm_page_active_count
--;
1565 * Deactivate the page while holding the object
1566 * locked, so we know the page is still not busy.
1567 * This should prevent races between pmap_enter
1568 * and pmap_clear_reference. The page might be
1569 * absent or fictitious, but vm_page_deactivate
1573 vm_page_deactivate(m
);
1574 vm_object_unlock(object
);
1578 * We are done if we have met our target *and*
1579 * nobody is still waiting for a page.
1582 mutex_lock(&vm_page_queue_free_lock
);
1583 free_count
= vm_page_free_count
;
1584 if ((free_count
>= vm_page_free_target
) &&
1585 (vm_page_free_wanted
== 0)) {
1586 vm_page_unlock_queues();
1589 mutex_unlock(&vm_page_queue_free_lock
);
1592 * Sometimes we have to pause:
1593 * 1) No inactive pages - nothing to do.
1594 * 2) Flow control - wait for untrusted pagers to catch up.
1597 if (queue_empty(&vm_page_queue_inactive
) ||
1598 ((--loop_detect
) == 0) ||
1599 (burst_count
>= vm_pageout_burst_max
)) {
1600 unsigned int pages
, msecs
;
1603 consider_machine_adjust();
1605 * vm_pageout_burst_wait is msecs/page.
1606 * If there is nothing for us to do, we wait
1607 * at least vm_pageout_empty_wait msecs.
1609 pages
= burst_count
;
1611 if (loop_detect
== 0) {
1612 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1613 msecs
= vm_free_page_pause
;
1616 msecs
= burst_count
* vm_pageout_burst_wait
;
1619 if (queue_empty(&vm_page_queue_inactive
) &&
1620 (msecs
< vm_pageout_empty_wait
))
1621 msecs
= vm_pageout_empty_wait
;
1622 vm_page_unlock_queues();
1623 assert_wait_timeout(msecs
, THREAD_INTERRUPTIBLE
);
1624 counter(c_vm_pageout_scan_block
++);
1627 * Unfortunately, we don't have call_continuation
1628 * so we can't rely on tail-recursion.
1630 wait_result
= thread_block((void (*)(void)) 0);
1631 if (wait_result
!= THREAD_TIMED_OUT
)
1632 thread_cancel_timer();
1633 vm_pageout_scan_continue();
1638 vm_pageout_inactive
++;
1639 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1641 if ((vm_page_free_count
<= vm_page_free_reserved
) &&
1642 (IP_VALID(memory_manager_default
))) {
1644 * We're really low on memory. Try to select a page that
1645 * would go directly to the default_pager.
1646 * If there are no such pages, we have to page out a
1647 * page backed by an EMM, so that the default_pager
1648 * can recover it eventually.
1650 vm_pageout_scan_inactive_emm_throttle
++;
1652 assert(!m
->active
&& m
->inactive
);
1655 if (vm_object_lock_try(object
)) {
1657 if (object
->pager_trusted
||
1660 vm_pageout_scan_inactive_emm_throttle_success
++;
1661 goto object_locked_inactive
;
1664 vm_pageout_scan_inactive_emm_throttle_success
++;
1665 goto object_locked_inactive
;
1667 vm_object_unlock(object
);
1669 m
= (vm_page_t
) queue_next(&m
->pageq
);
1670 } while (!queue_end(&vm_page_queue_inactive
,
1671 (queue_entry_t
) m
));
1672 if (queue_end(&vm_page_queue_inactive
,
1673 (queue_entry_t
) m
)) {
1674 vm_pageout_scan_inactive_emm_throttle_failure
++;
1676 * We should check the "active" queue
1677 * for good candidates to page out.
1679 need_more_inactive_pages
= TRUE
;
1682 queue_first(&vm_page_queue_inactive
);
1686 assert(!m
->active
&& m
->inactive
);
1690 * Try to lock object; since we've got the
1691 * page queues lock, we can only try for this one.
1694 if (!vm_object_lock_try(object
)) {
1696 * Move page to end and continue.
1698 queue_remove(&vm_page_queue_inactive
, m
,
1700 queue_enter(&vm_page_queue_inactive
, m
,
1702 vm_page_unlock_queues();
1704 vm_pageout_inactive_nolock
++;
1708 object_locked_inactive
:
1710 * Paging out pages of objects which pager is being
1711 * created by another thread must be avoided, because
1712 * this thread may claim for memory, thus leading to a
1713 * possible dead lock between it and the pageout thread
1714 * which will wait for pager creation, if such pages are
1715 * finally chosen. The remaining assumption is that there
1716 * will finally be enough available pages in the inactive
1717 * pool to page out in order to satisfy all memory claimed
1718 * by the thread which concurrently creates the pager.
1721 if (!object
->pager_initialized
&& object
->pager_created
) {
1723 * Move page to end and continue, hoping that
1724 * there will be enough other inactive pages to
1725 * page out so that the thread which currently
1726 * initializes the pager will succeed.
1728 queue_remove(&vm_page_queue_inactive
, m
,
1730 queue_enter(&vm_page_queue_inactive
, m
,
1732 vm_page_unlock_queues();
1733 vm_object_unlock(object
);
1734 vm_pageout_inactive_avoid
++;
1739 * Remove the page from the inactive list.
1742 queue_remove(&vm_page_queue_inactive
, m
, vm_page_t
, pageq
);
1743 m
->inactive
= FALSE
;
1745 vm_page_inactive_count
--;
1747 if (m
->busy
|| !object
->alive
) {
1749 * Somebody is already playing with this page.
1750 * Leave it off the pageout queues.
1753 vm_page_unlock_queues();
1754 vm_object_unlock(object
);
1755 vm_pageout_inactive_busy
++;
1760 * If it's absent or in error, we can reclaim the page.
1763 if (m
->absent
|| m
->error
) {
1764 vm_pageout_inactive_absent
++;
1767 vm_page_unlock_queues();
1768 vm_object_unlock(object
);
1772 assert(!m
->private);
1773 assert(!m
->fictitious
);
1776 * If already cleaning this page in place, convert from
1777 * "adjacent" to "target". We can leave the page mapped,
1778 * and vm_pageout_object_terminate will determine whether
1779 * to free or reactivate.
1783 #if MACH_CLUSTER_STATS
1784 vm_pageout_cluster_conversions
++;
1786 if (m
->prep_pin_count
== 0) {
1791 vm_object_unlock(object
);
1792 vm_page_unlock_queues();
1797 * If it's being used, reactivate.
1798 * (Fictitious pages are either busy or absent.)
1801 if (m
->reference
|| pmap_is_referenced(m
->phys_addr
)) {
1802 vm_pageout_inactive_used
++;
1804 #if ADVISORY_PAGEOUT
1805 if (m
->discard_request
) {
1806 m
->discard_request
= FALSE
;
1808 #endif /* ADVISORY_PAGEOUT */
1809 vm_object_unlock(object
);
1810 vm_page_activate(m
);
1811 VM_STAT(reactivations
++);
1812 vm_page_unlock_queues();
1816 if (m
->prep_pin_count
!= 0) {
1817 boolean_t pinned
= FALSE
;
1820 if (m
->pin_count
!= 0) {
1821 /* skip and reactivate pinned page */
1823 vm_pageout_inactive_pinned
++;
1825 /* page is prepped; send it into limbo */
1827 vm_pageout_inactive_limbo
++;
1829 vm_page_pin_unlock();
1831 goto reactivate_page
;
1834 #if ADVISORY_PAGEOUT
1835 if (object
->advisory_pageout
) {
1836 boolean_t do_throttle
;
1838 vm_object_offset_t discard_offset
;
1840 if (m
->discard_request
) {
1841 vm_stat_discard_failure
++;
1842 goto mandatory_pageout
;
1845 assert(object
->pager_initialized
);
1846 m
->discard_request
= TRUE
;
1847 port
= object
->pager
;
1849 /* system-wide throttle */
1850 do_throttle
= (vm_page_free_count
<=
1851 vm_page_free_reserved
);
1853 /* throttle on this pager */
1854 /* XXX lock ordering ? */
1856 do_throttle
= imq_full(&port
->ip_messages
);
1860 vm_stat_discard_throttle
++;
1862 /* ignore this page and skip to next */
1863 vm_page_unlock_queues();
1864 vm_object_unlock(object
);
1867 /* force mandatory pageout */
1868 goto mandatory_pageout
;
1872 /* proceed with discard_request */
1873 vm_page_activate(m
);
1875 VM_STAT(reactivations
++);
1876 discard_offset
= m
->offset
+ object
->paging_offset
;
1877 vm_stat_discard_sent
++;
1878 vm_page_unlock_queues();
1879 vm_object_unlock(object
);
1881 memory_object_discard_request(object->pager,
1882 object->pager_request,
1889 #endif /* ADVISORY_PAGEOUT */
1892 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1893 (integer_t
)object
, (integer_t
)m
->offset
, (integer_t
)m
, 0,0);
1896 * Eliminate all mappings.
1900 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
1902 m
->dirty
= pmap_is_modified(m
->phys_addr
);
1905 * If it's clean and not precious, we can free the page.
1908 if (!m
->dirty
&& !m
->precious
) {
1909 vm_pageout_inactive_clean
++;
1912 vm_page_unlock_queues();
1915 * If there is no memory object for the page, create
1916 * one and hand it to the default pager.
1919 if (!object
->pager_initialized
)
1920 vm_object_collapse(object
);
1921 if (!object
->pager_initialized
)
1922 vm_object_pager_create(object
);
1923 if (!object
->pager_initialized
) {
1925 * Still no pager for the object.
1926 * Reactivate the page.
1928 * Should only happen if there is no
1931 vm_page_lock_queues();
1932 vm_page_activate(m
);
1933 vm_page_unlock_queues();
1936 * And we are done with it.
1938 PAGE_WAKEUP_DONE(m
);
1939 vm_object_unlock(object
);
1942 * break here to get back to the preemption
1943 * point in the outer loop so that we don't
1944 * spin forever if there is no default pager.
1946 vm_pageout_dirty_no_pager
++;
1948 * Well there's no pager, but we can still reclaim
1949 * free pages out of the inactive list. Go back
1950 * to top of loop and look for suitable pages.
1955 if (object
->pager_initialized
&& object
->pager
== IP_NULL
) {
1957 * This pager has been destroyed by either
1958 * memory_object_destroy or vm_object_destroy, and
1959 * so there is nowhere for the page to go.
1960 * Just free the page.
1963 vm_object_unlock(object
);
1967 vm_pageout_inactive_dirty
++;
1969 if (!object->internal)
1972 vm_object_paging_begin(object
);
1973 vm_object_unlock(object
);
1974 vm_pageout_cluster(m
); /* flush it */
1976 consider_machine_adjust();
counter(unsigned int c_vm_pageout_scan_continue = 0;)

void
vm_pageout_scan_continue(void)
{
	/*
	 *	We just paused to let the pagers catch up.
	 *	If vm_page_laundry_count is still high,
	 *	then we aren't waiting long enough.
	 *	If we have paused some vm_pageout_pause_max times without
	 *	adjusting vm_pageout_burst_wait, it might be too big,
	 *	so we decrease it.
	 */

	vm_page_lock_queues();
	counter(++c_vm_pageout_scan_continue);
	if (vm_page_laundry_count > vm_pageout_burst_min) {
		vm_pageout_burst_wait++;
		vm_pageout_pause_count = 0;
	} else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
		vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
		if (vm_pageout_burst_wait < 1)
			vm_pageout_burst_wait = 1;
		vm_pageout_pause_count = 0;
	}
	vm_page_unlock_queues();
}
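
/*
 * For illustration (assuming the default VM_PAGEOUT_BURST_WAIT of 30 ms):
 * once vm_pageout_pause_count exceeds vm_pageout_pause_max without the
 * laundry backing up, the per-page wait is cut to (30 * 3) / 4 = 22 ms.
 */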
void vm_page_free_reserve(int pages);
int vm_page_free_count_init;

void
vm_page_free_reserve(
	int pages)
{
	int free_after_reserve;

	vm_page_free_reserved += pages;

	free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
		VM_PAGE_FREE_MIN(free_after_reserve);

	vm_page_free_target = vm_page_free_reserved +
		VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target < vm_page_free_min + 5)
		vm_page_free_target = vm_page_free_min + 5;
}
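
/*
 * Worked example (illustrative numbers): if vm_page_free_count_init is
 * 4096 and the reserve ends up at 81 pages, free_after_reserve is 4015,
 * giving vm_page_free_min = 81 + 10 + 4015/100 = 131 and
 * vm_page_free_target = 81 + 15 + 4015/80 = 146; the target already
 * exceeds free_min + 5, so no further adjustment is made.
 */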
/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout(void)
{
	thread_t self = current_thread();

	/*
	 * Set thread privileges.
	 */
	self->vm_privilege = TRUE;
	stack_privilege(self);
	thread_swappable(current_act(), FALSE);

	/*
	 * Initialize some paging parameters.
	 */

	if (vm_page_laundry_max == 0)
		vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX;

	if (vm_pageout_burst_max == 0)
		vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;

	if (vm_pageout_burst_wait == 0)
		vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;

	if (vm_pageout_empty_wait == 0)
		vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;

	vm_page_free_count_init = vm_page_free_count;
	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to insure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED)
		vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved);
	else
		vm_page_free_reserve(0);

	/*
	 * vm_pageout_scan will set vm_page_inactive_target.
	 *
	 * The pageout daemon is never done, so loop forever.
	 * We should call vm_pageout_scan at least once each
	 * time we are woken, even if vm_page_free_wanted is
	 * zero, to check vm_page_free_target and
	 * vm_page_inactive_target.
	 */
	for (;;) {
		vm_pageout_scan();
		/* we hold vm_page_queue_free_lock now */
		assert(vm_page_free_wanted == 0);
		assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_pageout_block++);
		thread_block((void (*)(void)) 0);
	}
}
	upl->ref_count -= 1;
	if(upl->ref_count == 0) {
/*
 *	Routine:	vm_fault_list_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *
 *		if a page list structure is present
 *		return the mapped physical pages, where a
 *		page is not present, return a non-initialized
 *		one.  If the no_sync bit is turned on, don't
 *		call the pager unlock to synchronize with other
 *		possible copies of the page.  Leave pages busy
 *		in the original object, if a page list structure
 *		was specified.  When a commit of the page list
 *		pages is done, the dirty bit will be set for each one.
 *
 *		If a page list structure is present, return
 *		all mapped pages.  Where a page does not exist
 *		map a zero filled one.  Leave pages busy in
 *		the original object.  If a page list structure
 *		is not specified, this call is a no-op.
 *
 *	Note:	access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects;
 *		in this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects) they support.
 */
2153 vm_fault_list_request(
2155 vm_object_offset_t offset
,
2158 upl_page_info_t
**user_page_list_ptr
,
2159 int page_list_count
,
2163 vm_object_offset_t dst_offset
= offset
;
2164 upl_page_info_t
*user_page_list
;
2165 vm_size_t xfer_size
= size
;
2166 boolean_t do_m_lock
= FALSE
;
2170 boolean_t encountered_lrp
= FALSE
;
2172 vm_page_t alias_page
= NULL
;
2174 if(cntrl_flags
& UPL_SET_INTERNAL
)
2175 page_list_count
= MAX_UPL_TRANSFER
;
2176 if(((user_page_list_ptr
|| (cntrl_flags
& UPL_SET_INTERNAL
)) &&
2177 !(object
->private)) && (page_list_count
< (size
/page_size
)))
2178 return KERN_INVALID_ARGUMENT
;
    if((!object->internal) && (object->paging_offset != 0))
        panic("vm_fault_list_request: vnode object with non-zero paging offset\n");

    if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
        return KERN_SUCCESS;

    if((cntrl_flags & UPL_SET_INTERNAL) && !(object->private)) {
        upl = upl_create(TRUE);
        user_page_list = (upl_page_info_t *)
            (((vm_offset_t)upl) + sizeof(struct upl));
        if(user_page_list_ptr)
            *user_page_list_ptr = user_page_list;
        upl->flags |= UPL_INTERNAL;

        upl = upl_create(FALSE);
        if(user_page_list_ptr)
            user_page_list = *user_page_list_ptr;

            user_page_list = NULL;

    if(object->private) {

        upl->offset = offset;

        if(user_page_list) {
            user_page_list[0].phys_addr = offset;
            user_page_list[0].device = TRUE;

        upl->flags = UPL_DEVICE_MEMORY;
        return KERN_SUCCESS;
    upl->map_object = vm_object_allocate(size);
    vm_object_lock(upl->map_object);
    upl->map_object->shadow = object;

    upl->offset = offset + object->paging_offset;
    upl->map_object->pageout = TRUE;
    upl->map_object->can_persist = FALSE;
    upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
    upl->map_object->shadow_offset = offset;
    vm_object_unlock(upl->map_object);

    VM_PAGE_GRAB_FICTITIOUS(alias_page);
    vm_object_lock(object);

    queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UBC_DEBUG */
    vm_object_paging_begin(object);
    if(cntrl_flags & UPL_COPYOUT_FROM) {
        upl->flags |= UPL_PAGE_SYNC_DONE;

            if(alias_page == NULL) {
                vm_object_unlock(object);
                VM_PAGE_GRAB_FICTITIOUS(alias_page);
                vm_object_lock(object);

            if(((dst_page = vm_page_lookup(object,
                dst_offset)) == VM_PAGE_NULL) ||
                dst_page->fictitious ||

                (dst_page->wire_count != 0 &&
                    !dst_page->pageout) ||
                ((!(dst_page->dirty || dst_page->precious ||
                    pmap_is_modified(dst_page->phys_addr)))
                    && (cntrl_flags & UPL_RET_ONLY_DIRTY))) {

                    user_page_list[entry].phys_addr = 0;

                if(dst_page->busy &&
                    (!(dst_page->list_req_pending &&
                    dst_page->pageout))) {
                    if(cntrl_flags & UPL_NOBLOCK) {

                            user_page_list[entry]

                        dst_offset += PAGE_SIZE_64;
                        xfer_size -= PAGE_SIZE;

                    /* someone else is playing with the */
                    /* page.  We will have to wait.     */
                    PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
                    vm_object_unlock(object);
                    thread_block((void(*)(void))0);
                    vm_object_lock(object);

                /* Someone else already cleaning the page? */
                if((dst_page->cleaning || dst_page->absent ||
                    dst_page->prep_pin_count != 0 ||
                    dst_page->wire_count != 0) &&
                    !dst_page->list_req_pending) {

                        user_page_list[entry].phys_addr = 0;

                    dst_offset += PAGE_SIZE_64;
                    xfer_size -= PAGE_SIZE;

                /* eliminate all mappings from the */
                /* original object and its progeny */

                vm_page_lock_queues();
                pmap_page_protect(dst_page->phys_addr,

                /* pageout statistics gathering.  count */
                /* all the pages we will page out that  */
                /* were not counted in the initial      */
                /* vm_pageout_scan work                 */
                if(dst_page->list_req_pending)
                    encountered_lrp = TRUE;
                if((dst_page->dirty ||
                    (dst_page->object->internal &&
                    dst_page->precious)) &&
                    (dst_page->list_req_pending

                    if(encountered_lrp) {
                        CLUSTER_STAT(pages_at_higher_offsets++;)

                        CLUSTER_STAT(pages_at_lower_offsets++;)

                /* Turn off busy indication on pending  */
                /* pageout.  Note: we can only get here */
                /* in the request pending case.         */
                dst_page->list_req_pending = FALSE;
                dst_page->busy = FALSE;
                dst_page->cleaning = FALSE;

                dirty = pmap_is_modified(dst_page->phys_addr);
                dirty = dirty ? TRUE : dst_page->dirty;

                /* use pageclean setup, it is more convenient */
                /* even for the pageout cases here            */
                vm_pageclean_setup(dst_page, alias_page,
                    upl->map_object, size - xfer_size);

                    dst_page->dirty = FALSE;
                    dst_page->precious = TRUE;

                if(dst_page->pageout)
                    dst_page->busy = TRUE;

                alias_page->absent = FALSE;

                if(!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
                    /* deny access to the target page */
                    /* while it is being worked on    */
                    if((!dst_page->pageout) &&
                        (dst_page->wire_count == 0)) {
                        dst_page->busy = TRUE;
                        dst_page->pageout = TRUE;
                        vm_page_wire(dst_page);

                if(user_page_list) {
                    user_page_list[entry].phys_addr
                        = dst_page->phys_addr;
                    user_page_list[entry].dirty =
                            dst_page->dirty;
                    user_page_list[entry].pageout =
                            dst_page->pageout;
                    user_page_list[entry].absent =
                            dst_page->absent;
                    user_page_list[entry].precious =
                            dst_page->precious;

                vm_page_unlock_queues();

            dst_offset += PAGE_SIZE_64;
            xfer_size -= PAGE_SIZE;
            if(alias_page == NULL) {
                vm_object_unlock(object);
                VM_PAGE_GRAB_FICTITIOUS(alias_page);
                vm_object_lock(object);

            dst_page = vm_page_lookup(object, dst_offset);
            if(dst_page != VM_PAGE_NULL) {
                if((dst_page->cleaning) &&
                    !(dst_page->list_req_pending)) {
                    /* someone else is writing to the */
                    /* page.  We will have to wait.   */
                    PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
                    vm_object_unlock(object);
                    thread_block((void(*)(void))0);
                    vm_object_lock(object);

                if ((dst_page->fictitious &&
                    dst_page->list_req_pending)) {
                    /* dump the fictitious page */
                    dst_page->list_req_pending = FALSE;
                    dst_page->clustered = FALSE;
                    vm_page_lock_queues();
                    vm_page_free(dst_page);
                    vm_page_unlock_queues();
                } else if ((dst_page->absent &&
                    dst_page->list_req_pending)) {
                    /* the default_pager case */
                    dst_page->list_req_pending = FALSE;
                    dst_page->busy = FALSE;
                    dst_page->clustered = FALSE;

            if((dst_page = vm_page_lookup(
                object, dst_offset)) == VM_PAGE_NULL) {
                /* need to allocate a page */
                dst_page = vm_page_alloc(object, dst_offset);
                if (dst_page == VM_PAGE_NULL) {
                    vm_object_unlock(object);

                    vm_object_lock(object);

                dst_page->busy = FALSE;

                if(cntrl_flags & UPL_NO_SYNC) {
                    dst_page->page_lock = 0;
                    dst_page->unlock_request = 0;

                dst_page->absent = TRUE;
                object->absent_count++;

            if(cntrl_flags & UPL_NO_SYNC) {
                dst_page->page_lock = 0;
                dst_page->unlock_request = 0;

            dst_page->overwriting = TRUE;
            if(dst_page->fictitious) {
                panic("need corner case for fictitious page");

            if(dst_page->page_lock) {

            /* eliminate all mappings from the */
            /* original object and its progeny */

            if(dst_page->busy) {
                /* someone else is playing with the */
                /* page.  We will have to wait.     */
                PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
                vm_object_unlock(object);
                thread_block((void(*)(void))0);
                vm_object_lock(object);

            vm_page_lock_queues();
            pmap_page_protect(dst_page->phys_addr,

            dirty = pmap_is_modified(dst_page->phys_addr);
            dirty = dirty ? TRUE : dst_page->dirty;

            vm_pageclean_setup(dst_page, alias_page,
                upl->map_object, size - xfer_size);

            if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
                /* clean in place for read implies   */
                /* that a write will be done on all  */
                /* the pages that are dirty before   */
                /* a upl commit is done.  The caller */
                /* is obligated to preserve the      */
                /* contents of all pages marked      */
                upl->flags |= UPL_CLEAR_DIRTY;

                dst_page->dirty = FALSE;
                dst_page->precious = TRUE;

            if (dst_page->wire_count == 0) {
                /* deny access to the target page while */
                /* it is being worked on                */
                dst_page->busy = TRUE;

                vm_page_wire(dst_page);

            /* expect the page to be used */
            dst_page->reference = TRUE;
            dst_page->precious =
                (cntrl_flags & UPL_PRECIOUS)
                        ? TRUE : FALSE;

            alias_page->absent = FALSE;

            if(user_page_list) {
                user_page_list[entry].phys_addr
                    = dst_page->phys_addr;
                user_page_list[entry].dirty =
                        dst_page->dirty;
                user_page_list[entry].pageout =
                        dst_page->pageout;
                user_page_list[entry].absent =
                        dst_page->absent;
                user_page_list[entry].precious =
                        dst_page->precious;

            vm_page_unlock_queues();

            dst_offset += PAGE_SIZE_64;
            xfer_size -= PAGE_SIZE;
    if(alias_page != NULL) {
        vm_page_lock_queues();
        vm_page_free(alias_page);
        vm_page_unlock_queues();

        vm_prot_t access_required;
        /* call back all associated pages from other users of the pager */
        /* all future updates will be on data which is based on the      */
        /* changes we are going to make here.  Note: it is assumed that  */
        /* we already hold copies of the data so we will not be seeing   */
        /* an avalanche of incoming data from the pager                  */
        access_required = (cntrl_flags & UPL_COPYOUT_FROM)
            ? VM_PROT_READ : VM_PROT_WRITE;

            if(!object->pager_ready) {
                thread = current_thread();
                vm_object_assert_wait(object,
                    VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
                vm_object_unlock(object);
                thread_block((void (*)(void))0);
                if (thread->wait_result != THREAD_AWAKENED) {
                    return(KERN_FAILURE);

                vm_object_lock(object);

            vm_object_unlock(object);

            if (rc = memory_object_data_unlock(
                object->pager_request,
                dst_offset + object->paging_offset,

                if (rc == MACH_SEND_INTERRUPTED)

                    return KERN_FAILURE;

        /* let's wait on the last page requested */
        /* NOTE: we will have to update lock completed routine to signal */
        if(dst_page != VM_PAGE_NULL &&
            (access_required & dst_page->page_lock) != access_required) {
            PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
            thread_block((void (*)(void))0);
            vm_object_lock(object);

    vm_object_unlock(object);
    return KERN_SUCCESS;
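
/*
 * Illustrative sketch (not part of the original source): one plausible way a
 * supplier of backing store might drive vm_fault_list_request() to collect
 * the dirty pages of a range for write-back.  The argument order follows the
 * call made from upl_system_list_request() below; the commit call's parameter
 * order (upl, offset, size, flags, page_list) is assumed from the variables
 * used inside uc_upl_commit_range() further down, and the helper name and the
 * object/offset/size values are hypothetical.
 */
#if 0   /* example only -- not compiled */
static kern_return_t
example_push_dirty_range(
    vm_object_t         object,     /* hypothetical caller-owned object */
    vm_object_offset_t  offset,     /* page-aligned start of the range  */
    vm_size_t           size)       /* length of the range, in bytes    */
{
    upl_t           upl;
    upl_page_info_t *page_list;
    kern_return_t   kr;

    /* ask for an internal page list and only the pages that are dirty */
    kr = vm_fault_list_request(object, offset, size, &upl, &page_list,
            MAX_UPL_TRANSFER,
            UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_RET_ONLY_DIRTY);
    if (kr != KERN_SUCCESS)
        return kr;

    /* ... write the pages described by page_list to backing store ... */

    /* resolve the busy/cleaning state and drop the upl when it empties */
    return uc_upl_commit_range(upl, 0, size, UPL_COMMIT_FREE_ON_EMPTY,
            page_list);
}
#endif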
upl_system_list_request(
    vm_object_offset_t  offset,
    vm_size_t           super_cluster,
    upl_page_info_t     **user_page_list_ptr,
    int                 page_list_count,

    if(object->paging_offset > offset)
        return KERN_FAILURE;

    offset = offset - object->paging_offset;

    /* turns off super cluster exercised by the default_pager */

    super_cluster = size;

    if ((super_cluster > size) &&
        (vm_page_free_count > vm_page_free_reserved)) {

        vm_object_offset_t  base_offset;
        vm_size_t           super_size;

        base_offset = (offset &
            ~((vm_object_offset_t) super_cluster - 1));
        super_size = (offset + size) > (base_offset + super_cluster) ?
            super_cluster<<1 : super_cluster;
        super_size = ((base_offset + super_size) > object->size) ?
            (object->size - base_offset) : super_size;
        if(offset > (base_offset + super_size))
            panic("upl_system_list_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
                offset, base_offset, super_size, super_cluster,
                size, object->paging_offset);
        /* apparently there is a case where the vm requests a */
        /* page to be written out whose offset is beyond the  */

        if((offset + size) > (base_offset + super_size))
            super_size = (offset + size) - base_offset;

        offset = base_offset;

    vm_fault_list_request(object, offset, size, upl, user_page_list_ptr,
        page_list_count, cntrl_flags);
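
/*
 * Worked example of the super-cluster rounding above (the numbers are
 * hypothetical, chosen only to illustrate the arithmetic).  With
 * offset = 0x3000, size = 0x2000 and super_cluster = 0x10000 (64KB):
 *
 *	base_offset = 0x3000 & ~(0x10000 - 1)            = 0x0
 *	offset+size = 0x5000 <= base_offset + 0x10000    so super_size = 0x10000
 *
 * i.e. the start of the request is rounded down to the enclosing
 * 64KB-aligned cluster boundary, super_size describes the widened range
 * (clipped to the object size), and vm_fault_list_request() is then called
 * with offset = base_offset.
 */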
    vm_offset_t     *dst_addr)

    vm_object_offset_t  offset;

    /* check to see if already mapped */
    if(UPL_PAGE_LIST_MAPPED & upl->flags)
        return KERN_FAILURE;

    offset = 0;  /* Always map the entire object */

    vm_object_lock(upl->map_object);
    upl->map_object->ref_count++;
    vm_object_res_reference(upl->map_object);
    vm_object_unlock(upl->map_object);

    /* NEED A UPL_MAP ALIAS */
    kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE,
        upl->map_object, offset, FALSE,
        VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

    if (kr != KERN_SUCCESS)

    for(addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
        m = vm_page_lookup(upl->map_object, offset);

            PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, TRUE);

        offset += PAGE_SIZE_64;

    upl->flags |= UPL_PAGE_LIST_MAPPED;
    upl->kaddr = *dst_addr;
    return KERN_SUCCESS;
    if(upl->flags & UPL_PAGE_LIST_MAPPED) {

        vm_deallocate(map, upl->kaddr, size);
        upl->flags &= ~UPL_PAGE_LIST_MAPPED;
        upl->kaddr = (vm_offset_t) 0;
        return KERN_SUCCESS;

    return KERN_FAILURE;
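
/*
 * Illustrative sketch (not part of the original source): the expected pairing
 * of the map and un-map paths above.  The names uc_upl_map/uc_upl_un_map and
 * their full parameter lists are not visible in this fragment and are assumed
 * here; kernel_map stands in for whatever map the caller actually uses.
 */
#if 0   /* example only -- not compiled */
    vm_offset_t dst_addr;

    if (uc_upl_map(kernel_map, upl, &dst_addr) == KERN_SUCCESS) {
        /* ... access the upl's pages through dst_addr ... */
        uc_upl_un_map(kernel_map, upl);
    }
#endif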
uc_upl_commit_range(
    upl_page_info_t *page_list)

    vm_size_t           xfer_size = size;
    vm_object_t         shadow_object = upl->map_object->shadow;
    vm_object_t         object = upl->map_object;
    vm_object_offset_t  target_offset;
    vm_object_offset_t  page_offset;

    if(upl->flags & UPL_DEVICE_MEMORY) {

    } else if ((offset + size) > upl->size) {
        return KERN_FAILURE;

    vm_object_lock(shadow_object);

    entry = offset/PAGE_SIZE;
    target_offset = (vm_object_offset_t)offset;

        if((t = vm_page_lookup(object, target_offset)) != NULL) {

            page_offset = t->offset;

            m = vm_page_lookup(shadow_object,
                page_offset + object->shadow_offset);
            if(m != VM_PAGE_NULL) {
                vm_object_paging_end(shadow_object);
                vm_page_lock_queues();
                if ((upl->flags & UPL_CLEAR_DIRTY) ||
                    (flags & UPL_COMMIT_CLEAR_DIRTY)) {
                    pmap_clear_modify(m->phys_addr);

                    p = &(page_list[entry]);
                    if(p->phys_addr && p->pageout && !m->pageout) {

                    } else if (page_list[entry].phys_addr &&
                        !p->pageout && m->pageout) {

                        m->overwriting = FALSE;

                        PAGE_WAKEUP_DONE(m);

                    page_list[entry].phys_addr = 0;

                    vm_page_laundry_count--;

                    if (vm_page_laundry_count < vm_page_laundry_min) {
                        vm_page_laundry_min = 0;
                        thread_wakeup((event_t)
                            &vm_page_laundry_count);

                    m->cleaning = FALSE;

#if MACH_CLUSTER_STATS
                    if (m->wanted) vm_pageout_target_collisions++;

                    pmap_page_protect(m->phys_addr, VM_PROT_NONE);
                    m->dirty = pmap_is_modified(m->phys_addr);

                        CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
                        vm_page_unwire(m);  /* reactivates */
                        VM_STAT(reactivations++);
                        PAGE_WAKEUP_DONE(m);
                    } else if (m->prep_pin_count != 0) {

                        if (m->pin_count != 0) {
                            /* page is pinned; reactivate */
                            CLUSTER_STAT(vm_pageout_target_page_pinned++;)
                            vm_page_unwire(m);  /* reactivates */
                            VM_STAT(reactivations++);
                            PAGE_WAKEUP_DONE(m);

                            /*
                             * page is prepped but not pinned;
                             * send it into limbo.  Note that
                             * vm_page_free (which will be
                             * called after releasing the pin
                             * lock) knows how to handle a page
                             */
                            CLUSTER_STAT(vm_pageout_target_page_limbo++;)

                        vm_page_pin_unlock();

                        CLUSTER_STAT(vm_pageout_target_page_freed++;)
                        vm_page_free(m);    /* clears busy, etc. */

                    vm_page_unlock_queues();
                    target_offset += PAGE_SIZE_64;
                    xfer_size -= PAGE_SIZE;

                if (flags & UPL_COMMIT_INACTIVATE) {
                    vm_page_deactivate(m);
                    m->reference = FALSE;
                    pmap_clear_reference(m->phys_addr);
                } else if (!m->active && !m->inactive) {
                    if (m->reference || m->prep_pin_count != 0)
                        vm_page_activate(m);

                        vm_page_deactivate(m);

#if MACH_CLUSTER_STATS
                m->dirty = pmap_is_modified(m->phys_addr);

                if (m->dirty)   vm_pageout_cluster_dirtied++;
                else            vm_pageout_cluster_cleaned++;
                if (m->wanted)  vm_pageout_cluster_collisions++;

                if((m->busy) && (m->cleaning)) {
                    /* the request_page_list case */

                    if(shadow_object->absent_count == 1)
                        vm_object_absent_release(shadow_object);

                        shadow_object->absent_count--;

                    m->overwriting = FALSE;

                else if (m->overwriting) {
                    /* alternate request page list, write to     */
                    /* page_list case.  Occurs when the original */
                    /* page was wired at the time of the list    */
                    assert(m->wire_count != 0);
                    vm_page_unwire(m);  /* reactivates */
                    m->overwriting = FALSE;

                m->cleaning = FALSE;

                /* It is a part of the semantic of COPYOUT_FROM */
                /* UPLs that a commit implies cache sync        */
                /* between the vm page and the backing store;   */
                /* this can be used to strip the precious bit   */
                /* as well as clean.                            */
                if (upl->flags & UPL_PAGE_SYNC_DONE)
                    m->precious = FALSE;

                if (flags & UPL_COMMIT_SET_DIRTY) {

                 * Wakeup any thread waiting for the page to be un-cleaning.

                vm_page_unlock_queues();

        target_offset += PAGE_SIZE_64;
        xfer_size -= PAGE_SIZE;

    vm_object_unlock(shadow_object);
    if(flags & UPL_COMMIT_FREE_ON_EMPTY) {
        if((upl->flags & UPL_DEVICE_MEMORY)
            || (queue_empty(&upl->map_object->memq))) {

    return KERN_SUCCESS;
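
/*
 * Descriptive note (not original source text), summarizing the flag handling
 * visible in the commit path above: UPL_COMMIT_CLEAR_DIRTY (or a upl already
 * marked UPL_CLEAR_DIRTY) causes pmap_clear_modify() on each page before its
 * state is resolved; UPL_COMMIT_INACTIVATE deactivates the page and clears
 * its reference state; UPL_COMMIT_SET_DIRTY is tested just before the final
 * wakeup; and UPL_COMMIT_FREE_ON_EMPTY is honoured once the upl's map object
 * has no pages queued, or immediately for device memory.
 */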
    vm_size_t           xfer_size = size;
    vm_object_t         shadow_object = upl->map_object->shadow;
    vm_object_t         object = upl->map_object;
    vm_object_offset_t  target_offset;
    vm_object_offset_t  page_offset;

    if(upl->flags & UPL_DEVICE_MEMORY) {

    } else if ((offset + size) > upl->size) {
        return KERN_FAILURE;

    vm_object_lock(shadow_object);

    entry = offset/PAGE_SIZE;
    target_offset = (vm_object_offset_t)offset;

        if((t = vm_page_lookup(object, target_offset)) != NULL) {

            page_offset = t->offset;

            m = vm_page_lookup(shadow_object,
                page_offset + object->shadow_offset);
            if(m != VM_PAGE_NULL) {
                vm_object_paging_end(m->object);
                vm_page_lock_queues();

                /* COPYOUT = FALSE case */
                /* check for error conditions which must */
                /* be passed back to the page's customer */
                if(error & UPL_ABORT_RESTART) {

                    vm_object_absent_release(m->object);
                    m->page_error = KERN_MEMORY_ERROR;

                } else if(error & UPL_ABORT_UNAVAILABLE) {

                    m->clustered = FALSE;
                } else if(error & UPL_ABORT_ERROR) {

                    vm_object_absent_release(m->object);
                    m->page_error = KERN_MEMORY_ERROR;

                } else if(error & UPL_ABORT_DUMP_PAGES) {
                    m->clustered = TRUE;

                    m->clustered = TRUE;

                m->cleaning = FALSE;
                m->overwriting = FALSE;
                PAGE_WAKEUP_DONE(m);

                    vm_page_activate(m);

                vm_page_unlock_queues();
                target_offset += PAGE_SIZE_64;
                xfer_size -= PAGE_SIZE;

                /*
                 * Handle the trusted pager throttle.
                 */
                vm_page_laundry_count--;

                if (vm_page_laundry_count
                        < vm_page_laundry_min) {
                    vm_page_laundry_min = 0;
                    thread_wakeup((event_t)
                        &vm_page_laundry_count);

                assert(m->wire_count == 1);

                m->cleaning = FALSE;

                m->overwriting = FALSE;

                vm_external_state_clr(
                    m->object->existence_map, m->offset);
#endif /* MACH_PAGEMAP */
                if(error & UPL_ABORT_DUMP_PAGES) {

                    pmap_page_protect(m->phys_addr, VM_PROT_NONE);

                vm_page_unlock_queues();

        target_offset += PAGE_SIZE_64;
        xfer_size -= PAGE_SIZE;

    vm_object_unlock(shadow_object);
    if(error & UPL_ABORT_FREE_ON_EMPTY) {
        if((upl->flags & UPL_DEVICE_MEMORY)
            || (queue_empty(&upl->map_object->memq))) {

    return KERN_SUCCESS;
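
/*
 * Descriptive note (not original source text), summarizing the error flags
 * visible in the abort path above: UPL_ABORT_RESTART and UPL_ABORT_ERROR both
 * release the object's absent count and mark the page with KERN_MEMORY_ERROR;
 * UPL_ABORT_UNAVAILABLE clears the page's clustered bit; UPL_ABORT_DUMP_PAGES
 * sets it, and in the later branch also removes the page's pmap mappings; and
 * UPL_ABORT_FREE_ON_EMPTY parallels the commit path, taking effect once the
 * map object's queue is empty or the upl describes device memory.
 */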
    vm_object_t         object = NULL;
    vm_object_t         shadow_object = NULL;
    vm_object_offset_t  offset;
    vm_object_offset_t  shadow_offset;
    vm_object_offset_t  target_offset;

    if(upl->flags & UPL_DEVICE_MEMORY) {

        return KERN_SUCCESS;

    object = upl->map_object;

    if(object == NULL) {
        panic("upl_abort: upl object is not backed by an object");
        return KERN_INVALID_ARGUMENT;

    shadow_object = upl->map_object->shadow;
    shadow_offset = upl->map_object->shadow_offset;

    vm_object_lock(shadow_object);
    for(i = 0; i < (upl->size); i += PAGE_SIZE, offset += PAGE_SIZE_64) {
        if((t = vm_page_lookup(object, offset)) != NULL) {
            target_offset = t->offset + shadow_offset;
            if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) {
                vm_object_paging_end(m->object);
                vm_page_lock_queues();

                /* COPYOUT = FALSE case */
                /* check for error conditions which must */
                /* be passed back to the page's customer */
                if(error & UPL_ABORT_RESTART) {

                    vm_object_absent_release(m->object);
                    m->page_error = KERN_MEMORY_ERROR;

                } else if(error & UPL_ABORT_UNAVAILABLE) {

                    m->clustered = FALSE;
                } else if(error & UPL_ABORT_ERROR) {

                    vm_object_absent_release(m->object);
                    m->page_error = KERN_MEMORY_ERROR;

                } else if(error & UPL_ABORT_DUMP_PAGES) {
                    m->clustered = TRUE;

                    m->clustered = TRUE;

                m->cleaning = FALSE;
                m->overwriting = FALSE;
                PAGE_WAKEUP_DONE(m);

                    vm_page_activate(m);

                vm_page_unlock_queues();

                /*
                 * Handle the trusted pager throttle.
                 */
                vm_page_laundry_count--;

                if (vm_page_laundry_count
                        < vm_page_laundry_min) {
                    vm_page_laundry_min = 0;
                    thread_wakeup((event_t)
                        &vm_page_laundry_count);

                assert(m->wire_count == 1);

                m->cleaning = FALSE;

                m->overwriting = FALSE;

                vm_external_state_clr(
                    m->object->existence_map, m->offset);
#endif /* MACH_PAGEMAP */
                if(error & UPL_ABORT_DUMP_PAGES) {

                    pmap_page_protect(m->phys_addr, VM_PROT_NONE);

                vm_page_unlock_queues();

    vm_object_unlock(shadow_object);
    /* Remove all the pages from the map object so       */
    /* vm_pageout_object_terminate will work properly.   */
    while (!queue_empty(&upl->map_object->memq)) {

        p = (vm_page_t) queue_first(&upl->map_object->memq);

        assert(!p->cleaning);

    return KERN_SUCCESS;
/* an option on commit should be wire */

    upl_page_info_t *page_list)

    if (upl->flags & UPL_DEVICE_MEMORY)

    if ((upl->flags & UPL_CLEAR_DIRTY) ||
        (upl->flags & UPL_PAGE_SYNC_DONE)) {
        vm_object_t         shadow_object = upl->map_object->shadow;
        vm_object_t         object = upl->map_object;
        vm_object_offset_t  target_offset;

        vm_object_lock(shadow_object);

        target_offset = object->shadow_offset;
        xfer_end = upl->size + object->shadow_offset;

        while(target_offset < xfer_end) {
            if ((t = vm_page_lookup(object,
                target_offset - object->shadow_offset))

            m = vm_page_lookup(shadow_object, target_offset);
            if(m != VM_PAGE_NULL) {
                if (upl->flags & UPL_CLEAR_DIRTY) {
                    pmap_clear_modify(m->phys_addr);

                /* It is a part of the semantic of */
                /* COPYOUT_FROM UPLs that a commit */
                /* implies cache sync between the  */
                /* vm page and the backing store;  */
                /* this can be used to strip the   */
                /* precious bit as well as clean.  */
                if (upl->flags & UPL_PAGE_SYNC_DONE)
                    m->precious = FALSE;

            target_offset += PAGE_SIZE_64;

        vm_object_unlock(shadow_object);

        vm_object_t         shadow_object = upl->map_object->shadow;
        vm_object_t         object = upl->map_object;
        vm_object_offset_t  target_offset;

        vm_object_lock(shadow_object);

        target_offset = object->shadow_offset;
        xfer_end = upl->size + object->shadow_offset;

        while(target_offset < xfer_end) {

            if ((t = vm_page_lookup(object,
                target_offset - object->shadow_offset))

                target_offset += PAGE_SIZE_64;

            m = vm_page_lookup(shadow_object, target_offset);
            if(m != VM_PAGE_NULL) {
                p = &(page_list[entry]);
                if(page_list[entry].phys_addr &&
                    p->pageout && !m->pageout) {
                    vm_page_lock_queues();

                    vm_page_unlock_queues();
                } else if (page_list[entry].phys_addr &&
                    !p->pageout && m->pageout) {
                    vm_page_lock_queues();

                    m->overwriting = FALSE;

                    PAGE_WAKEUP_DONE(m);
                    vm_page_unlock_queues();

                page_list[entry].phys_addr = 0;

            target_offset += PAGE_SIZE_64;

        vm_object_unlock(shadow_object);

    return KERN_SUCCESS;
        upl = (upl_t)kalloc(sizeof(struct upl)
            + (sizeof(struct upl_page_info)*MAX_UPL_TRANSFER));

        upl = (upl_t)kalloc(sizeof(struct upl));

    upl->src_object = NULL;
    upl->kaddr = (vm_offset_t)0;

    upl->map_object = NULL;

    upl->ubc_alias1 = 0;
    upl->ubc_alias2 = 0;
#endif /* UBC_DEBUG */

    vm_object_lock(upl->map_object->shadow);
    queue_iterate(&upl->map_object->shadow->uplq,
        upl_ele, upl_t, uplq) {
        if(upl_ele == upl) {
            queue_remove(&upl->map_object->shadow->uplq,
                upl_ele, upl_t, uplq);

    vm_object_unlock(upl->map_object->shadow);

#endif /* UBC_DEBUG */
    if(!(upl->flags & UPL_DEVICE_MEMORY))
        vm_object_deallocate(upl->map_object);
    if(upl->flags & UPL_INTERNAL) {
        kfree((vm_offset_t)upl,
            sizeof(struct upl) +
            (sizeof(struct upl_page_info) * MAX_UPL_TRANSFER));

        kfree((vm_offset_t)upl, sizeof(struct upl));

upl_get_internal_pagelist_offset()

    return sizeof(struct upl);
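
/*
 * Illustrative sketch (not part of the original source): for an internal upl
 * the page list is laid out immediately after the upl structure itself, which
 * is what vm_fault_list_request() sets up when UPL_SET_INTERNAL is passed and
 * what upl_get_internal_pagelist_offset() reports.
 */
#if 0   /* example only -- not compiled */
    upl_page_info_t *page_list;

    assert(upl->flags & UPL_INTERNAL);
    page_list = (upl_page_info_t *)
        ((vm_offset_t)upl + upl_get_internal_pagelist_offset());
#endif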
    upl->flags |= UPL_CLEAR_DIRTY;

    upl->flags &= ~UPL_CLEAR_DIRTY;

boolean_t   upl_page_present(upl_page_info_t *upl, int index);
boolean_t   upl_dirty_page(upl_page_info_t *upl, int index);
boolean_t   upl_valid_page(upl_page_info_t *upl, int index);
vm_offset_t upl_phys_page(upl_page_info_t *upl, int index);

boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
    return(UPL_PAGE_PRESENT(upl, index));
}

boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
    return(UPL_DIRTY_PAGE(upl, index));
}

boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
    return(UPL_VALID_PAGE(upl, index));
}

vm_offset_t
upl_phys_page(upl_page_info_t *upl, int index)
{
    return((vm_offset_t)UPL_PHYS_PAGE(upl, index));
}
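
/*
 * Illustrative sketch (not part of the original source): walking a upl page
 * list with the accessors defined above.  page_list and pg_count are
 * hypothetical; a real caller would obtain them from the upl machinery.
 */
#if 0   /* example only -- not compiled */
    int i;

    for (i = 0; i < pg_count; i++) {
        if (!upl_page_present(page_list, i))
            continue;
        if (upl_valid_page(page_list, i) && upl_dirty_page(page_list, i))
            printf("page %d at 0x%x is dirty\n",
                i, upl_phys_page(page_list, i));
    }
#endif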
void vm_countdirtypages(void)

    vm_page_lock_queues();
    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    do {
        if (m == (vm_page_t)0) break;

        if(m->dirty) dpages++;
        if(m->pageout) pgopages++;
        if(m->precious) precpages++;

        m = (vm_page_t) queue_next(&m->pageq);
        if (m == (vm_page_t)0) break;

    } while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m));
    vm_page_unlock_queues();

    printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

    vm_page_lock_queues();
    m = (vm_page_t) queue_first(&vm_page_queue_active);
    do {
        if(m == (vm_page_t)0) break;
        if(m->dirty) dpages++;
        if(m->pageout) pgopages++;
        if(m->precious) precpages++;

        m = (vm_page_t) queue_next(&m->pageq);
        if(m == (vm_page_t)0) break;

    } while (!queue_end(&vm_page_queue_active, (queue_entry_t) m));
    vm_page_unlock_queues();

    printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);

#endif /* MACH_BSD */
kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)

    upl->ubc_alias1 = alias1;
    upl->ubc_alias2 = alias2;
    return KERN_SUCCESS;

int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)

    *al = upl->ubc_alias1;

    *al2 = upl->ubc_alias2;
    return KERN_SUCCESS;

#endif /* UBC_DEBUG */
#include <ddb/db_output.h>
#include <ddb/db_print.h>
#include <vm/vm_print.h>

#define printf  kdbprintf
extern int  db_indent;
void        db_pageout(void);

    extern int vm_page_gobble_count;
    extern int vm_page_limbo_count, vm_page_limbo_real_count;
    extern int vm_page_pin_count;

    iprintf("VM Statistics:\n");

    iprintf("pages:\n");

    iprintf("activ %5d inact %5d free %5d",
        vm_page_active_count, vm_page_inactive_count,
        vm_page_free_count);
    printf(" wire %5d gobbl %5d\n",
        vm_page_wire_count, vm_page_gobble_count);
    iprintf("laund %5d limbo %5d lim_r %5d pin %5d\n",
        vm_page_laundry_count, vm_page_limbo_count,
        vm_page_limbo_real_count, vm_page_pin_count);

    iprintf("target:\n");

    iprintf("min %5d inact %5d free %5d",
        vm_page_free_min, vm_page_inactive_target,
        vm_page_free_target);
    printf(" resrv %5d\n", vm_page_free_reserved);

    iprintf("burst:\n");

    iprintf("max %5d min %5d wait %5d empty %5d\n",
        vm_pageout_burst_max, vm_pageout_burst_min,
        vm_pageout_burst_wait, vm_pageout_empty_wait);

    iprintf("pause:\n");

    iprintf("count %5d max %5d\n",
        vm_pageout_pause_count, vm_pageout_pause_max);

    iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue);
#endif /* MACH_COUNTERS */

    extern int c_limbo_page_free;
    extern int c_limbo_convert;

    extern int c_laundry_pages_freed;
#endif /* MACH_COUNTERS */

    iprintf("Pageout Statistics:\n");

    iprintf("active %5d inactv %5d\n",
        vm_pageout_active, vm_pageout_inactive);
    iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
        vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
        vm_pageout_inactive_busy, vm_pageout_inactive_absent);
    iprintf("used %5d clean %5d dirty %5d\n",
        vm_pageout_inactive_used, vm_pageout_inactive_clean,
        vm_pageout_inactive_dirty);
    iprintf("pinned %5d limbo %5d setup_limbo %5d setup_unprep %5d\n",
        vm_pageout_inactive_pinned, vm_pageout_inactive_limbo,
        vm_pageout_setup_limbo, vm_pageout_setup_unprepped);
    iprintf("limbo_page_free %5d limbo_convert %5d\n",
        c_limbo_page_free, c_limbo_convert);

    iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
#endif /* MACH_COUNTERS */
#if MACH_CLUSTER_STATS
    iprintf("Cluster Statistics:\n");

    iprintf("dirtied %5d cleaned %5d collisions %5d\n",
        vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
        vm_pageout_cluster_collisions);
    iprintf("clusters %5d conversions %5d\n",
        vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);

    iprintf("Target Statistics:\n");

    iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
        vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
        vm_pageout_target_page_freed);
    iprintf("page_pinned %5d page_limbo %5d\n",
        vm_pageout_target_page_pinned, vm_pageout_target_page_limbo);

#endif /* MACH_CLUSTER_STATS */

#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
unsigned long vm_pageout_target_page_pinned = 0;
unsigned long vm_pageout_target_page_limbo = 0;
#define CLUSTER_STAT(clause)    clause
#else  /* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif /* MACH_CLUSTER_STATS */

#endif /* MACH_KDB */