2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 * Carnegie Mellon requests users of this software to return to
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
56 * File: vm/vm_pageout.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
60 * The proverbial page-out daemon.
63 #include <mach_pagemap.h>
64 #include <mach_cluster_stats.h>
66 #include <advisory_pageout.h>
68 #include <mach/mach_types.h>
69 #include <mach/memory_object.h>
70 #include <mach/memory_object_default.h>
71 #include <mach/memory_object_control_server.h>
72 #include <mach/mach_host_server.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <kern/host_statistics.h>
76 #include <kern/counters.h>
77 #include <kern/thread.h>
80 #include <vm/vm_fault.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_pageout.h>
85 #include <machine/vm_tuning.h>
86 #include <kern/misc_protos.h>
88 extern ipc_port_t memory_manager_default
;
90 #ifndef VM_PAGE_LAUNDRY_MAX
91 #define VM_PAGE_LAUNDRY_MAX 6 /* outstanding DMM page cleans */
92 #endif /* VM_PAGEOUT_LAUNDRY_MAX */
94 #ifndef VM_PAGEOUT_BURST_MAX
95 #define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */
96 #endif /* VM_PAGEOUT_BURST_MAX */
98 #ifndef VM_PAGEOUT_DISCARD_MAX
99 #define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */
100 #endif /* VM_PAGEOUT_DISCARD_MAX */
102 #ifndef VM_PAGEOUT_BURST_WAIT
103 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
104 #endif /* VM_PAGEOUT_BURST_WAIT */
106 #ifndef VM_PAGEOUT_EMPTY_WAIT
107 #define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
108 #endif /* VM_PAGEOUT_EMPTY_WAIT */
111 * To obtain a reasonable LRU approximation, the inactive queue
112 * needs to be large enough to give pages on it a chance to be
113 * referenced a second time. This macro defines the fraction
114 * of active+inactive pages that should be inactive.
115 * The pageout daemon uses it to update vm_page_inactive_target.
117 * If vm_page_free_count falls below vm_page_free_target and
118 * vm_page_inactive_count is below vm_page_inactive_target,
119 * then the pageout daemon starts running.
122 #ifndef VM_PAGE_INACTIVE_TARGET
123 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
124 #endif /* VM_PAGE_INACTIVE_TARGET */
127 * Once the pageout daemon starts running, it keeps going
128 * until vm_page_free_count meets or exceeds vm_page_free_target.
131 #ifndef VM_PAGE_FREE_TARGET
132 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
133 #endif /* VM_PAGE_FREE_TARGET */
136 * The pageout daemon always starts running once vm_page_free_count
137 * falls below vm_page_free_min.
140 #ifndef VM_PAGE_FREE_MIN
141 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
142 #endif /* VM_PAGE_FREE_MIN */
145 * When vm_page_free_count falls below vm_page_free_reserved,
146 * only vm-privileged threads can allocate pages. vm-privilege
147 * allows the pageout daemon and default pager (and any other
148 * associated threads needed for default pageout) to continue
149 * operation by dipping into the reserved pool of pages.
152 #ifndef VM_PAGE_FREE_RESERVED
153 #define VM_PAGE_FREE_RESERVED \
154 ((16 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
155 #endif /* VM_PAGE_FREE_RESERVED */
158 * Exported variable used to broadcast the activation of the pageout scan
159 * Working Set uses this to throttle its use of pmap removes. In this
160 * way, code which runs within memory in an uncontested context does
161 * not keep encountering soft faults.
164 unsigned int vm_pageout_scan_event_counter
= 0;
167 * Forward declarations for internal routines.
169 extern void vm_pageout_continue(void);
170 extern void vm_pageout_scan(void);
171 extern void vm_pageout_throttle(vm_page_t m
);
172 extern vm_page_t
vm_pageout_cluster_page(
174 vm_object_offset_t offset
,
175 boolean_t precious_clean
);
177 unsigned int vm_pageout_reserved_internal
= 0;
178 unsigned int vm_pageout_reserved_really
= 0;
180 unsigned int vm_page_laundry_max
= 0; /* # of clusters outstanding */
181 unsigned int vm_page_laundry_min
= 0;
182 unsigned int vm_pageout_burst_max
= 0;
183 unsigned int vm_pageout_burst_wait
= 0; /* milliseconds per page */
184 unsigned int vm_pageout_empty_wait
= 0; /* milliseconds */
185 unsigned int vm_pageout_burst_min
= 0;
186 unsigned int vm_pageout_pause_count
= 0;
187 unsigned int vm_pageout_pause_max
= 0;
188 unsigned int vm_free_page_pause
= 100; /* milliseconds */
191 * Protection against zero fill flushing live working sets derived
192 * from existing backing store and files
194 unsigned int vm_accellerate_zf_pageout_trigger
= 400;
195 unsigned int vm_zf_iterator
;
196 unsigned int vm_zf_iterator_count
= 40;
197 unsigned int last_page_zf
;
198 unsigned int vm_zf_count
= 0;
201 * These variables record the pageout daemon's actions:
202 * how many pages it looks at and what happens to those pages.
203 * No locking needed because only one thread modifies the variables.
206 unsigned int vm_pageout_active
= 0; /* debugging */
207 unsigned int vm_pageout_inactive
= 0; /* debugging */
208 unsigned int vm_pageout_inactive_throttled
= 0; /* debugging */
209 unsigned int vm_pageout_inactive_forced
= 0; /* debugging */
210 unsigned int vm_pageout_inactive_nolock
= 0; /* debugging */
211 unsigned int vm_pageout_inactive_avoid
= 0; /* debugging */
212 unsigned int vm_pageout_inactive_busy
= 0; /* debugging */
213 unsigned int vm_pageout_inactive_absent
= 0; /* debugging */
214 unsigned int vm_pageout_inactive_used
= 0; /* debugging */
215 unsigned int vm_pageout_inactive_clean
= 0; /* debugging */
216 unsigned int vm_pageout_inactive_dirty
= 0; /* debugging */
217 unsigned int vm_pageout_dirty_no_pager
= 0; /* debugging */
218 unsigned int vm_stat_discard
= 0; /* debugging */
219 unsigned int vm_stat_discard_sent
= 0; /* debugging */
220 unsigned int vm_stat_discard_failure
= 0; /* debugging */
221 unsigned int vm_stat_discard_throttle
= 0; /* debugging */
222 unsigned int vm_pageout_scan_active_emm_throttle
= 0; /* debugging */
223 unsigned int vm_pageout_scan_active_emm_throttle_success
= 0; /* debugging */
224 unsigned int vm_pageout_scan_active_emm_throttle_failure
= 0; /* debugging */
225 unsigned int vm_pageout_scan_inactive_emm_throttle
= 0; /* debugging */
226 unsigned int vm_pageout_scan_inactive_emm_throttle_success
= 0; /* debugging */
227 unsigned int vm_pageout_scan_inactive_emm_throttle_failure
= 0; /* debugging */
230 unsigned int vm_pageout_out_of_line
= 0;
231 unsigned int vm_pageout_in_place
= 0;
233 * Routine: vm_pageout_object_allocate
235 * Allocate an object for use as out-of-line memory in a
236 * data_return/data_initialize message.
237 * The page must be in an unlocked object.
239 * If the page belongs to a trusted pager, cleaning in place
240 * will be used, which utilizes a special "pageout object"
241 * containing private alias pages for the real page frames.
242 * Untrusted pagers use normal out-of-line memory.
245 vm_pageout_object_allocate(
248 vm_object_offset_t offset
)
250 vm_object_t object
= m
->object
;
251 vm_object_t new_object
;
253 assert(object
->pager_ready
);
255 if (object
->pager_trusted
|| object
->internal
)
256 vm_pageout_throttle(m
);
258 new_object
= vm_object_allocate(size
);
260 if (object
->pager_trusted
) {
261 assert (offset
< object
->size
);
263 vm_object_lock(new_object
);
264 new_object
->pageout
= TRUE
;
265 new_object
->shadow
= object
;
266 new_object
->can_persist
= FALSE
;
267 new_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
268 new_object
->shadow_offset
= offset
;
269 vm_object_unlock(new_object
);
272 * Take a paging reference on the object. This will be dropped
273 * in vm_pageout_object_terminate()
275 vm_object_lock(object
);
276 vm_object_paging_begin(object
);
277 vm_object_unlock(object
);
279 vm_pageout_in_place
++;
281 vm_pageout_out_of_line
++;
285 #if MACH_CLUSTER_STATS
286 unsigned long vm_pageout_cluster_dirtied
= 0;
287 unsigned long vm_pageout_cluster_cleaned
= 0;
288 unsigned long vm_pageout_cluster_collisions
= 0;
289 unsigned long vm_pageout_cluster_clusters
= 0;
290 unsigned long vm_pageout_cluster_conversions
= 0;
291 unsigned long vm_pageout_target_collisions
= 0;
292 unsigned long vm_pageout_target_page_dirtied
= 0;
293 unsigned long vm_pageout_target_page_freed
= 0;
294 #define CLUSTER_STAT(clause) clause
295 #else /* MACH_CLUSTER_STATS */
296 #define CLUSTER_STAT(clause)
297 #endif /* MACH_CLUSTER_STATS */
300 * Routine: vm_pageout_object_terminate
302 * Destroy the pageout_object allocated by
303 * vm_pageout_object_allocate(), and perform all of the
304 * required cleanup actions.
307 * The object must be locked, and will be returned locked.
310 vm_pageout_object_terminate(
313 vm_object_t shadow_object
;
316 * Deal with the deallocation (last reference) of a pageout object
317 * (used for cleaning-in-place) by dropping the paging references/
318 * freeing pages in the original object.
321 assert(object
->pageout
);
322 shadow_object
= object
->shadow
;
323 vm_object_lock(shadow_object
);
325 while (!queue_empty(&object
->memq
)) {
327 vm_object_offset_t offset
;
329 p
= (vm_page_t
) queue_first(&object
->memq
);
334 assert(!p
->cleaning
);
340 m
= vm_page_lookup(shadow_object
,
341 offset
+ object
->shadow_offset
);
343 if(m
== VM_PAGE_NULL
)
346 /* used as a trigger on upl_commit etc to recognize the */
347 /* pageout daemon's subseqent desire to pageout a cleaning */
348 /* page. When the bit is on the upl commit code will */
349 /* respect the pageout bit in the target page over the */
350 /* caller's page list indication */
351 m
->dump_cleaning
= FALSE
;
354 * Account for the paging reference taken when
355 * m->cleaning was set on this page.
357 vm_object_paging_end(shadow_object
);
358 assert((m
->dirty
) || (m
->precious
) ||
359 (m
->busy
&& m
->cleaning
));
362 * Handle the trusted pager throttle.
364 vm_page_lock_queues();
366 vm_page_laundry_count
--;
368 if (vm_page_laundry_count
< vm_page_laundry_min
) {
369 vm_page_laundry_min
= 0;
370 thread_wakeup((event_t
) &vm_page_laundry_count
);
375 * Handle the "target" page(s). These pages are to be freed if
376 * successfully cleaned. Target pages are always busy, and are
377 * wired exactly once. The initial target pages are not mapped,
378 * (so cannot be referenced or modified) but converted target
379 * pages may have been modified between the selection as an
380 * adjacent page and conversion to a target.
384 assert(m
->wire_count
== 1);
387 #if MACH_CLUSTER_STATS
388 if (m
->wanted
) vm_pageout_target_collisions
++;
391 * Revoke all access to the page. Since the object is
392 * locked, and the page is busy, this prevents the page
393 * from being dirtied after the pmap_is_modified() call
396 pmap_page_protect(m
->phys_page
, VM_PROT_NONE
);
399 * Since the page is left "dirty" but "not modifed", we
400 * can detect whether the page was redirtied during
401 * pageout by checking the modify state.
403 m
->dirty
= pmap_is_modified(m
->phys_page
);
406 CLUSTER_STAT(vm_pageout_target_page_dirtied
++;)
407 vm_page_unwire(m
);/* reactivates */
408 VM_STAT(reactivations
++);
411 CLUSTER_STAT(vm_pageout_target_page_freed
++;)
412 vm_page_free(m
);/* clears busy, etc. */
414 vm_page_unlock_queues();
418 * Handle the "adjacent" pages. These pages were cleaned in
419 * place, and should be left alone.
420 * If prep_pin_count is nonzero, then someone is using the
421 * page, so make it active.
423 if (!m
->active
&& !m
->inactive
&& !m
->private) {
427 vm_page_deactivate(m
);
429 if((m
->busy
) && (m
->cleaning
)) {
431 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
434 /* We do not re-set m->dirty ! */
435 /* The page was busy so no extraneous activity */
436 /* could have occured. COPY_INTO is a read into the */
437 /* new pages. CLEAN_IN_PLACE does actually write */
438 /* out the pages but handling outside of this code */
439 /* will take care of resetting dirty. We clear the */
440 /* modify however for the Programmed I/O case. */
441 pmap_clear_modify(m
->phys_page
);
444 if(shadow_object
->absent_count
== 1)
445 vm_object_absent_release(shadow_object
);
447 shadow_object
->absent_count
--;
449 m
->overwriting
= FALSE
;
450 } else if (m
->overwriting
) {
451 /* alternate request page list, write to page_list */
452 /* case. Occurs when the original page was wired */
453 /* at the time of the list request */
454 assert(m
->wire_count
!= 0);
455 vm_page_unwire(m
);/* reactivates */
456 m
->overwriting
= FALSE
;
459 * Set the dirty state according to whether or not the page was
460 * modified during the pageout. Note that we purposefully do
461 * NOT call pmap_clear_modify since the page is still mapped.
462 * If the page were to be dirtied between the 2 calls, this
463 * this fact would be lost. This code is only necessary to
464 * maintain statistics, since the pmap module is always
465 * consulted if m->dirty is false.
467 #if MACH_CLUSTER_STATS
468 m
->dirty
= pmap_is_modified(m
->phys_page
);
470 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
471 else vm_pageout_cluster_cleaned
++;
472 if (m
->wanted
) vm_pageout_cluster_collisions
++;
480 * Wakeup any thread waiting for the page to be un-cleaning.
483 vm_page_unlock_queues();
486 * Account for the paging reference taken in vm_paging_object_allocate.
488 vm_object_paging_end(shadow_object
);
489 vm_object_unlock(shadow_object
);
491 assert(object
->ref_count
== 0);
492 assert(object
->paging_in_progress
== 0);
493 assert(object
->resident_page_count
== 0);
498 * Routine: vm_pageout_setup
500 * Set up a page for pageout (clean & flush).
502 * Move the page to a new object, as part of which it will be
503 * sent to its memory manager in a memory_object_data_write or
504 * memory_object_initialize message.
506 * The "new_object" and "new_offset" arguments
507 * indicate where the page should be moved.
510 * The page in question must not be on any pageout queues,
511 * and must be busy. The object to which it belongs
512 * must be unlocked, and the caller must hold a paging
513 * reference to it. The new_object must not be locked.
515 * This routine returns a pointer to a place-holder page,
516 * inserted at the same offset, to block out-of-order
517 * requests for the page. The place-holder page must
518 * be freed after the data_write or initialize message
521 * The original page is put on a paging queue and marked
526 register vm_page_t m
,
527 register vm_object_t new_object
,
528 vm_object_offset_t new_offset
)
530 register vm_object_t old_object
= m
->object
;
531 vm_object_offset_t paging_offset
;
532 vm_object_offset_t offset
;
533 register vm_page_t holding_page
;
534 register vm_page_t new_m
;
535 register vm_page_t new_page
;
536 boolean_t need_to_wire
= FALSE
;
540 "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
541 (integer_t
)m
->object
, (integer_t
)m
->offset
,
542 (integer_t
)m
, (integer_t
)new_object
,
543 (integer_t
)new_offset
);
544 assert(m
&& m
->busy
&& !m
->absent
&& !m
->fictitious
&& !m
->error
&&
547 assert(m
->dirty
|| m
->precious
);
550 * Create a place-holder page where the old one was, to prevent
551 * attempted pageins of this page while we're unlocked.
553 VM_PAGE_GRAB_FICTITIOUS(holding_page
);
555 vm_object_lock(old_object
);
558 paging_offset
= offset
+ old_object
->paging_offset
;
560 if (old_object
->pager_trusted
) {
562 * This pager is trusted, so we can clean this page
563 * in place. Leave it in the old object, and mark it
564 * cleaning & pageout.
566 new_m
= holding_page
;
567 holding_page
= VM_PAGE_NULL
;
570 * Set up new page to be private shadow of real page.
572 new_m
->phys_page
= m
->phys_page
;
573 new_m
->fictitious
= FALSE
;
574 new_m
->pageout
= TRUE
;
577 * Mark real page as cleaning (indicating that we hold a
578 * paging reference to be released via m_o_d_r_c) and
579 * pageout (indicating that the page should be freed
580 * when the pageout completes).
582 pmap_clear_modify(m
->phys_page
);
583 vm_page_lock_queues();
584 new_m
->private = TRUE
;
590 assert(m
->wire_count
== 1);
591 vm_page_unlock_queues();
595 m
->page_lock
= VM_PROT_NONE
;
597 m
->unlock_request
= VM_PROT_NONE
;
600 * Cannot clean in place, so rip the old page out of the
601 * object, and stick the holding page in. Set new_m to the
602 * page in the new object.
604 vm_page_lock_queues();
605 VM_PAGE_QUEUES_REMOVE(m
);
608 vm_page_insert(holding_page
, old_object
, offset
);
609 vm_page_unlock_queues();
614 new_m
->page_lock
= VM_PROT_NONE
;
615 new_m
->unlock_request
= VM_PROT_NONE
;
617 if (old_object
->internal
)
621 * Record that this page has been written out
624 vm_external_state_set(old_object
->existence_map
, offset
);
625 #endif /* MACH_PAGEMAP */
627 vm_object_unlock(old_object
);
629 vm_object_lock(new_object
);
632 * Put the page into the new object. If it is a not wired
633 * (if it's the real page) it will be activated.
636 vm_page_lock_queues();
637 vm_page_insert(new_m
, new_object
, new_offset
);
641 vm_page_activate(new_m
);
642 PAGE_WAKEUP_DONE(new_m
);
643 vm_page_unlock_queues();
645 vm_object_unlock(new_object
);
648 * Return the placeholder page to simplify cleanup.
650 return (holding_page
);
654 * Routine: vm_pageclean_setup
656 * Purpose: setup a page to be cleaned (made non-dirty), but not
657 * necessarily flushed from the VM page cache.
658 * This is accomplished by cleaning in place.
660 * The page must not be busy, and the object and page
661 * queues must be locked.
668 vm_object_t new_object
,
669 vm_object_offset_t new_offset
)
671 vm_object_t old_object
= m
->object
;
673 assert(!m
->cleaning
);
676 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
677 (integer_t
)old_object
, m
->offset
, (integer_t
)m
,
678 (integer_t
)new_m
, new_offset
);
680 pmap_clear_modify(m
->phys_page
);
681 vm_object_paging_begin(old_object
);
684 * Record that this page has been written out
687 vm_external_state_set(old_object
->existence_map
, m
->offset
);
688 #endif /*MACH_PAGEMAP*/
691 * Mark original page as cleaning in place.
698 * Convert the fictitious page to a private shadow of
701 assert(new_m
->fictitious
);
702 new_m
->fictitious
= FALSE
;
703 new_m
->private = TRUE
;
704 new_m
->pageout
= TRUE
;
705 new_m
->phys_page
= m
->phys_page
;
708 vm_page_insert(new_m
, new_object
, new_offset
);
709 assert(!new_m
->wanted
);
717 vm_object_t new_object
,
718 vm_object_offset_t new_offset
)
721 "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
722 m
, new_m
, new_object
, new_offset
, 0);
724 assert((!m
->busy
) && (!m
->cleaning
));
726 assert(!new_m
->private && !new_m
->fictitious
);
728 pmap_clear_modify(m
->phys_page
);
731 vm_object_paging_begin(m
->object
);
732 vm_page_unlock_queues();
733 vm_object_unlock(m
->object
);
736 * Copy the original page to the new page.
738 vm_page_copy(m
, new_m
);
741 * Mark the old page as clean. A request to pmap_is_modified
742 * will get the right answer.
744 vm_object_lock(m
->object
);
747 vm_object_paging_end(m
->object
);
749 vm_page_lock_queues();
750 if (!m
->active
&& !m
->inactive
)
754 vm_page_insert(new_m
, new_object
, new_offset
);
755 vm_page_activate(new_m
);
756 new_m
->busy
= FALSE
; /* No other thread can be waiting */
761 * Routine: vm_pageout_initialize_page
763 * Causes the specified page to be initialized in
764 * the appropriate memory object. This routine is used to push
765 * pages into a copy-object when they are modified in the
768 * The page is moved to a temporary object and paged out.
771 * The page in question must not be on any pageout queues.
772 * The object to which it belongs must be locked.
773 * The page must be busy, but not hold a paging reference.
776 * Move this page to a completely new object.
779 vm_pageout_initialize_page(
783 vm_object_t new_object
;
785 vm_object_offset_t paging_offset
;
786 vm_page_t holding_page
;
790 "vm_pageout_initialize_page, page 0x%X\n",
791 (integer_t
)m
, 0, 0, 0, 0);
795 * Verify that we really want to clean this page
802 * Create a paging reference to let us play with the object.
805 paging_offset
= m
->offset
+ object
->paging_offset
;
806 vm_object_paging_begin(object
);
807 vm_object_unlock(object
);
808 if (m
->absent
|| m
->error
|| m
->restart
||
809 (!m
->dirty
&& !m
->precious
)) {
811 panic("reservation without pageout?"); /* alan */
815 /* set the page for future call to vm_fault_list_request */
817 vm_object_lock(m
->object
);
818 vm_page_lock_queues();
819 pmap_clear_modify(m
->phys_page
);
822 m
->list_req_pending
= TRUE
;
826 vm_page_unlock_queues();
827 vm_object_unlock(m
->object
);
828 vm_pageout_throttle(m
);
831 * Write the data to its pager.
832 * Note that the data is passed by naming the new object,
833 * not a virtual address; the pager interface has been
834 * manipulated to use the "internal memory" data type.
835 * [The object reference from its allocation is donated
836 * to the eventual recipient.]
838 memory_object_data_initialize(object
->pager
,
842 vm_object_lock(object
);
845 #if MACH_CLUSTER_STATS
846 #define MAXCLUSTERPAGES 16
848 unsigned long pages_in_cluster
;
849 unsigned long pages_at_higher_offsets
;
850 unsigned long pages_at_lower_offsets
;
851 } cluster_stats
[MAXCLUSTERPAGES
];
852 #endif /* MACH_CLUSTER_STATS */
854 boolean_t allow_clustered_pageouts
= FALSE
;
857 * vm_pageout_cluster:
859 * Given a page, page it out, and attempt to clean adjacent pages
860 * in the same operation.
862 * The page must be busy, and the object unlocked w/ paging reference
863 * to prevent deallocation or collapse. The page must not be on any
870 vm_object_t object
= m
->object
;
871 vm_object_offset_t offset
= m
->offset
; /* from vm_object start */
872 vm_object_offset_t paging_offset
= m
->offset
+ object
->paging_offset
;
873 vm_object_t new_object
;
874 vm_object_offset_t new_offset
;
875 vm_size_t cluster_size
;
876 vm_object_offset_t cluster_offset
; /* from memory_object start */
877 vm_object_offset_t cluster_lower_bound
; /* from vm_object_start */
878 vm_object_offset_t cluster_upper_bound
; /* from vm_object_start */
879 vm_object_offset_t cluster_start
, cluster_end
;/* from vm_object start */
880 vm_object_offset_t offset_within_cluster
;
881 vm_size_t length_of_data
;
882 vm_page_t
friend, holding_page
;
884 boolean_t precious_clean
= TRUE
;
885 int pages_in_cluster
;
887 CLUSTER_STAT(int pages_at_higher_offsets
= 0;)
888 CLUSTER_STAT(int pages_at_lower_offsets
= 0;)
891 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
892 (integer_t
)object
, offset
, (integer_t
)m
, 0, 0);
894 CLUSTER_STAT(vm_pageout_cluster_clusters
++;)
896 * Only a certain kind of page is appreciated here.
898 assert(m
->busy
&& (m
->dirty
|| m
->precious
) && (m
->wire_count
== 0));
899 assert(!m
->cleaning
&& !m
->pageout
&& !m
->inactive
&& !m
->active
);
901 vm_object_lock(object
);
902 cluster_size
= object
->cluster_size
;
904 assert(cluster_size
>= PAGE_SIZE
);
905 if (cluster_size
< PAGE_SIZE
) cluster_size
= PAGE_SIZE
;
906 assert(object
->pager_created
&& object
->pager_initialized
);
907 assert(object
->internal
|| object
->pager_ready
);
909 if (m
->precious
&& !m
->dirty
)
910 precious_clean
= TRUE
;
912 if (!object
->pager_trusted
|| !allow_clustered_pageouts
)
913 cluster_size
= PAGE_SIZE
;
914 vm_object_unlock(object
);
916 cluster_offset
= paging_offset
& (vm_object_offset_t
)(cluster_size
- 1);
917 /* bytes from beginning of cluster */
919 * Due to unaligned mappings, we have to be careful
920 * of negative offsets into the VM object. Clip the cluster
921 * boundary to the VM object, not the memory object.
923 if (offset
> cluster_offset
) {
924 cluster_lower_bound
= offset
- cluster_offset
;
927 cluster_lower_bound
= 0;
929 cluster_upper_bound
= (offset
- cluster_offset
) +
930 (vm_object_offset_t
)cluster_size
;
932 /* set the page for future call to vm_fault_list_request */
934 vm_object_lock(m
->object
);
935 vm_page_lock_queues();
937 m
->list_req_pending
= TRUE
;
941 vm_page_unlock_queues();
942 vm_object_unlock(m
->object
);
943 vm_pageout_throttle(m
);
946 * Search backward for adjacent eligible pages to clean in
950 cluster_start
= offset
;
951 if (offset
) { /* avoid wrap-around at zero */
952 for (cluster_start
= offset
- PAGE_SIZE_64
;
953 cluster_start
>= cluster_lower_bound
;
954 cluster_start
-= PAGE_SIZE_64
) {
955 assert(cluster_size
> PAGE_SIZE
);
957 vm_object_lock(object
);
958 vm_page_lock_queues();
960 if ((friend = vm_pageout_cluster_page(object
, cluster_start
,
961 precious_clean
)) == VM_PAGE_NULL
) {
962 vm_page_unlock_queues();
963 vm_object_unlock(object
);
966 new_offset
= (cluster_start
+ object
->paging_offset
)
967 & (cluster_size
- 1);
969 assert(new_offset
< cluster_offset
);
970 m
->list_req_pending
= TRUE
;
972 /* do nothing except advance the write request, all we really need to */
973 /* do is push the target page and let the code at the other end decide */
974 /* what is really the right size */
975 if (vm_page_free_count
<= vm_page_free_reserved
) {
981 vm_page_unlock_queues();
982 vm_object_unlock(object
);
983 if(m
->dirty
|| m
->object
->internal
) {
984 CLUSTER_STAT(pages_at_lower_offsets
++;)
988 cluster_start
+= PAGE_SIZE_64
;
990 assert(cluster_start
>= cluster_lower_bound
);
991 assert(cluster_start
<= offset
);
993 * Search forward for adjacent eligible pages to clean in
996 for (cluster_end
= offset
+ PAGE_SIZE_64
;
997 cluster_end
< cluster_upper_bound
;
998 cluster_end
+= PAGE_SIZE_64
) {
999 assert(cluster_size
> PAGE_SIZE
);
1001 vm_object_lock(object
);
1002 vm_page_lock_queues();
1004 if ((friend = vm_pageout_cluster_page(object
, cluster_end
,
1005 precious_clean
)) == VM_PAGE_NULL
) {
1006 vm_page_unlock_queues();
1007 vm_object_unlock(object
);
1010 new_offset
= (cluster_end
+ object
->paging_offset
)
1011 & (cluster_size
- 1);
1013 assert(new_offset
< cluster_size
);
1014 m
->list_req_pending
= TRUE
;
1016 /* do nothing except advance the write request, all we really need to */
1017 /* do is push the target page and let the code at the other end decide */
1018 /* what is really the right size */
1019 if (vm_page_free_count
<= vm_page_free_reserved
) {
1025 vm_page_unlock_queues();
1026 vm_object_unlock(object
);
1028 if(m
->dirty
|| m
->object
->internal
) {
1029 CLUSTER_STAT(pages_at_higher_offsets
++;)
1032 assert(cluster_end
<= cluster_upper_bound
);
1033 assert(cluster_end
>= offset
+ PAGE_SIZE
);
1036 * (offset - cluster_offset) is beginning of cluster_object
1037 * relative to vm_object start.
1039 offset_within_cluster
= cluster_start
- (offset
- cluster_offset
);
1040 length_of_data
= cluster_end
- cluster_start
;
1042 assert(offset_within_cluster
< cluster_size
);
1043 assert((offset_within_cluster
+ length_of_data
) <= cluster_size
);
1046 assert(rc
== KERN_SUCCESS
);
1048 pages_in_cluster
= length_of_data
/PAGE_SIZE
;
1050 #if MACH_CLUSTER_STATS
1051 (cluster_stats
[pages_at_lower_offsets
].pages_at_lower_offsets
)++;
1052 (cluster_stats
[pages_at_higher_offsets
].pages_at_higher_offsets
)++;
1053 (cluster_stats
[pages_in_cluster
].pages_in_cluster
)++;
1054 #endif /* MACH_CLUSTER_STATS */
1057 * Send the data to the pager.
1059 paging_offset
= cluster_start
+ object
->paging_offset
;
1061 rc
= memory_object_data_return(object
->pager
,
1067 vm_object_lock(object
);
1068 vm_object_paging_end(object
);
1071 assert(!object
->pager_trusted
);
1072 VM_PAGE_FREE(holding_page
);
1073 vm_object_paging_end(object
);
1076 vm_object_unlock(object
);
1080 * Trusted pager throttle.
1081 * Object must be unlocked, page queues must be unlocked.
1084 vm_pageout_throttle(
1085 register vm_page_t m
)
1087 vm_page_lock_queues();
1088 assert(!m
->laundry
);
1090 while (vm_page_laundry_count
>= vm_page_laundry_max
) {
1092 * Set the threshold for when vm_page_free()
1093 * should wake us up.
1095 vm_page_laundry_min
= vm_page_laundry_max
/2;
1097 assert_wait((event_t
) &vm_page_laundry_count
, THREAD_UNINT
);
1098 vm_page_unlock_queues();
1101 * Pause to let the default pager catch up.
1103 thread_block((void (*)(void)) 0);
1104 vm_page_lock_queues();
1106 vm_page_laundry_count
++;
1107 vm_page_unlock_queues();
1111 * The global variable vm_pageout_clean_active_pages controls whether
1112 * active pages are considered valid to be cleaned in place during a
1113 * clustered pageout. Performance measurements are necessary to determine
1116 int vm_pageout_clean_active_pages
= 1;
1118 * vm_pageout_cluster_page: [Internal]
1120 * return a vm_page_t to the page at (object,offset) if it is appropriate
1121 * to clean in place. Pages that are non-existent, busy, absent, already
1122 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
1123 * page in a cluster.
1125 * The object must be locked on entry, and remains locked throughout
1130 vm_pageout_cluster_page(
1132 vm_object_offset_t offset
,
1133 boolean_t precious_clean
)
1138 "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
1139 (integer_t
)object
, offset
, 0, 0, 0);
1141 if ((m
= vm_page_lookup(object
, offset
)) == VM_PAGE_NULL
)
1142 return(VM_PAGE_NULL
);
1144 if (m
->busy
|| m
->absent
|| m
->cleaning
||
1145 (m
->wire_count
!= 0) || m
->error
)
1146 return(VM_PAGE_NULL
);
1148 if (vm_pageout_clean_active_pages
) {
1149 if (!m
->active
&& !m
->inactive
) return(VM_PAGE_NULL
);
1151 if (!m
->inactive
) return(VM_PAGE_NULL
);
1154 assert(!m
->private);
1155 assert(!m
->fictitious
);
1157 if (!m
->dirty
) m
->dirty
= pmap_is_modified(m
->phys_page
);
1159 if (precious_clean
) {
1160 if (!m
->precious
|| !m
->dirty
)
1161 return(VM_PAGE_NULL
);
1164 return(VM_PAGE_NULL
);
1170 * vm_pageout_scan does the dirty work for the pageout daemon.
1171 * It returns with vm_page_queue_free_lock held and
1172 * vm_page_free_wanted == 0.
1174 extern void vm_pageout_scan_continue(void); /* forward; */
1177 vm_pageout_scan(void)
1179 unsigned int burst_count
;
1180 boolean_t now
= FALSE
;
1181 unsigned int laundry_pages
;
1182 boolean_t need_more_inactive_pages
;
1183 unsigned int loop_detect
;
1185 XPR(XPR_VM_PAGEOUT
, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1188 * We want to gradually dribble pages from the active queue
1189 * to the inactive queue. If we let the inactive queue get
1190 * very small, and then suddenly dump many pages into it,
1191 * those pages won't get a sufficient chance to be referenced
1192 * before we start taking them from the inactive queue.
1194 * We must limit the rate at which we send pages to the pagers.
1195 * data_write messages consume memory, for message buffers and
1196 * for map-copy objects. If we get too far ahead of the pagers,
1197 * we can potentially run out of memory.
1199 * We can use the laundry count to limit directly the number
1200 * of pages outstanding to the default pager. A similar
1201 * strategy for external pagers doesn't work, because
1202 * external pagers don't have to deallocate the pages sent them,
1203 * and because we might have to send pages to external pagers
1204 * even if they aren't processing writes. So we also
1205 * use a burst count to limit writes to external pagers.
1207 * When memory is very tight, we can't rely on external pagers to
1208 * clean pages. They probably aren't running, because they
1209 * aren't vm-privileged. If we kept sending dirty pages to them,
1210 * we could exhaust the free list. However, we can't just ignore
1211 * pages belonging to external objects, because there might be no
1212 * pages belonging to internal objects. Hence, we get the page
1213 * into an internal object and then immediately double-page it,
1214 * sending it to the default pager.
1216 * consider_zone_gc should be last, because the other operations
1217 * might return memory to zones.
1224 mutex_lock(&vm_page_queue_free_lock
);
1225 now
= (vm_page_free_count
< vm_page_free_min
);
1226 mutex_unlock(&vm_page_queue_free_lock
);
1228 swapout_threads(now
);
1229 #endif /* THREAD_SWAPPER */
1232 consider_task_collect();
1233 consider_thread_collect();
1235 consider_machine_collect();
1237 loop_detect
= vm_page_active_count
+ vm_page_inactive_count
;
1239 if (vm_page_free_count
<= vm_page_free_reserved
) {
1240 need_more_inactive_pages
= TRUE
;
1242 need_more_inactive_pages
= FALSE
;
1245 need_more_inactive_pages
= FALSE
;
1248 for (burst_count
= 0;;) {
1249 register vm_page_t m
;
1250 register vm_object_t object
;
1253 * Recalculate vm_page_inactivate_target.
1256 vm_page_lock_queues();
1257 vm_page_inactive_target
=
1258 VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1259 vm_page_inactive_count
);
1262 * Move pages from active to inactive.
1265 while ((vm_page_inactive_count
< vm_page_inactive_target
||
1266 need_more_inactive_pages
) &&
1267 !queue_empty(&vm_page_queue_active
)) {
1268 register vm_object_t object
;
1270 vm_pageout_active
++;
1271 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1274 * If we're getting really low on memory,
1275 * try selecting a page that will go
1276 * directly to the default_pager.
1277 * If there are no such pages, we have to
1278 * page out a page backed by an EMM,
1279 * so that the default_pager can recover
1282 if (need_more_inactive_pages
&&
1283 (IP_VALID(memory_manager_default
))) {
1284 vm_pageout_scan_active_emm_throttle
++;
1286 assert(m
->active
&& !m
->inactive
);
1289 if (vm_object_lock_try(object
)) {
1291 if (object
->pager_trusted
||
1294 vm_pageout_scan_active_emm_throttle_success
++;
1295 goto object_locked_active
;
1298 vm_pageout_scan_active_emm_throttle_success
++;
1299 goto object_locked_active
;
1301 vm_object_unlock(object
);
1303 m
= (vm_page_t
) queue_next(&m
->pageq
);
1304 } while (!queue_end(&vm_page_queue_active
,
1305 (queue_entry_t
) m
));
1306 if (queue_end(&vm_page_queue_active
,
1307 (queue_entry_t
) m
)) {
1308 vm_pageout_scan_active_emm_throttle_failure
++;
1310 queue_first(&vm_page_queue_active
);
1314 assert(m
->active
&& !m
->inactive
);
1317 if (!vm_object_lock_try(object
)) {
1319 * Move page to end and continue.
1322 queue_remove(&vm_page_queue_active
, m
,
1324 queue_enter(&vm_page_queue_active
, m
,
1326 vm_page_unlock_queues();
1329 vm_page_lock_queues();
1333 object_locked_active
:
1335 * If the page is busy, then we pull it
1336 * off the active queue and leave it alone.
1340 vm_object_unlock(object
);
1341 queue_remove(&vm_page_queue_active
, m
,
1345 vm_page_active_count
--;
1350 * Deactivate the page while holding the object
1351 * locked, so we know the page is still not busy.
1352 * This should prevent races between pmap_enter
1353 * and pmap_clear_reference. The page might be
1354 * absent or fictitious, but vm_page_deactivate
1358 vm_page_deactivate(m
);
1359 vm_object_unlock(object
);
1363 * We are done if we have met our target *and*
1364 * nobody is still waiting for a page.
1366 if (vm_page_free_count
>= vm_page_free_target
) {
1367 mutex_lock(&vm_page_queue_free_lock
);
1368 if ((vm_page_free_count
>= vm_page_free_target
) &&
1369 (vm_page_free_wanted
== 0)) {
1370 vm_page_unlock_queues();
1373 mutex_unlock(&vm_page_queue_free_lock
);
1376 * Sometimes we have to pause:
1377 * 1) No inactive pages - nothing to do.
1378 * 2) Flow control - wait for untrusted pagers to catch up.
1381 if ((queue_empty(&vm_page_queue_inactive
) &&
1382 (queue_empty(&vm_page_queue_zf
))) ||
1383 ((--loop_detect
) == 0) ||
1384 (burst_count
>= vm_pageout_burst_max
)) {
1385 unsigned int pages
, msecs
;
1388 consider_machine_adjust();
1390 * vm_pageout_burst_wait is msecs/page.
1391 * If there is nothing for us to do, we wait
1392 * at least vm_pageout_empty_wait msecs.
1394 pages
= burst_count
;
1396 if (loop_detect
== 0) {
1397 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1398 msecs
= vm_free_page_pause
;
1401 msecs
= burst_count
* vm_pageout_burst_wait
;
1404 if (queue_empty(&vm_page_queue_inactive
) &&
1405 queue_empty(&vm_page_queue_zf
) &&
1406 (msecs
< vm_pageout_empty_wait
))
1407 msecs
= vm_pageout_empty_wait
;
1408 vm_page_unlock_queues();
1410 assert_wait_timeout(msecs
, THREAD_INTERRUPTIBLE
);
1411 counter(c_vm_pageout_scan_block
++);
1414 * Unfortunately, we don't have call_continuation
1415 * so we can't rely on tail-recursion.
1417 wait_result
= thread_block((void (*)(void)) 0);
1418 if (wait_result
!= THREAD_TIMED_OUT
)
1419 thread_cancel_timer();
1420 vm_pageout_scan_continue();
1426 vm_pageout_inactive
++;
1428 if (vm_zf_count
< vm_accellerate_zf_pageout_trigger
) {
1432 if((vm_zf_iterator
+=1) >= vm_zf_iterator_count
) {
1436 if(queue_empty(&vm_page_queue_zf
) ||
1437 (((last_page_zf
) || (vm_zf_iterator
== 0)) &&
1438 !queue_empty(&vm_page_queue_inactive
))) {
1439 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1442 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
1446 if ((vm_page_free_count
<= vm_page_free_reserved
) &&
1447 (IP_VALID(memory_manager_default
))) {
1449 * We're really low on memory. Try to select a page that
1450 * would go directly to the default_pager.
1451 * If there are no such pages, we have to page out a
1452 * page backed by an EMM, so that the default_pager
1453 * can recover it eventually.
1455 vm_pageout_scan_inactive_emm_throttle
++;
1457 assert(!m
->active
&& m
->inactive
);
1460 if (vm_object_lock_try(object
)) {
1462 if (object
->pager_trusted
||
1465 vm_pageout_scan_inactive_emm_throttle_success
++;
1466 goto object_locked_inactive
;
1469 vm_pageout_scan_inactive_emm_throttle_success
++;
1470 goto object_locked_inactive
;
1472 vm_object_unlock(object
);
1474 m
= (vm_page_t
) queue_next(&m
->pageq
);
1475 } while ((!queue_end(&vm_page_queue_zf
,
1477 && (!queue_end(&vm_page_queue_inactive
,
1478 (queue_entry_t
) m
)));
1480 if ((queue_end(&vm_page_queue_zf
,
1482 || (queue_end(&vm_page_queue_inactive
,
1483 (queue_entry_t
) m
))) {
1484 vm_pageout_scan_inactive_emm_throttle_failure
++;
1486 * We should check the "active" queue
1487 * for good candidates to page out.
1489 need_more_inactive_pages
= TRUE
;
1491 if(last_page_zf
== 0) {
1493 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1496 vm_zf_iterator
= vm_zf_iterator_count
- 2;
1498 vm_page_unlock_queues();
1503 assert(!m
->active
&& m
->inactive
);
1507 * Try to lock object; since we've got the
1508 * page queues lock, we can only try for this one.
1511 if (!vm_object_lock_try(object
)) {
1513 * Move page to end and continue.
1514 * Don't re-issue ticket
1517 queue_remove(&vm_page_queue_zf
, m
,
1519 queue_enter(&vm_page_queue_zf
, m
,
1522 queue_remove(&vm_page_queue_inactive
, m
,
1524 queue_enter(&vm_page_queue_inactive
, m
,
1527 vm_page_unlock_queues();
1530 vm_pageout_inactive_nolock
++;
1534 object_locked_inactive
:
1536 * Paging out pages of objects which pager is being
1537 * created by another thread must be avoided, because
1538 * this thread may claim for memory, thus leading to a
1539 * possible dead lock between it and the pageout thread
1540 * which will wait for pager creation, if such pages are
1541 * finally chosen. The remaining assumption is that there
1542 * will finally be enough available pages in the inactive
1543 * pool to page out in order to satisfy all memory claimed
1544 * by the thread which concurrently creates the pager.
1547 if (!object
->pager_initialized
&& object
->pager_created
) {
1549 * Move page to end and continue, hoping that
1550 * there will be enough other inactive pages to
1551 * page out so that the thread which currently
1552 * initializes the pager will succeed.
1553 * Don't re-grant the ticket, the page should
1554 * pulled from the queue and paged out whenever
1555 * one of its logically adjacent fellows is
1559 queue_remove(&vm_page_queue_zf
, m
,
1561 queue_enter(&vm_page_queue_zf
, m
,
1564 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1566 queue_remove(&vm_page_queue_inactive
, m
,
1568 queue_enter(&vm_page_queue_inactive
, m
,
1573 vm_page_unlock_queues();
1574 vm_object_unlock(object
);
1575 vm_pageout_inactive_avoid
++;
1580 * Remove the page from the inactive list.
1584 queue_remove(&vm_page_queue_zf
, m
, vm_page_t
, pageq
);
1586 queue_remove(&vm_page_queue_inactive
, m
, vm_page_t
, pageq
);
1588 m
->inactive
= FALSE
;
1590 vm_page_inactive_count
--;
1592 if (m
->busy
|| !object
->alive
) {
1594 * Somebody is already playing with this page.
1595 * Leave it off the pageout queues.
1598 vm_page_unlock_queues();
1599 vm_object_unlock(object
);
1600 vm_pageout_inactive_busy
++;
1605 * If it's absent or in error, we can reclaim the page.
1608 if (m
->absent
|| m
->error
) {
1609 vm_pageout_inactive_absent
++;
1612 vm_page_unlock_queues();
1613 vm_object_unlock(object
);
1617 assert(!m
->private);
1618 assert(!m
->fictitious
);
1621 * If already cleaning this page in place, convert from
1622 * "adjacent" to "target". We can leave the page mapped,
1623 * and vm_pageout_object_terminate will determine whether
1624 * to free or reactivate.
1628 #if MACH_CLUSTER_STATS
1629 vm_pageout_cluster_conversions
++;
1633 m
->dump_cleaning
= TRUE
;
1635 vm_object_unlock(object
);
1636 vm_page_unlock_queues();
1641 * If it's being used, reactivate.
1642 * (Fictitious pages are either busy or absent.)
1645 if (m
->reference
|| pmap_is_referenced(m
->phys_page
)) {
1646 vm_pageout_inactive_used
++;
1648 #if ADVISORY_PAGEOUT
1649 if (m
->discard_request
) {
1650 m
->discard_request
= FALSE
;
1652 #endif /* ADVISORY_PAGEOUT */
1654 vm_object_unlock(object
);
1655 vm_page_activate(m
);
1656 VM_STAT(reactivations
++);
1657 vm_page_unlock_queues();
1661 #if ADVISORY_PAGEOUT
1662 if (object
->advisory_pageout
) {
1663 boolean_t do_throttle
;
1664 memory_object_t pager
;
1665 vm_object_offset_t discard_offset
;
1667 if (m
->discard_request
) {
1668 vm_stat_discard_failure
++;
1669 goto mandatory_pageout
;
1672 assert(object
->pager_initialized
);
1673 m
->discard_request
= TRUE
;
1674 pager
= object
->pager
;
1676 /* system-wide throttle */
1677 do_throttle
= (vm_page_free_count
<=
1678 vm_page_free_reserved
);
1682 * JMM - Do we need a replacement throttle
1683 * mechanism for pagers?
1686 /* throttle on this pager */
1687 /* XXX lock ordering ? */
1689 do_throttle
= imq_full(&port
->ip_messages
);
1695 vm_stat_discard_throttle
++;
1697 /* ignore this page and skip to next */
1698 vm_page_unlock_queues();
1699 vm_object_unlock(object
);
1702 /* force mandatory pageout */
1703 goto mandatory_pageout
;
1707 /* proceed with discard_request */
1708 vm_page_activate(m
);
1710 VM_STAT(reactivations
++);
1711 discard_offset
= m
->offset
+ object
->paging_offset
;
1712 vm_stat_discard_sent
++;
1713 vm_page_unlock_queues();
1714 vm_object_unlock(object
);
1717 memory_object_discard_request(object->pager,
1724 #endif /* ADVISORY_PAGEOUT */
1727 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1728 (integer_t
)object
, (integer_t
)m
->offset
, (integer_t
)m
, 0,0);
1731 * Eliminate all mappings.
1735 pmap_page_protect(m
->phys_page
, VM_PROT_NONE
);
1738 m
->dirty
= pmap_is_modified(m
->phys_page
);
1740 * If it's clean and not precious, we can free the page.
1743 if (!m
->dirty
&& !m
->precious
) {
1744 vm_pageout_inactive_clean
++;
1747 vm_page_unlock_queues();
1750 * If there is no memory object for the page, create
1751 * one and hand it to the default pager.
1754 if (!object
->pager_initialized
)
1755 vm_object_collapse(object
);
1756 if (!object
->pager_initialized
)
1757 vm_object_pager_create(object
);
1758 if (!object
->pager_initialized
) {
1760 * Still no pager for the object.
1761 * Reactivate the page.
1763 * Should only happen if there is no
1766 vm_page_lock_queues();
1767 vm_page_activate(m
);
1768 vm_page_unlock_queues();
1771 * And we are done with it.
1773 PAGE_WAKEUP_DONE(m
);
1774 vm_object_unlock(object
);
1777 * break here to get back to the preemption
1778 * point in the outer loop so that we don't
1779 * spin forever if there is no default pager.
1781 vm_pageout_dirty_no_pager
++;
1783 * Well there's no pager, but we can still reclaim
1784 * free pages out of the inactive list. Go back
1785 * to top of loop and look for suitable pages.
1790 if ((object
->pager_initialized
) &&
1791 (object
->pager
== MEMORY_OBJECT_NULL
)) {
1793 * This pager has been destroyed by either
1794 * memory_object_destroy or vm_object_destroy, and
1795 * so there is nowhere for the page to go.
1796 * Just free the page.
1799 vm_object_unlock(object
);
1803 vm_pageout_inactive_dirty
++;
1805 if (!object->internal)
1808 vm_object_paging_begin(object
);
1809 vm_object_unlock(object
);
1810 vm_pageout_cluster(m
); /* flush it */
1812 consider_machine_adjust();
1815 counter(unsigned int c_vm_pageout_scan_continue
= 0;)
1818 vm_pageout_scan_continue(void)
1821 * We just paused to let the pagers catch up.
1822 * If vm_page_laundry_count is still high,
1823 * then we aren't waiting long enough.
1824 * If we have paused some vm_pageout_pause_max times without
1825 * adjusting vm_pageout_burst_wait, it might be too big,
1826 * so we decrease it.
1829 vm_page_lock_queues();
1830 counter(++c_vm_pageout_scan_continue
);
1831 if (vm_page_laundry_count
> vm_pageout_burst_min
) {
1832 vm_pageout_burst_wait
++;
1833 vm_pageout_pause_count
= 0;
1834 } else if (++vm_pageout_pause_count
> vm_pageout_pause_max
) {
1835 vm_pageout_burst_wait
= (vm_pageout_burst_wait
* 3) / 4;
1836 if (vm_pageout_burst_wait
< 1)
1837 vm_pageout_burst_wait
= 1;
1838 vm_pageout_pause_count
= 0;
1840 vm_page_unlock_queues();
1843 void vm_page_free_reserve(int pages
);
1844 int vm_page_free_count_init
;
1847 vm_page_free_reserve(
1850 int free_after_reserve
;
1852 vm_page_free_reserved
+= pages
;
1854 free_after_reserve
= vm_page_free_count_init
- vm_page_free_reserved
;
1856 vm_page_free_min
= vm_page_free_reserved
+
1857 VM_PAGE_FREE_MIN(free_after_reserve
);
1859 vm_page_free_target
= vm_page_free_reserved
+
1860 VM_PAGE_FREE_TARGET(free_after_reserve
);
1862 if (vm_page_free_target
< vm_page_free_min
+ 5)
1863 vm_page_free_target
= vm_page_free_min
+ 5;
1867 * vm_pageout is the high level pageout daemon.
1874 thread_t self
= current_thread();
1878 * Set thread privileges.
1880 self
->vm_privilege
= TRUE
;
1881 stack_privilege(self
);
1885 self
->priority
= BASEPRI_PREEMPT
- 1;
1886 set_sched_pri(self
, self
->priority
);
1887 thread_unlock(self
);
1891 * Initialize some paging parameters.
1894 if (vm_page_laundry_max
== 0)
1895 vm_page_laundry_max
= VM_PAGE_LAUNDRY_MAX
;
1897 if (vm_pageout_burst_max
== 0)
1898 vm_pageout_burst_max
= VM_PAGEOUT_BURST_MAX
;
1900 if (vm_pageout_burst_wait
== 0)
1901 vm_pageout_burst_wait
= VM_PAGEOUT_BURST_WAIT
;
1903 if (vm_pageout_empty_wait
== 0)
1904 vm_pageout_empty_wait
= VM_PAGEOUT_EMPTY_WAIT
;
1906 vm_page_free_count_init
= vm_page_free_count
;
1909 * even if we've already called vm_page_free_reserve
1910 * call it again here to insure that the targets are
1911 * accurately calculated (it uses vm_page_free_count_init)
1912 * calling it with an arg of 0 will not change the reserve
1913 * but will re-calculate free_min and free_target
1915 if (vm_page_free_reserved
< VM_PAGE_FREE_RESERVED
)
1916 vm_page_free_reserve(VM_PAGE_FREE_RESERVED
- vm_page_free_reserved
);
1918 vm_page_free_reserve(0);
1921 * vm_pageout_scan will set vm_page_inactive_target.
1923 * The pageout daemon is never done, so loop forever.
1924 * We should call vm_pageout_scan at least once each
1925 * time we are woken, even if vm_page_free_wanted is
1926 * zero, to check vm_page_free_target and
1927 * vm_page_inactive_target.
1930 vm_pageout_scan_event_counter
++;
1932 /* we hold vm_page_queue_free_lock now */
1933 assert(vm_page_free_wanted
== 0);
1934 assert_wait((event_t
) &vm_page_free_wanted
, THREAD_UNINT
);
1935 mutex_unlock(&vm_page_queue_free_lock
);
1936 counter(c_vm_pageout_block
++);
1937 thread_block((void (*)(void)) 0);
1943 vm_pageout_emergency_availability_request()
1948 vm_page_lock_queues();
1949 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1951 while (!queue_end(&vm_page_queue_inactive
, (queue_entry_t
) m
)) {
1953 m
= (vm_page_t
) queue_next(&m
->pageq
);
1957 m
->dirty
= pmap_is_modified(m
->phys_page
);
1958 if(m
->dirty
|| m
->busy
|| m
->wire_count
|| m
->absent
1959 || m
->precious
|| m
->cleaning
1960 || m
->dump_cleaning
|| m
->error
1961 || m
->pageout
|| m
->laundry
1962 || m
->list_req_pending
1963 || m
->overwriting
) {
1964 m
= (vm_page_t
) queue_next(&m
->pageq
);
1969 if (vm_object_lock_try(object
)) {
1970 if((!object
->alive
) ||
1971 (object
->pageout
)) {
1972 vm_object_unlock(object
);
1973 m
= (vm_page_t
) queue_next(&m
->pageq
);
1977 pmap_page_protect(m
->phys_page
, VM_PROT_NONE
);
1979 vm_object_unlock(object
);
1980 vm_page_unlock_queues();
1981 return KERN_SUCCESS
;
1983 m
= (vm_page_t
) queue_next(&m
->pageq
);
1986 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1988 while (!queue_end(&vm_page_queue_active
, (queue_entry_t
) m
)) {
1990 m
= (vm_page_t
) queue_next(&m
->pageq
);
1994 m
->dirty
= pmap_is_modified(m
->phys_page
);
1995 if(m
->dirty
|| m
->busy
|| m
->wire_count
|| m
->absent
1996 || m
->precious
|| m
->cleaning
1997 || m
->dump_cleaning
|| m
->error
1998 || m
->pageout
|| m
->laundry
1999 || m
->list_req_pending
2000 || m
->overwriting
) {
2001 m
= (vm_page_t
) queue_next(&m
->pageq
);
2006 if (vm_object_lock_try(object
)) {
2007 if((!object
->alive
) ||
2008 (object
->pageout
)) {
2009 vm_object_unlock(object
);
2010 m
= (vm_page_t
) queue_next(&m
->pageq
);
2014 pmap_page_protect(m
->phys_page
, VM_PROT_NONE
);
2016 vm_object_unlock(object
);
2017 vm_page_unlock_queues();
2018 return KERN_SUCCESS
;
2020 m
= (vm_page_t
) queue_next(&m
->pageq
);
2022 vm_page_unlock_queues();
2023 return KERN_FAILURE
;
2033 int page_field_size
; /* bit field in word size buf */
2035 page_field_size
= 0;
2036 if (flags
& UPL_CREATE_LITE
) {
2037 page_field_size
= ((size
/PAGE_SIZE
) + 7) >> 3;
2038 page_field_size
= (page_field_size
+ 3) & 0xFFFFFFFC;
2040 if(flags
& UPL_CREATE_INTERNAL
) {
2041 upl
= (upl_t
)kalloc(sizeof(struct upl
)
2042 + (sizeof(struct upl_page_info
)*(size
/PAGE_SIZE
))
2045 upl
= (upl_t
)kalloc(sizeof(struct upl
) + page_field_size
);
2048 upl
->src_object
= NULL
;
2049 upl
->kaddr
= (vm_offset_t
)0;
2051 upl
->map_object
= NULL
;
2055 upl
->ubc_alias1
= 0;
2056 upl
->ubc_alias2
= 0;
2057 #endif /* UBC_DEBUG */
2065 int page_field_size
; /* bit field in word size buf */
2071 if (upl
->map_object
->pageout
) {
2072 object
= upl
->map_object
->shadow
;
2074 object
= upl
->map_object
;
2076 vm_object_lock(object
);
2077 queue_iterate(&object
->uplq
, upl_ele
, upl_t
, uplq
) {
2078 if(upl_ele
== upl
) {
2079 queue_remove(&object
->uplq
,
2080 upl_ele
, upl_t
, uplq
);
2084 vm_object_unlock(object
);
2086 #endif /* UBC_DEBUG */
2087 /* drop a reference on the map_object whether or */
2088 /* not a pageout object is inserted */
2089 if(upl
->map_object
->pageout
)
2090 vm_object_deallocate(upl
->map_object
);
2092 page_field_size
= 0;
2093 if (upl
->flags
& UPL_LITE
) {
2094 page_field_size
= ((upl
->size
/PAGE_SIZE
) + 7) >> 3;
2095 page_field_size
= (page_field_size
+ 3) & 0xFFFFFFFC;
2097 if(upl
->flags
& UPL_INTERNAL
) {
2098 kfree((vm_offset_t
)upl
,
2099 sizeof(struct upl
) +
2100 (sizeof(struct upl_page_info
) * (upl
->size
/PAGE_SIZE
))
2103 kfree((vm_offset_t
)upl
, sizeof(struct upl
) + page_field_size
);
2107 __private_extern__
void
2111 upl
->ref_count
-= 1;
2112 if(upl
->ref_count
== 0) {
2122 upl
->ref_count
-= 1;
2123 if(upl
->ref_count
== 0) {
2129 * Routine: vm_object_upl_request
2131 * Cause the population of a portion of a vm_object.
2132 * Depending on the nature of the request, the pages
2133 * returned may be contain valid data or be uninitialized.
2134 * A page list structure, listing the physical pages
2135 * will be returned upon request.
2136 * This function is called by the file system or any other
2137 * supplier of backing store to a pager.
2138 * IMPORTANT NOTE: The caller must still respect the relationship
2139 * between the vm_object and its backing memory object. The
2140 * caller MUST NOT substitute changes in the backing file
2141 * without first doing a memory_object_lock_request on the
2142 * target range unless it is know that the pages are not
2143 * shared with another entity at the pager level.
2145 * if a page list structure is present
2146 * return the mapped physical pages, where a
2147 * page is not present, return a non-initialized
2148 * one. If the no_sync bit is turned on, don't
2149 * call the pager unlock to synchronize with other
2150 * possible copies of the page. Leave pages busy
2151 * in the original object, if a page list structure
2152 * was specified. When a commit of the page list
2153 * pages is done, the dirty bit will be set for each one.
2155 * If a page list structure is present, return
2156 * all mapped pages. Where a page does not exist
2157 * map a zero filled one. Leave pages busy in
2158 * the original object. If a page list structure
2159 * is not specified, this call is a no-op.
2161 * Note: access of default pager objects has a rather interesting
2162 * twist. The caller of this routine, presumably the file system
2163 * page cache handling code, will never actually make a request
2164 * against a default pager backed object. Only the default
2165 * pager will make requests on backing store related vm_objects
2166 * In this way the default pager can maintain the relationship
2167 * between backing store files (abstract memory objects) and
2168 * the vm_objects (cache objects), they support.
2171 __private_extern__ kern_return_t
2172 vm_object_upl_request(
2174 vm_object_offset_t offset
,
2177 upl_page_info_array_t user_page_list
,
2178 unsigned int *page_list_count
,
2182 vm_object_offset_t dst_offset
= offset
;
2183 vm_size_t xfer_size
= size
;
2184 boolean_t do_m_lock
= FALSE
;
2188 boolean_t encountered_lrp
= FALSE
;
2190 vm_page_t alias_page
= NULL
;
2192 wpl_array_t lite_list
;
2194 page_ticket
= (cntrl_flags
& UPL_PAGE_TICKET_MASK
)
2195 >> UPL_PAGE_TICKET_SHIFT
;
2197 if(((size
/PAGE_SIZE
) > MAX_UPL_TRANSFER
) && !object
->phys_contiguous
) {
2198 size
= MAX_UPL_TRANSFER
* PAGE_SIZE
;
2201 if(cntrl_flags
& UPL_SET_INTERNAL
)
2202 if(page_list_count
!= NULL
)
2203 *page_list_count
= MAX_UPL_TRANSFER
;
2204 if(((cntrl_flags
& UPL_SET_INTERNAL
) && !(object
->phys_contiguous
)) &&
2205 ((page_list_count
!= NULL
) && (*page_list_count
!= 0)
2206 && *page_list_count
< (size
/page_size
)))
2207 return KERN_INVALID_ARGUMENT
;
2209 if((!object
->internal
) && (object
->paging_offset
!= 0))
2210 panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
2212 if((cntrl_flags
& UPL_COPYOUT_FROM
) && (upl_ptr
== NULL
)) {
2213 return KERN_SUCCESS
;
	if(cntrl_flags & UPL_SET_INTERNAL) {
		if(cntrl_flags & UPL_SET_LITE) {
			vm_offset_t page_field_size;

			upl = upl_create(
				UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
				size);
			user_page_list = (upl_page_info_t *)
				(((vm_offset_t)upl) + sizeof(struct upl));
			lite_list = (wpl_array_t)
				(((vm_offset_t)user_page_list) +
				((size/PAGE_SIZE) *
					sizeof(upl_page_info_t)));
			page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
			page_field_size =
				(page_field_size + 3) & 0xFFFFFFFC;
			bzero((char *)lite_list, page_field_size);
			upl->flags =
				UPL_LITE | UPL_INTERNAL;
		} else {
			upl = upl_create(UPL_CREATE_INTERNAL, size);
			user_page_list = (upl_page_info_t *)
				(((vm_offset_t)upl)
					+ sizeof(struct upl));
			upl->flags = UPL_INTERNAL;
		}
	} else {
		if(cntrl_flags & UPL_SET_LITE) {
			vm_offset_t page_field_size;

			upl = upl_create(UPL_CREATE_LITE, size);
			lite_list = (wpl_array_t)
				(((vm_offset_t)upl) + sizeof(struct upl));
			page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
			page_field_size =
				(page_field_size + 3) & 0xFFFFFFFC;
			bzero((char *)lite_list, page_field_size);
			upl->flags = UPL_LITE;
		} else {
			upl = upl_create(UPL_CREATE_EXTERNAL, size);
		}
	}
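/*
 * Layout note: for an internal upl the upl_page_info array sits directly
 * after the struct upl, and for a lite upl the wired-page bitmap
 * (wpl_array_t) follows that array.  The pointer arithmetic above and the
 * UPL_GET_INTERNAL_PAGE_LIST macro used later both depend on this ordering.
 */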
	if(object->phys_contiguous) {
		upl->offset = offset + object->paging_offset;
		if(user_page_list) {
			user_page_list[0].phys_addr =
				(offset + object->shadow_offset)>>12;
			user_page_list[0].device = TRUE;
		}
		upl->map_object = object;
		/* don't need any shadow mappings for this one */
		/* since it is already I/O memory */
		upl->flags |= UPL_DEVICE_MEMORY;

		vm_object_lock(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		if(page_list_count != NULL) {
			if (upl->flags & UPL_INTERNAL) {
				*page_list_count = 0;
			} else {
				*page_list_count = 1;
			}
		}
		return KERN_SUCCESS;
	}
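/*
 * Device (physically contiguous) objects never take the page-gathering
 * path below: the upl is marked UPL_DEVICE_MEMORY, only slot 0 of the page
 * list is filled in, and the routine returns before the per-page loops.
 */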
	if(cntrl_flags & UPL_SET_LITE) {
		upl->map_object = object;
	} else {
		upl->map_object = vm_object_allocate(size);
		vm_object_lock(upl->map_object);
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy =
				MEMORY_OBJECT_COPY_NONE;
		upl->map_object->shadow_offset = offset;
		upl->map_object->wimg_bits = object->wimg_bits;
		vm_object_unlock(upl->map_object);
	}
	upl->offset = offset + object->paging_offset;

	if (!(cntrl_flags & UPL_SET_LITE)) {
		VM_PAGE_GRAB_FICTITIOUS(alias_page);
	}
	vm_object_lock(object);
#ifdef UBC_DEBUG
	queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UBC_DEBUG */
	vm_object_paging_begin(object);
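	/*
	 * For non-lite upls a fictitious page is grabbed up front; it is the
	 * alias/placeholder page handed to vm_pageclean_setup() in the loops
	 * below, and it is re-grabbed each time one is consumed.
	 */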
	entry = 0;
	if(cntrl_flags & UPL_COPYOUT_FROM) {
		upl->flags |= UPL_PAGE_SYNC_DONE;

		while (xfer_size) {
			if((alias_page == NULL) &&
				!(cntrl_flags & UPL_SET_LITE)) {
				vm_object_unlock(object);
				VM_PAGE_GRAB_FICTITIOUS(alias_page);
				vm_object_lock(object);
			}
			if(((dst_page = vm_page_lookup(object,
				dst_offset)) == VM_PAGE_NULL) ||
				dst_page->fictitious ||
				(dst_page->wire_count != 0 &&
					!dst_page->pageout) ||
				((!(dst_page->dirty || dst_page->precious ||
					pmap_is_modified(dst_page->phys_page)))
					&& (cntrl_flags & UPL_RET_ONLY_DIRTY)) ||
				((!(dst_page->inactive))
					&& (dst_page->page_ticket != page_ticket)
					&& ((dst_page->page_ticket+1) != page_ticket)
					&& (cntrl_flags & UPL_FOR_PAGEOUT)) ||
				((!dst_page->list_req_pending) &&
					(cntrl_flags & UPL_RET_ONLY_DIRTY) &&
					pmap_is_referenced(dst_page->phys_page))) {
				if(user_page_list) {
					user_page_list[entry].phys_addr = 0;
					user_page_list[entry].device = FALSE;
				}
			} else {
				if(dst_page->busy &&
					(!(dst_page->list_req_pending &&
						dst_page->pageout))) {
					if(cntrl_flags & UPL_NOBLOCK) {
						if(user_page_list) {
							user_page_list[entry].phys_addr = 0;
							user_page_list[entry].device = FALSE;
						}
						entry++;
						dst_offset += PAGE_SIZE_64;
						xfer_size -= PAGE_SIZE;
						continue;
					}
					/* someone else is playing with the */
					/* page.  We will have to wait.     */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
					continue;
				}
				/* Someone else already cleaning the page? */
				if((dst_page->cleaning || dst_page->absent ||
					dst_page->wire_count != 0) &&
					!dst_page->list_req_pending) {
					if(user_page_list) {
						user_page_list[entry].phys_addr = 0;
						user_page_list[entry].device = FALSE;
					}
					entry++;
					dst_offset += PAGE_SIZE_64;
					xfer_size -= PAGE_SIZE;
					continue;
				}
				/* eliminate all mappings from the */
				/* original object and its prodigy */

				vm_page_lock_queues();
				if( !(cntrl_flags & UPL_FILE_IO)) {
					pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
				}
				/* pageout statistics gathering.  count  */
				/* all the pages we will page out that   */
				/* were not counted in the initial       */
				/* vm_pageout_scan work                  */
				if(dst_page->list_req_pending)
					encountered_lrp = TRUE;
				if((dst_page->dirty ||
					(dst_page->object->internal &&
					dst_page->precious)) &&
					(dst_page->list_req_pending
						== FALSE)) {
					if(encountered_lrp) {
						CLUSTER_STAT(pages_at_higher_offsets++;)
					} else {
						CLUSTER_STAT(pages_at_lower_offsets++;)
					}
				}

				/* Turn off busy indication on pending */
				/* pageout.  Note: we can only get here */
				/* in the request pending case.  */
				dst_page->list_req_pending = FALSE;
				dst_page->busy = FALSE;
				dst_page->cleaning = FALSE;

				dirty = pmap_is_modified(dst_page->phys_page);
				dirty = dirty ? TRUE : dst_page->dirty;

				if(cntrl_flags & UPL_SET_LITE) {
					int	pg_num;
					pg_num = (dst_offset-offset)/PAGE_SIZE;
					lite_list[pg_num>>5] |=
						1 << (pg_num & 31);
					pmap_clear_modify(dst_page->phys_page);
					/*
					 * Record that this page has been
					 * written out
					 */
#if	MACH_PAGEMAP
					vm_external_state_set(
						object->existence_map,
						dst_page->offset);
#endif	/*MACH_PAGEMAP*/

					/*
					 * Mark original page as cleaning
					 * in place.
					 */
					dst_page->cleaning = TRUE;
					dst_page->dirty = TRUE;
					dst_page->precious = FALSE;
				} else {
					/* use pageclean setup, it is more */
					/* convenient even for the pageout */
					/* cases here */
					vm_pageclean_setup(dst_page,
						alias_page, upl->map_object,
						size - xfer_size);

					alias_page->absent = FALSE;
					alias_page = NULL;
				}

				if(!dirty) {
					dst_page->dirty = FALSE;
					dst_page->precious = TRUE;
				}

				if(dst_page->pageout)
					dst_page->busy = TRUE;

				if((!(cntrl_flags & UPL_CLEAN_IN_PLACE))
					|| (cntrl_flags & UPL_FOR_PAGEOUT)) {
					/* deny access to the target page */
					/* while it is being worked on */
					if((!dst_page->pageout) &&
						(dst_page->wire_count == 0)) {
						dst_page->busy = TRUE;
						dst_page->pageout = TRUE;
						vm_page_wire(dst_page);
					}
				}
				if(user_page_list) {
					user_page_list[entry].phys_addr
						= dst_page->phys_page;
					user_page_list[entry].dirty =
						dst_page->dirty;
					user_page_list[entry].pageout =
						dst_page->pageout;
					user_page_list[entry].absent =
						dst_page->absent;
					user_page_list[entry].precious =
						dst_page->precious;
					user_page_list[entry].device =
						FALSE;
				}
				vm_page_unlock_queues();
			}
			entry++;
			dst_offset += PAGE_SIZE_64;
			xfer_size -= PAGE_SIZE;
		}
	} else {
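		/*
		 * End of the UPL_COPYOUT_FROM (pageout gathering) loop above:
		 * pages that are absent, wired (and not already being paged
		 * out), clean when only dirty pages were requested, or outside
		 * the requested page ticket are skipped by zeroing their page
		 * list entry rather than being collected.
		 */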
		while (xfer_size) {
			if((alias_page == NULL) &&
				!(cntrl_flags & UPL_SET_LITE)) {
				vm_object_unlock(object);
				VM_PAGE_GRAB_FICTITIOUS(alias_page);
				vm_object_lock(object);
			}
			dst_page = vm_page_lookup(object, dst_offset);
			if(dst_page != VM_PAGE_NULL) {
				if((cntrl_flags & UPL_RET_ONLY_ABSENT) &&
					!((dst_page->list_req_pending)
						&& (dst_page->absent))) {
					/* we are doing extended range */
					/* requests.  we want to grab  */
					/* pages around some which are */
					/* already present.  */
					if(user_page_list) {
						user_page_list[entry].phys_addr = 0;
						user_page_list[entry].device = FALSE;
					}
					entry++;
					dst_offset += PAGE_SIZE_64;
					xfer_size -= PAGE_SIZE;
					continue;
				}
				if((dst_page->cleaning) &&
					!(dst_page->list_req_pending)) {
					/* someone else is writing to the */
					/* page.  We will have to wait.   */
					PAGE_SLEEP(object,dst_page,THREAD_UNINT);
					continue;
				}
				if ((dst_page->fictitious &&
					dst_page->list_req_pending)) {
					/* dump the fictitious page */
					dst_page->list_req_pending = FALSE;
					dst_page->clustered = FALSE;

					vm_page_lock_queues();
					vm_page_free(dst_page);
					vm_page_unlock_queues();
				} else if ((dst_page->absent &&
					dst_page->list_req_pending)) {
					/* the default_pager case */
					dst_page->list_req_pending = FALSE;
					dst_page->busy = FALSE;
					dst_page->clustered = FALSE;
				}
			}
			if((dst_page = vm_page_lookup(object, dst_offset)) ==
				VM_PAGE_NULL) {
				if(object->private) {
					/*
					 * This is a nasty wrinkle for users
					 * of upl who encounter device or
					 * private memory however, it is
					 * unavoidable, only a fault can
					 * resolve the actual backing
					 * physical page by asking the
					 * backing device.
					 */
					if(user_page_list) {
						user_page_list[entry].phys_addr = 0;
						user_page_list[entry].device = FALSE;
					}
					entry++;
					dst_offset += PAGE_SIZE_64;
					xfer_size -= PAGE_SIZE;
					continue;
				}
				/* need to allocate a page */
				dst_page = vm_page_alloc(object, dst_offset);
				if (dst_page == VM_PAGE_NULL) {
					vm_object_unlock(object);
					VM_PAGE_WAIT();
					vm_object_lock(object);
					continue;
				}
				dst_page->busy = FALSE;
				if(cntrl_flags & UPL_NO_SYNC) {
					dst_page->page_lock = 0;
					dst_page->unlock_request = 0;
				}
				dst_page->absent = TRUE;
				object->absent_count++;
			}
			if(cntrl_flags & UPL_NO_SYNC) {
				dst_page->page_lock = 0;
				dst_page->unlock_request = 0;
			}
			dst_page->overwriting = TRUE;
			if(dst_page->fictitious) {
				panic("need corner case for fictitious page");
			}
			if(dst_page->page_lock) {
				do_m_lock = TRUE;
				break;
			}
			/* eliminate all mappings from the */
			/* original object and its prodigy */

			if(dst_page->busy) {
				/* someone else is playing with the */
				/* page.  We will have to wait.     */
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}

			vm_page_lock_queues();
			if( !(cntrl_flags & UPL_FILE_IO)) {
				pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
			}
			dirty = pmap_is_modified(dst_page->phys_page);
			dirty = dirty ? TRUE : dst_page->dirty;

			if(cntrl_flags & UPL_SET_LITE) {
				int	pg_num;
				pg_num = (dst_offset-offset)/PAGE_SIZE;
				lite_list[pg_num>>5] |=
					1 << (pg_num & 31);
				pmap_clear_modify(dst_page->phys_page);
				/*
				 * Record that this page has been
				 * written out
				 */
#if	MACH_PAGEMAP
				vm_external_state_set(
					object->existence_map,
					dst_page->offset);
#endif	/*MACH_PAGEMAP*/

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->cleaning = TRUE;
				dst_page->dirty = TRUE;
				dst_page->precious = FALSE;
			} else {
				/* use pageclean setup, it is more */
				/* convenient even for the pageout */
				/* cases here */
				vm_pageclean_setup(dst_page,
					alias_page, upl->map_object,
					size - xfer_size);

				alias_page->absent = FALSE;
				alias_page = NULL;
			}

			if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
				/* clean in place for read implies   */
				/* that a write will be done on all  */
				/* the pages that are dirty before   */
				/* a upl commit is done.  The caller */
				/* is obligated to preserve the      */
				/* contents of all pages marked      */
				/* dirty.                            */
				upl->flags |= UPL_CLEAR_DIRTY;
			}

			if(!dirty) {
				dst_page->dirty = FALSE;
				dst_page->precious = TRUE;
			}

			if (dst_page->wire_count == 0) {
				/* deny access to the target page while */
				/* it is being worked on */
				dst_page->busy = TRUE;
			}
			vm_page_wire(dst_page);

			/* expect the page to be used */
			dst_page->reference = TRUE;
			dst_page->precious =
				(cntrl_flags & UPL_PRECIOUS)
					? TRUE : FALSE;

			if(user_page_list) {
				user_page_list[entry].phys_addr
					= dst_page->phys_page;
				user_page_list[entry].dirty =
					dst_page->dirty;
				user_page_list[entry].pageout =
					dst_page->pageout;
				user_page_list[entry].absent =
					dst_page->absent;
				user_page_list[entry].precious =
					dst_page->precious;
				user_page_list[entry].device =
					FALSE;
			}
			vm_page_unlock_queues();

			entry++;
			dst_offset += PAGE_SIZE_64;
			xfer_size -= PAGE_SIZE;
		}
	}
	if (upl->flags & UPL_INTERNAL) {
		if(page_list_count != NULL)
			*page_list_count = 0;
	} else if (*page_list_count > entry) {
		if(page_list_count != NULL)
			*page_list_count = entry;
	}

	if(alias_page != NULL) {
		vm_page_lock_queues();
		vm_page_free(alias_page);
		vm_page_unlock_queues();
	}

	if(do_m_lock) {
		vm_prot_t	access_required;
		/* call back all associated pages from other users of the pager */
		/* all future updates will be on data which is based on the     */
		/* changes we are going to make here. Note: it is assumed that  */
		/* we already hold copies of the data so we will not be seeing  */
		/* an avalanche of incoming data from the pager                 */
		access_required = (cntrl_flags & UPL_COPYOUT_FROM)
			? VM_PROT_READ : VM_PROT_WRITE;
		while (TRUE) {
			kern_return_t	rc;

			if(!object->pager_ready) {
				wait_result_t wait_result;

				wait_result = vm_object_sleep(object,
						VM_OBJECT_EVENT_PAGER_READY,
						THREAD_UNINT);
				if (wait_result != THREAD_AWAKENED) {
					vm_object_unlock(object);
					return(KERN_FAILURE);
				}
				continue;
			}

			vm_object_unlock(object);

			if (rc = memory_object_data_unlock(
				object->pager,
				dst_offset + object->paging_offset,
				size,
				access_required)) {
				if (rc == MACH_SEND_INTERRUPTED)
					continue;
				else
					return KERN_FAILURE;
			}
			break;
		}
		/* lets wait on the last page requested */
		/* NOTE: we will have to update lock completed routine to signal */
		if(dst_page != VM_PAGE_NULL &&
			(access_required & dst_page->page_lock) != access_required) {
			PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
			thread_block((void (*)(void))0);
			vm_object_lock(object);
		}
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/* JMM - Backward compatibility for now */
kern_return_t
vm_fault_list_request(
	memory_object_control_t		control,
	vm_object_offset_t		offset,
	vm_size_t			size,
	upl_t				*upl_ptr,
	upl_page_info_t			**user_page_list_ptr,
	int				page_list_count,
	int				cntrl_flags)
{
	int			local_list_count;
	upl_page_info_t		*user_page_list;
	kern_return_t		kr;

	if (user_page_list_ptr != NULL) {
		local_list_count = page_list_count;
		user_page_list = *user_page_list_ptr;
	} else {
		local_list_count = 0;
		user_page_list = NULL;
	}
	kr = memory_object_upl_request(control,
			offset,
			size,
			upl_ptr,
			user_page_list,
			&local_list_count,
			cntrl_flags);

	if(kr != KERN_SUCCESS)
		return kr;

	if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
		*user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
	}

	return KERN_SUCCESS;
}
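/*
 * vm_fault_list_request() is only a compatibility wrapper: it forwards to
 * memory_object_upl_request() and, for internal upls, hands the caller the
 * inline page list via UPL_GET_INTERNAL_PAGE_LIST().
 */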
/*
 * Routine:	vm_object_super_upl_request
 *	Cause the population of a portion of a vm_object
 *	in much the same way as memory_object_upl_request.
 *	Depending on the nature of the request, the pages
 *	returned may contain valid data or be uninitialized.
 *	However, the region may be expanded up to the super
 *	cluster size provided.
 */

__private_extern__ kern_return_t
vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_size_t		size,
	vm_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_page_t	target_page;
	int		ticket;

	if(object->paging_offset > offset)
		return KERN_FAILURE;

	offset = offset - object->paging_offset;
	if(cntrl_flags & UPL_FOR_PAGEOUT) {
		if((target_page = vm_page_lookup(object, offset))
						!= VM_PAGE_NULL) {
			ticket = target_page->page_ticket;
			cntrl_flags = cntrl_flags & ~(int)UPL_PAGE_TICKET_MASK;
			cntrl_flags = cntrl_flags |
				((ticket << UPL_PAGE_TICKET_SHIFT)
					& UPL_PAGE_TICKET_MASK);
		}
	}

	/* turns off super cluster exercised by the default_pager */
	/*
	super_cluster = size;
	*/
	if ((super_cluster > size) &&
		(vm_page_free_count > vm_page_free_reserved)) {

		vm_object_offset_t	base_offset;
		vm_size_t		super_size;

		base_offset = (offset &
			~((vm_object_offset_t) super_cluster - 1));
		super_size = (offset+size) > (base_offset + super_cluster) ?
			super_cluster<<1 : super_cluster;
		super_size = ((base_offset + super_size) > object->size) ?
			(object->size - base_offset) : super_size;
		if(offset > (base_offset + super_size))
			panic("vm_object_super_upl_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset, base_offset, super_size, super_cluster, size, object->paging_offset);
		/* apparently there is a case where the vm requests a */
		/* page to be written out whose offset is beyond the  */
		/* object size                                        */
		if((offset + size) > (base_offset + super_size))
			super_size = (offset + size) - base_offset;

		offset = base_offset;
		size = super_size;
	}
	vm_object_upl_request(object, offset, size,
			      upl, user_page_list, page_list_count,
			      cntrl_flags);
}
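/*
 * The super-cluster expansion above rounds the start of the request down to
 * a super_cluster boundary and may grow the size (up to twice the cluster,
 * clipped to the object size) before handing the enlarged range to
 * vm_object_upl_request().
 */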
kern_return_t
vm_upl_map(
	vm_map_t	map,
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	vm_size_t		size;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_page_t		m;
	kern_return_t		kr;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	/* check to see if already mapped */
	if(UPL_PAGE_LIST_MAPPED & upl->flags) {
		return KERN_FAILURE;
	}

	if((!(upl->map_object->pageout)) &&
		!((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
			(upl->map_object->phys_contiguous))) {

		vm_object_t		object;
		vm_page_t		alias_page;
		vm_object_offset_t	new_offset;
		int			pg_num;
		wpl_array_t		lite_list;

		if(upl->flags & UPL_INTERNAL) {
			lite_list = (wpl_array_t)
				((((vm_offset_t)upl) + sizeof(struct upl))
				+ ((upl->size/PAGE_SIZE)
					* sizeof(upl_page_info_t)));
		} else {
			lite_list = (wpl_array_t)
				(((vm_offset_t)upl) + sizeof(struct upl));
		}
		object = upl->map_object;
		upl->map_object = vm_object_allocate(upl->size);
		vm_object_lock(upl->map_object);
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy =
				MEMORY_OBJECT_COPY_NONE;
		upl->map_object->shadow_offset =
				upl->offset - object->paging_offset;
		upl->map_object->wimg_bits = object->wimg_bits;
		vm_object_unlock(upl->map_object);
		offset = upl->map_object->shadow_offset;
		new_offset = 0;
		size = upl->size;

		vm_object_lock(object);
		while(size) {
			pg_num = (new_offset)/PAGE_SIZE;
			if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				vm_object_unlock(object);
				VM_PAGE_GRAB_FICTITIOUS(alias_page);
				vm_object_lock(object);
				m = vm_page_lookup(object, offset);
				if (m == VM_PAGE_NULL) {
					panic("vm_upl_map: page missing\n");
				}

				vm_object_paging_begin(object);

				/*
				 * Convert the fictitious page to a private
				 * shadow of the real page.
				 */
				assert(alias_page->fictitious);
				alias_page->fictitious = FALSE;
				alias_page->private = TRUE;
				alias_page->pageout = TRUE;
				alias_page->phys_page = m->phys_page;
				vm_page_wire(alias_page);

				vm_page_insert(alias_page,
						upl->map_object, new_offset);
				assert(!alias_page->wanted);
				alias_page->busy = FALSE;
				alias_page->absent = FALSE;
			}

			size -= PAGE_SIZE;
			offset += PAGE_SIZE_64;
			new_offset += PAGE_SIZE_64;
		}
		vm_object_unlock(object);
	}

	offset = 0;	/* Always map the entire object */
	size = upl->size;

	vm_object_lock(upl->map_object);
	upl->map_object->ref_count++;
	vm_object_res_reference(upl->map_object);
	vm_object_unlock(upl->map_object);

	/* NEED A UPL_MAP ALIAS */
	kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE,
		upl->map_object, offset, FALSE,
		VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		return(kr);
	}

	for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
		m = vm_page_lookup(upl->map_object, offset);
		if(m) {
			unsigned int	cache_attr;
			cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;

			PMAP_ENTER(map->pmap, addr,
				m, VM_PROT_ALL,
				cache_attr, TRUE);
		}
		offset+=PAGE_SIZE_64;
	}
	upl->ref_count++;	/* hold a reference for the mapping */
	upl->flags |= UPL_PAGE_LIST_MAPPED;
	upl->kaddr = *dst_addr;
	return KERN_SUCCESS;
}
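/*
 * vm_upl_map() enters upl->map_object into the target map, pre-enters the
 * resident pages with PMAP_ENTER, and takes an extra upl reference that is
 * dropped again when the mapping is torn down by the unmap routine below.
 */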
kern_return_t
vm_upl_unmap(
	vm_map_t	map,
	upl_t		upl)
{
	vm_offset_t	addr;
	vm_size_t	size;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if(upl->flags & UPL_PAGE_LIST_MAPPED) {
		addr = upl->kaddr;
		size = upl->size;
		assert(upl->ref_count > 1);
		upl->ref_count--;		/* removing mapping ref */
		upl->flags &= ~UPL_PAGE_LIST_MAPPED;
		upl->kaddr = (vm_offset_t) 0;

		vm_deallocate(map, addr, size);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
kern_return_t
upl_commit_range(
	upl_t			upl,
	vm_offset_t		offset,
	vm_size_t		size,
	int			flags,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count,
	boolean_t		*empty)
{
	vm_size_t		xfer_size = size;
	vm_object_t		shadow_object;
	vm_object_t		object = upl->map_object;
	vm_object_offset_t	target_offset;
	int			entry;
	wpl_array_t		lite_list;
	int			occupied;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if(object->pageout) {
		shadow_object = object->shadow;
	} else {
		shadow_object = object;
	}

	if(upl->flags & UPL_DEVICE_MEMORY) {
		xfer_size = 0;
	} else if ((offset + size) > upl->size) {
		return KERN_FAILURE;
	}

	if(upl->flags & UPL_INTERNAL) {
		lite_list = (wpl_array_t)
			((((vm_offset_t)upl) + sizeof(struct upl))
			+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
	} else {
		lite_list = (wpl_array_t)
			(((vm_offset_t)upl) + sizeof(struct upl));
	}

	vm_object_lock(shadow_object);

	entry = offset/PAGE_SIZE;
	target_offset = (vm_object_offset_t)offset;

	while(xfer_size) {
		vm_page_t	t, m;
		upl_page_info_t	*p;

		m = VM_PAGE_NULL;

		if(upl->flags & UPL_LITE) {
			int	pg_num;
			pg_num = target_offset/PAGE_SIZE;
			if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
				m = vm_page_lookup(shadow_object,
					target_offset + (upl->offset -
						shadow_object->paging_offset));
			}
		}
		if(object->pageout) {
			if ((t = vm_page_lookup(object, target_offset))
								!= NULL) {
				VM_PAGE_FREE(t);
				if(m == VM_PAGE_NULL)
					m = vm_page_lookup(
						shadow_object,
						target_offset +
							object->shadow_offset);
				if(m != VM_PAGE_NULL)
					vm_object_paging_end(m->object);
			}
		}

		if(m != VM_PAGE_NULL) {
			if(upl->flags & UPL_IO_WIRE) {
				vm_page_lock_queues();
				vm_page_unwire(m);
				vm_page_unlock_queues();
				if(page_list) {
					page_list[entry].phys_addr = 0;
				}
				if (flags & UPL_COMMIT_SET_DIRTY) {
					m->dirty = TRUE;
				} else if ((upl->flags & UPL_CLEAR_DIRTY) ||
					(flags & UPL_COMMIT_CLEAR_DIRTY)) {
					pmap_clear_modify(m->phys_page);
					m->dirty = FALSE;
				}
				if (flags & UPL_COMMIT_INACTIVATE) {
					vm_page_deactivate(m);
					m->reference = FALSE;
					pmap_clear_reference(m->phys_page);
				}
				target_offset += PAGE_SIZE_64;
				xfer_size -= PAGE_SIZE;
				entry++;
				continue;
			}
			vm_page_lock_queues();
			if ((upl->flags & UPL_CLEAR_DIRTY) ||
				(flags & UPL_COMMIT_CLEAR_DIRTY)) {
				pmap_clear_modify(m->phys_page);
				m->dirty = FALSE;
			}
			if(page_list) {
				p = &(page_list[entry]);
				if(p->phys_addr && p->pageout && !m->pageout) {
					m->busy = TRUE;
					m->pageout = TRUE;
					vm_page_wire(m);
				} else if (page_list[entry].phys_addr &&
						!p->pageout && m->pageout &&
						!m->dump_cleaning) {
					m->pageout = FALSE;
					m->absent = FALSE;
					m->overwriting = FALSE;
					vm_page_unwire(m);
					PAGE_WAKEUP_DONE(m);
				}
				page_list[entry].phys_addr = 0;
			}
			m->dump_cleaning = FALSE;
			if(m->laundry) {
				vm_page_laundry_count--;
				m->laundry = FALSE;
				if (vm_page_laundry_count < vm_page_laundry_min) {
					vm_page_laundry_min = 0;
					thread_wakeup((event_t)
						&vm_page_laundry_count);
				}
			}
			if(m->pageout) {
				m->cleaning = FALSE;
				m->pageout = FALSE;
#if MACH_CLUSTER_STATS
				if (m->wanted) vm_pageout_target_collisions++;
#endif
				pmap_page_protect(m->phys_page, VM_PROT_NONE);
				m->dirty = pmap_is_modified(m->phys_page);
				if(m->dirty) {
					CLUSTER_STAT(
						vm_pageout_target_page_dirtied++;)
					vm_page_unwire(m);/* reactivates */
					VM_STAT(reactivations++);
					PAGE_WAKEUP_DONE(m);
				} else {
					CLUSTER_STAT(
						vm_pageout_target_page_freed++;)
					vm_page_free(m);/* clears busy, etc. */
					VM_STAT(pageouts++);
				}
				vm_page_unlock_queues();
				target_offset += PAGE_SIZE_64;
				xfer_size -= PAGE_SIZE;
				entry++;
				continue;
			}
			if (flags & UPL_COMMIT_INACTIVATE) {
				vm_page_deactivate(m);
				m->reference = FALSE;
				pmap_clear_reference(m->phys_page);
			} else if (!m->active && !m->inactive) {
				if (m->reference)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
			}
#if MACH_CLUSTER_STATS
			m->dirty = pmap_is_modified(m->phys_page);

			if (m->dirty)   vm_pageout_cluster_dirtied++;
			else            vm_pageout_cluster_cleaned++;
			if (m->wanted)  vm_pageout_cluster_collisions++;
#endif

			if((m->busy) && (m->cleaning)) {
				/* the request_page_list case */
				if(m->absent) {
					m->absent = FALSE;
					if(shadow_object->absent_count == 1)
						vm_object_absent_release(shadow_object);
					else
						shadow_object->absent_count--;
				}
				m->overwriting = FALSE;
				m->busy = FALSE;
				m->dirty = FALSE;
			} else if (m->overwriting) {
				/* alternate request page list, write to */
				/* page_list case.  Occurs when the original */
				/* page was wired at the time of the list */
				/* request */
				assert(m->wire_count != 0);
				vm_page_unwire(m);/* reactivates */
				m->overwriting = FALSE;
			}
			m->cleaning = FALSE;
			/* It is a part of the semantic of COPYOUT_FROM */
			/* UPLs that a commit implies cache sync */
			/* between the vm page and the backing store */
			/* this can be used to strip the precious bit */
			/* as well as clean */
			if (upl->flags & UPL_PAGE_SYNC_DONE)
				m->precious = FALSE;

			if (flags & UPL_COMMIT_SET_DIRTY) {
				m->dirty = TRUE;
			}
			/*
			 * Wakeup any thread waiting for the page to be un-cleaning.
			 */
			PAGE_WAKEUP(m);
			vm_page_unlock_queues();
		}
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
		entry++;
	}

	occupied = 1;
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		int	pg_num;
		int	i;
		pg_num = upl->size/PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;
		occupied = 0;
		for(i= 0; i<pg_num; i++) {
			if(lite_list[i] != 0) {
				occupied = 1;
				break;
			}
		}
	} else {
		if(queue_empty(&upl->map_object->memq)) {
			occupied = 0;
		}
	}

	if(occupied == 0) {
		if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) {
			*empty = TRUE;
		}
		if(object == shadow_object)
			vm_object_paging_end(shadow_object);
	}
	vm_object_unlock(shadow_object);

	return KERN_SUCCESS;
}
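/*
 * upl_commit_range() walks the committed range page by page: lite upls are
 * resolved through the wired-page bitmap, pageout upls free their shadow
 * placeholder pages, and the laundry/pageout throttle counters are updated
 * as each page completes.
 */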
kern_return_t
upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		error,
	boolean_t	*empty)
{
	vm_size_t		xfer_size = size;
	vm_object_t		shadow_object;
	vm_object_t		object = upl->map_object;
	vm_object_offset_t	target_offset;
	vm_object_offset_t	page_offset;
	int			entry;
	wpl_array_t		lite_list;
	int			occupied;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (upl->flags & UPL_IO_WIRE) {
		return upl_commit_range(upl,
			offset, size, 0,
			NULL, 0, empty);
	}

	if(object->pageout) {
		shadow_object = object->shadow;
	} else {
		shadow_object = object;
	}

	if(upl->flags & UPL_DEVICE_MEMORY) {
		xfer_size = 0;
	} else if ((offset + size) > upl->size) {
		return KERN_FAILURE;
	}

	vm_object_lock(shadow_object);

	if(upl->flags & UPL_INTERNAL) {
		lite_list = (wpl_array_t)
			((((vm_offset_t)upl) + sizeof(struct upl))
			+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
	} else {
		lite_list = (wpl_array_t)
			(((vm_offset_t)upl) + sizeof(struct upl));
	}

	entry = offset/PAGE_SIZE;
	target_offset = (vm_object_offset_t)offset;

	while(xfer_size) {
		vm_page_t	t, m;

		m = VM_PAGE_NULL;

		if(upl->flags & UPL_LITE) {
			int	pg_num;
			pg_num = target_offset/PAGE_SIZE;
			if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
				m = vm_page_lookup(shadow_object,
					target_offset + (upl->offset -
						shadow_object->paging_offset));
			}
		}
		if(object->pageout) {
			if ((t = vm_page_lookup(object, target_offset))
								!= NULL) {
				VM_PAGE_FREE(t);
				if(m == VM_PAGE_NULL)
					m = vm_page_lookup(
						shadow_object,
						target_offset +
							object->shadow_offset);
				if(m != VM_PAGE_NULL)
					vm_object_paging_end(m->object);
			}
		}
		if(m != VM_PAGE_NULL) {
			vm_page_lock_queues();
			if(m->absent) {
				/* COPYOUT = FALSE case */
				/* check for error conditions which must */
				/* be passed back to the pages customer */
				if(error & UPL_ABORT_RESTART) {
					vm_object_absent_release(m->object);
					m->page_error = KERN_MEMORY_ERROR;
				} else if(error & UPL_ABORT_UNAVAILABLE) {
					m->clustered = FALSE;
				} else if(error & UPL_ABORT_ERROR) {
					vm_object_absent_release(m->object);
					m->page_error = KERN_MEMORY_ERROR;
				} else if(error & UPL_ABORT_DUMP_PAGES) {
					m->clustered = TRUE;
				} else {
					m->clustered = TRUE;
				}

				m->cleaning = FALSE;
				m->overwriting = FALSE;
				PAGE_WAKEUP_DONE(m);

				if(m->clustered) {
					vm_page_free(m);
				} else {
					vm_page_activate(m);
				}
				vm_page_unlock_queues();
				target_offset += PAGE_SIZE_64;
				xfer_size -= PAGE_SIZE;
				entry++;
				continue;
			}
			/*
			 * Handle the trusted pager throttle.
			 */
			if (m->laundry) {
				vm_page_laundry_count--;
				m->laundry = FALSE;
				if (vm_page_laundry_count
						< vm_page_laundry_min) {
					vm_page_laundry_min = 0;
					thread_wakeup((event_t)
						&vm_page_laundry_count);
				}
			}
			if(m->pageout) {
				assert(m->wire_count == 1);
				m->cleaning = FALSE;
				m->pageout = FALSE;
				vm_page_unwire(m);
			}
			m->dump_cleaning = FALSE;
			m->cleaning = FALSE;
			m->busy = FALSE;
			m->overwriting = FALSE;
#if	MACH_PAGEMAP
			vm_external_state_clr(
				m->object->existence_map, m->offset);
#endif	/* MACH_PAGEMAP */
			if(error & UPL_ABORT_DUMP_PAGES) {
				pmap_page_protect(m->phys_page, VM_PROT_NONE);
				vm_page_free(m);
			}
			vm_page_unlock_queues();
		}
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
		entry++;
	}

	occupied = 1;
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		int	pg_num;
		int	i;
		pg_num = upl->size/PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;
		occupied = 0;
		for(i= 0; i<pg_num; i++) {
			if(lite_list[i] != 0) {
				occupied = 1;
				break;
			}
		}
	} else {
		if(queue_empty(&upl->map_object->memq)) {
			occupied = 0;
		}
	}

	if(occupied == 0) {
		if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) {
			*empty = TRUE;
		}
		if(object == shadow_object)
			vm_object_paging_end(shadow_object);
	}
	vm_object_unlock(shadow_object);

	return KERN_SUCCESS;
}
kern_return_t
upl_abort(
	upl_t	upl,
	int	error)
{
	vm_object_t		object = NULL;
	vm_object_t		shadow_object = NULL;
	vm_object_offset_t	offset;
	vm_object_offset_t	shadow_offset;
	vm_object_offset_t	target_offset;
	int			i;
	wpl_array_t		lite_list;
	vm_page_t		t, m;
	int			occupied;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (upl->flags & UPL_IO_WIRE) {
		boolean_t	empty;
		return upl_commit_range(upl,
			0, upl->size, 0,
			NULL, 0, &empty);
	}

	if(upl->flags & UPL_DEVICE_MEMORY) {
		return KERN_SUCCESS;
	}

	object = upl->map_object;

	if (object == NULL) {
		panic("upl_abort: upl object is not backed by an object");
		return KERN_INVALID_ARGUMENT;
	}

	if(object->pageout) {
		shadow_object = object->shadow;
		shadow_offset = object->shadow_offset;
	} else {
		shadow_object = object;
		shadow_offset = upl->offset - object->paging_offset;
	}

	if(upl->flags & UPL_INTERNAL) {
		lite_list = (wpl_array_t)
			((((vm_offset_t)upl) + sizeof(struct upl))
			+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
	} else {
		lite_list = (wpl_array_t)
			(((vm_offset_t)upl) + sizeof(struct upl));
	}
	offset = 0;
	vm_object_lock(shadow_object);
	for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) {
		m = VM_PAGE_NULL;
		target_offset = offset + shadow_offset;
		if(upl->flags & UPL_LITE) {
			int	pg_num;
			pg_num = offset/PAGE_SIZE;
			if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
				m = vm_page_lookup(
					shadow_object, target_offset);
			}
		}
		if(object->pageout) {
			if ((t = vm_page_lookup(object, offset)) != NULL) {
				VM_PAGE_FREE(t);
				if(m == VM_PAGE_NULL)
					m = vm_page_lookup(
						shadow_object, target_offset);
				if(m != VM_PAGE_NULL)
					vm_object_paging_end(m->object);
			}
		}
		if(m != VM_PAGE_NULL) {
			vm_page_lock_queues();
			if(m->absent) {
				/* COPYOUT = FALSE case */
				/* check for error conditions which must */
				/* be passed back to the pages customer */
				if(error & UPL_ABORT_RESTART) {
					vm_object_absent_release(m->object);
					m->page_error = KERN_MEMORY_ERROR;
				} else if(error & UPL_ABORT_UNAVAILABLE) {
					m->clustered = FALSE;
				} else if(error & UPL_ABORT_ERROR) {
					vm_object_absent_release(m->object);
					m->page_error = KERN_MEMORY_ERROR;
				} else if(error & UPL_ABORT_DUMP_PAGES) {
					m->clustered = TRUE;
				} else {
					m->clustered = TRUE;
				}

				m->cleaning = FALSE;
				m->overwriting = FALSE;
				PAGE_WAKEUP_DONE(m);

				if(m->clustered) {
					vm_page_free(m);
				} else {
					vm_page_activate(m);
				}
				vm_page_unlock_queues();
				continue;
			}
			/*
			 * Handle the trusted pager throttle.
			 */
			if (m->laundry) {
				vm_page_laundry_count--;
				m->laundry = FALSE;
				if (vm_page_laundry_count
						< vm_page_laundry_min) {
					vm_page_laundry_min = 0;
					thread_wakeup((event_t)
						&vm_page_laundry_count);
				}
			}
			if(m->pageout) {
				assert(m->wire_count == 1);
				m->cleaning = FALSE;
				m->pageout = FALSE;
				vm_page_unwire(m);
			}
			m->dump_cleaning = FALSE;
			m->cleaning = FALSE;
			m->busy = FALSE;
			m->overwriting = FALSE;
#if	MACH_PAGEMAP
			vm_external_state_clr(
				m->object->existence_map, m->offset);
#endif	/* MACH_PAGEMAP */
			if(error & UPL_ABORT_DUMP_PAGES) {
				pmap_page_protect(m->phys_page, VM_PROT_NONE);
				vm_page_free(m);
			}
			vm_page_unlock_queues();
		}
	}

	occupied = 1;
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		int	pg_num;
		pg_num = upl->size/PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;
		occupied = 0;
		for(i= 0; i<pg_num; i++) {
			if(lite_list[i] != 0) {
				occupied = 1;
				break;
			}
		}
	} else {
		if(queue_empty(&upl->map_object->memq)) {
			occupied = 0;
		}
	}

	if(occupied == 0) {
		if(object == shadow_object)
			vm_object_paging_end(shadow_object);
	}
	vm_object_unlock(shadow_object);

	return KERN_SUCCESS;
}
/* an option on commit should be wire */

kern_return_t
upl_commit(
	upl_t			upl,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count)
{
	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if(upl->flags & (UPL_LITE | UPL_IO_WIRE)) {
		boolean_t	empty;
		return upl_commit_range(upl, 0, upl->size, 0,
					page_list, count, &empty);
	}

	if (upl->flags & UPL_DEVICE_MEMORY)
		page_list = NULL;

	if ((upl->flags & UPL_CLEAR_DIRTY) ||
		(upl->flags & UPL_PAGE_SYNC_DONE) || page_list) {
		vm_object_t	shadow_object = upl->map_object->shadow;
		vm_object_t	object = upl->map_object;
		vm_object_offset_t target_offset;
		vm_size_t	xfer_end;
		int		entry;
		vm_page_t	t, m;
		upl_page_info_t	*p;

		vm_object_lock(shadow_object);

		entry = 0;
		target_offset = object->shadow_offset;
		xfer_end = upl->size + object->shadow_offset;

		while(target_offset < xfer_end) {

			if ((t = vm_page_lookup(object,
				target_offset - object->shadow_offset))
				== NULL) {
				target_offset += PAGE_SIZE_64;
				entry++;
				continue;
			}

			m = vm_page_lookup(shadow_object, target_offset);
			if(m != VM_PAGE_NULL) {
				if (upl->flags & UPL_CLEAR_DIRTY) {
					pmap_clear_modify(m->phys_page);
					m->dirty = FALSE;
				}
				/* It is a part of the semantic of */
				/* COPYOUT_FROM UPLs that a commit */
				/* implies cache sync between the  */
				/* vm page and the backing store   */
				/* this can be used to strip the   */
				/* precious bit as well as clean   */
				if (upl->flags & UPL_PAGE_SYNC_DONE)
					m->precious = FALSE;

				if(page_list) {
					p = &(page_list[entry]);
					if(page_list[entry].phys_addr &&
						p->pageout && !m->pageout) {
						vm_page_lock_queues();
						m->busy = TRUE;
						m->pageout = TRUE;
						vm_page_wire(m);
						vm_page_unlock_queues();
					} else if (page_list[entry].phys_addr &&
						!p->pageout && m->pageout &&
						!m->dump_cleaning) {
						vm_page_lock_queues();
						m->pageout = FALSE;
						m->absent = FALSE;
						m->overwriting = FALSE;
						vm_page_unwire(m);
						PAGE_WAKEUP_DONE(m);
						vm_page_unlock_queues();
					}
					page_list[entry].phys_addr = 0;
				}
			}
			target_offset += PAGE_SIZE_64;
			entry++;
		}

		vm_object_unlock(shadow_object);
	}
	if (upl->flags & UPL_DEVICE_MEMORY) {
		vm_object_lock(upl->map_object->shadow);
		if(upl->map_object == upl->map_object->shadow)
			vm_object_paging_end(upl->map_object->shadow);
		vm_object_unlock(upl->map_object->shadow);
	}
	return KERN_SUCCESS;
}
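/*
 * upl_commit() is the whole-upl variant: lite and io-wire upls are simply
 * forwarded to upl_commit_range() over [0, upl->size); otherwise the shadow
 * object is walked directly using the upl's page list.
 */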
kern_return_t
vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_page_t		dst_page;
	vm_object_offset_t	dst_offset = offset;
	vm_size_t		xfer_size = size;
	upl_t			upl = NULL;
	unsigned int		entry;
	wpl_array_t		lite_list;
	int			page_field_size;
	vm_prot_t		prot;
	vm_page_t		alias_page = NULL;
	kern_return_t		ret;

	if(cntrl_flags & UPL_COPYOUT_FROM) {
		prot = VM_PROT_READ;
	} else {
		prot = VM_PROT_READ | VM_PROT_WRITE;
	}

	if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
		size = MAX_UPL_TRANSFER * page_size;
	}

	if(cntrl_flags & UPL_SET_INTERNAL)
		if(page_list_count != NULL)
			*page_list_count = MAX_UPL_TRANSFER;
	if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
	   ((page_list_count != NULL) && (*page_list_count != 0)
			&& *page_list_count < (size/page_size)))
		return KERN_INVALID_ARGUMENT;

	if((!object->internal) && (object->paging_offset != 0))
		panic("vm_object_upl_request: vnode object with non-zero paging offset\n");

	if(object->phys_contiguous) {
		/* No paging operations are possible against this memory */
		/* and so no need for map object, ever */
		cntrl_flags |= UPL_SET_LITE;
	}

	if(cntrl_flags & UPL_SET_INTERNAL) {
		if(cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(
				UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
				size);
			user_page_list = (upl_page_info_t *)
				(((vm_offset_t)upl) + sizeof(struct upl));
			lite_list = (wpl_array_t)
				(((vm_offset_t)user_page_list) +
				((size/PAGE_SIZE) *
					sizeof(upl_page_info_t)));
			page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
			page_field_size =
				(page_field_size + 3) & 0xFFFFFFFC;
			bzero((char *)lite_list, page_field_size);
			upl->flags =
				UPL_LITE | UPL_INTERNAL | UPL_IO_WIRE;
		} else {
			upl = upl_create(UPL_CREATE_INTERNAL, size);
			user_page_list = (upl_page_info_t *)
				(((vm_offset_t)upl)
					+ sizeof(struct upl));
			upl->flags = UPL_INTERNAL | UPL_IO_WIRE;
		}
	} else {
		if(cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(UPL_CREATE_LITE, size);
			lite_list = (wpl_array_t)
				(((vm_offset_t)upl) + sizeof(struct upl));
			page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
			page_field_size =
				(page_field_size + 3) & 0xFFFFFFFC;
			bzero((char *)lite_list, page_field_size);
			upl->flags = UPL_LITE | UPL_IO_WIRE;
		} else {
			upl = upl_create(UPL_CREATE_EXTERNAL, size);
			upl->flags = UPL_IO_WIRE;
		}
	}

	if(object->phys_contiguous) {
		upl->offset = offset + object->paging_offset;
		if(user_page_list) {
			user_page_list[0].phys_addr =
				(offset + object->shadow_offset)>>12;
			user_page_list[0].device = TRUE;
		}
		upl->map_object = object;
		/* don't need any shadow mappings for this one */
		/* since it is already I/O memory */
		upl->flags |= UPL_DEVICE_MEMORY;

		vm_object_lock(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		if(page_list_count != NULL) {
			if (upl->flags & UPL_INTERNAL) {
				*page_list_count = 0;
			} else {
				*page_list_count = 1;
			}
		}
		return KERN_SUCCESS;
	}

	if(cntrl_flags & UPL_SET_LITE) {
		upl->map_object = object;
	} else {
		upl->map_object = vm_object_allocate(size);
		vm_object_lock(upl->map_object);
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy =
				MEMORY_OBJECT_COPY_NONE;
		upl->map_object->shadow_offset = offset;
		upl->map_object->wimg_bits = object->wimg_bits;
		vm_object_unlock(upl->map_object);
	}
	upl->offset = offset + object->paging_offset;

	vm_object_lock(object);

	if (!object->phys_contiguous) {
		/* Protect user space from future COW operations */
		object->true_share = TRUE;
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
#ifdef UBC_DEBUG
	queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UBC_DEBUG */
	vm_object_paging_begin(object);

	entry = 0;
	while (xfer_size) {
		if((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
			vm_object_unlock(object);
			VM_PAGE_GRAB_FICTITIOUS(alias_page);
			vm_object_lock(object);
		}
		dst_page = vm_page_lookup(object, dst_offset);
		if ((dst_page == VM_PAGE_NULL) || (dst_page->busy) ||
			(dst_page->unusual && (dst_page->error ||
				dst_page->restart || dst_page->absent ||
				dst_page->fictitious ||
				prot & dst_page->page_lock))) {
			vm_fault_return_t	result;
			do {
				vm_page_t	top_page;
				kern_return_t	error_code;
				int		interruptible;

				vm_object_offset_t	lo_offset = offset;
				vm_object_offset_t	hi_offset = offset + size;

				if(cntrl_flags & UPL_SET_INTERRUPTIBLE) {
					interruptible = THREAD_ABORTSAFE;
				} else {
					interruptible = THREAD_UNINT;
				}

				result = vm_fault_page(object, dst_offset,
					prot | VM_PROT_WRITE, FALSE,
					interruptible,
					lo_offset, hi_offset,
					VM_BEHAVIOR_SEQUENTIAL,
					&prot, &dst_page, &top_page,
					(int *)0,
					&error_code, FALSE, FALSE, NULL, 0);

				switch(result) {
				case VM_FAULT_SUCCESS:

					PAGE_WAKEUP_DONE(dst_page);

					/*
					 * Release paging references and
					 * top-level placeholder page, if any.
					 */
					if(top_page != VM_PAGE_NULL) {
						vm_object_t local_object;
						local_object = top_page->object;
						if(top_page->object
							!= dst_page->object) {
							vm_object_lock(local_object);
							VM_PAGE_FREE(top_page);
							vm_object_paging_end(
								local_object);
							vm_object_unlock(local_object);
						} else {
							VM_PAGE_FREE(top_page);
							vm_object_paging_end(
								local_object);
						}
					}
					break;

				case VM_FAULT_RETRY:
					vm_object_lock(object);
					vm_object_paging_begin(object);
					break;

				case VM_FAULT_FICTITIOUS_SHORTAGE:
					vm_page_more_fictitious();
					vm_object_lock(object);
					vm_object_paging_begin(object);
					break;

				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible)) {
						vm_object_lock(object);
						vm_object_paging_begin(object);
						break;
					}
					/* fall thru */

				case VM_FAULT_INTERRUPTED:
					error_code = MACH_SEND_INTERRUPTED;
				case VM_FAULT_MEMORY_ERROR:
					ret = (error_code ? error_code :
						KERN_MEMORY_ERROR);
					vm_object_lock(object);
					for(; offset < dst_offset;
						offset += PAGE_SIZE) {
						dst_page = vm_page_lookup(
							object, offset);
						if(dst_page == VM_PAGE_NULL)
							panic("vm_object_iopl_request: Wired pages missing. \n");
						vm_page_lock_queues();
						vm_page_unwire(dst_page);
						vm_page_unlock_queues();
						VM_STAT(reactivations++);
					}
					vm_object_unlock(object);
					return ret;
				}
			} while ((result != VM_FAULT_SUCCESS)
					|| (result == VM_FAULT_INTERRUPTED));
		}
		vm_page_lock_queues();
		vm_page_wire(dst_page);
		vm_page_unlock_queues();

		vm_page_lock_queues();
		if(cntrl_flags & UPL_SET_LITE) {
			int	pg_num;
			pg_num = (dst_offset-offset)/PAGE_SIZE;
			lite_list[pg_num>>5] |= 1 << (pg_num & 31);
		} else {
			/*
			 * Convert the fictitious page to a
			 * private shadow of the real page.
			 */
			assert(alias_page->fictitious);
			alias_page->fictitious = FALSE;
			alias_page->private = TRUE;
			alias_page->pageout = TRUE;
			alias_page->phys_page = dst_page->phys_page;
			vm_page_wire(alias_page);

			vm_page_insert(alias_page,
				upl->map_object, size - xfer_size);
			assert(!alias_page->wanted);
			alias_page->busy = FALSE;
			alias_page->absent = FALSE;
			alias_page = NULL;
		}

		/* expect the page to be used */
		dst_page->reference = TRUE;
		if (!(cntrl_flags & UPL_COPYOUT_FROM))
			dst_page->dirty = TRUE;

		if(user_page_list) {
			user_page_list[entry].phys_addr
				= dst_page->phys_page;
			user_page_list[entry].dirty =
				dst_page->dirty;
			user_page_list[entry].pageout =
				dst_page->pageout;
			user_page_list[entry].absent =
				dst_page->absent;
			user_page_list[entry].precious =
				dst_page->precious;
		}
		vm_page_unlock_queues();

		entry++;
		dst_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
	}

	if (upl->flags & UPL_INTERNAL) {
		if(page_list_count != NULL)
			*page_list_count = 0;
	} else if (*page_list_count > entry) {
		if(page_list_count != NULL)
			*page_list_count = entry;
	}

	if(alias_page != NULL) {
		vm_page_lock_queues();
		vm_page_free(alias_page);
		vm_page_unlock_queues();
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
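/*
 * vm_object_iopl_request() differs from vm_object_upl_request() in that it
 * faults absent pages in with vm_fault_page() and wires every page it
 * returns, which is what makes the resulting upl usable for direct I/O
 * (UPL_IO_WIRE).
 */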
vm_size_t
upl_get_internal_pagelist_offset()
{
	return sizeof(struct upl);
}
void
upl_set_dirty(
	upl_t	upl)
{
	upl->flags |= UPL_CLEAR_DIRTY;
}

void
upl_clear_dirty(
	upl_t	upl)
{
	upl->flags &= ~UPL_CLEAR_DIRTY;
}
boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return(UPL_PAGE_PRESENT(upl, index));
}
boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return(UPL_DIRTY_PAGE(upl, index));
}
boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return(UPL_VALID_PAGE(upl, index));
}
vm_offset_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return((vm_offset_t)UPL_PHYS_PAGE(upl, index));
}
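/*
 * These accessors are thin wrappers over the UPL_PAGE_PRESENT,
 * UPL_DIRTY_PAGE, UPL_VALID_PAGE and UPL_PHYS_PAGE macros, which read the
 * upl_page_info array associated with the upl.
 */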
void
vm_countdirtypages(void)
{
	vm_page_t m;
	int dpages;
	int pgopages;
	int precpages;

	dpages=0;
	pgopages=0;
	precpages=0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	do {
		if (m ==(vm_page_t )0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		m = (vm_page_t) queue_next(&m->pageq);
		if (m ==(vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_zf);
	do {
		if (m ==(vm_page_t )0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		m = (vm_page_t) queue_next(&m->pageq);
		if (m ==(vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages=0;
	pgopages=0;
	precpages=0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_active);

	do {
		if(m == (vm_page_t )0) break;
		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		m = (vm_page_t) queue_next(&m->pageq);
		if(m == (vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);

}
#endif /* MACH_BSD */
kern_return_t
upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
{
	if(al)
		*al = upl->ubc_alias1;
	if(al2)
		*al2 = upl->ubc_alias2;
	return KERN_SUCCESS;
}
#endif /* UBC_DEBUG */
#include <ddb/db_output.h>
#include <ddb/db_print.h>
#include <vm/vm_print.h>

#define	printf	kdbprintf
extern int db_indent;
void db_pageout(void);

void
db_vm(void)
{
	extern int vm_page_gobble_count;

	iprintf("VM Statistics:\n");
	iprintf("pages:\n");
	iprintf("activ %5d inact %5d free %5d",
		vm_page_active_count, vm_page_inactive_count,
		vm_page_free_count);
	printf(" wire %5d gobbl %5d\n",
		vm_page_wire_count, vm_page_gobble_count);
	iprintf("laund %5d\n",
		vm_page_laundry_count);
	iprintf("target:\n");
	iprintf("min %5d inact %5d free %5d",
		vm_page_free_min, vm_page_inactive_target,
		vm_page_free_target);
	printf(" resrv %5d\n", vm_page_free_reserved);
	iprintf("burst:\n");
	iprintf("max %5d min %5d wait %5d empty %5d\n",
		vm_pageout_burst_max, vm_pageout_burst_min,
		vm_pageout_burst_wait, vm_pageout_empty_wait);
	iprintf("pause:\n");
	iprintf("count %5d max %5d\n",
		vm_pageout_pause_count, vm_pageout_pause_max);
#if	MACH_COUNTERS
	iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue);
#endif	/* MACH_COUNTERS */
}

void
db_pageout(void)
{
#if	MACH_COUNTERS
	extern int c_laundry_pages_freed;
#endif	/* MACH_COUNTERS */

	iprintf("Pageout Statistics:\n");
	iprintf("active %5d inactv %5d\n",
		vm_pageout_active, vm_pageout_inactive);
	iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
		vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
		vm_pageout_inactive_busy, vm_pageout_inactive_absent);
	iprintf("used %5d clean %5d dirty %5d\n",
		vm_pageout_inactive_used, vm_pageout_inactive_clean,
		vm_pageout_inactive_dirty);
#if	MACH_COUNTERS
	iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
#endif	/* MACH_COUNTERS */
#if	MACH_CLUSTER_STATS
	iprintf("Cluster Statistics:\n");
	iprintf("dirtied %5d cleaned %5d collisions %5d\n",
		vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
		vm_pageout_cluster_collisions);
	iprintf("clusters %5d conversions %5d\n",
		vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
	iprintf("Target Statistics:\n");
	iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
		vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
		vm_pageout_target_page_freed);
#endif	/* MACH_CLUSTER_STATS */
}

#if	MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */

#endif	/* MACH_KDB */