[apple/xnu.git] xnu-123.5 - osfmk/vm/vm_pageout.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/vm_pageout.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 * Date: 1985
56 *
57 * The proverbial page-out daemon.
58 */
59 #ifdef MACH_BSD
60 /* remove after component merge */
61 extern int vnode_pager_workaround;
62 #endif
63
64 #include <mach_pagemap.h>
65 #include <mach_cluster_stats.h>
66 #include <mach_kdb.h>
67 #include <advisory_pageout.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/mach_host_server.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <kern/host_statistics.h>
76 #include <kern/counters.h>
77 #include <kern/thread.h>
78 #include <kern/thread_swap.h>
79 #include <kern/xpr.h>
80 #include <vm/pmap.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_pageout.h>
85 #include <machine/vm_tuning.h>
86 #include <kern/misc_protos.h>
87
88 extern ipc_port_t memory_manager_default;
89
90 #ifndef VM_PAGE_LAUNDRY_MAX
91 #define VM_PAGE_LAUNDRY_MAX 10 /* outstanding DMM page cleans */
92 #endif /* VM_PAGE_LAUNDRY_MAX */
93
94 #ifndef VM_PAGEOUT_BURST_MAX
95 #define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */
96 #endif /* VM_PAGEOUT_BURST_MAX */
97
98 #ifndef VM_PAGEOUT_DISCARD_MAX
99 #define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */
100 #endif /* VM_PAGEOUT_DISCARD_MAX */
101
102 #ifndef VM_PAGEOUT_BURST_WAIT
103 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
104 #endif /* VM_PAGEOUT_BURST_WAIT */
105
106 #ifndef VM_PAGEOUT_EMPTY_WAIT
107 #define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
108 #endif /* VM_PAGEOUT_EMPTY_WAIT */
109
110 /*
111 * To obtain a reasonable LRU approximation, the inactive queue
112 * needs to be large enough to give pages on it a chance to be
113 * referenced a second time. This macro defines the fraction
114 * of active+inactive pages that should be inactive.
115 * The pageout daemon uses it to update vm_page_inactive_target.
116 *
117 * If vm_page_free_count falls below vm_page_free_target and
118 * vm_page_inactive_count is below vm_page_inactive_target,
119 * then the pageout daemon starts running.
120 */
121
122 #ifndef VM_PAGE_INACTIVE_TARGET
123 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
124 #endif /* VM_PAGE_INACTIVE_TARGET */
125
126 /*
127 * Once the pageout daemon starts running, it keeps going
128 * until vm_page_free_count meets or exceeds vm_page_free_target.
129 */
130
131 #ifndef VM_PAGE_FREE_TARGET
132 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
133 #endif /* VM_PAGE_FREE_TARGET */
134
135 /*
136 * The pageout daemon always starts running once vm_page_free_count
137 * falls below vm_page_free_min.
138 */
139
140 #ifndef VM_PAGE_FREE_MIN
141 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
142 #endif /* VM_PAGE_FREE_MIN */
143
144 /*
145 * When vm_page_free_count falls below vm_page_free_reserved,
146 * only vm-privileged threads can allocate pages. vm-privilege
147 * allows the pageout daemon and default pager (and any other
148 * associated threads needed for default pageout) to continue
149 * operation by dipping into the reserved pool of pages.
150 */
151
152 #ifndef VM_PAGE_FREE_RESERVED
153 #define VM_PAGE_FREE_RESERVED \
154 ((8 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
155 #endif /* VM_PAGE_FREE_RESERVED */
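
/*
 * Worked example of the tunables above (all numbers assumed, not part of
 * the original source): a machine with 16384 pageable pages, 2000 of them
 * currently free, and NCPUS == 2.
 */
#if 0	/* illustrative sketch only */
static void
vm_pageout_tunables_example(void)
{
	unsigned int avail = 16384;	/* assumed active + inactive page count */
	unsigned int free = 2000;	/* assumed vm_page_free_count */

	assert(VM_PAGE_INACTIVE_TARGET(avail) == 5461);	/* 16384 / 3 */
	assert(VM_PAGE_FREE_TARGET(free) == 40);	/* 15 + 2000 / 80 */
	assert(VM_PAGE_FREE_MIN(free) == 30);		/* 10 + 2000 / 100 */
	/*
	 * VM_PAGE_FREE_RESERVED is (8 * VM_PAGE_LAUNDRY_MAX) + NCPUS,
	 * i.e. 82 with the default laundry max of 10 and NCPUS == 2.
	 * The daemon starts when vm_page_free_count falls below the free
	 * target while vm_page_inactive_count is below the inactive
	 * target, and keeps going until the free target is met again.
	 */
}
#endif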
156
157
158 /*
159 * Forward declarations for internal routines.
160 */
161 extern void vm_pageout_continue(void);
162 extern void vm_pageout_scan(void);
163 extern void vm_pageout_throttle(vm_page_t m);
164 extern vm_page_t vm_pageout_cluster_page(
165 vm_object_t object,
166 vm_object_offset_t offset,
167 boolean_t precious_clean);
168
169 unsigned int vm_pageout_reserved_internal = 0;
170 unsigned int vm_pageout_reserved_really = 0;
171
172 unsigned int vm_page_laundry_max = 0; /* # of clusters outstanding */
173 unsigned int vm_page_laundry_min = 0;
174 unsigned int vm_pageout_burst_max = 0;
175 unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */
176 unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
177 unsigned int vm_pageout_burst_min = 0;
178 unsigned int vm_pageout_pause_count = 0;
179 unsigned int vm_pageout_pause_max = 0;
180 unsigned int vm_free_page_pause = 100; /* milliseconds */
181
182 /*
183 * These variables record the pageout daemon's actions:
184 * how many pages it looks at and what happens to those pages.
185 * No locking needed because only one thread modifies the variables.
186 */
187
188 unsigned int vm_pageout_active = 0; /* debugging */
189 unsigned int vm_pageout_inactive = 0; /* debugging */
190 unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
191 unsigned int vm_pageout_inactive_forced = 0; /* debugging */
192 unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
193 unsigned int vm_pageout_inactive_avoid = 0; /* debugging */
194 unsigned int vm_pageout_inactive_busy = 0; /* debugging */
195 unsigned int vm_pageout_inactive_absent = 0; /* debugging */
196 unsigned int vm_pageout_inactive_used = 0; /* debugging */
197 unsigned int vm_pageout_inactive_clean = 0; /* debugging */
198 unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
199 unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
200 unsigned int vm_pageout_inactive_pinned = 0; /* debugging */
201 unsigned int vm_pageout_inactive_limbo = 0; /* debugging */
202 unsigned int vm_pageout_setup_limbo = 0; /* debugging */
203 unsigned int vm_pageout_setup_unprepped = 0; /* debugging */
204 unsigned int vm_stat_discard = 0; /* debugging */
205 unsigned int vm_stat_discard_sent = 0; /* debugging */
206 unsigned int vm_stat_discard_failure = 0; /* debugging */
207 unsigned int vm_stat_discard_throttle = 0; /* debugging */
208 unsigned int vm_pageout_scan_active_emm_throttle = 0; /* debugging */
209 unsigned int vm_pageout_scan_active_emm_throttle_success = 0; /* debugging */
210 unsigned int vm_pageout_scan_active_emm_throttle_failure = 0; /* debugging */
211 unsigned int vm_pageout_scan_inactive_emm_throttle = 0; /* debugging */
212 unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0; /* debugging */
213 unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0; /* debugging */
214
215
216 unsigned int vm_pageout_out_of_line = 0;
217 unsigned int vm_pageout_in_place = 0;
218 /*
219 * Routine: vm_pageout_object_allocate
220 * Purpose:
221 * Allocate an object for use as out-of-line memory in a
222 * data_return/data_initialize message.
223 * The page must be in an unlocked object.
224 *
225 * If the page belongs to a trusted pager, cleaning in place
226 * will be used, which utilizes a special "pageout object"
227 * containing private alias pages for the real page frames.
228 * Untrusted pagers use normal out-of-line memory.
229 */
230 vm_object_t
231 vm_pageout_object_allocate(
232 vm_page_t m,
233 vm_size_t size,
234 vm_object_offset_t offset)
235 {
236 vm_object_t object = m->object;
237 vm_object_t new_object;
238
239 assert(object->pager_ready);
240
241 if (object->pager_trusted || object->internal)
242 vm_pageout_throttle(m);
243
244 new_object = vm_object_allocate(size);
245
246 if (object->pager_trusted) {
247 assert (offset < object->size);
248
249 vm_object_lock(new_object);
250 new_object->pageout = TRUE;
251 new_object->shadow = object;
252 new_object->can_persist = FALSE;
253 new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
254 new_object->shadow_offset = offset;
255 vm_object_unlock(new_object);
256
257 /*
258 * Take a paging reference on the object. This will be dropped
259 * in vm_pageout_object_terminate()
260 */
261 vm_object_lock(object);
262 vm_object_paging_begin(object);
263 vm_object_unlock(object);
264
265 vm_pageout_in_place++;
266 } else
267 vm_pageout_out_of_line++;
268 return(new_object);
269 }
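
/*
 * Sketch of the aliasing set up above for the trusted-pager case (the
 * helper name is hypothetical): a page at offset "off" within the pageout
 * object is a private alias of the real page at (off + shadow_offset) in
 * the original object, which is how vm_pageout_object_terminate() finds
 * the real page again.
 */
#if 0	/* illustrative sketch only */
static vm_page_t
vm_pageout_real_page_example(
	vm_object_t		pageout_object,	/* from vm_pageout_object_allocate */
	vm_object_offset_t	off)		/* offset within pageout_object */
{
	/* caller must hold the shadow (original) object lock */
	return vm_page_lookup(pageout_object->shadow,
			      off + pageout_object->shadow_offset);
}
#endif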
270
271 #if MACH_CLUSTER_STATS
272 unsigned long vm_pageout_cluster_dirtied = 0;
273 unsigned long vm_pageout_cluster_cleaned = 0;
274 unsigned long vm_pageout_cluster_collisions = 0;
275 unsigned long vm_pageout_cluster_clusters = 0;
276 unsigned long vm_pageout_cluster_conversions = 0;
277 unsigned long vm_pageout_target_collisions = 0;
278 unsigned long vm_pageout_target_page_dirtied = 0;
279 unsigned long vm_pageout_target_page_freed = 0;
280 unsigned long vm_pageout_target_page_pinned = 0;
281 unsigned long vm_pageout_target_page_limbo = 0;
282 #define CLUSTER_STAT(clause) clause
283 #else /* MACH_CLUSTER_STATS */
284 #define CLUSTER_STAT(clause)
285 #endif /* MACH_CLUSTER_STATS */
286
287 /*
288 * Routine: vm_pageout_object_terminate
289 * Purpose:
290 * Destroy the pageout_object allocated by
291 * vm_pageout_object_allocate(), and perform all of the
292 * required cleanup actions.
293 *
294 * In/Out conditions:
295 * The object must be locked, and will be returned locked.
296 */
297 void
298 vm_pageout_object_terminate(
299 vm_object_t object)
300 {
301 vm_object_t shadow_object;
302
303 /*
304 * Deal with the deallocation (last reference) of a pageout object
305 * (used for cleaning-in-place) by dropping the paging references/
306 * freeing pages in the original object.
307 */
308
309 assert(object->pageout);
310 shadow_object = object->shadow;
311 vm_object_lock(shadow_object);
312
313 while (!queue_empty(&object->memq)) {
314 vm_page_t p, m;
315 vm_object_offset_t offset;
316
317 p = (vm_page_t) queue_first(&object->memq);
318
319 assert(p->private);
320 assert(p->pageout);
321 p->pageout = FALSE;
322 assert(!p->cleaning);
323
324 offset = p->offset;
325 VM_PAGE_FREE(p);
326 p = VM_PAGE_NULL;
327
328 m = vm_page_lookup(shadow_object,
329 offset + object->shadow_offset);
330
331 if(m == VM_PAGE_NULL)
332 continue;
333 assert(m->cleaning);
334
335 /*
336 * Account for the paging reference taken when
337 * m->cleaning was set on this page.
338 */
339 vm_object_paging_end(shadow_object);
340 assert((m->dirty) || (m->precious) ||
341 (m->busy && m->cleaning));
342
343 /*
344 * Handle the trusted pager throttle.
345 */
346 vm_page_lock_queues();
347 if (m->laundry) {
348 vm_page_laundry_count--;
349 m->laundry = FALSE;
350 if (vm_page_laundry_count < vm_page_laundry_min) {
351 vm_page_laundry_min = 0;
352 thread_wakeup((event_t) &vm_page_laundry_count);
353 }
354 }
355
356 /*
357 * Handle the "target" page(s). These pages are to be freed if
358 * successfully cleaned. Target pages are always busy, and are
359 * wired exactly once. The initial target pages are not mapped
360 * (so cannot be referenced or modified), but converted target
361 * pages may have been modified between the selection as an
362 * adjacent page and conversion to a target.
363 */
364 if (m->pageout) {
365 assert(m->busy);
366 assert(m->wire_count == 1);
367 m->cleaning = FALSE;
368 m->pageout = FALSE;
369 #if MACH_CLUSTER_STATS
370 if (m->wanted) vm_pageout_target_collisions++;
371 #endif
372 /*
373 * Revoke all access to the page. Since the object is
374 * locked, and the page is busy, this prevents the page
375 * from being dirtied after the pmap_is_modified() call
376 * returns.
377 */
378 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
379
380 /*
381 * Since the page is left "dirty" but "not modified", we
382 * can detect whether the page was redirtied during
383 * pageout by checking the modify state.
384 */
385 m->dirty = pmap_is_modified(m->phys_addr);
386
387 if (m->dirty) {
388 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
389 vm_page_unwire(m);/* reactivates */
390 VM_STAT(reactivations++);
391 PAGE_WAKEUP_DONE(m);
392 } else if (m->prep_pin_count != 0) {
393 vm_page_pin_lock();
394 if (m->pin_count != 0) {
395 /* page is pinned; reactivate */
396 CLUSTER_STAT(
397 vm_pageout_target_page_pinned++;)
398 vm_page_unwire(m);/* reactivates */
399 VM_STAT(reactivations++);
400 PAGE_WAKEUP_DONE(m);
401 } else {
402 /*
403 * page is prepped but not pinned; send
404 * it into limbo. Note that
405 * vm_page_free (which will be called
406 * after releasing the pin lock) knows
407 * how to handle a page with limbo set.
408 */
409 m->limbo = TRUE;
410 CLUSTER_STAT(
411 vm_pageout_target_page_limbo++;)
412 }
413 vm_page_pin_unlock();
414 if (m->limbo)
415 vm_page_free(m);
416 } else {
417 CLUSTER_STAT(vm_pageout_target_page_freed++;)
418 vm_page_free(m);/* clears busy, etc. */
419 }
420 vm_page_unlock_queues();
421 continue;
422 }
423 /*
424 * Handle the "adjacent" pages. These pages were cleaned in
425 * place, and should be left alone.
426 * If prep_pin_count is nonzero, then someone is using the
427 * page, so make it active.
428 */
429 if (!m->active && !m->inactive) {
430 if (m->reference || m->prep_pin_count != 0)
431 vm_page_activate(m);
432 else
433 vm_page_deactivate(m);
434 }
435 if((m->busy) && (m->cleaning)) {
436
437 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
438 m->busy = FALSE;
439
440 /* We do not re-set m->dirty ! */
441 /* The page was busy so no extraneous activity */
442 /* could have occurred. COPY_INTO is a read into the */
443 /* new pages. CLEAN_IN_PLACE does actually write */
444 /* out the pages but handling outside of this code */
445 /* will take care of resetting dirty. We clear the */
446 /* modify bit, however, for the Programmed I/O case. */
447 pmap_clear_modify(m->phys_addr);
448 if(m->absent) {
449 m->absent = FALSE;
450 if(shadow_object->absent_count == 1)
451 vm_object_absent_release(shadow_object);
452 else
453 shadow_object->absent_count--;
454 }
455 m->overwriting = FALSE;
456 } else if (m->overwriting) {
457 /* alternate request page list, write to page_list */
458 /* case. Occurs when the original page was wired */
459 /* at the time of the list request */
460 assert(m->wire_count != 0);
461 vm_page_unwire(m);/* reactivates */
462 m->overwriting = FALSE;
463 } else {
464 /*
465 * Set the dirty state according to whether or not the page was
466 * modified during the pageout. Note that we purposefully do
467 * NOT call pmap_clear_modify since the page is still mapped.
468 * If the page were to be dirtied between the 2 calls, this
469 * fact would be lost. This code is only necessary to
470 * maintain statistics, since the pmap module is always
471 * consulted if m->dirty is false.
472 */
473 #if MACH_CLUSTER_STATS
474 m->dirty = pmap_is_modified(m->phys_addr);
475
476 if (m->dirty) vm_pageout_cluster_dirtied++;
477 else vm_pageout_cluster_cleaned++;
478 if (m->wanted) vm_pageout_cluster_collisions++;
479 #else
480 m->dirty = 0;
481 #endif
482 }
483 m->cleaning = FALSE;
484
485
486 /*
487 * Wakeup any thread waiting for the page to be un-cleaning.
488 */
489 PAGE_WAKEUP(m);
490 vm_page_unlock_queues();
491 }
492 /*
493 * Account for the paging reference taken in vm_pageout_object_allocate.
494 */
495 vm_object_paging_end(shadow_object);
496 vm_object_unlock(shadow_object);
497
498 assert(object->ref_count == 0);
499 assert(object->paging_in_progress == 0);
500 assert(object->resident_page_count == 0);
501 return;
502 }
503
504 /*
505 * Routine: vm_pageout_setup
506 * Purpose:
507 * Set up a page for pageout (clean & flush).
508 *
509 * Move the page to a new object, as part of which it will be
510 * sent to its memory manager in a memory_object_data_write or
511 * memory_object_initialize message.
512 *
513 * The "new_object" and "new_offset" arguments
514 * indicate where the page should be moved.
515 *
516 * In/Out conditions:
517 * The page in question must not be on any pageout queues,
518 * and must be busy. The object to which it belongs
519 * must be unlocked, and the caller must hold a paging
520 * reference to it. The new_object must not be locked.
521 *
522 * This routine returns a pointer to a place-holder page,
523 * inserted at the same offset, to block out-of-order
524 * requests for the page. The place-holder page must
525 * be freed after the data_write or initialize message
526 * has been sent.
527 *
528 * The original page is put on a paging queue and marked
529 * not busy on exit.
530 */
531 vm_page_t
532 vm_pageout_setup(
533 register vm_page_t m,
534 register vm_object_t new_object,
535 vm_object_offset_t new_offset)
536 {
537 register vm_object_t old_object = m->object;
538 vm_object_offset_t paging_offset;
539 vm_object_offset_t offset;
540 register vm_page_t holding_page;
541 register vm_page_t new_m;
542 register vm_page_t new_page;
543 boolean_t need_to_wire = FALSE;
544
545
546 XPR(XPR_VM_PAGEOUT,
547 "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
548 (integer_t)m->object, (integer_t)m->offset,
549 (integer_t)m, (integer_t)new_object,
550 (integer_t)new_offset);
551 assert(m && m->busy && !m->absent && !m->fictitious && !m->error &&
552 !m->restart);
553
554 assert(m->dirty || m->precious);
555
556 /*
557 * Create a place-holder page where the old one was, to prevent
558 * attempted pageins of this page while we're unlocked.
559 * If the pageout daemon put this page in limbo and we're not
560 * going to clean in place, get another fictitious page to
561 * exchange for it now.
562 */
563 VM_PAGE_GRAB_FICTITIOUS(holding_page);
564
565 if (m->limbo)
566 VM_PAGE_GRAB_FICTITIOUS(new_page);
567
568 vm_object_lock(old_object);
569
570 offset = m->offset;
571 paging_offset = offset + old_object->paging_offset;
572
573 if (old_object->pager_trusted) {
574 /*
575 * This pager is trusted, so we can clean this page
576 * in place. Leave it in the old object, and mark it
577 * cleaning & pageout.
578 */
579 new_m = holding_page;
580 holding_page = VM_PAGE_NULL;
581
582 /*
583 * If the pageout daemon put this page in limbo, exchange the
584 * identities of the limbo page and the new fictitious page,
585 * and continue with the new page, unless the prep count has
586 * gone to zero in the meantime (which means no one is
587 * interested in the page any more). In that case, just clear
588 * the limbo bit and free the extra fictitious page.
589 */
590 if (m->limbo) {
591 if (m->prep_pin_count == 0) {
592 /* page doesn't have to be in limbo any more */
593 m->limbo = FALSE;
594 vm_page_lock_queues();
595 vm_page_free(new_page);
596 vm_page_unlock_queues();
597 vm_pageout_setup_unprepped++;
598 } else {
599 vm_page_lock_queues();
600 VM_PAGE_QUEUES_REMOVE(m);
601 vm_page_remove(m);
602 vm_page_limbo_exchange(m, new_page);
603 vm_pageout_setup_limbo++;
604 vm_page_release_limbo(m);
605 m = new_page;
606 vm_page_insert(m, old_object, offset);
607 vm_page_unlock_queues();
608 }
609 }
610
611 /*
612 * Set up new page to be private shadow of real page.
613 */
614 new_m->phys_addr = m->phys_addr;
615 new_m->fictitious = FALSE;
616 new_m->private = TRUE;
617 new_m->pageout = TRUE;
618
619 /*
620 * Mark real page as cleaning (indicating that we hold a
621 * paging reference to be released via m_o_d_r_c) and
622 * pageout (indicating that the page should be freed
623 * when the pageout completes).
624 */
625 pmap_clear_modify(m->phys_addr);
626 vm_page_lock_queues();
627 vm_page_wire(new_m);
628 m->cleaning = TRUE;
629 m->pageout = TRUE;
630
631 vm_page_wire(m);
632 assert(m->wire_count == 1);
633 vm_page_unlock_queues();
634
635 m->dirty = TRUE;
636 m->precious = FALSE;
637 m->page_lock = VM_PROT_NONE;
638 m->unusual = FALSE;
639 m->unlock_request = VM_PROT_NONE;
640 } else {
641 /*
642 * Cannot clean in place, so rip the old page out of the
643 * object, and stick the holding page in. Set new_m to the
644 * page in the new object.
645 */
646 vm_page_lock_queues();
647 VM_PAGE_QUEUES_REMOVE(m);
648 vm_page_remove(m);
649
650 /*
651 * If the pageout daemon put this page in limbo, exchange the
652 * identities of the limbo page and the new fictitious page,
653 * and continue with the new page, unless the prep count has
654 * gone to zero in the meantime (which means no one is
655 * interested in the page any more). In that case, just clear
656 * the limbo bit and free the extra fictitious page.
657 */
658 if (m->limbo) {
659 if (m->prep_pin_count == 0) {
660 /* page doesn't have to be in limbo any more */
661 m->limbo = FALSE;
662 vm_page_free(new_page);
663 vm_pageout_setup_unprepped++;
664 } else {
665 vm_page_limbo_exchange(m, new_page);
666 vm_pageout_setup_limbo++;
667 vm_page_release_limbo(m);
668 m = new_page;
669 }
670 }
671
672 vm_page_insert(holding_page, old_object, offset);
673 vm_page_unlock_queues();
674
675 m->dirty = TRUE;
676 m->precious = FALSE;
677 new_m = m;
678 new_m->page_lock = VM_PROT_NONE;
679 new_m->unlock_request = VM_PROT_NONE;
680
681 if (old_object->internal)
682 need_to_wire = TRUE;
683 }
684 /*
685 * Record that this page has been written out
686 */
687 #if MACH_PAGEMAP
688 vm_external_state_set(old_object->existence_map, offset);
689 #endif /* MACH_PAGEMAP */
690
691 vm_object_unlock(old_object);
692
693 vm_object_lock(new_object);
694
695 /*
696 * Put the page into the new object. If it is not wired
697 * (if it's the real page), it will be activated.
698 */
699
700 vm_page_lock_queues();
701 vm_page_insert(new_m, new_object, new_offset);
702 if (need_to_wire)
703 vm_page_wire(new_m);
704 else
705 vm_page_activate(new_m);
706 PAGE_WAKEUP_DONE(new_m);
707 vm_page_unlock_queues();
708
709 vm_object_unlock(new_object);
710
711 /*
712 * Return the placeholder page to simplify cleanup.
713 */
714 return (holding_page);
715 }
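
/*
 * Caller-side sketch of the holding-page protocol described above (the
 * helper and its arguments are hypothetical): the place-holder returned by
 * vm_pageout_setup() blocks out-of-order requests for the offset and must
 * be freed once the data_write/initialize message has been sent.
 */
#if 0	/* illustrative sketch only */
static void
vm_pageout_send_one_example(
	vm_page_t		m,		/* busy, dirty page to push */
	vm_object_t		old_object,	/* m's object: unlocked, paging ref held */
	vm_object_t		new_object,	/* from vm_pageout_object_allocate */
	vm_object_offset_t	new_offset)
{
	vm_page_t	holding_page;

	holding_page = vm_pageout_setup(m, new_object, new_offset);

	/* ... send the memory_object_data_write naming new_object ... */

	if (holding_page != VM_PAGE_NULL) {
		vm_object_lock(old_object);
		VM_PAGE_FREE(holding_page);
		vm_object_unlock(old_object);
	}
}
#endif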
716
717 /*
718 * Routine: vm_pageclean_setup
719 *
720 * Purpose: setup a page to be cleaned (made non-dirty), but not
721 * necessarily flushed from the VM page cache.
722 * This is accomplished by cleaning in place.
723 *
724 * The page must not be busy, and the object and page
725 * queues must be locked.
726 *
727 */
728 void
729 vm_pageclean_setup(
730 vm_page_t m,
731 vm_page_t new_m,
732 vm_object_t new_object,
733 vm_object_offset_t new_offset)
734 {
735 vm_object_t old_object = m->object;
736 assert(!m->busy);
737 assert(!m->cleaning);
738
739 XPR(XPR_VM_PAGEOUT,
740 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
741 (integer_t)old_object, m->offset, (integer_t)m,
742 (integer_t)new_m, new_offset);
743
744 pmap_clear_modify(m->phys_addr);
745 vm_object_paging_begin(old_object);
746
747 /*
748 * Record that this page has been written out
749 */
750 #if MACH_PAGEMAP
751 vm_external_state_set(old_object->existence_map, m->offset);
752 #endif /*MACH_PAGEMAP*/
753
754 /*
755 * Mark original page as cleaning in place.
756 */
757 m->cleaning = TRUE;
758 m->dirty = TRUE;
759 m->precious = FALSE;
760
761 /*
762 * Convert the fictitious page to a private shadow of
763 * the real page.
764 */
765 assert(new_m->fictitious);
766 new_m->fictitious = FALSE;
767 new_m->private = TRUE;
768 new_m->pageout = TRUE;
769 new_m->phys_addr = m->phys_addr;
770 vm_page_wire(new_m);
771
772 vm_page_insert(new_m, new_object, new_offset);
773 assert(!new_m->wanted);
774 new_m->busy = FALSE;
775 }
776
777 void
778 vm_pageclean_copy(
779 vm_page_t m,
780 vm_page_t new_m,
781 vm_object_t new_object,
782 vm_object_offset_t new_offset)
783 {
784 XPR(XPR_VM_PAGEOUT,
785 "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
786 m, new_m, new_object, new_offset, 0);
787
788 assert((!m->busy) && (!m->cleaning));
789
790 assert(!new_m->private && !new_m->fictitious);
791
792 pmap_clear_modify(m->phys_addr);
793
794 m->busy = TRUE;
795 vm_object_paging_begin(m->object);
796 vm_page_unlock_queues();
797 vm_object_unlock(m->object);
798
799 /*
800 * Copy the original page to the new page.
801 */
802 vm_page_copy(m, new_m);
803
804 /*
805 * Mark the old page as clean. A request to pmap_is_modified
806 * will get the right answer.
807 */
808 vm_object_lock(m->object);
809 m->dirty = FALSE;
810
811 vm_object_paging_end(m->object);
812
813 vm_page_lock_queues();
814 if (!m->active && !m->inactive)
815 vm_page_activate(m);
816 PAGE_WAKEUP_DONE(m);
817
818 vm_page_insert(new_m, new_object, new_offset);
819 vm_page_activate(new_m);
820 new_m->busy = FALSE; /* No other thread can be waiting */
821 }
822
823
824 /*
825 * Routine: vm_pageout_initialize_page
826 * Purpose:
827 * Causes the specified page to be initialized in
828 * the appropriate memory object. This routine is used to push
829 * pages into a copy-object when they are modified in the
830 * permanent object.
831 *
832 * The page is moved to a temporary object and paged out.
833 *
834 * In/out conditions:
835 * The page in question must not be on any pageout queues.
836 * The object to which it belongs must be locked.
837 * The page must be busy, but not hold a paging reference.
838 *
839 * Implementation:
840 * Move this page to a completely new object.
841 */
842 void
843 vm_pageout_initialize_page(
844 vm_page_t m)
845 {
846 vm_map_copy_t copy;
847 vm_object_t new_object;
848 vm_object_t object;
849 vm_object_offset_t paging_offset;
850 vm_page_t holding_page;
851
852
853 XPR(XPR_VM_PAGEOUT,
854 "vm_pageout_initialize_page, page 0x%X\n",
855 (integer_t)m, 0, 0, 0, 0);
856 assert(m->busy);
857
858 /*
859 * Verify that we really want to clean this page
860 */
861 assert(!m->absent);
862 assert(!m->error);
863 assert(m->dirty);
864
865 /*
866 * Create a paging reference to let us play with the object.
867 */
868 object = m->object;
869 paging_offset = m->offset + object->paging_offset;
870 vm_object_paging_begin(object);
871 vm_object_unlock(object);
872 if (m->absent || m->error || m->restart ||
873 (!m->dirty && !m->precious)) {
874 VM_PAGE_FREE(m);
875 panic("reservation without pageout?"); /* alan */
876 return;
877 }
878
879 /* set the page for future call to vm_fault_list_request */
880 holding_page = NULL;
881 vm_object_lock(m->object);
882 vm_page_lock_queues();
883 pmap_clear_modify(m->phys_addr);
884 m->dirty = TRUE;
885 m->busy = TRUE;
886 m->list_req_pending = TRUE;
887 m->cleaning = TRUE;
888 m->pageout = TRUE;
889 vm_page_wire(m);
890 vm_page_unlock_queues();
891 vm_object_unlock(m->object);
892 vm_pageout_throttle(m);
893 copy = NULL;
894
895 VM_STAT(pageouts++);
896 /* VM_STAT(pages_pagedout++); */
897
898 /*
899 * Write the data to its pager.
900 * Note that the data is passed by naming the new object,
901 * not a virtual address; the pager interface has been
902 * manipulated to use the "internal memory" data type.
903 * [The object reference from its allocation is donated
904 * to the eventual recipient.]
905 */
906 memory_object_data_initialize(object->pager,
907 object->pager_request,
908 paging_offset,
909 POINTER_T(copy),
910 PAGE_SIZE);
911
912 vm_object_lock(object);
913 }
914
915 #if MACH_CLUSTER_STATS
916 #define MAXCLUSTERPAGES 16
917 struct {
918 unsigned long pages_in_cluster;
919 unsigned long pages_at_higher_offsets;
920 unsigned long pages_at_lower_offsets;
921 } cluster_stats[MAXCLUSTERPAGES];
922 #endif /* MACH_CLUSTER_STATS */
923
924 boolean_t allow_clustered_pageouts = FALSE;
925
926 /*
927 * vm_pageout_cluster:
928 *
929 * Given a page, page it out, and attempt to clean adjacent pages
930 * in the same operation.
931 *
932 * The page must be busy, and the object unlocked w/ paging reference
933 * to prevent deallocation or collapse. The page must not be on any
934 * pageout queue.
935 */
936 void
937 vm_pageout_cluster(
938 vm_page_t m)
939 {
940 vm_object_t object = m->object;
941 vm_object_offset_t offset = m->offset; /* from vm_object start */
942 vm_object_offset_t paging_offset = m->offset + object->paging_offset;
943 vm_object_t new_object;
944 vm_object_offset_t new_offset;
945 vm_size_t cluster_size;
946 vm_object_offset_t cluster_offset; /* from memory_object start */
947 vm_object_offset_t cluster_lower_bound; /* from vm_object_start */
948 vm_object_offset_t cluster_upper_bound; /* from vm_object_start */
949 vm_object_offset_t cluster_start, cluster_end;/* from vm_object start */
950 vm_object_offset_t offset_within_cluster;
951 vm_size_t length_of_data;
952 vm_page_t friend, holding_page;
953 vm_map_copy_t copy;
954 kern_return_t rc;
955 boolean_t precious_clean = TRUE;
956 int pages_in_cluster;
957
958 CLUSTER_STAT(int pages_at_higher_offsets = 0;)
959 CLUSTER_STAT(int pages_at_lower_offsets = 0;)
960
961 XPR(XPR_VM_PAGEOUT,
962 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
963 (integer_t)object, offset, (integer_t)m, 0, 0);
964
965 CLUSTER_STAT(vm_pageout_cluster_clusters++;)
966 /*
967 * Only a certain kind of page is appreciated here.
968 */
969 assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
970 assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
971
972 vm_object_lock(object);
973 cluster_size = object->cluster_size;
974
975 assert(cluster_size >= PAGE_SIZE);
976 if (cluster_size < PAGE_SIZE) cluster_size = PAGE_SIZE;
977 assert(object->pager_created && object->pager_initialized);
978 assert(object->internal || object->pager_ready);
979
980 if (m->precious && !m->dirty)
981 precious_clean = TRUE;
982
983 if (!object->pager_trusted || !allow_clustered_pageouts)
984 cluster_size = PAGE_SIZE;
985 vm_object_unlock(object);
986
987 cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1);
988 /* bytes from beginning of cluster */
989 /*
990 * Due to unaligned mappings, we have to be careful
991 * of negative offsets into the VM object. Clip the cluster
992 * boundary to the VM object, not the memory object.
993 */
994 if (offset > cluster_offset) {
995 cluster_lower_bound = offset - cluster_offset;
996 /* from vm_object */
997 } else {
998 cluster_lower_bound = 0;
999 }
1000 cluster_upper_bound = (offset - cluster_offset) +
1001 (vm_object_offset_t)cluster_size;
1002
1003 /* set the page for future call to vm_fault_list_request */
1004 holding_page = NULL;
1005 vm_object_lock(m->object);
1006 vm_page_lock_queues();
1007 m->busy = TRUE;
1008 m->list_req_pending = TRUE;
1009 m->cleaning = TRUE;
1010 m->pageout = TRUE;
1011 vm_page_wire(m);
1012 vm_page_unlock_queues();
1013 vm_object_unlock(m->object);
1014 vm_pageout_throttle(m);
1015
1016 /*
1017 * Search backward for adjacent eligible pages to clean in
1018 * this operation.
1019 */
1020
1021 cluster_start = offset;
1022 if (offset) { /* avoid wrap-around at zero */
1023 for (cluster_start = offset - PAGE_SIZE_64;
1024 cluster_start >= cluster_lower_bound;
1025 cluster_start -= PAGE_SIZE_64) {
1026 assert(cluster_size > PAGE_SIZE);
1027
1028 vm_object_lock(object);
1029 vm_page_lock_queues();
1030
1031 if ((friend = vm_pageout_cluster_page(object, cluster_start,
1032 precious_clean)) == VM_PAGE_NULL) {
1033 vm_page_unlock_queues();
1034 vm_object_unlock(object);
1035 break;
1036 }
1037 new_offset = (cluster_start + object->paging_offset)
1038 & (cluster_size - 1);
1039
1040 assert(new_offset < cluster_offset);
1041 m->list_req_pending = TRUE;
1042 m->cleaning = TRUE;
1043 /* do nothing except advance the write request; all we really need */
1044 /* to do is push the target page and let the code at the other end */
1045 /* decide what is really the right size */
1046 if (vm_page_free_count <= vm_page_free_reserved) {
1047 m->busy = TRUE;
1048 m->pageout = TRUE;
1049 vm_page_wire(m);
1050 }
1051
1052 vm_page_unlock_queues();
1053 vm_object_unlock(object);
1054 if(m->dirty || m->object->internal) {
1055 CLUSTER_STAT(pages_at_lower_offsets++;)
1056 }
1057
1058 }
1059 cluster_start += PAGE_SIZE_64;
1060 }
1061 assert(cluster_start >= cluster_lower_bound);
1062 assert(cluster_start <= offset);
1063 /*
1064 * Search forward for adjacent eligible pages to clean in
1065 * this operation.
1066 */
1067 for (cluster_end = offset + PAGE_SIZE_64;
1068 cluster_end < cluster_upper_bound;
1069 cluster_end += PAGE_SIZE_64) {
1070 assert(cluster_size > PAGE_SIZE);
1071
1072 vm_object_lock(object);
1073 vm_page_lock_queues();
1074
1075 if ((friend = vm_pageout_cluster_page(object, cluster_end,
1076 precious_clean)) == VM_PAGE_NULL) {
1077 vm_page_unlock_queues();
1078 vm_object_unlock(object);
1079 break;
1080 }
1081 new_offset = (cluster_end + object->paging_offset)
1082 & (cluster_size - 1);
1083
1084 assert(new_offset < cluster_size);
1085 m->list_req_pending = TRUE;
1086 m->cleaning = TRUE;
1087 /* do nothing except advance the write request; all we really need */
1088 /* to do is push the target page and let the code at the other end */
1089 /* decide what is really the right size */
1090 if (vm_page_free_count <= vm_page_free_reserved) {
1091 m->busy = TRUE;
1092 m->pageout = TRUE;
1093 vm_page_wire(m);
1094 }
1095
1096 vm_page_unlock_queues();
1097 vm_object_unlock(object);
1098
1099 if(m->dirty || m->object->internal) {
1100 CLUSTER_STAT(pages_at_higher_offsets++;)
1101 }
1102 }
1103 assert(cluster_end <= cluster_upper_bound);
1104 assert(cluster_end >= offset + PAGE_SIZE);
1105
1106 /*
1107 * (offset - cluster_offset) is the beginning of the cluster,
1108 * relative to the start of the vm_object.
1109 */
1110 offset_within_cluster = cluster_start - (offset - cluster_offset);
1111 length_of_data = cluster_end - cluster_start;
1112
1113 assert(offset_within_cluster < cluster_size);
1114 assert((offset_within_cluster + length_of_data) <= cluster_size);
1115
1116 rc = KERN_SUCCESS;
1117 assert(rc == KERN_SUCCESS);
1118
1119 pages_in_cluster = length_of_data/PAGE_SIZE;
1120 if(m->dirty || m->object->internal) {
1121 VM_STAT(pageouts++);
1122 }
1123 /* VM_STAT(pages_pagedout += pages_in_cluster); */
1124
1125 #if MACH_CLUSTER_STATS
1126 (cluster_stats[pages_at_lower_offsets].pages_at_lower_offsets)++;
1127 (cluster_stats[pages_at_higher_offsets].pages_at_higher_offsets)++;
1128 (cluster_stats[pages_in_cluster].pages_in_cluster)++;
1129 #endif /* MACH_CLUSTER_STATS */
1130
1131 /*
1132 * Send the data to the pager.
1133 */
1134 paging_offset = cluster_start + object->paging_offset;
1135 #ifdef MACH_BSD
1136 if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
1137 ((rpc_subsystem_t) &vnode_pager_workaround)) {
1138 rc = vnode_pager_data_return(object->pager,
1139 object->pager_request,
1140 paging_offset,
1141 POINTER_T(copy),
1142 length_of_data,
1143 !precious_clean,
1144 FALSE);
1145 } else {
1146 rc = memory_object_data_return(object->pager,
1147 object->pager_request,
1148 paging_offset,
1149 POINTER_T(copy),
1150 length_of_data,
1151 !precious_clean,
1152 FALSE);
1153 }
1154 #else
1155 rc = memory_object_data_return(object->pager,
1156 object->pager_request,
1157 paging_offset,
1158 POINTER_T(copy),
1159 length_of_data,
1160 !precious_clean,
1161 FALSE);
1162 #endif
1163 vm_object_lock(object);
1164 vm_object_paging_end(object);
1165
1166 if (holding_page) {
1167 assert(!object->pager_trusted);
1168 VM_PAGE_FREE(holding_page);
1169 vm_object_paging_end(object);
1170 }
1171
1172 vm_object_unlock(object);
1173 }
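
/*
 * Worked example of the clustering arithmetic above (all numbers assumed):
 * cluster_size == 32K, offset == 0x5000 in the vm_object, and
 * object->paging_offset == 0x10000, so paging_offset == 0x15000.
 */
#if 0	/* illustrative sketch only */
static void
vm_pageout_cluster_arith_example(void)
{
	vm_size_t		cluster_size = 32 * 1024;
	vm_object_offset_t	offset = 0x5000;
	vm_object_offset_t	paging_offset = 0x15000;
	vm_object_offset_t	cluster_offset;

	cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1);
	assert(cluster_offset == 0x5000);

	/* the cluster spans vm_object offsets [0x0, 0x8000) */
	assert(((offset > cluster_offset) ? offset - cluster_offset : 0) == 0);
	assert((offset - cluster_offset) + cluster_size == 0x8000);

	/*
	 * If the backward scan stops at 0x3000 and the forward scan at
	 * 0x7000, then offset_within_cluster == 0x3000 and
	 * length_of_data == 0x4000, i.e. four 4K pages are pushed in a
	 * single data_return.
	 */
}
#endif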
1174
1175 /*
1176 * vm_pageout_return_write_pages
1177 * Recover pages from an aborted write attempt
1178 *
1179 */
1180
1181 vm_pageout_return_write_pages(
1182 ipc_port_t control_port,
1183 vm_object_offset_t object_offset,
1184 vm_map_copy_t copy)
1185 {
1186 vm_object_t object;
1187 int offset;
1188 int size;
1189 int shadow_offset;
1190 int copy_offset;
1191 int j;
1192 vm_page_t m;
1193
1194
1195 object = copy->cpy_object;
1196 copy_offset = copy->offset;
1197 size = copy->size;
1198
1199 if((copy->type != VM_MAP_COPY_OBJECT) || (object->shadow == 0)) {
1200 object = (vm_object_t)control_port->ip_kobject;
1201 shadow_offset = (object_offset - object->paging_offset)
1202 - copy->offset;
1203 } else {
1204 /* get the offset from the copy object */
1205 shadow_offset = object->shadow_offset;
1206 /* find the backing object */
1207 object = object->shadow;
1208 }
1209 vm_object_lock(object);
1210
1211 for(offset = 0, j=0; offset < size; offset+=page_size, j++) {
1212 m = vm_page_lookup(object,
1213 offset + shadow_offset + copy_offset);
1214 if((m == VM_PAGE_NULL) || m->fictitious) {
1215
1216 vm_page_t p;
1217 int i;
1218 vm_object_t copy_object;
1219
1220 /* m might be fictitious if the original page */
1221 /* was found to be in limbo at the time of */
1222 /* vm_pageout_setup */
1223
1224 if((m != VM_PAGE_NULL) && m->fictitious) {
1225 m->cleaning = FALSE;
1226 vm_page_remove(m);
1227 /* if object is not pager trusted then */
1228 /* this fictitious page will be removed */
1229 /* as the holding page in vm_pageout_cluster */
1230 if (object->pager_trusted)
1231 vm_page_free(m);
1232 if(vm_page_laundry_count)
1233 vm_page_laundry_count--;
1234 if (vm_page_laundry_count
1235 < vm_page_laundry_min) {
1236 vm_page_laundry_min = 0;
1237 thread_wakeup((event_t)
1238 &vm_page_laundry_count);
1239 }
1240 }
1241 else if ((object->pager_trusted) &&
1242 (copy->type == VM_MAP_COPY_OBJECT)) {
1243 vm_object_paging_end(object);
1244 }
1245
1246 copy_object = copy->cpy_object;
1247
1248 if(copy->type == VM_MAP_COPY_OBJECT) {
1249 p = (vm_page_t) queue_first(&copy_object->memq);
1250
1251 for(i = 0;
1252 i < copy_object->resident_page_count;
1253 i++) {
1254 if(p->offset == (offset + copy_offset))
1255 break;
1256 p = (vm_page_t) queue_next(&p->listq);
1257 }
1258
1259 vm_page_remove(p);
1260 } else {
1261 p = copy->cpy_page_list[j];
1262 copy->cpy_page_list[j] = 0;
1263 p->gobbled = FALSE;
1264 }
1265
1266 vm_page_insert(p, object,
1267 offset + shadow_offset + copy_offset);
1268 p->busy = TRUE;
1269 p->dirty = TRUE;
1270 p->laundry = FALSE;
1271 if (p->pageout) {
1272 p->pageout = FALSE; /*dont throw away target*/
1273 vm_page_unwire(p);/* reactivates */
1274 }
1275 } else if(m->pageout) {
1276 m->pageout = FALSE; /* dont throw away target pages */
1277 vm_page_unwire(m);/* reactivates */
1278 }
1279 }
1280
1281 vm_object_unlock(object);
1282 vm_map_copy_discard(copy);
1283 vm_object_lock(object);
1284
1285 for(offset = 0; offset < size; offset+=page_size) {
1286 m = vm_page_lookup(object,
1287 offset + shadow_offset + copy_offset);
1288 m->dirty = TRUE; /* we'll send the pages home later */
1289 m->busy = FALSE; /* allow system access again */
1290 }
1291
1292 vm_object_unlock(object);
1293 }
1294
1295 /*
1296 * Trusted pager throttle.
1297 * Object must be unlocked, page queues must be unlocked.
1298 */
1299 void
1300 vm_pageout_throttle(
1301 register vm_page_t m)
1302 {
1303 vm_page_lock_queues();
1304 assert(!m->laundry);
1305 m->laundry = TRUE;
1306 while (vm_page_laundry_count >= vm_page_laundry_max) {
1307 /*
1308 * Set the threshold for when vm_page_free()
1309 * should wake us up.
1310 */
1311 vm_page_laundry_min = vm_page_laundry_max/2;
1312 assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
1313 vm_page_unlock_queues();
1314
1315 /*
1316 * Pause to let the default pager catch up.
1317 */
1318 thread_block((void (*)(void)) 0);
1319 vm_page_lock_queues();
1320 }
1321 vm_page_laundry_count++;
1322 vm_page_unlock_queues();
1323 }
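
/*
 * Sketch of the completion side of this throttle (mirrors the laundry
 * handling in vm_pageout_object_terminate() above): when a clean finishes,
 * the laundry count is dropped and the throttled thread is woken once the
 * count falls below vm_page_laundry_min.
 */
#if 0	/* illustrative sketch only */
static void
vm_pageout_laundry_done_example(
	vm_page_t	m)		/* page whose clean just completed */
{
	vm_page_lock_queues();
	if (m->laundry) {
		vm_page_laundry_count--;
		m->laundry = FALSE;
		if (vm_page_laundry_count < vm_page_laundry_min) {
			vm_page_laundry_min = 0;
			thread_wakeup((event_t) &vm_page_laundry_count);
		}
	}
	vm_page_unlock_queues();
}
#endif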
1324
1325 /*
1326 * The global variable vm_pageout_clean_active_pages controls whether
1327 * active pages are considered valid to be cleaned in place during a
1328 * clustered pageout. Performance measurements are necessary to determine
1329 * the best policy.
1330 */
1331 int vm_pageout_clean_active_pages = 1;
1332 /*
1333 * vm_pageout_cluster_page: [Internal]
1334 *
1335 * return a vm_page_t to the page at (object,offset) if it is appropriate
1336 * to clean in place. Pages that are non-existent, busy, absent, already
1337 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
1338 * page in a cluster.
1339 *
1340 * The object must be locked on entry, and remains locked throughout
1341 * this call.
1342 */
1343
1344 vm_page_t
1345 vm_pageout_cluster_page(
1346 vm_object_t object,
1347 vm_object_offset_t offset,
1348 boolean_t precious_clean)
1349 {
1350 vm_page_t m;
1351
1352 XPR(XPR_VM_PAGEOUT,
1353 "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
1354 (integer_t)object, offset, 0, 0, 0);
1355
1356 if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
1357 return(VM_PAGE_NULL);
1358
1359 if (m->busy || m->absent || m->cleaning ||
1360 m->prep_pin_count != 0 ||
1361 (m->wire_count != 0) || m->error)
1362 return(VM_PAGE_NULL);
1363
1364 if (vm_pageout_clean_active_pages) {
1365 if (!m->active && !m->inactive) return(VM_PAGE_NULL);
1366 } else {
1367 if (!m->inactive) return(VM_PAGE_NULL);
1368 }
1369
1370 assert(!m->private);
1371 assert(!m->fictitious);
1372
1373 if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr);
1374
1375 if (precious_clean) {
1376 if (!m->precious || !m->dirty)
1377 return(VM_PAGE_NULL);
1378 } else {
1379 if (!m->dirty)
1380 return(VM_PAGE_NULL);
1381 }
1382 return(m);
1383 }
1384
1385 /*
1386 * vm_pageout_scan does the dirty work for the pageout daemon.
1387 * It returns with vm_page_queue_free_lock held and
1388 * vm_page_free_wanted == 0.
1389 */
1390 extern void vm_pageout_scan_continue(void); /* forward */
1391
1392 void
1393 vm_pageout_scan(void)
1394 {
1395 unsigned int burst_count;
1396 boolean_t now = FALSE;
1397 unsigned int laundry_pages;
1398 boolean_t need_more_inactive_pages;
1399 unsigned int loop_detect;
1400
1401 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1402
1403 /*???*/ /*
1404 * We want to gradually dribble pages from the active queue
1405 * to the inactive queue. If we let the inactive queue get
1406 * very small, and then suddenly dump many pages into it,
1407 * those pages won't get a sufficient chance to be referenced
1408 * before we start taking them from the inactive queue.
1409 *
1410 * We must limit the rate at which we send pages to the pagers.
1411 * data_write messages consume memory, for message buffers and
1412 * for map-copy objects. If we get too far ahead of the pagers,
1413 * we can potentially run out of memory.
1414 *
1415 * We can use the laundry count to limit directly the number
1416 * of pages outstanding to the default pager. A similar
1417 * strategy for external pagers doesn't work, because
1418 * external pagers don't have to deallocate the pages sent them,
1419 * and because we might have to send pages to external pagers
1420 * even if they aren't processing writes. So we also
1421 * use a burst count to limit writes to external pagers.
1422 *
1423 * When memory is very tight, we can't rely on external pagers to
1424 * clean pages. They probably aren't running, because they
1425 * aren't vm-privileged. If we kept sending dirty pages to them,
1426 * we could exhaust the free list. However, we can't just ignore
1427 * pages belonging to external objects, because there might be no
1428 * pages belonging to internal objects. Hence, we get the page
1429 * into an internal object and then immediately double-page it,
1430 * sending it to the default pager.
1431 *
1432 * consider_zone_gc should be last, because the other operations
1433 * might return memory to zones.
1434 */
1435
1436 Restart:
1437
1438 mutex_lock(&vm_page_queue_free_lock);
1439 now = (vm_page_free_count < vm_page_free_min);
1440 mutex_unlock(&vm_page_queue_free_lock);
1441 #if THREAD_SWAPPER
1442 swapout_threads(now);
1443 #endif /* THREAD_SWAPPER */
1444
1445 stack_collect();
1446 consider_task_collect();
1447 consider_thread_collect();
1448 cleanup_limbo_queue();
1449 consider_zone_gc();
1450 consider_machine_collect();
1451
1452 loop_detect = vm_page_active_count + vm_page_inactive_count;
1453 #if 0
1454 if (vm_page_free_count <= vm_page_free_reserved) {
1455 need_more_inactive_pages = TRUE;
1456 } else {
1457 need_more_inactive_pages = FALSE;
1458 }
1459 #else
1460 need_more_inactive_pages = FALSE;
1461 #endif
1462
1463 for (burst_count = 0;;) {
1464 register vm_page_t m;
1465 register vm_object_t object;
1466 unsigned int free_count;
1467
1468 /*
1469 * Recalculate vm_page_inactivate_target.
1470 */
1471
1472 vm_page_lock_queues();
1473 vm_page_inactive_target =
1474 VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1475 vm_page_inactive_count);
1476
1477 /*
1478 * Move pages from active to inactive.
1479 */
1480
1481 while ((vm_page_inactive_count < vm_page_inactive_target ||
1482 need_more_inactive_pages) &&
1483 !queue_empty(&vm_page_queue_active)) {
1484 register vm_object_t object;
1485
1486 vm_pageout_active++;
1487 m = (vm_page_t) queue_first(&vm_page_queue_active);
1488
1489 /*
1490 * If we're getting really low on memory,
1491 * try selecting a page that will go
1492 * directly to the default_pager.
1493 * If there are no such pages, we have to
1494 * page out a page backed by an EMM,
1495 * so that the default_pager can recover
1496 * it eventually.
1497 */
1498 if (need_more_inactive_pages &&
1499 (IP_VALID(memory_manager_default))) {
1500 vm_pageout_scan_active_emm_throttle++;
1501 do {
1502 assert(m->active && !m->inactive);
1503 object = m->object;
1504
1505 if (vm_object_lock_try(object)) {
1506 #if 0
1507 if (object->pager_trusted ||
1508 object->internal) {
1509 /* found one ! */
1510 vm_pageout_scan_active_emm_throttle_success++;
1511 goto object_locked_active;
1512 }
1513 #else
1514 vm_pageout_scan_active_emm_throttle_success++;
1515 goto object_locked_active;
1516 #endif
1517 vm_object_unlock(object);
1518 }
1519 m = (vm_page_t) queue_next(&m->pageq);
1520 } while (!queue_end(&vm_page_queue_active,
1521 (queue_entry_t) m));
1522 if (queue_end(&vm_page_queue_active,
1523 (queue_entry_t) m)) {
1524 vm_pageout_scan_active_emm_throttle_failure++;
1525 m = (vm_page_t)
1526 queue_first(&vm_page_queue_active);
1527 }
1528 }
1529
1530 assert(m->active && !m->inactive);
1531
1532 object = m->object;
1533 if (!vm_object_lock_try(object)) {
1534 /*
1535 * Move page to end and continue.
1536 */
1537
1538 queue_remove(&vm_page_queue_active, m,
1539 vm_page_t, pageq);
1540 queue_enter(&vm_page_queue_active, m,
1541 vm_page_t, pageq);
1542 vm_page_unlock_queues();
1543 mutex_pause();
1544 vm_page_lock_queues();
1545 continue;
1546 }
1547
1548 object_locked_active:
1549 /*
1550 * If the page is busy, then we pull it
1551 * off the active queue and leave it alone.
1552 */
1553
1554 if (m->busy) {
1555 vm_object_unlock(object);
1556 queue_remove(&vm_page_queue_active, m,
1557 vm_page_t, pageq);
1558 m->active = FALSE;
1559 if (!m->fictitious)
1560 vm_page_active_count--;
1561 continue;
1562 }
1563
1564 /*
1565 * Deactivate the page while holding the object
1566 * locked, so we know the page is still not busy.
1567 * This should prevent races between pmap_enter
1568 * and pmap_clear_reference. The page might be
1569 * absent or fictitious, but vm_page_deactivate
1570 * can handle that.
1571 */
1572
1573 vm_page_deactivate(m);
1574 vm_object_unlock(object);
1575 }
1576
1577 /*
1578 * We are done if we have met our target *and*
1579 * nobody is still waiting for a page.
1580 */
1581
1582 mutex_lock(&vm_page_queue_free_lock);
1583 free_count = vm_page_free_count;
1584 if ((free_count >= vm_page_free_target) &&
1585 (vm_page_free_wanted == 0)) {
1586 vm_page_unlock_queues();
1587 break;
1588 }
1589 mutex_unlock(&vm_page_queue_free_lock);
1590
1591 /*
1592 * Sometimes we have to pause:
1593 * 1) No inactive pages - nothing to do.
1594 * 2) Flow control - wait for untrusted pagers to catch up.
1595 */
1596
1597 if (queue_empty(&vm_page_queue_inactive) ||
1598 ((--loop_detect) == 0) ||
1599 (burst_count >= vm_pageout_burst_max)) {
1600 unsigned int pages, msecs;
1601 int wait_result;
1602
1603 consider_machine_adjust();
1604 /*
1605 * vm_pageout_burst_wait is msecs/page.
1606 * If there is nothing for us to do, we wait
1607 * at least vm_pageout_empty_wait msecs.
1608 */
1609 pages = burst_count;
1610
1611 if (loop_detect == 0) {
1612 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1613 msecs = vm_free_page_pause;
1614 }
1615 else {
1616 msecs = burst_count * vm_pageout_burst_wait;
1617 }
1618
1619 if (queue_empty(&vm_page_queue_inactive) &&
1620 (msecs < vm_pageout_empty_wait))
1621 msecs = vm_pageout_empty_wait;
1622 vm_page_unlock_queues();
1623 assert_wait_timeout(msecs, THREAD_INTERRUPTIBLE);
1624 counter(c_vm_pageout_scan_block++);
1625
1626 /*
1627 * Unfortunately, we don't have call_continuation
1628 * so we can't rely on tail-recursion.
1629 */
1630 wait_result = thread_block((void (*)(void)) 0);
1631 if (wait_result != THREAD_TIMED_OUT)
1632 thread_cancel_timer();
1633 vm_pageout_scan_continue();
1634 goto Restart;
1635 /*NOTREACHED*/
1636 }
1637
1638 vm_pageout_inactive++;
1639 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
1640
1641 if ((vm_page_free_count <= vm_page_free_reserved) &&
1642 (IP_VALID(memory_manager_default))) {
1643 /*
1644 * We're really low on memory. Try to select a page that
1645 * would go directly to the default_pager.
1646 * If there are no such pages, we have to page out a
1647 * page backed by an EMM, so that the default_pager
1648 * can recover it eventually.
1649 */
1650 vm_pageout_scan_inactive_emm_throttle++;
1651 do {
1652 assert(!m->active && m->inactive);
1653 object = m->object;
1654
1655 if (vm_object_lock_try(object)) {
1656 #if 0
1657 if (object->pager_trusted ||
1658 object->internal) {
1659 /* found one ! */
1660 vm_pageout_scan_inactive_emm_throttle_success++;
1661 goto object_locked_inactive;
1662 }
1663 #else
1664 vm_pageout_scan_inactive_emm_throttle_success++;
1665 goto object_locked_inactive;
1666 #endif /* 0 */
1667 vm_object_unlock(object);
1668 }
1669 m = (vm_page_t) queue_next(&m->pageq);
1670 } while (!queue_end(&vm_page_queue_inactive,
1671 (queue_entry_t) m));
1672 if (queue_end(&vm_page_queue_inactive,
1673 (queue_entry_t) m)) {
1674 vm_pageout_scan_inactive_emm_throttle_failure++;
1675 /*
1676 * We should check the "active" queue
1677 * for good candidates to page out.
1678 */
1679 need_more_inactive_pages = TRUE;
1680
1681 m = (vm_page_t)
1682 queue_first(&vm_page_queue_inactive);
1683 }
1684 }
1685
1686 assert(!m->active && m->inactive);
1687 object = m->object;
1688
1689 /*
1690 * Try to lock object; since we've got the
1691 * page queues lock, we can only try for this one.
1692 */
1693
1694 if (!vm_object_lock_try(object)) {
1695 /*
1696 * Move page to end and continue.
1697 */
1698 queue_remove(&vm_page_queue_inactive, m,
1699 vm_page_t, pageq);
1700 queue_enter(&vm_page_queue_inactive, m,
1701 vm_page_t, pageq);
1702 vm_page_unlock_queues();
1703 mutex_pause();
1704 vm_pageout_inactive_nolock++;
1705 continue;
1706 }
1707
1708 object_locked_inactive:
1709 /*
1710 * Avoid paging out pages of objects whose pager is still
1711 * being created by another thread. That thread may be
1712 * claiming memory, so if such pages were chosen, the
1713 * pageout thread could deadlock with it while waiting
1714 * for the pager creation to complete. The remaining
1715 * assumption is that enough other pages will be available
1716 * in the inactive pool to page out and so satisfy all the
1717 * memory claimed by the thread that is concurrently
1718 * creating the pager.
1719 */
1720
1721 if (!object->pager_initialized && object->pager_created) {
1722 /*
1723 * Move page to end and continue, hoping that
1724 * there will be enough other inactive pages to
1725 * page out so that the thread which currently
1726 * initializes the pager will succeed.
1727 */
1728 queue_remove(&vm_page_queue_inactive, m,
1729 vm_page_t, pageq);
1730 queue_enter(&vm_page_queue_inactive, m,
1731 vm_page_t, pageq);
1732 vm_page_unlock_queues();
1733 vm_object_unlock(object);
1734 vm_pageout_inactive_avoid++;
1735 continue;
1736 }
1737
1738 /*
1739 * Remove the page from the inactive list.
1740 */
1741
1742 queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
1743 m->inactive = FALSE;
1744 if (!m->fictitious)
1745 vm_page_inactive_count--;
1746
1747 if (m->busy || !object->alive) {
1748 /*
1749 * Somebody is already playing with this page.
1750 * Leave it off the pageout queues.
1751 */
1752
1753 vm_page_unlock_queues();
1754 vm_object_unlock(object);
1755 vm_pageout_inactive_busy++;
1756 continue;
1757 }
1758
1759 /*
1760 * If it's absent or in error, we can reclaim the page.
1761 */
1762
1763 if (m->absent || m->error) {
1764 vm_pageout_inactive_absent++;
1765 reclaim_page:
1766 vm_page_free(m);
1767 vm_page_unlock_queues();
1768 vm_object_unlock(object);
1769 continue;
1770 }
1771
1772 assert(!m->private);
1773 assert(!m->fictitious);
1774
1775 /*
1776 * If already cleaning this page in place, convert from
1777 * "adjacent" to "target". We can leave the page mapped,
1778 * and vm_pageout_object_terminate will determine whether
1779 * to free or reactivate.
1780 */
1781
1782 if (m->cleaning) {
1783 #if MACH_CLUSTER_STATS
1784 vm_pageout_cluster_conversions++;
1785 #endif
1786 if (m->prep_pin_count == 0) {
1787 m->busy = TRUE;
1788 m->pageout = TRUE;
1789 vm_page_wire(m);
1790 }
1791 vm_object_unlock(object);
1792 vm_page_unlock_queues();
1793 continue;
1794 }
1795
1796 /*
1797 * If it's being used, reactivate.
1798 * (Fictitious pages are either busy or absent.)
1799 */
1800
1801 if (m->reference || pmap_is_referenced(m->phys_addr)) {
1802 vm_pageout_inactive_used++;
1803 reactivate_page:
1804 #if ADVISORY_PAGEOUT
1805 if (m->discard_request) {
1806 m->discard_request = FALSE;
1807 }
1808 #endif /* ADVISORY_PAGEOUT */
1809 vm_object_unlock(object);
1810 vm_page_activate(m);
1811 VM_STAT(reactivations++);
1812 vm_page_unlock_queues();
1813 continue;
1814 }
1815
1816 if (m->prep_pin_count != 0) {
1817 boolean_t pinned = FALSE;
1818
1819 vm_page_pin_lock();
1820 if (m->pin_count != 0) {
1821 /* skip and reactivate pinned page */
1822 pinned = TRUE;
1823 vm_pageout_inactive_pinned++;
1824 } else {
1825 /* page is prepped; send it into limbo */
1826 m->limbo = TRUE;
1827 vm_pageout_inactive_limbo++;
1828 }
1829 vm_page_pin_unlock();
1830 if (pinned)
1831 goto reactivate_page;
1832 }
1833
1834 #if ADVISORY_PAGEOUT
1835 if (object->advisory_pageout) {
1836 boolean_t do_throttle;
1837 ipc_port_t port;
1838 vm_object_offset_t discard_offset;
1839
1840 if (m->discard_request) {
1841 vm_stat_discard_failure++;
1842 goto mandatory_pageout;
1843 }
1844
1845 assert(object->pager_initialized);
1846 m->discard_request = TRUE;
1847 port = object->pager;
1848
1849 /* system-wide throttle */
1850 do_throttle = (vm_page_free_count <=
1851 vm_page_free_reserved);
1852 if (!do_throttle) {
1853 /* throttle on this pager */
1854 /* XXX lock ordering ? */
1855 ip_lock(port);
1856 do_throttle= imq_full(&port->ip_messages);
1857 ip_unlock(port);
1858 }
1859 if (do_throttle) {
1860 vm_stat_discard_throttle++;
1861 #if 0
1862 /* ignore this page and skip to next */
1863 vm_page_unlock_queues();
1864 vm_object_unlock(object);
1865 continue;
1866 #else
1867 /* force mandatory pageout */
1868 goto mandatory_pageout;
1869 #endif
1870 }
1871
1872 /* proceed with discard_request */
1873 vm_page_activate(m);
1874 vm_stat_discard++;
1875 VM_STAT(reactivations++);
1876 discard_offset = m->offset + object->paging_offset;
1877 vm_stat_discard_sent++;
1878 vm_page_unlock_queues();
1879 vm_object_unlock(object);
1880 /*
1881 memory_object_discard_request(object->pager,
1882 object->pager_request,
1883 discard_offset,
1884 PAGE_SIZE);
1885 */
1886 continue;
1887 }
1888 mandatory_pageout:
1889 #endif /* ADVISORY_PAGEOUT */
1890
1891 XPR(XPR_VM_PAGEOUT,
1892 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1893 (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
1894
1895 /*
1896 * Eliminate all mappings.
1897 */
1898
1899 m->busy = TRUE;
1900 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
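/*
 * Pick up any modify state recorded by the pmap layer; the
 * software dirty bit alone may lag the hardware modify bit.
 */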
1901 if (!m->dirty)
1902 m->dirty = pmap_is_modified(m->phys_addr);
1903
1904 /*
1905 * If it's clean and not precious, we can free the page.
1906 */
1907
1908 if (!m->dirty && !m->precious) {
1909 vm_pageout_inactive_clean++;
1910 goto reclaim_page;
1911 }
1912 vm_page_unlock_queues();
1913
1914 /*
1915 * If there is no memory object for the page, create
1916 * one and hand it to the default pager.
1917 */
1918
1919 if (!object->pager_initialized)
1920 vm_object_collapse(object);
1921 if (!object->pager_initialized)
1922 vm_object_pager_create(object);
1923 if (!object->pager_initialized) {
1924 /*
1925 * Still no pager for the object.
1926 * Reactivate the page.
1927 *
1928 * Should only happen if there is no
1929 * default pager.
1930 */
1931 vm_page_lock_queues();
1932 vm_page_activate(m);
1933 vm_page_unlock_queues();
1934
1935 /*
1936 * And we are done with it.
1937 */
1938 PAGE_WAKEUP_DONE(m);
1939 vm_object_unlock(object);
1940
1941 /*
1942 * Continue here to get back to the preemption
1943 * point in the outer loop so that we don't
1944 * spin forever if there is no default pager.
1945 */
1946 vm_pageout_dirty_no_pager++;
1947 /*
1948 * Well there's no pager, but we can still reclaim
1949 * free pages out of the inactive list. Go back
1950 * to top of loop and look for suitable pages.
1951 */
1952 continue;
1953 }
1954
1955 if (object->pager_initialized && object->pager == IP_NULL) {
1956 /*
1957 * This pager has been destroyed by either
1958 * memory_object_destroy or vm_object_destroy, and
1959 * so there is nowhere for the page to go.
1960 * Just free the page.
1961 */
1962 VM_PAGE_FREE(m);
1963 vm_object_unlock(object);
1964 continue;
1965 }
1966
1967 vm_pageout_inactive_dirty++;
1968 /*
1969 if (!object->internal)
1970 burst_count++;
1971 */
1972 vm_object_paging_begin(object);
1973 vm_object_unlock(object);
1974 vm_pageout_cluster(m); /* flush it */
1975 }
1976 consider_machine_adjust();
1977 }
1978
1979 counter(unsigned int c_vm_pageout_scan_continue = 0;)
1980
1981 void
1982 vm_pageout_scan_continue(void)
1983 {
1984 /*
1985 * We just paused to let the pagers catch up.
1986 * If vm_page_laundry_count is still high,
1987 * then we aren't waiting long enough.
1988 * If we have paused some vm_pageout_pause_max times without
1989 * adjusting vm_pageout_burst_wait, it might be too big,
1990 * so we decrease it.
1991 */
1992
1993 vm_page_lock_queues();
1994 counter(++c_vm_pageout_scan_continue);
1995 if (vm_page_laundry_count > vm_pageout_burst_min) {
1996 vm_pageout_burst_wait++;
1997 vm_pageout_pause_count = 0;
1998 } else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
1999 vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
2000 if (vm_pageout_burst_wait < 1)
2001 vm_pageout_burst_wait = 1;
2002 vm_pageout_pause_count = 0;
2003 }
2004 vm_page_unlock_queues();
2005 }
2006
2007 void vm_page_free_reserve(int pages);
2008 int vm_page_free_count_init;
2009
2010 void
2011 vm_page_free_reserve(
2012 int pages)
2013 {
2014 int free_after_reserve;
2015
2016 vm_page_free_reserved += pages;
2017
2018 free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
2019
2020 vm_page_free_min = vm_page_free_reserved +
2021 VM_PAGE_FREE_MIN(free_after_reserve);
2022
2023 vm_page_free_target = vm_page_free_reserved +
2024 VM_PAGE_FREE_TARGET(free_after_reserve);
2025
2026 if (vm_page_free_target < vm_page_free_min + 5)
2027 vm_page_free_target = vm_page_free_min + 5;
2028 }
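/*
 * Rough worked example (illustrative numbers only): with
 * vm_page_free_count_init = 10000 and a cumulative reserve of 100,
 * free_after_reserve = 9900; free_min and free_target then become
 * the reserve plus whatever VM_PAGE_FREE_MIN()/VM_PAGE_FREE_TARGET()
 * (machine/vm_tuning.h) yield for that remainder, with free_target
 * forced to stay at least 5 pages above free_min.
 */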
2029
2030 /*
2031 * vm_pageout is the high level pageout daemon.
2032 */
2033
2034
2035 void
2036 vm_pageout(void)
2037 {
2038 thread_t self = current_thread();
2039
2040 /*
2041 * Set thread privileges.
2042 */
2043 self->vm_privilege = TRUE;
2044 stack_privilege(self);
2045 thread_swappable(current_act(), FALSE);
2046
2047 /*
2048 * Initialize some paging parameters.
2049 */
2050
2051 if (vm_page_laundry_max == 0)
2052 vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX;
2053
2054 if (vm_pageout_burst_max == 0)
2055 vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;
2056
2057 if (vm_pageout_burst_wait == 0)
2058 vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
2059
2060 if (vm_pageout_empty_wait == 0)
2061 vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
2062
2063 vm_page_free_count_init = vm_page_free_count;
2064 /*
2065 * Even if we've already called vm_page_free_reserve,
2066 * call it again here to ensure that the targets are
2067 * accurately calculated (it uses vm_page_free_count_init).
2068 * Calling it with an arg of 0 will not change the reserve
2069 * but will recalculate free_min and free_target.
2070 */
2071 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED)
2072 vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved);
2073 else
2074 vm_page_free_reserve(0);
2075
2076 /*
2077 * vm_pageout_scan will set vm_page_inactive_target.
2078 *
2079 * The pageout daemon is never done, so loop forever.
2080 * We should call vm_pageout_scan at least once each
2081 * time we are woken, even if vm_page_free_wanted is
2082 * zero, to check vm_page_free_target and
2083 * vm_page_inactive_target.
2084 */
2085 for (;;) {
2086 vm_pageout_scan();
2087 /* we hold vm_page_queue_free_lock now */
2088 assert(vm_page_free_wanted == 0);
2089 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
2090 mutex_unlock(&vm_page_queue_free_lock);
2091 counter(c_vm_pageout_block++);
2092 thread_block((void (*)(void)) 0);
2093 }
2094 /*NOTREACHED*/
2095 }
2096
2097
2098 void
2099 upl_dealloc(
2100 upl_t upl)
2101 {
2102 upl->ref_count -= 1;
2103 if(upl->ref_count == 0) {
2104 upl_destroy(upl);
2105 }
2106 }
2107
2108
2109 /*
2110 * Routine: vm_fault_list_request
2111 * Purpose:
2112 * Cause the population of a portion of a vm_object.
2113 * Depending on the nature of the request, the pages
2114 * returned may contain valid data or be uninitialized.
2115 * A page list structure, listing the physical pages,
2116 * will be returned upon request.
2117 * This function is called by the file system or any other
2118 * supplier of backing store to a pager.
2119 * IMPORTANT NOTE: The caller must still respect the relationship
2120 * between the vm_object and its backing memory object. The
2121 * caller MUST NOT substitute changes in the backing file
2122 * without first doing a memory_object_lock_request on the
2123 * target range unless it is known that the pages are not
2124 * shared with another entity at the pager level.
2125 * Copy_in_to:
2126 * if a page list structure is present
2127 * return the mapped physical pages, where a
2128 * page is not present, return a non-initialized
2129 * one. If the no_sync bit is turned on, don't
2130 * call the pager unlock to synchronize with other
2131 * possible copies of the page. Leave pages busy
2132 * in the original object, if a page list structure
2133 * was specified. When a commit of the page list
2134 * pages is done, the dirty bit will be set for each one.
2135 * Copy_out_from:
2136 * If a page list structure is present, return
2137 * all mapped pages. Where a page does not exist
2138 * map a zero filled one. Leave pages busy in
2139 * the original object. If a page list structure
2140 * is not specified, this call is a no-op.
2141 *
2142 * Note: access of default pager objects has a rather interesting
2143 * twist. The caller of this routine, presumably the file system
2144 * page cache handling code, will never actually make a request
2145 * against a default pager backed object. Only the default
2146 * pager will make requests on backing store related vm_objects.
2147 * In this way the default pager can maintain the relationship
2148 * between backing store files (abstract memory objects) and
2149 * the vm_objects (cache objects) they support.
2150 *
2151 */
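/*
 * Hedged usage sketch (illustrative only; no such call is made in
 * this file): a backing-store supplier gathering dirty pages might
 * drive this routine roughly as follows, assuming it already holds
 * a reference on "object" and that "size" covers no more than
 * MAX_UPL_TRANSFER pages.
 *
 *	upl_t		upl;
 *	upl_page_info_t	*pl = NULL;
 *	kern_return_t	kr;
 *
 *	kr = vm_fault_list_request(object, offset, size, &upl, &pl,
 *			MAX_UPL_TRANSFER,
 *			UPL_COPYOUT_FROM | UPL_SET_INTERNAL);
 *	if (kr == KERN_SUCCESS) {
 *		... write the pages described by pl to backing store ...
 *		uc_upl_commit_range(upl, 0, size,
 *			UPL_COMMIT_FREE_ON_EMPTY, pl);
 *	}
 */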
2152 kern_return_t
2153 vm_fault_list_request(
2154 vm_object_t object,
2155 vm_object_offset_t offset,
2156 vm_size_t size,
2157 upl_t *upl_ptr,
2158 upl_page_info_t **user_page_list_ptr,
2159 int page_list_count,
2160 int cntrl_flags)
2161 {
2162 vm_page_t dst_page;
2163 vm_object_offset_t dst_offset = offset;
2164 upl_page_info_t *user_page_list = NULL;
2165 vm_size_t xfer_size = size;
2166 boolean_t do_m_lock = FALSE;
2167 boolean_t dirty;
2168 upl_t upl = NULL;
2169 int entry;
2170 boolean_t encountered_lrp = FALSE;
2171
2172 vm_page_t alias_page = NULL;
2173
2174 if(cntrl_flags & UPL_SET_INTERNAL)
2175 page_list_count = MAX_UPL_TRANSFER;
2176 if(((user_page_list_ptr || (cntrl_flags & UPL_SET_INTERNAL)) &&
2177 !(object->private)) && (page_list_count < (size/page_size)))
2178 return KERN_INVALID_ARGUMENT;
2179
2180 if((!object->internal) && (object->paging_offset != 0))
2181 panic("vm_fault_list_request: vnode object with non-zero paging offset\n");
2182
2183 if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
2184 return KERN_SUCCESS;
2185 }
2186 if(upl_ptr) {
2187 if((cntrl_flags & UPL_SET_INTERNAL) && !(object->private)) {
2188 upl = upl_create(TRUE);
2189 user_page_list = (upl_page_info_t *)
2190 (((vm_offset_t)upl) + sizeof(struct upl));
2191 if(user_page_list_ptr)
2192 *user_page_list_ptr = user_page_list;
2193 upl->flags |= UPL_INTERNAL;
2194 } else {
2195 upl = upl_create(FALSE);
2196 if(user_page_list_ptr)
2197 user_page_list = *user_page_list_ptr;
2198 else
2199 user_page_list = NULL;
2200 if(object->private) {
2201 upl->size = size;
2202 upl->offset = offset;
2203 *upl_ptr = upl;
2204 if(user_page_list) {
2205 user_page_list[0].phys_addr = offset;
2206 user_page_list[0].device = TRUE;
2207 }
2208 upl->flags = UPL_DEVICE_MEMORY;
2209 return KERN_SUCCESS;
2210 }
2211
2212
2213 }
2214 upl->map_object = vm_object_allocate(size);
2215 vm_object_lock(upl->map_object);
2216 upl->map_object->shadow = object;
2217 upl->size = size;
2218 upl->offset = offset + object->paging_offset;
2219 upl->map_object->pageout = TRUE;
2220 upl->map_object->can_persist = FALSE;
2221 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2222 upl->map_object->shadow_offset = offset;
2223 vm_object_unlock(upl->map_object);
2224 *upl_ptr = upl;
2225 }
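/*
 * When a UPL was requested above, its map_object shadows the source
 * object at "offset"; the fictitious alias_page grabbed below stands
 * in for each real page as it is gathered (see vm_pageclean_setup).
 */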
2226 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2227 vm_object_lock(object);
2228 #ifdef UBC_DEBUG
2229 if(upl_ptr)
2230 queue_enter(&object->uplq, upl, upl_t, uplq);
2231 #endif /* UBC_DEBUG */
2232 vm_object_paging_begin(object);
2233 entry = 0;
2234 if(cntrl_flags & UPL_COPYOUT_FROM) {
2235 upl->flags |= UPL_PAGE_SYNC_DONE;
2236 while (xfer_size) {
2237 if(alias_page == NULL) {
2238 vm_object_unlock(object);
2239 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2240 vm_object_lock(object);
2241 }
2242 if(((dst_page = vm_page_lookup(object,
2243 dst_offset)) == VM_PAGE_NULL) ||
2244 dst_page->fictitious ||
2245 dst_page->absent ||
2246 dst_page->error ||
2247 (dst_page->wire_count != 0 &&
2248 !dst_page->pageout) ||
2249 ((!(dst_page->dirty || dst_page->precious ||
2250 pmap_is_modified(dst_page->phys_addr)))
2251 && (cntrl_flags & UPL_RET_ONLY_DIRTY))) {
2252 if(user_page_list)
2253 user_page_list[entry].phys_addr = 0;
2254 } else {
2255
2256 if(dst_page->busy &&
2257 (!(dst_page->list_req_pending &&
2258 dst_page->pageout))) {
2259 if(cntrl_flags & UPL_NOBLOCK) {
2260 if(user_page_list)
2261 user_page_list[entry]
2262 .phys_addr = 0;
2263 entry++;
2264 dst_offset += PAGE_SIZE_64;
2265 xfer_size -= PAGE_SIZE;
2266 continue;
2267 }
2268 /*someone else is playing with the */
2269 /* page. We will have to wait. */
2270 PAGE_ASSERT_WAIT(
2271 dst_page, THREAD_UNINT);
2272 vm_object_unlock(object);
2273 thread_block((void(*)(void))0);
2274 vm_object_lock(object);
2275 continue;
2276 }
2277 /* Someone else already cleaning the page? */
2278 if((dst_page->cleaning || dst_page->absent ||
2279 dst_page->prep_pin_count != 0 ||
2280 dst_page->wire_count != 0) &&
2281 !dst_page->list_req_pending) {
2282 if(user_page_list)
2283 user_page_list[entry].phys_addr = 0;
2284 entry++;
2285 dst_offset += PAGE_SIZE_64;
2286 xfer_size -= PAGE_SIZE;
2287 continue;
2288 }
2289 /* eliminate all mappings from the */
2290 /* original object and its progeny */
2291
2292 vm_page_lock_queues();
2293 pmap_page_protect(dst_page->phys_addr,
2294 VM_PROT_NONE);
2295
2296 /* pageout statistics gathering. count */
2297 /* all the pages we will page out that */
2298 /* were not counted in the initial */
2299 /* vm_pageout_scan work */
2300 if(dst_page->list_req_pending)
2301 encountered_lrp = TRUE;
2302 if((dst_page->dirty ||
2303 (dst_page->object->internal &&
2304 dst_page->precious)) &&
2305 (dst_page->list_req_pending
2306 == FALSE)) {
2307 if(encountered_lrp) {
2308 CLUSTER_STAT
2309 (pages_at_higher_offsets++;)
2310 } else {
2311 CLUSTER_STAT
2312 (pages_at_lower_offsets++;)
2313 }
2314 }
2315
2316 /* Turn off busy indication on pending */
2317 /* pageout. Note: we can only get here */
2318 /* in the request pending case. */
2319 dst_page->list_req_pending = FALSE;
2320 dst_page->busy = FALSE;
2321 dst_page->cleaning = FALSE;
2322
2323 dirty = pmap_is_modified(dst_page->phys_addr);
2324 dirty = dirty ? TRUE : dst_page->dirty;
2325
2326 /* use pageclean setup, it is more convenient */
2327 /* even for the pageout cases here */
2328 vm_pageclean_setup(dst_page, alias_page,
2329 upl->map_object, size - xfer_size);
2330
2331 if(!dirty) {
2332 dst_page->dirty = FALSE;
2333 dst_page->precious = TRUE;
2334 }
2335
2336 if(dst_page->pageout)
2337 dst_page->busy = TRUE;
2338
2339 alias_page->absent = FALSE;
2340 alias_page = NULL;
2341 if(!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
2342 /* deny access to the target page */
2343 /* while it is being worked on */
2344 if((!dst_page->pageout) &&
2345 (dst_page->wire_count == 0)) {
2346 dst_page->busy = TRUE;
2347 dst_page->pageout = TRUE;
2348 vm_page_wire(dst_page);
2349 }
2350 }
2351 if(user_page_list) {
2352 user_page_list[entry].phys_addr
2353 = dst_page->phys_addr;
2354 user_page_list[entry].dirty =
2355 dst_page->dirty;
2356 user_page_list[entry].pageout =
2357 dst_page->pageout;
2358 user_page_list[entry].absent =
2359 dst_page->absent;
2360 user_page_list[entry].precious =
2361 dst_page->precious;
2362 }
2363
2364 vm_page_unlock_queues();
2365 }
2366 entry++;
2367 dst_offset += PAGE_SIZE_64;
2368 xfer_size -= PAGE_SIZE;
2369 }
2370 } else {
2371 while (xfer_size) {
2372 if(alias_page == NULL) {
2373 vm_object_unlock(object);
2374 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2375 vm_object_lock(object);
2376 }
2377 dst_page = vm_page_lookup(object, dst_offset);
2378 if(dst_page != VM_PAGE_NULL) {
2379 if((dst_page->cleaning) &&
2380 !(dst_page->list_req_pending)) {
2381 /*someone else is writing to the */
2382 /* page. We will have to wait. */
2383 PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
2384 vm_object_unlock(object);
2385 thread_block((void(*)(void))0);
2386 vm_object_lock(object);
2387 continue;
2388 }
2389 if ((dst_page->fictitious &&
2390 dst_page->list_req_pending)) {
2391 /* dump the fictitious page */
2392 dst_page->list_req_pending = FALSE;
2393 dst_page->clustered = FALSE;
2394 vm_page_lock_queues();
2395 vm_page_free(dst_page);
2396 vm_page_unlock_queues();
2397 } else if ((dst_page->absent &&
2398 dst_page->list_req_pending)) {
2399 /* the default_pager case */
2400 dst_page->list_req_pending = FALSE;
2401 dst_page->busy = FALSE;
2402 dst_page->clustered = FALSE;
2403 }
2404 }
2405 if((dst_page = vm_page_lookup(
2406 object, dst_offset)) == VM_PAGE_NULL) {
2407 /* need to allocate a page */
2408 dst_page = vm_page_alloc(object, dst_offset);
2409 if (dst_page == VM_PAGE_NULL) {
2410 vm_object_unlock(object);
2411 VM_PAGE_WAIT();
2412 vm_object_lock(object);
2413 continue;
2414 }
2415 dst_page->busy = FALSE;
2416 #if 0
2417 if(cntrl_flags & UPL_NO_SYNC) {
2418 dst_page->page_lock = 0;
2419 dst_page->unlock_request = 0;
2420 }
2421 #endif
2422 dst_page->absent = TRUE;
2423 object->absent_count++;
2424 }
2425 #if 1
2426 if(cntrl_flags & UPL_NO_SYNC) {
2427 dst_page->page_lock = 0;
2428 dst_page->unlock_request = 0;
2429 }
2430 #endif /* 1 */
2431 dst_page->overwriting = TRUE;
2432 if(dst_page->fictitious) {
2433 panic("need corner case for fictitious page");
2434 }
2435 if(dst_page->page_lock) {
2436 do_m_lock = TRUE;
2437 }
2438 if(upl_ptr) {
2439
2440 /* eliminate all mappings from the */
2441 /* original object and its progeny */
2442
2443 if(dst_page->busy) {
2444 /*someone else is playing with the */
2445 /* page. We will have to wait. */
2446 PAGE_ASSERT_WAIT(
2447 dst_page, THREAD_UNINT);
2448 vm_object_unlock(object);
2449 thread_block((void(*)(void))0);
2450 vm_object_lock(object);
2451 continue;
2452 }
2453
2454 vm_page_lock_queues();
2455 pmap_page_protect(dst_page->phys_addr,
2456 VM_PROT_NONE);
2457 dirty = pmap_is_modified(dst_page->phys_addr);
2458 dirty = dirty ? TRUE : dst_page->dirty;
2459
2460 vm_pageclean_setup(dst_page, alias_page,
2461 upl->map_object, size - xfer_size);
2462
2463 if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
2464 /* clean in place for read implies */
2465 /* that a write will be done on all */
2466 /* the pages that are dirty before */
2467 /* a upl commit is done. The caller */
2468 /* is obligated to preserve the */
2469 /* contents of all pages marked */
2470 /* dirty. */
2471 upl->flags |= UPL_CLEAR_DIRTY;
2472 }
2473
2474 if(!dirty) {
2475 dst_page->dirty = FALSE;
2476 dst_page->precious = TRUE;
2477 }
2478
2479 if (dst_page->wire_count == 0) {
2480 /* deny access to the target page while */
2481 /* it is being worked on */
2482 dst_page->busy = TRUE;
2483 } else {
2484 vm_page_wire(dst_page);
2485 }
2486 /* expect the page to be used */
2487 dst_page->reference = TRUE;
2488 dst_page->precious =
2489 (cntrl_flags & UPL_PRECIOUS)
2490 ? TRUE : FALSE;
2491 alias_page->absent = FALSE;
2492 alias_page = NULL;
2493 if(user_page_list) {
2494 user_page_list[entry].phys_addr
2495 = dst_page->phys_addr;
2496 user_page_list[entry].dirty =
2497 dst_page->dirty;
2498 user_page_list[entry].pageout =
2499 dst_page->pageout;
2500 user_page_list[entry].absent =
2501 dst_page->absent;
2502 user_page_list[entry].precious =
2503 dst_page->precious;
2504 }
2505 vm_page_unlock_queues();
2506 }
2507 entry++;
2508 dst_offset += PAGE_SIZE_64;
2509 xfer_size -= PAGE_SIZE;
2510 }
2511 }
2512 if(alias_page != NULL) {
2513 vm_page_lock_queues();
2514 vm_page_free(alias_page);
2515 vm_page_unlock_queues();
2516 }
2517 if(do_m_lock) {
2518 vm_prot_t access_required;
2519 /* call back all associated pages from other users of the pager. */
2520 /* all future updates will be on data which is based on the */
2521 /* changes we are going to make here. Note: it is assumed that */
2522 /* we already hold copies of the data so we will not be seeing */
2523 /* an avalanche of incoming data from the pager */
2524 access_required = (cntrl_flags & UPL_COPYOUT_FROM)
2525 ? VM_PROT_READ : VM_PROT_WRITE;
2526 while (TRUE) {
2527 kern_return_t rc;
2528 thread_t thread;
2529
2530 if(!object->pager_ready) {
2531 thread = current_thread();
2532 vm_object_assert_wait(object,
2533 VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
2534 vm_object_unlock(object);
2535 thread_block((void (*)(void))0);
2536 if (thread->wait_result != THREAD_AWAKENED) {
2537 return(KERN_FAILURE);
2538 }
2539 vm_object_lock(object);
2540 continue;
2541 }
2542
2543 vm_object_unlock(object);
2544
2545 if ((rc = memory_object_data_unlock(
2546 object->pager,
2547 object->pager_request,
2548 dst_offset + object->paging_offset,
2549 size,
2550 access_required))) {
2551 if (rc == MACH_SEND_INTERRUPTED)
2552 continue;
2553 else
2554 return KERN_FAILURE;
2555 }
2556 break;
2557
2558 }
2559 /* let's wait on the last page requested */
2560 /* NOTE: we will have to update lock completed routine to signal */
2561 if(dst_page != VM_PAGE_NULL &&
2562 (access_required & dst_page->page_lock) != access_required) {
2563 PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
2564 thread_block((void (*)(void))0);
2565 vm_object_lock(object);
2566 }
2567 }
2568 vm_object_unlock(object);
2569 return KERN_SUCCESS;
2570 }
2571
2572
2573 kern_return_t
2574 upl_system_list_request(
2575 vm_object_t object,
2576 vm_object_offset_t offset,
2577 vm_size_t size,
2578 vm_size_t super_cluster,
2579 upl_t *upl,
2580 upl_page_info_t **user_page_list_ptr,
2581 int page_list_count,
2582 int cntrl_flags)
2583 {
2584 if(object->paging_offset > offset)
2585 return KERN_FAILURE;
2586 offset = offset - object->paging_offset;
2587
2588 /* turns off super cluster exercised by the default_pager */
2589 /*
2590 super_cluster = size;
2591 */
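/*
 * Hedged example of the rounding below (illustrative values): with
 * super_cluster = 0x10000, offset = 0x11000 and size = 0x2000,
 * base_offset becomes 0x10000 and, since offset+size still fits in
 * one cluster, super_size stays 0x10000 (before being clipped to the
 * object size); the request is then widened to that aligned range.
 */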
2592 if ((super_cluster > size) &&
2593 (vm_page_free_count > vm_page_free_reserved)) {
2594
2595 vm_object_offset_t base_offset;
2596 vm_size_t super_size;
2597
2598 base_offset = (offset &
2599 ~((vm_object_offset_t) super_cluster - 1));
2600 super_size = (offset+size) > (base_offset + super_cluster) ?
2601 super_cluster<<1 : super_cluster;
2602 super_size = ((base_offset + super_size) > object->size) ?
2603 (object->size - base_offset) : super_size;
2604 if(offset > (base_offset + super_size))
2605 panic("upl_system_list_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset, base_offset, super_size, super_cluster, size, object->paging_offset);
2606 /* apparently there is a case where the vm requests a */
2607 /* page to be written out whose offset is beyond the */
2608 /* object size */
2609 if((offset + size) > (base_offset + super_size))
2610 super_size = (offset + size) - base_offset;
2611
2612 offset = base_offset;
2613 size = super_size;
2614 }
2615 return vm_fault_list_request(object, offset, size, upl, user_page_list_ptr,
2616 page_list_count, cntrl_flags);
2617 }
2618
2619
2620 kern_return_t
2621 uc_upl_map(
2622 vm_map_t map,
2623 upl_t upl,
2624 vm_offset_t *dst_addr)
2625 {
2626 vm_size_t size;
2627 vm_object_offset_t offset;
2628 vm_offset_t addr;
2629 vm_page_t m;
2630 kern_return_t kr;
2631
2632 /* check to see if already mapped */
2633 if(UPL_PAGE_LIST_MAPPED & upl->flags)
2634 return KERN_FAILURE;
2635
2636 offset = 0; /* Always map the entire object */
2637 size = upl->size;
2638
2639 vm_object_lock(upl->map_object);
2640 upl->map_object->ref_count++;
2641 vm_object_res_reference(upl->map_object);
2642 vm_object_unlock(upl->map_object);
2643
2644 *dst_addr = 0;
2645
2646
2647 /* NEED A UPL_MAP ALIAS */
2648 kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE,
2649 upl->map_object, offset, FALSE,
2650 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
2651
2652 if (kr != KERN_SUCCESS)
2653 return(kr);
2654
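/*
 * Enter every resident page of the map_object into the pmap up
 * front, so accesses through the new mapping need not fault for
 * pages that are already present.
 */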
2655 for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
2656 m = vm_page_lookup(upl->map_object, offset);
2657 if(m) {
2658 PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, TRUE);
2659 }
2660 offset+=PAGE_SIZE_64;
2661 }
2662
2663 upl->flags |= UPL_PAGE_LIST_MAPPED;
2664 upl->kaddr = *dst_addr;
2665 return KERN_SUCCESS;
2666 }
2667
2668
2669 kern_return_t
2670 uc_upl_un_map(
2671 vm_map_t map,
2672 upl_t upl)
2673 {
2674 vm_size_t size;
2675
2676 if(upl->flags & UPL_PAGE_LIST_MAPPED) {
2677 size = upl->size;
2678 vm_deallocate(map, upl->kaddr, size);
2679 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
2680 upl->kaddr = (vm_offset_t) 0;
2681 return KERN_SUCCESS;
2682 } else {
2683 return KERN_FAILURE;
2684 }
2685 }
2686
2687 kern_return_t
2688 uc_upl_commit_range(
2689 upl_t upl,
2690 vm_offset_t offset,
2691 vm_size_t size,
2692 int flags,
2693 upl_page_info_t *page_list)
2694 {
2695 vm_size_t xfer_size = size;
2696 vm_object_t shadow_object = upl->map_object->shadow;
2697 vm_object_t object = upl->map_object;
2698 vm_object_offset_t target_offset;
2699 vm_object_offset_t page_offset;
2700 int entry;
2701
2702 if(upl->flags & UPL_DEVICE_MEMORY) {
2703 xfer_size = 0;
2704 } else if ((offset + size) > upl->size) {
2705 return KERN_FAILURE;
2706 }
2707
2708 vm_object_lock(shadow_object);
2709
2710 entry = offset/PAGE_SIZE;
2711 target_offset = (vm_object_offset_t)offset;
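/*
 * For each page in the committed range: free the alias page held in
 * the UPL's map_object, look up the real page in the shadow object,
 * and update its state (laundry throttle, pageout/wire status, dirty
 * and precious bits) according to the commit flags and page list.
 */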
2712 while(xfer_size) {
2713 vm_page_t t,m;
2714 upl_page_info_t *p;
2715
2716 if((t = vm_page_lookup(object, target_offset)) != NULL) {
2717
2718 t->pageout = FALSE;
2719 page_offset = t->offset;
2720 VM_PAGE_FREE(t);
2721 t = VM_PAGE_NULL;
2722 m = vm_page_lookup(shadow_object,
2723 page_offset + object->shadow_offset);
2724 if(m != VM_PAGE_NULL) {
2725 vm_object_paging_end(shadow_object);
2726 vm_page_lock_queues();
2727 if ((upl->flags & UPL_CLEAR_DIRTY) ||
2728 (flags & UPL_COMMIT_CLEAR_DIRTY)) {
2729 pmap_clear_modify(m->phys_addr);
2730 m->dirty = FALSE;
2731 }
2732 if(page_list) {
2733 p = &(page_list[entry]);
2734 if(p->phys_addr && p->pageout && !m->pageout) {
2735 m->busy = TRUE;
2736 m->pageout = TRUE;
2737 vm_page_wire(m);
2738 } else if (page_list[entry].phys_addr &&
2739 !p->pageout && m->pageout) {
2740 m->pageout = FALSE;
2741 m->absent = FALSE;
2742 m->overwriting = FALSE;
2743 vm_page_unwire(m);
2744 PAGE_WAKEUP_DONE(m);
2745 }
2746 page_list[entry].phys_addr = 0;
2747 }
2748 if(m->laundry) {
2749 vm_page_laundry_count--;
2750 m->laundry = FALSE;
2751 if (vm_page_laundry_count < vm_page_laundry_min) {
2752 vm_page_laundry_min = 0;
2753 thread_wakeup((event_t)
2754 &vm_page_laundry_count);
2755 }
2756 }
2757 if(m->pageout) {
2758 m->cleaning = FALSE;
2759 m->pageout = FALSE;
2760 #if MACH_CLUSTER_STATS
2761 if (m->wanted) vm_pageout_target_collisions++;
2762 #endif
2763 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
2764 m->dirty = pmap_is_modified(m->phys_addr);
2765 if(m->dirty) {
2766 CLUSTER_STAT(
2767 vm_pageout_target_page_dirtied++;)
2768 vm_page_unwire(m);/* reactivates */
2769 VM_STAT(reactivations++);
2770 PAGE_WAKEUP_DONE(m);
2771 } else if (m->prep_pin_count != 0) {
2772 vm_page_pin_lock();
2773 if (m->pin_count != 0) {
2774 /* page is pinned; reactivate */
2775 CLUSTER_STAT(
2776 vm_pageout_target_page_pinned++;)
2777 vm_page_unwire(m);/* reactivates */
2778 VM_STAT(reactivations++);
2779 PAGE_WAKEUP_DONE(m);
2780 } else {
2781 /*
2782 * page is prepped but not pinned;
2783 * send it into limbo. Note that
2784 * vm_page_free (which will be
2785 * called after releasing the pin
2786 * lock) knows how to handle a page
2787 * with limbo set.
2788 */
2789 m->limbo = TRUE;
2790 CLUSTER_STAT(
2791 vm_pageout_target_page_limbo++;)
2792 }
2793 vm_page_pin_unlock();
2794 if (m->limbo)
2795 vm_page_free(m);
2796 } else {
2797 CLUSTER_STAT(
2798 vm_pageout_target_page_freed++;)
2799 vm_page_free(m);/* clears busy, etc. */
2800 }
2801 vm_page_unlock_queues();
2802 target_offset += PAGE_SIZE_64;
2803 xfer_size -= PAGE_SIZE;
2804 entry++;
2805 continue;
2806 }
2807 if (flags & UPL_COMMIT_INACTIVATE) {
2808 vm_page_deactivate(m);
2809 m->reference = FALSE;
2810 pmap_clear_reference(m->phys_addr);
2811 } else if (!m->active && !m->inactive) {
2812 if (m->reference || m->prep_pin_count != 0)
2813 vm_page_activate(m);
2814 else
2815 vm_page_deactivate(m);
2816 }
2817 #if MACH_CLUSTER_STATS
2818 m->dirty = pmap_is_modified(m->phys_addr);
2819
2820 if (m->dirty) vm_pageout_cluster_dirtied++;
2821 else vm_pageout_cluster_cleaned++;
2822 if (m->wanted) vm_pageout_cluster_collisions++;
2823 #else
2824 m->dirty = 0;
2825 #endif
2826
2827 if((m->busy) && (m->cleaning)) {
2828 /* the request_page_list case */
2829 if(m->absent) {
2830 m->absent = FALSE;
2831 if(shadow_object->absent_count == 1)
2832 vm_object_absent_release(shadow_object);
2833 else
2834 shadow_object->absent_count--;
2835 }
2836 m->overwriting = FALSE;
2837 m->busy = FALSE;
2838 m->dirty = FALSE;
2839 }
2840 else if (m->overwriting) {
2841 /* alternate request page list, write to */
2842 /* page_list case. Occurs when the original */
2843 /* page was wired at the time of the list */
2844 /* request */
2845 assert(m->wire_count != 0);
2846 vm_page_unwire(m);/* reactivates */
2847 m->overwriting = FALSE;
2848 }
2849 m->cleaning = FALSE;
2850 /* It is a part of the semantic of COPYOUT_FROM */
2851 /* UPLs that a commit implies cache sync */
2852 /* between the vm page and the backing store */
2853 /* this can be used to strip the precious bit */
2854 /* as well as clean */
2855 if (upl->flags & UPL_PAGE_SYNC_DONE)
2856 m->precious = FALSE;
2857
2858 if (flags & UPL_COMMIT_SET_DIRTY) {
2859 m->dirty = TRUE;
2860 }
2861 /*
2862 * Wake up any thread waiting for the page to finish cleaning.
2863 */
2864 PAGE_WAKEUP(m);
2865 vm_page_unlock_queues();
2866
2867 }
2868 }
2869 target_offset += PAGE_SIZE_64;
2870 xfer_size -= PAGE_SIZE;
2871 entry++;
2872 }
2873
2874 vm_object_unlock(shadow_object);
2875 if(flags & UPL_COMMIT_FREE_ON_EMPTY) {
2876 if((upl->flags & UPL_DEVICE_MEMORY)
2877 || (queue_empty(&upl->map_object->memq))) {
2878 upl_dealloc(upl);
2879 }
2880 }
2881 return KERN_SUCCESS;
2882 }
2883
2884 kern_return_t uc_upl_abort_range(
2885 upl_t upl,
2886 vm_offset_t offset,
2887 vm_size_t size,
2888 int error)
2889 {
2890 vm_size_t xfer_size = size;
2891 vm_object_t shadow_object = upl->map_object->shadow;
2892 vm_object_t object = upl->map_object;
2893 vm_object_offset_t target_offset;
2894 vm_object_offset_t page_offset;
2895 int entry;
2896
2897 if(upl->flags & UPL_DEVICE_MEMORY) {
2898 xfer_size = 0;
2899 } else if ((offset + size) > upl->size) {
2900 return KERN_FAILURE;
2901 }
2902
2903
2904 vm_object_lock(shadow_object);
2905
2906 entry = offset/PAGE_SIZE;
2907 target_offset = (vm_object_offset_t)offset;
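/*
 * Mirror of the commit path, but for an aborted range: the alias
 * page is freed, and the real page in the shadow object is either
 * dumped or released back to the paging queues with whatever error
 * state the caller requested.
 */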
2908 while(xfer_size) {
2909 vm_page_t t,m;
2910 upl_page_info_t *p;
2911
2912 if((t = vm_page_lookup(object, target_offset)) != NULL) {
2913
2914 t->pageout = FALSE;
2915 page_offset = t->offset;
2916 VM_PAGE_FREE(t);
2917 t = VM_PAGE_NULL;
2918 m = vm_page_lookup(shadow_object,
2919 page_offset + object->shadow_offset);
2920 if(m != VM_PAGE_NULL) {
2921 vm_object_paging_end(m->object);
2922 vm_page_lock_queues();
2923 if(m->absent) {
2924 /* COPYOUT = FALSE case */
2925 /* check for error conditions which must */
2926 /* be passed back to the page's customer */
2927 if(error & UPL_ABORT_RESTART) {
2928 m->restart = TRUE;
2929 m->absent = FALSE;
2930 vm_object_absent_release(m->object);
2931 m->page_error = KERN_MEMORY_ERROR;
2932 m->error = TRUE;
2933 } else if(error & UPL_ABORT_UNAVAILABLE) {
2934 m->restart = FALSE;
2935 m->unusual = TRUE;
2936 m->clustered = FALSE;
2937 } else if(error & UPL_ABORT_ERROR) {
2938 m->restart = FALSE;
2939 m->absent = FALSE;
2940 vm_object_absent_release(m->object);
2941 m->page_error = KERN_MEMORY_ERROR;
2942 m->error = TRUE;
2943 } else if(error & UPL_ABORT_DUMP_PAGES) {
2944 m->clustered = TRUE;
2945 } else {
2946 m->clustered = TRUE;
2947 }
2948
2949
2950 m->cleaning = FALSE;
2951 m->overwriting = FALSE;
2952 PAGE_WAKEUP_DONE(m);
2953 if(m->clustered) {
2954 vm_page_free(m);
2955 } else {
2956 vm_page_activate(m);
2957 }
2958
2959 vm_page_unlock_queues();
2960 target_offset += PAGE_SIZE_64;
2961 xfer_size -= PAGE_SIZE;
2962 entry++;
2963 continue;
2964 }
2965 /*
2966 * Handle the trusted pager throttle.
2967 */
2968 if (m->laundry) {
2969 vm_page_laundry_count--;
2970 m->laundry = FALSE;
2971 if (vm_page_laundry_count
2972 < vm_page_laundry_min) {
2973 vm_page_laundry_min = 0;
2974 thread_wakeup((event_t)
2975 &vm_page_laundry_count);
2976 }
2977 }
2978 if(m->pageout) {
2979 assert(m->busy);
2980 assert(m->wire_count == 1);
2981 m->pageout = FALSE;
2982 vm_page_unwire(m);
2983 }
2984 m->cleaning = FALSE;
2985 m->busy = FALSE;
2986 m->overwriting = FALSE;
2987 #if MACH_PAGEMAP
2988 vm_external_state_clr(
2989 m->object->existence_map, m->offset);
2990 #endif /* MACH_PAGEMAP */
2991 if(error & UPL_ABORT_DUMP_PAGES) {
2992 vm_page_free(m);
2993 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
2994 } else {
2995 PAGE_WAKEUP(m);
2996 }
2997 vm_page_unlock_queues();
2998 }
2999 }
3000 target_offset += PAGE_SIZE_64;
3001 xfer_size -= PAGE_SIZE;
3002 entry++;
3003 }
3004 vm_object_unlock(shadow_object);
3005 if(error & UPL_ABORT_FREE_ON_EMPTY) {
3006 if((upl->flags & UPL_DEVICE_MEMORY)
3007 || (queue_empty(&upl->map_object->memq))) {
3008 upl_dealloc(upl);
3009 }
3010 }
3011 return KERN_SUCCESS;
3012 }
3013
3014 kern_return_t
3015 uc_upl_abort(
3016 upl_t upl,
3017 int error)
3018 {
3019 vm_object_t object = NULL;
3020 vm_object_t shadow_object = NULL;
3021 vm_object_offset_t offset;
3022 vm_object_offset_t shadow_offset;
3023 vm_object_offset_t target_offset;
3024 int i;
3025 vm_page_t t,m;
3026
3027 if(upl->flags & UPL_DEVICE_MEMORY) {
3028 upl_dealloc(upl);
3029 return KERN_SUCCESS;
3030 }
3031 object = upl->map_object;
3032
3033 if(object == NULL) {
3034 panic("upl_abort: upl object is not backed by an object");
3035 return KERN_INVALID_ARGUMENT;
3036 }
3037
3038 shadow_object = upl->map_object->shadow;
3039 shadow_offset = upl->map_object->shadow_offset;
3040 offset = 0;
3041 vm_object_lock(shadow_object);
3042 for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) {
3043 if((t = vm_page_lookup(object,offset)) != NULL) {
3044 target_offset = t->offset + shadow_offset;
3045 if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) {
3046 vm_object_paging_end(m->object);
3047 vm_page_lock_queues();
3048 if(m->absent) {
3049 /* COPYOUT = FALSE case */
3050 /* check for error conditions which must */
3051 /* be passed back to the page's customer */
3052 if(error & UPL_ABORT_RESTART) {
3053 m->restart = TRUE;
3054 m->absent = FALSE;
3055 vm_object_absent_release(m->object);
3056 m->page_error = KERN_MEMORY_ERROR;
3057 m->error = TRUE;
3058 } else if(error & UPL_ABORT_UNAVAILABLE) {
3059 m->restart = FALSE;
3060 m->unusual = TRUE;
3061 m->clustered = FALSE;
3062 } else if(error & UPL_ABORT_ERROR) {
3063 m->restart = FALSE;
3064 m->absent = FALSE;
3065 vm_object_absent_release(m->object);
3066 m->page_error = KERN_MEMORY_ERROR;
3067 m->error = TRUE;
3068 } else if(error & UPL_ABORT_DUMP_PAGES) {
3069 m->clustered = TRUE;
3070 } else {
3071 m->clustered = TRUE;
3072 }
3073
3074 m->cleaning = FALSE;
3075 m->overwriting = FALSE;
3076 PAGE_WAKEUP_DONE(m);
3077 if(m->clustered) {
3078 vm_page_free(m);
3079 } else {
3080 vm_page_activate(m);
3081 }
3082 vm_page_unlock_queues();
3083 continue;
3084 }
3085 /*
3086 * Handle the trusted pager throttle.
3087 */
3088 if (m->laundry) {
3089 vm_page_laundry_count--;
3090 m->laundry = FALSE;
3091 if (vm_page_laundry_count
3092 < vm_page_laundry_min) {
3093 vm_page_laundry_min = 0;
3094 thread_wakeup((event_t)
3095 &vm_page_laundry_count);
3096 }
3097 }
3098 if(m->pageout) {
3099 assert(m->busy);
3100 assert(m->wire_count == 1);
3101 m->pageout = FALSE;
3102 vm_page_unwire(m);
3103 }
3104 m->cleaning = FALSE;
3105 m->busy = FALSE;
3106 m->overwriting = FALSE;
3107 #if MACH_PAGEMAP
3108 vm_external_state_clr(
3109 m->object->existence_map, m->offset);
3110 #endif /* MACH_PAGEMAP */
3111 if(error & UPL_ABORT_DUMP_PAGES) {
3112 vm_page_free(m);
3113 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
3114 } else {
3115 PAGE_WAKEUP(m);
3116 }
3117 vm_page_unlock_queues();
3118 }
3119 }
3120 }
3121 vm_object_unlock(shadow_object);
3122 /* Remove all the pages from the map object so */
3123 /* vm_pageout_object_terminate will work properly. */
3124 while (!queue_empty(&upl->map_object->memq)) {
3125 vm_page_t p;
3126
3127 p = (vm_page_t) queue_first(&upl->map_object->memq);
3128
3129 assert(p->private);
3130 assert(p->pageout);
3131 p->pageout = FALSE;
3132 assert(!p->cleaning);
3133
3134 VM_PAGE_FREE(p);
3135 }
3136 upl_dealloc(upl);
3137 return KERN_SUCCESS;
3138 }
3139
3140 /* an option on commit should be wire */
3141 kern_return_t
3142 uc_upl_commit(
3143 upl_t upl,
3144 upl_page_info_t *page_list)
3145 {
3146 if (upl->flags & UPL_DEVICE_MEMORY)
3147 page_list = NULL;
3148 if ((upl->flags & UPL_CLEAR_DIRTY) ||
3149 (upl->flags & UPL_PAGE_SYNC_DONE)) {
3150 vm_object_t shadow_object = upl->map_object->shadow;
3151 vm_object_t object = upl->map_object;
3152 vm_object_offset_t target_offset;
3153 vm_size_t xfer_end;
3154
3155 vm_page_t t,m;
3156
3157 vm_object_lock(shadow_object);
3158
3159 target_offset = object->shadow_offset;
3160 xfer_end = upl->size + object->shadow_offset;
3161
3162 while(target_offset < xfer_end) {
3163 if ((t = vm_page_lookup(object,
3164 target_offset - object->shadow_offset))
3165 != NULL) {
3166 m = vm_page_lookup(
3167 shadow_object, target_offset);
3168 if(m != VM_PAGE_NULL) {
3169 if (upl->flags & UPL_CLEAR_DIRTY) {
3170 pmap_clear_modify(m->phys_addr);
3171 m->dirty = FALSE;
3172 }
3173 /* It is a part of the semantic of */
3174 /* COPYOUT_FROM UPLs that a commit */
3175 /* implies cache sync between the */
3176 /* vm page and the backing store */
3177 /* this can be used to strip the */
3178 /* precious bit as well as clean */
3179 if (upl->flags & UPL_PAGE_SYNC_DONE)
3180 m->precious = FALSE;
3181 }
3182 }
3183 target_offset += PAGE_SIZE_64;
3184 }
3185 vm_object_unlock(shadow_object);
3186 }
3187 if (page_list) {
3188 vm_object_t shadow_object = upl->map_object->shadow;
3189 vm_object_t object = upl->map_object;
3190 vm_object_offset_t target_offset;
3191 vm_size_t xfer_end;
3192 int entry;
3193
3194 vm_page_t t, m;
3195 upl_page_info_t *p;
3196
3197 vm_object_lock(shadow_object);
3198
3199 entry = 0;
3200 target_offset = object->shadow_offset;
3201 xfer_end = upl->size + object->shadow_offset;
3202
3203 while(target_offset < xfer_end) {
3204
3205 if ((t = vm_page_lookup(object,
3206 target_offset - object->shadow_offset))
3207 == NULL) {
3208 target_offset += PAGE_SIZE_64;
3209 entry++;
3210 continue;
3211 }
3212
3213 m = vm_page_lookup(shadow_object, target_offset);
3214 if(m != VM_PAGE_NULL) {
3215 p = &(page_list[entry]);
3216 if(page_list[entry].phys_addr &&
3217 p->pageout && !m->pageout) {
3218 vm_page_lock_queues();
3219 m->busy = TRUE;
3220 m->pageout = TRUE;
3221 vm_page_wire(m);
3222 vm_page_unlock_queues();
3223 } else if (page_list[entry].phys_addr &&
3224 !p->pageout && m->pageout) {
3225 vm_page_lock_queues();
3226 m->pageout = FALSE;
3227 m->absent = FALSE;
3228 m->overwriting = FALSE;
3229 vm_page_unwire(m);
3230 PAGE_WAKEUP_DONE(m);
3231 vm_page_unlock_queues();
3232 }
3233 page_list[entry].phys_addr = 0;
3234 }
3235 target_offset += PAGE_SIZE_64;
3236 entry++;
3237 }
3238
3239 vm_object_unlock(shadow_object);
3240 }
3241 upl_dealloc(upl);
3242 return KERN_SUCCESS;
3243 }
3244
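/*
 * An internal UPL carries its upl_page_info array immediately after
 * the upl structure itself (see upl_get_internal_pagelist_offset);
 * an external UPL expects the caller to supply any page list.
 */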
3245 upl_t
3246 upl_create(
3247 boolean_t internal)
3248 {
3249 upl_t upl;
3250
3251 if(internal) {
3252 upl = (upl_t)kalloc(sizeof(struct upl)
3253 + (sizeof(struct upl_page_info)*MAX_UPL_TRANSFER));
3254 } else {
3255 upl = (upl_t)kalloc(sizeof(struct upl));
3256 }
3257 upl->flags = 0;
3258 upl->src_object = NULL;
3259 upl->kaddr = (vm_offset_t)0;
3260 upl->size = 0;
3261 upl->map_object = NULL;
3262 upl->ref_count = 1;
3263 upl_lock_init(upl);
3264 #ifdef UBC_DEBUG
3265 upl->ubc_alias1 = 0;
3266 upl->ubc_alias2 = 0;
3267 #endif /* UBC_DEBUG */
3268 return(upl);
3269 }
3270
3271 void
3272 upl_destroy(
3273 upl_t upl)
3274 {
3275
3276 #ifdef UBC_DEBUG
3277 {
3278 upl_t upl_ele;
3279 vm_object_lock(upl->map_object->shadow);
3280 queue_iterate(&upl->map_object->shadow->uplq,
3281 upl_ele, upl_t, uplq) {
3282 if(upl_ele == upl) {
3283 queue_remove(&upl->map_object->shadow->uplq,
3284 upl_ele, upl_t, uplq);
3285 break;
3286 }
3287 }
3288 vm_object_unlock(upl->map_object->shadow);
3289 }
3290 #endif /* UBC_DEBUG */
3291 if(!(upl->flags & UPL_DEVICE_MEMORY))
3292 vm_object_deallocate(upl->map_object);
3293 if(upl->flags & UPL_INTERNAL) {
3294 kfree((vm_offset_t)upl,
3295 sizeof(struct upl) +
3296 (sizeof(struct upl_page_info) * MAX_UPL_TRANSFER));
3297 } else {
3298 kfree((vm_offset_t)upl, sizeof(struct upl));
3299 }
3300 }
3301
3302 vm_size_t
3303 upl_get_internal_pagelist_offset(void)
3304 {
3305 return sizeof(struct upl);
3306 }
3307
3308 void
3309 upl_set_dirty(
3310 upl_t upl)
3311 {
3312 upl->flags |= UPL_CLEAR_DIRTY;
3313 }
3314
3315 void
3316 upl_clear_dirty(
3317 upl_t upl)
3318 {
3319 upl->flags &= ~UPL_CLEAR_DIRTY;
3320 }
3321
3322
3323 #ifdef MACH_BSD
3324 boolean_t upl_page_present(upl_page_info_t *upl, int index);
3325 boolean_t upl_dirty_page(upl_page_info_t *upl, int index);
3326 boolean_t upl_valid_page(upl_page_info_t *upl, int index);
3327 vm_offset_t upl_phys_page(upl_page_info_t *upl, int index);
3328
3329 boolean_t upl_page_present(upl_page_info_t *upl, int index)
3330 {
3331 return(UPL_PAGE_PRESENT(upl, index));
3332 }
3333 boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
3334 {
3335 return(UPL_DIRTY_PAGE(upl, index));
3336 }
3337 boolean_t upl_valid_page(upl_page_info_t *upl, int index)
3338 {
3339 return(UPL_VALID_PAGE(upl, index));
3340 }
3341 vm_offset_t upl_phys_page(upl_page_info_t *upl, int index)
3342 {
3343 return((vm_offset_t)UPL_PHYS_PAGE(upl, index));
3344 }
3345
3346 void vm_countdirtypages(void)
3347 {
3348 vm_page_t m;
3349 int dpages;
3350 int pgopages;
3351 int precpages;
3352
3353
3354 dpages=0;
3355 pgopages=0;
3356 precpages=0;
3357
3358 vm_page_lock_queues();
3359 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
3360 do {
3361 if (m ==(vm_page_t )0) break;
3362
3363 if(m->dirty) dpages++;
3364 if(m->pageout) pgopages++;
3365 if(m->precious) precpages++;
3366
3367 m = (vm_page_t) queue_next(&m->pageq);
3368 if (m ==(vm_page_t )0) break;
3369
3370 } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
3371 vm_page_unlock_queues();
3372
3373 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
3374
3375 dpages=0;
3376 pgopages=0;
3377 precpages=0;
3378
3379 vm_page_lock_queues();
3380 m = (vm_page_t) queue_first(&vm_page_queue_active);
3381
3382 do {
3383 if(m == (vm_page_t )0) break;
3384 if(m->dirty) dpages++;
3385 if(m->pageout) pgopages++;
3386 if(m->precious) precpages++;
3387
3388 m = (vm_page_t) queue_next(&m->pageq);
3389 if(m == (vm_page_t )0) break;
3390
3391 } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
3392 vm_page_unlock_queues();
3393
3394 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
3395
3396 }
3397 #endif /* MACH_BSD */
3398
3399 #ifdef UBC_DEBUG
3400 kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
3401 {
3402 upl->ubc_alias1 = alias1;
3403 upl->ubc_alias2 = alias2;
3404 return KERN_SUCCESS;
3405 }
3406 int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
3407 {
3408 if(al)
3409 *al = upl->ubc_alias1;
3410 if(al2)
3411 *al2 = upl->ubc_alias2;
3412 return KERN_SUCCESS;
3413 }
3414 #endif /* UBC_DEBUG */
3415
3416
3417
3418 #if MACH_KDB
3419 #include <ddb/db_output.h>
3420 #include <ddb/db_print.h>
3421 #include <vm/vm_print.h>
3422
3423 #define printf kdbprintf
3424 extern int db_indent;
3425 void db_pageout(void);
3426
3427 void
3428 db_vm(void)
3429 {
3430 extern int vm_page_gobble_count;
3431 extern int vm_page_limbo_count, vm_page_limbo_real_count;
3432 extern int vm_page_pin_count;
3433
3434 iprintf("VM Statistics:\n");
3435 db_indent += 2;
3436 iprintf("pages:\n");
3437 db_indent += 2;
3438 iprintf("activ %5d inact %5d free %5d",
3439 vm_page_active_count, vm_page_inactive_count,
3440 vm_page_free_count);
3441 printf(" wire %5d gobbl %5d\n",
3442 vm_page_wire_count, vm_page_gobble_count);
3443 iprintf("laund %5d limbo %5d lim_r %5d pin %5d\n",
3444 vm_page_laundry_count, vm_page_limbo_count,
3445 vm_page_limbo_real_count, vm_page_pin_count);
3446 db_indent -= 2;
3447 iprintf("target:\n");
3448 db_indent += 2;
3449 iprintf("min %5d inact %5d free %5d",
3450 vm_page_free_min, vm_page_inactive_target,
3451 vm_page_free_target);
3452 printf(" resrv %5d\n", vm_page_free_reserved);
3453 db_indent -= 2;
3454
3455 iprintf("burst:\n");
3456 db_indent += 2;
3457 iprintf("max %5d min %5d wait %5d empty %5d\n",
3458 vm_pageout_burst_max, vm_pageout_burst_min,
3459 vm_pageout_burst_wait, vm_pageout_empty_wait);
3460 db_indent -= 2;
3461 iprintf("pause:\n");
3462 db_indent += 2;
3463 iprintf("count %5d max %5d\n",
3464 vm_pageout_pause_count, vm_pageout_pause_max);
3465 #if MACH_COUNTERS
3466 iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue);
3467 #endif /* MACH_COUNTERS */
3468 db_indent -= 2;
3469 db_pageout();
3470 db_indent -= 2;
3471 }
3472
3473 void
3474 db_pageout(void)
3475 {
3476 extern int c_limbo_page_free;
3477 extern int c_limbo_convert;
3478 #if MACH_COUNTERS
3479 extern int c_laundry_pages_freed;
3480 #endif /* MACH_COUNTERS */
3481
3482 iprintf("Pageout Statistics:\n");
3483 db_indent += 2;
3484 iprintf("active %5d inactv %5d\n",
3485 vm_pageout_active, vm_pageout_inactive);
3486 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
3487 vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
3488 vm_pageout_inactive_busy, vm_pageout_inactive_absent);
3489 iprintf("used %5d clean %5d dirty %5d\n",
3490 vm_pageout_inactive_used, vm_pageout_inactive_clean,
3491 vm_pageout_inactive_dirty);
3492 iprintf("pinned %5d limbo %5d setup_limbo %5d setup_unprep %5d\n",
3493 vm_pageout_inactive_pinned, vm_pageout_inactive_limbo,
3494 vm_pageout_setup_limbo, vm_pageout_setup_unprepped);
3495 iprintf("limbo_page_free %5d limbo_convert %5d\n",
3496 c_limbo_page_free, c_limbo_convert);
3497 #if MACH_COUNTERS
3498 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
3499 #endif /* MACH_COUNTERS */
3500 #if MACH_CLUSTER_STATS
3501 iprintf("Cluster Statistics:\n");
3502 db_indent += 2;
3503 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
3504 vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
3505 vm_pageout_cluster_collisions);
3506 iprintf("clusters %5d conversions %5d\n",
3507 vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
3508 db_indent -= 2;
3509 iprintf("Target Statistics:\n");
3510 db_indent += 2;
3511 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
3512 vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
3513 vm_pageout_target_page_freed);
3514 iprintf("page_pinned %5d page_limbo %5d\n",
3515 vm_pageout_target_page_pinned, vm_pageout_target_page_limbo);
3516 db_indent -= 2;
3517 #endif /* MACH_CLUSTER_STATS */
3518 db_indent -= 2;
3519 }
3520
3521 #if MACH_CLUSTER_STATS
3522 unsigned long vm_pageout_cluster_dirtied = 0;
3523 unsigned long vm_pageout_cluster_cleaned = 0;
3524 unsigned long vm_pageout_cluster_collisions = 0;
3525 unsigned long vm_pageout_cluster_clusters = 0;
3526 unsigned long vm_pageout_cluster_conversions = 0;
3527 unsigned long vm_pageout_target_collisions = 0;
3528 unsigned long vm_pageout_target_page_dirtied = 0;
3529 unsigned long vm_pageout_target_page_freed = 0;
3530 unsigned long vm_pageout_target_page_pinned = 0;
3531 unsigned long vm_pageout_target_page_limbo = 0;
3532 #define CLUSTER_STAT(clause) clause
3533 #else /* MACH_CLUSTER_STATS */
3534 #define CLUSTER_STAT(clause)
3535 #endif /* MACH_CLUSTER_STATS */
3536
3537 #endif /* MACH_KDB */