1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: vm/vm_pageout.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 * Date: 1985
59 *
60 * The proverbial page-out daemon.
61 */
62
63 #include <mach_pagemap.h>
64 #include <mach_cluster_stats.h>
65 #include <mach_kdb.h>
66 #include <advisory_pageout.h>
67
68 #include <mach/mach_types.h>
69 #include <mach/memory_object.h>
70 #include <mach/memory_object_default.h>
71 #include <mach/memory_object_control_server.h>
72 #include <mach/mach_host_server.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_statistics.h>
75 #include <kern/host_statistics.h>
76 #include <kern/counters.h>
77 #include <kern/thread.h>
78 #include <kern/xpr.h>
79 #include <vm/pmap.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_pageout.h>
84 #include <machine/vm_tuning.h>
85 #include <kern/misc_protos.h>
86
87 extern ipc_port_t memory_manager_default;
88
89 #ifndef VM_PAGE_LAUNDRY_MAX
90 #define VM_PAGE_LAUNDRY_MAX 6 /* outstanding DMM page cleans */
91 #endif  /* VM_PAGE_LAUNDRY_MAX */
92
93 #ifndef VM_PAGEOUT_BURST_MAX
94 #define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */
95 #endif /* VM_PAGEOUT_BURST_MAX */
96
97 #ifndef VM_PAGEOUT_DISCARD_MAX
98 #define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */
99 #endif /* VM_PAGEOUT_DISCARD_MAX */
100
101 #ifndef VM_PAGEOUT_BURST_WAIT
102 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
103 #endif /* VM_PAGEOUT_BURST_WAIT */
104
105 #ifndef VM_PAGEOUT_EMPTY_WAIT
106 #define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
107 #endif /* VM_PAGEOUT_EMPTY_WAIT */
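
/*
 * Worked example (illustrative figures only): after writing a full burst
 * of VM_PAGEOUT_BURST_MAX = 32 pages, vm_pageout_scan sleeps for
 * 32 * VM_PAGEOUT_BURST_WAIT = 960 milliseconds; if both inactive queues
 * are empty, it sleeps for at least VM_PAGEOUT_EMPTY_WAIT (200 ms) instead.
 */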
108
109 /*
110 * To obtain a reasonable LRU approximation, the inactive queue
111 * needs to be large enough to give pages on it a chance to be
112 * referenced a second time. This macro defines the fraction
113 * of active+inactive pages that should be inactive.
114 * The pageout daemon uses it to update vm_page_inactive_target.
115 *
116 * If vm_page_free_count falls below vm_page_free_target and
117 * vm_page_inactive_count is below vm_page_inactive_target,
118 * then the pageout daemon starts running.
119 */
120
121 #ifndef VM_PAGE_INACTIVE_TARGET
122 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
123 #endif /* VM_PAGE_INACTIVE_TARGET */
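
/*
 * Worked example (illustrative figures only): with roughly 30000
 * active+inactive pages, VM_PAGE_INACTIVE_TARGET(30000) = 10000, so the
 * pageout daemon tries to keep about a third of those pages on the
 * inactive queue, where they can age and be referenced a second time.
 */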
124
125 /*
126 * Once the pageout daemon starts running, it keeps going
127 * until vm_page_free_count meets or exceeds vm_page_free_target.
128 */
129
130 #ifndef VM_PAGE_FREE_TARGET
131 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
132 #endif /* VM_PAGE_FREE_TARGET */
133
134 /*
135 * The pageout daemon always starts running once vm_page_free_count
136 * falls below vm_page_free_min.
137 */
138
139 #ifndef VM_PAGE_FREE_MIN
140 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
141 #endif /* VM_PAGE_FREE_MIN */
142
143 /*
144 * When vm_page_free_count falls below vm_page_free_reserved,
145 * only vm-privileged threads can allocate pages. vm-privilege
146 * allows the pageout daemon and default pager (and any other
147 * associated threads needed for default pageout) to continue
148 * operation by dipping into the reserved pool of pages.
149 */
150
151 #ifndef VM_PAGE_FREE_RESERVED
152 #define VM_PAGE_FREE_RESERVED \
153 ((16 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
154 #endif /* VM_PAGE_FREE_RESERVED */
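
/*
 * Worked example (assuming a 4-processor configuration, i.e. NCPUS == 4):
 * VM_PAGE_FREE_RESERVED = (16 * 6) + 4 = 100 pages, so once fewer than
 * about 100 pages are free, only vm-privileged threads such as the
 * pageout daemon and default pager may allocate.
 */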
155
156 /*
157  * Exported variable used to broadcast the activation of the pageout scan.
158 * Working Set uses this to throttle its use of pmap removes. In this
159 * way, code which runs within memory in an uncontested context does
160 * not keep encountering soft faults.
161 */
162
163 unsigned int vm_pageout_scan_event_counter = 0;
164
165 /*
166 * Forward declarations for internal routines.
167 */
168 extern void vm_pageout_continue(void);
169 extern void vm_pageout_scan(void);
170 extern void vm_pageout_throttle(vm_page_t m);
171 extern vm_page_t vm_pageout_cluster_page(
172 vm_object_t object,
173 vm_object_offset_t offset,
174 boolean_t precious_clean);
175
176 unsigned int vm_pageout_reserved_internal = 0;
177 unsigned int vm_pageout_reserved_really = 0;
178
179 unsigned int vm_page_laundry_max = 0; /* # of clusters outstanding */
180 unsigned int vm_page_laundry_min = 0;
181 unsigned int vm_pageout_burst_max = 0;
182 unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */
183 unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
184 unsigned int vm_pageout_burst_min = 0;
185 unsigned int vm_pageout_pause_count = 0;
186 unsigned int vm_pageout_pause_max = 0;
187 unsigned int vm_free_page_pause = 100; /* milliseconds */
188
189 /*
190 * Protection against zero fill flushing live working sets derived
191 * from existing backing store and files
192 */
193 unsigned int vm_accellerate_zf_pageout_trigger = 400;
194 unsigned int vm_zf_iterator;
195 unsigned int vm_zf_iterator_count = 40;
196 unsigned int last_page_zf;
197 unsigned int vm_zf_count = 0;
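
/*
 * For example (illustrative only): once vm_zf_count reaches the trigger
 * (vm_accellerate_zf_pageout_trigger, 400 zero-fill pages), vm_pageout_scan
 * concentrates on draining the zero-fill queue, falling back to the
 * ordinary inactive queue roughly once every vm_zf_iterator_count (40)
 * iterations.
 */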
198
199 /*
200 * These variables record the pageout daemon's actions:
201 * how many pages it looks at and what happens to those pages.
202 * No locking needed because only one thread modifies the variables.
203 */
204
205 unsigned int vm_pageout_active = 0; /* debugging */
206 unsigned int vm_pageout_inactive = 0; /* debugging */
207 unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
208 unsigned int vm_pageout_inactive_forced = 0; /* debugging */
209 unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
210 unsigned int vm_pageout_inactive_avoid = 0; /* debugging */
211 unsigned int vm_pageout_inactive_busy = 0; /* debugging */
212 unsigned int vm_pageout_inactive_absent = 0; /* debugging */
213 unsigned int vm_pageout_inactive_used = 0; /* debugging */
214 unsigned int vm_pageout_inactive_clean = 0; /* debugging */
215 unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
216 unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
217 unsigned int vm_stat_discard = 0; /* debugging */
218 unsigned int vm_stat_discard_sent = 0; /* debugging */
219 unsigned int vm_stat_discard_failure = 0; /* debugging */
220 unsigned int vm_stat_discard_throttle = 0; /* debugging */
221 unsigned int vm_pageout_scan_active_emm_throttle = 0; /* debugging */
222 unsigned int vm_pageout_scan_active_emm_throttle_success = 0; /* debugging */
223 unsigned int vm_pageout_scan_active_emm_throttle_failure = 0; /* debugging */
224 unsigned int vm_pageout_scan_inactive_emm_throttle = 0; /* debugging */
225 unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0; /* debugging */
226 unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0; /* debugging */
227
228
229 unsigned int vm_pageout_out_of_line = 0;
230 unsigned int vm_pageout_in_place = 0;
231 /*
232 * Routine: vm_pageout_object_allocate
233 * Purpose:
234 * Allocate an object for use as out-of-line memory in a
235 * data_return/data_initialize message.
236 * The page must be in an unlocked object.
237 *
238 * If the page belongs to a trusted pager, cleaning in place
239 * will be used, which utilizes a special "pageout object"
240 * containing private alias pages for the real page frames.
241 * Untrusted pagers use normal out-of-line memory.
242 */
243 vm_object_t
244 vm_pageout_object_allocate(
245 vm_page_t m,
246 vm_size_t size,
247 vm_object_offset_t offset)
248 {
249 vm_object_t object = m->object;
250 vm_object_t new_object;
251
252 assert(object->pager_ready);
253
254 if (object->pager_trusted || object->internal)
255 vm_pageout_throttle(m);
256
257 new_object = vm_object_allocate(size);
258
259 if (object->pager_trusted) {
260 assert (offset < object->size);
261
262 vm_object_lock(new_object);
263 new_object->pageout = TRUE;
264 new_object->shadow = object;
265 new_object->can_persist = FALSE;
266 new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
267 new_object->shadow_offset = offset;
268 vm_object_unlock(new_object);
269
270 /*
271 * Take a paging reference on the object. This will be dropped
272 * in vm_pageout_object_terminate()
273 */
274 vm_object_lock(object);
275 vm_object_paging_begin(object);
276 vm_object_unlock(object);
277
278 vm_pageout_in_place++;
279 } else
280 vm_pageout_out_of_line++;
281 return(new_object);
282 }
283
284 #if MACH_CLUSTER_STATS
285 unsigned long vm_pageout_cluster_dirtied = 0;
286 unsigned long vm_pageout_cluster_cleaned = 0;
287 unsigned long vm_pageout_cluster_collisions = 0;
288 unsigned long vm_pageout_cluster_clusters = 0;
289 unsigned long vm_pageout_cluster_conversions = 0;
290 unsigned long vm_pageout_target_collisions = 0;
291 unsigned long vm_pageout_target_page_dirtied = 0;
292 unsigned long vm_pageout_target_page_freed = 0;
293 #define CLUSTER_STAT(clause) clause
294 #else /* MACH_CLUSTER_STATS */
295 #define CLUSTER_STAT(clause)
296 #endif /* MACH_CLUSTER_STATS */
297
298 /*
299 * Routine: vm_pageout_object_terminate
300 * Purpose:
301 * Destroy the pageout_object allocated by
302 * vm_pageout_object_allocate(), and perform all of the
303 * required cleanup actions.
304 *
305 * In/Out conditions:
306 * The object must be locked, and will be returned locked.
307 */
308 void
309 vm_pageout_object_terminate(
310 vm_object_t object)
311 {
312 vm_object_t shadow_object;
313
314 /*
315 * Deal with the deallocation (last reference) of a pageout object
316 * (used for cleaning-in-place) by dropping the paging references/
317 * freeing pages in the original object.
318 */
319
320 assert(object->pageout);
321 shadow_object = object->shadow;
322 vm_object_lock(shadow_object);
323
324 while (!queue_empty(&object->memq)) {
325 vm_page_t p, m;
326 vm_object_offset_t offset;
327
328 p = (vm_page_t) queue_first(&object->memq);
329
330 assert(p->private);
331 assert(p->pageout);
332 p->pageout = FALSE;
333 assert(!p->cleaning);
334
335 offset = p->offset;
336 VM_PAGE_FREE(p);
337 p = VM_PAGE_NULL;
338
339 m = vm_page_lookup(shadow_object,
340 offset + object->shadow_offset);
341
342 if(m == VM_PAGE_NULL)
343 continue;
344 assert(m->cleaning);
345 /* used as a trigger on upl_commit etc to recognize the */
346 		/* pageout daemon's subsequent desire to pageout a cleaning */
347 		/* page. When the bit is on, the upl commit code will */
348 /* respect the pageout bit in the target page over the */
349 /* caller's page list indication */
350 m->dump_cleaning = FALSE;
351
352 /*
353 * Account for the paging reference taken when
354 * m->cleaning was set on this page.
355 */
356 vm_object_paging_end(shadow_object);
357 assert((m->dirty) || (m->precious) ||
358 (m->busy && m->cleaning));
359
360 /*
361 * Handle the trusted pager throttle.
362 */
363 vm_page_lock_queues();
364 if (m->laundry) {
365 vm_page_laundry_count--;
366 m->laundry = FALSE;
367 if (vm_page_laundry_count < vm_page_laundry_min) {
368 vm_page_laundry_min = 0;
369 thread_wakeup((event_t) &vm_page_laundry_count);
370 }
371 }
372
373 /*
374 * Handle the "target" page(s). These pages are to be freed if
375 * successfully cleaned. Target pages are always busy, and are
376 		 * wired exactly once. The initial target pages are not mapped
377 		 * (so cannot be referenced or modified), but converted target
378 * pages may have been modified between the selection as an
379 * adjacent page and conversion to a target.
380 */
381 if (m->pageout) {
382 assert(m->busy);
383 assert(m->wire_count == 1);
384 m->cleaning = FALSE;
385 m->pageout = FALSE;
386 #if MACH_CLUSTER_STATS
387 if (m->wanted) vm_pageout_target_collisions++;
388 #endif
389 /*
390 * Revoke all access to the page. Since the object is
391 * locked, and the page is busy, this prevents the page
392 * from being dirtied after the pmap_is_modified() call
393 * returns.
394 */
395 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
396
397 /*
398 			 * Since the page is left "dirty" but "not modified", we
399 * can detect whether the page was redirtied during
400 * pageout by checking the modify state.
401 */
402 m->dirty = pmap_is_modified(m->phys_addr);
403
404 if (m->dirty) {
405 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
406 vm_page_unwire(m);/* reactivates */
407 VM_STAT(reactivations++);
408 PAGE_WAKEUP_DONE(m);
409 } else {
410 CLUSTER_STAT(vm_pageout_target_page_freed++;)
411 vm_page_free(m);/* clears busy, etc. */
412 }
413 vm_page_unlock_queues();
414 continue;
415 }
416 /*
417 * Handle the "adjacent" pages. These pages were cleaned in
418 * place, and should be left alone.
419 		 * If the page has been referenced, reactivate it;
420 		 * otherwise deactivate it.
421 */
422 if (!m->active && !m->inactive && !m->private) {
423 if (m->reference)
424 vm_page_activate(m);
425 else
426 vm_page_deactivate(m);
427 }
428 if((m->busy) && (m->cleaning)) {
429
430 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
431 m->busy = FALSE;
432
433 /* We do not re-set m->dirty ! */
434 /* The page was busy so no extraneous activity */
435 			/* could have occurred. COPY_INTO is a read into the */
436 /* new pages. CLEAN_IN_PLACE does actually write */
437 /* out the pages but handling outside of this code */
438 /* will take care of resetting dirty. We clear the */
439 			/* modify bit, however, for the Programmed I/O case. */
440 pmap_clear_modify(m->phys_addr);
441 if(m->absent) {
442 m->absent = FALSE;
443 if(shadow_object->absent_count == 1)
444 vm_object_absent_release(shadow_object);
445 else
446 shadow_object->absent_count--;
447 }
448 m->overwriting = FALSE;
449 } else if (m->overwriting) {
450 /* alternate request page list, write to page_list */
451 /* case. Occurs when the original page was wired */
452 /* at the time of the list request */
453 assert(m->wire_count != 0);
454 vm_page_unwire(m);/* reactivates */
455 m->overwriting = FALSE;
456 } else {
457 /*
458 * Set the dirty state according to whether or not the page was
459 * modified during the pageout. Note that we purposefully do
460 * NOT call pmap_clear_modify since the page is still mapped.
461 			 * If the page were to be dirtied between the 2 calls,
462 * this fact would be lost. This code is only necessary to
463 * maintain statistics, since the pmap module is always
464 * consulted if m->dirty is false.
465 */
466 #if MACH_CLUSTER_STATS
467 m->dirty = pmap_is_modified(m->phys_addr);
468
469 if (m->dirty) vm_pageout_cluster_dirtied++;
470 else vm_pageout_cluster_cleaned++;
471 if (m->wanted) vm_pageout_cluster_collisions++;
472 #else
473 m->dirty = 0;
474 #endif
475 }
476 m->cleaning = FALSE;
477
478
479 /*
480 * Wakeup any thread waiting for the page to be un-cleaning.
481 */
482 PAGE_WAKEUP(m);
483 vm_page_unlock_queues();
484 }
485 /*
486 	 * Account for the paging reference taken in vm_pageout_object_allocate().
487 */
488 vm_object_paging_end(shadow_object);
489 vm_object_unlock(shadow_object);
490
491 assert(object->ref_count == 0);
492 assert(object->paging_in_progress == 0);
493 assert(object->resident_page_count == 0);
494 return;
495 }
496
497 /*
498 * Routine: vm_pageout_setup
499 * Purpose:
500 * Set up a page for pageout (clean & flush).
501 *
502 * Move the page to a new object, as part of which it will be
503 * sent to its memory manager in a memory_object_data_write or
504 * memory_object_initialize message.
505 *
506 * The "new_object" and "new_offset" arguments
507 * indicate where the page should be moved.
508 *
509 * In/Out conditions:
510 * The page in question must not be on any pageout queues,
511 * and must be busy. The object to which it belongs
512 * must be unlocked, and the caller must hold a paging
513 * reference to it. The new_object must not be locked.
514 *
515 * This routine returns a pointer to a place-holder page,
516 * inserted at the same offset, to block out-of-order
517 * requests for the page. The place-holder page must
518 * be freed after the data_write or initialize message
519 * has been sent.
520 *
521 * The original page is put on a paging queue and marked
522 * not busy on exit.
523 */
524 vm_page_t
525 vm_pageout_setup(
526 register vm_page_t m,
527 register vm_object_t new_object,
528 vm_object_offset_t new_offset)
529 {
530 register vm_object_t old_object = m->object;
531 vm_object_offset_t paging_offset;
532 vm_object_offset_t offset;
533 register vm_page_t holding_page;
534 register vm_page_t new_m;
535 register vm_page_t new_page;
536 boolean_t need_to_wire = FALSE;
537
538
539 XPR(XPR_VM_PAGEOUT,
540 "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
541 (integer_t)m->object, (integer_t)m->offset,
542 (integer_t)m, (integer_t)new_object,
543 (integer_t)new_offset);
544 assert(m && m->busy && !m->absent && !m->fictitious && !m->error &&
545 !m->restart);
546
547 assert(m->dirty || m->precious);
548
549 /*
550 * Create a place-holder page where the old one was, to prevent
551 * attempted pageins of this page while we're unlocked.
552 */
553 VM_PAGE_GRAB_FICTITIOUS(holding_page);
554
555 vm_object_lock(old_object);
556
557 offset = m->offset;
558 paging_offset = offset + old_object->paging_offset;
559
560 if (old_object->pager_trusted) {
561 /*
562 * This pager is trusted, so we can clean this page
563 * in place. Leave it in the old object, and mark it
564 * cleaning & pageout.
565 */
566 new_m = holding_page;
567 holding_page = VM_PAGE_NULL;
568
569 /*
570 * Set up new page to be private shadow of real page.
571 */
572 new_m->phys_addr = m->phys_addr;
573 new_m->fictitious = FALSE;
574 new_m->pageout = TRUE;
575
576 /*
577 * Mark real page as cleaning (indicating that we hold a
578 * paging reference to be released via m_o_d_r_c) and
579 * pageout (indicating that the page should be freed
580 * when the pageout completes).
581 */
582 pmap_clear_modify(m->phys_addr);
583 vm_page_lock_queues();
584 new_m->private = TRUE;
585 vm_page_wire(new_m);
586 m->cleaning = TRUE;
587 m->pageout = TRUE;
588
589 vm_page_wire(m);
590 assert(m->wire_count == 1);
591 vm_page_unlock_queues();
592
593 m->dirty = TRUE;
594 m->precious = FALSE;
595 m->page_lock = VM_PROT_NONE;
596 m->unusual = FALSE;
597 m->unlock_request = VM_PROT_NONE;
598 } else {
599 /*
600 * Cannot clean in place, so rip the old page out of the
601 * object, and stick the holding page in. Set new_m to the
602 * page in the new object.
603 */
604 vm_page_lock_queues();
605 VM_PAGE_QUEUES_REMOVE(m);
606 vm_page_remove(m);
607
608 vm_page_insert(holding_page, old_object, offset);
609 vm_page_unlock_queues();
610
611 m->dirty = TRUE;
612 m->precious = FALSE;
613 new_m = m;
614 new_m->page_lock = VM_PROT_NONE;
615 new_m->unlock_request = VM_PROT_NONE;
616
617 if (old_object->internal)
618 need_to_wire = TRUE;
619 }
620 /*
621 * Record that this page has been written out
622 */
623 #if MACH_PAGEMAP
624 vm_external_state_set(old_object->existence_map, offset);
625 #endif /* MACH_PAGEMAP */
626
627 vm_object_unlock(old_object);
628
629 vm_object_lock(new_object);
630
631 /*
632 	 * Put the page into the new object. If it is not wired
633 	 * (i.e., if it's the real page), it will be activated.
634 */
635
636 vm_page_lock_queues();
637 vm_page_insert(new_m, new_object, new_offset);
638 if (need_to_wire)
639 vm_page_wire(new_m);
640 else
641 vm_page_activate(new_m);
642 PAGE_WAKEUP_DONE(new_m);
643 vm_page_unlock_queues();
644
645 vm_object_unlock(new_object);
646
647 /*
648 * Return the placeholder page to simplify cleanup.
649 */
650 return (holding_page);
651 }
652
653 /*
654 * Routine: vm_pageclean_setup
655 *
656 * Purpose: setup a page to be cleaned (made non-dirty), but not
657 * necessarily flushed from the VM page cache.
658 * This is accomplished by cleaning in place.
659 *
660 * The page must not be busy, and the object and page
661 * queues must be locked.
662 *
663 */
664 void
665 vm_pageclean_setup(
666 vm_page_t m,
667 vm_page_t new_m,
668 vm_object_t new_object,
669 vm_object_offset_t new_offset)
670 {
671 vm_object_t old_object = m->object;
672 assert(!m->busy);
673 assert(!m->cleaning);
674
675 XPR(XPR_VM_PAGEOUT,
676 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
677 (integer_t)old_object, m->offset, (integer_t)m,
678 (integer_t)new_m, new_offset);
679
680 pmap_clear_modify(m->phys_addr);
681 vm_object_paging_begin(old_object);
682
683 /*
684 * Record that this page has been written out
685 */
686 #if MACH_PAGEMAP
687 vm_external_state_set(old_object->existence_map, m->offset);
688 #endif /*MACH_PAGEMAP*/
689
690 /*
691 * Mark original page as cleaning in place.
692 */
693 m->cleaning = TRUE;
694 m->dirty = TRUE;
695 m->precious = FALSE;
696
697 /*
698 * Convert the fictitious page to a private shadow of
699 * the real page.
700 */
701 assert(new_m->fictitious);
702 new_m->fictitious = FALSE;
703 new_m->private = TRUE;
704 new_m->pageout = TRUE;
705 new_m->phys_addr = m->phys_addr;
706 vm_page_wire(new_m);
707
708 vm_page_insert(new_m, new_object, new_offset);
709 assert(!new_m->wanted);
710 new_m->busy = FALSE;
711 }
712
713 void
714 vm_pageclean_copy(
715 vm_page_t m,
716 vm_page_t new_m,
717 vm_object_t new_object,
718 vm_object_offset_t new_offset)
719 {
720 XPR(XPR_VM_PAGEOUT,
721 "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
722 m, new_m, new_object, new_offset, 0);
723
724 assert((!m->busy) && (!m->cleaning));
725
726 assert(!new_m->private && !new_m->fictitious);
727
728 pmap_clear_modify(m->phys_addr);
729
730 m->busy = TRUE;
731 vm_object_paging_begin(m->object);
732 vm_page_unlock_queues();
733 vm_object_unlock(m->object);
734
735 /*
736 * Copy the original page to the new page.
737 */
738 vm_page_copy(m, new_m);
739
740 /*
741 * Mark the old page as clean. A request to pmap_is_modified
742 * will get the right answer.
743 */
744 vm_object_lock(m->object);
745 m->dirty = FALSE;
746
747 vm_object_paging_end(m->object);
748
749 vm_page_lock_queues();
750 if (!m->active && !m->inactive)
751 vm_page_activate(m);
752 PAGE_WAKEUP_DONE(m);
753
754 vm_page_insert(new_m, new_object, new_offset);
755 vm_page_activate(new_m);
756 new_m->busy = FALSE; /* No other thread can be waiting */
757 }
758
759
760 /*
761 * Routine: vm_pageout_initialize_page
762 * Purpose:
763 * Causes the specified page to be initialized in
764 * the appropriate memory object. This routine is used to push
765 * pages into a copy-object when they are modified in the
766 * permanent object.
767 *
768 * The page is moved to a temporary object and paged out.
769 *
770 * In/out conditions:
771 * The page in question must not be on any pageout queues.
772 * The object to which it belongs must be locked.
773 * The page must be busy, but not hold a paging reference.
774 *
775 * Implementation:
776 * Move this page to a completely new object.
777 */
778 void
779 vm_pageout_initialize_page(
780 vm_page_t m)
781 {
782 vm_map_copy_t copy;
783 vm_object_t new_object;
784 vm_object_t object;
785 vm_object_offset_t paging_offset;
786 vm_page_t holding_page;
787
788
789 XPR(XPR_VM_PAGEOUT,
790 "vm_pageout_initialize_page, page 0x%X\n",
791 (integer_t)m, 0, 0, 0, 0);
792 assert(m->busy);
793
794 /*
795 * Verify that we really want to clean this page
796 */
797 assert(!m->absent);
798 assert(!m->error);
799 assert(m->dirty);
800
801 /*
802 * Create a paging reference to let us play with the object.
803 */
804 object = m->object;
805 paging_offset = m->offset + object->paging_offset;
806 vm_object_paging_begin(object);
807 vm_object_unlock(object);
808 if (m->absent || m->error || m->restart ||
809 (!m->dirty && !m->precious)) {
810 VM_PAGE_FREE(m);
811 panic("reservation without pageout?"); /* alan */
812 return;
813 }
814
815 /* set the page for future call to vm_fault_list_request */
816 holding_page = NULL;
817 vm_object_lock(m->object);
818 vm_page_lock_queues();
819 pmap_clear_modify(m->phys_addr);
820 m->dirty = TRUE;
821 m->busy = TRUE;
822 m->list_req_pending = TRUE;
823 m->cleaning = TRUE;
824 m->pageout = TRUE;
825 vm_page_wire(m);
826 vm_page_unlock_queues();
827 vm_object_unlock(m->object);
828 vm_pageout_throttle(m);
829
830 /*
831 * Write the data to its pager.
832 * Note that the data is passed by naming the new object,
833 * not a virtual address; the pager interface has been
834 * manipulated to use the "internal memory" data type.
835 * [The object reference from its allocation is donated
836 * to the eventual recipient.]
837 */
838 memory_object_data_initialize(object->pager,
839 paging_offset,
840 PAGE_SIZE);
841
842 vm_object_lock(object);
843 }
844
845 #if MACH_CLUSTER_STATS
846 #define MAXCLUSTERPAGES 16
847 struct {
848 unsigned long pages_in_cluster;
849 unsigned long pages_at_higher_offsets;
850 unsigned long pages_at_lower_offsets;
851 } cluster_stats[MAXCLUSTERPAGES];
852 #endif /* MACH_CLUSTER_STATS */
853
854 boolean_t allow_clustered_pageouts = FALSE;
855
856 /*
857 * vm_pageout_cluster:
858 *
859 * Given a page, page it out, and attempt to clean adjacent pages
860 * in the same operation.
861 *
862 * The page must be busy, and the object unlocked w/ paging reference
863 * to prevent deallocation or collapse. The page must not be on any
864 * pageout queue.
865 */
866 void
867 vm_pageout_cluster(
868 vm_page_t m)
869 {
870 vm_object_t object = m->object;
871 vm_object_offset_t offset = m->offset; /* from vm_object start */
872 vm_object_offset_t paging_offset = m->offset + object->paging_offset;
873 vm_object_t new_object;
874 vm_object_offset_t new_offset;
875 vm_size_t cluster_size;
876 vm_object_offset_t cluster_offset; /* from memory_object start */
877 vm_object_offset_t cluster_lower_bound; /* from vm_object_start */
878 vm_object_offset_t cluster_upper_bound; /* from vm_object_start */
879 vm_object_offset_t cluster_start, cluster_end;/* from vm_object start */
880 vm_object_offset_t offset_within_cluster;
881 vm_size_t length_of_data;
882 vm_page_t friend, holding_page;
883 kern_return_t rc;
884 boolean_t precious_clean = TRUE;
885 int pages_in_cluster;
886
887 CLUSTER_STAT(int pages_at_higher_offsets = 0;)
888 CLUSTER_STAT(int pages_at_lower_offsets = 0;)
889
890 XPR(XPR_VM_PAGEOUT,
891 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
892 (integer_t)object, offset, (integer_t)m, 0, 0);
893
894 CLUSTER_STAT(vm_pageout_cluster_clusters++;)
895 /*
896 * Only a certain kind of page is appreciated here.
897 */
898 assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
899 assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
900
901 vm_object_lock(object);
902 cluster_size = object->cluster_size;
903
904 assert(cluster_size >= PAGE_SIZE);
905 if (cluster_size < PAGE_SIZE) cluster_size = PAGE_SIZE;
906 assert(object->pager_created && object->pager_initialized);
907 assert(object->internal || object->pager_ready);
908
909 if (m->precious && !m->dirty)
910 precious_clean = TRUE;
911
912 if (!object->pager_trusted || !allow_clustered_pageouts)
913 cluster_size = PAGE_SIZE;
914 vm_object_unlock(object);
915
916 cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1);
917 /* bytes from beginning of cluster */
918 /*
919 * Due to unaligned mappings, we have to be careful
920 * of negative offsets into the VM object. Clip the cluster
921 * boundary to the VM object, not the memory object.
922 */
923 if (offset > cluster_offset) {
924 cluster_lower_bound = offset - cluster_offset;
925 /* from vm_object */
926 } else {
927 cluster_lower_bound = 0;
928 }
929 cluster_upper_bound = (offset - cluster_offset) +
930 (vm_object_offset_t)cluster_size;
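
	/*
	 * Worked example (illustrative figures only): with a 64KB cluster
	 * (cluster_size = 0x10000), object->paging_offset = 0x1000 and
	 * m->offset = 0x5000, paging_offset is 0x6000 and cluster_offset is
	 * 0x6000 & 0xffff = 0x6000. Since offset (0x5000) is not greater
	 * than cluster_offset, the lower bound is clipped to 0 and the upper
	 * bound becomes (0x5000 - 0x6000) + 0x10000 = 0xf000, i.e. the
	 * cluster is confined to the start of the VM object.
	 */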
931
932 /* set the page for future call to vm_fault_list_request */
933 holding_page = NULL;
934 vm_object_lock(m->object);
935 vm_page_lock_queues();
936 m->busy = TRUE;
937 m->list_req_pending = TRUE;
938 m->cleaning = TRUE;
939 m->pageout = TRUE;
940 vm_page_wire(m);
941 vm_page_unlock_queues();
942 vm_object_unlock(m->object);
943 vm_pageout_throttle(m);
944
945 /*
946 * Search backward for adjacent eligible pages to clean in
947 * this operation.
948 */
949
950 cluster_start = offset;
951 if (offset) { /* avoid wrap-around at zero */
952 for (cluster_start = offset - PAGE_SIZE_64;
953 cluster_start >= cluster_lower_bound;
954 cluster_start -= PAGE_SIZE_64) {
955 assert(cluster_size > PAGE_SIZE);
956
957 vm_object_lock(object);
958 vm_page_lock_queues();
959
960 if ((friend = vm_pageout_cluster_page(object, cluster_start,
961 precious_clean)) == VM_PAGE_NULL) {
962 vm_page_unlock_queues();
963 vm_object_unlock(object);
964 break;
965 }
966 new_offset = (cluster_start + object->paging_offset)
967 & (cluster_size - 1);
968
969 assert(new_offset < cluster_offset);
970 m->list_req_pending = TRUE;
971 m->cleaning = TRUE;
972 /* do nothing except advance the write request; all we really need to */
973 /* do is push the target page and let the code at the other end decide */
974 /* what is really the right size */
975 if (vm_page_free_count <= vm_page_free_reserved) {
976 m->busy = TRUE;
977 m->pageout = TRUE;
978 vm_page_wire(m);
979 }
980
981 vm_page_unlock_queues();
982 vm_object_unlock(object);
983 if(m->dirty || m->object->internal) {
984 CLUSTER_STAT(pages_at_lower_offsets++;)
985 }
986
987 }
988 cluster_start += PAGE_SIZE_64;
989 }
990 assert(cluster_start >= cluster_lower_bound);
991 assert(cluster_start <= offset);
992 /*
993 * Search forward for adjacent eligible pages to clean in
994 * this operation.
995 */
996 for (cluster_end = offset + PAGE_SIZE_64;
997 cluster_end < cluster_upper_bound;
998 cluster_end += PAGE_SIZE_64) {
999 assert(cluster_size > PAGE_SIZE);
1000
1001 vm_object_lock(object);
1002 vm_page_lock_queues();
1003
1004 if ((friend = vm_pageout_cluster_page(object, cluster_end,
1005 precious_clean)) == VM_PAGE_NULL) {
1006 vm_page_unlock_queues();
1007 vm_object_unlock(object);
1008 break;
1009 }
1010 new_offset = (cluster_end + object->paging_offset)
1011 & (cluster_size - 1);
1012
1013 assert(new_offset < cluster_size);
1014 m->list_req_pending = TRUE;
1015 m->cleaning = TRUE;
1016 /* do nothing except advance the write request; all we really need to */
1017 /* do is push the target page and let the code at the other end decide */
1018 /* what is really the right size */
1019 if (vm_page_free_count <= vm_page_free_reserved) {
1020 m->busy = TRUE;
1021 m->pageout = TRUE;
1022 vm_page_wire(m);
1023 }
1024
1025 vm_page_unlock_queues();
1026 vm_object_unlock(object);
1027
1028 if(m->dirty || m->object->internal) {
1029 CLUSTER_STAT(pages_at_higher_offsets++;)
1030 }
1031 }
1032 assert(cluster_end <= cluster_upper_bound);
1033 assert(cluster_end >= offset + PAGE_SIZE);
1034
1035 /*
1036 	 * (offset - cluster_offset) is the beginning of the cluster,
1037 * relative to vm_object start.
1038 */
1039 offset_within_cluster = cluster_start - (offset - cluster_offset);
1040 length_of_data = cluster_end - cluster_start;
1041
1042 assert(offset_within_cluster < cluster_size);
1043 assert((offset_within_cluster + length_of_data) <= cluster_size);
1044
1045 rc = KERN_SUCCESS;
1046 assert(rc == KERN_SUCCESS);
1047
1048 pages_in_cluster = length_of_data/PAGE_SIZE;
1049
1050 #if MACH_CLUSTER_STATS
1051 (cluster_stats[pages_at_lower_offsets].pages_at_lower_offsets)++;
1052 (cluster_stats[pages_at_higher_offsets].pages_at_higher_offsets)++;
1053 (cluster_stats[pages_in_cluster].pages_in_cluster)++;
1054 #endif /* MACH_CLUSTER_STATS */
1055
1056 /*
1057 * Send the data to the pager.
1058 */
1059 paging_offset = cluster_start + object->paging_offset;
1060
1061 rc = memory_object_data_return(object->pager,
1062 paging_offset,
1063 length_of_data,
1064 !precious_clean,
1065 FALSE);
1066
1067 vm_object_lock(object);
1068 vm_object_paging_end(object);
1069
1070 if (holding_page) {
1071 assert(!object->pager_trusted);
1072 VM_PAGE_FREE(holding_page);
1073 vm_object_paging_end(object);
1074 }
1075
1076 vm_object_unlock(object);
1077 }
1078
1079 /*
1080 * Trusted pager throttle.
1081 * Object must be unlocked, page queues must be unlocked.
1082 */
1083 void
1084 vm_pageout_throttle(
1085 register vm_page_t m)
1086 {
1087 vm_page_lock_queues();
1088 assert(!m->laundry);
1089 m->laundry = TRUE;
1090 while (vm_page_laundry_count >= vm_page_laundry_max) {
1091 /*
1092 * Set the threshold for when vm_page_free()
1093 * should wake us up.
1094 */
1095 vm_page_laundry_min = vm_page_laundry_max/2;
1096
1097 assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
1098 vm_page_unlock_queues();
1099
1100 /*
1101 * Pause to let the default pager catch up.
1102 */
1103 thread_block((void (*)(void)) 0);
1104 vm_page_lock_queues();
1105 }
1106 vm_page_laundry_count++;
1107 vm_page_unlock_queues();
1108 }
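
/*
 * A minimal user-space sketch of the same throttle pattern (an analogy
 * only, using POSIX threads rather than kernel primitives; all
 * identifiers below are hypothetical): producers block once the
 * in-flight count reaches a maximum, and completions wake them only
 * after the backlog drains below a low watermark, mirroring the
 * vm_page_laundry_max / vm_page_laundry_min handshake above.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <pthread.h>

static pthread_mutex_t	laundry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	laundry_cv = PTHREAD_COND_INITIALIZER;
static unsigned int	laundry_count;
static unsigned int	laundry_max = 6;	/* like VM_PAGE_LAUNDRY_MAX */
static unsigned int	laundry_min;		/* low watermark, set when blocking */

static void
throttle_enter(void)		/* analogue of vm_pageout_throttle() */
{
	pthread_mutex_lock(&laundry_lock);
	while (laundry_count >= laundry_max) {
		/* set the threshold for when a completion should wake us */
		laundry_min = laundry_max / 2;
		pthread_cond_wait(&laundry_cv, &laundry_lock);
	}
	laundry_count++;
	pthread_mutex_unlock(&laundry_lock);
}

static void
throttle_exit(void)		/* analogue of the laundry completion path */
{
	pthread_mutex_lock(&laundry_lock);
	laundry_count--;
	if (laundry_count < laundry_min) {
		laundry_min = 0;
		pthread_cond_broadcast(&laundry_cv);
	}
	pthread_mutex_unlock(&laundry_lock);
}
#endif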
1109
1110 /*
1111 * The global variable vm_pageout_clean_active_pages controls whether
1112 * active pages are considered valid to be cleaned in place during a
1113 * clustered pageout. Performance measurements are necessary to determine
1114 * the best policy.
1115 */
1116 int vm_pageout_clean_active_pages = 1;
1117 /*
1118 * vm_pageout_cluster_page: [Internal]
1119 *
1120 * return a vm_page_t to the page at (object,offset) if it is appropriate
1121 * to clean in place. Pages that are non-existent, busy, absent, already
1122 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
1123 * page in a cluster.
1124 *
1125 * The object must be locked on entry, and remains locked throughout
1126 * this call.
1127 */
1128
1129 vm_page_t
1130 vm_pageout_cluster_page(
1131 vm_object_t object,
1132 vm_object_offset_t offset,
1133 boolean_t precious_clean)
1134 {
1135 vm_page_t m;
1136
1137 XPR(XPR_VM_PAGEOUT,
1138 "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
1139 (integer_t)object, offset, 0, 0, 0);
1140
1141 if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
1142 return(VM_PAGE_NULL);
1143
1144 if (m->busy || m->absent || m->cleaning ||
1145 (m->wire_count != 0) || m->error)
1146 return(VM_PAGE_NULL);
1147
1148 if (vm_pageout_clean_active_pages) {
1149 if (!m->active && !m->inactive) return(VM_PAGE_NULL);
1150 } else {
1151 if (!m->inactive) return(VM_PAGE_NULL);
1152 }
1153
1154 assert(!m->private);
1155 assert(!m->fictitious);
1156
1157 if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr);
1158
1159 if (precious_clean) {
1160 if (!m->precious || !m->dirty)
1161 return(VM_PAGE_NULL);
1162 } else {
1163 if (!m->dirty)
1164 return(VM_PAGE_NULL);
1165 }
1166 return(m);
1167 }
1168
1169 /*
1170 * vm_pageout_scan does the dirty work for the pageout daemon.
1171 * It returns with vm_page_queue_free_lock held and
1172 * vm_page_free_wanted == 0.
1173 */
1174 extern void vm_pageout_scan_continue(void);	/* forward */
1175
1176 void
1177 vm_pageout_scan(void)
1178 {
1179 unsigned int burst_count;
1180 boolean_t now = FALSE;
1181 unsigned int laundry_pages;
1182 boolean_t need_more_inactive_pages;
1183 unsigned int loop_detect;
1184
1185 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1186
1187 /*???*/ /*
1188 * We want to gradually dribble pages from the active queue
1189 * to the inactive queue. If we let the inactive queue get
1190 * very small, and then suddenly dump many pages into it,
1191 * those pages won't get a sufficient chance to be referenced
1192 * before we start taking them from the inactive queue.
1193 *
1194 * We must limit the rate at which we send pages to the pagers.
1195 * data_write messages consume memory, for message buffers and
1196 * for map-copy objects. If we get too far ahead of the pagers,
1197 * we can potentially run out of memory.
1198 *
1199  * We can use the laundry count to directly limit the number
1200 * of pages outstanding to the default pager. A similar
1201 * strategy for external pagers doesn't work, because
1202 * external pagers don't have to deallocate the pages sent them,
1203 * and because we might have to send pages to external pagers
1204 * even if they aren't processing writes. So we also
1205 * use a burst count to limit writes to external pagers.
1206 *
1207 * When memory is very tight, we can't rely on external pagers to
1208 * clean pages. They probably aren't running, because they
1209 * aren't vm-privileged. If we kept sending dirty pages to them,
1210 * we could exhaust the free list. However, we can't just ignore
1211 * pages belonging to external objects, because there might be no
1212 * pages belonging to internal objects. Hence, we get the page
1213 * into an internal object and then immediately double-page it,
1214 * sending it to the default pager.
1215 *
1216 * consider_zone_gc should be last, because the other operations
1217 * might return memory to zones.
1218 */
1219
1220
1221 Restart:
1222
1223 #if THREAD_SWAPPER
1224 mutex_lock(&vm_page_queue_free_lock);
1225 now = (vm_page_free_count < vm_page_free_min);
1226 mutex_unlock(&vm_page_queue_free_lock);
1227
1228 swapout_threads(now);
1229 #endif /* THREAD_SWAPPER */
1230
1231 stack_collect();
1232 consider_task_collect();
1233 consider_thread_collect();
1234 consider_zone_gc();
1235 consider_machine_collect();
1236
1237 loop_detect = vm_page_active_count + vm_page_inactive_count;
1238 #if 0
1239 if (vm_page_free_count <= vm_page_free_reserved) {
1240 need_more_inactive_pages = TRUE;
1241 } else {
1242 need_more_inactive_pages = FALSE;
1243 }
1244 #else
1245 need_more_inactive_pages = FALSE;
1246 #endif
1247
1248 for (burst_count = 0;;) {
1249 register vm_page_t m;
1250 register vm_object_t object;
1251
1252 /*
1253 		 * Recalculate vm_page_inactive_target.
1254 */
1255
1256 vm_page_lock_queues();
1257 vm_page_inactive_target =
1258 VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1259 vm_page_inactive_count);
1260
1261 /*
1262 * Move pages from active to inactive.
1263 */
1264
1265 while ((vm_page_inactive_count < vm_page_inactive_target ||
1266 need_more_inactive_pages) &&
1267 !queue_empty(&vm_page_queue_active)) {
1268 register vm_object_t object;
1269
1270 vm_pageout_active++;
1271 m = (vm_page_t) queue_first(&vm_page_queue_active);
1272
1273 /*
1274 * If we're getting really low on memory,
1275 * try selecting a page that will go
1276 * directly to the default_pager.
1277 * If there are no such pages, we have to
1278 * page out a page backed by an EMM,
1279 * so that the default_pager can recover
1280 * it eventually.
1281 */
1282 if (need_more_inactive_pages &&
1283 (IP_VALID(memory_manager_default))) {
1284 vm_pageout_scan_active_emm_throttle++;
1285 do {
1286 assert(m->active && !m->inactive);
1287 object = m->object;
1288
1289 if (vm_object_lock_try(object)) {
1290 #if 0
1291 if (object->pager_trusted ||
1292 object->internal) {
1293 /* found one ! */
1294 vm_pageout_scan_active_emm_throttle_success++;
1295 goto object_locked_active;
1296 }
1297 #else
1298 vm_pageout_scan_active_emm_throttle_success++;
1299 goto object_locked_active;
1300 #endif
1301 vm_object_unlock(object);
1302 }
1303 m = (vm_page_t) queue_next(&m->pageq);
1304 } while (!queue_end(&vm_page_queue_active,
1305 (queue_entry_t) m));
1306 if (queue_end(&vm_page_queue_active,
1307 (queue_entry_t) m)) {
1308 vm_pageout_scan_active_emm_throttle_failure++;
1309 m = (vm_page_t)
1310 queue_first(&vm_page_queue_active);
1311 }
1312 }
1313
1314 assert(m->active && !m->inactive);
1315
1316 object = m->object;
1317 if (!vm_object_lock_try(object)) {
1318 /*
1319 * Move page to end and continue.
1320 */
1321
1322 queue_remove(&vm_page_queue_active, m,
1323 vm_page_t, pageq);
1324 queue_enter(&vm_page_queue_active, m,
1325 vm_page_t, pageq);
1326 vm_page_unlock_queues();
1327
1328 mutex_pause();
1329 vm_page_lock_queues();
1330 continue;
1331 }
1332
1333 object_locked_active:
1334 /*
1335 * If the page is busy, then we pull it
1336 * off the active queue and leave it alone.
1337 */
1338
1339 if (m->busy) {
1340 vm_object_unlock(object);
1341 queue_remove(&vm_page_queue_active, m,
1342 vm_page_t, pageq);
1343 m->active = FALSE;
1344 if (!m->fictitious)
1345 vm_page_active_count--;
1346 continue;
1347 }
1348
1349 /*
1350 * Deactivate the page while holding the object
1351 * locked, so we know the page is still not busy.
1352 * This should prevent races between pmap_enter
1353 * and pmap_clear_reference. The page might be
1354 * absent or fictitious, but vm_page_deactivate
1355 * can handle that.
1356 */
1357
1358 vm_page_deactivate(m);
1359 vm_object_unlock(object);
1360 }
1361
1362 /*
1363 * We are done if we have met our target *and*
1364 * nobody is still waiting for a page.
1365 */
1366 if (vm_page_free_count >= vm_page_free_target) {
1367 mutex_lock(&vm_page_queue_free_lock);
1368 if ((vm_page_free_count >= vm_page_free_target) &&
1369 (vm_page_free_wanted == 0)) {
1370 vm_page_unlock_queues();
1371 break;
1372 }
1373 mutex_unlock(&vm_page_queue_free_lock);
1374 }
1375 /*
1376 * Sometimes we have to pause:
1377 * 1) No inactive pages - nothing to do.
1378 * 2) Flow control - wait for untrusted pagers to catch up.
1379 */
1380
1381 if ((queue_empty(&vm_page_queue_inactive) &&
1382 (queue_empty(&vm_page_queue_zf))) ||
1383 ((--loop_detect) == 0) ||
1384 (burst_count >= vm_pageout_burst_max)) {
1385 unsigned int pages, msecs;
1386 int wait_result;
1387
1388 consider_machine_adjust();
1389 /*
1390 * vm_pageout_burst_wait is msecs/page.
1391 * If there is nothing for us to do, we wait
1392 * at least vm_pageout_empty_wait msecs.
1393 */
1394 pages = burst_count;
1395
1396 if (loop_detect == 0) {
1397 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1398 msecs = vm_free_page_pause;
1399 }
1400 else {
1401 msecs = burst_count * vm_pageout_burst_wait;
1402 }
1403
1404 if (queue_empty(&vm_page_queue_inactive) &&
1405 queue_empty(&vm_page_queue_zf) &&
1406 (msecs < vm_pageout_empty_wait))
1407 msecs = vm_pageout_empty_wait;
1408 vm_page_unlock_queues();
1409
1410 assert_wait_timeout(msecs, THREAD_INTERRUPTIBLE);
1411 counter(c_vm_pageout_scan_block++);
1412
1413 /*
1414 * Unfortunately, we don't have call_continuation
1415 * so we can't rely on tail-recursion.
1416 */
1417 wait_result = thread_block((void (*)(void)) 0);
1418 if (wait_result != THREAD_TIMED_OUT)
1419 thread_cancel_timer();
1420 vm_pageout_scan_continue();
1421
1422 goto Restart;
1423 /*NOTREACHED*/
1424 }
1425
1426 vm_pageout_inactive++;
1427
1428 if (vm_zf_count < vm_accellerate_zf_pageout_trigger) {
1429 vm_zf_iterator = 0;
1430 } else {
1431 last_page_zf = 0;
1432 if((vm_zf_iterator+=1) >= vm_zf_iterator_count) {
1433 vm_zf_iterator = 0;
1434 }
1435 }
1436 if(queue_empty(&vm_page_queue_zf) ||
1437 (((last_page_zf) || (vm_zf_iterator == 0)) &&
1438 !queue_empty(&vm_page_queue_inactive))) {
1439 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
1440 last_page_zf = 0;
1441 } else {
1442 m = (vm_page_t) queue_first(&vm_page_queue_zf);
1443 last_page_zf = 1;
1444 }
1445
1446 if ((vm_page_free_count <= vm_page_free_reserved) &&
1447 (IP_VALID(memory_manager_default))) {
1448 /*
1449 * We're really low on memory. Try to select a page that
1450 * would go directly to the default_pager.
1451 * If there are no such pages, we have to page out a
1452 * page backed by an EMM, so that the default_pager
1453 * can recover it eventually.
1454 */
1455 vm_pageout_scan_inactive_emm_throttle++;
1456 do {
1457 assert(!m->active && m->inactive);
1458 object = m->object;
1459
1460 if (vm_object_lock_try(object)) {
1461 #if 0
1462 if (object->pager_trusted ||
1463 object->internal) {
1464 /* found one ! */
1465 vm_pageout_scan_inactive_emm_throttle_success++;
1466 goto object_locked_inactive;
1467 }
1468 #else
1469 vm_pageout_scan_inactive_emm_throttle_success++;
1470 goto object_locked_inactive;
1471 #endif /* 0 */
1472 vm_object_unlock(object);
1473 }
1474 m = (vm_page_t) queue_next(&m->pageq);
1475 } while ((!queue_end(&vm_page_queue_zf,
1476 (queue_entry_t) m))
1477 && (!queue_end(&vm_page_queue_inactive,
1478 (queue_entry_t) m)));
1479
1480 if ((queue_end(&vm_page_queue_zf,
1481 (queue_entry_t) m))
1482 || (queue_end(&vm_page_queue_inactive,
1483 (queue_entry_t) m))) {
1484 vm_pageout_scan_inactive_emm_throttle_failure++;
1485 /*
1486 * We should check the "active" queue
1487 * for good candidates to page out.
1488 */
1489 need_more_inactive_pages = TRUE;
1490
1491 if(last_page_zf == 0) {
1492 last_page_zf = 1;
1493 vm_zf_iterator = vm_zf_iterator_count - 1;
1494 } else {
1495 last_page_zf = 0;
1496 vm_zf_iterator = vm_zf_iterator_count - 2;
1497 }
1498 vm_page_unlock_queues();
1499 goto Restart;
1500 }
1501 }
1502
1503 assert(!m->active && m->inactive);
1504 object = m->object;
1505
1506 /*
1507 * Try to lock object; since we've got the
1508 * page queues lock, we can only try for this one.
1509 */
1510
1511 if (!vm_object_lock_try(object)) {
1512 /*
1513 * Move page to end and continue.
1514 * Don't re-issue ticket
1515 */
1516 if(m->zero_fill) {
1517 queue_remove(&vm_page_queue_zf, m,
1518 vm_page_t, pageq);
1519 queue_enter(&vm_page_queue_zf, m,
1520 vm_page_t, pageq);
1521 } else {
1522 queue_remove(&vm_page_queue_inactive, m,
1523 vm_page_t, pageq);
1524 queue_enter(&vm_page_queue_inactive, m,
1525 vm_page_t, pageq);
1526 }
1527 vm_page_unlock_queues();
1528
1529 mutex_pause();
1530 vm_pageout_inactive_nolock++;
1531 continue;
1532 }
1533
1534 object_locked_inactive:
1535 /*
1536 		 * Avoid paging out pages of objects whose pager is still
1537 		 * being created by another thread, because that thread may
1538 		 * need to allocate memory, which could lead to a deadlock
1539 		 * between it and the pageout thread if such pages are
1540 		 * chosen and the pageout thread must wait for pager creation.
1541 		 * The remaining assumption is that there will eventually be
1542 		 * enough available pages in the inactive pool to page out,
1543 		 * satisfying all memory claimed by the thread which
1544 		 * concurrently creates the pager.
1545 		 */
1546
1547 if (!object->pager_initialized && object->pager_created) {
1548 /*
1549 * Move page to end and continue, hoping that
1550 * there will be enough other inactive pages to
1551 * page out so that the thread which currently
1552 * initializes the pager will succeed.
1553 			 * Don't re-grant the ticket; the page should be
1554 			 * pulled from the queue and paged out whenever
1555 * one of its logically adjacent fellows is
1556 * targeted.
1557 */
1558 if(m->zero_fill) {
1559 queue_remove(&vm_page_queue_zf, m,
1560 vm_page_t, pageq);
1561 queue_enter(&vm_page_queue_zf, m,
1562 vm_page_t, pageq);
1563 last_page_zf = 1;
1564 vm_zf_iterator = vm_zf_iterator_count - 1;
1565 } else {
1566 queue_remove(&vm_page_queue_inactive, m,
1567 vm_page_t, pageq);
1568 queue_enter(&vm_page_queue_inactive, m,
1569 vm_page_t, pageq);
1570 last_page_zf = 0;
1571 vm_zf_iterator = 1;
1572 }
1573 vm_page_unlock_queues();
1574 vm_object_unlock(object);
1575 vm_pageout_inactive_avoid++;
1576 continue;
1577 }
1578
1579 /*
1580 * Remove the page from the inactive list.
1581 */
1582
1583 if(m->zero_fill) {
1584 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1585 } else {
1586 queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
1587 }
1588 m->inactive = FALSE;
1589 if (!m->fictitious)
1590 vm_page_inactive_count--;
1591
1592 if (m->busy || !object->alive) {
1593 /*
1594 * Somebody is already playing with this page.
1595 * Leave it off the pageout queues.
1596 */
1597
1598 vm_page_unlock_queues();
1599 vm_object_unlock(object);
1600 vm_pageout_inactive_busy++;
1601 continue;
1602 }
1603
1604 /*
1605 * If it's absent or in error, we can reclaim the page.
1606 */
1607
1608 if (m->absent || m->error) {
1609 vm_pageout_inactive_absent++;
1610 reclaim_page:
1611 vm_page_free(m);
1612 vm_page_unlock_queues();
1613 vm_object_unlock(object);
1614 continue;
1615 }
1616
1617 assert(!m->private);
1618 assert(!m->fictitious);
1619
1620 /*
1621 * If already cleaning this page in place, convert from
1622 * "adjacent" to "target". We can leave the page mapped,
1623 * and vm_pageout_object_terminate will determine whether
1624 * to free or reactivate.
1625 */
1626
1627 if (m->cleaning) {
1628 #if MACH_CLUSTER_STATS
1629 vm_pageout_cluster_conversions++;
1630 #endif
1631 m->busy = TRUE;
1632 m->pageout = TRUE;
1633 m->dump_cleaning = TRUE;
1634 vm_page_wire(m);
1635 vm_object_unlock(object);
1636 vm_page_unlock_queues();
1637 continue;
1638 }
1639
1640 /*
1641 * If it's being used, reactivate.
1642 * (Fictitious pages are either busy or absent.)
1643 */
1644
1645 if (m->reference || pmap_is_referenced(m->phys_addr)) {
1646 vm_pageout_inactive_used++;
1647 reactivate_page:
1648 #if ADVISORY_PAGEOUT
1649 if (m->discard_request) {
1650 m->discard_request = FALSE;
1651 }
1652 #endif /* ADVISORY_PAGEOUT */
1653 last_page_zf = 0;
1654 vm_object_unlock(object);
1655 vm_page_activate(m);
1656 VM_STAT(reactivations++);
1657 vm_page_unlock_queues();
1658 continue;
1659 }
1660
1661 #if ADVISORY_PAGEOUT
1662 if (object->advisory_pageout) {
1663 boolean_t do_throttle;
1664 memory_object_t pager;
1665 vm_object_offset_t discard_offset;
1666
1667 if (m->discard_request) {
1668 vm_stat_discard_failure++;
1669 goto mandatory_pageout;
1670 }
1671
1672 assert(object->pager_initialized);
1673 m->discard_request = TRUE;
1674 pager = object->pager;
1675
1676 /* system-wide throttle */
1677 do_throttle = (vm_page_free_count <=
1678 vm_page_free_reserved);
1679
1680 #if 0
1681 /*
1682 * JMM - Do we need a replacement throttle
1683 * mechanism for pagers?
1684 */
1685 if (!do_throttle) {
1686 /* throttle on this pager */
1687 /* XXX lock ordering ? */
1688 ip_lock(port);
1689 do_throttle= imq_full(&port->ip_messages);
1690 ip_unlock(port);
1691 }
1692 #endif
1693
1694 if (do_throttle) {
1695 vm_stat_discard_throttle++;
1696 #if 0
1697 /* ignore this page and skip to next */
1698 vm_page_unlock_queues();
1699 vm_object_unlock(object);
1700 continue;
1701 #else
1702 /* force mandatory pageout */
1703 goto mandatory_pageout;
1704 #endif
1705 }
1706
1707 /* proceed with discard_request */
1708 vm_page_activate(m);
1709 vm_stat_discard++;
1710 VM_STAT(reactivations++);
1711 discard_offset = m->offset + object->paging_offset;
1712 vm_stat_discard_sent++;
1713 vm_page_unlock_queues();
1714 vm_object_unlock(object);
1715
1716 /*
1717 memory_object_discard_request(object->pager,
1718 discard_offset,
1719 PAGE_SIZE);
1720 */
1721 continue;
1722 }
1723 mandatory_pageout:
1724 #endif /* ADVISORY_PAGEOUT */
1725
1726 XPR(XPR_VM_PAGEOUT,
1727 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1728 (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
1729
1730 /*
1731 * Eliminate all mappings.
1732 */
1733
1734 m->busy = TRUE;
1735 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
1736
1737 if (!m->dirty)
1738 m->dirty = pmap_is_modified(m->phys_addr);
1739 /*
1740 * If it's clean and not precious, we can free the page.
1741 */
1742
1743 if (!m->dirty && !m->precious) {
1744 vm_pageout_inactive_clean++;
1745 goto reclaim_page;
1746 }
1747 vm_page_unlock_queues();
1748
1749 /*
1750 * If there is no memory object for the page, create
1751 * one and hand it to the default pager.
1752 */
1753
1754 if (!object->pager_initialized)
1755 vm_object_collapse(object);
1756 if (!object->pager_initialized)
1757 vm_object_pager_create(object);
1758 if (!object->pager_initialized) {
1759 /*
1760 * Still no pager for the object.
1761 * Reactivate the page.
1762 *
1763 * Should only happen if there is no
1764 * default pager.
1765 */
1766 vm_page_lock_queues();
1767 vm_page_activate(m);
1768 vm_page_unlock_queues();
1769
1770 /*
1771 * And we are done with it.
1772 */
1773 PAGE_WAKEUP_DONE(m);
1774 vm_object_unlock(object);
1775
1776 /*
1777 * break here to get back to the preemption
1778 * point in the outer loop so that we don't
1779 * spin forever if there is no default pager.
1780 */
1781 vm_pageout_dirty_no_pager++;
1782 /*
1783 * Well there's no pager, but we can still reclaim
1784 * free pages out of the inactive list. Go back
1785 * to top of loop and look for suitable pages.
1786 */
1787 continue;
1788 }
1789
1790 if ((object->pager_initialized) &&
1791 (object->pager == MEMORY_OBJECT_NULL)) {
1792 /*
1793 * This pager has been destroyed by either
1794 * memory_object_destroy or vm_object_destroy, and
1795 * so there is nowhere for the page to go.
1796 * Just free the page.
1797 */
1798 VM_PAGE_FREE(m);
1799 vm_object_unlock(object);
1800 continue;
1801 }
1802
1803 vm_pageout_inactive_dirty++;
1804 /*
1805 if (!object->internal)
1806 burst_count++;
1807 */
1808 vm_object_paging_begin(object);
1809 vm_object_unlock(object);
1810 vm_pageout_cluster(m); /* flush it */
1811 }
1812 consider_machine_adjust();
1813 }
1814
1815 counter(unsigned int c_vm_pageout_scan_continue = 0;)
1816
1817 void
1818 vm_pageout_scan_continue(void)
1819 {
1820 /*
1821 * We just paused to let the pagers catch up.
1822 * If vm_page_laundry_count is still high,
1823 * then we aren't waiting long enough.
1824 * If we have paused some vm_pageout_pause_max times without
1825 * adjusting vm_pageout_burst_wait, it might be too big,
1826 * so we decrease it.
1827 */
1828
1829 vm_page_lock_queues();
1830 counter(++c_vm_pageout_scan_continue);
1831 if (vm_page_laundry_count > vm_pageout_burst_min) {
1832 vm_pageout_burst_wait++;
1833 vm_pageout_pause_count = 0;
1834 } else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
1835 vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
1836 if (vm_pageout_burst_wait < 1)
1837 vm_pageout_burst_wait = 1;
1838 vm_pageout_pause_count = 0;
1839 }
1840 vm_page_unlock_queues();
1841 }
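
/*
 * Worked example (illustrative only): starting from the default
 * vm_pageout_burst_wait of 30 ms/page, each stretch of
 * vm_pageout_pause_max quiet pauses scales the wait by 3/4, giving
 * 30 -> 22 -> 16 -> 12 -> 9 ... ms and never dropping below 1 ms;
 * a pause that still finds vm_page_laundry_count above
 * vm_pageout_burst_min instead bumps the wait back up by 1 ms.
 */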
1842
1843 void vm_page_free_reserve(int pages);
1844 int vm_page_free_count_init;
1845
1846 void
1847 vm_page_free_reserve(
1848 int pages)
1849 {
1850 int free_after_reserve;
1851
1852 vm_page_free_reserved += pages;
1853
1854 free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
1855
1856 vm_page_free_min = vm_page_free_reserved +
1857 VM_PAGE_FREE_MIN(free_after_reserve);
1858
1859 vm_page_free_target = vm_page_free_reserved +
1860 VM_PAGE_FREE_TARGET(free_after_reserve);
1861
1862 if (vm_page_free_target < vm_page_free_min + 5)
1863 vm_page_free_target = vm_page_free_min + 5;
1864 }
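
/*
 * Worked example (assuming roughly 100000 free pages at boot and a
 * reserve of 100 pages): free_after_reserve = 99900, so
 * vm_page_free_min = 100 + 10 + 99900/100 = 1109 pages and
 * vm_page_free_target = 100 + 15 + 99900/80 = 1363 pages, which
 * already satisfies the "target >= min + 5" floor applied above.
 */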
1865
1866 /*
1867 * vm_pageout is the high level pageout daemon.
1868 */
1869
1870
1871 void
1872 vm_pageout(void)
1873 {
1874 thread_t self = current_thread();
1875 spl_t s;
1876
1877 /*
1878 * Set thread privileges.
1879 */
1880 self->vm_privilege = TRUE;
1881 stack_privilege(self);
1882
1883 s = splsched();
1884 thread_lock(self);
1885 self->priority = BASEPRI_PREEMPT - 1;
1886 set_sched_pri(self, self->priority);
1887 thread_unlock(self);
1888 splx(s);
1889
1890 /*
1891 * Initialize some paging parameters.
1892 */
1893
1894 if (vm_page_laundry_max == 0)
1895 vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX;
1896
1897 if (vm_pageout_burst_max == 0)
1898 vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;
1899
1900 if (vm_pageout_burst_wait == 0)
1901 vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
1902
1903 if (vm_pageout_empty_wait == 0)
1904 vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
1905
1906 vm_page_free_count_init = vm_page_free_count;
1907 vm_zf_iterator = 0;
1908 /*
1909 * even if we've already called vm_page_free_reserve,
1910 * call it again here to ensure that the targets are
1911 * accurately calculated (it uses vm_page_free_count_init).
1912 * Calling it with an arg of 0 will not change the reserve
1913 * but will re-calculate free_min and free_target.
1914 */
1915 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED)
1916 vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved);
1917 else
1918 vm_page_free_reserve(0);
1919
1920 /*
1921 * vm_pageout_scan will set vm_page_inactive_target.
1922 *
1923 * The pageout daemon is never done, so loop forever.
1924 * We should call vm_pageout_scan at least once each
1925 * time we are woken, even if vm_page_free_wanted is
1926 * zero, to check vm_page_free_target and
1927 * vm_page_inactive_target.
1928 */
1929 for (;;) {
1930 vm_pageout_scan_event_counter++;
1931 vm_pageout_scan();
1932 /* we hold vm_page_queue_free_lock now */
1933 assert(vm_page_free_wanted == 0);
1934 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
1935 mutex_unlock(&vm_page_queue_free_lock);
1936 counter(c_vm_pageout_block++);
1937 thread_block((void (*)(void)) 0);
1938 }
1939 /*NOTREACHED*/
1940 }
1941
1942 kern_return_t
1943 vm_pageout_emergency_availability_request()
1944 {
1945 vm_page_t m;
1946 vm_object_t object;
1947
1948 vm_page_lock_queues();
1949 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
1950
1951 while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
1952 if(m->fictitious) {
1953 m = (vm_page_t) queue_next(&m->pageq);
1954 continue;
1955 }
1956 if (!m->dirty)
1957 m->dirty = pmap_is_modified(m->phys_addr);
1958 if(m->dirty || m->busy || m->wire_count || m->absent
1959 || m->precious || m->cleaning
1960 || m->dump_cleaning || m->error
1961 || m->pageout || m->laundry
1962 || m->list_req_pending
1963 || m->overwriting) {
1964 m = (vm_page_t) queue_next(&m->pageq);
1965 continue;
1966 }
1967 object = m->object;
1968
1969 if (vm_object_lock_try(object)) {
1970 if((!object->alive) ||
1971 (object->pageout)) {
1972 vm_object_unlock(object);
1973 m = (vm_page_t) queue_next(&m->pageq);
1974 continue;
1975 }
1976 m->busy = TRUE;
1977 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
1978 vm_page_free(m);
1979 vm_object_unlock(object);
1980 vm_page_unlock_queues();
1981 return KERN_SUCCESS;
1982 }
1983 m = (vm_page_t) queue_next(&m->pageq);
1984 }
1985
1986 m = (vm_page_t) queue_first(&vm_page_queue_active);
1987
1988 while (!queue_end(&vm_page_queue_active, (queue_entry_t) m)) {
1989 if(m->fictitious) {
1990 m = (vm_page_t) queue_next(&m->pageq);
1991 continue;
1992 }
1993 if (!m->dirty)
1994 m->dirty = pmap_is_modified(m->phys_addr);
1995 if(m->dirty || m->busy || m->wire_count || m->absent
1996 || m->precious || m->cleaning
1997 || m->dump_cleaning || m->error
1998 || m->pageout || m->laundry
1999 || m->list_req_pending
2000 || m->overwriting) {
2001 m = (vm_page_t) queue_next(&m->pageq);
2002 continue;
2003 }
2004 object = m->object;
2005
2006 if (vm_object_lock_try(object)) {
2007 if((!object->alive) ||
2008 (object->pageout)) {
2009 vm_object_unlock(object);
2010 m = (vm_page_t) queue_next(&m->pageq);
2011 continue;
2012 }
2013 m->busy = TRUE;
2014 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
2015 vm_page_free(m);
2016 vm_object_unlock(object);
2017 vm_page_unlock_queues();
2018 return KERN_SUCCESS;
2019 }
2020 m = (vm_page_t) queue_next(&m->pageq);
2021 }
2022 vm_page_unlock_queues();
2023 return KERN_FAILURE;
2024 }
2025
2026
2027 static upl_t
2028 upl_create(
2029 boolean_t internal,
2030 vm_size_t size)
2031 {
2032 upl_t upl;
2033
2034 if(internal) {
2035 upl = (upl_t)kalloc(sizeof(struct upl)
2036 + (sizeof(struct upl_page_info)*(size/page_size)));
2037 } else {
2038 upl = (upl_t)kalloc(sizeof(struct upl));
2039 }
2040 upl->flags = 0;
2041 upl->src_object = NULL;
2042 upl->kaddr = (vm_offset_t)0;
2043 upl->size = 0;
2044 upl->map_object = NULL;
2045 upl->ref_count = 1;
2046 upl_lock_init(upl);
2047 #ifdef UBC_DEBUG
2048 upl->ubc_alias1 = 0;
2049 upl->ubc_alias2 = 0;
2050 #endif /* UBC_DEBUG */
2051 return(upl);
2052 }
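
/*
 * Illustrative sketch (not compiled): for an internal UPL the
 * upl_page_info array is allocated immediately behind the upl
 * structure itself, which is what the kalloc sizing in upl_create
 * relies on.  UPL_GET_INTERNAL_PAGE_LIST() is the accessor used
 * elsewhere in this file; the pointer arithmetic below only
 * illustrates the layout, and the helper name is an assumption for
 * exposition, not an interface.
 */
#if 0
static upl_page_info_t *
example_internal_page_list(upl_t upl)
{
	assert(upl->flags & UPL_INTERNAL);
	/* the page info array lives right past the upl header */
	return (upl_page_info_t *)(((vm_offset_t)upl) + sizeof(struct upl));
}
#endif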
2053
2054 static void
2055 upl_destroy(
2056 upl_t upl)
2057 {
2058
2059 #ifdef UBC_DEBUG
2060 {
2061 upl_t upl_ele;
2062 vm_object_lock(upl->map_object->shadow);
2063 queue_iterate(&upl->map_object->shadow->uplq,
2064 upl_ele, upl_t, uplq) {
2065 if(upl_ele == upl) {
2066 queue_remove(&upl->map_object->shadow->uplq,
2067 upl_ele, upl_t, uplq);
2068 break;
2069 }
2070 }
2071 vm_object_unlock(upl->map_object->shadow);
2072 }
2073 #endif /* UBC_DEBUG */
2074 #ifdef notdefcdy
2075 if(!(upl->flags & UPL_DEVICE_MEMORY))
2076 #endif
2077 vm_object_deallocate(upl->map_object);
2078 if(upl->flags & UPL_INTERNAL) {
2079 kfree((vm_offset_t)upl,
2080 sizeof(struct upl) +
2081 (sizeof(struct upl_page_info) * (upl->size/page_size)));
2082 } else {
2083 kfree((vm_offset_t)upl, sizeof(struct upl));
2084 }
2085 }
2086
2087 __private_extern__ void
2088 uc_upl_dealloc(
2089 upl_t upl)
2090 {
2091 upl->ref_count -= 1;
2092 if(upl->ref_count == 0) {
2093 upl_destroy(upl);
2094 }
2095 }
2096
2097 void
2098 upl_deallocate(
2099 upl_t upl)
2100 {
2101
2102 upl->ref_count -= 1;
2103 if(upl->ref_count == 0) {
2104 upl_destroy(upl);
2105 }
2106 }
2107
2108 /*
2109 * Routine: vm_object_upl_request
2110 * Purpose:
2111 * Cause the population of a portion of a vm_object.
2112 * Depending on the nature of the request, the pages
2113 * returned may contain valid data or be uninitialized.
2114 * A page list structure, listing the physical pages
2115 * will be returned upon request.
2116 * This function is called by the file system or any other
2117 * supplier of backing store to a pager.
2118 * IMPORTANT NOTE: The caller must still respect the relationship
2119 * between the vm_object and its backing memory object. The
2120 * caller MUST NOT substitute changes in the backing file
2121 * without first doing a memory_object_lock_request on the
2122 * target range unless it is known that the pages are not
2123 * shared with another entity at the pager level.
2124 * Copy_in_to:
2125 * if a page list structure is present
2126 * return the mapped physical pages, where a
2127 * page is not present, return a non-initialized
2128 * one. If the no_sync bit is turned on, don't
2129 * call the pager unlock to synchronize with other
2130 * possible copies of the page. Leave pages busy
2131 * in the original object, if a page list structure
2132 * was specified. When a commit of the page list
2133 * pages is done, the dirty bit will be set for each one.
2134 * Copy_out_from:
2135 * If a page list structure is present, return
2136 * all mapped pages. Where a page does not exist
2137 * map a zero filled one. Leave pages busy in
2138 * the original object. If a page list structure
2139 * is not specified, this call is a no-op.
2140 *
2141 * Note: access of default pager objects has a rather interesting
2142 * twist. The caller of this routine, presumably the file system
2143 * page cache handling code, will never actually make a request
2144 * against a default pager backed object. Only the default
2145 * pager will make requests on backing store related vm_objects.
2146 * In this way the default pager can maintain the relationship
2147 * between backing store files (abstract memory objects) and
2148 * the vm_objects (cache objects) they support.
2149 *
2150 */
2151 __private_extern__ kern_return_t
2152 vm_object_upl_request(
2153 vm_object_t object,
2154 vm_object_offset_t offset,
2155 vm_size_t size,
2156 upl_t *upl_ptr,
2157 upl_page_info_array_t user_page_list,
2158 unsigned int *page_list_count,
2159 int cntrl_flags)
2160 {
2161 vm_page_t dst_page;
2162 vm_object_offset_t dst_offset = offset;
2163 vm_size_t xfer_size = size;
2164 boolean_t do_m_lock = FALSE;
2165 boolean_t dirty;
2166 upl_t upl = NULL;
2167 int entry;
2168 boolean_t encountered_lrp = FALSE;
2169
2170 vm_page_t alias_page = NULL;
2171 int page_ticket;
2172
2173
2174 page_ticket = (cntrl_flags & UPL_PAGE_TICKET_MASK)
2175 >> UPL_PAGE_TICKET_SHIFT;
2176
2177 if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
2178 size = MAX_UPL_TRANSFER * page_size;
2179 }
2180
2181 if(cntrl_flags & UPL_SET_INTERNAL)
2182 if(page_list_count != NULL)
2183 *page_list_count = MAX_UPL_TRANSFER;
2184 if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
2185 ((page_list_count != NULL) && (*page_list_count != 0)
2186 && *page_list_count < (size/page_size)))
2187 return KERN_INVALID_ARGUMENT;
2188
2189 if((!object->internal) && (object->paging_offset != 0))
2190 panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
2191
2192 if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
2193 return KERN_SUCCESS;
2194 }
2195 if(upl_ptr) {
2196 if(cntrl_flags & UPL_SET_INTERNAL) {
2197 upl = upl_create(TRUE, size);
2198 user_page_list = (upl_page_info_t *)
2199 (((vm_offset_t)upl) + sizeof(struct upl));
2200 upl->flags |= UPL_INTERNAL;
2201 } else {
2202 upl = upl_create(FALSE, size);
2203 }
2204 if(object->phys_contiguous) {
2205 upl->size = size;
2206 upl->offset = offset + object->paging_offset;
2207 *upl_ptr = upl;
2208 if(user_page_list) {
2209 user_page_list[0].phys_addr =
2210 offset + object->shadow_offset;
2211 user_page_list[0].device = TRUE;
2212 }
2213 upl->map_object = vm_object_allocate(size);
2214 vm_object_lock(upl->map_object);
2215 upl->map_object->shadow = object;
2216 upl->flags = UPL_DEVICE_MEMORY | UPL_INTERNAL;
2217 upl->map_object->pageout = TRUE;
2218 upl->map_object->can_persist = FALSE;
2219 upl->map_object->copy_strategy
2220 = MEMORY_OBJECT_COPY_NONE;
2221 upl->map_object->shadow_offset = offset;
2222 vm_object_unlock(upl->map_object);
2223 return KERN_SUCCESS;
2224 }
2225
2226
2227 upl->map_object = vm_object_allocate(size);
2228 vm_object_lock(upl->map_object);
2229 upl->map_object->shadow = object;
2230 upl->size = size;
2231 upl->offset = offset + object->paging_offset;
2232 upl->map_object->pageout = TRUE;
2233 upl->map_object->can_persist = FALSE;
2234 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2235 upl->map_object->shadow_offset = offset;
2236 upl->map_object->wimg_bits = object->wimg_bits;
2237 vm_object_unlock(upl->map_object);
2238 *upl_ptr = upl;
2239 }
2240 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2241 vm_object_lock(object);
2242 #ifdef UBC_DEBUG
2243 if(upl_ptr)
2244 queue_enter(&object->uplq, upl, upl_t, uplq);
2245 #endif /* UBC_DEBUG */
2246 vm_object_paging_begin(object);
2247 entry = 0;
2248 if(cntrl_flags & UPL_COPYOUT_FROM) {
2249 upl->flags |= UPL_PAGE_SYNC_DONE;
2250 while (xfer_size) {
2251 if(alias_page == NULL) {
2252 vm_object_unlock(object);
2253 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2254 vm_object_lock(object);
2255 }
2256 if(((dst_page = vm_page_lookup(object,
2257 dst_offset)) == VM_PAGE_NULL) ||
2258 dst_page->fictitious ||
2259 dst_page->absent ||
2260 dst_page->error ||
2261 (dst_page->wire_count != 0 &&
2262 !dst_page->pageout) ||
2263 ((!(dst_page->dirty || dst_page->precious ||
2264 pmap_is_modified(dst_page->phys_addr)))
2265 && (cntrl_flags & UPL_RET_ONLY_DIRTY)) ||
2266 ((!(dst_page->inactive))
2267 && (dst_page->page_ticket != page_ticket)
2268 && ((dst_page->page_ticket+1) != page_ticket)
2269 && (cntrl_flags & UPL_PAGEOUT)) ||
2270 ((!dst_page->list_req_pending) &&
2271 (cntrl_flags & UPL_RET_ONLY_DIRTY) &&
2272 pmap_is_referenced(dst_page->phys_addr))) {
2273 if(user_page_list)
2274 user_page_list[entry].phys_addr = 0;
2275 } else {
2276
2277 if(dst_page->busy &&
2278 (!(dst_page->list_req_pending &&
2279 dst_page->pageout))) {
2280 if(cntrl_flags & UPL_NOBLOCK) {
2281 if(user_page_list)
2282 user_page_list[entry]
2283 .phys_addr = 0;
2284 entry++;
2285 dst_offset += PAGE_SIZE_64;
2286 xfer_size -= PAGE_SIZE;
2287 continue;
2288 }
2289 /*someone else is playing with the */
2290 /* page. We will have to wait. */
2291 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
2292 continue;
2293 }
2294 /* Someone else already cleaning the page? */
2295 if((dst_page->cleaning || dst_page->absent ||
2296 dst_page->wire_count != 0) &&
2297 !dst_page->list_req_pending) {
2298 if(user_page_list)
2299 user_page_list[entry].phys_addr = 0;
2300 entry++;
2301 dst_offset += PAGE_SIZE_64;
2302 xfer_size -= PAGE_SIZE;
2303 continue;
2304 }
2305 /* eliminate all mappings from the */
2306 /* original object and its progeny */
2307
2308 vm_page_lock_queues();
2309 if( !(cntrl_flags & UPL_FILE_IO)) {
2310 pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE);
2311 }
2312 /* pageout statistics gathering. count */
2313 /* all the pages we will page out that */
2314 /* were not counted in the initial */
2315 /* vm_pageout_scan work */
2316 if(dst_page->list_req_pending)
2317 encountered_lrp = TRUE;
2318 if((dst_page->dirty ||
2319 (dst_page->object->internal &&
2320 dst_page->precious)) &&
2321 (dst_page->list_req_pending
2322 == FALSE)) {
2323 if(encountered_lrp) {
2324 CLUSTER_STAT
2325 (pages_at_higher_offsets++;)
2326 } else {
2327 CLUSTER_STAT
2328 (pages_at_lower_offsets++;)
2329 }
2330 }
2331
2332 /* Turn off busy indication on pending */
2333 /* pageout. Note: we can only get here */
2334 /* in the request pending case. */
2335 dst_page->list_req_pending = FALSE;
2336 dst_page->busy = FALSE;
2337 dst_page->cleaning = FALSE;
2338
2339 dirty = pmap_is_modified(dst_page->phys_addr);
2340 dirty = dirty ? TRUE : dst_page->dirty;
2341
2342 /* use pageclean setup, it is more convenient */
2343 /* even for the pageout cases here */
2344 vm_pageclean_setup(dst_page, alias_page,
2345 upl->map_object, size - xfer_size);
2346
2347 if(!dirty) {
2348 dst_page->dirty = FALSE;
2349 dst_page->precious = TRUE;
2350 }
2351
2352 if(dst_page->pageout)
2353 dst_page->busy = TRUE;
2354
2355 alias_page->absent = FALSE;
2356 alias_page = NULL;
2357 if((!(cntrl_flags & UPL_CLEAN_IN_PLACE))
2358 || (cntrl_flags & UPL_PAGEOUT)) {
2359 /* deny access to the target page */
2360 /* while it is being worked on */
2361 if((!dst_page->pageout) &&
2362 (dst_page->wire_count == 0)) {
2363 dst_page->busy = TRUE;
2364 dst_page->pageout = TRUE;
2365 vm_page_wire(dst_page);
2366 }
2367 }
2368 if(user_page_list) {
2369 user_page_list[entry].phys_addr
2370 = dst_page->phys_addr;
2371 user_page_list[entry].dirty =
2372 dst_page->dirty;
2373 user_page_list[entry].pageout =
2374 dst_page->pageout;
2375 user_page_list[entry].absent =
2376 dst_page->absent;
2377 user_page_list[entry].precious =
2378 dst_page->precious;
2379 }
2380
2381 vm_page_unlock_queues();
2382 }
2383 entry++;
2384 dst_offset += PAGE_SIZE_64;
2385 xfer_size -= PAGE_SIZE;
2386 }
2387 } else {
2388 while (xfer_size) {
2389 if(alias_page == NULL) {
2390 vm_object_unlock(object);
2391 VM_PAGE_GRAB_FICTITIOUS(alias_page);
2392 vm_object_lock(object);
2393 }
2394 dst_page = vm_page_lookup(object, dst_offset);
2395 if(dst_page != VM_PAGE_NULL) {
2396 if((cntrl_flags & UPL_RET_ONLY_ABSENT) &&
2397 !((dst_page->list_req_pending)
2398 && (dst_page->absent))) {
2399 /* we are doing extended range */
2400 /* requests. we want to grab */
2401 /* pages around some which are */
2402 /* already present. */
2403 if(user_page_list)
2404 user_page_list[entry].phys_addr = 0;
2405 entry++;
2406 dst_offset += PAGE_SIZE_64;
2407 xfer_size -= PAGE_SIZE;
2408 continue;
2409 }
2410 if((dst_page->cleaning) &&
2411 !(dst_page->list_req_pending)) {
2412 /*someone else is writing to the */
2413 /* page. We will have to wait. */
2414 PAGE_SLEEP(object,dst_page,THREAD_UNINT);
2415 continue;
2416 }
2417 if ((dst_page->fictitious &&
2418 dst_page->list_req_pending)) {
2419 /* dump the fictitious page */
2420 dst_page->list_req_pending = FALSE;
2421 dst_page->clustered = FALSE;
2422 vm_page_lock_queues();
2423 vm_page_free(dst_page);
2424 vm_page_unlock_queues();
2425 } else if ((dst_page->absent &&
2426 dst_page->list_req_pending)) {
2427 /* the default_pager case */
2428 dst_page->list_req_pending = FALSE;
2429 dst_page->busy = FALSE;
2430 dst_page->clustered = FALSE;
2431 }
2432 }
2433 if((dst_page = vm_page_lookup(object, dst_offset)) ==
2434 VM_PAGE_NULL) {
2435 if(object->private) {
2436 /*
2437 * This is a nasty wrinkle for users
2438 * of upl who encounter device or
2439 * private memory; however, it is
2440 * unavoidable: only a fault can
2441 * resolve the actual backing
2442 * physical page by asking the
2443 * backing device.
2444 */
2445 if(user_page_list)
2446 user_page_list[entry]
2447 .phys_addr = 0;
2448 entry++;
2449 dst_offset += PAGE_SIZE_64;
2450 xfer_size -= PAGE_SIZE;
2451 continue;
2452 }
2453 /* need to allocate a page */
2454 dst_page = vm_page_alloc(object, dst_offset);
2455 if (dst_page == VM_PAGE_NULL) {
2456 vm_object_unlock(object);
2457 VM_PAGE_WAIT();
2458 vm_object_lock(object);
2459 continue;
2460 }
2461 dst_page->busy = FALSE;
2462 #if 0
2463 if(cntrl_flags & UPL_NO_SYNC) {
2464 dst_page->page_lock = 0;
2465 dst_page->unlock_request = 0;
2466 }
2467 #endif
2468 dst_page->absent = TRUE;
2469 object->absent_count++;
2470 }
2471 #if 1
2472 if(cntrl_flags & UPL_NO_SYNC) {
2473 dst_page->page_lock = 0;
2474 dst_page->unlock_request = 0;
2475 }
2476 #endif /* 1 */
2477 dst_page->overwriting = TRUE;
2478 if(dst_page->fictitious) {
2479 panic("need corner case for fictitious page");
2480 }
2481 if(dst_page->page_lock) {
2482 do_m_lock = TRUE;
2483 }
2484 if(upl_ptr) {
2485
2486 /* eliminate all mappings from the */
2487 /* original object and its progeny */
2488
2489 if(dst_page->busy) {
2490 /*someone else is playing with the */
2491 /* page. We will have to wait. */
2492 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
2493 continue;
2494 }
2495
2496 vm_page_lock_queues();
2497 if( !(cntrl_flags & UPL_FILE_IO)) {
2498 pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE);
2499 }
2500 dirty = pmap_is_modified(dst_page->phys_addr);
2501 dirty = dirty ? TRUE : dst_page->dirty;
2502
2503 vm_pageclean_setup(dst_page, alias_page,
2504 upl->map_object, size - xfer_size);
2505
2506 if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
2507 /* clean in place for read implies */
2508 /* that a write will be done on all */
2509 /* the pages that are dirty before */
2510 /* a upl commit is done. The caller */
2511 /* is obligated to preserve the */
2512 /* contents of all pages marked */
2513 /* dirty. */
2514 upl->flags |= UPL_CLEAR_DIRTY;
2515 }
2516
2517 if(!dirty) {
2518 dst_page->dirty = FALSE;
2519 dst_page->precious = TRUE;
2520 }
2521
2522 if (dst_page->wire_count == 0) {
2523 /* deny access to the target page while */
2524 /* it is being worked on */
2525 dst_page->busy = TRUE;
2526 } else {
2527 vm_page_wire(dst_page);
2528 }
2529 /* expect the page to be used */
2530 dst_page->reference = TRUE;
2531 dst_page->precious =
2532 (cntrl_flags & UPL_PRECIOUS)
2533 ? TRUE : FALSE;
2534 alias_page->absent = FALSE;
2535 alias_page = NULL;
2536 if(user_page_list) {
2537 user_page_list[entry].phys_addr
2538 = dst_page->phys_addr;
2539 user_page_list[entry].dirty =
2540 dst_page->dirty;
2541 user_page_list[entry].pageout =
2542 dst_page->pageout;
2543 user_page_list[entry].absent =
2544 dst_page->absent;
2545 user_page_list[entry].precious =
2546 dst_page->precious;
2547 }
2548 vm_page_unlock_queues();
2549 }
2550 entry++;
2551 dst_offset += PAGE_SIZE_64;
2552 xfer_size -= PAGE_SIZE;
2553 }
2554 }
2555
2556 if (upl->flags & UPL_INTERNAL) {
2557 if(page_list_count != NULL)
2558 *page_list_count = 0;
2559 } else if ((page_list_count != NULL) &&
2560 (*page_list_count > entry)) {
2561 *page_list_count = entry;
2562 }
2563
2564 if(alias_page != NULL) {
2565 vm_page_lock_queues();
2566 vm_page_free(alias_page);
2567 vm_page_unlock_queues();
2568 }
2569
2570 if(do_m_lock) {
2571 vm_prot_t access_required;
2572 /* call back all associated pages from other users of the pager */
2573 /* all future updates will be on data which is based on the */
2574 /* changes we are going to make here. Note: it is assumed that */
2575 /* we already hold copies of the data so we will not be seeing */
2576 /* an avalanche of incoming data from the pager */
2577 access_required = (cntrl_flags & UPL_COPYOUT_FROM)
2578 ? VM_PROT_READ : VM_PROT_WRITE;
2579 while (TRUE) {
2580 kern_return_t rc;
2581
2582 if(!object->pager_ready) {
2583 wait_result_t wait_result;
2584
2585 wait_result = vm_object_sleep(object,
2586 VM_OBJECT_EVENT_PAGER_READY,
2587 THREAD_UNINT);
2588 if (wait_result != THREAD_AWAKENED) {
2589 vm_object_unlock(object);
2590 return(KERN_FAILURE);
2591 }
2592 continue;
2593 }
2594
2595 vm_object_unlock(object);
2596
2597 if ((rc = memory_object_data_unlock(
2598 object->pager,
2599 dst_offset + object->paging_offset,
2600 size,
2601 access_required)) != KERN_SUCCESS) {
2602 if (rc == MACH_SEND_INTERRUPTED)
2603 continue;
2604 else
2605 return KERN_FAILURE;
2606 }
2607 break;
2608
2609 }
2610 /* let's wait on the last page requested */
2611 /* NOTE: we will have to update lock completed routine to signal */
2612 if(dst_page != VM_PAGE_NULL &&
2613 (access_required & dst_page->page_lock) != access_required) {
2614 PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
2615 thread_block((void (*)(void))0);
2616 vm_object_lock(object);
2617 }
2618 }
2619 vm_object_unlock(object);
2620 return KERN_SUCCESS;
2621 }
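
/*
 * Illustrative sketch (not compiled): how a supplier of backing store
 * might drive vm_object_upl_request() for a pageout-style request and
 * walk the internal page list it gets back.  The helper name, the
 * flag combination and the error handling are assumptions for
 * exposition only; the caller is still expected to commit or abort
 * the UPL once its I/O completes (see the sketches further below).
 */
#if 0
static kern_return_t
example_upl_request(vm_object_t object, vm_object_offset_t offset)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	unsigned int	page_list_count = MAX_UPL_TRANSFER;
	vm_size_t	size = MAX_UPL_TRANSFER * PAGE_SIZE;
	kern_return_t	kr;
	unsigned int	i;

	kr = vm_object_upl_request(object, offset, size,
		&upl, NULL, &page_list_count,
		UPL_SET_INTERNAL | UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY);
	if (kr != KERN_SUCCESS)
		return kr;

	/* UPL_SET_INTERNAL places the page list right behind the upl */
	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	for (i = 0; i < size / PAGE_SIZE; i++) {
		if (!UPL_PAGE_PRESENT(pl, i))
			continue;	/* hole: this page was skipped */
		/* ... hand UPL_PHYS_PAGE(pl, i) to the device here ... */
	}
	return KERN_SUCCESS;
}
#endif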
2622
2623 /* JMM - Backward compatibility for now */
2624 kern_return_t
2625 vm_fault_list_request(
2626 memory_object_control_t control,
2627 vm_object_offset_t offset,
2628 vm_size_t size,
2629 upl_t *upl_ptr,
2630 upl_page_info_t **user_page_list_ptr,
2631 int page_list_count,
2632 int cntrl_flags)
2633 {
2634 int local_list_count;
2635 upl_page_info_t *user_page_list;
2636 kern_return_t kr;
2637
2638 if (user_page_list_ptr != NULL) {
2639 local_list_count = page_list_count;
2640 user_page_list = *user_page_list_ptr;
2641 } else {
2642 local_list_count = 0;
2643 user_page_list = NULL;
2644 }
2645 kr = memory_object_upl_request(control,
2646 offset,
2647 size,
2648 upl_ptr,
2649 user_page_list,
2650 &local_list_count,
2651 cntrl_flags);
2652
2653 if(kr != KERN_SUCCESS)
2654 return kr;
2655
2656 if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
2657 *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
2658 }
2659
2660 return KERN_SUCCESS;
2661 }
2662
2663
2664
2665 /*
2666 * Routine: vm_object_super_upl_request
2667 * Purpose:
2668 * Cause the population of a portion of a vm_object
2669 * in much the same way as memory_object_upl_request.
2670 * Depending on the nature of the request, the pages
2671 * returned may contain valid data or be uninitialized.
2672 * However, the region may be expanded up to the super
2673 * cluster size provided.
2674 */
2675
2676 __private_extern__ kern_return_t
2677 vm_object_super_upl_request(
2678 vm_object_t object,
2679 vm_object_offset_t offset,
2680 vm_size_t size,
2681 vm_size_t super_cluster,
2682 upl_t *upl,
2683 upl_page_info_t *user_page_list,
2684 unsigned int *page_list_count,
2685 int cntrl_flags)
2686 {
2687 vm_page_t target_page;
2688 int ticket;
2689
2690 if(object->paging_offset > offset)
2691 return KERN_FAILURE;
2692
2693 offset = offset - object->paging_offset;
2694 if(cntrl_flags & UPL_PAGEOUT) {
2695 if((target_page = vm_page_lookup(object, offset))
2696 != VM_PAGE_NULL) {
2697 ticket = target_page->page_ticket;
2698 cntrl_flags = cntrl_flags & ~(int)UPL_PAGE_TICKET_MASK;
2699 cntrl_flags = cntrl_flags |
2700 ((ticket << UPL_PAGE_TICKET_SHIFT)
2701 & UPL_PAGE_TICKET_MASK);
2702 }
2703 }
2704
2705
2706 /* turns off super cluster exercised by the default_pager */
2707 /*
2708 super_cluster = size;
2709 */
2710 if ((super_cluster > size) &&
2711 (vm_page_free_count > vm_page_free_reserved)) {
2712
2713 vm_object_offset_t base_offset;
2714 vm_size_t super_size;
2715
2716 base_offset = (offset &
2717 ~((vm_object_offset_t) super_cluster - 1));
2718 super_size = (offset+size) > (base_offset + super_cluster) ?
2719 super_cluster<<1 : super_cluster;
2720 super_size = ((base_offset + super_size) > object->size) ?
2721 (object->size - base_offset) : super_size;
2722 if(offset > (base_offset + super_size))
2723 panic("vm_object_super_upl_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset, base_offset, super_size, super_cluster, size, object->paging_offset);
2724 /* apparently there is a case where the vm requests a */
2725 /* page to be written out whose offset is beyond the */
2726 /* object size */
2727 if((offset + size) > (base_offset + super_size))
2728 super_size = (offset + size) - base_offset;
2729
2730 offset = base_offset;
2731 size = super_size;
2732 }
2733 return vm_object_upl_request(object, offset, size,
2734 upl, user_page_list, page_list_count,
2735 cntrl_flags);
2736 }
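
/*
 * Worked example of the super-cluster expansion above (the numbers are
 * illustrative only): with super_cluster = 0x10000 (64KB), a request
 * for offset = 0x11000, size = 0x2000 rounds down to
 * base_offset = 0x10000 (offset & ~(super_cluster - 1)).  Since
 * offset + size = 0x13000 still fits within base_offset + super_cluster
 * = 0x20000, super_size stays at one super cluster; had the request
 * straddled that boundary, super_size would have doubled.  The result
 * is then trimmed so it never extends past object->size, and finally
 * widened again if the original request would otherwise be cut off.
 */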
2737
2738
2739 kern_return_t
2740 vm_upl_map(
2741 vm_map_t map,
2742 upl_t upl,
2743 vm_offset_t *dst_addr)
2744 {
2745 vm_size_t size;
2746 vm_object_offset_t offset;
2747 vm_offset_t addr;
2748 vm_page_t m;
2749 kern_return_t kr;
2750
2751 if (upl == UPL_NULL)
2752 return KERN_INVALID_ARGUMENT;
2753
2754 upl_lock(upl);
2755
2756 /* check to see if already mapped */
2757 if(UPL_PAGE_LIST_MAPPED & upl->flags) {
2758 upl_unlock(upl);
2759 return KERN_FAILURE;
2760 }
2761
2762 offset = 0; /* Always map the entire object */
2763 size = upl->size;
2764
2765 vm_object_lock(upl->map_object);
2766 upl->map_object->ref_count++;
2767 vm_object_res_reference(upl->map_object);
2768 vm_object_unlock(upl->map_object);
2769
2770 *dst_addr = 0;
2771
2772
2773 /* NEED A UPL_MAP ALIAS */
2774 kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE,
2775 upl->map_object, offset, FALSE,
2776 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
2777
2778 if (kr != KERN_SUCCESS) {
2779 upl_unlock(upl);
2780 return(kr);
2781 }
2782
2783 for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
2784 m = vm_page_lookup(upl->map_object, offset);
2785 if(m) {
2786 unsigned int cache_attr;
2787 cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
2788
2789 PMAP_ENTER(map->pmap, addr,
2790 m, VM_PROT_ALL,
2791 cache_attr, TRUE);
2792 }
2793 offset+=PAGE_SIZE_64;
2794 }
2795 upl->ref_count++; /* hold a reference for the mapping */
2796 upl->flags |= UPL_PAGE_LIST_MAPPED;
2797 upl->kaddr = *dst_addr;
2798 upl_unlock(upl);
2799 return KERN_SUCCESS;
2800 }
2801
2802
2803 kern_return_t
2804 vm_upl_unmap(
2805 vm_map_t map,
2806 upl_t upl)
2807 {
2808 vm_address_t addr;
2809 vm_size_t size;
2810
2811 if (upl == UPL_NULL)
2812 return KERN_INVALID_ARGUMENT;
2813
2814 upl_lock(upl);
2815 if(upl->flags & UPL_PAGE_LIST_MAPPED) {
2816 addr = upl->kaddr;
2817 size = upl->size;
2818 assert(upl->ref_count > 1);
2819 upl->ref_count--; /* removing mapping ref */
2820 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
2821 upl->kaddr = (vm_offset_t) 0;
2822 upl_unlock(upl);
2823
2824 vm_deallocate(map, addr, size);
2825 return KERN_SUCCESS;
2826 }
2827 upl_unlock(upl);
2828 return KERN_FAILURE;
2829 }
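
/*
 * Illustrative sketch (not compiled): vm_upl_map() and vm_upl_unmap()
 * are meant to be paired.  The map call takes an extra reference on
 * the upl for the lifetime of the mapping, and the unmap call drops
 * it again.  The helper name below is an assumption for exposition.
 */
#if 0
static kern_return_t
example_upl_window(vm_map_t map, upl_t upl)
{
	vm_offset_t	addr;
	kern_return_t	kr;

	kr = vm_upl_map(map, upl, &addr);
	if (kr != KERN_SUCCESS)
		return kr;	/* already mapped, or vm_map_enter failed */

	/* ... access the pages through [addr, addr + upl->size) ... */

	return vm_upl_unmap(map, upl);
}
#endif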
2830
2831 kern_return_t
2832 upl_commit_range(
2833 upl_t upl,
2834 vm_offset_t offset,
2835 vm_size_t size,
2836 int flags,
2837 upl_page_info_t *page_list,
2838 mach_msg_type_number_t count,
2839 boolean_t *empty)
2840 {
2841 vm_size_t xfer_size = size;
2842 vm_object_t shadow_object = upl->map_object->shadow;
2843 vm_object_t object = upl->map_object;
2844 vm_object_offset_t target_offset;
2845 vm_object_offset_t page_offset;
2846 int entry;
2847
2848 *empty = FALSE;
2849
2850 if (upl == UPL_NULL)
2851 return KERN_INVALID_ARGUMENT;
2852
2853 if (count == 0)
2854 page_list = NULL;
2855
2856 upl_lock(upl);
2857 if(upl->flags & UPL_DEVICE_MEMORY) {
2858 xfer_size = 0;
2859 } else if ((offset + size) > upl->size) {
2860 upl_unlock(upl);
2861 return KERN_FAILURE;
2862 }
2863
2864 vm_object_lock(shadow_object);
2865
2866 entry = offset/PAGE_SIZE;
2867 target_offset = (vm_object_offset_t)offset;
2868 while(xfer_size) {
2869 vm_page_t t,m;
2870 upl_page_info_t *p;
2871
2872 if((t = vm_page_lookup(object, target_offset)) != NULL) {
2873
2874 t->pageout = FALSE;
2875 page_offset = t->offset;
2876 VM_PAGE_FREE(t);
2877 t = VM_PAGE_NULL;
2878 m = vm_page_lookup(shadow_object,
2879 page_offset + object->shadow_offset);
2880 if(m != VM_PAGE_NULL) {
2881 vm_object_paging_end(shadow_object);
2882 vm_page_lock_queues();
2883 if ((upl->flags & UPL_CLEAR_DIRTY) ||
2884 (flags & UPL_COMMIT_CLEAR_DIRTY)) {
2885 pmap_clear_modify(m->phys_addr);
2886 m->dirty = FALSE;
2887 }
2888 if(page_list) {
2889 p = &(page_list[entry]);
2890 if(p->phys_addr && p->pageout && !m->pageout) {
2891 m->busy = TRUE;
2892 m->pageout = TRUE;
2893 vm_page_wire(m);
2894 } else if (page_list[entry].phys_addr &&
2895 !p->pageout && m->pageout &&
2896 !m->dump_cleaning) {
2897 m->pageout = FALSE;
2898 m->absent = FALSE;
2899 m->overwriting = FALSE;
2900 vm_page_unwire(m);
2901 PAGE_WAKEUP_DONE(m);
2902 }
2903 page_list[entry].phys_addr = 0;
2904 }
2905 m->dump_cleaning = FALSE;
2906 if(m->laundry) {
2907 vm_page_laundry_count--;
2908 m->laundry = FALSE;
2909 if (vm_page_laundry_count < vm_page_laundry_min) {
2910 vm_page_laundry_min = 0;
2911 thread_wakeup((event_t)
2912 &vm_page_laundry_count);
2913 }
2914 }
2915 if(m->pageout) {
2916 m->cleaning = FALSE;
2917 m->pageout = FALSE;
2918 #if MACH_CLUSTER_STATS
2919 if (m->wanted) vm_pageout_target_collisions++;
2920 #endif
2921 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
2922 m->dirty = pmap_is_modified(m->phys_addr);
2923 if(m->dirty) {
2924 CLUSTER_STAT(
2925 vm_pageout_target_page_dirtied++;)
2926 vm_page_unwire(m);/* reactivates */
2927 VM_STAT(reactivations++);
2928 PAGE_WAKEUP_DONE(m);
2929 } else {
2930 CLUSTER_STAT(
2931 vm_pageout_target_page_freed++;)
2932 vm_page_free(m);/* clears busy, etc. */
2933 VM_STAT(pageouts++);
2934 }
2935 vm_page_unlock_queues();
2936 target_offset += PAGE_SIZE_64;
2937 xfer_size -= PAGE_SIZE;
2938 entry++;
2939 continue;
2940 }
2941 if (flags & UPL_COMMIT_INACTIVATE) {
2942 vm_page_deactivate(m);
2943 m->reference = FALSE;
2944 pmap_clear_reference(m->phys_addr);
2945 } else if (!m->active && !m->inactive) {
2946 if (m->reference)
2947 vm_page_activate(m);
2948 else
2949 vm_page_deactivate(m);
2950 }
2951 #if MACH_CLUSTER_STATS
2952 m->dirty = pmap_is_modified(m->phys_addr);
2953
2954 if (m->dirty) vm_pageout_cluster_dirtied++;
2955 else vm_pageout_cluster_cleaned++;
2956 if (m->wanted) vm_pageout_cluster_collisions++;
2957 #else
2958 m->dirty = 0;
2959 #endif
2960
2961 if((m->busy) && (m->cleaning)) {
2962 /* the request_page_list case */
2963 if(m->absent) {
2964 m->absent = FALSE;
2965 if(shadow_object->absent_count == 1)
2966 vm_object_absent_release(shadow_object);
2967 else
2968 shadow_object->absent_count--;
2969 }
2970 m->overwriting = FALSE;
2971 m->busy = FALSE;
2972 m->dirty = FALSE;
2973 }
2974 else if (m->overwriting) {
2975 /* alternate request page list, write to
2976 * page_list case.  Occurs when the original
2977 * page was wired at the time of the list
2978 * request */
2979 assert(m->wire_count != 0);
2980 vm_page_unwire(m);/* reactivates */
2981 m->overwriting = FALSE;
2982 }
2983 m->cleaning = FALSE;
2984 /* It is a part of the semantic of COPYOUT_FROM */
2985 /* UPLs that a commit implies cache sync */
2986 /* between the vm page and the backing store */
2987 /* this can be used to strip the precious bit */
2988 /* as well as clean */
2989 if (upl->flags & UPL_PAGE_SYNC_DONE)
2990 m->precious = FALSE;
2991
2992 if (flags & UPL_COMMIT_SET_DIRTY) {
2993 m->dirty = TRUE;
2994 }
2995 /*
2996 * Wakeup any thread waiting for the page to be un-cleaning.
2997 */
2998 PAGE_WAKEUP(m);
2999 vm_page_unlock_queues();
3000
3001 }
3002 }
3003 target_offset += PAGE_SIZE_64;
3004 xfer_size -= PAGE_SIZE;
3005 entry++;
3006 }
3007
3008 vm_object_unlock(shadow_object);
3009 if(flags & UPL_COMMIT_NOTIFY_EMPTY) {
3010 if((upl->flags & UPL_DEVICE_MEMORY)
3011 || (queue_empty(&upl->map_object->memq)))
3012 *empty = TRUE;
3013 }
3014 upl_unlock(upl);
3015
3016 return KERN_SUCCESS;
3017 }
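
/*
 * Illustrative sketch (not compiled): a pager that has finished I/O
 * on part of a UPL would typically commit just that range, ask for
 * the dirty state to be cleared, and use UPL_COMMIT_NOTIFY_EMPTY to
 * learn when the last page has drained so it can drop its reference.
 * The helper name and the deallocation policy are assumptions for
 * exposition.
 */
#if 0
static void
example_commit_io_done(upl_t upl, vm_offset_t offset, vm_size_t size)
{
	boolean_t	empty;

	(void) upl_commit_range(upl, offset, size,
		UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_NOTIFY_EMPTY,
		NULL, 0, &empty);
	if (empty)
		upl_deallocate(upl);
}
#endif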
3018
3019 kern_return_t
3020 upl_abort_range(
3021 upl_t upl,
3022 vm_offset_t offset,
3023 vm_size_t size,
3024 int error,
3025 boolean_t *empty)
3026 {
3027 vm_size_t xfer_size = size;
3028 vm_object_t shadow_object = upl->map_object->shadow;
3029 vm_object_t object = upl->map_object;
3030 vm_object_offset_t target_offset;
3031 vm_object_offset_t page_offset;
3032 int entry;
3033
3034 *empty = FALSE;
3035
3036 if (upl == UPL_NULL)
3037 return KERN_INVALID_ARGUMENT;
3038
3039 upl_lock(upl);
3040 if(upl->flags & UPL_DEVICE_MEMORY) {
3041 xfer_size = 0;
3042 } else if ((offset + size) > upl->size) {
3043 upl_unlock(upl);
3044 return KERN_FAILURE;
3045 }
3046
3047 vm_object_lock(shadow_object);
3048
3049 entry = offset/PAGE_SIZE;
3050 target_offset = (vm_object_offset_t)offset;
3051 while(xfer_size) {
3052 vm_page_t t,m;
3053 upl_page_info_t *p;
3054
3055 if((t = vm_page_lookup(object, target_offset)) != NULL) {
3056
3057 t->pageout = FALSE;
3058 page_offset = t->offset;
3059 VM_PAGE_FREE(t);
3060 t = VM_PAGE_NULL;
3061 m = vm_page_lookup(shadow_object,
3062 page_offset + object->shadow_offset);
3063 if(m != VM_PAGE_NULL) {
3064 vm_object_paging_end(m->object);
3065 vm_page_lock_queues();
3066 if(m->absent) {
3067 /* COPYOUT = FALSE case */
3068 /* check for error conditions which must */
3069 /* be passed back to the page's customer */
3070 if(error & UPL_ABORT_RESTART) {
3071 m->restart = TRUE;
3072 m->absent = FALSE;
3073 vm_object_absent_release(m->object);
3074 m->page_error = KERN_MEMORY_ERROR;
3075 m->error = TRUE;
3076 } else if(error & UPL_ABORT_UNAVAILABLE) {
3077 m->restart = FALSE;
3078 m->unusual = TRUE;
3079 m->clustered = FALSE;
3080 } else if(error & UPL_ABORT_ERROR) {
3081 m->restart = FALSE;
3082 m->absent = FALSE;
3083 vm_object_absent_release(m->object);
3084 m->page_error = KERN_MEMORY_ERROR;
3085 m->error = TRUE;
3086 } else if(error & UPL_ABORT_DUMP_PAGES) {
3087 m->clustered = TRUE;
3088 } else {
3089 m->clustered = TRUE;
3090 }
3091
3092
3093 m->cleaning = FALSE;
3094 m->overwriting = FALSE;
3095 PAGE_WAKEUP_DONE(m);
3096 if(m->clustered) {
3097 vm_page_free(m);
3098 } else {
3099 vm_page_activate(m);
3100 }
3101
3102 vm_page_unlock_queues();
3103 target_offset += PAGE_SIZE_64;
3104 xfer_size -= PAGE_SIZE;
3105 entry++;
3106 continue;
3107 }
3108 /*
3109 * Handle the trusted pager throttle.
3110 */
3111 if (m->laundry) {
3112 vm_page_laundry_count--;
3113 m->laundry = FALSE;
3114 if (vm_page_laundry_count
3115 < vm_page_laundry_min) {
3116 vm_page_laundry_min = 0;
3117 thread_wakeup((event_t)
3118 &vm_page_laundry_count);
3119 }
3120 }
3121 if(m->pageout) {
3122 assert(m->busy);
3123 assert(m->wire_count == 1);
3124 m->pageout = FALSE;
3125 vm_page_unwire(m);
3126 }
3127 m->dump_cleaning = FALSE;
3128 m->cleaning = FALSE;
3129 m->busy = FALSE;
3130 m->overwriting = FALSE;
3131 #if MACH_PAGEMAP
3132 vm_external_state_clr(
3133 m->object->existence_map, m->offset);
3134 #endif /* MACH_PAGEMAP */
3135 if(error & UPL_ABORT_DUMP_PAGES) {
3136 vm_page_free(m);
3137 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
3138 } else {
3139 PAGE_WAKEUP(m);
3140 }
3141 vm_page_unlock_queues();
3142 }
3143 }
3144 target_offset += PAGE_SIZE_64;
3145 xfer_size -= PAGE_SIZE;
3146 entry++;
3147 }
3148 vm_object_unlock(shadow_object);
3149 if(error & UPL_ABORT_NOTIFY_EMPTY) {
3150 if((upl->flags & UPL_DEVICE_MEMORY)
3151 || (queue_empty(&upl->map_object->memq)))
3152 *empty = TRUE;
3153 }
3154 upl_unlock(upl);
3155 return KERN_SUCCESS;
3156 }
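
/*
 * Illustrative sketch (not compiled): the error-path counterpart of
 * the commit sketch above.  On an I/O failure the pager aborts the
 * affected range so its pages are either marked in error or released
 * from their busy/cleaning state rather than left stranded.  The
 * helper name is an assumption for exposition.
 */
#if 0
static void
example_abort_io_failed(upl_t upl, vm_offset_t offset, vm_size_t size)
{
	boolean_t	empty;

	(void) upl_abort_range(upl, offset, size,
		UPL_ABORT_ERROR | UPL_ABORT_NOTIFY_EMPTY, &empty);
	if (empty)
		upl_deallocate(upl);
}
#endif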
3157
3158 kern_return_t
3159 upl_abort(
3160 upl_t upl,
3161 int error)
3162 {
3163 vm_object_t object = NULL;
3164 vm_object_t shadow_object = NULL;
3165 vm_object_offset_t offset;
3166 vm_object_offset_t shadow_offset;
3167 vm_object_offset_t target_offset;
3168 int i;
3169 vm_page_t t,m;
3170
3171 if (upl == UPL_NULL)
3172 return KERN_INVALID_ARGUMENT;
3173
3174 upl_lock(upl);
3175 if(upl->flags & UPL_DEVICE_MEMORY) {
3176 upl_unlock(upl);
3177 return KERN_SUCCESS;
3178 }
3179
3180 object = upl->map_object;
3181
3182 if (object == NULL) {
3183 panic("upl_abort: upl object is not backed by an object");
3184 upl_unlock(upl);
3185 return KERN_INVALID_ARGUMENT;
3186 }
3187
3188 shadow_object = upl->map_object->shadow;
3189 shadow_offset = upl->map_object->shadow_offset;
3190 offset = 0;
3191 vm_object_lock(shadow_object);
3192 for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) {
3193 if((t = vm_page_lookup(object,offset)) != NULL) {
3194 target_offset = t->offset + shadow_offset;
3195 if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) {
3196 vm_object_paging_end(m->object);
3197 vm_page_lock_queues();
3198 if(m->absent) {
3199 /* COPYOUT = FALSE case */
3200 /* check for error conditions which must */
3201 /* be passed back to the page's customer */
3202 if(error & UPL_ABORT_RESTART) {
3203 m->restart = TRUE;
3204 m->absent = FALSE;
3205 vm_object_absent_release(m->object);
3206 m->page_error = KERN_MEMORY_ERROR;
3207 m->error = TRUE;
3208 } else if(error & UPL_ABORT_UNAVAILABLE) {
3209 m->restart = FALSE;
3210 m->unusual = TRUE;
3211 m->clustered = FALSE;
3212 } else if(error & UPL_ABORT_ERROR) {
3213 m->restart = FALSE;
3214 m->absent = FALSE;
3215 vm_object_absent_release(m->object);
3216 m->page_error = KERN_MEMORY_ERROR;
3217 m->error = TRUE;
3218 } else if(error & UPL_ABORT_DUMP_PAGES) {
3219 m->clustered = TRUE;
3220 } else {
3221 m->clustered = TRUE;
3222 }
3223
3224 m->cleaning = FALSE;
3225 m->overwriting = FALSE;
3226 PAGE_WAKEUP_DONE(m);
3227 if(m->clustered) {
3228 vm_page_free(m);
3229 } else {
3230 vm_page_activate(m);
3231 }
3232 vm_page_unlock_queues();
3233 continue;
3234 }
3235 /*
3236 * Handle the trusted pager throttle.
3237 */
3238 if (m->laundry) {
3239 vm_page_laundry_count--;
3240 m->laundry = FALSE;
3241 if (vm_page_laundry_count
3242 < vm_page_laundry_min) {
3243 vm_page_laundry_min = 0;
3244 thread_wakeup((event_t)
3245 &vm_page_laundry_count);
3246 }
3247 }
3248 if(m->pageout) {
3249 assert(m->busy);
3250 assert(m->wire_count == 1);
3251 m->pageout = FALSE;
3252 vm_page_unwire(m);
3253 }
3254 m->dump_cleaning = FALSE;
3255 m->cleaning = FALSE;
3256 m->busy = FALSE;
3257 m->overwriting = FALSE;
3258 #if MACH_PAGEMAP
3259 vm_external_state_clr(
3260 m->object->existence_map, m->offset);
3261 #endif /* MACH_PAGEMAP */
3262 if(error & UPL_ABORT_DUMP_PAGES) {
3263 vm_page_free(m);
3264 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
3265 } else {
3266 PAGE_WAKEUP(m);
3267 }
3268 vm_page_unlock_queues();
3269 }
3270 }
3271 }
3272 vm_object_unlock(shadow_object);
3273 /* Remove all the pages from the map object so */
3274 /* vm_pageout_object_terminate will work properly. */
3275 while (!queue_empty(&upl->map_object->memq)) {
3276 vm_page_t p;
3277
3278 p = (vm_page_t) queue_first(&upl->map_object->memq);
3279
3280 assert(p->private);
3281 assert(p->pageout);
3282 p->pageout = FALSE;
3283 assert(!p->cleaning);
3284
3285 VM_PAGE_FREE(p);
3286 }
3287 upl_unlock(upl);
3288 return KERN_SUCCESS;
3289 }
3290
3291 /* an option on commit should be wire */
3292 kern_return_t
3293 upl_commit(
3294 upl_t upl,
3295 upl_page_info_t *page_list,
3296 mach_msg_type_number_t count)
3297 {
3298 if (upl == UPL_NULL)
3299 return KERN_INVALID_ARGUMENT;
3300
3301 if (count == 0)
3302 page_list = NULL;
3303
3304 upl_lock(upl);
3305 if (upl->flags & UPL_DEVICE_MEMORY)
3306 page_list = NULL;
3307 if ((upl->flags & UPL_CLEAR_DIRTY) ||
3308 (upl->flags & UPL_PAGE_SYNC_DONE)) {
3309 vm_object_t shadow_object = upl->map_object->shadow;
3310 vm_object_t object = upl->map_object;
3311 vm_object_offset_t target_offset;
3312 vm_size_t xfer_end;
3313
3314 vm_page_t t,m;
3315
3316 vm_object_lock(shadow_object);
3317
3318 target_offset = object->shadow_offset;
3319 xfer_end = upl->size + object->shadow_offset;
3320
3321 while(target_offset < xfer_end) {
3322 if ((t = vm_page_lookup(object,
3323 target_offset - object->shadow_offset))
3324 != NULL) {
3325 m = vm_page_lookup(
3326 shadow_object, target_offset);
3327 if(m != VM_PAGE_NULL) {
3328 if (upl->flags & UPL_CLEAR_DIRTY) {
3329 pmap_clear_modify(m->phys_addr);
3330 m->dirty = FALSE;
3331 }
3332 /* It is a part of the semantic of */
3333 /* COPYOUT_FROM UPLs that a commit */
3334 /* implies cache sync between the */
3335 /* vm page and the backing store */
3336 /* this can be used to strip the */
3337 /* precious bit as well as clean */
3338 if (upl->flags & UPL_PAGE_SYNC_DONE)
3339 m->precious = FALSE;
3340 }
3341 }
3342 target_offset += PAGE_SIZE_64;
3343 }
3344 vm_object_unlock(shadow_object);
3345 }
3346 if (page_list) {
3347 vm_object_t shadow_object = upl->map_object->shadow;
3348 vm_object_t object = upl->map_object;
3349 vm_object_offset_t target_offset;
3350 vm_size_t xfer_end;
3351 int entry;
3352
3353 vm_page_t t, m;
3354 upl_page_info_t *p;
3355
3356 vm_object_lock(shadow_object);
3357
3358 entry = 0;
3359 target_offset = object->shadow_offset;
3360 xfer_end = upl->size + object->shadow_offset;
3361
3362 while(target_offset < xfer_end) {
3363
3364 if ((t = vm_page_lookup(object,
3365 target_offset - object->shadow_offset))
3366 == NULL) {
3367 target_offset += PAGE_SIZE_64;
3368 entry++;
3369 continue;
3370 }
3371
3372 m = vm_page_lookup(shadow_object, target_offset);
3373 if(m != VM_PAGE_NULL) {
3374 p = &(page_list[entry]);
3375 if(page_list[entry].phys_addr &&
3376 p->pageout && !m->pageout) {
3377 vm_page_lock_queues();
3378 m->busy = TRUE;
3379 m->pageout = TRUE;
3380 vm_page_wire(m);
3381 vm_page_unlock_queues();
3382 } else if (page_list[entry].phys_addr &&
3383 !p->pageout && m->pageout &&
3384 !m->dump_cleaning) {
3385 vm_page_lock_queues();
3386 m->pageout = FALSE;
3387 m->absent = FALSE;
3388 m->overwriting = FALSE;
3389 vm_page_unwire(m);
3390 PAGE_WAKEUP_DONE(m);
3391 vm_page_unlock_queues();
3392 }
3393 page_list[entry].phys_addr = 0;
3394 }
3395 target_offset += PAGE_SIZE_64;
3396 entry++;
3397 }
3398
3399 vm_object_unlock(shadow_object);
3400 }
3401 upl_unlock(upl);
3402 return KERN_SUCCESS;
3403 }
3404
3405 vm_size_t
3406 upl_get_internal_pagelist_offset()
3407 {
3408 return sizeof(struct upl);
3409 }
3410
3411 void
3412 upl_set_dirty(
3413 upl_t upl)
3414 {
3415 upl->flags |= UPL_CLEAR_DIRTY;
3416 }
3417
3418 void
3419 upl_clear_dirty(
3420 upl_t upl)
3421 {
3422 upl->flags &= ~UPL_CLEAR_DIRTY;
3423 }
3424
3425
3426 #ifdef MACH_BSD
3427
3428 boolean_t upl_page_present(upl_page_info_t *upl, int index)
3429 {
3430 return(UPL_PAGE_PRESENT(upl, index));
3431 }
3432 boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
3433 {
3434 return(UPL_DIRTY_PAGE(upl, index));
3435 }
3436 boolean_t upl_valid_page(upl_page_info_t *upl, int index)
3437 {
3438 return(UPL_VALID_PAGE(upl, index));
3439 }
3440 vm_offset_t upl_phys_page(upl_page_info_t *upl, int index)
3441 {
3442 return((vm_offset_t)UPL_PHYS_PAGE(upl, index));
3443 }
3444
3445 void
3446 vm_countdirtypages(void)
3447 {
3448 vm_page_t m;
3449 int dpages;
3450 int pgopages;
3451 int precpages;
3452
3453
3454 dpages=0;
3455 pgopages=0;
3456 precpages=0;
3457
3458 vm_page_lock_queues();
3459 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
3460 do {
3461 if (m ==(vm_page_t )0) break;
3462
3463 if(m->dirty) dpages++;
3464 if(m->pageout) pgopages++;
3465 if(m->precious) precpages++;
3466
3467 m = (vm_page_t) queue_next(&m->pageq);
3468 if (m ==(vm_page_t )0) break;
3469
3470 } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
3471 vm_page_unlock_queues();
3472
3473 vm_page_lock_queues();
3474 m = (vm_page_t) queue_first(&vm_page_queue_zf);
3475 do {
3476 if (m ==(vm_page_t )0) break;
3477
3478 if(m->dirty) dpages++;
3479 if(m->pageout) pgopages++;
3480 if(m->precious) precpages++;
3481
3482 m = (vm_page_t) queue_next(&m->pageq);
3483 if (m ==(vm_page_t )0) break;
3484
3485 } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
3486 vm_page_unlock_queues();
3487
3488 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
3489
3490 dpages=0;
3491 pgopages=0;
3492 precpages=0;
3493
3494 vm_page_lock_queues();
3495 m = (vm_page_t) queue_first(&vm_page_queue_active);
3496
3497 do {
3498 if(m == (vm_page_t )0) break;
3499 if(m->dirty) dpages++;
3500 if(m->pageout) pgopages++;
3501 if(m->precious) precpages++;
3502
3503 m = (vm_page_t) queue_next(&m->pageq);
3504 if(m == (vm_page_t )0) break;
3505
3506 } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
3507 vm_page_unlock_queues();
3508
3509 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
3510
3511 }
3512 #endif /* MACH_BSD */
3513
3514 #ifdef UBC_DEBUG
3515 kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
3516 {
3517 upl->ubc_alias1 = alias1;
3518 upl->ubc_alias2 = alias2;
3519 return KERN_SUCCESS;
3520 }
3521 int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
3522 {
3523 if(al)
3524 *al = upl->ubc_alias1;
3525 if(al2)
3526 *al2 = upl->ubc_alias2;
3527 return KERN_SUCCESS;
3528 }
3529 #endif /* UBC_DEBUG */
3530
3531
3532
3533 #if MACH_KDB
3534 #include <ddb/db_output.h>
3535 #include <ddb/db_print.h>
3536 #include <vm/vm_print.h>
3537
3538 #define printf kdbprintf
3539 extern int db_indent;
3540 void db_pageout(void);
3541
3542 void
3543 db_vm(void)
3544 {
3545 extern int vm_page_gobble_count;
3546
3547 iprintf("VM Statistics:\n");
3548 db_indent += 2;
3549 iprintf("pages:\n");
3550 db_indent += 2;
3551 iprintf("activ %5d inact %5d free %5d",
3552 vm_page_active_count, vm_page_inactive_count,
3553 vm_page_free_count);
3554 printf(" wire %5d gobbl %5d\n",
3555 vm_page_wire_count, vm_page_gobble_count);
3556 iprintf("laund %5d\n",
3557 vm_page_laundry_count);
3558 db_indent -= 2;
3559 iprintf("target:\n");
3560 db_indent += 2;
3561 iprintf("min %5d inact %5d free %5d",
3562 vm_page_free_min, vm_page_inactive_target,
3563 vm_page_free_target);
3564 printf(" resrv %5d\n", vm_page_free_reserved);
3565 db_indent -= 2;
3566
3567 iprintf("burst:\n");
3568 db_indent += 2;
3569 iprintf("max %5d min %5d wait %5d empty %5d\n",
3570 vm_pageout_burst_max, vm_pageout_burst_min,
3571 vm_pageout_burst_wait, vm_pageout_empty_wait);
3572 db_indent -= 2;
3573 iprintf("pause:\n");
3574 db_indent += 2;
3575 iprintf("count %5d max %5d\n",
3576 vm_pageout_pause_count, vm_pageout_pause_max);
3577 #if MACH_COUNTERS
3578 iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue);
3579 #endif /* MACH_COUNTERS */
3580 db_indent -= 2;
3581 db_pageout();
3582 db_indent -= 2;
3583 }
3584
3585 void
3586 db_pageout(void)
3587 {
3588 #if MACH_COUNTERS
3589 extern int c_laundry_pages_freed;
3590 #endif /* MACH_COUNTERS */
3591
3592 iprintf("Pageout Statistics:\n");
3593 db_indent += 2;
3594 iprintf("active %5d inactv %5d\n",
3595 vm_pageout_active, vm_pageout_inactive);
3596 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
3597 vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
3598 vm_pageout_inactive_busy, vm_pageout_inactive_absent);
3599 iprintf("used %5d clean %5d dirty %5d\n",
3600 vm_pageout_inactive_used, vm_pageout_inactive_clean,
3601 vm_pageout_inactive_dirty);
3602 #if MACH_COUNTERS
3603 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
3604 #endif /* MACH_COUNTERS */
3605 #if MACH_CLUSTER_STATS
3606 iprintf("Cluster Statistics:\n");
3607 db_indent += 2;
3608 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
3609 vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
3610 vm_pageout_cluster_collisions);
3611 iprintf("clusters %5d conversions %5d\n",
3612 vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
3613 db_indent -= 2;
3614 iprintf("Target Statistics:\n");
3615 db_indent += 2;
3616 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
3617 vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
3618 vm_pageout_target_page_freed);
3619 db_indent -= 2;
3620 #endif /* MACH_CLUSTER_STATS */
3621 db_indent -= 2;
3622 }
3623
3624 #if MACH_CLUSTER_STATS
3625 unsigned long vm_pageout_cluster_dirtied = 0;
3626 unsigned long vm_pageout_cluster_cleaned = 0;
3627 unsigned long vm_pageout_cluster_collisions = 0;
3628 unsigned long vm_pageout_cluster_clusters = 0;
3629 unsigned long vm_pageout_cluster_conversions = 0;
3630 unsigned long vm_pageout_target_collisions = 0;
3631 unsigned long vm_pageout_target_page_dirtied = 0;
3632 unsigned long vm_pageout_target_page_freed = 0;
3633 #define CLUSTER_STAT(clause) clause
3634 #else /* MACH_CLUSTER_STATS */
3635 #define CLUSTER_STAT(clause)
3636 #endif /* MACH_CLUSTER_STATS */
3637
3638 #endif /* MACH_KDB */