]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_pageout.c
xnu-1699.32.7.tar.gz
[apple/xnu.git] / osfmk / vm / vm_pageout.c
CommitLineData
1c79356b 1/*
b0d623f7 2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
1c79356b 65
91447636
A
66#include <stdint.h>
67
68#include <debug.h>
1c79356b
A
69#include <mach_pagemap.h>
70#include <mach_cluster_stats.h>
71#include <mach_kdb.h>
72#include <advisory_pageout.h>
73
74#include <mach/mach_types.h>
75#include <mach/memory_object.h>
76#include <mach/memory_object_default.h>
0b4e3aa0 77#include <mach/memory_object_control_server.h>
1c79356b 78#include <mach/mach_host_server.h>
91447636
A
79#include <mach/upl.h>
80#include <mach/vm_map.h>
1c79356b
A
81#include <mach/vm_param.h>
82#include <mach/vm_statistics.h>
2d21ac55 83#include <mach/sdt.h>
91447636
A
84
85#include <kern/kern_types.h>
1c79356b 86#include <kern/counters.h>
91447636
A
87#include <kern/host_statistics.h>
88#include <kern/machine.h>
89#include <kern/misc_protos.h>
b0d623f7 90#include <kern/sched.h>
1c79356b 91#include <kern/thread.h>
1c79356b 92#include <kern/xpr.h>
91447636
A
93#include <kern/kalloc.h>
94
95#include <machine/vm_tuning.h>
b0d623f7 96#include <machine/commpage.h>
91447636 97
2d21ac55 98#include <sys/kern_memorystatus.h>
2d21ac55 99
1c79356b 100#include <vm/pmap.h>
55e303ae 101#include <vm/vm_fault.h>
1c79356b
A
102#include <vm/vm_map.h>
103#include <vm/vm_object.h>
104#include <vm/vm_page.h>
105#include <vm/vm_pageout.h>
91447636 106#include <vm/vm_protos.h> /* must be last */
2d21ac55
A
107#include <vm/memory_object.h>
108#include <vm/vm_purgeable_internal.h>
6d2010ae 109#include <vm/vm_shared_region.h>
91447636
A
110/*
111 * ENCRYPTED SWAP:
112 */
91447636 113#include <../bsd/crypto/aes/aes.h>
b0d623f7 114extern u_int32_t random(void); /* from <libkern/libkern.h> */
55e303ae 115
b0d623f7
A
116#if UPL_DEBUG
117#include <libkern/OSDebug.h>
118#endif
91447636 119
6d2010ae
A
120extern void consider_pressure_events(void);
121
2d21ac55 122#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE /* maximum iterations of the active queue to move pages to inactive */
2d21ac55
A
123#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
124#endif
91447636 125
2d21ac55
A
126#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
127#ifdef CONFIG_EMBEDDED
128#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
129#else
130#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
131#endif
91447636
A
132#endif
133
134#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
135#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
136#endif
137
138#ifndef VM_PAGEOUT_INACTIVE_RELIEF
139#define VM_PAGEOUT_INACTIVE_RELIEF 50 /* minimum number of pages to move to the inactive q */
140#endif
141
1c79356b 142#ifndef VM_PAGE_LAUNDRY_MAX
6d2010ae 143#define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */
1c79356b
A
144#endif /* VM_PAGEOUT_LAUNDRY_MAX */
145
1c79356b 146#ifndef VM_PAGEOUT_BURST_WAIT
6d2010ae 147#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds */
1c79356b
A
148#endif /* VM_PAGEOUT_BURST_WAIT */
149
150#ifndef VM_PAGEOUT_EMPTY_WAIT
151#define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
152#endif /* VM_PAGEOUT_EMPTY_WAIT */
153
91447636
A
154#ifndef VM_PAGEOUT_DEADLOCK_WAIT
155#define VM_PAGEOUT_DEADLOCK_WAIT 300 /* milliseconds */
156#endif /* VM_PAGEOUT_DEADLOCK_WAIT */
157
158#ifndef VM_PAGEOUT_IDLE_WAIT
159#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
160#endif /* VM_PAGEOUT_IDLE_WAIT */
161
6d2010ae
A
162unsigned int vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
163unsigned int vm_page_speculative_percentage = 5;
164
2d21ac55 165#ifndef VM_PAGE_SPECULATIVE_TARGET
6d2010ae 166#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
2d21ac55
A
167#endif /* VM_PAGE_SPECULATIVE_TARGET */
168
6d2010ae 169
2d21ac55
A
170#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
171#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
172#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
173
91447636 174
1c79356b
A
175/*
176 * To obtain a reasonable LRU approximation, the inactive queue
177 * needs to be large enough to give pages on it a chance to be
178 * referenced a second time. This macro defines the fraction
179 * of active+inactive pages that should be inactive.
180 * The pageout daemon uses it to update vm_page_inactive_target.
181 *
182 * If vm_page_free_count falls below vm_page_free_target and
183 * vm_page_inactive_count is below vm_page_inactive_target,
184 * then the pageout daemon starts running.
185 */
186
187#ifndef VM_PAGE_INACTIVE_TARGET
188#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
189#endif /* VM_PAGE_INACTIVE_TARGET */
190
191/*
192 * Once the pageout daemon starts running, it keeps going
193 * until vm_page_free_count meets or exceeds vm_page_free_target.
194 */
195
196#ifndef VM_PAGE_FREE_TARGET
2d21ac55
A
197#ifdef CONFIG_EMBEDDED
198#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
199#else
1c79356b 200#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
2d21ac55 201#endif
1c79356b
A
202#endif /* VM_PAGE_FREE_TARGET */
203
204/*
205 * The pageout daemon always starts running once vm_page_free_count
206 * falls below vm_page_free_min.
207 */
208
209#ifndef VM_PAGE_FREE_MIN
2d21ac55
A
210#ifdef CONFIG_EMBEDDED
211#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
212#else
213#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
214#endif
1c79356b
A
215#endif /* VM_PAGE_FREE_MIN */
216
6d2010ae 217#define VM_PAGE_FREE_RESERVED_LIMIT 100
2d21ac55
A
218#define VM_PAGE_FREE_MIN_LIMIT 1500
219#define VM_PAGE_FREE_TARGET_LIMIT 2000
220
221
1c79356b
A
222/*
223 * When vm_page_free_count falls below vm_page_free_reserved,
224 * only vm-privileged threads can allocate pages. vm-privilege
225 * allows the pageout daemon and default pager (and any other
226 * associated threads needed for default pageout) to continue
227 * operation by dipping into the reserved pool of pages.
228 */
229
230#ifndef VM_PAGE_FREE_RESERVED
91447636 231#define VM_PAGE_FREE_RESERVED(n) \
b0d623f7 232 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
1c79356b
A
233#endif /* VM_PAGE_FREE_RESERVED */
234
2d21ac55
A
235/*
236 * When we dequeue pages from the inactive list, they are
237 * reactivated (ie, put back on the active queue) if referenced.
238 * However, it is possible to starve the free list if other
239 * processors are referencing pages faster than we can turn off
240 * the referenced bit. So we limit the number of reactivations
241 * we will make per call of vm_pageout_scan().
242 */
243#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
244#ifndef VM_PAGE_REACTIVATE_LIMIT
245#ifdef CONFIG_EMBEDDED
246#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
247#else
248#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
249#endif
250#endif /* VM_PAGE_REACTIVATE_LIMIT */
251#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
252
91447636 253
0b4e3aa0
A
254/*
255 * Exported variable used to broadcast the activation of the pageout scan
256 * Working Set uses this to throttle its use of pmap removes. In this
257 * way, code which runs within memory in an uncontested context does
258 * not keep encountering soft faults.
259 */
260
261unsigned int vm_pageout_scan_event_counter = 0;
1c79356b
A
262
263/*
264 * Forward declarations for internal routines.
265 */
91447636
A
266
267static void vm_pageout_garbage_collect(int);
268static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
269static void vm_pageout_iothread_external(void);
270static void vm_pageout_iothread_internal(void);
91447636 271
1c79356b
A
272extern void vm_pageout_continue(void);
273extern void vm_pageout_scan(void);
1c79356b 274
2d21ac55
A
275static thread_t vm_pageout_external_iothread = THREAD_NULL;
276static thread_t vm_pageout_internal_iothread = THREAD_NULL;
277
1c79356b
A
278unsigned int vm_pageout_reserved_internal = 0;
279unsigned int vm_pageout_reserved_really = 0;
280
91447636 281unsigned int vm_pageout_idle_wait = 0; /* milliseconds */
55e303ae 282unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
91447636
A
283unsigned int vm_pageout_burst_wait = 0; /* milliseconds */
284unsigned int vm_pageout_deadlock_wait = 0; /* milliseconds */
285unsigned int vm_pageout_deadlock_relief = 0;
286unsigned int vm_pageout_inactive_relief = 0;
287unsigned int vm_pageout_burst_active_throttle = 0;
288unsigned int vm_pageout_burst_inactive_throttle = 0;
1c79356b 289
6d2010ae
A
290int vm_upl_wait_for_pages = 0;
291
9bccf70c
A
292/*
293 * Protection against zero fill flushing live working sets derived
294 * from existing backing store and files
295 */
296unsigned int vm_accellerate_zf_pageout_trigger = 400;
2d21ac55 297unsigned int zf_queue_min_count = 100;
2d21ac55 298unsigned int vm_zf_queue_count = 0;
9bccf70c 299
b0d623f7 300uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
b0d623f7 301
1c79356b
A
302/*
303 * These variables record the pageout daemon's actions:
304 * how many pages it looks at and what happens to those pages.
305 * No locking needed because only one thread modifies the variables.
306 */
307
308unsigned int vm_pageout_active = 0; /* debugging */
6d2010ae 309unsigned int vm_pageout_active_busy = 0; /* debugging */
1c79356b
A
310unsigned int vm_pageout_inactive = 0; /* debugging */
311unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
312unsigned int vm_pageout_inactive_forced = 0; /* debugging */
313unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
314unsigned int vm_pageout_inactive_avoid = 0; /* debugging */
315unsigned int vm_pageout_inactive_busy = 0; /* debugging */
6d2010ae 316unsigned int vm_pageout_inactive_error = 0; /* debugging */
1c79356b 317unsigned int vm_pageout_inactive_absent = 0; /* debugging */
6d2010ae 318unsigned int vm_pageout_inactive_notalive = 0; /* debugging */
1c79356b 319unsigned int vm_pageout_inactive_used = 0; /* debugging */
6d2010ae 320unsigned int vm_pageout_cache_evicted = 0; /* debugging */
1c79356b 321unsigned int vm_pageout_inactive_clean = 0; /* debugging */
6d2010ae
A
322unsigned int vm_pageout_speculative_clean = 0; /* debugging */
323unsigned int vm_pageout_inactive_dirty_internal = 0; /* debugging */
324unsigned int vm_pageout_inactive_dirty_external = 0; /* debugging */
b0d623f7
A
325unsigned int vm_pageout_inactive_deactivated = 0; /* debugging */
326unsigned int vm_pageout_inactive_zf = 0; /* debugging */
1c79356b 327unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
91447636 328unsigned int vm_pageout_purged_objects = 0; /* debugging */
1c79356b
A
329unsigned int vm_stat_discard = 0; /* debugging */
330unsigned int vm_stat_discard_sent = 0; /* debugging */
331unsigned int vm_stat_discard_failure = 0; /* debugging */
332unsigned int vm_stat_discard_throttle = 0; /* debugging */
2d21ac55
A
333unsigned int vm_pageout_reactivation_limit_exceeded = 0; /* debugging */
334unsigned int vm_pageout_catch_ups = 0; /* debugging */
335unsigned int vm_pageout_inactive_force_reclaim = 0; /* debugging */
1c79356b 336
6d2010ae 337unsigned int vm_pageout_scan_reclaimed_throttled = 0;
91447636 338unsigned int vm_pageout_scan_active_throttled = 0;
6d2010ae
A
339unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
340unsigned int vm_pageout_scan_inactive_throttled_external = 0;
91447636 341unsigned int vm_pageout_scan_throttle = 0; /* debugging */
b0d623f7 342unsigned int vm_pageout_scan_throttle_aborted = 0; /* debugging */
91447636
A
343unsigned int vm_pageout_scan_burst_throttle = 0; /* debugging */
344unsigned int vm_pageout_scan_empty_throttle = 0; /* debugging */
345unsigned int vm_pageout_scan_deadlock_detected = 0; /* debugging */
346unsigned int vm_pageout_scan_active_throttle_success = 0; /* debugging */
347unsigned int vm_pageout_scan_inactive_throttle_success = 0; /* debugging */
6d2010ae 348unsigned int vm_pageout_inactive_external_forced_reactivate_count = 0; /* debugging */
b0d623f7
A
349unsigned int vm_page_speculative_count_drifts = 0;
350unsigned int vm_page_speculative_count_drift_max = 0;
351
55e303ae
A
352/*
353 * Backing store throttle when BS is exhausted
354 */
355unsigned int vm_backing_store_low = 0;
1c79356b
A
356
357unsigned int vm_pageout_out_of_line = 0;
358unsigned int vm_pageout_in_place = 0;
55e303ae 359
b0d623f7
A
360unsigned int vm_page_steal_pageout_page = 0;
361
91447636
A
362/*
363 * ENCRYPTED SWAP:
364 * counters and statistics...
365 */
366unsigned long vm_page_decrypt_counter = 0;
367unsigned long vm_page_decrypt_for_upl_counter = 0;
368unsigned long vm_page_encrypt_counter = 0;
369unsigned long vm_page_encrypt_abort_counter = 0;
370unsigned long vm_page_encrypt_already_encrypted_counter = 0;
371boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
372
91447636
A
373struct vm_pageout_queue vm_pageout_queue_internal;
374struct vm_pageout_queue vm_pageout_queue_external;
375
2d21ac55
A
376unsigned int vm_page_speculative_target = 0;
377
378vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
379
0b4c1975 380boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;
b0d623f7
A
381
382#if DEVELOPMENT || DEBUG
4a3eedf9 383unsigned long vm_cs_validated_resets = 0;
b0d623f7 384#endif
55e303ae 385
6d2010ae
A
386int vm_debug_events = 0;
387
388
55e303ae
A
389/*
390 * Routine: vm_backing_store_disable
391 * Purpose:
392 * Suspend non-privileged threads wishing to extend
393 * backing store when we are low on backing store
394 * (Synchronized by caller)
395 */
396void
397vm_backing_store_disable(
398 boolean_t disable)
399{
400 if(disable) {
401 vm_backing_store_low = 1;
402 } else {
403 if(vm_backing_store_low) {
404 vm_backing_store_low = 0;
405 thread_wakeup((event_t) &vm_backing_store_low);
406 }
407 }
408}
409
410
1c79356b
A
411#if MACH_CLUSTER_STATS
412unsigned long vm_pageout_cluster_dirtied = 0;
413unsigned long vm_pageout_cluster_cleaned = 0;
414unsigned long vm_pageout_cluster_collisions = 0;
415unsigned long vm_pageout_cluster_clusters = 0;
416unsigned long vm_pageout_cluster_conversions = 0;
417unsigned long vm_pageout_target_collisions = 0;
418unsigned long vm_pageout_target_page_dirtied = 0;
419unsigned long vm_pageout_target_page_freed = 0;
1c79356b
A
420#define CLUSTER_STAT(clause) clause
421#else /* MACH_CLUSTER_STATS */
422#define CLUSTER_STAT(clause)
423#endif /* MACH_CLUSTER_STATS */
424
425/*
426 * Routine: vm_pageout_object_terminate
427 * Purpose:
2d21ac55 428 * Destroy the pageout_object, and perform all of the
1c79356b
A
429 * required cleanup actions.
430 *
431 * In/Out conditions:
432 * The object must be locked, and will be returned locked.
433 */
434void
435vm_pageout_object_terminate(
436 vm_object_t object)
437{
438 vm_object_t shadow_object;
439
440 /*
441 * Deal with the deallocation (last reference) of a pageout object
442 * (used for cleaning-in-place) by dropping the paging references/
443 * freeing pages in the original object.
444 */
445
446 assert(object->pageout);
447 shadow_object = object->shadow;
448 vm_object_lock(shadow_object);
449
450 while (!queue_empty(&object->memq)) {
451 vm_page_t p, m;
452 vm_object_offset_t offset;
453
454 p = (vm_page_t) queue_first(&object->memq);
455
456 assert(p->private);
457 assert(p->pageout);
458 p->pageout = FALSE;
459 assert(!p->cleaning);
460
461 offset = p->offset;
462 VM_PAGE_FREE(p);
463 p = VM_PAGE_NULL;
464
465 m = vm_page_lookup(shadow_object,
6d2010ae 466 offset + object->vo_shadow_offset);
1c79356b
A
467
468 if(m == VM_PAGE_NULL)
469 continue;
470 assert(m->cleaning);
0b4e3aa0
A
471 /* used as a trigger on upl_commit etc to recognize the */
472 /* pageout daemon's subseqent desire to pageout a cleaning */
473 /* page. When the bit is on the upl commit code will */
474 /* respect the pageout bit in the target page over the */
475 /* caller's page list indication */
476 m->dump_cleaning = FALSE;
1c79356b 477
1c79356b
A
478 assert((m->dirty) || (m->precious) ||
479 (m->busy && m->cleaning));
480
481 /*
482 * Handle the trusted pager throttle.
55e303ae 483 * Also decrement the burst throttle (if external).
1c79356b
A
484 */
485 vm_page_lock_queues();
486 if (m->laundry) {
91447636 487 vm_pageout_throttle_up(m);
1c79356b
A
488 }
489
490 /*
491 * Handle the "target" page(s). These pages are to be freed if
492 * successfully cleaned. Target pages are always busy, and are
493 * wired exactly once. The initial target pages are not mapped,
494 * (so cannot be referenced or modified) but converted target
495 * pages may have been modified between the selection as an
496 * adjacent page and conversion to a target.
497 */
498 if (m->pageout) {
499 assert(m->busy);
500 assert(m->wire_count == 1);
501 m->cleaning = FALSE;
2d21ac55 502 m->encrypted_cleaning = FALSE;
1c79356b
A
503 m->pageout = FALSE;
504#if MACH_CLUSTER_STATS
505 if (m->wanted) vm_pageout_target_collisions++;
506#endif
507 /*
508 * Revoke all access to the page. Since the object is
509 * locked, and the page is busy, this prevents the page
91447636 510 * from being dirtied after the pmap_disconnect() call
1c79356b 511 * returns.
91447636 512 *
1c79356b
A
513 * Since the page is left "dirty" but "not modifed", we
514 * can detect whether the page was redirtied during
515 * pageout by checking the modify state.
516 */
91447636
A
517 if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
518 m->dirty = TRUE;
519 else
520 m->dirty = FALSE;
1c79356b
A
521
522 if (m->dirty) {
523 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
0b4c1975 524 vm_page_unwire(m, TRUE); /* reactivates */
2d21ac55 525 VM_STAT_INCR(reactivations);
1c79356b 526 PAGE_WAKEUP_DONE(m);
1c79356b
A
527 } else {
528 CLUSTER_STAT(vm_pageout_target_page_freed++;)
529 vm_page_free(m);/* clears busy, etc. */
530 }
531 vm_page_unlock_queues();
532 continue;
533 }
534 /*
535 * Handle the "adjacent" pages. These pages were cleaned in
536 * place, and should be left alone.
537 * If prep_pin_count is nonzero, then someone is using the
538 * page, so make it active.
539 */
2d21ac55 540 if (!m->active && !m->inactive && !m->throttled && !m->private) {
0b4e3aa0 541 if (m->reference)
1c79356b
A
542 vm_page_activate(m);
543 else
544 vm_page_deactivate(m);
545 }
6d2010ae
A
546 if (m->overwriting) {
547 /*
548 * the (COPY_OUT_FROM == FALSE) request_page_list case
549 */
550 if (m->busy) {
551 /*
552 * We do not re-set m->dirty !
553 * The page was busy so no extraneous activity
554 * could have occurred. COPY_INTO is a read into the
555 * new pages. CLEAN_IN_PLACE does actually write
556 * out the pages but handling outside of this code
557 * will take care of resetting dirty. We clear the
558 * modify however for the Programmed I/O case.
559 */
560 pmap_clear_modify(m->phys_page);
2d21ac55 561
6d2010ae
A
562 m->busy = FALSE;
563 m->absent = FALSE;
564 } else {
565 /*
566 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
567 * Occurs when the original page was wired
568 * at the time of the list request
569 */
570 assert(VM_PAGE_WIRED(m));
571 vm_page_unwire(m, TRUE); /* reactivates */
572 }
1c79356b
A
573 m->overwriting = FALSE;
574 } else {
6d2010ae
A
575 /*
576 * Set the dirty state according to whether or not the page was
577 * modified during the pageout. Note that we purposefully do
578 * NOT call pmap_clear_modify since the page is still mapped.
579 * If the page were to be dirtied between the 2 calls, this
580 * this fact would be lost. This code is only necessary to
581 * maintain statistics, since the pmap module is always
582 * consulted if m->dirty is false.
583 */
1c79356b 584#if MACH_CLUSTER_STATS
55e303ae 585 m->dirty = pmap_is_modified(m->phys_page);
1c79356b
A
586
587 if (m->dirty) vm_pageout_cluster_dirtied++;
588 else vm_pageout_cluster_cleaned++;
589 if (m->wanted) vm_pageout_cluster_collisions++;
590#else
591 m->dirty = 0;
592#endif
593 }
6d2010ae
A
594 if (m->encrypted_cleaning == TRUE) {
595 m->encrypted_cleaning = FALSE;
596 m->busy = FALSE;
597 }
1c79356b
A
598 m->cleaning = FALSE;
599
1c79356b
A
600 /*
601 * Wakeup any thread waiting for the page to be un-cleaning.
602 */
603 PAGE_WAKEUP(m);
604 vm_page_unlock_queues();
605 }
606 /*
607 * Account for the paging reference taken in vm_paging_object_allocate.
608 */
b0d623f7 609 vm_object_activity_end(shadow_object);
1c79356b
A
610 vm_object_unlock(shadow_object);
611
612 assert(object->ref_count == 0);
613 assert(object->paging_in_progress == 0);
b0d623f7 614 assert(object->activity_in_progress == 0);
1c79356b
A
615 assert(object->resident_page_count == 0);
616 return;
617}
618
1c79356b
A
619/*
620 * Routine: vm_pageclean_setup
621 *
622 * Purpose: setup a page to be cleaned (made non-dirty), but not
623 * necessarily flushed from the VM page cache.
624 * This is accomplished by cleaning in place.
625 *
b0d623f7
A
626 * The page must not be busy, and new_object
627 * must be locked.
628 *
1c79356b
A
629 */
630void
631vm_pageclean_setup(
632 vm_page_t m,
633 vm_page_t new_m,
634 vm_object_t new_object,
635 vm_object_offset_t new_offset)
636{
1c79356b 637 assert(!m->busy);
2d21ac55 638#if 0
1c79356b 639 assert(!m->cleaning);
2d21ac55 640#endif
1c79356b
A
641
642 XPR(XPR_VM_PAGEOUT,
643 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
b0d623f7
A
644 m->object, m->offset, m,
645 new_m, new_offset);
1c79356b 646
55e303ae 647 pmap_clear_modify(m->phys_page);
1c79356b
A
648
649 /*
650 * Mark original page as cleaning in place.
651 */
652 m->cleaning = TRUE;
653 m->dirty = TRUE;
654 m->precious = FALSE;
655
656 /*
657 * Convert the fictitious page to a private shadow of
658 * the real page.
659 */
660 assert(new_m->fictitious);
2d21ac55 661 assert(new_m->phys_page == vm_page_fictitious_addr);
1c79356b
A
662 new_m->fictitious = FALSE;
663 new_m->private = TRUE;
664 new_m->pageout = TRUE;
55e303ae 665 new_m->phys_page = m->phys_page;
b0d623f7
A
666
667 vm_page_lockspin_queues();
1c79356b 668 vm_page_wire(new_m);
b0d623f7 669 vm_page_unlock_queues();
1c79356b
A
670
671 vm_page_insert(new_m, new_object, new_offset);
672 assert(!new_m->wanted);
673 new_m->busy = FALSE;
674}
675
1c79356b
A
676/*
677 * Routine: vm_pageout_initialize_page
678 * Purpose:
679 * Causes the specified page to be initialized in
680 * the appropriate memory object. This routine is used to push
681 * pages into a copy-object when they are modified in the
682 * permanent object.
683 *
684 * The page is moved to a temporary object and paged out.
685 *
686 * In/out conditions:
687 * The page in question must not be on any pageout queues.
688 * The object to which it belongs must be locked.
689 * The page must be busy, but not hold a paging reference.
690 *
691 * Implementation:
692 * Move this page to a completely new object.
693 */
694void
695vm_pageout_initialize_page(
696 vm_page_t m)
697{
1c79356b
A
698 vm_object_t object;
699 vm_object_offset_t paging_offset;
700 vm_page_t holding_page;
2d21ac55 701 memory_object_t pager;
1c79356b
A
702
703 XPR(XPR_VM_PAGEOUT,
704 "vm_pageout_initialize_page, page 0x%X\n",
b0d623f7 705 m, 0, 0, 0, 0);
1c79356b
A
706 assert(m->busy);
707
708 /*
709 * Verify that we really want to clean this page
710 */
711 assert(!m->absent);
712 assert(!m->error);
713 assert(m->dirty);
714
715 /*
716 * Create a paging reference to let us play with the object.
717 */
718 object = m->object;
719 paging_offset = m->offset + object->paging_offset;
2d21ac55
A
720
721 if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
1c79356b
A
722 VM_PAGE_FREE(m);
723 panic("reservation without pageout?"); /* alan */
2d21ac55
A
724 vm_object_unlock(object);
725
726 return;
727 }
728
729 /*
730 * If there's no pager, then we can't clean the page. This should
731 * never happen since this should be a copy object and therefore not
732 * an external object, so the pager should always be there.
733 */
734
735 pager = object->pager;
736
737 if (pager == MEMORY_OBJECT_NULL) {
738 VM_PAGE_FREE(m);
739 panic("missing pager for copy object");
1c79356b
A
740 return;
741 }
742
743 /* set the page for future call to vm_fault_list_request */
2d21ac55 744 vm_object_paging_begin(object);
1c79356b 745 holding_page = NULL;
b0d623f7 746
55e303ae 747 pmap_clear_modify(m->phys_page);
1c79356b 748 m->dirty = TRUE;
55e303ae
A
749 m->busy = TRUE;
750 m->list_req_pending = TRUE;
751 m->cleaning = TRUE;
1c79356b 752 m->pageout = TRUE;
b0d623f7
A
753
754 vm_page_lockspin_queues();
1c79356b 755 vm_page_wire(m);
55e303ae 756 vm_page_unlock_queues();
b0d623f7 757
55e303ae 758 vm_object_unlock(object);
1c79356b
A
759
760 /*
761 * Write the data to its pager.
762 * Note that the data is passed by naming the new object,
763 * not a virtual address; the pager interface has been
764 * manipulated to use the "internal memory" data type.
765 * [The object reference from its allocation is donated
766 * to the eventual recipient.]
767 */
2d21ac55 768 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
1c79356b
A
769
770 vm_object_lock(object);
2d21ac55 771 vm_object_paging_end(object);
1c79356b
A
772}
773
774#if MACH_CLUSTER_STATS
775#define MAXCLUSTERPAGES 16
776struct {
777 unsigned long pages_in_cluster;
778 unsigned long pages_at_higher_offsets;
779 unsigned long pages_at_lower_offsets;
780} cluster_stats[MAXCLUSTERPAGES];
781#endif /* MACH_CLUSTER_STATS */
782
1c79356b
A
783
784/*
785 * vm_pageout_cluster:
786 *
91447636
A
787 * Given a page, queue it to the appropriate I/O thread,
788 * which will page it out and attempt to clean adjacent pages
1c79356b
A
789 * in the same operation.
790 *
91447636 791 * The page must be busy, and the object and queues locked. We will take a
55e303ae 792 * paging reference to prevent deallocation or collapse when we
91447636
A
793 * release the object lock back at the call site. The I/O thread
794 * is responsible for consuming this reference
55e303ae
A
795 *
796 * The page must not be on any pageout queue.
1c79356b 797 */
91447636 798
1c79356b 799void
91447636 800vm_pageout_cluster(vm_page_t m)
1c79356b
A
801{
802 vm_object_t object = m->object;
91447636
A
803 struct vm_pageout_queue *q;
804
1c79356b
A
805
806 XPR(XPR_VM_PAGEOUT,
807 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
b0d623f7
A
808 object, m->offset, m, 0, 0);
809
810 VM_PAGE_CHECK(m);
6d2010ae
A
811#if DEBUG
812 lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
813#endif
814 vm_object_lock_assert_exclusive(object);
1c79356b 815
91447636
A
816 /*
817 * Only a certain kind of page is appreciated here.
818 */
b0d623f7 819 assert(m->busy && (m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
6d2010ae
A
820 assert(!m->cleaning && !m->pageout);
821#ifndef CONFIG_FREEZE
822 assert(!m->inactive && !m->active);
2d21ac55 823 assert(!m->throttled);
6d2010ae 824#endif
55e303ae
A
825
826 /*
827 * protect the object from collapse -
828 * locking in the object's paging_offset.
829 */
830 vm_object_paging_begin(object);
55e303ae 831
1c79356b 832 /*
91447636
A
833 * set the page for future call to vm_fault_list_request
834 * page should already be marked busy
1c79356b 835 */
91447636 836 vm_page_wire(m);
55e303ae
A
837 m->list_req_pending = TRUE;
838 m->cleaning = TRUE;
1c79356b 839 m->pageout = TRUE;
1c79356b 840
91447636
A
841 if (object->internal == TRUE)
842 q = &vm_pageout_queue_internal;
843 else
844 q = &vm_pageout_queue_external;
d1ecb069
A
845
846 /*
847 * pgo_laundry count is tied to the laundry bit
848 */
6d2010ae 849 m->laundry = TRUE;
91447636 850 q->pgo_laundry++;
1c79356b 851
91447636
A
852 m->pageout_queue = TRUE;
853 queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
854
855 if (q->pgo_idle == TRUE) {
856 q->pgo_idle = FALSE;
857 thread_wakeup((event_t) &q->pgo_pending);
1c79356b 858 }
b0d623f7
A
859
860 VM_PAGE_CHECK(m);
1c79356b
A
861}
862
55e303ae 863
91447636 864unsigned long vm_pageout_throttle_up_count = 0;
1c79356b
A
865
866/*
b0d623f7
A
867 * A page is back from laundry or we are stealing it back from
868 * the laundering state. See if there are some pages waiting to
91447636 869 * go to laundry and if we can let some of them go now.
1c79356b 870 *
91447636 871 * Object and page queues must be locked.
1c79356b 872 */
91447636
A
873void
874vm_pageout_throttle_up(
6d2010ae 875 vm_page_t m)
1c79356b 876{
6d2010ae 877 struct vm_pageout_queue *q;
1c79356b 878
6d2010ae
A
879 assert(m->object != VM_OBJECT_NULL);
880 assert(m->object != kernel_object);
1c79356b 881
6d2010ae 882 vm_pageout_throttle_up_count++;
0b4c1975 883
6d2010ae
A
884 if (m->object->internal == TRUE)
885 q = &vm_pageout_queue_internal;
886 else
887 q = &vm_pageout_queue_external;
d1ecb069 888
6d2010ae 889 if (m->pageout_queue == TRUE) {
0b4c1975 890
6d2010ae
A
891 queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
892 m->pageout_queue = FALSE;
1c79356b 893
6d2010ae
A
894 m->pageq.next = NULL;
895 m->pageq.prev = NULL;
91447636 896
6d2010ae
A
897 vm_object_paging_end(m->object);
898 }
1c79356b 899
6d2010ae 900 if ( m->laundry == TRUE ) {
91447636 901
6d2010ae
A
902 m->laundry = FALSE;
903 q->pgo_laundry--;
91447636 904
6d2010ae
A
905 if (q->pgo_throttled == TRUE) {
906 q->pgo_throttled = FALSE;
907 thread_wakeup((event_t) &q->pgo_laundry);
908 }
909 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
910 q->pgo_draining = FALSE;
911 thread_wakeup((event_t) (&q->pgo_laundry+1));
912 }
913 }
914}
91447636 915
b0d623f7
A
916
917/*
918 * VM memory pressure monitoring.
919 *
920 * vm_pageout_scan() keeps track of the number of pages it considers and
921 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
922 *
923 * compute_memory_pressure() is called every second from compute_averages()
924 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
925 * of recalimed pages in a new vm_pageout_stat[] bucket.
926 *
927 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
928 * The caller provides the number of seconds ("nsecs") worth of statistics
929 * it wants, up to 30 seconds.
930 * It computes the number of pages reclaimed in the past "nsecs" seconds and
931 * also returns the number of pages the system still needs to reclaim at this
932 * moment in time.
933 */
934#define VM_PAGEOUT_STAT_SIZE 31
935struct vm_pageout_stat {
936 unsigned int considered;
937 unsigned int reclaimed;
938} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
939unsigned int vm_pageout_stat_now = 0;
940unsigned int vm_memory_pressure = 0;
941
942#define VM_PAGEOUT_STAT_BEFORE(i) \
943 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
944#define VM_PAGEOUT_STAT_AFTER(i) \
945 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
946
947/*
948 * Called from compute_averages().
949 */
950void
951compute_memory_pressure(
952 __unused void *arg)
953{
954 unsigned int vm_pageout_next;
955
956 vm_memory_pressure =
957 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
958
959 commpage_set_memory_pressure( vm_memory_pressure );
960
961 /* move "now" forward */
962 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
963 vm_pageout_stats[vm_pageout_next].considered = 0;
964 vm_pageout_stats[vm_pageout_next].reclaimed = 0;
965 vm_pageout_stat_now = vm_pageout_next;
966}
967
968unsigned int
969mach_vm_ctl_page_free_wanted(void)
970{
971 unsigned int page_free_target, page_free_count, page_free_wanted;
972
973 page_free_target = vm_page_free_target;
974 page_free_count = vm_page_free_count;
975 if (page_free_target > page_free_count) {
976 page_free_wanted = page_free_target - page_free_count;
977 } else {
978 page_free_wanted = 0;
979 }
980
981 return page_free_wanted;
982}
983
984kern_return_t
985mach_vm_pressure_monitor(
986 boolean_t wait_for_pressure,
987 unsigned int nsecs_monitored,
988 unsigned int *pages_reclaimed_p,
989 unsigned int *pages_wanted_p)
990{
991 wait_result_t wr;
992 unsigned int vm_pageout_then, vm_pageout_now;
993 unsigned int pages_reclaimed;
994
995 /*
996 * We don't take the vm_page_queue_lock here because we don't want
997 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
998 * thread when it's trying to reclaim memory. We don't need fully
999 * accurate monitoring anyway...
1000 */
1001
1002 if (wait_for_pressure) {
1003 /* wait until there's memory pressure */
1004 while (vm_page_free_count >= vm_page_free_target) {
1005 wr = assert_wait((event_t) &vm_page_free_wanted,
1006 THREAD_INTERRUPTIBLE);
1007 if (wr == THREAD_WAITING) {
1008 wr = thread_block(THREAD_CONTINUE_NULL);
1009 }
1010 if (wr == THREAD_INTERRUPTED) {
1011 return KERN_ABORTED;
1012 }
1013 if (wr == THREAD_AWAKENED) {
1014 /*
1015 * The memory pressure might have already
1016 * been relieved but let's not block again
1017 * and let's report that there was memory
1018 * pressure at some point.
1019 */
1020 break;
1021 }
1022 }
1023 }
1024
1025 /* provide the number of pages the system wants to reclaim */
1026 if (pages_wanted_p != NULL) {
1027 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1028 }
1029
1030 if (pages_reclaimed_p == NULL) {
1031 return KERN_SUCCESS;
1032 }
1033
1034 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1035 do {
1036 vm_pageout_now = vm_pageout_stat_now;
1037 pages_reclaimed = 0;
1038 for (vm_pageout_then =
1039 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1040 vm_pageout_then != vm_pageout_now &&
1041 nsecs_monitored-- != 0;
1042 vm_pageout_then =
1043 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1044 pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
1045 }
1046 } while (vm_pageout_now != vm_pageout_stat_now);
1047 *pages_reclaimed_p = pages_reclaimed;
1048
1049 return KERN_SUCCESS;
1050}
1051
1052/* Page States: Used below to maintain the page state
1053 before it's removed from it's Q. This saved state
1054 helps us do the right accounting in certain cases
1055*/
1056
6d2010ae
A
1057#define PAGE_STATE_SPECULATIVE 1
1058#define PAGE_STATE_ZEROFILL 2
1059#define PAGE_STATE_INACTIVE 3
1060#define PAGE_STATE_INACTIVE_FIRST 4
b0d623f7
A
1061
1062#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
1063 MACRO_BEGIN \
1064 /* \
1065 * If a "reusable" page somehow made it back into \
1066 * the active queue, it's been re-used and is not \
1067 * quite re-usable. \
1068 * If the VM object was "all_reusable", consider it \
1069 * as "all re-used" instead of converting it to \
1070 * "partially re-used", which could be expensive. \
1071 */ \
1072 if ((m)->reusable || \
1073 (m)->object->all_reusable) { \
1074 vm_object_reuse_pages((m)->object, \
1075 (m)->offset, \
1076 (m)->offset + PAGE_SIZE_64, \
1077 FALSE); \
1078 } \
1079 MACRO_END
1080
6d2010ae
A
1081
1082#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 128
1083#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
1084
1085#define FCS_IDLE 0
1086#define FCS_DELAYED 1
1087#define FCS_DEADLOCK_DETECTED 2
1088
1089struct flow_control {
1090 int state;
1091 mach_timespec_t ts;
1092};
1093
1094
1095/*
1096 * vm_pageout_scan does the dirty work for the pageout daemon.
1097 * It returns with vm_page_queue_free_lock held and
1098 * vm_page_free_wanted == 0.
1099 */
1c79356b
A
1100void
1101vm_pageout_scan(void)
1102{
91447636
A
1103 unsigned int loop_count = 0;
1104 unsigned int inactive_burst_count = 0;
1105 unsigned int active_burst_count = 0;
2d21ac55
A
1106 unsigned int reactivated_this_call;
1107 unsigned int reactivate_limit;
1108 vm_page_t local_freeq = NULL;
55e303ae 1109 int local_freed = 0;
2d21ac55 1110 int delayed_unlock;
6d2010ae 1111 int delayed_unlock_limit = 0;
91447636
A
1112 int refmod_state = 0;
1113 int vm_pageout_deadlock_target = 0;
1114 struct vm_pageout_queue *iq;
1115 struct vm_pageout_queue *eq;
2d21ac55 1116 struct vm_speculative_age_q *sq;
b0d623f7 1117 struct flow_control flow_control = { 0, { 0, 0 } };
91447636 1118 boolean_t inactive_throttled = FALSE;
2d21ac55 1119 boolean_t try_failed;
6d2010ae
A
1120 mach_timespec_t ts;
1121 unsigned int msecs = 0;
91447636 1122 vm_object_t object;
2d21ac55 1123 vm_object_t last_object_tried;
b0d623f7
A
1124 uint64_t zf_ratio;
1125 uint64_t zf_run_count;
2d21ac55
A
1126 uint32_t catch_up_count = 0;
1127 uint32_t inactive_reclaim_run;
1128 boolean_t forced_reclaim;
b0d623f7 1129 int page_prev_state = 0;
6d2010ae
A
1130 int cache_evict_throttle = 0;
1131 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
1132
1133 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
1134 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1135 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
91447636
A
1136
1137 flow_control.state = FCS_IDLE;
1138 iq = &vm_pageout_queue_internal;
1139 eq = &vm_pageout_queue_external;
2d21ac55
A
1140 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1141
1c79356b
A
1142
1143 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1144
2d21ac55
A
1145
1146 vm_page_lock_queues();
1147 delayed_unlock = 1; /* must be nonzero if Qs are locked, 0 if unlocked */
1148
1149 /*
1150 * Calculate the max number of referenced pages on the inactive
1151 * queue that we will reactivate.
1152 */
1153 reactivated_this_call = 0;
1154 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
1155 vm_page_inactive_count);
1156 inactive_reclaim_run = 0;
1157
1158
6d2010ae 1159 /*
1c79356b
A
1160 * We want to gradually dribble pages from the active queue
1161 * to the inactive queue. If we let the inactive queue get
1162 * very small, and then suddenly dump many pages into it,
1163 * those pages won't get a sufficient chance to be referenced
1164 * before we start taking them from the inactive queue.
1165 *
6d2010ae
A
1166 * We must limit the rate at which we send pages to the pagers
1167 * so that we don't tie up too many pages in the I/O queues.
1168 * We implement a throttling mechanism using the laundry count
1169 * to limit the number of pages outstanding to the default
1170 * and external pagers. We can bypass the throttles and look
1171 * for clean pages if the pageout queues don't drain in a timely
1172 * fashion since this may indicate that the pageout paths are
1173 * stalled waiting for memory, which only we can provide.
1c79356b 1174 */
91447636 1175
1c79356b 1176
91447636 1177Restart:
2d21ac55
A
1178 assert(delayed_unlock!=0);
1179
1180 /*
1181 * A page is "zero-filled" if it was not paged in from somewhere,
1182 * and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
1183 * Recalculate the zero-filled page ratio. We use this to apportion
1184 * victimized pages between the normal and zero-filled inactive
1185 * queues according to their relative abundance in memory. Thus if a task
1186 * is flooding memory with zf pages, we begin to hunt them down.
1187 * It would be better to throttle greedy tasks at a higher level,
1188 * but at the moment mach vm cannot do this.
1189 */
1190 {
b0d623f7
A
1191 uint64_t total = vm_page_active_count + vm_page_inactive_count;
1192 uint64_t normal = total - vm_zf_count;
b0d623f7 1193
2d21ac55
A
1194 /* zf_ratio is the number of zf pages we victimize per normal page */
1195
1196 if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
1197 zf_ratio = 0;
1198 else if ((vm_zf_count <= normal) || (normal == 0))
1199 zf_ratio = 1;
1200 else
1201 zf_ratio = vm_zf_count / normal;
1202
1203 zf_run_count = 0;
1204 }
1205
91447636
A
1206 /*
1207 * Recalculate vm_page_inactivate_target.
1208 */
1209 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2d21ac55
A
1210 vm_page_inactive_count +
1211 vm_page_speculative_count);
1212 /*
1213 * don't want to wake the pageout_scan thread up everytime we fall below
1214 * the targets... set a low water mark at 0.25% below the target
1215 */
1216 vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
1c79356b 1217
6d2010ae
A
1218 if (vm_page_speculative_percentage > 50)
1219 vm_page_speculative_percentage = 50;
1220 else if (vm_page_speculative_percentage <= 0)
1221 vm_page_speculative_percentage = 1;
1222
2d21ac55
A
1223 vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1224 vm_page_inactive_count);
6d2010ae
A
1225
1226 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
1227
2d21ac55
A
1228 object = NULL;
1229 last_object_tried = NULL;
1230 try_failed = FALSE;
1231
1232 if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
1233 catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
1234 else
1235 catch_up_count = 0;
1236
55e303ae 1237 for (;;) {
91447636 1238 vm_page_t m;
1c79356b 1239
2d21ac55 1240 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
1c79356b 1241
2d21ac55
A
1242 if (delayed_unlock == 0) {
1243 vm_page_lock_queues();
1244 delayed_unlock = 1;
1245 }
6d2010ae
A
1246 if (vm_upl_wait_for_pages < 0)
1247 vm_upl_wait_for_pages = 0;
91447636 1248
6d2010ae
A
1249 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
1250
1251 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
1252 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
91447636 1253
1c79356b 1254 /*
6d2010ae 1255 * Move pages from active to inactive if we're below the target
1c79356b 1256 */
b0d623f7 1257 if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
2d21ac55
A
1258 goto done_moving_active_pages;
1259
6d2010ae
A
1260 if (object != NULL) {
1261 vm_object_unlock(object);
1262 object = NULL;
1263 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1264 }
1265 /*
1266 * Don't sweep through active queue more than the throttle
1267 * which should be kept relatively low
1268 */
1269 active_burst_count = MIN(vm_pageout_burst_active_throttle,
1270 vm_page_active_count);
1271
1272 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
1273 vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);
1274
1275 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
1276 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1277 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
2d21ac55 1278
6d2010ae 1279 while (!queue_empty(&vm_page_queue_active) && active_burst_count--) {
1c79356b 1280
1c79356b 1281 vm_pageout_active++;
55e303ae 1282
1c79356b 1283 m = (vm_page_t) queue_first(&vm_page_queue_active);
91447636
A
1284
1285 assert(m->active && !m->inactive);
1286 assert(!m->laundry);
1287 assert(m->object != kernel_object);
2d21ac55
A
1288 assert(m->phys_page != vm_page_guard_addr);
1289
1290 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1c79356b
A
1291
1292 /*
6d2010ae
A
1293 * The page might be absent or busy,
1294 * but vm_page_deactivate can handle that.
1c79356b 1295 */
91447636 1296 vm_page_deactivate(m);
2d21ac55 1297
6d2010ae 1298 if (delayed_unlock++ > delayed_unlock_limit) {
1c79356b 1299
91447636 1300 if (local_freeq) {
b0d623f7 1301 vm_page_unlock_queues();
6d2010ae
A
1302
1303 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1304 vm_page_free_count, local_freed, delayed_unlock_limit, 1);
1305
b0d623f7 1306 vm_page_free_list(local_freeq, TRUE);
91447636 1307
6d2010ae
A
1308 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1309 vm_page_free_count, 0, 0, 1);
1310
2d21ac55 1311 local_freeq = NULL;
91447636 1312 local_freed = 0;
b0d623f7
A
1313 vm_page_lock_queues();
1314 } else
1315 lck_mtx_yield(&vm_page_queue_lock);
2d21ac55
A
1316
1317 delayed_unlock = 1;
91447636 1318
91447636
A
1319 /*
1320 * continue the while loop processing
1321 * the active queue... need to hold
1322 * the page queues lock
1323 */
55e303ae 1324 }
1c79356b 1325 }
91447636 1326
6d2010ae
A
1327 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
1328 vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
91447636
A
1329
1330
1331 /**********************************************************************
1332 * above this point we're playing with the active queue
1333 * below this point we're playing with the throttling mechanisms
1334 * and the inactive queue
1335 **********************************************************************/
1336
2d21ac55 1337done_moving_active_pages:
91447636 1338
55e303ae 1339 if (vm_page_free_count + local_freed >= vm_page_free_target) {
91447636
A
1340 if (object != NULL) {
1341 vm_object_unlock(object);
1342 object = NULL;
1343 }
2d21ac55
A
1344 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1345
55e303ae 1346 if (local_freeq) {
b0d623f7 1347 vm_page_unlock_queues();
6d2010ae
A
1348
1349 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1350 vm_page_free_count, local_freed, delayed_unlock_limit, 2);
1351
b0d623f7 1352 vm_page_free_list(local_freeq, TRUE);
55e303ae 1353
6d2010ae
A
1354 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1355 vm_page_free_count, local_freed, 0, 2);
1356
2d21ac55 1357 local_freeq = NULL;
55e303ae 1358 local_freed = 0;
b0d623f7 1359 vm_page_lock_queues();
55e303ae 1360 }
2d21ac55 1361 /*
6d2010ae 1362 * recalculate vm_page_inactivate_target
593a1d5f
A
1363 */
1364 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1365 vm_page_inactive_count +
1366 vm_page_speculative_count);
593a1d5f 1367#ifndef CONFIG_EMBEDDED
2d21ac55 1368 if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
6d2010ae
A
1369 !queue_empty(&vm_page_queue_active)) {
1370 /*
1371 * inactive target still not met... keep going
1372 * until we get the queues balanced...
1373 */
2d21ac55 1374 continue;
6d2010ae 1375 }
593a1d5f 1376#endif
b0d623f7 1377 lck_mtx_lock(&vm_page_queue_free_lock);
55e303ae 1378
0b4e3aa0 1379 if ((vm_page_free_count >= vm_page_free_target) &&
2d21ac55 1380 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
6d2010ae
A
1381 /*
1382 * done - we have met our target *and*
1383 * there is no one waiting for a page.
1384 */
0b4e3aa0 1385 vm_page_unlock_queues();
91447636
A
1386
1387 thread_wakeup((event_t) &vm_pageout_garbage_collect);
2d21ac55
A
1388
1389 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
1390
6d2010ae
A
1391 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
1392 vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
1393 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
1394 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1395 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
1396
91447636 1397 return;
0b4e3aa0 1398 }
b0d623f7 1399 lck_mtx_unlock(&vm_page_queue_free_lock);
1c79356b 1400 }
b0d623f7 1401
2d21ac55 1402 /*
b0d623f7
A
1403 * Before anything, we check if we have any ripe volatile
1404 * objects around. If so, try to purge the first object.
1405 * If the purge fails, fall through to reclaim a page instead.
1406 * If the purge succeeds, go back to the top and reevalute
1407 * the new memory situation.
2d21ac55
A
1408 */
1409 assert (available_for_purge>=0);
1410 if (available_for_purge)
1411 {
1412 if (object != NULL) {
1413 vm_object_unlock(object);
1414 object = NULL;
1415 }
6d2010ae
A
1416
1417 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1418
1419 if (TRUE == vm_purgeable_object_purge_one()) {
1420
1421 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1422
b0d623f7
A
1423 continue;
1424 }
6d2010ae 1425 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
2d21ac55 1426 }
2d21ac55
A
1427 if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
1428 /*
6d2010ae 1429 * try to pull pages from the aging bins...
2d21ac55
A
1430 * see vm_page.h for an explanation of how
1431 * this mechanism works
1432 */
1433 struct vm_speculative_age_q *aq;
1434 mach_timespec_t ts_fully_aged;
1435 boolean_t can_steal = FALSE;
b0d623f7 1436 int num_scanned_queues;
2d21ac55
A
1437
1438 aq = &vm_page_queue_speculative[speculative_steal_index];
1439
b0d623f7
A
1440 num_scanned_queues = 0;
1441 while (queue_empty(&aq->age_q) &&
1442 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2d21ac55
A
1443
1444 speculative_steal_index++;
1445
1446 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
1447 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1448
1449 aq = &vm_page_queue_speculative[speculative_steal_index];
1450 }
b0d623f7 1451
6d2010ae 1452 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
b0d623f7
A
1453 /*
1454 * XXX We've scanned all the speculative
1455 * queues but still haven't found one
1456 * that is not empty, even though
1457 * vm_page_speculative_count is not 0.
6d2010ae
A
1458 *
1459 * report the anomaly...
b0d623f7 1460 */
b0d623f7
A
1461 printf("vm_pageout_scan: "
1462 "all speculative queues empty "
1463 "but count=%d. Re-adjusting.\n",
1464 vm_page_speculative_count);
6d2010ae 1465 if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
b0d623f7
A
1466 vm_page_speculative_count_drift_max = vm_page_speculative_count;
1467 vm_page_speculative_count_drifts++;
1468#if 6553678
1469 Debugger("vm_pageout_scan: no speculative pages");
1470#endif
1471 /* readjust... */
1472 vm_page_speculative_count = 0;
1473 /* ... and continue */
1474 continue;
1475 }
1476
2d21ac55
A
1477 if (vm_page_speculative_count > vm_page_speculative_target)
1478 can_steal = TRUE;
1479 else {
6d2010ae
A
1480 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
1481 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
2d21ac55
A
1482 * 1000 * NSEC_PER_USEC;
1483
1484 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
55e303ae 1485
b0d623f7
A
1486 clock_sec_t sec;
1487 clock_nsec_t nsec;
1488 clock_get_system_nanotime(&sec, &nsec);
1489 ts.tv_sec = (unsigned int) sec;
1490 ts.tv_nsec = nsec;
2d21ac55
A
1491
1492 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
1493 can_steal = TRUE;
1494 }
1495 if (can_steal == TRUE)
1496 vm_page_speculate_ageit(aq);
1497 }
6d2010ae
A
1498 if (queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
1499 int pages_evicted;
1500
1501 if (object != NULL) {
1502 vm_object_unlock(object);
1503 object = NULL;
1504 }
1505 pages_evicted = vm_object_cache_evict(100, 10);
1506
1507 if (pages_evicted) {
1508
1509 vm_pageout_cache_evicted += pages_evicted;
1510
1511 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
1512 vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
1513
1514 /*
1515 * we just freed up to 100 pages,
1516 * so go back to the top of the main loop
1517 * and re-evaulate the memory situation
1518 */
1519 continue;
1520 } else
1521 cache_evict_throttle = 100;
1522 }
1523 if (cache_evict_throttle)
1524 cache_evict_throttle--;
1525
91447636 1526
1c79356b
A
1527 /*
1528 * Sometimes we have to pause:
1529 * 1) No inactive pages - nothing to do.
91447636
A
1530 * 2) Flow control - default pageout queue is full
1531 * 3) Loop control - no acceptable pages found on the inactive queue
1532 * within the last vm_pageout_burst_inactive_throttle iterations
1c79356b 1533 */
6d2010ae 1534 if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q)) {
91447636
A
1535 vm_pageout_scan_empty_throttle++;
1536 msecs = vm_pageout_empty_wait;
1537 goto vm_pageout_scan_delay;
1538
b0d623f7 1539 } else if (inactive_burst_count >=
593a1d5f
A
1540 MIN(vm_pageout_burst_inactive_throttle,
1541 (vm_page_inactive_count +
1542 vm_page_speculative_count))) {
91447636
A
1543 vm_pageout_scan_burst_throttle++;
1544 msecs = vm_pageout_burst_wait;
1545 goto vm_pageout_scan_delay;
1546
6d2010ae
A
1547 } else if (VM_PAGE_Q_THROTTLED(iq) &&
1548 VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
b0d623f7
A
1549 clock_sec_t sec;
1550 clock_nsec_t nsec;
91447636
A
1551
1552 switch (flow_control.state) {
1553
1554 case FCS_IDLE:
1555reset_deadlock_timer:
1556 ts.tv_sec = vm_pageout_deadlock_wait / 1000;
1557 ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
b0d623f7
A
1558 clock_get_system_nanotime(&sec, &nsec);
1559 flow_control.ts.tv_sec = (unsigned int) sec;
1560 flow_control.ts.tv_nsec = nsec;
91447636
A
1561 ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
1562
1563 flow_control.state = FCS_DELAYED;
1564 msecs = vm_pageout_deadlock_wait;
1c79356b 1565
91447636
A
1566 break;
1567
1568 case FCS_DELAYED:
b0d623f7
A
1569 clock_get_system_nanotime(&sec, &nsec);
1570 ts.tv_sec = (unsigned int) sec;
1571 ts.tv_nsec = nsec;
91447636
A
1572
1573 if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
1574 /*
1575 * the pageout thread for the default pager is potentially
1576 * deadlocked since the
1577 * default pager queue has been throttled for more than the
1578 * allowable time... we need to move some clean pages or dirty
1579 * pages belonging to the external pagers if they aren't throttled
1580 * vm_page_free_wanted represents the number of threads currently
1581 * blocked waiting for pages... we'll move one page for each of
1582 * these plus a fixed amount to break the logjam... once we're done
1583 * moving this number of pages, we'll re-enter the FSC_DELAYED state
1584 * with a new timeout target since we have no way of knowing
1585 * whether we've broken the deadlock except through observation
1586 * of the queue associated with the default pager... we need to
2d21ac55 1587 * stop moving pages and allow the system to run to see what
91447636
A
1588 * state it settles into.
1589 */
2d21ac55 1590 vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
91447636
A
1591 vm_pageout_scan_deadlock_detected++;
1592 flow_control.state = FCS_DEADLOCK_DETECTED;
55e303ae 1593
91447636
A
1594 thread_wakeup((event_t) &vm_pageout_garbage_collect);
1595 goto consider_inactive;
1596 }
1597 /*
1598 * just resniff instead of trying
1599 * to compute a new delay time... we're going to be
1600 * awakened immediately upon a laundry completion,
1601 * so we won't wait any longer than necessary
1602 */
1603 msecs = vm_pageout_idle_wait;
1604 break;
1c79356b 1605
91447636
A
1606 case FCS_DEADLOCK_DETECTED:
1607 if (vm_pageout_deadlock_target)
1608 goto consider_inactive;
1609 goto reset_deadlock_timer;
55e303ae 1610
91447636
A
1611 }
1612 vm_pageout_scan_throttle++;
1613 iq->pgo_throttled = TRUE;
1614vm_pageout_scan_delay:
1615 if (object != NULL) {
1616 vm_object_unlock(object);
1617 object = NULL;
1618 }
2d21ac55
A
1619 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1620
55e303ae 1621 if (local_freeq) {
b0d623f7 1622 vm_page_unlock_queues();
6d2010ae
A
1623
1624 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1625 vm_page_free_count, local_freed, delayed_unlock_limit, 3);
1626
b0d623f7 1627 vm_page_free_list(local_freeq, TRUE);
55e303ae 1628
6d2010ae
A
1629 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1630 vm_page_free_count, local_freed, 0, 3);
1631
2d21ac55 1632 local_freeq = NULL;
55e303ae 1633 local_freed = 0;
b0d623f7
A
1634 vm_page_lock_queues();
1635
1636 if (flow_control.state == FCS_DELAYED &&
1637 !VM_PAGE_Q_THROTTLED(iq)) {
1638 flow_control.state = FCS_IDLE;
1639 vm_pageout_scan_throttle_aborted++;
1640 goto consider_inactive;
1641 }
55e303ae 1642 }
0b4e3aa0 1643
6d2010ae
A
1644 VM_CHECK_MEMORYSTATUS;
1645
2d21ac55 1646 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
2d21ac55 1647 counter(c_vm_pageout_scan_block++);
1c79356b 1648
91447636 1649 vm_page_unlock_queues();
2d21ac55
A
1650
1651 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
b0d623f7 1652
6d2010ae
A
1653 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
1654 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
1655
91447636
A
1656 thread_block(THREAD_CONTINUE_NULL);
1657
6d2010ae
A
1658 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
1659 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
1660
91447636
A
1661 vm_page_lock_queues();
1662 delayed_unlock = 1;
1663
1664 iq->pgo_throttled = FALSE;
0b4e3aa0 1665
2d21ac55 1666 if (loop_count >= vm_page_inactive_count)
55e303ae 1667 loop_count = 0;
91447636
A
1668 inactive_burst_count = 0;
1669
1c79356b
A
1670 goto Restart;
1671 /*NOTREACHED*/
1672 }
1673
91447636
A
1674
1675 flow_control.state = FCS_IDLE;
1676consider_inactive:
6d2010ae
A
1677 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
1678 vm_pageout_inactive_external_forced_reactivate_limit);
91447636
A
1679 loop_count++;
1680 inactive_burst_count++;
1c79356b 1681 vm_pageout_inactive++;
9bccf70c 1682
2d21ac55
A
1683 /* Choose a victim. */
1684
1685 while (1) {
1686 m = NULL;
91447636 1687
6d2010ae 1688 if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
b0d623f7
A
1689 assert(vm_page_throttled_count == 0);
1690 assert(queue_empty(&vm_page_queue_throttled));
91447636 1691 }
2d21ac55
A
1692
1693 /*
b0d623f7 1694 * The most eligible pages are ones we paged in speculatively,
2d21ac55
A
1695 * but which have not yet been touched.
1696 */
1697 if ( !queue_empty(&sq->age_q) ) {
1698 m = (vm_page_t) queue_first(&sq->age_q);
6d2010ae
A
1699
1700 page_prev_state = PAGE_STATE_SPECULATIVE;
2d21ac55 1701 break;
9bccf70c 1702 }
2d21ac55
A
1703 /*
1704 * Time for a zero-filled inactive page?
1705 */
1706 if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
1707 queue_empty(&vm_page_queue_inactive)) {
1708 if ( !queue_empty(&vm_page_queue_zf) ) {
1709 m = (vm_page_t) queue_first(&vm_page_queue_zf);
6d2010ae
A
1710
1711 page_prev_state = PAGE_STATE_ZEROFILL;
2d21ac55
A
1712 zf_run_count++;
1713 break;
1714 }
1715 }
1716 /*
1717 * It's either a normal inactive page or nothing.
1718 */
1719 if ( !queue_empty(&vm_page_queue_inactive) ) {
1720 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
6d2010ae
A
1721
1722 page_prev_state = PAGE_STATE_INACTIVE;
2d21ac55
A
1723 zf_run_count = 0;
1724 break;
1725 }
1726
1727 panic("vm_pageout: no victim");
9bccf70c 1728 }
6d2010ae 1729 VM_PAGE_QUEUES_REMOVE(m);
2d21ac55 1730
91447636 1731 assert(!m->laundry);
6d2010ae
A
1732 assert(!m->private);
1733 assert(!m->fictitious);
91447636 1734 assert(m->object != kernel_object);
2d21ac55
A
1735 assert(m->phys_page != vm_page_guard_addr);
1736
6d2010ae
A
1737
1738 if (page_prev_state != PAGE_STATE_SPECULATIVE)
b0d623f7 1739 vm_pageout_stats[vm_pageout_stat_now].considered++;
b0d623f7 1740
2d21ac55 1741 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1c79356b 1742
91447636 1743 /*
2d21ac55
A
1744 * check to see if we currently are working
1745 * with the same object... if so, we've
1746 * already got the lock
91447636
A
1747 */
1748 if (m->object != object) {
2d21ac55
A
1749 /*
1750 * the object associated with candidate page is
1751 * different from the one we were just working
1752 * with... dump the lock if we still own it
1753 */
91447636
A
1754 if (object != NULL) {
1755 vm_object_unlock(object);
1756 object = NULL;
2d21ac55 1757 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
91447636 1758 }
2d21ac55
A
1759 /*
1760 * Try to lock object; since we've alread got the
1761 * page queues lock, we can only 'try' for this one.
1762 * if the 'try' fails, we need to do a mutex_pause
1763 * to allow the owner of the object lock a chance to
1764 * run... otherwise, we're likely to trip over this
1765 * object in the same state as we work our way through
1766 * the queue... clumps of pages associated with the same
1767 * object are fairly typical on the inactive and active queues
1768 */
1769 if (!vm_object_lock_try_scan(m->object)) {
6d2010ae
A
1770 vm_page_t m_want = NULL;
1771
b0d623f7
A
1772 vm_pageout_inactive_nolock++;
1773
6d2010ae
A
1774 if (page_prev_state == PAGE_STATE_SPECULATIVE)
1775 page_prev_state = PAGE_STATE_INACTIVE_FIRST;
2d21ac55 1776
2d21ac55
A
1777 pmap_clear_reference(m->phys_page);
1778 m->reference = FALSE;
1779
6d2010ae
A
1780 /*
1781 * m->object must be stable since we hold the page queues lock...
1782 * we can update the scan_collisions field sans the object lock
1783 * since it is a separate field and this is the only spot that does
1784 * a read-modify-write operation and it is never executed concurrently...
1785 * we can asynchronously set this field to 0 when creating a UPL, so it
1786 * is possible for the value to be a bit non-determistic, but that's ok
1787 * since it's only used as a hint
1788 */
1789 m->object->scan_collisions++;
1790
2d21ac55 1791 if ( !queue_empty(&sq->age_q) )
6d2010ae 1792 m_want = (vm_page_t) queue_first(&sq->age_q);
2d21ac55
A
1793 else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
1794 queue_empty(&vm_page_queue_inactive)) {
1795 if ( !queue_empty(&vm_page_queue_zf) )
6d2010ae 1796 m_want = (vm_page_t) queue_first(&vm_page_queue_zf);
2d21ac55 1797 } else if ( !queue_empty(&vm_page_queue_inactive) ) {
6d2010ae 1798 m_want = (vm_page_t) queue_first(&vm_page_queue_inactive);
2d21ac55
A
1799 }
1800 /*
1801 * this is the next object we're going to be interested in
1802 * try to make sure its available after the mutex_yield
1803 * returns control
1804 */
6d2010ae
A
1805 if (m_want)
1806 vm_pageout_scan_wants_object = m_want->object;
2d21ac55 1807
91447636
A
1808 /*
1809 * force us to dump any collected free pages
1810 * and to pause before moving on
1811 */
2d21ac55 1812 try_failed = TRUE;
55e303ae 1813
6d2010ae 1814 goto requeue_page;
1c79356b 1815 }
91447636 1816 object = m->object;
2d21ac55 1817 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
0b4e3aa0 1818
2d21ac55 1819 try_failed = FALSE;
1c79356b 1820 }
6d2010ae
A
1821 if (catch_up_count)
1822 catch_up_count--;
1c79356b 1823
6d2010ae
A
1824 if (m->busy) {
1825 if (m->encrypted_cleaning) {
1826 /*
1827 * ENCRYPTED SWAP:
1828 * if this page has already been picked up as
1829 * part of a page-out cluster, it will be busy
1830 * because it is being encrypted (see
1831 * vm_object_upl_request()). But we still
1832 * want to demote it from "clean-in-place"
1833 * (aka "adjacent") to "clean-and-free" (aka
1834 * "target"), so let's ignore its "busy" bit
1835 * here and proceed to check for "cleaning" a
1836 * little bit below...
1837 *
1838 * CAUTION CAUTION:
1839 * A "busy" page should still be left alone for
1840 * most purposes, so we have to be very careful
1841 * not to process that page too much.
1842 */
1843 assert(m->cleaning);
1844 goto consider_inactive_page;
2d21ac55 1845 }
2d21ac55 1846
1c79356b
A
1847 /*
1848 * Somebody is already playing with this page.
6d2010ae 1849 * Put it back on the appropriate queue
2d21ac55 1850 *
1c79356b 1851 */
1c79356b 1852 vm_pageout_inactive_busy++;
6d2010ae
A
1853requeue_page:
1854 switch (page_prev_state) {
1855
1856 case PAGE_STATE_SPECULATIVE:
1857 vm_page_speculate(m, FALSE);
1858 break;
91447636 1859
6d2010ae
A
1860 case PAGE_STATE_ZEROFILL:
1861 m->zero_fill = TRUE;
1862 /*
1863 * fall through to add in the
1864 * inactive state
1865 */
1866 case PAGE_STATE_INACTIVE:
1867 VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
1868 break;
1869
1870 case PAGE_STATE_INACTIVE_FIRST:
1871 VM_PAGE_ENQUEUE_INACTIVE(m, TRUE);
1872 break;
1873 }
91447636 1874 goto done_with_inactivepage;
1c79356b
A
1875 }
1876
6d2010ae 1877
1c79356b 1878 /*
6d2010ae
A
1879 * If it's absent, in error or the object is no longer alive,
1880 * we can reclaim the page... in the no longer alive case,
1881 * there are 2 states the page can be in that preclude us
1882 * from reclaiming it - busy or cleaning - that we've already
1883 * dealt with
1c79356b 1884 */
6d2010ae 1885 if (m->absent || m->error || !object->alive) {
1c79356b 1886
6d2010ae
A
1887 if (m->absent)
1888 vm_pageout_inactive_absent++;
1889 else if (!object->alive)
1890 vm_pageout_inactive_notalive++;
1891 else
1892 vm_pageout_inactive_error++;
91447636
A
1893reclaim_page:
1894 if (vm_pageout_deadlock_target) {
1895 vm_pageout_scan_inactive_throttle_success++;
1896 vm_pageout_deadlock_target--;
1897 }
2d21ac55
A
1898
1899 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
1900
b0d623f7 1901 if (object->internal) {
2d21ac55
A
1902 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
1903 } else {
1904 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
1905 }
b0d623f7 1906 vm_page_free_prepare_queues(m);
2d21ac55 1907
b0d623f7
A
1908 /*
1909 * remove page from object here since we're already
1910 * behind the object lock... defer the rest of the work
1911 * we'd normally do in vm_page_free_prepare_object
1912 * until 'vm_page_free_list' is called
1913 */
1914 if (m->tabled)
1915 vm_page_remove(m, TRUE);
55e303ae 1916
91447636
A
1917 assert(m->pageq.next == NULL &&
1918 m->pageq.prev == NULL);
55e303ae
A
1919 m->pageq.next = (queue_entry_t)local_freeq;
1920 local_freeq = m;
91447636 1921 local_freed++;
55e303ae 1922
91447636
A
1923 inactive_burst_count = 0;
1924
6d2010ae 1925 if (page_prev_state != PAGE_STATE_SPECULATIVE)
b0d623f7 1926 vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
b0d623f7 1927
91447636 1928 goto done_with_inactivepage;
1c79356b 1929 }
b0d623f7
A
1930 /*
1931 * If the object is empty, the page must be reclaimed even
1932 * if dirty or used.
1933 * If the page belongs to a volatile object, we stick it back
1934 * on.
1935 */
1936 if (object->copy == VM_OBJECT_NULL) {
1937 if (object->purgable == VM_PURGABLE_EMPTY) {
1938 m->busy = TRUE;
1939 if (m->pmapped == TRUE) {
1940 /* unmap the page */
1941 refmod_state = pmap_disconnect(m->phys_page);
1942 if (refmod_state & VM_MEM_MODIFIED) {
1943 m->dirty = TRUE;
1944 }
1945 }
1946 if (m->dirty || m->precious) {
1947 /* we saved the cost of cleaning this page ! */
1948 vm_page_purged_count++;
1949 }
1950 goto reclaim_page;
1951 }
1952 if (object->purgable == VM_PURGABLE_VOLATILE) {
1953 /* if it's wired, we can't put it on our queue */
1954 assert(!VM_PAGE_WIRED(m));
6d2010ae 1955
b0d623f7 1956 /* just stick it back on! */
6d2010ae 1957 reactivated_this_call++;
b0d623f7
A
1958 goto reactivate_page;
1959 }
1960 }
1961
6d2010ae
A
1962 consider_inactive_page:
1963 if (m->busy) {
1964 /*
1965 * CAUTION CAUTION:
1966 * A "busy" page should always be left alone, except...
1967 */
1968 if (m->cleaning && m->encrypted_cleaning) {
1969 /*
1970 * ENCRYPTED_SWAP:
1971 * We could get here with a "busy" page
1972 * if it's being encrypted during a
1973 * "clean-in-place" operation. We'll deal
1974 * with it right away by testing if it has been
1975 * referenced and either reactivating it or
1976 * promoting it from "clean-in-place" to
1977 * "clean-and-free".
1978 */
1979 } else {
1980 panic("\"busy\" page considered for pageout\n");
1981 }
1982 }
1983
1c79356b
A
1984 /*
1985 * If it's being used, reactivate.
1986 * (Fictitious pages are either busy or absent.)
2d21ac55
A
1987 * First, update the reference and dirty bits
1988 * to make sure the page is unreferenced.
1c79356b 1989 */
2d21ac55
A
1990 refmod_state = -1;
1991
1992 if (m->reference == FALSE && m->pmapped == TRUE) {
91447636
A
1993 refmod_state = pmap_get_refmod(m->phys_page);
1994
1995 if (refmod_state & VM_MEM_REFERENCED)
1996 m->reference = TRUE;
1997 if (refmod_state & VM_MEM_MODIFIED)
1998 m->dirty = TRUE;
1999 }
b0d623f7 2000
6d2010ae
A
2001 /*
2002 * If already cleaning this page in place and it hasn't
2003 * been recently referenced, convert from
2004 * "adjacent" to "target". We can leave the page mapped,
2005 * and upl_commit_range will determine whether
2006 * to free or reactivate.
2007 *
2008 * note: if m->encrypted_cleaning == TRUE, then
2009 * m->cleaning == TRUE
2010 * and we'll handle it here
2011 */
2012 if (m->cleaning) {
2013
2014 if (m->reference == TRUE) {
2015 reactivated_this_call++;
2016 goto reactivate_page;
2017 }
2018 m->busy = TRUE;
2019 m->pageout = TRUE;
2020 m->dump_cleaning = TRUE;
2021 vm_page_wire(m);
2022
2023 CLUSTER_STAT(vm_pageout_cluster_conversions++);
2024
2025 inactive_burst_count = 0;
2026
2027 goto done_with_inactivepage;
2028 }
2029
b0d623f7
A
2030 if (m->reference || m->dirty) {
2031 /* deal with a rogue "reusable" page */
2032 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
2033 }
2034
2d21ac55
A
2035 if (m->reference && !m->no_cache) {
2036 /*
2037 * The page we pulled off the inactive list has
2038 * been referenced. It is possible for other
2039 * processors to be touching pages faster than we
2040 * can clear the referenced bit and traverse the
2041 * inactive queue, so we limit the number of
2042 * reactivations.
2043 */
2044 if (++reactivated_this_call >= reactivate_limit) {
2045 vm_pageout_reactivation_limit_exceeded++;
2046 } else if (catch_up_count) {
2047 vm_pageout_catch_ups++;
2048 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
2049 vm_pageout_inactive_force_reclaim++;
2050 } else {
b0d623f7 2051 uint32_t isinuse;
2d21ac55 2052reactivate_page:
b0d623f7
A
2053 if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
2054 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
2055 /*
2056 * no explict mappings of this object exist
2057 * and it's not open via the filesystem
2058 */
2059 vm_page_deactivate(m);
2060 vm_pageout_inactive_deactivated++;
2061 } else {
2062 /*
2063 * The page was/is being used, so put back on active list.
2064 */
2065 vm_page_activate(m);
2066 VM_STAT_INCR(reactivations);
2067 }
2d21ac55
A
2068 vm_pageout_inactive_used++;
2069 inactive_burst_count = 0;
55e303ae 2070
2d21ac55
A
2071 goto done_with_inactivepage;
2072 }
2073 /*
2074 * Make sure we call pmap_get_refmod() if it
2075 * wasn't already called just above, to update
2076 * the dirty bit.
2077 */
2078 if ((refmod_state == -1) && !m->dirty && m->pmapped) {
2079 refmod_state = pmap_get_refmod(m->phys_page);
2080 if (refmod_state & VM_MEM_MODIFIED)
2081 m->dirty = TRUE;
2082 }
2083 forced_reclaim = TRUE;
2084 } else {
2085 forced_reclaim = FALSE;
1c79356b
A
2086 }
2087
91447636
A
2088 XPR(XPR_VM_PAGEOUT,
2089 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
b0d623f7 2090 object, m->offset, m, 0,0);
0b4e3aa0 2091
91447636
A
2092 /*
2093 * we've got a candidate page to steal...
2094 *
2095 * m->dirty is up to date courtesy of the
2096 * preceding check for m->reference... if
2097 * we get here, then m->reference had to be
2d21ac55
A
2098 * FALSE (or possibly "reactivate_limit" was
2099 * exceeded), but in either case we called
2100 * pmap_get_refmod() and updated both
2101 * m->reference and m->dirty
91447636
A
2102 *
2103 * if it's dirty or precious we need to
2104 * see if the target queue is throtttled
2105 * it if is, we need to skip over it by moving it back
2106 * to the end of the inactive queue
2107 */
b0d623f7 2108
91447636
A
2109 inactive_throttled = FALSE;
2110
2111 if (m->dirty || m->precious) {
2112 if (object->internal) {
2d21ac55 2113 if (VM_PAGE_Q_THROTTLED(iq))
91447636
A
2114 inactive_throttled = TRUE;
2115 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2d21ac55 2116 inactive_throttled = TRUE;
1c79356b 2117 }
91447636 2118 }
2d21ac55 2119throttle_inactive:
6d2010ae
A
2120 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
2121 object->internal && m->dirty &&
2122 (object->purgable == VM_PURGABLE_DENY ||
2123 object->purgable == VM_PURGABLE_NONVOLATILE ||
2124 object->purgable == VM_PURGABLE_VOLATILE)) {
2125 queue_enter(&vm_page_queue_throttled, m,
2126 vm_page_t, pageq);
2127 m->throttled = TRUE;
2128 vm_page_throttled_count++;
2129
2130 vm_pageout_scan_reclaimed_throttled++;
2131
2132 goto done_with_inactivepage;
2133 }
2134 if (inactive_throttled == TRUE) {
2135
2136 if (object->internal)
2137 vm_pageout_scan_inactive_throttled_internal++;
2138 else
2139 vm_pageout_scan_inactive_throttled_external++;
2140
2141 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2142 page_prev_state = PAGE_STATE_INACTIVE;
2143
2144 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && object->internal == FALSE) {
2145 /*
2146 * a) The external pageout queue is throttled
2147 * b) We're done with the active queue and moved on to the inactive queue
2148 * c) We start noticing dirty pages and usually we would put them at the end of the inactive queue, but,
2149 * d) We don't have a default pager, and so,
2150 * e) We push these onto the active queue in an effort to cause a re-evaluation of the active queue
2151 * and get back some, possibly clean, pages.
2152 *
2153 * We also keep a count of the pages of this kind, since, these will be a good indicator of us being in a deadlock
2154 * on systems without a dynamic pager, where:
2155 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2156 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2157 * c) Most of the pages in the inactive queue belong to this file.
2158 */
2159
2160 vm_page_activate(m);
2161 vm_pageout_inactive_external_forced_reactivate_count++;
2162 vm_pageout_inactive_external_forced_reactivate_limit--;
2163
2164 if (vm_pageout_inactive_external_forced_reactivate_limit <= 0){
2165 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2166#if CONFIG_EMBEDDED
2167 /*
2168 * Possible deadlock scenario so request jetsam action
2169 */
2170 assert(object);
2171 vm_object_unlock(object);
2172 object = VM_OBJECT_NULL;
2173 vm_page_unlock_queues();
2174
2175 if (jetsam_kill_top_proc(TRUE, kJetsamFlagsKilledVM) < 0){
2176 panic("vm_pageout_scan: Jetsam request failed\n");
2177 }
2178
2179 vm_page_lock_queues();
2180 delayed_unlock = 1;
2181#endif
2d21ac55 2182 }
6d2010ae
A
2183 inactive_burst_count = 0;
2184 goto done_with_inactivepage;
2185 } else {
2186 goto requeue_page;
1c79356b 2187 }
1c79356b 2188 }
2d21ac55 2189
1c79356b 2190 /*
91447636
A
2191 * we've got a page that we can steal...
2192 * eliminate all mappings and make sure
2193 * we have the up-to-date modified state
2194 * first take the page BUSY, so that no new
2195 * mappings can be made
1c79356b 2196 */
1c79356b 2197 m->busy = TRUE;
55e303ae 2198
91447636
A
2199 /*
2200 * if we need to do a pmap_disconnect then we
2201 * need to re-evaluate m->dirty since the pmap_disconnect
2202 * provides the true state atomically... the
2203 * page was still mapped up to the pmap_disconnect
2204 * and may have been dirtied at the last microsecond
2205 *
2206 * we also check for the page being referenced 'late'
2207 * if it was, we first need to do a WAKEUP_DONE on it
2208 * since we already set m->busy = TRUE, before
2209 * going off to reactivate it
2210 *
2d21ac55
A
2211 * Note that if 'pmapped' is FALSE then the page is not
2212 * and has not been in any map, so there is no point calling
2213 * pmap_disconnect(). m->dirty and/or m->reference could
2214 * have been set in anticipation of likely usage of the page.
91447636 2215 */
2d21ac55 2216 if (m->pmapped == TRUE) {
91447636 2217 refmod_state = pmap_disconnect(m->phys_page);
0b4e3aa0 2218
91447636
A
2219 if (refmod_state & VM_MEM_MODIFIED)
2220 m->dirty = TRUE;
2221 if (refmod_state & VM_MEM_REFERENCED) {
2d21ac55
A
2222
2223 /* If m->reference is already set, this page must have
2224 * already failed the reactivate_limit test, so don't
2225 * bump the counts twice.
2226 */
2227 if ( ! m->reference ) {
2228 m->reference = TRUE;
2229 if (forced_reclaim ||
2230 ++reactivated_this_call >= reactivate_limit)
2231 vm_pageout_reactivation_limit_exceeded++;
2232 else {
2233 PAGE_WAKEUP_DONE(m);
2234 goto reactivate_page;
2235 }
2236 }
91447636
A
2237 }
2238 }
2d21ac55
A
2239 /*
2240 * reset our count of pages that have been reclaimed
2241 * since the last page was 'stolen'
2242 */
2243 inactive_reclaim_run = 0;
2244
1c79356b
A
2245 /*
2246 * If it's clean and not precious, we can free the page.
2247 */
1c79356b 2248 if (!m->dirty && !m->precious) {
b0d623f7 2249
6d2010ae
A
2250 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2251 vm_pageout_speculative_clean++;
2252 else {
2253 if (page_prev_state == PAGE_STATE_ZEROFILL)
2254 vm_pageout_inactive_zf++;
2255 vm_pageout_inactive_clean++;
2256 }
1c79356b
A
2257 goto reclaim_page;
2258 }
2d21ac55
A
2259
2260 /*
2261 * The page may have been dirtied since the last check
2262 * for a throttled target queue (which may have been skipped
2263 * if the page was clean then). With the dirty page
2264 * disconnected here, we can make one final check.
2265 */
6d2010ae
A
2266 if (object->internal) {
2267 if (VM_PAGE_Q_THROTTLED(iq))
2268 inactive_throttled = TRUE;
2269 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2270 inactive_throttled = TRUE;
2271 }
2d21ac55 2272
6d2010ae
A
2273 if (inactive_throttled == TRUE) {
2274 /*
2275 * we set busy before issuing the pmap_disconnect,
2276 * so clear it and wakeup anyone that happened upon
2277 * it in that state
2278 */
2279 PAGE_WAKEUP_DONE(m);
2280 goto throttle_inactive;
2d21ac55
A
2281 }
2282
b0d623f7
A
2283 vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
2284
91447636 2285 vm_pageout_cluster(m);
1c79356b 2286
6d2010ae 2287 if (page_prev_state == PAGE_STATE_ZEROFILL)
b0d623f7 2288 vm_pageout_inactive_zf++;
6d2010ae
A
2289 if (object->internal)
2290 vm_pageout_inactive_dirty_internal++;
2291 else
2292 vm_pageout_inactive_dirty_external++;
2293
91447636 2294 inactive_burst_count = 0;
1c79356b 2295
91447636 2296done_with_inactivepage:
6d2010ae 2297 if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
1c79356b 2298
91447636 2299 if (object != NULL) {
b0d623f7 2300 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
91447636
A
2301 vm_object_unlock(object);
2302 object = NULL;
2303 }
2304 if (local_freeq) {
b0d623f7 2305 vm_page_unlock_queues();
6d2010ae
A
2306
2307 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
2308 vm_page_free_count, local_freed, delayed_unlock_limit, 4);
2309
b0d623f7 2310 vm_page_free_list(local_freeq, TRUE);
91447636 2311
6d2010ae
A
2312 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
2313 vm_page_free_count, local_freed, 0, 4);
2314
2d21ac55 2315 local_freeq = NULL;
91447636 2316 local_freed = 0;
b0d623f7
A
2317 vm_page_lock_queues();
2318 } else
2319 lck_mtx_yield(&vm_page_queue_lock);
2d21ac55
A
2320
2321 delayed_unlock = 1;
1c79356b 2322 }
91447636
A
2323 /*
2324 * back to top of pageout scan loop
2325 */
1c79356b 2326 }
1c79356b
A
2327}
2328
1c79356b 2329
1c79356b
A
2330int vm_page_free_count_init;
2331
2332void
2333vm_page_free_reserve(
2334 int pages)
2335{
2336 int free_after_reserve;
2337
2338 vm_page_free_reserved += pages;
2339
6d2010ae
A
2340 if (vm_page_free_reserved > VM_PAGE_FREE_RESERVED_LIMIT)
2341 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
2342
1c79356b
A
2343 free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
2344
2345 vm_page_free_min = vm_page_free_reserved +
2346 VM_PAGE_FREE_MIN(free_after_reserve);
2347
2d21ac55
A
2348 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
2349 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
2350
1c79356b
A
2351 vm_page_free_target = vm_page_free_reserved +
2352 VM_PAGE_FREE_TARGET(free_after_reserve);
2353
2d21ac55
A
2354 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
2355 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
2356
1c79356b
A
2357 if (vm_page_free_target < vm_page_free_min + 5)
2358 vm_page_free_target = vm_page_free_min + 5;
2d21ac55 2359
b0d623f7
A
2360 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
2361 vm_page_creation_throttle = vm_page_free_target / 2;
1c79356b
A
2362}
2363
2364/*
2365 * vm_pageout is the high level pageout daemon.
2366 */
2367
55e303ae
A
2368void
2369vm_pageout_continue(void)
2370{
2d21ac55 2371 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
55e303ae
A
2372 vm_pageout_scan_event_counter++;
2373 vm_pageout_scan();
2374 /* we hold vm_page_queue_free_lock now */
2375 assert(vm_page_free_wanted == 0);
2d21ac55 2376 assert(vm_page_free_wanted_privileged == 0);
55e303ae 2377 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
b0d623f7 2378 lck_mtx_unlock(&vm_page_queue_free_lock);
55e303ae
A
2379
2380 counter(c_vm_pageout_block++);
91447636 2381 thread_block((thread_continue_t)vm_pageout_continue);
55e303ae
A
2382 /*NOTREACHED*/
2383}
1c79356b 2384
91447636 2385
91447636 2386#ifdef FAKE_DEADLOCK
1c79356b 2387
91447636
A
2388#define FAKE_COUNT 5000
2389
2390int internal_count = 0;
2391int fake_deadlock = 0;
2392
2393#endif
2394
2395static void
2396vm_pageout_iothread_continue(struct vm_pageout_queue *q)
2397{
2398 vm_page_t m = NULL;
2399 vm_object_t object;
2d21ac55
A
2400 memory_object_t pager;
2401 thread_t self = current_thread();
91447636 2402
2d21ac55
A
2403 if ((vm_pageout_internal_iothread != THREAD_NULL)
2404 && (self == vm_pageout_external_iothread )
2405 && (self->options & TH_OPT_VMPRIV))
2406 self->options &= ~TH_OPT_VMPRIV;
2407
2408 vm_page_lockspin_queues();
91447636
A
2409
2410 while ( !queue_empty(&q->pgo_pending) ) {
2411
2412 q->pgo_busy = TRUE;
2413 queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
6d2010ae
A
2414 if (m->object == slide_info.slide_object) {
2415 panic("slid page %p not allowed on this path\n", m);
2416 }
b0d623f7 2417 VM_PAGE_CHECK(m);
91447636 2418 m->pageout_queue = FALSE;
91447636
A
2419 m->pageq.next = NULL;
2420 m->pageq.prev = NULL;
b0d623f7
A
2421 vm_page_unlock_queues();
2422
91447636
A
2423#ifdef FAKE_DEADLOCK
2424 if (q == &vm_pageout_queue_internal) {
2425 vm_offset_t addr;
2426 int pg_count;
2427
2428 internal_count++;
2429
2430 if ((internal_count == FAKE_COUNT)) {
2431
2432 pg_count = vm_page_free_count + vm_page_free_reserved;
2433
2434 if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
2435 kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
2436 }
2437 internal_count = 0;
2438 fake_deadlock++;
2439 }
2440 }
2441#endif
2442 object = m->object;
2443
2d21ac55
A
2444 vm_object_lock(object);
2445
91447636 2446 if (!object->pager_initialized) {
91447636
A
2447
2448 /*
2449 * If there is no memory object for the page, create
2450 * one and hand it to the default pager.
2451 */
2452
2453 if (!object->pager_initialized)
0c530ab8
A
2454 vm_object_collapse(object,
2455 (vm_object_offset_t) 0,
2456 TRUE);
91447636
A
2457 if (!object->pager_initialized)
2458 vm_object_pager_create(object);
2459 if (!object->pager_initialized) {
2460 /*
2461 * Still no pager for the object.
2462 * Reactivate the page.
2463 *
2464 * Should only happen if there is no
2465 * default pager.
2466 */
2d21ac55 2467 vm_page_lockspin_queues();
b0d623f7
A
2468
2469 vm_pageout_queue_steal(m, TRUE);
91447636 2470 vm_page_activate(m);
6d2010ae 2471 vm_pageout_dirty_no_pager++;
b0d623f7 2472
91447636
A
2473 vm_page_unlock_queues();
2474
2475 /*
2476 * And we are done with it.
2477 */
2478 PAGE_WAKEUP_DONE(m);
2479
2480 vm_object_paging_end(object);
2481 vm_object_unlock(object);
2482
2d21ac55 2483 vm_page_lockspin_queues();
91447636 2484 continue;
2d21ac55
A
2485 }
2486 }
2487 pager = object->pager;
2488 if (pager == MEMORY_OBJECT_NULL) {
2489 /*
2490 * This pager has been destroyed by either
2491 * memory_object_destroy or vm_object_destroy, and
2492 * so there is nowhere for the page to go.
2d21ac55 2493 */
0b4c1975
A
2494 if (m->pageout) {
2495 /*
2496 * Just free the page... VM_PAGE_FREE takes
2497 * care of cleaning up all the state...
2498 * including doing the vm_pageout_throttle_up
2499 */
2500 VM_PAGE_FREE(m);
2501 } else {
2502 vm_page_lockspin_queues();
91447636 2503
0b4c1975
A
2504 vm_pageout_queue_steal(m, TRUE);
2505 vm_page_activate(m);
2506
2507 vm_page_unlock_queues();
91447636 2508
0b4c1975
A
2509 /*
2510 * And we are done with it.
2511 */
2512 PAGE_WAKEUP_DONE(m);
2513 }
2d21ac55 2514 vm_object_paging_end(object);
91447636 2515 vm_object_unlock(object);
2d21ac55
A
2516
2517 vm_page_lockspin_queues();
2518 continue;
91447636 2519 }
b0d623f7 2520 VM_PAGE_CHECK(m);
2d21ac55 2521 vm_object_unlock(object);
91447636
A
2522 /*
2523 * we expect the paging_in_progress reference to have
2524 * already been taken on the object before it was added
2525 * to the appropriate pageout I/O queue... this will
2526 * keep the object from being terminated and/or the
2527 * paging_offset from changing until the I/O has
2528 * completed... therefore no need to lock the object to
2529 * pull the paging_offset from it.
2530 *
2531 * Send the data to the pager.
2532 * any pageout clustering happens there
2533 */
2d21ac55 2534 memory_object_data_return(pager,
91447636
A
2535 m->offset + object->paging_offset,
2536 PAGE_SIZE,
2537 NULL,
2538 NULL,
2539 FALSE,
2540 FALSE,
2541 0);
2542
2543 vm_object_lock(object);
2544 vm_object_paging_end(object);
2545 vm_object_unlock(object);
2546
2d21ac55 2547 vm_page_lockspin_queues();
91447636
A
2548 }
2549 assert_wait((event_t) q, THREAD_UNINT);
2550
91447636
A
2551 if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
2552 q->pgo_throttled = FALSE;
0b4c1975
A
2553 thread_wakeup((event_t) &q->pgo_laundry);
2554 }
2555 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
2556 q->pgo_draining = FALSE;
2557 thread_wakeup((event_t) (&q->pgo_laundry+1));
2558 }
91447636
A
2559 q->pgo_busy = FALSE;
2560 q->pgo_idle = TRUE;
2561 vm_page_unlock_queues();
2562
91447636
A
2563 thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
2564 /*NOTREACHED*/
2565}
2566
2567
2568static void
2569vm_pageout_iothread_external(void)
2570{
2d21ac55
A
2571 thread_t self = current_thread();
2572
2573 self->options |= TH_OPT_VMPRIV;
91447636
A
2574
2575 vm_pageout_iothread_continue(&vm_pageout_queue_external);
2576 /*NOTREACHED*/
2577}
2578
2579
2580static void
2581vm_pageout_iothread_internal(void)
2582{
2583 thread_t self = current_thread();
2584
2585 self->options |= TH_OPT_VMPRIV;
2586
2587 vm_pageout_iothread_continue(&vm_pageout_queue_internal);
2588 /*NOTREACHED*/
2589}
2590
b0d623f7 2591kern_return_t
0b4c1975 2592vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
b0d623f7
A
2593{
2594 if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
2595 return KERN_SUCCESS;
2596 } else {
2597 return KERN_FAILURE; /* Already set */
2598 }
2599}
2600
91447636
A
2601static void
2602vm_pageout_garbage_collect(int collect)
2603{
2604 if (collect) {
b0d623f7 2605 boolean_t buf_large_zfree = FALSE;
91447636
A
2606 stack_collect();
2607
2608 /*
2609 * consider_zone_gc should be last, because the other operations
2610 * might return memory to zones.
2611 */
2612 consider_machine_collect();
b0d623f7 2613 if (consider_buffer_cache_collect != NULL) {
0b4c1975 2614 buf_large_zfree = (*consider_buffer_cache_collect)(0);
b0d623f7
A
2615 }
2616 consider_zone_gc(buf_large_zfree);
91447636
A
2617
2618 consider_machine_adjust();
6d2010ae
A
2619 consider_pressure_events();
2620
91447636
A
2621 }
2622
2623 assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
2624
2625 thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
2626 /*NOTREACHED*/
2627}
2628
2629
2630
2631void
2632vm_pageout(void)
2633{
2634 thread_t self = current_thread();
2635 thread_t thread;
2636 kern_return_t result;
2637 spl_t s;
2638
2639 /*
2640 * Set thread privileges.
2641 */
2642 s = splsched();
2643 thread_lock(self);
2644 self->priority = BASEPRI_PREEMPT - 1;
2645 set_sched_pri(self, self->priority);
2646 thread_unlock(self);
2d21ac55
A
2647
2648 if (!self->reserved_stack)
2649 self->reserved_stack = self->kernel_stack;
2650
91447636
A
2651 splx(s);
2652
2653 /*
2654 * Initialize some paging parameters.
2655 */
2656
2657 if (vm_pageout_idle_wait == 0)
2658 vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
2659
2660 if (vm_pageout_burst_wait == 0)
2661 vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
2662
2663 if (vm_pageout_empty_wait == 0)
2664 vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
2665
2666 if (vm_pageout_deadlock_wait == 0)
2667 vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
2668
2669 if (vm_pageout_deadlock_relief == 0)
2670 vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
2671
2672 if (vm_pageout_inactive_relief == 0)
2673 vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
2674
2675 if (vm_pageout_burst_active_throttle == 0)
2676 vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
2677
2678 if (vm_pageout_burst_inactive_throttle == 0)
2679 vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
2680
2681 /*
2682 * Set kernel task to low backing store privileged
55e303ae
A
2683 * status
2684 */
2685 task_lock(kernel_task);
2686 kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
2687 task_unlock(kernel_task);
2688
1c79356b 2689 vm_page_free_count_init = vm_page_free_count;
2d21ac55 2690
1c79356b
A
2691 /*
2692 * even if we've already called vm_page_free_reserve
2693 * call it again here to insure that the targets are
2694 * accurately calculated (it uses vm_page_free_count_init)
2695 * calling it with an arg of 0 will not change the reserve
2696 * but will re-calculate free_min and free_target
2697 */
91447636
A
2698 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
2699 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
55e303ae 2700 } else
1c79356b
A
2701 vm_page_free_reserve(0);
2702
55e303ae 2703
91447636
A
2704 queue_init(&vm_pageout_queue_external.pgo_pending);
2705 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
2706 vm_pageout_queue_external.pgo_laundry = 0;
2707 vm_pageout_queue_external.pgo_idle = FALSE;
2708 vm_pageout_queue_external.pgo_busy = FALSE;
2709 vm_pageout_queue_external.pgo_throttled = FALSE;
0b4c1975 2710 vm_pageout_queue_external.pgo_draining = FALSE;
55e303ae 2711
91447636 2712 queue_init(&vm_pageout_queue_internal.pgo_pending);
2d21ac55 2713 vm_pageout_queue_internal.pgo_maxlaundry = 0;
91447636
A
2714 vm_pageout_queue_internal.pgo_laundry = 0;
2715 vm_pageout_queue_internal.pgo_idle = FALSE;
2716 vm_pageout_queue_internal.pgo_busy = FALSE;
2717 vm_pageout_queue_internal.pgo_throttled = FALSE;
0b4c1975 2718 vm_pageout_queue_internal.pgo_draining = FALSE;
9bccf70c 2719
55e303ae 2720
2d21ac55
A
2721 /* internal pageout thread started when default pager registered first time */
2722 /* external pageout and garbage collection threads started here */
55e303ae 2723
2d21ac55
A
2724 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
2725 BASEPRI_PREEMPT - 1,
2726 &vm_pageout_external_iothread);
91447636
A
2727 if (result != KERN_SUCCESS)
2728 panic("vm_pageout_iothread_external: create failed");
55e303ae 2729
2d21ac55 2730 thread_deallocate(vm_pageout_external_iothread);
9bccf70c 2731
2d21ac55
A
2732 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
2733 MINPRI_KERNEL,
2734 &thread);
91447636
A
2735 if (result != KERN_SUCCESS)
2736 panic("vm_pageout_garbage_collect: create failed");
55e303ae 2737
91447636 2738 thread_deallocate(thread);
55e303ae 2739
8f6c56a5
A
2740 vm_object_reaper_init();
2741
2d21ac55 2742
91447636 2743 vm_pageout_continue();
2d21ac55
A
2744
2745 /*
2746 * Unreached code!
2747 *
2748 * The vm_pageout_continue() call above never returns, so the code below is never
2749 * executed. We take advantage of this to declare several DTrace VM related probe
2750 * points that our kernel doesn't have an analog for. These are probe points that
2751 * exist in Solaris and are in the DTrace documentation, so people may have written
2752 * scripts that use them. Declaring the probe points here means their scripts will
2753 * compile and execute which we want for portability of the scripts, but since this
2754 * section of code is never reached, the probe points will simply never fire. Yes,
2755 * this is basically a hack. The problem is the DTrace probe points were chosen with
2756 * Solaris specific VM events in mind, not portability to different VM implementations.
2757 */
2758
2759 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
2760 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
2761 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
2762 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
2763 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
2764 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
2765 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
91447636 2766 /*NOTREACHED*/
9bccf70c
A
2767}
2768
2d21ac55
A
2769kern_return_t
2770vm_pageout_internal_start(void)
2771{
2772 kern_return_t result;
2773
2774 vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
2775 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
2776 if (result == KERN_SUCCESS)
2777 thread_deallocate(vm_pageout_internal_iothread);
2778 return result;
2779}
2780
1c79356b 2781
b0d623f7
A
2782static upl_t
2783upl_create(int type, int flags, upl_size_t size)
0b4e3aa0
A
2784{
2785 upl_t upl;
2d21ac55
A
2786 int page_field_size = 0;
2787 int upl_flags = 0;
2788 int upl_size = sizeof(struct upl);
0b4e3aa0 2789
b0d623f7
A
2790 size = round_page_32(size);
2791
2d21ac55 2792 if (type & UPL_CREATE_LITE) {
b0d623f7 2793 page_field_size = (atop(size) + 7) >> 3;
55e303ae 2794 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
2d21ac55
A
2795
2796 upl_flags |= UPL_LITE;
55e303ae 2797 }
2d21ac55 2798 if (type & UPL_CREATE_INTERNAL) {
b0d623f7 2799 upl_size += (int) sizeof(struct upl_page_info) * atop(size);
2d21ac55
A
2800
2801 upl_flags |= UPL_INTERNAL;
0b4e3aa0 2802 }
2d21ac55
A
2803 upl = (upl_t)kalloc(upl_size + page_field_size);
2804
2805 if (page_field_size)
2806 bzero((char *)upl + upl_size, page_field_size);
2807
2808 upl->flags = upl_flags | flags;
0b4e3aa0
A
2809 upl->src_object = NULL;
2810 upl->kaddr = (vm_offset_t)0;
2811 upl->size = 0;
2812 upl->map_object = NULL;
2813 upl->ref_count = 1;
6d2010ae 2814 upl->ext_ref_count = 0;
0c530ab8 2815 upl->highest_page = 0;
0b4e3aa0 2816 upl_lock_init(upl);
b0d623f7
A
2817 upl->vector_upl = NULL;
2818#if UPL_DEBUG
0b4e3aa0
A
2819 upl->ubc_alias1 = 0;
2820 upl->ubc_alias2 = 0;
b0d623f7
A
2821
2822 upl->upl_creator = current_thread();
2823 upl->upl_state = 0;
2824 upl->upl_commit_index = 0;
2825 bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
2826
2827 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
91447636 2828#endif /* UPL_DEBUG */
b0d623f7 2829
0b4e3aa0
A
2830 return(upl);
2831}
2832
2833static void
2d21ac55 2834upl_destroy(upl_t upl)
0b4e3aa0 2835{
55e303ae 2836 int page_field_size; /* bit field in word size buf */
2d21ac55 2837 int size;
0b4e3aa0 2838
6d2010ae
A
2839 if (upl->ext_ref_count) {
2840 panic("upl(%p) ext_ref_count", upl);
2841 }
2842
b0d623f7 2843#if UPL_DEBUG
0b4e3aa0 2844 {
55e303ae 2845 vm_object_t object;
2d21ac55
A
2846
2847 if (upl->flags & UPL_SHADOWED) {
55e303ae
A
2848 object = upl->map_object->shadow;
2849 } else {
2850 object = upl->map_object;
2851 }
2852 vm_object_lock(object);
2d21ac55 2853 queue_remove(&object->uplq, upl, upl_t, uplq);
55e303ae 2854 vm_object_unlock(object);
0b4e3aa0 2855 }
91447636 2856#endif /* UPL_DEBUG */
2d21ac55
A
2857 /*
2858 * drop a reference on the map_object whether or
2859 * not a pageout object is inserted
2860 */
2861 if (upl->flags & UPL_SHADOWED)
0b4e3aa0 2862 vm_object_deallocate(upl->map_object);
55e303ae 2863
2d21ac55
A
2864 if (upl->flags & UPL_DEVICE_MEMORY)
2865 size = PAGE_SIZE;
2866 else
2867 size = upl->size;
55e303ae 2868 page_field_size = 0;
2d21ac55 2869
55e303ae 2870 if (upl->flags & UPL_LITE) {
2d21ac55 2871 page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
55e303ae
A
2872 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
2873 }
b0d623f7
A
2874 upl_lock_destroy(upl);
2875 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
2d21ac55 2876 if (upl->flags & UPL_INTERNAL) {
91447636
A
2877 kfree(upl,
2878 sizeof(struct upl) +
2d21ac55 2879 (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
91447636 2880 + page_field_size);
0b4e3aa0 2881 } else {
91447636 2882 kfree(upl, sizeof(struct upl) + page_field_size);
0b4e3aa0
A
2883 }
2884}
2885
0b4e3aa0 2886void
2d21ac55 2887upl_deallocate(upl_t upl)
0b4e3aa0 2888{
b0d623f7
A
2889 if (--upl->ref_count == 0) {
2890 if(vector_upl_is_valid(upl))
2891 vector_upl_deallocate(upl);
0b4e3aa0 2892 upl_destroy(upl);
b0d623f7 2893 }
0b4e3aa0 2894}
1c79356b 2895
b0d623f7
A
2896#if DEVELOPMENT || DEBUG
2897/*/*
91447636
A
2898 * Statistics about UPL enforcement of copy-on-write obligations.
2899 */
2900unsigned long upl_cow = 0;
2901unsigned long upl_cow_again = 0;
91447636
A
2902unsigned long upl_cow_pages = 0;
2903unsigned long upl_cow_again_pages = 0;
b0d623f7
A
2904
2905unsigned long iopl_cow = 0;
2906unsigned long iopl_cow_pages = 0;
2907#endif
91447636 2908
1c79356b 2909/*
0b4e3aa0 2910 * Routine: vm_object_upl_request
1c79356b
A
2911 * Purpose:
2912 * Cause the population of a portion of a vm_object.
2913 * Depending on the nature of the request, the pages
2914 * returned may be contain valid data or be uninitialized.
2915 * A page list structure, listing the physical pages
2916 * will be returned upon request.
2917 * This function is called by the file system or any other
2918 * supplier of backing store to a pager.
2919 * IMPORTANT NOTE: The caller must still respect the relationship
2920 * between the vm_object and its backing memory object. The
2921 * caller MUST NOT substitute changes in the backing file
2922 * without first doing a memory_object_lock_request on the
2923 * target range unless it is know that the pages are not
2924 * shared with another entity at the pager level.
2925 * Copy_in_to:
2926 * if a page list structure is present
2927 * return the mapped physical pages, where a
2928 * page is not present, return a non-initialized
2929 * one. If the no_sync bit is turned on, don't
2930 * call the pager unlock to synchronize with other
2931 * possible copies of the page. Leave pages busy
2932 * in the original object, if a page list structure
2933 * was specified. When a commit of the page list
2934 * pages is done, the dirty bit will be set for each one.
2935 * Copy_out_from:
2936 * If a page list structure is present, return
2937 * all mapped pages. Where a page does not exist
2938 * map a zero filled one. Leave pages busy in
2939 * the original object. If a page list structure
2940 * is not specified, this call is a no-op.
2941 *
2942 * Note: access of default pager objects has a rather interesting
2943 * twist. The caller of this routine, presumably the file system
2944 * page cache handling code, will never actually make a request
2945 * against a default pager backed object. Only the default
2946 * pager will make requests on backing store related vm_objects
2947 * In this way the default pager can maintain the relationship
2948 * between backing store files (abstract memory objects) and
2949 * the vm_objects (cache objects), they support.
2950 *
2951 */
91447636 2952
0b4e3aa0
A
2953__private_extern__ kern_return_t
2954vm_object_upl_request(
1c79356b 2955 vm_object_t object,
91447636
A
2956 vm_object_offset_t offset,
2957 upl_size_t size,
1c79356b 2958 upl_t *upl_ptr,
0b4e3aa0
A
2959 upl_page_info_array_t user_page_list,
2960 unsigned int *page_list_count,
91447636 2961 int cntrl_flags)
1c79356b 2962{
91447636 2963 vm_page_t dst_page = VM_PAGE_NULL;
2d21ac55
A
2964 vm_object_offset_t dst_offset;
2965 upl_size_t xfer_size;
6d2010ae 2966 unsigned int size_in_pages;
1c79356b 2967 boolean_t dirty;
55e303ae 2968 boolean_t hw_dirty;
1c79356b 2969 upl_t upl = NULL;
91447636
A
2970 unsigned int entry;
2971#if MACH_CLUSTER_STATS
1c79356b 2972 boolean_t encountered_lrp = FALSE;
91447636 2973#endif
1c79356b 2974 vm_page_t alias_page = NULL;
2d21ac55 2975 int refmod_state = 0;
91447636
A
2976 wpl_array_t lite_list = NULL;
2977 vm_object_t last_copy_object;
6d2010ae
A
2978 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
2979 struct vm_page_delayed_work *dwp;
b0d623f7 2980 int dw_count;
6d2010ae 2981 int dw_limit;
91447636
A
2982
2983 if (cntrl_flags & ~UPL_VALID_FLAGS) {
2984 /*
2985 * For forward compatibility's sake,
2986 * reject any unknown flag.
2987 */
2988 return KERN_INVALID_VALUE;
2989 }
2d21ac55
A
2990 if ( (!object->internal) && (object->paging_offset != 0) )
2991 panic("vm_object_upl_request: external object with non-zero paging offset\n");
2992 if (object->phys_contiguous)
2993 panic("vm_object_upl_request: contiguous object specified\n");
0b4e3aa0 2994
0b4e3aa0 2995
cf7d32b8
A
2996 if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
2997 size = MAX_UPL_SIZE * PAGE_SIZE;
1c79356b 2998
2d21ac55 2999 if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
cf7d32b8 3000 *page_list_count = MAX_UPL_SIZE;
1c79356b 3001
2d21ac55
A
3002 if (cntrl_flags & UPL_SET_INTERNAL) {
3003 if (cntrl_flags & UPL_SET_LITE) {
55e303ae 3004
2d21ac55 3005 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
91447636 3006
2d21ac55
A
3007 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
3008 lite_list = (wpl_array_t)
91447636 3009 (((uintptr_t)user_page_list) +
2d21ac55 3010 ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
b0d623f7
A
3011 if (size == 0) {
3012 user_page_list = NULL;
3013 lite_list = NULL;
3014 }
1c79356b 3015 } else {
2d21ac55 3016 upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
55e303ae 3017
2d21ac55 3018 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
3019 if (size == 0) {
3020 user_page_list = NULL;
3021 }
55e303ae 3022 }
2d21ac55
A
3023 } else {
3024 if (cntrl_flags & UPL_SET_LITE) {
91447636 3025
2d21ac55 3026 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
55e303ae 3027
2d21ac55 3028 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
3029 if (size == 0) {
3030 lite_list = NULL;
3031 }
55e303ae 3032 } else {
2d21ac55 3033 upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
0b4e3aa0 3034 }
55e303ae 3035 }
2d21ac55
A
3036 *upl_ptr = upl;
3037
3038 if (user_page_list)
3039 user_page_list[0].device = FALSE;
91447636 3040
2d21ac55
A
3041 if (cntrl_flags & UPL_SET_LITE) {
3042 upl->map_object = object;
3043 } else {
3044 upl->map_object = vm_object_allocate(size);
3045 /*
3046 * No neeed to lock the new object: nobody else knows
3047 * about it yet, so it's all ours so far.
3048 */
3049 upl->map_object->shadow = object;
3050 upl->map_object->pageout = TRUE;
3051 upl->map_object->can_persist = FALSE;
3052 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
6d2010ae 3053 upl->map_object->vo_shadow_offset = offset;
2d21ac55
A
3054 upl->map_object->wimg_bits = object->wimg_bits;
3055
3056 VM_PAGE_GRAB_FICTITIOUS(alias_page);
3057
3058 upl->flags |= UPL_SHADOWED;
3059 }
3060 /*
91447636
A
3061 * ENCRYPTED SWAP:
3062 * Just mark the UPL as "encrypted" here.
3063 * We'll actually encrypt the pages later,
3064 * in upl_encrypt(), when the caller has
3065 * selected which pages need to go to swap.
3066 */
2d21ac55 3067 if (cntrl_flags & UPL_ENCRYPT)
91447636 3068 upl->flags |= UPL_ENCRYPTED;
2d21ac55
A
3069
3070 if (cntrl_flags & UPL_FOR_PAGEOUT)
91447636 3071 upl->flags |= UPL_PAGEOUT;
2d21ac55 3072
55e303ae 3073 vm_object_lock(object);
b0d623f7 3074 vm_object_activity_begin(object);
2d21ac55
A
3075
3076 /*
3077 * we can lock in the paging_offset once paging_in_progress is set
3078 */
3079 upl->size = size;
3080 upl->offset = offset + object->paging_offset;
55e303ae 3081
b0d623f7 3082#if UPL_DEBUG
2d21ac55 3083 queue_enter(&object->uplq, upl, upl_t, uplq);
91447636 3084#endif /* UPL_DEBUG */
91447636 3085
2d21ac55 3086 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
91447636 3087 /*
2d21ac55
A
3088 * Honor copy-on-write obligations
3089 *
91447636
A
3090 * The caller is gathering these pages and
3091 * might modify their contents. We need to
3092 * make sure that the copy object has its own
3093 * private copies of these pages before we let
3094 * the caller modify them.
3095 */
3096 vm_object_update(object,
3097 offset,
3098 size,
3099 NULL,
3100 NULL,
3101 FALSE, /* should_return */
3102 MEMORY_OBJECT_COPY_SYNC,
3103 VM_PROT_NO_CHANGE);
b0d623f7 3104#if DEVELOPMENT || DEBUG
91447636
A
3105 upl_cow++;
3106 upl_cow_pages += size >> PAGE_SHIFT;
b0d623f7 3107#endif
55e303ae 3108 }
2d21ac55
A
3109 /*
3110 * remember which copy object we synchronized with
3111 */
91447636 3112 last_copy_object = object->copy;
1c79356b 3113 entry = 0;
55e303ae 3114
2d21ac55
A
3115 xfer_size = size;
3116 dst_offset = offset;
6d2010ae 3117 size_in_pages = size / PAGE_SIZE;
2d21ac55 3118
b0d623f7
A
3119 dwp = &dw_array[0];
3120 dw_count = 0;
6d2010ae
A
3121 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
3122
3123 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
3124 object->resident_page_count < (MAX_UPL_SIZE * 2))
3125 object->scan_collisions = 0;
b0d623f7 3126
2d21ac55
A
3127 while (xfer_size) {
3128
b0d623f7
A
3129 dwp->dw_mask = 0;
3130
2d21ac55 3131 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
2d21ac55
A
3132 vm_object_unlock(object);
3133 VM_PAGE_GRAB_FICTITIOUS(alias_page);
b0d623f7 3134 vm_object_lock(object);
4a3eedf9 3135 }
2d21ac55
A
3136 if (cntrl_flags & UPL_COPYOUT_FROM) {
3137 upl->flags |= UPL_PAGE_SYNC_DONE;
3138
91447636 3139 if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
1c79356b
A
3140 dst_page->fictitious ||
3141 dst_page->absent ||
3142 dst_page->error ||
b0d623f7 3143 (VM_PAGE_WIRED(dst_page) && !dst_page->pageout && !dst_page->list_req_pending)) {
91447636
A
3144
3145 if (user_page_list)
1c79356b 3146 user_page_list[entry].phys_addr = 0;
2d21ac55 3147
b0d623f7 3148 goto try_next_page;
2d21ac55
A
3149 }
3150 /*
3151 * grab this up front...
3152 * a high percentange of the time we're going to
3153 * need the hardware modification state a bit later
3154 * anyway... so we can eliminate an extra call into
3155 * the pmap layer by grabbing it here and recording it
3156 */
3157 if (dst_page->pmapped)
3158 refmod_state = pmap_get_refmod(dst_page->phys_page);
3159 else
3160 refmod_state = 0;
3161
3162 if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
91447636 3163 /*
2d21ac55
A
3164 * page is on inactive list and referenced...
3165 * reactivate it now... this gets it out of the
3166 * way of vm_pageout_scan which would have to
3167 * reactivate it upon tripping over it
91447636 3168 */
b0d623f7 3169 dwp->dw_mask |= DW_vm_page_activate;
2d21ac55
A
3170 }
3171 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
3172 /*
3173 * we're only asking for DIRTY pages to be returned
3174 */
3175 if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
91447636 3176 /*
2d21ac55
A
3177 * if we were the page stolen by vm_pageout_scan to be
3178 * cleaned (as opposed to a buddy being clustered in
3179 * or this request is not being driven by a PAGEOUT cluster
3180 * then we only need to check for the page being dirty or
3181 * precious to decide whether to return it
91447636 3182 */
2d21ac55 3183 if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
91447636 3184 goto check_busy;
2d21ac55 3185 goto dont_return;
1c79356b 3186 }
2d21ac55
A
3187 /*
3188 * this is a request for a PAGEOUT cluster and this page
3189 * is merely along for the ride as a 'buddy'... not only
3190 * does it have to be dirty to be returned, but it also
3191 * can't have been referenced recently... note that we've
3192 * already filtered above based on whether this page is
3193 * currently on the inactive queue or it meets the page
3194 * ticket (generation count) check
3195 */
6d2010ae 3196 if ( (cntrl_flags & UPL_CLEAN_IN_PLACE || !(refmod_state & VM_MEM_REFERENCED) || dst_page->throttled) &&
2d21ac55
A
3197 ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
3198 goto check_busy;
1c79356b 3199 }
2d21ac55
A
3200dont_return:
3201 /*
3202 * if we reach here, we're not to return
3203 * the page... go on to the next one
3204 */
3205 if (user_page_list)
3206 user_page_list[entry].phys_addr = 0;
55e303ae 3207
b0d623f7 3208 goto try_next_page;
2d21ac55
A
3209 }
3210check_busy:
0b4c1975 3211 if (dst_page->busy && (!(dst_page->list_req_pending && (dst_page->pageout || dst_page->cleaning)))) {
2d21ac55
A
3212 if (cntrl_flags & UPL_NOBLOCK) {
3213 if (user_page_list)
3214 user_page_list[entry].phys_addr = 0;
55e303ae 3215
b0d623f7 3216 goto try_next_page;
1c79356b 3217 }
2d21ac55
A
3218 /*
3219 * someone else is playing with the
3220 * page. We will have to wait.
3221 */
2d21ac55 3222 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
1c79356b 3223
2d21ac55
A
3224 continue;
3225 }
3226 /*
3227 * Someone else already cleaning the page?
3228 */
b0d623f7 3229 if ((dst_page->cleaning || dst_page->absent || VM_PAGE_WIRED(dst_page)) && !dst_page->list_req_pending) {
2d21ac55
A
3230 if (user_page_list)
3231 user_page_list[entry].phys_addr = 0;
91447636 3232
b0d623f7 3233 goto try_next_page;
2d21ac55
A
3234 }
3235 /*
3236 * ENCRYPTED SWAP:
3237 * The caller is gathering this page and might
3238 * access its contents later on. Decrypt the
3239 * page before adding it to the UPL, so that
3240 * the caller never sees encrypted data.
3241 */
3242 if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
3243 int was_busy;
91447636
A
3244
3245 /*
2d21ac55
A
3246 * save the current state of busy
3247 * mark page as busy while decrypt
3248 * is in progress since it will drop
3249 * the object lock...
91447636 3250 */
2d21ac55
A
3251 was_busy = dst_page->busy;
3252 dst_page->busy = TRUE;
91447636 3253
2d21ac55
A
3254 vm_page_decrypt(dst_page, 0);
3255 vm_page_decrypt_for_upl_counter++;
3256 /*
3257 * restore to original busy state
3258 */
3259 dst_page->busy = was_busy;
b0d623f7
A
3260 }
3261 if (dst_page->pageout_queue == TRUE) {
91447636 3262
b0d623f7
A
3263 vm_page_lockspin_queues();
3264
6d2010ae 3265 if (dst_page->pageout_queue == TRUE) {
b0d623f7
A
3266 /*
3267 * we've buddied up a page for a clustered pageout
3268 * that has already been moved to the pageout
3269 * queue by pageout_scan... we need to remove
3270 * it from the queue and drop the laundry count
3271 * on that queue
3272 */
3273 vm_pageout_throttle_up(dst_page);
3274 }
3275 vm_page_unlock_queues();
91447636 3276 }
2d21ac55
A
3277#if MACH_CLUSTER_STATS
3278 /*
3279 * pageout statistics gathering. count
3280 * all the pages we will page out that
3281 * were not counted in the initial
3282 * vm_pageout_scan work
3283 */
3284 if (dst_page->list_req_pending)
3285 encountered_lrp = TRUE;
3286 if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
3287 if (encountered_lrp)
3288 CLUSTER_STAT(pages_at_higher_offsets++;)
3289 else
3290 CLUSTER_STAT(pages_at_lower_offsets++;)
3291 }
3292#endif
3293 /*
3294 * Turn off busy indication on pending
3295 * pageout. Note: we can only get here
3296 * in the request pending case.
3297 */
3298 dst_page->list_req_pending = FALSE;
3299 dst_page->busy = FALSE;
3300
3301 hw_dirty = refmod_state & VM_MEM_MODIFIED;
3302 dirty = hw_dirty ? TRUE : dst_page->dirty;
3303
3304 if (dst_page->phys_page > upl->highest_page)
3305 upl->highest_page = dst_page->phys_page;
3306
3307 if (cntrl_flags & UPL_SET_LITE) {
b0d623f7 3308 unsigned int pg_num;
2d21ac55 3309
b0d623f7
A
3310 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
3311 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
2d21ac55
A
3312 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
3313
3314 if (hw_dirty)
3315 pmap_clear_modify(dst_page->phys_page);
3316
3317 /*
3318 * Mark original page as cleaning
3319 * in place.
3320 */
3321 dst_page->cleaning = TRUE;
3322 dst_page->precious = FALSE;
3323 } else {
3324 /*
3325 * use pageclean setup, it is more
3326 * convenient even for the pageout
3327 * cases here
3328 */
3329 vm_object_lock(upl->map_object);
3330 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
3331 vm_object_unlock(upl->map_object);
3332
3333 alias_page->absent = FALSE;
3334 alias_page = NULL;
1c79356b 3335 }
2d21ac55
A
3336#if MACH_PAGEMAP
3337 /*
3338 * Record that this page has been
3339 * written out
3340 */
3341 vm_external_state_set(object->existence_map, dst_page->offset);
3342#endif /*MACH_PAGEMAP*/
3343 dst_page->dirty = dirty;
55e303ae 3344
2d21ac55
A
3345 if (!dirty)
3346 dst_page->precious = TRUE;
91447636 3347
2d21ac55
A
3348 if (dst_page->pageout)
3349 dst_page->busy = TRUE;
3350
3351 if ( (cntrl_flags & UPL_ENCRYPT) ) {
3352 /*
3353 * ENCRYPTED SWAP:
3354 * We want to deny access to the target page
3355 * because its contents are about to be
3356 * encrypted and the user would be very
3357 * confused to see encrypted data instead
3358 * of their data.
3359 * We also set "encrypted_cleaning" to allow
3360 * vm_pageout_scan() to demote that page
3361 * from "adjacent/clean-in-place" to
3362 * "target/clean-and-free" if it bumps into
3363 * this page during its scanning while we're
3364 * still processing this cluster.
3365 */
3366 dst_page->busy = TRUE;
3367 dst_page->encrypted_cleaning = TRUE;
3368 }
3369 if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
3370 /*
3371 * deny access to the target page
3372 * while it is being worked on
3373 */
b0d623f7 3374 if ((!dst_page->pageout) && ( !VM_PAGE_WIRED(dst_page))) {
2d21ac55
A
3375 dst_page->busy = TRUE;
3376 dst_page->pageout = TRUE;
b0d623f7
A
3377
3378 dwp->dw_mask |= DW_vm_page_wire;
2d21ac55
A
3379 }
3380 }
3381 } else {
3382 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
91447636 3383 /*
2d21ac55
A
3384 * Honor copy-on-write obligations
3385 *
91447636
A
3386 * The copy object has changed since we
3387 * last synchronized for copy-on-write.
3388 * Another copy object might have been
3389 * inserted while we released the object's
3390 * lock. Since someone could have seen the
3391 * original contents of the remaining pages
3392 * through that new object, we have to
3393 * synchronize with it again for the remaining
3394 * pages only. The previous pages are "busy"
3395 * so they can not be seen through the new
3396 * mapping. The new mapping will see our
3397 * upcoming changes for those previous pages,
3398 * but that's OK since they couldn't see what
3399 * was there before. It's just a race anyway
3400 * and there's no guarantee of consistency or
3401 * atomicity. We just don't want new mappings
3402 * to see both the *before* and *after* pages.
3403 */
3404 if (object->copy != VM_OBJECT_NULL) {
3405 vm_object_update(
3406 object,
3407 dst_offset,/* current offset */
3408 xfer_size, /* remaining size */
3409 NULL,
3410 NULL,
3411 FALSE, /* should_return */
3412 MEMORY_OBJECT_COPY_SYNC,
3413 VM_PROT_NO_CHANGE);
2d21ac55 3414
b0d623f7 3415#if DEVELOPMENT || DEBUG
91447636 3416 upl_cow_again++;
2d21ac55 3417 upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
b0d623f7 3418#endif
91447636 3419 }
2d21ac55
A
3420 /*
3421 * remember the copy object we synced with
3422 */
91447636
A
3423 last_copy_object = object->copy;
3424 }
91447636
A
3425 dst_page = vm_page_lookup(object, dst_offset);
3426
2d21ac55 3427 if (dst_page != VM_PAGE_NULL) {
b0d623f7
A
3428
3429 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
3430
3431 if ( !(dst_page->absent && dst_page->list_req_pending) ) {
3432 /*
2d21ac55
A
3433 * skip over pages already present in the cache
3434 */
b0d623f7
A
3435 if (user_page_list)
3436 user_page_list[entry].phys_addr = 0;
2d21ac55 3437
b0d623f7 3438 goto try_next_page;
55e303ae 3439 }
b0d623f7
A
3440 }
3441 if ( !(dst_page->list_req_pending) ) {
3442
2d21ac55
A
3443 if (dst_page->cleaning) {
3444 /*
3445 * someone else is writing to the page... wait...
3446 */
2d21ac55
A
3447 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3448
3449 continue;
3450 }
3451 } else {
3452 if (dst_page->fictitious &&
3453 dst_page->phys_page == vm_page_fictitious_addr) {
3454 assert( !dst_page->speculative);
3455 /*
3456 * dump the fictitious page
3457 */
3458 dst_page->list_req_pending = FALSE;
55e303ae 3459
b0d623f7 3460 VM_PAGE_FREE(dst_page);
2d21ac55
A
3461
3462 dst_page = NULL;
b0d623f7 3463
2d21ac55
A
3464 } else if (dst_page->absent) {
3465 /*
3466 * the default_pager case
3467 */
3468 dst_page->list_req_pending = FALSE;
6d2010ae 3469 PAGE_WAKEUP_DONE(dst_page);
b0d623f7 3470
0b4c1975 3471 } else if (dst_page->pageout || dst_page->cleaning) {
b0d623f7
A
3472 /*
3473 * page was earmarked by vm_pageout_scan
3474 * to be cleaned and stolen... we're going
3475 * to take it back since we are not attempting
3476 * to read that page and we don't want to stall
3477 * waiting for it to be cleaned for 2 reasons...
3478 * 1 - no use paging it out and back in
3479 * 2 - if we stall, we may casue a deadlock in
3480 * the FS trying to acquire the its locks
3481 * on the VNOP_PAGEOUT path presuming that
3482 * those locks are already held on the read
3483 * path before trying to create this UPL
3484 *
3485 * so undo all of the state that vm_pageout_scan
3486 * hung on this page
3487 */
b0d623f7 3488 vm_pageout_queue_steal(dst_page, FALSE);
6d2010ae 3489 PAGE_WAKEUP_DONE(dst_page);
2d21ac55 3490 }
0b4e3aa0 3491 }
1c79356b 3492 }
2d21ac55
A
3493 if (dst_page == VM_PAGE_NULL) {
3494 if (object->private) {
0b4e3aa0
A
3495 /*
3496 * This is a nasty wrinkle for users
3497 * of upl who encounter device or
3498 * private memory however, it is
3499 * unavoidable, only a fault can
2d21ac55 3500 * resolve the actual backing
0b4e3aa0
A
3501 * physical page by asking the
3502 * backing device.
3503 */
2d21ac55 3504 if (user_page_list)
55e303ae 3505 user_page_list[entry].phys_addr = 0;
2d21ac55 3506
b0d623f7 3507 goto try_next_page;
0b4e3aa0 3508 }
6d2010ae
A
3509 if (object->scan_collisions) {
3510 /*
3511 * the pageout_scan thread is trying to steal
3512 * pages from this object, but has run into our
3513 * lock... grab 2 pages from the head of the object...
3514 * the first is freed on behalf of pageout_scan, the
3515 * 2nd is for our own use... we use vm_object_page_grab
3516 * in both cases to avoid taking pages from the free
3517 * list since we are under memory pressure and our
3518 * lock on this object is getting in the way of
3519 * relieving it
3520 */
3521 dst_page = vm_object_page_grab(object);
3522
3523 if (dst_page != VM_PAGE_NULL)
3524 vm_page_release(dst_page);
2d21ac55 3525
6d2010ae
A
3526 dst_page = vm_object_page_grab(object);
3527 }
3528 if (dst_page == VM_PAGE_NULL) {
3529 /*
3530 * need to allocate a page
3531 */
3532 dst_page = vm_page_grab();
3533 }
1c79356b 3534 if (dst_page == VM_PAGE_NULL) {
2d21ac55
A
3535 if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
3536 /*
3537 * we don't want to stall waiting for pages to come onto the free list
3538 * while we're already holding absent pages in this UPL
3539 * the caller will deal with the empty slots
3540 */
3541 if (user_page_list)
3542 user_page_list[entry].phys_addr = 0;
3543
3544 goto try_next_page;
3545 }
3546 /*
3547 * no pages available... wait
3548 * then try again for the same
3549 * offset...
3550 */
0b4e3aa0 3551 vm_object_unlock(object);
6d2010ae
A
3552
3553 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
3554
3555 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
3556
0b4e3aa0 3557 VM_PAGE_WAIT();
6d2010ae
A
3558 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
3559
3560 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
3561
b0d623f7 3562 vm_object_lock(object);
2d21ac55 3563
0b4e3aa0 3564 continue;
1c79356b 3565 }
b0d623f7 3566 vm_page_insert(dst_page, object, dst_offset);
4a3eedf9 3567
2d21ac55 3568 dst_page->absent = TRUE;
4a3eedf9 3569 dst_page->busy = FALSE;
2d21ac55
A
3570
3571 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
91447636
A
3572 /*
3573 * if UPL_RET_ONLY_ABSENT was specified,
3574 * than we're definitely setting up a
3575 * upl for a clustered read/pagein
3576 * operation... mark the pages as clustered
2d21ac55
A
3577 * so upl_commit_range can put them on the
3578 * speculative list
91447636
A
3579 */
3580 dst_page->clustered = TRUE;
3581 }
1c79356b 3582 }
b0d623f7
A
3583 if (dst_page->fictitious) {
3584 panic("need corner case for fictitious page");
3585 }
3586 if (dst_page->busy) {
3587 /*
3588 * someone else is playing with the
3589 * page. We will have to wait.
3590 */
3591 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3592
3593 continue;
3594 }
91447636
A
3595 /*
3596 * ENCRYPTED SWAP:
3597 */
3598 if (cntrl_flags & UPL_ENCRYPT) {
3599 /*
3600 * The page is going to be encrypted when we
3601 * get it from the pager, so mark it so.
3602 */
3603 dst_page->encrypted = TRUE;
3604 } else {
3605 /*
3606 * Otherwise, the page will not contain
3607 * encrypted data.
3608 */
3609 dst_page->encrypted = FALSE;
3610 }
1c79356b 3611 dst_page->overwriting = TRUE;
2d21ac55 3612
2d21ac55
A
3613 if (dst_page->pmapped) {
3614 if ( !(cntrl_flags & UPL_FILE_IO))
3615 /*
3616 * eliminate all mappings from the
3617 * original object and its prodigy
55e303ae 3618 */
2d21ac55
A
3619 refmod_state = pmap_disconnect(dst_page->phys_page);
3620 else
3621 refmod_state = pmap_get_refmod(dst_page->phys_page);
3622 } else
3623 refmod_state = 0;
55e303ae 3624
2d21ac55
A
3625 hw_dirty = refmod_state & VM_MEM_MODIFIED;
3626 dirty = hw_dirty ? TRUE : dst_page->dirty;
1c79356b 3627
2d21ac55 3628 if (cntrl_flags & UPL_SET_LITE) {
b0d623f7 3629 unsigned int pg_num;
1c79356b 3630
b0d623f7
A
3631 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
3632 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
2d21ac55 3633 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
91447636 3634
2d21ac55
A
3635 if (hw_dirty)
3636 pmap_clear_modify(dst_page->phys_page);
0b4e3aa0 3637
2d21ac55
A
3638 /*
3639 * Mark original page as cleaning
3640 * in place.
3641 */
3642 dst_page->cleaning = TRUE;
3643 dst_page->precious = FALSE;
3644 } else {
3645 /*
3646 * use pageclean setup, it is more
3647 * convenient even for the pageout
3648 * cases here
3649 */
3650 vm_object_lock(upl->map_object);
3651 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
3652 vm_object_unlock(upl->map_object);
0b4e3aa0 3653
2d21ac55
A
3654 alias_page->absent = FALSE;
3655 alias_page = NULL;
3656 }
1c79356b 3657
6d2010ae
A
3658 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
3659 upl->flags &= ~UPL_CLEAR_DIRTY;
3660 upl->flags |= UPL_SET_DIRTY;
3661 dirty = TRUE;
3662 upl->flags |= UPL_SET_DIRTY;
3663 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
2d21ac55
A
3664 /*
3665 * clean in place for read implies
3666 * that a write will be done on all
3667 * the pages that are dirty before
3668 * a upl commit is done. The caller
3669 * is obligated to preserve the
3670 * contents of all pages marked dirty
3671 */
3672 upl->flags |= UPL_CLEAR_DIRTY;
3673 }
3674 dst_page->dirty = dirty;
91447636 3675
2d21ac55
A
3676 if (!dirty)
3677 dst_page->precious = TRUE;
3678
b0d623f7 3679 if ( !VM_PAGE_WIRED(dst_page)) {
2d21ac55
A
3680 /*
3681 * deny access to the target page while
3682 * it is being worked on
3683 */
3684 dst_page->busy = TRUE;
3685 } else
b0d623f7 3686 dwp->dw_mask |= DW_vm_page_wire;
2d21ac55 3687
b0d623f7
A
3688 /*
3689 * We might be about to satisfy a fault which has been
3690 * requested. So no need for the "restart" bit.
3691 */
3692 dst_page->restart = FALSE;
3693 if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
2d21ac55
A
3694 /*
3695 * expect the page to be used
3696 */
b0d623f7 3697 dwp->dw_mask |= DW_set_reference;
2d21ac55 3698 }
6d2010ae
A
3699 if (cntrl_flags & UPL_PRECIOUS) {
3700 if (dst_page->object->internal) {
3701 dst_page->dirty = TRUE;
3702 dst_page->precious = FALSE;
3703 } else {
3704 dst_page->precious = TRUE;
3705 }
3706 } else {
3707 dst_page->precious = FALSE;
3708 }
2d21ac55 3709 }
d41d1dae
A
3710 if (dst_page->busy)
3711 upl->flags |= UPL_HAS_BUSY;
3712
2d21ac55
A
3713 if (dst_page->phys_page > upl->highest_page)
3714 upl->highest_page = dst_page->phys_page;
3715 if (user_page_list) {
3716 user_page_list[entry].phys_addr = dst_page->phys_page;
2d21ac55
A
3717 user_page_list[entry].pageout = dst_page->pageout;
3718 user_page_list[entry].absent = dst_page->absent;
593a1d5f 3719 user_page_list[entry].dirty = dst_page->dirty;
2d21ac55 3720 user_page_list[entry].precious = dst_page->precious;
593a1d5f 3721 user_page_list[entry].device = FALSE;
2d21ac55
A
3722 if (dst_page->clustered == TRUE)
3723 user_page_list[entry].speculative = dst_page->speculative;
3724 else
3725 user_page_list[entry].speculative = FALSE;
593a1d5f
A
3726 user_page_list[entry].cs_validated = dst_page->cs_validated;
3727 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
2d21ac55
A
3728 }
3729 /*
3730 * if UPL_RET_ONLY_ABSENT is set, then
3731 * we are working with a fresh page and we've
3732 * just set the clustered flag on it to
3733 * indicate that it was drug in as part of a
3734 * speculative cluster... so leave it alone
3735 */
3736 if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
3737 /*
3738 * someone is explicitly grabbing this page...
3739 * update clustered and speculative state
3740 *
3741 */
3742 VM_PAGE_CONSUME_CLUSTERED(dst_page);
3743 }
b0d623f7
A
3744try_next_page:
3745 if (dwp->dw_mask) {
3746 if (dwp->dw_mask & DW_vm_page_activate)
3747 VM_STAT_INCR(reactivations);
4a3eedf9 3748
6d2010ae 3749 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
b0d623f7 3750
6d2010ae
A
3751 if (dw_count >= dw_limit) {
3752 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
b0d623f7
A
3753
3754 dwp = &dw_array[0];
3755 dw_count = 0;
4a3eedf9 3756 }
2d21ac55 3757 }
2d21ac55
A
3758 entry++;
3759 dst_offset += PAGE_SIZE_64;
3760 xfer_size -= PAGE_SIZE;
3761 }
b0d623f7 3762 if (dw_count)
6d2010ae 3763 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
b0d623f7 3764
2d21ac55 3765 if (alias_page != NULL) {
b0d623f7 3766 VM_PAGE_FREE(alias_page);
1c79356b 3767 }
91447636 3768
2d21ac55
A
3769 if (page_list_count != NULL) {
3770 if (upl->flags & UPL_INTERNAL)
3771 *page_list_count = 0;
3772 else if (*page_list_count > entry)
3773 *page_list_count = entry;
3774 }
b0d623f7
A
3775#if UPL_DEBUG
3776 upl->upl_state = 1;
3777#endif
1c79356b 3778 vm_object_unlock(object);
2d21ac55 3779
1c79356b
A
3780 return KERN_SUCCESS;
3781}
3782
0b4e3aa0 3783/* JMM - Backward compatability for now */
1c79356b 3784kern_return_t
91447636
A
3785vm_fault_list_request( /* forward */
3786 memory_object_control_t control,
3787 vm_object_offset_t offset,
3788 upl_size_t size,
3789 upl_t *upl_ptr,
3790 upl_page_info_t **user_page_list_ptr,
2d21ac55 3791 unsigned int page_list_count,
91447636
A
3792 int cntrl_flags);
3793kern_return_t
0b4e3aa0
A
3794vm_fault_list_request(
3795 memory_object_control_t control,
1c79356b 3796 vm_object_offset_t offset,
91447636 3797 upl_size_t size,
0b4e3aa0 3798 upl_t *upl_ptr,
1c79356b 3799 upl_page_info_t **user_page_list_ptr,
2d21ac55 3800 unsigned int page_list_count,
1c79356b
A
3801 int cntrl_flags)
3802{
0c530ab8 3803 unsigned int local_list_count;
0b4e3aa0
A
3804 upl_page_info_t *user_page_list;
3805 kern_return_t kr;
3806
b0d623f7
A
3807 if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
3808 return KERN_INVALID_ARGUMENT;
3809
0b4e3aa0
A
3810 if (user_page_list_ptr != NULL) {
3811 local_list_count = page_list_count;
3812 user_page_list = *user_page_list_ptr;
3813 } else {
3814 local_list_count = 0;
3815 user_page_list = NULL;
3816 }
3817 kr = memory_object_upl_request(control,
3818 offset,
3819 size,
3820 upl_ptr,
3821 user_page_list,
3822 &local_list_count,
3823 cntrl_flags);
3824
3825 if(kr != KERN_SUCCESS)
3826 return kr;
3827
3828 if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
3829 *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
3830 }
3831
3832 return KERN_SUCCESS;
3833}
3834
3835
3836
3837/*
3838 * Routine: vm_object_super_upl_request
3839 * Purpose:
3840 * Cause the population of a portion of a vm_object
3841 * in much the same way as memory_object_upl_request.
3842 * Depending on the nature of the request, the pages
3843 * returned may be contain valid data or be uninitialized.
3844 * However, the region may be expanded up to the super
3845 * cluster size provided.
3846 */
3847
3848__private_extern__ kern_return_t
3849vm_object_super_upl_request(
3850 vm_object_t object,
3851 vm_object_offset_t offset,
91447636
A
3852 upl_size_t size,
3853 upl_size_t super_cluster,
0b4e3aa0
A
3854 upl_t *upl,
3855 upl_page_info_t *user_page_list,
3856 unsigned int *page_list_count,
3857 int cntrl_flags)
3858{
b0d623f7 3859 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
1c79356b 3860 return KERN_FAILURE;
0b4e3aa0 3861
55e303ae 3862 assert(object->paging_in_progress);
1c79356b 3863 offset = offset - object->paging_offset;
91447636 3864
91447636 3865 if (super_cluster > size) {
1c79356b
A
3866
3867 vm_object_offset_t base_offset;
91447636 3868 upl_size_t super_size;
b0d623f7 3869 vm_object_size_t super_size_64;
1c79356b 3870
2d21ac55
A
3871 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
3872 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
6d2010ae 3873 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
b0d623f7
A
3874 super_size = (upl_size_t) super_size_64;
3875 assert(super_size == super_size_64);
2d21ac55
A
3876
3877 if (offset > (base_offset + super_size)) {
3878 panic("vm_object_super_upl_request: Missed target pageout"
3879 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
3880 offset, base_offset, super_size, super_cluster,
3881 size, object->paging_offset);
3882 }
91447636
A
3883 /*
3884 * apparently there is a case where the vm requests a
3885 * page to be written out who's offset is beyond the
3886 * object size
3887 */
b0d623f7
A
3888 if ((offset + size) > (base_offset + super_size)) {
3889 super_size_64 = (offset + size) - base_offset;
3890 super_size = (upl_size_t) super_size_64;
3891 assert(super_size == super_size_64);
3892 }
1c79356b
A
3893
3894 offset = base_offset;
3895 size = super_size;
3896 }
2d21ac55 3897 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
1c79356b
A
3898}
3899
b0d623f7 3900
91447636
A
3901kern_return_t
3902vm_map_create_upl(
3903 vm_map_t map,
3904 vm_map_address_t offset,
3905 upl_size_t *upl_size,
3906 upl_t *upl,
3907 upl_page_info_array_t page_list,
3908 unsigned int *count,
3909 int *flags)
3910{
3911 vm_map_entry_t entry;
3912 int caller_flags;
3913 int force_data_sync;
3914 int sync_cow_data;
3915 vm_object_t local_object;
3916 vm_map_offset_t local_offset;
3917 vm_map_offset_t local_start;
3918 kern_return_t ret;
3919
3920 caller_flags = *flags;
3921
3922 if (caller_flags & ~UPL_VALID_FLAGS) {
3923 /*
3924 * For forward compatibility's sake,
3925 * reject any unknown flag.
3926 */
3927 return KERN_INVALID_VALUE;
3928 }
91447636
A
3929 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
3930 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
3931
2d21ac55 3932 if (upl == NULL)
91447636
A
3933 return KERN_INVALID_ARGUMENT;
3934
91447636 3935REDISCOVER_ENTRY:
b0d623f7 3936 vm_map_lock_read(map);
2d21ac55 3937
91447636 3938 if (vm_map_lookup_entry(map, offset, &entry)) {
2d21ac55 3939
b0d623f7
A
3940 if ((entry->vme_end - offset) < *upl_size) {
3941 *upl_size = (upl_size_t) (entry->vme_end - offset);
3942 assert(*upl_size == entry->vme_end - offset);
3943 }
2d21ac55 3944
91447636 3945 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2d21ac55
A
3946 *flags = 0;
3947
b0d623f7 3948 if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
2d21ac55
A
3949 if (entry->object.vm_object->private)
3950 *flags = UPL_DEV_MEMORY;
3951
3952 if (entry->object.vm_object->phys_contiguous)
91447636 3953 *flags |= UPL_PHYS_CONTIG;
91447636 3954 }
b0d623f7 3955 vm_map_unlock_read(map);
2d21ac55 3956
91447636
A
3957 return KERN_SUCCESS;
3958 }
e2d2fc5c
A
3959
3960 if (entry->is_sub_map) {
3961 vm_map_t submap;
3962
3963 submap = entry->object.sub_map;
3964 local_start = entry->vme_start;
3965 local_offset = entry->offset;
3966
3967 vm_map_reference(submap);
3968 vm_map_unlock_read(map);
3969
3970 ret = vm_map_create_upl(submap,
3971 local_offset + (offset - local_start),
3972 upl_size, upl, page_list, count, flags);
3973 vm_map_deallocate(submap);
3974
3975 return ret;
3976 }
3977
2d21ac55 3978 if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
b0d623f7
A
3979 if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
3980 *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
2d21ac55 3981 }
e2d2fc5c 3982
91447636
A
3983 /*
3984 * Create an object if necessary.
3985 */
3986 if (entry->object.vm_object == VM_OBJECT_NULL) {
b0d623f7
A
3987
3988 if (vm_map_lock_read_to_write(map))
3989 goto REDISCOVER_ENTRY;
3990
2d21ac55 3991 entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
91447636 3992 entry->offset = 0;
b0d623f7
A
3993
3994 vm_map_lock_write_to_read(map);
91447636
A
3995 }
3996 if (!(caller_flags & UPL_COPYOUT_FROM)) {
3997 if (!(entry->protection & VM_PROT_WRITE)) {
b0d623f7 3998 vm_map_unlock_read(map);
91447636
A
3999 return KERN_PROTECTION_FAILURE;
4000 }
e2d2fc5c
A
4001
4002#if !CONFIG_EMBEDDED
4003 local_object = entry->object.vm_object;
4004 if (vm_map_entry_should_cow_for_true_share(entry) &&
4005 local_object->vo_size > *upl_size &&
4006 *upl_size != 0) {
4007 vm_prot_t prot;
4008
4009 /*
4010 * Set up the targeted range for copy-on-write to avoid
4011 * applying true_share/copy_delay to the entire object.
4012 */
4013
4014 if (vm_map_lock_read_to_write(map)) {
4015 goto REDISCOVER_ENTRY;
4016 }
4017
4018 vm_map_clip_start(map, entry, vm_map_trunc_page(offset));
4019 vm_map_clip_end(map, entry, vm_map_round_page(offset + *upl_size));
4020 prot = entry->protection & ~VM_PROT_WRITE;
4021 if (override_nx(map, entry->alias) && prot)
4022 prot |= VM_PROT_EXECUTE;
4023 vm_object_pmap_protect(local_object,
4024 entry->offset,
4025 entry->vme_end - entry->vme_start,
4026 ((entry->is_shared || map->mapped)
4027 ? PMAP_NULL
4028 : map->pmap),
4029 entry->vme_start,
4030 prot);
4031 entry->needs_copy = TRUE;
4032
4033 vm_map_lock_write_to_read(map);
4034 }
4035#endif /* !CONFIG_EMBEDDED */
4036
91447636 4037 if (entry->needs_copy) {
b0d623f7
A
4038 /*
4039 * Honor copy-on-write for COPY_SYMMETRIC
4040 * strategy.
4041 */
91447636
A
4042 vm_map_t local_map;
4043 vm_object_t object;
91447636
A
4044 vm_object_offset_t new_offset;
4045 vm_prot_t prot;
4046 boolean_t wired;
91447636
A
4047 vm_map_version_t version;
4048 vm_map_t real_map;
4049
4050 local_map = map;
2d21ac55
A
4051
4052 if (vm_map_lookup_locked(&local_map,
4053 offset, VM_PROT_WRITE,
4054 OBJECT_LOCK_EXCLUSIVE,
4055 &version, &object,
4056 &new_offset, &prot, &wired,
4057 NULL,
b0d623f7
A
4058 &real_map) != KERN_SUCCESS) {
4059 vm_map_unlock_read(local_map);
91447636
A
4060 return KERN_FAILURE;
4061 }
2d21ac55 4062 if (real_map != map)
91447636 4063 vm_map_unlock(real_map);
b0d623f7
A
4064 vm_map_unlock_read(local_map);
4065
91447636 4066 vm_object_unlock(object);
91447636
A
4067
4068 goto REDISCOVER_ENTRY;
4069 }
4070 }
91447636 4071 if (sync_cow_data) {
2d21ac55 4072 if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
91447636
A
4073 local_object = entry->object.vm_object;
4074 local_start = entry->vme_start;
4075 local_offset = entry->offset;
2d21ac55 4076
91447636 4077 vm_object_reference(local_object);
b0d623f7 4078 vm_map_unlock_read(map);
91447636 4079
b0d623f7 4080 if (local_object->shadow && local_object->copy) {
2d21ac55
A
4081 vm_object_lock_request(
4082 local_object->shadow,
4083 (vm_object_offset_t)
4084 ((offset - local_start) +
4085 local_offset) +
6d2010ae 4086 local_object->vo_shadow_offset,
2d21ac55
A
4087 *upl_size, FALSE,
4088 MEMORY_OBJECT_DATA_SYNC,
4089 VM_PROT_NO_CHANGE);
91447636
A
4090 }
4091 sync_cow_data = FALSE;
4092 vm_object_deallocate(local_object);
2d21ac55 4093
91447636
A
4094 goto REDISCOVER_ENTRY;
4095 }
4096 }
91447636 4097 if (force_data_sync) {
91447636
A
4098 local_object = entry->object.vm_object;
4099 local_start = entry->vme_start;
4100 local_offset = entry->offset;
2d21ac55 4101
91447636 4102 vm_object_reference(local_object);
b0d623f7 4103 vm_map_unlock_read(map);
91447636
A
4104
4105 vm_object_lock_request(
2d21ac55
A
4106 local_object,
4107 (vm_object_offset_t)
4108 ((offset - local_start) + local_offset),
4109 (vm_object_size_t)*upl_size, FALSE,
4110 MEMORY_OBJECT_DATA_SYNC,
4111 VM_PROT_NO_CHANGE);
4112
91447636
A
4113 force_data_sync = FALSE;
4114 vm_object_deallocate(local_object);
2d21ac55 4115
91447636
A
4116 goto REDISCOVER_ENTRY;
4117 }
2d21ac55
A
4118 if (entry->object.vm_object->private)
4119 *flags = UPL_DEV_MEMORY;
4120 else
4121 *flags = 0;
4122
4123 if (entry->object.vm_object->phys_contiguous)
4124 *flags |= UPL_PHYS_CONTIG;
91447636 4125
91447636
A
4126 local_object = entry->object.vm_object;
4127 local_offset = entry->offset;
4128 local_start = entry->vme_start;
2d21ac55 4129
91447636 4130 vm_object_reference(local_object);
b0d623f7 4131 vm_map_unlock_read(map);
2d21ac55
A
4132
4133 ret = vm_object_iopl_request(local_object,
4134 (vm_object_offset_t) ((offset - local_start) + local_offset),
4135 *upl_size,
4136 upl,
4137 page_list,
4138 count,
4139 caller_flags);
91447636 4140 vm_object_deallocate(local_object);
2d21ac55 4141
91447636
A
4142 return(ret);
4143 }
b0d623f7 4144 vm_map_unlock_read(map);
1c79356b 4145
2d21ac55 4146 return(KERN_FAILURE);
91447636
A
4147}
4148
4149/*
4150 * Internal routine to enter a UPL into a VM map.
4151 *
4152 * JMM - This should just be doable through the standard
4153 * vm_map_enter() API.
4154 */
1c79356b 4155kern_return_t
91447636
A
4156vm_map_enter_upl(
4157 vm_map_t map,
4158 upl_t upl,
b0d623f7 4159 vm_map_offset_t *dst_addr)
1c79356b 4160{
91447636 4161 vm_map_size_t size;
1c79356b 4162 vm_object_offset_t offset;
91447636 4163 vm_map_offset_t addr;
1c79356b
A
4164 vm_page_t m;
4165 kern_return_t kr;
b0d623f7
A
4166 int isVectorUPL = 0, curr_upl=0;
4167 upl_t vector_upl = NULL;
4168 vm_offset_t vector_upl_dst_addr = 0;
4169 vm_map_t vector_upl_submap = NULL;
4170 upl_offset_t subupl_offset = 0;
4171 upl_size_t subupl_size = 0;
1c79356b 4172
0b4e3aa0
A
4173 if (upl == UPL_NULL)
4174 return KERN_INVALID_ARGUMENT;
4175
b0d623f7
A
4176 if((isVectorUPL = vector_upl_is_valid(upl))) {
4177 int mapped=0,valid_upls=0;
4178 vector_upl = upl;
4179
4180 upl_lock(vector_upl);
4181 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
4182 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
4183 if(upl == NULL)
4184 continue;
4185 valid_upls++;
4186 if (UPL_PAGE_LIST_MAPPED & upl->flags)
4187 mapped++;
4188 }
4189
4190 if(mapped) {
4191 if(mapped != valid_upls)
4192 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
4193 else {
4194 upl_unlock(vector_upl);
4195 return KERN_FAILURE;
4196 }
4197 }
4198
4199 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
4200 if( kr != KERN_SUCCESS )
4201 panic("Vector UPL submap allocation failed\n");
4202 map = vector_upl_submap;
4203 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
4204 curr_upl=0;
4205 }
4206 else
4207 upl_lock(upl);
4208
4209process_upl_to_enter:
4210 if(isVectorUPL){
4211 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
4212 *dst_addr = vector_upl_dst_addr;
4213 upl_unlock(vector_upl);
4214 return KERN_SUCCESS;
4215 }
4216 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
4217 if(upl == NULL)
4218 goto process_upl_to_enter;
6d2010ae 4219
b0d623f7
A
4220 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
4221 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
d41d1dae
A
4222 } else {
4223 /*
4224 * check to see if already mapped
4225 */
4226 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
4227 upl_unlock(upl);
4228 return KERN_FAILURE;
4229 }
b0d623f7 4230 }
d41d1dae
A
4231 if ((!(upl->flags & UPL_SHADOWED)) &&
4232 ((upl->flags & UPL_HAS_BUSY) ||
4233 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
0b4e3aa0 4234
55e303ae
A
4235 vm_object_t object;
4236 vm_page_t alias_page;
4237 vm_object_offset_t new_offset;
b0d623f7 4238 unsigned int pg_num;
55e303ae
A
4239 wpl_array_t lite_list;
4240
2d21ac55 4241 if (upl->flags & UPL_INTERNAL) {
55e303ae 4242 lite_list = (wpl_array_t)
91447636 4243 ((((uintptr_t)upl) + sizeof(struct upl))
2d21ac55 4244 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
55e303ae 4245 } else {
2d21ac55 4246 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
55e303ae
A
4247 }
4248 object = upl->map_object;
4249 upl->map_object = vm_object_allocate(upl->size);
2d21ac55 4250
55e303ae 4251 vm_object_lock(upl->map_object);
2d21ac55 4252
55e303ae
A
4253 upl->map_object->shadow = object;
4254 upl->map_object->pageout = TRUE;
4255 upl->map_object->can_persist = FALSE;
2d21ac55 4256 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
6d2010ae 4257 upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
55e303ae 4258 upl->map_object->wimg_bits = object->wimg_bits;
6d2010ae 4259 offset = upl->map_object->vo_shadow_offset;
55e303ae
A
4260 new_offset = 0;
4261 size = upl->size;
91447636 4262
2d21ac55 4263 upl->flags |= UPL_SHADOWED;
91447636 4264
2d21ac55 4265 while (size) {
b0d623f7
A
4266 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
4267 assert(pg_num == new_offset / PAGE_SIZE);
55e303ae 4268
2d21ac55 4269 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
55e303ae 4270
2d21ac55 4271 VM_PAGE_GRAB_FICTITIOUS(alias_page);
91447636 4272
2d21ac55 4273 vm_object_lock(object);
91447636 4274
2d21ac55
A
4275 m = vm_page_lookup(object, offset);
4276 if (m == VM_PAGE_NULL) {
4277 panic("vm_upl_map: page missing\n");
4278 }
55e303ae 4279
2d21ac55
A
4280 /*
4281 * Convert the fictitious page to a private
4282 * shadow of the real page.
4283 */
4284 assert(alias_page->fictitious);
4285 alias_page->fictitious = FALSE;
4286 alias_page->private = TRUE;
4287 alias_page->pageout = TRUE;
4288 /*
4289 * since m is a page in the upl it must
4290 * already be wired or BUSY, so it's
4291 * safe to assign the underlying physical
4292 * page to the alias
4293 */
4294 alias_page->phys_page = m->phys_page;
4295
4296 vm_object_unlock(object);
4297
4298 vm_page_lockspin_queues();
4299 vm_page_wire(alias_page);
4300 vm_page_unlock_queues();
4301
4302 /*
4303 * ENCRYPTED SWAP:
4304 * The virtual page ("m") has to be wired in some way
4305 * here or its physical page ("m->phys_page") could
4306 * be recycled at any time.
4307 * Assuming this is enforced by the caller, we can't
4308 * get an encrypted page here. Since the encryption
4309 * key depends on the VM page's "pager" object and
4310 * the "paging_offset", we couldn't handle 2 pageable
4311 * VM pages (with different pagers and paging_offsets)
4312 * sharing the same physical page: we could end up
4313 * encrypting with one key (via one VM page) and
4314 * decrypting with another key (via the alias VM page).
4315 */
4316 ASSERT_PAGE_DECRYPTED(m);
55e303ae 4317
2d21ac55
A
4318 vm_page_insert(alias_page, upl->map_object, new_offset);
4319
4320 assert(!alias_page->wanted);
4321 alias_page->busy = FALSE;
4322 alias_page->absent = FALSE;
4323 }
4324 size -= PAGE_SIZE;
4325 offset += PAGE_SIZE_64;
4326 new_offset += PAGE_SIZE_64;
55e303ae 4327 }
91447636 4328 vm_object_unlock(upl->map_object);
55e303ae 4329 }
d41d1dae 4330 if (upl->flags & UPL_SHADOWED)
55e303ae 4331 offset = 0;
d41d1dae
A
4332 else
4333 offset = upl->offset - upl->map_object->paging_offset;
6d2010ae 4334
1c79356b
A
4335 size = upl->size;
4336
2d21ac55 4337 vm_object_reference(upl->map_object);
1c79356b 4338
b0d623f7
A
4339 if(!isVectorUPL) {
4340 *dst_addr = 0;
4341 /*
4342 * NEED A UPL_MAP ALIAS
4343 */
4344 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
4345 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
4346 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
d41d1dae
A
4347
4348 if (kr != KERN_SUCCESS) {
4349 upl_unlock(upl);
4350 return(kr);
4351 }
b0d623f7
A
4352 }
4353 else {
4354 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
4355 VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
4356 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
4357 if(kr)
4358 panic("vm_map_enter failed for a Vector UPL\n");
4359 }
91447636
A
4360 vm_object_lock(upl->map_object);
4361
2d21ac55 4362 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
1c79356b 4363 m = vm_page_lookup(upl->map_object, offset);
2d21ac55
A
4364
4365 if (m) {
2d21ac55 4366 m->pmapped = TRUE;
b0d623f7
A
4367
4368 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
4369 * but only in kernel space. If this was on a user map,
4370 * we'd have to set the wpmapped bit. */
4371 /* m->wpmapped = TRUE; */
4372 assert(map==kernel_map);
9bccf70c 4373
6d2010ae 4374 PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, 0, TRUE);
1c79356b 4375 }
2d21ac55 4376 offset += PAGE_SIZE_64;
1c79356b 4377 }
91447636
A
4378 vm_object_unlock(upl->map_object);
4379
2d21ac55
A
4380 /*
4381 * hold a reference for the mapping
4382 */
4383 upl->ref_count++;
1c79356b 4384 upl->flags |= UPL_PAGE_LIST_MAPPED;
b0d623f7
A
4385 upl->kaddr = (vm_offset_t) *dst_addr;
4386 assert(upl->kaddr == *dst_addr);
4387
d41d1dae 4388 if(isVectorUPL)
b0d623f7 4389 goto process_upl_to_enter;
2d21ac55 4390
d41d1dae
A
4391 upl_unlock(upl);
4392
1c79356b
A
4393 return KERN_SUCCESS;
4394}
4395
91447636
A
4396/*
4397 * Internal routine to remove a UPL mapping from a VM map.
4398 *
4399 * XXX - This should just be doable through a standard
4400 * vm_map_remove() operation. Otherwise, implicit clean-up
4401 * of the target map won't be able to correctly remove
4402 * these (and release the reference on the UPL). Having
4403 * to do this means we can't map these into user-space
4404 * maps yet.
4405 */
1c79356b 4406kern_return_t
91447636 4407vm_map_remove_upl(
1c79356b
A
4408 vm_map_t map,
4409 upl_t upl)
4410{
0b4e3aa0 4411 vm_address_t addr;
91447636 4412 upl_size_t size;
b0d623f7
A
4413 int isVectorUPL = 0, curr_upl = 0;
4414 upl_t vector_upl = NULL;
1c79356b 4415
0b4e3aa0
A
4416 if (upl == UPL_NULL)
4417 return KERN_INVALID_ARGUMENT;
4418
b0d623f7
A
4419 if((isVectorUPL = vector_upl_is_valid(upl))) {
4420 int unmapped=0, valid_upls=0;
4421 vector_upl = upl;
4422 upl_lock(vector_upl);
4423 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
4424 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
4425 if(upl == NULL)
4426 continue;
4427 valid_upls++;
4428 if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
4429 unmapped++;
4430 }
4431
4432 if(unmapped) {
4433 if(unmapped != valid_upls)
4434 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
4435 else {
4436 upl_unlock(vector_upl);
4437 return KERN_FAILURE;
4438 }
4439 }
4440 curr_upl=0;
4441 }
4442 else
4443 upl_lock(upl);
4444
4445process_upl_to_remove:
4446 if(isVectorUPL) {
4447 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
4448 vm_map_t v_upl_submap;
4449 vm_offset_t v_upl_submap_dst_addr;
4450 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
4451
4452 vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
4453 vm_map_deallocate(v_upl_submap);
4454 upl_unlock(vector_upl);
4455 return KERN_SUCCESS;
4456 }
4457
4458 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
4459 if(upl == NULL)
4460 goto process_upl_to_remove;
4461 }
2d21ac55
A
4462
4463 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
0b4e3aa0 4464 addr = upl->kaddr;
1c79356b 4465 size = upl->size;
2d21ac55 4466
0b4e3aa0
A
4467 assert(upl->ref_count > 1);
4468 upl->ref_count--; /* removing mapping ref */
2d21ac55 4469
1c79356b
A
4470 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
4471 upl->kaddr = (vm_offset_t) 0;
b0d623f7
A
4472
4473 if(!isVectorUPL) {
4474 upl_unlock(upl);
4475
4476 vm_map_remove(map,
4477 vm_map_trunc_page(addr),
4478 vm_map_round_page(addr + size),
4479 VM_MAP_NO_FLAGS);
4480
4481 return KERN_SUCCESS;
4482 }
4483 else {
4484 /*
4485 * If it's a Vectored UPL, we'll be removing the entire
4486 * submap anyways, so no need to remove individual UPL
4487 * element mappings from within the submap
4488 */
4489 goto process_upl_to_remove;
4490 }
1c79356b 4491 }
0b4e3aa0 4492 upl_unlock(upl);
2d21ac55 4493
0b4e3aa0 4494 return KERN_FAILURE;
1c79356b
A
4495}
4496
b0d623f7 4497
1c79356b 4498kern_return_t
0b4e3aa0 4499upl_commit_range(
1c79356b 4500 upl_t upl,
91447636
A
4501 upl_offset_t offset,
4502 upl_size_t size,
1c79356b 4503 int flags,
0b4e3aa0
A
4504 upl_page_info_t *page_list,
4505 mach_msg_type_number_t count,
4506 boolean_t *empty)
1c79356b 4507{
b0d623f7 4508 upl_size_t xfer_size, subupl_size = size;
55e303ae 4509 vm_object_t shadow_object;
2d21ac55 4510 vm_object_t object;
1c79356b 4511 vm_object_offset_t target_offset;
b0d623f7 4512 upl_offset_t subupl_offset = offset;
1c79356b 4513 int entry;
55e303ae
A
4514 wpl_array_t lite_list;
4515 int occupied;
91447636 4516 int clear_refmod = 0;
2d21ac55 4517 int pgpgout_count = 0;
6d2010ae
A
4518 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
4519 struct vm_page_delayed_work *dwp;
4520 int dw_count;
4521 int dw_limit;
4522 int isVectorUPL = 0;
b0d623f7 4523 upl_t vector_upl = NULL;
6d2010ae 4524 boolean_t should_be_throttled = FALSE;
1c79356b 4525
0b4e3aa0
A
4526 *empty = FALSE;
4527
4528 if (upl == UPL_NULL)
4529 return KERN_INVALID_ARGUMENT;
4530
4531 if (count == 0)
4532 page_list = NULL;
4533
b0d623f7
A
4534 if((isVectorUPL = vector_upl_is_valid(upl))) {
4535 vector_upl = upl;
4536 upl_lock(vector_upl);
4537 }
4538 else
4539 upl_lock(upl);
4540
4541process_upl_to_commit:
4542
4543 if(isVectorUPL) {
4544 size = subupl_size;
4545 offset = subupl_offset;
4546 if(size == 0) {
4547 upl_unlock(vector_upl);
4548 return KERN_SUCCESS;
4549 }
4550 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
4551 if(upl == NULL) {
4552 upl_unlock(vector_upl);
4553 return KERN_FAILURE;
4554 }
4555 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
4556 subupl_size -= size;
4557 subupl_offset += size;
4558 }
4559
4560#if UPL_DEBUG
4561 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
4562 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
4563
4564 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
4565 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
4566
4567 upl->upl_commit_index++;
4568 }
4569#endif
2d21ac55
A
4570 if (upl->flags & UPL_DEVICE_MEMORY)
4571 xfer_size = 0;
4572 else if ((offset + size) <= upl->size)
4573 xfer_size = size;
b0d623f7
A
4574 else {
4575 if(!isVectorUPL)
4576 upl_unlock(upl);
4577 else {
4578 upl_unlock(vector_upl);
4579 }
2d21ac55 4580 return KERN_FAILURE;
91447636 4581 }
6d2010ae
A
4582 if (upl->flags & UPL_SET_DIRTY)
4583 flags |= UPL_COMMIT_SET_DIRTY;
55e303ae
A
4584 if (upl->flags & UPL_CLEAR_DIRTY)
4585 flags |= UPL_COMMIT_CLEAR_DIRTY;
4586
2d21ac55
A
4587 if (upl->flags & UPL_INTERNAL)
4588 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
4589 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
4590 else
4591 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
1c79356b 4592
2d21ac55
A
4593 object = upl->map_object;
4594
4595 if (upl->flags & UPL_SHADOWED) {
4596 vm_object_lock(object);
4597 shadow_object = object->shadow;
55e303ae 4598 } else {
2d21ac55 4599 shadow_object = object;
55e303ae 4600 }
1c79356b
A
4601 entry = offset/PAGE_SIZE;
4602 target_offset = (vm_object_offset_t)offset;
55e303ae 4603
b0d623f7
A
4604 if (upl->flags & UPL_KERNEL_OBJECT)
4605 vm_object_lock_shared(shadow_object);
4606 else
4607 vm_object_lock(shadow_object);
4a3eedf9 4608
b0d623f7
A
4609 if (upl->flags & UPL_ACCESS_BLOCKED) {
4610 assert(shadow_object->blocked_access);
4611 shadow_object->blocked_access = FALSE;
4612 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
4a3eedf9 4613 }
4a3eedf9 4614
593a1d5f
A
4615 if (shadow_object->code_signed) {
4616 /*
4617 * CODE SIGNING:
4618 * If the object is code-signed, do not let this UPL tell
4619 * us if the pages are valid or not. Let the pages be
4620 * validated by VM the normal way (when they get mapped or
4621 * copied).
4622 */
4623 flags &= ~UPL_COMMIT_CS_VALIDATED;
4624 }
4625 if (! page_list) {
4626 /*
4627 * No page list to get the code-signing info from !?
4628 */
4629 flags &= ~UPL_COMMIT_CS_VALIDATED;
4630 }
6d2010ae
A
4631 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
4632 should_be_throttled = TRUE;
593a1d5f 4633
b0d623f7
A
4634 dwp = &dw_array[0];
4635 dw_count = 0;
6d2010ae 4636 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
b0d623f7 4637
91447636 4638 while (xfer_size) {
2d21ac55
A
4639 vm_page_t t, m;
4640
b0d623f7
A
4641 dwp->dw_mask = 0;
4642 clear_refmod = 0;
4643
55e303ae 4644 m = VM_PAGE_NULL;
d7e50217 4645
55e303ae 4646 if (upl->flags & UPL_LITE) {
b0d623f7 4647 unsigned int pg_num;
55e303ae 4648
b0d623f7
A
4649 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
4650 assert(pg_num == target_offset/PAGE_SIZE);
55e303ae
A
4651
4652 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
4653 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
2d21ac55 4654
b0d623f7
A
4655 if (!(upl->flags & UPL_KERNEL_OBJECT))
4656 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
55e303ae
A
4657 }
4658 }
2d21ac55
A
4659 if (upl->flags & UPL_SHADOWED) {
4660 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
4661
55e303ae
A
4662 t->pageout = FALSE;
4663
b0d623f7 4664 VM_PAGE_FREE(t);
55e303ae 4665
2d21ac55 4666 if (m == VM_PAGE_NULL)
6d2010ae 4667 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
55e303ae
A
4668 }
4669 }
b0d623f7 4670 if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
593a1d5f 4671 goto commit_next_page;
55e303ae 4672
593a1d5f
A
4673 if (flags & UPL_COMMIT_CS_VALIDATED) {
4674 /*
4675 * CODE SIGNING:
4676 * Set the code signing bits according to
4677 * what the UPL says they should be.
4678 */
4679 m->cs_validated = page_list[entry].cs_validated;
4680 m->cs_tainted = page_list[entry].cs_tainted;
4681 }
4682 if (upl->flags & UPL_IO_WIRE) {
55e303ae 4683
593a1d5f
A
4684 if (page_list)
4685 page_list[entry].phys_addr = 0;
2d21ac55 4686
6d2010ae 4687 if (flags & UPL_COMMIT_SET_DIRTY) {
593a1d5f 4688 m->dirty = TRUE;
6d2010ae 4689 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
593a1d5f 4690 m->dirty = FALSE;
b0d623f7 4691
593a1d5f
A
4692 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4693 m->cs_validated && !m->cs_tainted) {
4a3eedf9
A
4694 /*
4695 * CODE SIGNING:
4696 * This page is no longer dirty
4697 * but could have been modified,
4698 * so it will need to be
4699 * re-validated.
4700 */
4701 m->cs_validated = FALSE;
b0d623f7 4702#if DEVELOPMENT || DEBUG
4a3eedf9 4703 vm_cs_validated_resets++;
b0d623f7
A
4704#endif
4705 pmap_disconnect(m->phys_page);
4a3eedf9 4706 }
91447636 4707 clear_refmod |= VM_MEM_MODIFIED;
55e303ae 4708 }
b0d623f7
A
4709 if (flags & UPL_COMMIT_INACTIVATE) {
4710 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4711 clear_refmod |= VM_MEM_REFERENCED;
4712 }
4713 if (upl->flags & UPL_ACCESS_BLOCKED) {
593a1d5f
A
4714 /*
4715 * We blocked access to the pages in this UPL.
4716 * Clear the "busy" bit and wake up any waiter
4717 * for this page.
4718 */
b0d623f7 4719 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
593a1d5f 4720 }
0b4c1975
A
4721 if (m->absent) {
4722 if (flags & UPL_COMMIT_FREE_ABSENT)
4723 dwp->dw_mask |= DW_vm_page_free;
d41d1dae 4724 else {
0b4c1975 4725 m->absent = FALSE;
d41d1dae 4726 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7ddcb079
A
4727
4728 if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
4729 dwp->dw_mask |= DW_vm_page_activate;
d41d1dae
A
4730 }
4731 } else
4732 dwp->dw_mask |= DW_vm_page_unwire;
4733
593a1d5f
A
4734 goto commit_next_page;
4735 }
4736 /*
4737 * make sure to clear the hardware
4738 * modify or reference bits before
4739 * releasing the BUSY bit on this page
4740 * otherwise we risk losing a legitimate
4741 * change of state
4742 */
4743 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
4744 m->dirty = FALSE;
2d21ac55 4745
593a1d5f
A
4746 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4747 m->cs_validated && !m->cs_tainted) {
4748 /*
4749 * CODE SIGNING:
4750 * This page is no longer dirty
4751 * but could have been modified,
4752 * so it will need to be
4753 * re-validated.
4754 */
4755 m->cs_validated = FALSE;
4756#if DEVELOPMENT || DEBUG
4757 vm_cs_validated_resets++;
4758#endif
b0d623f7 4759 pmap_disconnect(m->phys_page);
55e303ae 4760 }
593a1d5f
A
4761 clear_refmod |= VM_MEM_MODIFIED;
4762 }
593a1d5f
A
4763 if (page_list) {
4764 upl_page_info_t *p;
2d21ac55 4765
593a1d5f 4766 p = &(page_list[entry]);
b0d623f7 4767
593a1d5f
A
4768 if (p->phys_addr && p->pageout && !m->pageout) {
4769 m->busy = TRUE;
4770 m->pageout = TRUE;
b0d623f7
A
4771
4772 dwp->dw_mask |= DW_vm_page_wire;
4773
593a1d5f
A
4774 } else if (p->phys_addr &&
4775 !p->pageout && m->pageout &&
4776 !m->dump_cleaning) {
2d21ac55 4777 m->pageout = FALSE;
593a1d5f
A
4778 m->absent = FALSE;
4779 m->overwriting = FALSE;
b0d623f7
A
4780
4781 dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
593a1d5f
A
4782 }
4783 page_list[entry].phys_addr = 0;
4784 }
4785 m->dump_cleaning = FALSE;
2d21ac55 4786
593a1d5f 4787 if (m->laundry)
b0d623f7 4788 dwp->dw_mask |= DW_vm_pageout_throttle_up;
91447636 4789
593a1d5f
A
4790 if (m->pageout) {
4791 m->cleaning = FALSE;
4792 m->encrypted_cleaning = FALSE;
4793 m->pageout = FALSE;
1c79356b 4794#if MACH_CLUSTER_STATS
593a1d5f 4795 if (m->wanted) vm_pageout_target_collisions++;
1c79356b 4796#endif
2d21ac55 4797 m->dirty = FALSE;
b0d623f7 4798
593a1d5f
A
4799 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4800 m->cs_validated && !m->cs_tainted) {
4a3eedf9
A
4801 /*
4802 * CODE SIGNING:
4803 * This page is no longer dirty
4804 * but could have been modified,
4805 * so it will need to be
4806 * re-validated.
4807 */
4808 m->cs_validated = FALSE;
593a1d5f 4809#if DEVELOPMENT || DEBUG
4a3eedf9 4810 vm_cs_validated_resets++;
593a1d5f 4811#endif
b0d623f7 4812 pmap_disconnect(m->phys_page);
4a3eedf9 4813 }
b0d623f7
A
4814
4815 if ((flags & UPL_COMMIT_SET_DIRTY) ||
4816 (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))
593a1d5f 4817 m->dirty = TRUE;
b0d623f7 4818
593a1d5f
A
4819 if (m->dirty) {
4820 /*
4821 * page was re-dirtied after we started
4822 * the pageout... reactivate it since
4823 * we don't know whether the on-disk
4824 * copy matches what is now in memory
2d21ac55 4825 */
b0d623f7
A
4826 dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
4827
593a1d5f
A
4828 if (upl->flags & UPL_PAGEOUT) {
4829 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
4830 VM_STAT_INCR(reactivations);
4831 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
4832 }
593a1d5f
A
4833 } else {
4834 /*
4835 * page has been successfully cleaned
4836 * go ahead and free it for other use
2d21ac55 4837 */
b0d623f7 4838
593a1d5f
A
4839 if (m->object->internal) {
4840 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
4841 } else {
4842 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
4843 }
b0d623f7
A
4844 dwp->dw_mask |= DW_vm_page_free;
4845
593a1d5f
A
4846 if (upl->flags & UPL_PAGEOUT) {
4847 CLUSTER_STAT(vm_pageout_target_page_freed++;)
b0d623f7 4848
593a1d5f
A
4849 if (page_list[entry].dirty) {
4850 VM_STAT_INCR(pageouts);
4851 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
4852 pgpgout_count++;
4853 }
4854 }
de355530 4855 }
593a1d5f
A
4856 goto commit_next_page;
4857 }
4858#if MACH_CLUSTER_STATS
4859 if (m->wpmapped)
4860 m->dirty = pmap_is_modified(m->phys_page);
4861
4862 if (m->dirty) vm_pageout_cluster_dirtied++;
4863 else vm_pageout_cluster_cleaned++;
4864 if (m->wanted) vm_pageout_cluster_collisions++;
4865#endif
4866 m->dirty = FALSE;
91447636 4867
593a1d5f
A
4868 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4869 m->cs_validated && !m->cs_tainted) {
2d21ac55 4870 /*
593a1d5f
A
4871 * CODE SIGNING:
4872 * This page is no longer dirty
4873 * but could have been modified,
4874 * so it will need to be
4875 * re-validated.
2d21ac55 4876 */
593a1d5f
A
4877 m->cs_validated = FALSE;
4878#if DEVELOPMENT || DEBUG
4879 vm_cs_validated_resets++;
4880#endif
b0d623f7 4881 pmap_disconnect(m->phys_page);
593a1d5f 4882 }
55e303ae 4883
6d2010ae 4884 if (m->overwriting) {
593a1d5f 4885 /*
6d2010ae 4886 * the (COPY_OUT_FROM == FALSE) request_page_list case
593a1d5f 4887 */
6d2010ae
A
4888 if (m->busy) {
4889 m->absent = FALSE;
b0d623f7 4890
6d2010ae
A
4891 dwp->dw_mask |= DW_clear_busy;
4892 } else {
4893 /*
4894 * alternate (COPY_OUT_FROM == FALSE) page_list case
4895 * Occurs when the original page was wired
4896 * at the time of the list request
4897 */
4898 assert(VM_PAGE_WIRED(m));
b0d623f7 4899
6d2010ae
A
4900 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
4901 }
593a1d5f 4902 m->overwriting = FALSE;
6d2010ae
A
4903 }
4904 if (m->encrypted_cleaning == TRUE) {
4905 m->encrypted_cleaning = FALSE;
b0d623f7 4906
6d2010ae 4907 dwp->dw_mask |= DW_clear_busy;
593a1d5f
A
4908 }
4909 m->cleaning = FALSE;
b0d623f7 4910
593a1d5f
A
4911 /*
4912 * It is a part of the semantic of COPYOUT_FROM
4913 * UPLs that a commit implies cache sync
4914 * between the vm page and the backing store
4915 * this can be used to strip the precious bit
4916 * as well as clean
4917 */
b0d623f7 4918 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
593a1d5f 4919 m->precious = FALSE;
b0d623f7 4920
593a1d5f
A
4921 if (flags & UPL_COMMIT_SET_DIRTY)
4922 m->dirty = TRUE;
b0d623f7 4923
6d2010ae
A
4924 if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
4925 /*
4926 * page coming back in from being 'frozen'...
4927 * it was dirty before it was frozen, so keep it so
4928 * the vm_page_activate will notice that it really belongs
4929 * on the throttle queue and put it there
4930 */
4931 m->dirty = TRUE;
4932 dwp->dw_mask |= DW_vm_page_activate;
b0d623f7 4933
6d2010ae
A
4934 } else {
4935 if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
b0d623f7
A
4936 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4937 clear_refmod |= VM_MEM_REFERENCED;
6d2010ae
A
4938 } else if (!m->active && !m->inactive && !m->speculative) {
4939
4940 if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
4941 dwp->dw_mask |= DW_vm_page_speculate;
4942 else if (m->reference)
4943 dwp->dw_mask |= DW_vm_page_activate;
4944 else {
4945 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4946 clear_refmod |= VM_MEM_REFERENCED;
4947 }
b0d623f7 4948 }
593a1d5f 4949 }
b0d623f7 4950 if (upl->flags & UPL_ACCESS_BLOCKED) {
2d21ac55 4951 /*
593a1d5f
A
4952 * We blocked access to the pages in this URL.
4953 * Clear the "busy" bit on this page before we
4954 * wake up any waiter.
2d21ac55 4955 */
b0d623f7 4956 dwp->dw_mask |= DW_clear_busy;
1c79356b 4957 }
593a1d5f
A
4958 /*
4959 * Wakeup any thread waiting for the page to be un-cleaning.
4960 */
b0d623f7 4961 dwp->dw_mask |= DW_PAGE_WAKEUP;
593a1d5f 4962
2d21ac55 4963commit_next_page:
b0d623f7
A
4964 if (clear_refmod)
4965 pmap_clear_refmod(m->phys_page, clear_refmod);
4966
1c79356b
A
4967 target_offset += PAGE_SIZE_64;
4968 xfer_size -= PAGE_SIZE;
4969 entry++;
2d21ac55 4970
b0d623f7
A
4971 if (dwp->dw_mask) {
4972 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
6d2010ae 4973 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
4a3eedf9 4974
6d2010ae
A
4975 if (dw_count >= dw_limit) {
4976 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
b0d623f7
A
4977
4978 dwp = &dw_array[0];
4979 dw_count = 0;
4980 }
4981 } else {
4982 if (dwp->dw_mask & DW_clear_busy)
4983 m->busy = FALSE;
4984
4985 if (dwp->dw_mask & DW_PAGE_WAKEUP)
4986 PAGE_WAKEUP(m);
4a3eedf9 4987 }
2d21ac55 4988 }
1c79356b 4989 }
b0d623f7 4990 if (dw_count)
6d2010ae 4991 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
55e303ae
A
4992
4993 occupied = 1;
4994
4995 if (upl->flags & UPL_DEVICE_MEMORY) {
4996 occupied = 0;
4997 } else if (upl->flags & UPL_LITE) {
4998 int pg_num;
4999 int i;
2d21ac55 5000
55e303ae
A
5001 pg_num = upl->size/PAGE_SIZE;
5002 pg_num = (pg_num + 31) >> 5;
5003 occupied = 0;
2d21ac55
A
5004
5005 for (i = 0; i < pg_num; i++) {
5006 if (lite_list[i] != 0) {
55e303ae
A
5007 occupied = 1;
5008 break;
5009 }
5010 }
5011 } else {
2d21ac55 5012 if (queue_empty(&upl->map_object->memq))
55e303ae 5013 occupied = 0;
55e303ae 5014 }
2d21ac55 5015 if (occupied == 0) {
b0d623f7
A
5016 /*
5017 * If this UPL element belongs to a Vector UPL and is
5018 * empty, then this is the right function to deallocate
5019 * it. So go ahead set the *empty variable. The flag
5020 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
5021 * should be considered relevant for the Vector UPL and not
5022 * the internal UPLs.
5023 */
5024 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
0b4e3aa0 5025 *empty = TRUE;
2d21ac55 5026
b0d623f7 5027 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
2d21ac55
A
5028 /*
5029 * this is not a paging object
5030 * so we need to drop the paging reference
5031 * that was taken when we created the UPL
5032 * against this object
5033 */
b0d623f7 5034 vm_object_activity_end(shadow_object);
2d21ac55
A
5035 } else {
5036 /*
5037 * we dontated the paging reference to
5038 * the map object... vm_pageout_object_terminate
5039 * will drop this reference
5040 */
5041 }
1c79356b 5042 }
55e303ae 5043 vm_object_unlock(shadow_object);
91447636
A
5044 if (object != shadow_object)
5045 vm_object_unlock(object);
b0d623f7
A
5046
5047 if(!isVectorUPL)
5048 upl_unlock(upl);
5049 else {
5050 /*
5051 * If we completed our operations on an UPL that is
5052 * part of a Vectored UPL and if empty is TRUE, then
5053 * we should go ahead and deallocate this UPL element.
5054 * Then we check if this was the last of the UPL elements
5055 * within that Vectored UPL. If so, set empty to TRUE
5056 * so that in ubc_upl_commit_range or ubc_upl_commit, we
5057 * can go ahead and deallocate the Vector UPL too.
5058 */
5059 if(*empty==TRUE) {
5060 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
5061 upl_deallocate(upl);
5062 }
5063 goto process_upl_to_commit;
5064 }
0b4e3aa0 5065
2d21ac55
A
5066 if (pgpgout_count) {
5067 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
5068 }
5069
1c79356b
A
5070 return KERN_SUCCESS;
5071}
5072
0b4e3aa0
A
5073kern_return_t
5074upl_abort_range(
1c79356b 5075 upl_t upl,
91447636
A
5076 upl_offset_t offset,
5077 upl_size_t size,
0b4e3aa0
A
5078 int error,
5079 boolean_t *empty)
1c79356b 5080{
b0d623f7 5081 upl_size_t xfer_size, subupl_size = size;
55e303ae 5082 vm_object_t shadow_object;
2d21ac55 5083 vm_object_t object;
1c79356b 5084 vm_object_offset_t target_offset;
b0d623f7 5085 upl_offset_t subupl_offset = offset;
1c79356b 5086 int entry;
55e303ae
A
5087 wpl_array_t lite_list;
5088 int occupied;
6d2010ae
A
5089 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
5090 struct vm_page_delayed_work *dwp;
5091 int dw_count;
5092 int dw_limit;
5093 int isVectorUPL = 0;
b0d623f7 5094 upl_t vector_upl = NULL;
1c79356b 5095
0b4e3aa0
A
5096 *empty = FALSE;
5097
5098 if (upl == UPL_NULL)
5099 return KERN_INVALID_ARGUMENT;
5100
2d21ac55 5101 if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
0b4c1975 5102 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
55e303ae 5103
b0d623f7
A
5104 if((isVectorUPL = vector_upl_is_valid(upl))) {
5105 vector_upl = upl;
5106 upl_lock(vector_upl);
5107 }
5108 else
5109 upl_lock(upl);
5110
5111process_upl_to_abort:
5112 if(isVectorUPL) {
5113 size = subupl_size;
5114 offset = subupl_offset;
5115 if(size == 0) {
5116 upl_unlock(vector_upl);
5117 return KERN_SUCCESS;
5118 }
5119 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
5120 if(upl == NULL) {
5121 upl_unlock(vector_upl);
5122 return KERN_FAILURE;
5123 }
5124 subupl_size -= size;
5125 subupl_offset += size;
5126 }
5127
5128 *empty = FALSE;
5129
5130#if UPL_DEBUG
5131 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
5132 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5133
5134 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
5135 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
5136 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
5137
5138 upl->upl_commit_index++;
5139 }
5140#endif
2d21ac55 5141 if (upl->flags & UPL_DEVICE_MEMORY)
1c79356b 5142 xfer_size = 0;
2d21ac55
A
5143 else if ((offset + size) <= upl->size)
5144 xfer_size = size;
b0d623f7
A
5145 else {
5146 if(!isVectorUPL)
5147 upl_unlock(upl);
5148 else {
5149 upl_unlock(vector_upl);
5150 }
55e303ae 5151
b0d623f7
A
5152 return KERN_FAILURE;
5153 }
2d21ac55 5154 if (upl->flags & UPL_INTERNAL) {
55e303ae 5155 lite_list = (wpl_array_t)
91447636 5156 ((((uintptr_t)upl) + sizeof(struct upl))
55e303ae
A
5157 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
5158 } else {
5159 lite_list = (wpl_array_t)
91447636 5160 (((uintptr_t)upl) + sizeof(struct upl));
55e303ae 5161 }
2d21ac55
A
5162 object = upl->map_object;
5163
5164 if (upl->flags & UPL_SHADOWED) {
5165 vm_object_lock(object);
5166 shadow_object = object->shadow;
5167 } else
5168 shadow_object = object;
5169
1c79356b
A
5170 entry = offset/PAGE_SIZE;
5171 target_offset = (vm_object_offset_t)offset;
2d21ac55 5172
b0d623f7
A
5173 if (upl->flags & UPL_KERNEL_OBJECT)
5174 vm_object_lock_shared(shadow_object);
5175 else
5176 vm_object_lock(shadow_object);
4a3eedf9 5177
b0d623f7
A
5178 if (upl->flags & UPL_ACCESS_BLOCKED) {
5179 assert(shadow_object->blocked_access);
5180 shadow_object->blocked_access = FALSE;
5181 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
4a3eedf9 5182 }
b0d623f7
A
5183
5184 dwp = &dw_array[0];
5185 dw_count = 0;
6d2010ae 5186 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
b0d623f7
A
5187
5188 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
5189 panic("upl_abort_range: kernel_object being DUMPED");
4a3eedf9 5190
2d21ac55
A
5191 while (xfer_size) {
5192 vm_page_t t, m;
5193
b0d623f7
A
5194 dwp->dw_mask = 0;
5195
55e303ae 5196 m = VM_PAGE_NULL;
2d21ac55
A
5197
5198 if (upl->flags & UPL_LITE) {
b0d623f7
A
5199 unsigned int pg_num;
5200
5201 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
5202 assert(pg_num == target_offset/PAGE_SIZE);
5203
2d21ac55
A
5204
5205 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
55e303ae 5206 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
2d21ac55 5207
b0d623f7
A
5208 if ( !(upl->flags & UPL_KERNEL_OBJECT))
5209 m = vm_page_lookup(shadow_object, target_offset +
5210 (upl->offset - shadow_object->paging_offset));
55e303ae
A
5211 }
5212 }
2d21ac55
A
5213 if (upl->flags & UPL_SHADOWED) {
5214 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
5215 t->pageout = FALSE;
5216
b0d623f7 5217 VM_PAGE_FREE(t);
2d21ac55
A
5218
5219 if (m == VM_PAGE_NULL)
6d2010ae 5220 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
55e303ae
A
5221 }
5222 }
b0d623f7
A
5223 if ((upl->flags & UPL_KERNEL_OBJECT))
5224 goto abort_next_page;
5225
2d21ac55
A
5226 if (m != VM_PAGE_NULL) {
5227
5228 if (m->absent) {
91447636
A
5229 boolean_t must_free = TRUE;
5230
2d21ac55
A
5231 /*
5232 * COPYOUT = FALSE case
5233 * check for error conditions which must
5234 * be passed back to the pages customer
5235 */
5236 if (error & UPL_ABORT_RESTART) {
1c79356b
A
5237 m->restart = TRUE;
5238 m->absent = FALSE;
2d21ac55 5239 m->unusual = TRUE;
91447636 5240 must_free = FALSE;
2d21ac55 5241 } else if (error & UPL_ABORT_UNAVAILABLE) {
1c79356b
A
5242 m->restart = FALSE;
5243 m->unusual = TRUE;
91447636 5244 must_free = FALSE;
2d21ac55 5245 } else if (error & UPL_ABORT_ERROR) {
1c79356b
A
5246 m->restart = FALSE;
5247 m->absent = FALSE;
1c79356b 5248 m->error = TRUE;
2d21ac55 5249 m->unusual = TRUE;
91447636 5250 must_free = FALSE;
1c79356b 5251 }
6d2010ae
A
5252 if (m->clustered) {
5253 /*
5254 * This page was a part of a speculative
5255 * read-ahead initiated by the kernel
5256 * itself. No one is expecting this
5257 * page and no one will clean up its
5258 * error state if it ever becomes valid
5259 * in the future.
5260 * We have to free it here.
5261 */
5262 must_free = TRUE;
5263 }
91447636
A
5264
5265 /*
5266 * ENCRYPTED SWAP:
5267 * If the page was already encrypted,
5268 * we don't really need to decrypt it
5269 * now. It will get decrypted later,
5270 * on demand, as soon as someone needs
5271 * to access its contents.
5272 */
1c79356b
A
5273
5274 m->cleaning = FALSE;
2d21ac55 5275 m->encrypted_cleaning = FALSE;
6d2010ae
A
5276
5277 if (m->overwriting && !m->busy) {
5278 /*
5279 * this shouldn't happen since
5280 * this is an 'absent' page, but
5281 * it doesn't hurt to check for
5282 * the 'alternate' method of
5283 * stabilizing the page...
5284 * we will mark 'busy' to be cleared
5285 * in the following code which will
5286 * take care of the primary stabilzation
5287 * method (i.e. setting 'busy' to TRUE)
5288 */
5289 dwp->dw_mask |= DW_vm_page_unwire;
5290 }
1c79356b 5291 m->overwriting = FALSE;
b0d623f7
A
5292
5293 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
91447636 5294
2d21ac55 5295 if (must_free == TRUE)
b0d623f7 5296 dwp->dw_mask |= DW_vm_page_free;
2d21ac55 5297 else
b0d623f7 5298 dwp->dw_mask |= DW_vm_page_activate;
2d21ac55
A
5299 } else {
5300 /*
5301 * Handle the trusted pager throttle.
5302 */
5303 if (m->laundry)
b0d623f7 5304 dwp->dw_mask |= DW_vm_pageout_throttle_up;
2d21ac55 5305
6d2010ae
A
5306 if (upl->flags & UPL_ACCESS_BLOCKED) {
5307 /*
5308 * We blocked access to the pages in this UPL.
5309 * Clear the "busy" bit and wake up any waiter
5310 * for this page.
5311 */
5312 dwp->dw_mask |= DW_clear_busy;
5313 }
2d21ac55
A
5314 if (m->pageout) {
5315 assert(m->busy);
5316 assert(m->wire_count == 1);
5317 m->pageout = FALSE;
b0d623f7 5318
6d2010ae
A
5319 dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy);
5320 }
5321 if (m->overwriting) {
5322 if (m->busy)
5323 dwp->dw_mask |= DW_clear_busy;
5324 else {
5325 /*
5326 * deal with the 'alternate' method
5327 * of stabilizing the page...
5328 * we will either free the page
5329 * or mark 'busy' to be cleared
5330 * in the following code which will
5331 * take care of the primary stabilzation
5332 * method (i.e. setting 'busy' to TRUE)
5333 */
5334 dwp->dw_mask |= DW_vm_page_unwire;
5335 }
5336 m->overwriting = FALSE;
5337 }
5338 if (m->encrypted_cleaning == TRUE) {
5339 m->encrypted_cleaning = FALSE;
5340
5341 dwp->dw_mask |= DW_clear_busy;
1c79356b 5342 }
2d21ac55
A
5343 m->dump_cleaning = FALSE;
5344 m->cleaning = FALSE;
1c79356b 5345#if MACH_PAGEMAP
2d21ac55 5346 vm_external_state_clr(m->object->existence_map, m->offset);
1c79356b 5347#endif /* MACH_PAGEMAP */
2d21ac55
A
5348 if (error & UPL_ABORT_DUMP_PAGES) {
5349 pmap_disconnect(m->phys_page);
b0d623f7
A
5350
5351 dwp->dw_mask |= DW_vm_page_free;
2d21ac55
A
5352 } else {
5353 if (error & UPL_ABORT_REFERENCE) {
5354 /*
5355 * we've been told to explictly
5356 * reference this page... for
5357 * file I/O, this is done by
5358 * implementing an LRU on the inactive q
5359 */
b0d623f7 5360 dwp->dw_mask |= DW_vm_page_lru;
2d21ac55 5361 }
6d2010ae 5362 dwp->dw_mask |= DW_PAGE_WAKEUP;
2d21ac55 5363 }
1c79356b 5364 }
2d21ac55 5365 }
b0d623f7 5366abort_next_page:
55e303ae
A
5367 target_offset += PAGE_SIZE_64;
5368 xfer_size -= PAGE_SIZE;
5369 entry++;
b0d623f7
A
5370
5371 if (dwp->dw_mask) {
5372 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
6d2010ae 5373 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
b0d623f7 5374
6d2010ae
A
5375 if (dw_count >= dw_limit) {
5376 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
b0d623f7
A
5377
5378 dwp = &dw_array[0];
5379 dw_count = 0;
5380 }
5381 } else {
5382 if (dwp->dw_mask & DW_clear_busy)
5383 m->busy = FALSE;
5384
5385 if (dwp->dw_mask & DW_PAGE_WAKEUP)
5386 PAGE_WAKEUP(m);
5387 }
5388 }
d7e50217 5389 }
b0d623f7 5390 if (dw_count)
6d2010ae 5391 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
2d21ac55 5392
55e303ae 5393 occupied = 1;
2d21ac55 5394
55e303ae
A
5395 if (upl->flags & UPL_DEVICE_MEMORY) {
5396 occupied = 0;
5397 } else if (upl->flags & UPL_LITE) {
5398 int pg_num;
5399 int i;
2d21ac55 5400
55e303ae
A
5401 pg_num = upl->size/PAGE_SIZE;
5402 pg_num = (pg_num + 31) >> 5;
5403 occupied = 0;
2d21ac55
A
5404
5405 for (i = 0; i < pg_num; i++) {
5406 if (lite_list[i] != 0) {
55e303ae
A
5407 occupied = 1;
5408 break;
5409 }
5410 }
5411 } else {
2d21ac55 5412 if (queue_empty(&upl->map_object->memq))
55e303ae 5413 occupied = 0;
55e303ae 5414 }
2d21ac55 5415 if (occupied == 0) {
b0d623f7
A
5416 /*
5417 * If this UPL element belongs to a Vector UPL and is
5418 * empty, then this is the right function to deallocate
5419 * it. So go ahead set the *empty variable. The flag
5420 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
5421 * should be considered relevant for the Vector UPL and
5422 * not the internal UPLs.
5423 */
5424 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
0b4e3aa0 5425 *empty = TRUE;
2d21ac55 5426
b0d623f7 5427 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
2d21ac55
A
5428 /*
5429 * this is not a paging object
5430 * so we need to drop the paging reference
5431 * that was taken when we created the UPL
5432 * against this object
5433 */
b0d623f7 5434 vm_object_activity_end(shadow_object);
2d21ac55
A
5435 } else {
5436 /*
5437 * we dontated the paging reference to
5438 * the map object... vm_pageout_object_terminate
5439 * will drop this reference
5440 */
5441 }
1c79356b 5442 }
55e303ae 5443 vm_object_unlock(shadow_object);
91447636
A
5444 if (object != shadow_object)
5445 vm_object_unlock(object);
b0d623f7
A
5446
5447 if(!isVectorUPL)
5448 upl_unlock(upl);
5449 else {
5450 /*
5451 * If we completed our operations on an UPL that is
5452 * part of a Vectored UPL and if empty is TRUE, then
5453 * we should go ahead and deallocate this UPL element.
5454 * Then we check if this was the last of the UPL elements
5455 * within that Vectored UPL. If so, set empty to TRUE
5456 * so that in ubc_upl_abort_range or ubc_upl_abort, we
5457 * can go ahead and deallocate the Vector UPL too.
5458 */
5459 if(*empty == TRUE) {
5460 *empty = vector_upl_set_subupl(vector_upl, upl,0);
5461 upl_deallocate(upl);
5462 }
5463 goto process_upl_to_abort;
5464 }
55e303ae 5465
1c79356b
A
5466 return KERN_SUCCESS;
5467}
5468
2d21ac55 5469
1c79356b 5470kern_return_t
0b4e3aa0 5471upl_abort(
1c79356b
A
5472 upl_t upl,
5473 int error)
2d21ac55
A
5474{
5475 boolean_t empty;
5476
5477 return upl_abort_range(upl, 0, upl->size, error, &empty);
1c79356b
A
5478}
5479
55e303ae 5480
2d21ac55
A
5481/* an option on commit should be wire */
5482kern_return_t
5483upl_commit(
5484 upl_t upl,
5485 upl_page_info_t *page_list,
5486 mach_msg_type_number_t count)
5487{
5488 boolean_t empty;
5489
5490 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
5491}
5492
55e303ae 5493
b0d623f7
A
5494unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
5495
55e303ae
A
5496kern_return_t
5497vm_object_iopl_request(
5498 vm_object_t object,
5499 vm_object_offset_t offset,
91447636 5500 upl_size_t size,
55e303ae
A
5501 upl_t *upl_ptr,
5502 upl_page_info_array_t user_page_list,
5503 unsigned int *page_list_count,
5504 int cntrl_flags)
5505{
5506 vm_page_t dst_page;
2d21ac55
A
5507 vm_object_offset_t dst_offset;
5508 upl_size_t xfer_size;
55e303ae 5509 upl_t upl = NULL;
91447636
A
5510 unsigned int entry;
5511 wpl_array_t lite_list = NULL;
91447636 5512 int no_zero_fill = FALSE;
6d2010ae 5513 unsigned int size_in_pages;
2d21ac55 5514 u_int32_t psize;
55e303ae
A
5515 kern_return_t ret;
5516 vm_prot_t prot;
2d21ac55 5517 struct vm_object_fault_info fault_info;
6d2010ae
A
5518 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
5519 struct vm_page_delayed_work *dwp;
b0d623f7 5520 int dw_count;
6d2010ae 5521 int dw_limit;
b0d623f7 5522 int dw_index;
55e303ae 5523
91447636
A
5524 if (cntrl_flags & ~UPL_VALID_FLAGS) {
5525 /*
5526 * For forward compatibility's sake,
5527 * reject any unknown flag.
5528 */
5529 return KERN_INVALID_VALUE;
5530 }
0b4c1975 5531 if (vm_lopage_needed == FALSE)
0c530ab8
A
5532 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
5533
5534 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
5535 if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
5536 return KERN_INVALID_VALUE;
5537
5538 if (object->phys_contiguous) {
6d2010ae 5539 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
0c530ab8 5540 return KERN_INVALID_ADDRESS;
2d21ac55 5541
6d2010ae 5542 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
0c530ab8
A
5543 return KERN_INVALID_ADDRESS;
5544 }
5545 }
91447636
A
5546
5547 if (cntrl_flags & UPL_ENCRYPT) {
5548 /*
5549 * ENCRYPTED SWAP:
5550 * The paging path doesn't use this interface,
5551 * so we don't support the UPL_ENCRYPT flag
5552 * here. We won't encrypt the pages.
5553 */
5554 assert(! (cntrl_flags & UPL_ENCRYPT));
5555 }
91447636
A
5556 if (cntrl_flags & UPL_NOZEROFILL)
5557 no_zero_fill = TRUE;
5558
5559 if (cntrl_flags & UPL_COPYOUT_FROM)
55e303ae 5560 prot = VM_PROT_READ;
91447636 5561 else
55e303ae 5562 prot = VM_PROT_READ | VM_PROT_WRITE;
55e303ae 5563
b0d623f7
A
5564 if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
5565 size = MAX_UPL_SIZE * PAGE_SIZE;
55e303ae 5566
2d21ac55
A
5567 if (cntrl_flags & UPL_SET_INTERNAL) {
5568 if (page_list_count != NULL)
cf7d32b8 5569 *page_list_count = MAX_UPL_SIZE;
2d21ac55
A
5570 }
5571 if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
5572 ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
5573 return KERN_INVALID_ARGUMENT;
55e303ae 5574
2d21ac55
A
5575 if ((!object->internal) && (object->paging_offset != 0))
5576 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
5577
5578
5579 if (object->phys_contiguous)
5580 psize = PAGE_SIZE;
5581 else
5582 psize = size;
5583
5584 if (cntrl_flags & UPL_SET_INTERNAL) {
5585 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
5586
5587 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5588 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
5589 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
b0d623f7
A
5590 if (size == 0) {
5591 user_page_list = NULL;
5592 lite_list = NULL;
5593 }
2d21ac55
A
5594 } else {
5595 upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
55e303ae 5596
2d21ac55 5597 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
5598 if (size == 0) {
5599 lite_list = NULL;
5600 }
55e303ae 5601 }
2d21ac55
A
5602 if (user_page_list)
5603 user_page_list[0].device = FALSE;
5604 *upl_ptr = upl;
55e303ae 5605
2d21ac55
A
5606 upl->map_object = object;
5607 upl->size = size;
5608
6d2010ae
A
5609 size_in_pages = size / PAGE_SIZE;
5610
b0d623f7
A
5611 if (object == kernel_object &&
5612 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
5613 upl->flags |= UPL_KERNEL_OBJECT;
5614#if UPL_DEBUG
5615 vm_object_lock(object);
5616#else
5617 vm_object_lock_shared(object);
5618#endif
5619 } else {
5620 vm_object_lock(object);
5621 vm_object_activity_begin(object);
5622 }
2d21ac55
A
5623 /*
5624 * paging in progress also protects the paging_offset
5625 */
5626 upl->offset = offset + object->paging_offset;
55e303ae 5627
b0d623f7
A
5628 if (cntrl_flags & UPL_BLOCK_ACCESS) {
5629 /*
5630 * The user requested that access to the pages in this URL
5631 * be blocked until the UPL is commited or aborted.
5632 */
5633 upl->flags |= UPL_ACCESS_BLOCKED;
5634 }
5635
2d21ac55 5636 if (object->phys_contiguous) {
b0d623f7 5637#if UPL_DEBUG
2d21ac55
A
5638 queue_enter(&object->uplq, upl, upl_t, uplq);
5639#endif /* UPL_DEBUG */
55e303ae 5640
b0d623f7
A
5641 if (upl->flags & UPL_ACCESS_BLOCKED) {
5642 assert(!object->blocked_access);
5643 object->blocked_access = TRUE;
5644 }
5645
2d21ac55 5646 vm_object_unlock(object);
55e303ae 5647
2d21ac55
A
5648 /*
5649 * don't need any shadow mappings for this one
5650 * since it is already I/O memory
5651 */
5652 upl->flags |= UPL_DEVICE_MEMORY;
55e303ae 5653
6d2010ae 5654 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);
2d21ac55
A
5655
5656 if (user_page_list) {
6d2010ae 5657 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
2d21ac55 5658 user_page_list[0].device = TRUE;
55e303ae 5659 }
2d21ac55
A
5660 if (page_list_count != NULL) {
5661 if (upl->flags & UPL_INTERNAL)
5662 *page_list_count = 0;
5663 else
5664 *page_list_count = 1;
55e303ae 5665 }
2d21ac55 5666 return KERN_SUCCESS;
55e303ae 5667 }
b0d623f7
A
5668 if (object != kernel_object) {
5669 /*
5670 * Protect user space from future COW operations
5671 */
5672 object->true_share = TRUE;
55e303ae 5673
b0d623f7
A
5674 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
5675 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
5676 }
6d2010ae 5677
b0d623f7 5678#if UPL_DEBUG
2d21ac55 5679 queue_enter(&object->uplq, upl, upl_t, uplq);
91447636 5680#endif /* UPL_DEBUG */
91447636 5681
b0d623f7
A
5682 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
5683 object->copy != VM_OBJECT_NULL) {
91447636 5684 /*
b0d623f7
A
5685 * Honor copy-on-write obligations
5686 *
5687 * The caller is gathering these pages and
5688 * might modify their contents. We need to
5689 * make sure that the copy object has its own
5690 * private copies of these pages before we let
5691 * the caller modify them.
5692 *
5693 * NOTE: someone else could map the original object
5694 * after we've done this copy-on-write here, and they
5695 * could then see an inconsistent picture of the memory
5696 * while it's being modified via the UPL. To prevent this,
5697 * we would have to block access to these pages until the
5698 * UPL is released. We could use the UPL_BLOCK_ACCESS
5699 * code path for that...
91447636 5700 */
b0d623f7
A
5701 vm_object_update(object,
5702 offset,
5703 size,
5704 NULL,
5705 NULL,
5706 FALSE, /* should_return */
5707 MEMORY_OBJECT_COPY_SYNC,
5708 VM_PROT_NO_CHANGE);
5709#if DEVELOPMENT || DEBUG
5710 iopl_cow++;
5711 iopl_cow_pages += size >> PAGE_SHIFT;
5712#endif
55e303ae 5713 }
b0d623f7
A
5714
5715
55e303ae 5716 entry = 0;
2d21ac55
A
5717
5718 xfer_size = size;
5719 dst_offset = offset;
5720
5721 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
5722 fault_info.user_tag = 0;
5723 fault_info.lo_offset = offset;
5724 fault_info.hi_offset = offset + xfer_size;
5725 fault_info.no_cache = FALSE;
b0d623f7 5726 fault_info.stealth = FALSE;
6d2010ae
A
5727 fault_info.io_sync = FALSE;
5728 fault_info.cs_bypass = FALSE;
0b4c1975 5729 fault_info.mark_zf_absent = TRUE;
b0d623f7
A
5730
5731 dwp = &dw_array[0];
5732 dw_count = 0;
6d2010ae 5733 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2d21ac55 5734
55e303ae 5735 while (xfer_size) {
2d21ac55 5736 vm_fault_return_t result;
b0d623f7
A
5737 unsigned int pg_num;
5738
5739 dwp->dw_mask = 0;
2d21ac55 5740
55e303ae
A
5741 dst_page = vm_page_lookup(object, dst_offset);
5742
91447636
A
5743 /*
5744 * ENCRYPTED SWAP:
5745 * If the page is encrypted, we need to decrypt it,
5746 * so force a soft page fault.
5747 */
b0d623f7
A
5748 if (dst_page == VM_PAGE_NULL ||
5749 dst_page->busy ||
5750 dst_page->encrypted ||
5751 dst_page->error ||
5752 dst_page->restart ||
5753 dst_page->absent ||
5754 dst_page->fictitious) {
5755
5756 if (object == kernel_object)
5757 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
2d21ac55 5758
55e303ae
A
5759 do {
5760 vm_page_t top_page;
5761 kern_return_t error_code;
5762 int interruptible;
5763
2d21ac55 5764 if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
55e303ae 5765 interruptible = THREAD_ABORTSAFE;
2d21ac55 5766 else
55e303ae 5767 interruptible = THREAD_UNINT;
2d21ac55
A
5768
5769 fault_info.interruptible = interruptible;
5770 fault_info.cluster_size = xfer_size;
55e303ae 5771
b0d623f7
A
5772 vm_object_paging_begin(object);
5773
55e303ae 5774 result = vm_fault_page(object, dst_offset,
2d21ac55
A
5775 prot | VM_PROT_WRITE, FALSE,
5776 &prot, &dst_page, &top_page,
5777 (int *)0,
5778 &error_code, no_zero_fill,
5779 FALSE, &fault_info);
5780
5781 switch (result) {
5782
55e303ae
A
5783 case VM_FAULT_SUCCESS:
5784
d41d1dae
A
5785 if ( !dst_page->absent) {
5786 PAGE_WAKEUP_DONE(dst_page);
5787 } else {
5788 /*
5789 * we only get back an absent page if we
5790 * requested that it not be zero-filled
5791 * because we are about to fill it via I/O
5792 *
5793 * absent pages should be left BUSY
5794 * to prevent them from being faulted
5795 * into an address space before we've
5796 * had a chance to complete the I/O on
5797 * them since they may contain info that
5798 * shouldn't be seen by the faulting task
5799 */
5800 }
55e303ae
A
5801 /*
5802 * Release paging references and
5803 * top-level placeholder page, if any.
5804 */
2d21ac55 5805 if (top_page != VM_PAGE_NULL) {
55e303ae 5806 vm_object_t local_object;
2d21ac55
A
5807
5808 local_object = top_page->object;
5809
5810 if (top_page->object != dst_page->object) {
5811 vm_object_lock(local_object);
55e303ae 5812 VM_PAGE_FREE(top_page);
2d21ac55
A
5813 vm_object_paging_end(local_object);
5814 vm_object_unlock(local_object);
55e303ae
A
5815 } else {
5816 VM_PAGE_FREE(top_page);
2d21ac55 5817 vm_object_paging_end(local_object);
55e303ae
A
5818 }
5819 }
b0d623f7 5820 vm_object_paging_end(object);
55e303ae
A
5821 break;
5822
55e303ae
A
5823 case VM_FAULT_RETRY:
5824 vm_object_lock(object);
55e303ae
A
5825 break;
5826
6d2010ae
A
5827 case VM_FAULT_MEMORY_SHORTAGE:
5828 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
2d21ac55 5829
6d2010ae 5830 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
55e303ae 5831
55e303ae 5832 if (vm_page_wait(interruptible)) {
6d2010ae
A
5833 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5834
5835 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
55e303ae 5836 vm_object_lock(object);
6d2010ae 5837
55e303ae
A
5838 break;
5839 }
6d2010ae
A
5840 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5841
5842 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
5843
55e303ae
A
5844 /* fall thru */
5845
5846 case VM_FAULT_INTERRUPTED:
5847 error_code = MACH_SEND_INTERRUPTED;
5848 case VM_FAULT_MEMORY_ERROR:
b0d623f7 5849 memory_error:
2d21ac55 5850 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
0c530ab8 5851
2d21ac55 5852 vm_object_lock(object);
0c530ab8 5853 goto return_err;
b0d623f7
A
5854
5855 case VM_FAULT_SUCCESS_NO_VM_PAGE:
5856 /* success but no page: fail */
5857 vm_object_paging_end(object);
5858 vm_object_unlock(object);
5859 goto memory_error;
5860
5861 default:
5862 panic("vm_object_iopl_request: unexpected error"
5863 " 0x%x from vm_fault_page()\n", result);
55e303ae 5864 }
2d21ac55 5865 } while (result != VM_FAULT_SUCCESS);
b0d623f7 5866
55e303ae 5867 }
b0d623f7
A
5868 if (upl->flags & UPL_KERNEL_OBJECT)
5869 goto record_phys_addr;
5870
5871 if (dst_page->cleaning) {
5872 /*
5873 * Someone else is cleaning this page in place.as
5874 * In theory, we should be able to proceed and use this
5875 * page but they'll probably end up clearing the "busy"
5876 * bit on it in upl_commit_range() but they didn't set
5877 * it, so they would clear our "busy" bit and open
5878 * us to race conditions.
5879 * We'd better wait for the cleaning to complete and
5880 * then try again.
5881 */
5882 vm_object_iopl_request_sleep_for_cleaning++;
5883 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5884 continue;
5885 }
0c530ab8
A
5886 if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
5887 dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
5888 vm_page_t low_page;
5889 int refmod;
5890
5891 /*
5892 * support devices that can't DMA above 32 bits
5893 * by substituting pages from a pool of low address
5894 * memory for any pages we find above the 4G mark
5895 * can't substitute if the page is already wired because
5896 * we don't know whether that physical address has been
5897 * handed out to some other 64 bit capable DMA device to use
5898 */
b0d623f7 5899 if (VM_PAGE_WIRED(dst_page)) {
0c530ab8
A
5900 ret = KERN_PROTECTION_FAILURE;
5901 goto return_err;
5902 }
0c530ab8
A
5903 low_page = vm_page_grablo();
5904
5905 if (low_page == VM_PAGE_NULL) {
5906 ret = KERN_RESOURCE_SHORTAGE;
5907 goto return_err;
5908 }
5909 /*
5910 * from here until the vm_page_replace completes
5911 * we musn't drop the object lock... we don't
5912 * want anyone refaulting this page in and using
5913 * it after we disconnect it... we want the fault
5914 * to find the new page being substituted.
5915 */
2d21ac55
A
5916 if (dst_page->pmapped)
5917 refmod = pmap_disconnect(dst_page->phys_page);
5918 else
5919 refmod = 0;
d41d1dae 5920
6d2010ae 5921 if (!dst_page->absent)
d41d1dae 5922 vm_page_copy(dst_page, low_page);
2d21ac55 5923
0c530ab8
A
5924 low_page->reference = dst_page->reference;
5925 low_page->dirty = dst_page->dirty;
d41d1dae 5926 low_page->absent = dst_page->absent;
0c530ab8
A
5927
5928 if (refmod & VM_MEM_REFERENCED)
5929 low_page->reference = TRUE;
5930 if (refmod & VM_MEM_MODIFIED)
5931 low_page->dirty = TRUE;
5932
0c530ab8 5933 vm_page_replace(low_page, object, dst_offset);
0c530ab8
A
5934
5935 dst_page = low_page;
5936 /*
5937 * vm_page_grablo returned the page marked
5938 * BUSY... we don't need a PAGE_WAKEUP_DONE
5939 * here, because we've never dropped the object lock
5940 */
d41d1dae
A
5941 if ( !dst_page->absent)
5942 dst_page->busy = FALSE;
0c530ab8 5943 }
d41d1dae
A
5944 if ( !dst_page->busy)
5945 dwp->dw_mask |= DW_vm_page_wire;
55e303ae 5946
91447636
A
5947 if (cntrl_flags & UPL_BLOCK_ACCESS) {
5948 /*
5949 * Mark the page "busy" to block any future page fault
6d2010ae
A
5950 * on this page in addition to wiring it.
5951 * We'll also remove the mapping
91447636
A
5952 * of all these pages before leaving this routine.
5953 */
5954 assert(!dst_page->fictitious);
5955 dst_page->busy = TRUE;
5956 }
2d21ac55
A
5957 /*
5958 * expect the page to be used
5959 * page queues lock must be held to set 'reference'
5960 */
b0d623f7 5961 dwp->dw_mask |= DW_set_reference;
55e303ae 5962
2d21ac55
A
5963 if (!(cntrl_flags & UPL_COPYOUT_FROM))
5964 dst_page->dirty = TRUE;
b0d623f7 5965record_phys_addr:
d41d1dae
A
5966 if (dst_page->busy)
5967 upl->flags |= UPL_HAS_BUSY;
5968
b0d623f7
A
5969 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
5970 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
5971 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
55e303ae 5972
2d21ac55
A
5973 if (dst_page->phys_page > upl->highest_page)
5974 upl->highest_page = dst_page->phys_page;
55e303ae 5975
2d21ac55
A
5976 if (user_page_list) {
5977 user_page_list[entry].phys_addr = dst_page->phys_page;
2d21ac55
A
5978 user_page_list[entry].pageout = dst_page->pageout;
5979 user_page_list[entry].absent = dst_page->absent;
593a1d5f 5980 user_page_list[entry].dirty = dst_page->dirty;
2d21ac55 5981 user_page_list[entry].precious = dst_page->precious;
593a1d5f 5982 user_page_list[entry].device = FALSE;
2d21ac55
A
5983 if (dst_page->clustered == TRUE)
5984 user_page_list[entry].speculative = dst_page->speculative;
5985 else
5986 user_page_list[entry].speculative = FALSE;
593a1d5f
A
5987 user_page_list[entry].cs_validated = dst_page->cs_validated;
5988 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
55e303ae 5989 }
b0d623f7
A
5990 if (object != kernel_object) {
5991 /*
5992 * someone is explicitly grabbing this page...
5993 * update clustered and speculative state
5994 *
5995 */
5996 VM_PAGE_CONSUME_CLUSTERED(dst_page);
55e303ae
A
5997 }
5998 entry++;
5999 dst_offset += PAGE_SIZE_64;
6000 xfer_size -= PAGE_SIZE;
b0d623f7
A
6001
6002 if (dwp->dw_mask) {
6d2010ae 6003 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
b0d623f7 6004
6d2010ae
A
6005 if (dw_count >= dw_limit) {
6006 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
b0d623f7
A
6007
6008 dwp = &dw_array[0];
6009 dw_count = 0;
6010 }
6011 }
55e303ae 6012 }
b0d623f7 6013 if (dw_count)
6d2010ae 6014 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
55e303ae 6015
2d21ac55
A
6016 if (page_list_count != NULL) {
6017 if (upl->flags & UPL_INTERNAL)
55e303ae 6018 *page_list_count = 0;
2d21ac55 6019 else if (*page_list_count > entry)
55e303ae
A
6020 *page_list_count = entry;
6021 }
55e303ae 6022 vm_object_unlock(object);
55e303ae 6023
91447636
A
6024 if (cntrl_flags & UPL_BLOCK_ACCESS) {
6025 /*
6026 * We've marked all the pages "busy" so that future
6027 * page faults will block.
6028 * Now remove the mapping for these pages, so that they
6029 * can't be accessed without causing a page fault.
6030 */
6031 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
6032 PMAP_NULL, 0, VM_PROT_NONE);
b0d623f7
A
6033 assert(!object->blocked_access);
6034 object->blocked_access = TRUE;
91447636 6035 }
91447636 6036 return KERN_SUCCESS;
0c530ab8 6037
0c530ab8 6038return_err:
b0d623f7 6039 dw_index = 0;
0c530ab8
A
6040
6041 for (; offset < dst_offset; offset += PAGE_SIZE) {
0b4c1975
A
6042 boolean_t need_unwire;
6043
0c530ab8
A
6044 dst_page = vm_page_lookup(object, offset);
6045
6046 if (dst_page == VM_PAGE_NULL)
d41d1dae 6047 panic("vm_object_iopl_request: Wired page missing. \n");
2d21ac55 6048
0b4c1975
A
6049 /*
6050 * if we've already processed this page in an earlier
6051 * dw_do_work, we need to undo the wiring... we will
6052 * leave the dirty and reference bits on if they
6053 * were set, since we don't have a good way of knowing
6054 * what the previous state was and we won't get here
6055 * under any normal circumstances... we will always
6056 * clear BUSY and wakeup any waiters via vm_page_free
6057 * or PAGE_WAKEUP_DONE
6058 */
6059 need_unwire = TRUE;
6060
b0d623f7
A
6061 if (dw_count) {
6062 if (dw_array[dw_index].dw_m == dst_page) {
0b4c1975
A
6063 /*
6064 * still in the deferred work list
6065 * which means we haven't yet called
6066 * vm_page_wire on this page
6067 */
6068 need_unwire = FALSE;
d41d1dae
A
6069
6070 dw_index++;
6071 dw_count--;
b0d623f7
A
6072 }
6073 }
0b4c1975
A
6074 vm_page_lock_queues();
6075
d41d1dae
A
6076 if (dst_page->absent) {
6077 vm_page_free(dst_page);
0b4c1975 6078
d41d1dae
A
6079 need_unwire = FALSE;
6080 } else {
6081 if (need_unwire == TRUE)
6082 vm_page_unwire(dst_page, TRUE);
0b4c1975 6083
0b4c1975 6084 PAGE_WAKEUP_DONE(dst_page);
6d2010ae 6085 }
0c530ab8 6086 vm_page_unlock_queues();
2d21ac55 6087
0b4c1975
A
6088 if (need_unwire == TRUE)
6089 VM_STAT_INCR(reactivations);
0c530ab8 6090 }
b0d623f7
A
6091#if UPL_DEBUG
6092 upl->upl_state = 2;
6093#endif
6094 if (! (upl->flags & UPL_KERNEL_OBJECT)) {
6095 vm_object_activity_end(object);
6096 }
0c530ab8
A
6097 vm_object_unlock(object);
6098 upl_destroy(upl);
6099
6100 return ret;
1c79356b
A
6101}
6102
91447636
A
6103kern_return_t
6104upl_transpose(
6105 upl_t upl1,
6106 upl_t upl2)
1c79356b 6107{
91447636
A
6108 kern_return_t retval;
6109 boolean_t upls_locked;
6110 vm_object_t object1, object2;
1c79356b 6111
b0d623f7 6112 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
91447636
A
6113 return KERN_INVALID_ARGUMENT;
6114 }
6115
6116 upls_locked = FALSE;
1c79356b 6117
91447636
A
6118 /*
6119 * Since we need to lock both UPLs at the same time,
6120 * avoid deadlocks by always taking locks in the same order.
6121 */
6122 if (upl1 < upl2) {
6123 upl_lock(upl1);
6124 upl_lock(upl2);
6125 } else {
6126 upl_lock(upl2);
6127 upl_lock(upl1);
6128 }
6129 upls_locked = TRUE; /* the UPLs will need to be unlocked */
6130
6131 object1 = upl1->map_object;
6132 object2 = upl2->map_object;
6133
6134 if (upl1->offset != 0 || upl2->offset != 0 ||
6135 upl1->size != upl2->size) {
6136 /*
6137 * We deal only with full objects, not subsets.
6138 * That's because we exchange the entire backing store info
6139 * for the objects: pager, resident pages, etc... We can't do
6140 * only part of it.
6141 */
6142 retval = KERN_INVALID_VALUE;
6143 goto done;
6144 }
6145
6146 /*
6147 * Tranpose the VM objects' backing store.
6148 */
6149 retval = vm_object_transpose(object1, object2,
6150 (vm_object_size_t) upl1->size);
6151
6152 if (retval == KERN_SUCCESS) {
6153 /*
6154 * Make each UPL point to the correct VM object, i.e. the
6155 * object holding the pages that the UPL refers to...
6156 */
b0d623f7 6157#if UPL_DEBUG
2d21ac55
A
6158 queue_remove(&object1->uplq, upl1, upl_t, uplq);
6159 queue_remove(&object2->uplq, upl2, upl_t, uplq);
6160#endif
91447636
A
6161 upl1->map_object = object2;
6162 upl2->map_object = object1;
b0d623f7 6163#if UPL_DEBUG
2d21ac55
A
6164 queue_enter(&object1->uplq, upl2, upl_t, uplq);
6165 queue_enter(&object2->uplq, upl1, upl_t, uplq);
6166#endif
91447636
A
6167 }
6168
6169done:
6170 /*
6171 * Cleanup.
6172 */
6173 if (upls_locked) {
6174 upl_unlock(upl1);
6175 upl_unlock(upl2);
6176 upls_locked = FALSE;
6177 }
6178
6179 return retval;
6180}
6181
6182/*
6183 * ENCRYPTED SWAP:
6184 *
6185 * Rationale: the user might have some encrypted data on disk (via
6186 * FileVault or any other mechanism). That data is then decrypted in
6187 * memory, which is safe as long as the machine is secure. But that
6188 * decrypted data in memory could be paged out to disk by the default
6189 * pager. The data would then be stored on disk in clear (not encrypted)
6190 * and it could be accessed by anyone who gets physical access to the
6191 * disk (if the laptop or the disk gets stolen for example). This weakens
6192 * the security offered by FileVault.
6193 *
6194 * Solution: the default pager will optionally request that all the
6195 * pages it gathers for pageout be encrypted, via the UPL interfaces,
6196 * before it sends this UPL to disk via the vnode_pageout() path.
6197 *
6198 * Notes:
6199 *
6200 * To avoid disrupting the VM LRU algorithms, we want to keep the
6201 * clean-in-place mechanisms, which allow us to send some extra pages to
6202 * swap (clustering) without actually removing them from the user's
6203 * address space. We don't want the user to unknowingly access encrypted
6204 * data, so we have to actually remove the encrypted pages from the page
6205 * table. When the user accesses the data, the hardware will fail to
6206 * locate the virtual page in its page table and will trigger a page
6207 * fault. We can then decrypt the page and enter it in the page table
6208 * again. Whenever we allow the user to access the contents of a page,
6209 * we have to make sure it's not encrypted.
6210 *
6211 *
6212 */
6213/*
6214 * ENCRYPTED SWAP:
6215 * Reserve of virtual addresses in the kernel address space.
6216 * We need to map the physical pages in the kernel, so that we
6217 * can call the encryption/decryption routines with a kernel
6218 * virtual address. We keep this pool of pre-allocated kernel
6219 * virtual addresses so that we don't have to scan the kernel's
6d2010ae 6220 * virtual address space each time we need to encrypt or decrypt
91447636
A
6221 * a physical page.
6222 * It would be nice to be able to encrypt and decrypt in physical
6223 * mode but that might not always be more efficient...
6224 */
6225decl_simple_lock_data(,vm_paging_lock)
6226#define VM_PAGING_NUM_PAGES 64
6227vm_map_offset_t vm_paging_base_address = 0;
6228boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
6229int vm_paging_max_index = 0;
2d21ac55
A
6230int vm_paging_page_waiter = 0;
6231int vm_paging_page_waiter_total = 0;
91447636
A
6232unsigned long vm_paging_no_kernel_page = 0;
6233unsigned long vm_paging_objects_mapped = 0;
6234unsigned long vm_paging_pages_mapped = 0;
6235unsigned long vm_paging_objects_mapped_slow = 0;
6236unsigned long vm_paging_pages_mapped_slow = 0;
6237
2d21ac55
A
6238void
6239vm_paging_map_init(void)
6240{
6241 kern_return_t kr;
6242 vm_map_offset_t page_map_offset;
6243 vm_map_entry_t map_entry;
6244
6245 assert(vm_paging_base_address == 0);
6246
6247 /*
6248 * Initialize our pool of pre-allocated kernel
6249 * virtual addresses.
6250 */
6251 page_map_offset = 0;
6252 kr = vm_map_find_space(kernel_map,
6253 &page_map_offset,
6254 VM_PAGING_NUM_PAGES * PAGE_SIZE,
6255 0,
6256 0,
6257 &map_entry);
6258 if (kr != KERN_SUCCESS) {
6259 panic("vm_paging_map_init: kernel_map full\n");
6260 }
6261 map_entry->object.vm_object = kernel_object;
b0d623f7 6262 map_entry->offset = page_map_offset;
6d2010ae
A
6263 map_entry->protection = VM_PROT_NONE;
6264 map_entry->max_protection = VM_PROT_NONE;
6265 map_entry->permanent = TRUE;
2d21ac55
A
6266 vm_object_reference(kernel_object);
6267 vm_map_unlock(kernel_map);
6268
6269 assert(vm_paging_base_address == 0);
6270 vm_paging_base_address = page_map_offset;
6271}
6272
91447636
A
6273/*
6274 * ENCRYPTED SWAP:
6275 * vm_paging_map_object:
6276 * Maps part of a VM object's pages in the kernel
6277 * virtual address space, using the pre-allocated
6278 * kernel virtual addresses, if possible.
6279 * Context:
6280 * The VM object is locked. This lock will get
2d21ac55
A
6281 * dropped and re-acquired though, so the caller
6282 * must make sure the VM object is kept alive
6283 * (by holding a VM map that has a reference
6284 * on it, for example, or taking an extra reference).
6285 * The page should also be kept busy to prevent
6286 * it from being reclaimed.
91447636
A
6287 */
6288kern_return_t
6289vm_paging_map_object(
6290 vm_map_offset_t *address,
6291 vm_page_t page,
6292 vm_object_t object,
6293 vm_object_offset_t offset,
2d21ac55 6294 vm_map_size_t *size,
593a1d5f 6295 vm_prot_t protection,
2d21ac55 6296 boolean_t can_unlock_object)
91447636
A
6297{
6298 kern_return_t kr;
6299 vm_map_offset_t page_map_offset;
6300 vm_map_size_t map_size;
6301 vm_object_offset_t object_offset;
91447636 6302 int i;
91447636 6303
593a1d5f 6304
91447636 6305 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
2d21ac55 6306 assert(page->busy);
91447636 6307 /*
91447636
A
6308 * Use one of the pre-allocated kernel virtual addresses
6309 * and just enter the VM page in the kernel address space
6310 * at that virtual address.
6311 */
91447636
A
6312 simple_lock(&vm_paging_lock);
6313
91447636
A
6314 /*
6315 * Try and find an available kernel virtual address
6316 * from our pre-allocated pool.
6317 */
6318 page_map_offset = 0;
2d21ac55
A
6319 for (;;) {
6320 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
6321 if (vm_paging_page_inuse[i] == FALSE) {
6322 page_map_offset =
6323 vm_paging_base_address +
6324 (i * PAGE_SIZE);
6325 break;
6326 }
6327 }
6328 if (page_map_offset != 0) {
6329 /* found a space to map our page ! */
6330 break;
6331 }
6332
6333 if (can_unlock_object) {
6334 /*
6335 * If we can afford to unlock the VM object,
6336 * let's take the slow path now...
6337 */
91447636
A
6338 break;
6339 }
2d21ac55
A
6340 /*
6341 * We can't afford to unlock the VM object, so
6342 * let's wait for a space to become available...
6343 */
6344 vm_paging_page_waiter_total++;
6345 vm_paging_page_waiter++;
6346 thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
6347 &vm_paging_lock,
6348 THREAD_UNINT);
6349 vm_paging_page_waiter--;
6350 /* ... and try again */
91447636
A
6351 }
6352
6353 if (page_map_offset != 0) {
6354 /*
6355 * We found a kernel virtual address;
6356 * map the physical page to that virtual address.
6357 */
6358 if (i > vm_paging_max_index) {
6359 vm_paging_max_index = i;
6360 }
6361 vm_paging_page_inuse[i] = TRUE;
6362 simple_unlock(&vm_paging_lock);
2d21ac55 6363
2d21ac55
A
6364 page->pmapped = TRUE;
6365
6366 /*
6367 * Keep the VM object locked over the PMAP_ENTER
6368 * and the actual use of the page by the kernel,
6369 * or this pmap mapping might get undone by a
6370 * vm_object_pmap_protect() call...
6371 */
0c530ab8
A
6372 PMAP_ENTER(kernel_pmap,
6373 page_map_offset,
6374 page,
593a1d5f 6375 protection,
6d2010ae 6376 0,
0c530ab8 6377 TRUE);
91447636
A
6378 vm_paging_objects_mapped++;
6379 vm_paging_pages_mapped++;
6380 *address = page_map_offset;
91447636
A
6381
6382 /* all done and mapped, ready to use ! */
6383 return KERN_SUCCESS;
6384 }
6385
6386 /*
6387 * We ran out of pre-allocated kernel virtual
6388 * addresses. Just map the page in the kernel
6389 * the slow and regular way.
6390 */
6391 vm_paging_no_kernel_page++;
6392 simple_unlock(&vm_paging_lock);
2d21ac55
A
6393 }
6394
6395 if (! can_unlock_object) {
6396 return KERN_NOT_SUPPORTED;
91447636 6397 }
91447636
A
6398
6399 object_offset = vm_object_trunc_page(offset);
6400 map_size = vm_map_round_page(*size);
6401
6402 /*
6403 * Try and map the required range of the object
6404 * in the kernel_map
6405 */
6406
91447636
A
6407 vm_object_reference_locked(object); /* for the map entry */
6408 vm_object_unlock(object);
6409
6410 kr = vm_map_enter(kernel_map,
6411 address,
6412 map_size,
6413 0,
6414 VM_FLAGS_ANYWHERE,
6415 object,
6416 object_offset,
6417 FALSE,
593a1d5f 6418 protection,
91447636
A
6419 VM_PROT_ALL,
6420 VM_INHERIT_NONE);
6421 if (kr != KERN_SUCCESS) {
6422 *address = 0;
6423 *size = 0;
6424 vm_object_deallocate(object); /* for the map entry */
2d21ac55 6425 vm_object_lock(object);
91447636
A
6426 return kr;
6427 }
6428
6429 *size = map_size;
6430
6431 /*
6432 * Enter the mapped pages in the page table now.
6433 */
6434 vm_object_lock(object);
2d21ac55
A
6435 /*
6436 * VM object must be kept locked from before PMAP_ENTER()
6437 * until after the kernel is done accessing the page(s).
6438 * Otherwise, the pmap mappings in the kernel could be
6439 * undone by a call to vm_object_pmap_protect().
6440 */
6441
91447636
A
6442 for (page_map_offset = 0;
6443 map_size != 0;
6444 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
91447636
A
6445
6446 page = vm_page_lookup(object, offset + page_map_offset);
6447 if (page == VM_PAGE_NULL) {
2d21ac55
A
6448 printf("vm_paging_map_object: no page !?");
6449 vm_object_unlock(object);
6450 kr = vm_map_remove(kernel_map, *address, *size,
6451 VM_MAP_NO_FLAGS);
6452 assert(kr == KERN_SUCCESS);
6453 *address = 0;
6454 *size = 0;
6455 vm_object_lock(object);
6456 return KERN_MEMORY_ERROR;
91447636 6457 }
2d21ac55 6458 page->pmapped = TRUE;
91447636 6459
2d21ac55 6460 //assert(pmap_verify_free(page->phys_page));
91447636
A
6461 PMAP_ENTER(kernel_pmap,
6462 *address + page_map_offset,
6463 page,
593a1d5f 6464 protection,
6d2010ae 6465 0,
0c530ab8 6466 TRUE);
91447636
A
6467 }
6468
6469 vm_paging_objects_mapped_slow++;
b0d623f7 6470 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
91447636
A
6471
6472 return KERN_SUCCESS;
6473}
6474
6475/*
6476 * ENCRYPTED SWAP:
6477 * vm_paging_unmap_object:
6478 * Unmaps part of a VM object's pages from the kernel
6479 * virtual address space.
6480 * Context:
6481 * The VM object is locked. This lock will get
6482 * dropped and re-acquired though.
6483 */
6484void
6485vm_paging_unmap_object(
6486 vm_object_t object,
6487 vm_map_offset_t start,
6488 vm_map_offset_t end)
6489{
6490 kern_return_t kr;
91447636 6491 int i;
91447636 6492
0c530ab8 6493 if ((vm_paging_base_address == 0) ||
8f6c56a5
A
6494 (start < vm_paging_base_address) ||
6495 (end > (vm_paging_base_address
2d21ac55 6496 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
91447636
A
6497 /*
6498 * We didn't use our pre-allocated pool of
6499 * kernel virtual address. Deallocate the
6500 * virtual memory.
6501 */
6502 if (object != VM_OBJECT_NULL) {
6503 vm_object_unlock(object);
6504 }
6505 kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
6506 if (object != VM_OBJECT_NULL) {
6507 vm_object_lock(object);
6508 }
6509 assert(kr == KERN_SUCCESS);
6510 } else {
6511 /*
6512 * We used a kernel virtual address from our
6513 * pre-allocated pool. Put it back in the pool
6514 * for next time.
6515 */
91447636 6516 assert(end - start == PAGE_SIZE);
b0d623f7
A
6517 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
6518 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
91447636
A
6519
6520 /* undo the pmap mapping */
0c530ab8 6521 pmap_remove(kernel_pmap, start, end);
91447636
A
6522
6523 simple_lock(&vm_paging_lock);
6524 vm_paging_page_inuse[i] = FALSE;
2d21ac55
A
6525 if (vm_paging_page_waiter) {
6526 thread_wakeup(&vm_paging_page_waiter);
6527 }
91447636 6528 simple_unlock(&vm_paging_lock);
91447636
A
6529 }
6530}
6531
2d21ac55 6532#if CRYPTO
91447636
A
6533/*
6534 * Encryption data.
6535 * "iv" is the "initial vector". Ideally, we want to
6536 * have a different one for each page we encrypt, so that
6537 * crackers can't find encryption patterns too easily.
6538 */
6539#define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
6540boolean_t swap_crypt_ctx_initialized = FALSE;
6541aes_32t swap_crypt_key[8]; /* big enough for a 256 key */
6542aes_ctx swap_crypt_ctx;
6543const unsigned char swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
6544
6545#if DEBUG
6546boolean_t swap_crypt_ctx_tested = FALSE;
6547unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
6548unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
6549unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
6550#endif /* DEBUG */
6551
91447636
A
6552/*
6553 * Initialize the encryption context: key and key size.
6554 */
6555void swap_crypt_ctx_initialize(void); /* forward */
6556void
6557swap_crypt_ctx_initialize(void)
6558{
6559 unsigned int i;
6560
6561 /*
6562 * No need for locking to protect swap_crypt_ctx_initialized
6563 * because the first use of encryption will come from the
6564 * pageout thread (we won't pagein before there's been a pageout)
6565 * and there's only one pageout thread.
6566 */
6567 if (swap_crypt_ctx_initialized == FALSE) {
6568 for (i = 0;
6569 i < (sizeof (swap_crypt_key) /
6570 sizeof (swap_crypt_key[0]));
6571 i++) {
6572 swap_crypt_key[i] = random();
6573 }
6574 aes_encrypt_key((const unsigned char *) swap_crypt_key,
6575 SWAP_CRYPT_AES_KEY_SIZE,
6576 &swap_crypt_ctx.encrypt);
6577 aes_decrypt_key((const unsigned char *) swap_crypt_key,
6578 SWAP_CRYPT_AES_KEY_SIZE,
6579 &swap_crypt_ctx.decrypt);
6580 swap_crypt_ctx_initialized = TRUE;
6581 }
6582
6583#if DEBUG
6584 /*
6585 * Validate the encryption algorithms.
6586 */
6587 if (swap_crypt_ctx_tested == FALSE) {
6588 /* initialize */
6589 for (i = 0; i < 4096; i++) {
6590 swap_crypt_test_page_ref[i] = (char) i;
6591 }
6592 /* encrypt */
6593 aes_encrypt_cbc(swap_crypt_test_page_ref,
6594 swap_crypt_null_iv,
6595 PAGE_SIZE / AES_BLOCK_SIZE,
6596 swap_crypt_test_page_encrypt,
6597 &swap_crypt_ctx.encrypt);
6598 /* decrypt */
6599 aes_decrypt_cbc(swap_crypt_test_page_encrypt,
6600 swap_crypt_null_iv,
6601 PAGE_SIZE / AES_BLOCK_SIZE,
6602 swap_crypt_test_page_decrypt,
6603 &swap_crypt_ctx.decrypt);
6604 /* compare result with original */
6605 for (i = 0; i < 4096; i ++) {
6606 if (swap_crypt_test_page_decrypt[i] !=
6607 swap_crypt_test_page_ref[i]) {
6608 panic("encryption test failed");
6609 }
6610 }
6611
6612 /* encrypt again */
6613 aes_encrypt_cbc(swap_crypt_test_page_decrypt,
6614 swap_crypt_null_iv,
6615 PAGE_SIZE / AES_BLOCK_SIZE,
6616 swap_crypt_test_page_decrypt,
6617 &swap_crypt_ctx.encrypt);
6618 /* decrypt in place */
6619 aes_decrypt_cbc(swap_crypt_test_page_decrypt,
6620 swap_crypt_null_iv,
6621 PAGE_SIZE / AES_BLOCK_SIZE,
6622 swap_crypt_test_page_decrypt,
6623 &swap_crypt_ctx.decrypt);
6624 for (i = 0; i < 4096; i ++) {
6625 if (swap_crypt_test_page_decrypt[i] !=
6626 swap_crypt_test_page_ref[i]) {
6627 panic("in place encryption test failed");
6628 }
6629 }
6630
6631 swap_crypt_ctx_tested = TRUE;
6632 }
6633#endif /* DEBUG */
6634}
6635
6636/*
6637 * ENCRYPTED SWAP:
6638 * vm_page_encrypt:
6639 * Encrypt the given page, for secure paging.
6640 * The page might already be mapped at kernel virtual
6641 * address "kernel_mapping_offset". Otherwise, we need
6642 * to map it.
6643 *
6644 * Context:
6645 * The page's object is locked, but this lock will be released
6646 * and re-acquired.
6647 * The page is busy and not accessible by users (not entered in any pmap).
6648 */
6649void
6650vm_page_encrypt(
6651 vm_page_t page,
6652 vm_map_offset_t kernel_mapping_offset)
6653{
91447636 6654 kern_return_t kr;
91447636
A
6655 vm_map_size_t kernel_mapping_size;
6656 vm_offset_t kernel_vaddr;
6657 union {
6658 unsigned char aes_iv[AES_BLOCK_SIZE];
6659 struct {
6660 memory_object_t pager_object;
6661 vm_object_offset_t paging_offset;
6662 } vm;
6663 } encrypt_iv;
6664
6665 if (! vm_pages_encrypted) {
6666 vm_pages_encrypted = TRUE;
6667 }
6668
6669 assert(page->busy);
6670 assert(page->dirty || page->precious);
6671
6672 if (page->encrypted) {
6673 /*
6674 * Already encrypted: no need to do it again.
6675 */
6676 vm_page_encrypt_already_encrypted_counter++;
6677 return;
6678 }
6679 ASSERT_PAGE_DECRYPTED(page);
6680
6681 /*
2d21ac55
A
6682 * Take a paging-in-progress reference to keep the object
6683 * alive even if we have to unlock it (in vm_paging_map_object()
6684 * for example)...
91447636 6685 */
2d21ac55 6686 vm_object_paging_begin(page->object);
91447636
A
6687
6688 if (kernel_mapping_offset == 0) {
6689 /*
6690 * The page hasn't already been mapped in kernel space
6691 * by the caller. Map it now, so that we can access
6692 * its contents and encrypt them.
6693 */
6694 kernel_mapping_size = PAGE_SIZE;
6695 kr = vm_paging_map_object(&kernel_mapping_offset,
6696 page,
6697 page->object,
6698 page->offset,
2d21ac55 6699 &kernel_mapping_size,
593a1d5f 6700 VM_PROT_READ | VM_PROT_WRITE,
2d21ac55 6701 FALSE);
91447636
A
6702 if (kr != KERN_SUCCESS) {
6703 panic("vm_page_encrypt: "
6704 "could not map page in kernel: 0x%x\n",
6705 kr);
6706 }
6707 } else {
6708 kernel_mapping_size = 0;
6709 }
6710 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
6711
6712 if (swap_crypt_ctx_initialized == FALSE) {
6713 swap_crypt_ctx_initialize();
6714 }
6715 assert(swap_crypt_ctx_initialized);
6716
6717 /*
6718 * Prepare an "initial vector" for the encryption.
6719 * We use the "pager" and the "paging_offset" for that
6720 * page to obfuscate the encrypted data a bit more and
6721 * prevent crackers from finding patterns that they could
6722 * use to break the key.
6723 */
6724 bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
6725 encrypt_iv.vm.pager_object = page->object->pager;
6726 encrypt_iv.vm.paging_offset =
6727 page->object->paging_offset + page->offset;
6728
91447636
A
6729 /* encrypt the "initial vector" */
6730 aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
6731 swap_crypt_null_iv,
6732 1,
6733 &encrypt_iv.aes_iv[0],
6734 &swap_crypt_ctx.encrypt);
6735
6736 /*
6737 * Encrypt the page.
6738 */
6739 aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
6740 &encrypt_iv.aes_iv[0],
6741 PAGE_SIZE / AES_BLOCK_SIZE,
6742 (unsigned char *) kernel_vaddr,
6743 &swap_crypt_ctx.encrypt);
6744
6745 vm_page_encrypt_counter++;
6746
91447636
A
6747 /*
6748 * Unmap the page from the kernel's address space,
6749 * if we had to map it ourselves. Otherwise, let
6750 * the caller undo the mapping if needed.
6751 */
6752 if (kernel_mapping_size != 0) {
6753 vm_paging_unmap_object(page->object,
6754 kernel_mapping_offset,
6755 kernel_mapping_offset + kernel_mapping_size);
6756 }
6757
6758 /*
2d21ac55 6759 * Clear the "reference" and "modified" bits.
91447636
A
6760 * This should clean up any impact the encryption had
6761 * on them.
2d21ac55
A
6762 * The page was kept busy and disconnected from all pmaps,
6763 * so it can't have been referenced or modified from user
6764 * space.
6765 * The software bits will be reset later after the I/O
6766 * has completed (in upl_commit_range()).
91447636 6767 */
2d21ac55 6768 pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
91447636
A
6769
6770 page->encrypted = TRUE;
2d21ac55
A
6771
6772 vm_object_paging_end(page->object);
91447636
A
6773}
6774
6775/*
6776 * ENCRYPTED SWAP:
6777 * vm_page_decrypt:
6778 * Decrypt the given page.
6779 * The page might already be mapped at kernel virtual
6780 * address "kernel_mapping_offset". Otherwise, we need
6781 * to map it.
6782 *
6783 * Context:
6784 * The page's VM object is locked but will be unlocked and relocked.
6785 * The page is busy and not accessible by users (not entered in any pmap).
6786 */
6787void
6788vm_page_decrypt(
6789 vm_page_t page,
6790 vm_map_offset_t kernel_mapping_offset)
6791{
91447636
A
6792 kern_return_t kr;
6793 vm_map_size_t kernel_mapping_size;
6794 vm_offset_t kernel_vaddr;
91447636
A
6795 union {
6796 unsigned char aes_iv[AES_BLOCK_SIZE];
6797 struct {
6798 memory_object_t pager_object;
6799 vm_object_offset_t paging_offset;
6800 } vm;
6801 } decrypt_iv;
6d2010ae 6802 boolean_t was_dirty;
91447636
A
6803
6804 assert(page->busy);
6805 assert(page->encrypted);
6806
6d2010ae
A
6807 was_dirty = page->dirty;
6808
91447636 6809 /*
2d21ac55
A
6810 * Take a paging-in-progress reference to keep the object
6811 * alive even if we have to unlock it (in vm_paging_map_object()
6812 * for example)...
91447636 6813 */
2d21ac55 6814 vm_object_paging_begin(page->object);
91447636
A
6815
6816 if (kernel_mapping_offset == 0) {
6817 /*
6818 * The page hasn't already been mapped in kernel space
6819 * by the caller. Map it now, so that we can access
6820 * its contents and decrypt them.
6821 */
6822 kernel_mapping_size = PAGE_SIZE;
6823 kr = vm_paging_map_object(&kernel_mapping_offset,
6824 page,
6825 page->object,
6826 page->offset,
2d21ac55 6827 &kernel_mapping_size,
593a1d5f 6828 VM_PROT_READ | VM_PROT_WRITE,
2d21ac55 6829 FALSE);
91447636
A
6830 if (kr != KERN_SUCCESS) {
6831 panic("vm_page_decrypt: "
2d21ac55
A
6832 "could not map page in kernel: 0x%x\n",
6833 kr);
91447636
A
6834 }
6835 } else {
6836 kernel_mapping_size = 0;
6837 }
6838 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
6839
6840 assert(swap_crypt_ctx_initialized);
6841
6842 /*
6843 * Prepare an "initial vector" for the decryption.
6844 * It has to be the same as the "initial vector" we
6845 * used to encrypt that page.
6846 */
6847 bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
6848 decrypt_iv.vm.pager_object = page->object->pager;
6849 decrypt_iv.vm.paging_offset =
6850 page->object->paging_offset + page->offset;
6851
91447636
A
6852 /* encrypt the "initial vector" */
6853 aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
6854 swap_crypt_null_iv,
6855 1,
6856 &decrypt_iv.aes_iv[0],
6857 &swap_crypt_ctx.encrypt);
6858
6859 /*
6860 * Decrypt the page.
6861 */
6862 aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
6863 &decrypt_iv.aes_iv[0],
6864 PAGE_SIZE / AES_BLOCK_SIZE,
6865 (unsigned char *) kernel_vaddr,
6866 &swap_crypt_ctx.decrypt);
6867 vm_page_decrypt_counter++;
6868
91447636
A
6869 /*
6870 * Unmap the page from the kernel's address space,
6871 * if we had to map it ourselves. Otherwise, let
6872 * the caller undo the mapping if needed.
6873 */
6874 if (kernel_mapping_size != 0) {
6875 vm_paging_unmap_object(page->object,
6876 kernel_vaddr,
6877 kernel_vaddr + PAGE_SIZE);
6878 }
6879
6d2010ae
A
6880 if (was_dirty) {
6881 /*
6882 * The pager did not specify that the page would be
6883 * clean when it got paged in, so let's not clean it here
6884 * either.
6885 */
6886 } else {
6887 /*
6888 * After decryption, the page is actually still clean.
6889 * It was encrypted as part of paging, which "cleans"
6890 * the "dirty" pages.
6891 * Noone could access it after it was encrypted
6892 * and the decryption doesn't count.
6893 */
6894 page->dirty = FALSE;
6895 assert (page->cs_validated == FALSE);
6896 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6897 }
91447636
A
6898 page->encrypted = FALSE;
6899
6900 /*
6901 * We've just modified the page's contents via the data cache and part
6902 * of the new contents might still be in the cache and not yet in RAM.
6903 * Since the page is now available and might get gathered in a UPL to
6904 * be part of a DMA transfer from a driver that expects the memory to
6905 * be coherent at this point, we have to flush the data cache.
6906 */
0c530ab8 6907 pmap_sync_page_attributes_phys(page->phys_page);
91447636
A
6908 /*
6909 * Since the page is not mapped yet, some code might assume that it
6910 * doesn't need to invalidate the instruction cache when writing to
2d21ac55
A
6911 * that page. That code relies on "pmapped" being FALSE, so that the
6912 * caches get synchronized when the page is first mapped.
91447636 6913 */
2d21ac55
A
6914 assert(pmap_verify_free(page->phys_page));
6915 page->pmapped = FALSE;
4a3eedf9 6916 page->wpmapped = FALSE;
2d21ac55
A
6917
6918 vm_object_paging_end(page->object);
91447636
A
6919}
6920
b0d623f7 6921#if DEVELOPMENT || DEBUG
91447636
A
6922unsigned long upl_encrypt_upls = 0;
6923unsigned long upl_encrypt_pages = 0;
b0d623f7 6924#endif
91447636
A
6925
6926/*
6927 * ENCRYPTED SWAP:
6928 *
6929 * upl_encrypt:
6930 * Encrypts all the pages in the UPL, within the specified range.
6931 *
6932 */
6933void
6934upl_encrypt(
6935 upl_t upl,
6936 upl_offset_t crypt_offset,
6937 upl_size_t crypt_size)
6938{
b0d623f7
A
6939 upl_size_t upl_size, subupl_size=crypt_size;
6940 upl_offset_t offset_in_upl, subupl_offset=crypt_offset;
91447636 6941 vm_object_t upl_object;
b0d623f7 6942 vm_object_offset_t upl_offset;
91447636
A
6943 vm_page_t page;
6944 vm_object_t shadow_object;
6945 vm_object_offset_t shadow_offset;
6946 vm_object_offset_t paging_offset;
6947 vm_object_offset_t base_offset;
b0d623f7
A
6948 int isVectorUPL = 0;
6949 upl_t vector_upl = NULL;
6950
6951 if((isVectorUPL = vector_upl_is_valid(upl)))
6952 vector_upl = upl;
6953
6954process_upl_to_encrypt:
6955 if(isVectorUPL) {
6956 crypt_size = subupl_size;
6957 crypt_offset = subupl_offset;
6958 upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
6959 if(upl == NULL)
6960 panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
6961 subupl_size -= crypt_size;
6962 subupl_offset += crypt_size;
6963 }
91447636 6964
b0d623f7 6965#if DEVELOPMENT || DEBUG
91447636
A
6966 upl_encrypt_upls++;
6967 upl_encrypt_pages += crypt_size / PAGE_SIZE;
b0d623f7 6968#endif
91447636
A
6969 upl_object = upl->map_object;
6970 upl_offset = upl->offset;
6971 upl_size = upl->size;
6972
91447636
A
6973 vm_object_lock(upl_object);
6974
6975 /*
6976 * Find the VM object that contains the actual pages.
6977 */
6978 if (upl_object->pageout) {
6979 shadow_object = upl_object->shadow;
6980 /*
6981 * The offset in the shadow object is actually also
6982 * accounted for in upl->offset. It possibly shouldn't be
6983 * this way, but for now don't account for it twice.
6984 */
6985 shadow_offset = 0;
6986 assert(upl_object->paging_offset == 0); /* XXX ? */
6987 vm_object_lock(shadow_object);
6988 } else {
6989 shadow_object = upl_object;
6990 shadow_offset = 0;
6991 }
6992
6993 paging_offset = shadow_object->paging_offset;
6994 vm_object_paging_begin(shadow_object);
6995
2d21ac55
A
6996 if (shadow_object != upl_object)
6997 vm_object_unlock(upl_object);
6998
91447636
A
6999
7000 base_offset = shadow_offset;
7001 base_offset += upl_offset;
7002 base_offset += crypt_offset;
7003 base_offset -= paging_offset;
91447636 7004
2d21ac55 7005 assert(crypt_offset + crypt_size <= upl_size);
91447636 7006
b0d623f7
A
7007 for (offset_in_upl = 0;
7008 offset_in_upl < crypt_size;
7009 offset_in_upl += PAGE_SIZE) {
91447636 7010 page = vm_page_lookup(shadow_object,
b0d623f7 7011 base_offset + offset_in_upl);
91447636
A
7012 if (page == VM_PAGE_NULL) {
7013 panic("upl_encrypt: "
6d2010ae 7014 "no page for (obj=%p,off=0x%llx+0x%x)!\n",
91447636
A
7015 shadow_object,
7016 base_offset,
b0d623f7 7017 offset_in_upl);
91447636 7018 }
2d21ac55
A
7019 /*
7020 * Disconnect the page from all pmaps, so that nobody can
7021 * access it while it's encrypted. After that point, all
7022 * accesses to this page will cause a page fault and block
7023 * while the page is busy being encrypted. After the
7024 * encryption completes, any access will cause a
7025 * page fault and the page gets decrypted at that time.
7026 */
7027 pmap_disconnect(page->phys_page);
91447636 7028 vm_page_encrypt(page, 0);
2d21ac55 7029
b0d623f7 7030 if (vm_object_lock_avoid(shadow_object)) {
2d21ac55
A
7031 /*
7032 * Give vm_pageout_scan() a chance to convert more
7033 * pages from "clean-in-place" to "clean-and-free",
7034 * if it's interested in the same pages we selected
7035 * in this cluster.
7036 */
7037 vm_object_unlock(shadow_object);
b0d623f7 7038 mutex_pause(2);
2d21ac55
A
7039 vm_object_lock(shadow_object);
7040 }
91447636
A
7041 }
7042
7043 vm_object_paging_end(shadow_object);
7044 vm_object_unlock(shadow_object);
b0d623f7
A
7045
7046 if(isVectorUPL && subupl_size)
7047 goto process_upl_to_encrypt;
91447636
A
7048}
7049
2d21ac55
A
7050#else /* CRYPTO */
7051void
7052upl_encrypt(
7053 __unused upl_t upl,
7054 __unused upl_offset_t crypt_offset,
7055 __unused upl_size_t crypt_size)
7056{
7057}
7058
7059void
7060vm_page_encrypt(
7061 __unused vm_page_t page,
7062 __unused vm_map_offset_t kernel_mapping_offset)
7063{
7064}
7065
7066void
7067vm_page_decrypt(
7068 __unused vm_page_t page,
7069 __unused vm_map_offset_t kernel_mapping_offset)
7070{
7071}
7072
7073#endif /* CRYPTO */
7074
b0d623f7
A
7075void
7076vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
7077{
0b4c1975
A
7078 boolean_t pageout;
7079
7080 pageout = page->pageout;
7081
b0d623f7
A
7082 page->list_req_pending = FALSE;
7083 page->cleaning = FALSE;
7084 page->pageout = FALSE;
7085
7086 if (!queues_locked) {
7087 vm_page_lockspin_queues();
7088 }
7089
7090 /*
7091 * need to drop the laundry count...
7092 * we may also need to remove it
7093 * from the I/O paging queue...
7094 * vm_pageout_throttle_up handles both cases
7095 *
7096 * the laundry and pageout_queue flags are cleared...
7097 */
7098 vm_pageout_throttle_up(page);
b0d623f7 7099
0b4c1975
A
7100 if (pageout == TRUE) {
7101 /*
7102 * toss the wire count we picked up
7103 * when we intially set this page up
7104 * to be cleaned...
7105 */
7106 vm_page_unwire(page, TRUE);
7107 }
b0d623f7
A
7108 vm_page_steal_pageout_page++;
7109
7110 if (!queues_locked) {
7111 vm_page_unlock_queues();
7112 }
7113}
7114
7115upl_t
7116vector_upl_create(vm_offset_t upl_offset)
7117{
7118 int vector_upl_size = sizeof(struct _vector_upl);
7119 int i=0;
7120 upl_t upl;
7121 vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
7122
7123 upl = upl_create(0,UPL_VECTOR,0);
7124 upl->vector_upl = vector_upl;
7125 upl->offset = upl_offset;
7126 vector_upl->size = 0;
7127 vector_upl->offset = upl_offset;
7128 vector_upl->invalid_upls=0;
7129 vector_upl->num_upls=0;
7130 vector_upl->pagelist = NULL;
7131
7132 for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
7133 vector_upl->upl_iostates[i].size = 0;
7134 vector_upl->upl_iostates[i].offset = 0;
7135
7136 }
7137 return upl;
7138}
7139
7140void
7141vector_upl_deallocate(upl_t upl)
7142{
7143 if(upl) {
7144 vector_upl_t vector_upl = upl->vector_upl;
7145 if(vector_upl) {
7146 if(vector_upl->invalid_upls != vector_upl->num_upls)
7147 panic("Deallocating non-empty Vectored UPL\n");
7148 kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
7149 vector_upl->invalid_upls=0;
7150 vector_upl->num_upls = 0;
7151 vector_upl->pagelist = NULL;
7152 vector_upl->size = 0;
7153 vector_upl->offset = 0;
7154 kfree(vector_upl, sizeof(struct _vector_upl));
7155 vector_upl = (vector_upl_t)0xdeadbeef;
7156 }
7157 else
7158 panic("vector_upl_deallocate was passed a non-vectored upl\n");
7159 }
7160 else
7161 panic("vector_upl_deallocate was passed a NULL upl\n");
7162}
7163
7164boolean_t
7165vector_upl_is_valid(upl_t upl)
7166{
7167 if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
7168 vector_upl_t vector_upl = upl->vector_upl;
7169 if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
7170 return FALSE;
7171 else
7172 return TRUE;
7173 }
7174 return FALSE;
7175}
7176
7177boolean_t
7178vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
7179{
7180 if(vector_upl_is_valid(upl)) {
7181 vector_upl_t vector_upl = upl->vector_upl;
7182
7183 if(vector_upl) {
7184 if(subupl) {
7185 if(io_size) {
7186 if(io_size < PAGE_SIZE)
7187 io_size = PAGE_SIZE;
7188 subupl->vector_upl = (void*)vector_upl;
7189 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
7190 vector_upl->size += io_size;
7191 upl->size += io_size;
7192 }
7193 else {
7194 uint32_t i=0,invalid_upls=0;
7195 for(i = 0; i < vector_upl->num_upls; i++) {
7196 if(vector_upl->upl_elems[i] == subupl)
7197 break;
7198 }
7199 if(i == vector_upl->num_upls)
7200 panic("Trying to remove sub-upl when none exists");
7201
7202 vector_upl->upl_elems[i] = NULL;
7203 invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
7204 if(invalid_upls == vector_upl->num_upls)
7205 return TRUE;
7206 else
7207 return FALSE;
7208 }
7209 }
7210 else
7211 panic("vector_upl_set_subupl was passed a NULL upl element\n");
7212 }
7213 else
7214 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
7215 }
7216 else
7217 panic("vector_upl_set_subupl was passed a NULL upl\n");
7218
7219 return FALSE;
7220}
7221
7222void
7223vector_upl_set_pagelist(upl_t upl)
7224{
7225 if(vector_upl_is_valid(upl)) {
7226 uint32_t i=0;
7227 vector_upl_t vector_upl = upl->vector_upl;
7228
7229 if(vector_upl) {
7230 vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
7231
7232 vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
7233
7234 for(i=0; i < vector_upl->num_upls; i++) {
7235 cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
7236 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
7237 pagelist_size += cur_upl_pagelist_size;
7238 if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
7239 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
7240 }
7241 assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
7242 }
7243 else
7244 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
7245 }
7246 else
7247 panic("vector_upl_set_pagelist was passed a NULL upl\n");
7248
7249}
7250
7251upl_t
7252vector_upl_subupl_byindex(upl_t upl, uint32_t index)
7253{
7254 if(vector_upl_is_valid(upl)) {
7255 vector_upl_t vector_upl = upl->vector_upl;
7256 if(vector_upl) {
7257 if(index < vector_upl->num_upls)
7258 return vector_upl->upl_elems[index];
7259 }
7260 else
7261 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
7262 }
7263 return NULL;
7264}
7265
7266upl_t
7267vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
7268{
7269 if(vector_upl_is_valid(upl)) {
7270 uint32_t i=0;
7271 vector_upl_t vector_upl = upl->vector_upl;
7272
7273 if(vector_upl) {
7274 upl_t subupl = NULL;
7275 vector_upl_iostates_t subupl_state;
7276
7277 for(i=0; i < vector_upl->num_upls; i++) {
7278 subupl = vector_upl->upl_elems[i];
7279 subupl_state = vector_upl->upl_iostates[i];
7280 if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
7281 /* We could have been passed an offset/size pair that belongs
7282 * to an UPL element that has already been committed/aborted.
7283 * If so, return NULL.
7284 */
7285 if(subupl == NULL)
7286 return NULL;
7287 if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
7288 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
7289 if(*upl_size > subupl_state.size)
7290 *upl_size = subupl_state.size;
7291 }
7292 if(*upl_offset >= subupl_state.offset)
7293 *upl_offset -= subupl_state.offset;
7294 else if(i)
7295 panic("Vector UPL offset miscalculation\n");
7296 return subupl;
7297 }
7298 }
7299 }
7300 else
7301 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
7302 }
7303 return NULL;
7304}
7305
7306void
7307vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
7308{
7309 *v_upl_submap = NULL;
7310
7311 if(vector_upl_is_valid(upl)) {
7312 vector_upl_t vector_upl = upl->vector_upl;
7313 if(vector_upl) {
7314 *v_upl_submap = vector_upl->submap;
7315 *submap_dst_addr = vector_upl->submap_dst_addr;
7316 }
7317 else
7318 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
7319 }
7320 else
7321 panic("vector_upl_get_submap was passed a null UPL\n");
7322}
7323
7324void
7325vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
7326{
7327 if(vector_upl_is_valid(upl)) {
7328 vector_upl_t vector_upl = upl->vector_upl;
7329 if(vector_upl) {
7330 vector_upl->submap = submap;
7331 vector_upl->submap_dst_addr = submap_dst_addr;
7332 }
7333 else
7334 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
7335 }
7336 else
7337 panic("vector_upl_get_submap was passed a NULL UPL\n");
7338}
7339
7340void
7341vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
7342{
7343 if(vector_upl_is_valid(upl)) {
7344 uint32_t i = 0;
7345 vector_upl_t vector_upl = upl->vector_upl;
7346
7347 if(vector_upl) {
7348 for(i = 0; i < vector_upl->num_upls; i++) {
7349 if(vector_upl->upl_elems[i] == subupl)
7350 break;
7351 }
7352
7353 if(i == vector_upl->num_upls)
7354 panic("setting sub-upl iostate when none exists");
7355
7356 vector_upl->upl_iostates[i].offset = offset;
7357 if(size < PAGE_SIZE)
7358 size = PAGE_SIZE;
7359 vector_upl->upl_iostates[i].size = size;
7360 }
7361 else
7362 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
7363 }
7364 else
7365 panic("vector_upl_set_iostate was passed a NULL UPL\n");
7366}
7367
7368void
7369vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
7370{
7371 if(vector_upl_is_valid(upl)) {
7372 uint32_t i = 0;
7373 vector_upl_t vector_upl = upl->vector_upl;
7374
7375 if(vector_upl) {
7376 for(i = 0; i < vector_upl->num_upls; i++) {
7377 if(vector_upl->upl_elems[i] == subupl)
7378 break;
7379 }
7380
7381 if(i == vector_upl->num_upls)
7382 panic("getting sub-upl iostate when none exists");
7383
7384 *offset = vector_upl->upl_iostates[i].offset;
7385 *size = vector_upl->upl_iostates[i].size;
7386 }
7387 else
7388 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
7389 }
7390 else
7391 panic("vector_upl_get_iostate was passed a NULL UPL\n");
7392}
7393
7394void
7395vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
7396{
7397 if(vector_upl_is_valid(upl)) {
7398 vector_upl_t vector_upl = upl->vector_upl;
7399 if(vector_upl) {
7400 if(index < vector_upl->num_upls) {
7401 *offset = vector_upl->upl_iostates[index].offset;
7402 *size = vector_upl->upl_iostates[index].size;
7403 }
7404 else
7405 *offset = *size = 0;
7406 }
7407 else
7408 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
7409 }
7410 else
7411 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
7412}
7413
7414upl_page_info_t *
7415upl_get_internal_vectorupl_pagelist(upl_t upl)
7416{
7417 return ((vector_upl_t)(upl->vector_upl))->pagelist;
7418}
7419
7420void *
7421upl_get_internal_vectorupl(upl_t upl)
7422{
7423 return upl->vector_upl;
7424}
7425
91447636
A
7426vm_size_t
7427upl_get_internal_pagelist_offset(void)
7428{
7429 return sizeof(struct upl);
7430}
7431
91447636
A
7432void
7433upl_clear_dirty(
0c530ab8
A
7434 upl_t upl,
7435 boolean_t value)
91447636 7436{
0c530ab8
A
7437 if (value) {
7438 upl->flags |= UPL_CLEAR_DIRTY;
7439 } else {
7440 upl->flags &= ~UPL_CLEAR_DIRTY;
7441 }
91447636
A
7442}
7443
6d2010ae
A
7444void
7445upl_set_referenced(
7446 upl_t upl,
7447 boolean_t value)
7448{
7449 upl_lock(upl);
7450 if (value) {
7451 upl->ext_ref_count++;
7452 } else {
7453 if (!upl->ext_ref_count) {
7454 panic("upl_set_referenced not %p\n", upl);
7455 }
7456 upl->ext_ref_count--;
7457 }
7458 upl_unlock(upl);
7459}
7460
7461boolean_t
7462vm_page_is_slideable(vm_page_t m)
7463{
7464 boolean_t result = FALSE;
7465 vm_object_t slide_object = slide_info.slide_object;
7466 mach_vm_offset_t start = slide_info.start;
7467 mach_vm_offset_t end = slide_info.end;
7468
7469 /* make sure our page belongs to the one object allowed to do this */
7470 if (slide_object == VM_OBJECT_NULL) {
7471 return result;
7472 }
7473
7474 /*Should we traverse down the chain?*/
7475 if (m->object != slide_object) {
7476 return result;
7477 }
7478
7479 if(!m->slid && (start <= m->offset && end > m->offset)) {
7480 result = TRUE;
7481 }
7482 return result;
7483}
7484
7485int vm_page_slide_counter = 0;
7486int vm_page_slide_errors = 0;
7487kern_return_t
7488vm_page_slide(
7489 vm_page_t page,
7490 vm_map_offset_t kernel_mapping_offset)
7491{
7492 kern_return_t kr;
7493 vm_map_size_t kernel_mapping_size;
7494 vm_offset_t kernel_vaddr;
7495 uint32_t pageIndex = 0;
7496
7497 assert(!page->slid);
7498
7499 /*
7500 * Take a paging-in-progress reference to keep the object
7501 * alive even if we have to unlock it (in vm_paging_map_object()
7502 * for example)...
7503 */
7504 vm_object_paging_begin(page->object);
7505
7506 if (kernel_mapping_offset == 0) {
7507 /*
7508 * The page hasn't already been mapped in kernel space
7509 * by the caller. Map it now, so that we can access
7510 * its contents and decrypt them.
7511 */
7512 kernel_mapping_size = PAGE_SIZE;
7513 kr = vm_paging_map_object(&kernel_mapping_offset,
7514 page,
7515 page->object,
7516 page->offset,
7517 &kernel_mapping_size,
7518 VM_PROT_READ | VM_PROT_WRITE,
7519 FALSE);
7520 if (kr != KERN_SUCCESS) {
7521 panic("vm_page_slide: "
7522 "could not map page in kernel: 0x%x\n",
7523 kr);
7524 }
7525 } else {
7526 kernel_mapping_size = 0;
7527 }
7528 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
7529
7530 /*
7531 * Slide the pointers on the page.
7532 */
7533
7534 /*assert that slide_file_info.start/end are page-aligned?*/
7535
7536 pageIndex = (uint32_t)((page->offset - slide_info.start)/PAGE_SIZE);
7537 kr = vm_shared_region_slide(kernel_vaddr, pageIndex);
7538 vm_page_slide_counter++;
7539
7540 /*
7541 * Unmap the page from the kernel's address space,
7542 */
7543 if (kernel_mapping_size != 0) {
7544 vm_paging_unmap_object(page->object,
7545 kernel_vaddr,
7546 kernel_vaddr + PAGE_SIZE);
7547 }
7548
7549 page->dirty = FALSE;
7550 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
7551
7552 if (kr == KERN_SUCCESS) {
7553 page->slid = TRUE;
7554 } else {
7555 page->error = TRUE;
7556 vm_page_slide_errors++;
7557 }
7558
7559 vm_object_paging_end(page->object);
7560
7561 return kr;
7562}
7563
91447636
A
7564
7565#ifdef MACH_BSD
1c79356b 7566
2d21ac55
A
7567boolean_t upl_device_page(upl_page_info_t *upl)
7568{
7569 return(UPL_DEVICE_PAGE(upl));
7570}
1c79356b
A
7571boolean_t upl_page_present(upl_page_info_t *upl, int index)
7572{
7573 return(UPL_PAGE_PRESENT(upl, index));
7574}
2d21ac55
A
7575boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
7576{
7577 return(UPL_SPECULATIVE_PAGE(upl, index));
7578}
1c79356b
A
7579boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
7580{
7581 return(UPL_DIRTY_PAGE(upl, index));
7582}
7583boolean_t upl_valid_page(upl_page_info_t *upl, int index)
7584{
7585 return(UPL_VALID_PAGE(upl, index));
7586}
91447636 7587ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
1c79356b 7588{
91447636 7589 return(UPL_PHYS_PAGE(upl, index));
1c79356b
A
7590}
7591
2d21ac55 7592
0b4e3aa0
A
7593void
7594vm_countdirtypages(void)
1c79356b
A
7595{
7596 vm_page_t m;
7597 int dpages;
7598 int pgopages;
7599 int precpages;
7600
7601
7602 dpages=0;
7603 pgopages=0;
7604 precpages=0;
7605
7606 vm_page_lock_queues();
7607 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
7608 do {
7609 if (m ==(vm_page_t )0) break;
7610
7611 if(m->dirty) dpages++;
7612 if(m->pageout) pgopages++;
7613 if(m->precious) precpages++;
7614
91447636 7615 assert(m->object != kernel_object);
1c79356b
A
7616 m = (vm_page_t) queue_next(&m->pageq);
7617 if (m ==(vm_page_t )0) break;
7618
7619 } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
7620 vm_page_unlock_queues();
9bccf70c 7621
2d21ac55
A
7622 vm_page_lock_queues();
7623 m = (vm_page_t) queue_first(&vm_page_queue_throttled);
7624 do {
7625 if (m ==(vm_page_t )0) break;
7626
7627 dpages++;
7628 assert(m->dirty);
7629 assert(!m->pageout);
7630 assert(m->object != kernel_object);
7631 m = (vm_page_t) queue_next(&m->pageq);
7632 if (m ==(vm_page_t )0) break;
7633
7634 } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
7635 vm_page_unlock_queues();
7636
9bccf70c
A
7637 vm_page_lock_queues();
7638 m = (vm_page_t) queue_first(&vm_page_queue_zf);
7639 do {
7640 if (m ==(vm_page_t )0) break;
7641
7642 if(m->dirty) dpages++;
7643 if(m->pageout) pgopages++;
7644 if(m->precious) precpages++;
7645
91447636 7646 assert(m->object != kernel_object);
9bccf70c
A
7647 m = (vm_page_t) queue_next(&m->pageq);
7648 if (m ==(vm_page_t )0) break;
7649
7650 } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
7651 vm_page_unlock_queues();
1c79356b
A
7652
7653 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
7654
7655 dpages=0;
7656 pgopages=0;
7657 precpages=0;
7658
7659 vm_page_lock_queues();
7660 m = (vm_page_t) queue_first(&vm_page_queue_active);
7661
7662 do {
7663 if(m == (vm_page_t )0) break;
7664 if(m->dirty) dpages++;
7665 if(m->pageout) pgopages++;
7666 if(m->precious) precpages++;
7667
91447636 7668 assert(m->object != kernel_object);
1c79356b
A
7669 m = (vm_page_t) queue_next(&m->pageq);
7670 if(m == (vm_page_t )0) break;
7671
7672 } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
7673 vm_page_unlock_queues();
7674
7675 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
7676
7677}
7678#endif /* MACH_BSD */
7679
0c530ab8 7680ppnum_t upl_get_highest_page(
2d21ac55 7681 upl_t upl)
0c530ab8 7682{
2d21ac55 7683 return upl->highest_page;
0c530ab8
A
7684}
7685
b0d623f7
A
7686upl_size_t upl_get_size(
7687 upl_t upl)
7688{
7689 return upl->size;
7690}
7691
7692#if UPL_DEBUG
7693kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
1c79356b
A
7694{
7695 upl->ubc_alias1 = alias1;
7696 upl->ubc_alias2 = alias2;
7697 return KERN_SUCCESS;
7698}
b0d623f7 7699int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
1c79356b
A
7700{
7701 if(al)
7702 *al = upl->ubc_alias1;
7703 if(al2)
7704 *al2 = upl->ubc_alias2;
7705 return KERN_SUCCESS;
7706}
91447636 7707#endif /* UPL_DEBUG */
1c79356b
A
7708
7709
7710
7711#if MACH_KDB
7712#include <ddb/db_output.h>
7713#include <ddb/db_print.h>
7714#include <vm/vm_print.h>
7715
7716#define printf kdbprintf
1c79356b
A
7717void db_pageout(void);
7718
7719void
7720db_vm(void)
7721{
1c79356b
A
7722
7723 iprintf("VM Statistics:\n");
7724 db_indent += 2;
7725 iprintf("pages:\n");
7726 db_indent += 2;
7727 iprintf("activ %5d inact %5d free %5d",
7728 vm_page_active_count, vm_page_inactive_count,
7729 vm_page_free_count);
7730 printf(" wire %5d gobbl %5d\n",
7731 vm_page_wire_count, vm_page_gobble_count);
1c79356b
A
7732 db_indent -= 2;
7733 iprintf("target:\n");
7734 db_indent += 2;
7735 iprintf("min %5d inact %5d free %5d",
7736 vm_page_free_min, vm_page_inactive_target,
7737 vm_page_free_target);
7738 printf(" resrv %5d\n", vm_page_free_reserved);
7739 db_indent -= 2;
1c79356b 7740 iprintf("pause:\n");
1c79356b
A
7741 db_pageout();
7742 db_indent -= 2;
7743}
7744
1c79356b 7745#if MACH_COUNTERS
91447636 7746extern int c_laundry_pages_freed;
1c79356b
A
7747#endif /* MACH_COUNTERS */
7748
91447636
A
7749void
7750db_pageout(void)
7751{
1c79356b
A
7752 iprintf("Pageout Statistics:\n");
7753 db_indent += 2;
7754 iprintf("active %5d inactv %5d\n",
7755 vm_pageout_active, vm_pageout_inactive);
7756 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
7757 vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
7758 vm_pageout_inactive_busy, vm_pageout_inactive_absent);
6d2010ae 7759 iprintf("used %5d clean %5d dirty(internal) %5d dirty(external) %5d\n",
1c79356b 7760 vm_pageout_inactive_used, vm_pageout_inactive_clean,
6d2010ae 7761 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
1c79356b
A
7762#if MACH_COUNTERS
7763 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
7764#endif /* MACH_COUNTERS */
7765#if MACH_CLUSTER_STATS
7766 iprintf("Cluster Statistics:\n");
7767 db_indent += 2;
7768 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
7769 vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
7770 vm_pageout_cluster_collisions);
7771 iprintf("clusters %5d conversions %5d\n",
7772 vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
7773 db_indent -= 2;
7774 iprintf("Target Statistics:\n");
7775 db_indent += 2;
7776 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
7777 vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
7778 vm_pageout_target_page_freed);
1c79356b
A
7779 db_indent -= 2;
7780#endif /* MACH_CLUSTER_STATS */
7781 db_indent -= 2;
7782}
7783
1c79356b 7784#endif /* MACH_KDB */