[apple/xnu.git] blame - osfmk/vm/vm_pageout.c (xnu-1504.3.12)
1c79356b 1/*
b0d623f7 2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
1c79356b 65
91447636
A
66#include <stdint.h>
67
68#include <debug.h>
1c79356b
A
69#include <mach_pagemap.h>
70#include <mach_cluster_stats.h>
71#include <mach_kdb.h>
72#include <advisory_pageout.h>
73
74#include <mach/mach_types.h>
75#include <mach/memory_object.h>
76#include <mach/memory_object_default.h>
0b4e3aa0 77#include <mach/memory_object_control_server.h>
1c79356b 78#include <mach/mach_host_server.h>
91447636
A
79#include <mach/upl.h>
80#include <mach/vm_map.h>
1c79356b
A
81#include <mach/vm_param.h>
82#include <mach/vm_statistics.h>
2d21ac55 83#include <mach/sdt.h>
91447636
A
84
85#include <kern/kern_types.h>
1c79356b 86#include <kern/counters.h>
91447636
A
87#include <kern/host_statistics.h>
88#include <kern/machine.h>
89#include <kern/misc_protos.h>
b0d623f7 90#include <kern/sched.h>
1c79356b 91#include <kern/thread.h>
1c79356b 92#include <kern/xpr.h>
91447636
A
93#include <kern/kalloc.h>
94
95#include <machine/vm_tuning.h>
b0d623f7 96#include <machine/commpage.h>
91447636 97
2d21ac55
A
98#if CONFIG_EMBEDDED
99#include <sys/kern_memorystatus.h>
100#endif
101
1c79356b 102#include <vm/pmap.h>
55e303ae 103#include <vm/vm_fault.h>
1c79356b
A
104#include <vm/vm_map.h>
105#include <vm/vm_object.h>
106#include <vm/vm_page.h>
107#include <vm/vm_pageout.h>
91447636 108#include <vm/vm_protos.h> /* must be last */
2d21ac55
A
109#include <vm/memory_object.h>
110#include <vm/vm_purgeable_internal.h>
1c79356b 111
91447636
A
112/*
113 * ENCRYPTED SWAP:
114 */
91447636 115#include <../bsd/crypto/aes/aes.h>
b0d623f7 116extern u_int32_t random(void); /* from <libkern/libkern.h> */
55e303ae 117
b0d623f7
A
118#if UPL_DEBUG
119#include <libkern/OSDebug.h>
120#endif
91447636 121
2d21ac55 122#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE /* maximum iterations of the active queue to move pages to inactive */
2d21ac55
A
123#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
124#endif
91447636 125
2d21ac55
A
126#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
127#ifdef CONFIG_EMBEDDED
128#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
129#else
130#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
131#endif
91447636
A
132#endif
133
134#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
135#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
136#endif
137
138#ifndef VM_PAGEOUT_INACTIVE_RELIEF
139#define VM_PAGEOUT_INACTIVE_RELIEF 50 /* minimum number of pages to move to the inactive q */
140#endif
141
1c79356b 142#ifndef VM_PAGE_LAUNDRY_MAX
91447636 143#define VM_PAGE_LAUNDRY_MAX 16UL /* maximum pageouts on a given pageout queue */
1c79356b
A
144#endif /* VM_PAGE_LAUNDRY_MAX */
145
1c79356b
A
146#ifndef VM_PAGEOUT_BURST_WAIT
147#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
148#endif /* VM_PAGEOUT_BURST_WAIT */
149
150#ifndef VM_PAGEOUT_EMPTY_WAIT
151#define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
152#endif /* VM_PAGEOUT_EMPTY_WAIT */
153
91447636
A
154#ifndef VM_PAGEOUT_DEADLOCK_WAIT
155#define VM_PAGEOUT_DEADLOCK_WAIT 300 /* milliseconds */
156#endif /* VM_PAGEOUT_DEADLOCK_WAIT */
157
158#ifndef VM_PAGEOUT_IDLE_WAIT
159#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
160#endif /* VM_PAGEOUT_IDLE_WAIT */
161
2d21ac55
A
162#ifndef VM_PAGE_SPECULATIVE_TARGET
163#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / 20)
164#endif /* VM_PAGE_SPECULATIVE_TARGET */
165
166#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
167#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
168#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
169
91447636 170
1c79356b
A
171/*
172 * To obtain a reasonable LRU approximation, the inactive queue
173 * needs to be large enough to give pages on it a chance to be
174 * referenced a second time. This macro defines the fraction
175 * of active+inactive pages that should be inactive.
176 * The pageout daemon uses it to update vm_page_inactive_target.
177 *
178 * If vm_page_free_count falls below vm_page_free_target and
179 * vm_page_inactive_count is below vm_page_inactive_target,
180 * then the pageout daemon starts running.
181 */
182
183#ifndef VM_PAGE_INACTIVE_TARGET
184#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
185#endif /* VM_PAGE_INACTIVE_TARGET */
186
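/*
 * [Editor's note - illustrative only, not part of the original source]
 * A quick worked example of the macro above: with roughly 300,000
 * active+inactive pages (about 1.2 GB of 4 KB pages),
 * VM_PAGE_INACTIVE_TARGET(300000) evaluates to 300000 / 3 = 100,000,
 * so the daemon tries to keep about a third of those pages on the
 * inactive queue, and it starts running when vm_page_free_count drops
 * below vm_page_free_target while vm_page_inactive_count is still
 * under this target.
 */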
187/*
188 * Once the pageout daemon starts running, it keeps going
189 * until vm_page_free_count meets or exceeds vm_page_free_target.
190 */
191
192#ifndef VM_PAGE_FREE_TARGET
2d21ac55
A
193#ifdef CONFIG_EMBEDDED
194#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
195#else
1c79356b 196#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
2d21ac55 197#endif
1c79356b
A
198#endif /* VM_PAGE_FREE_TARGET */
199
200/*
201 * The pageout daemon always starts running once vm_page_free_count
202 * falls below vm_page_free_min.
203 */
204
205#ifndef VM_PAGE_FREE_MIN
2d21ac55
A
206#ifdef CONFIG_EMBEDDED
207#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
208#else
209#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
210#endif
1c79356b
A
211#endif /* VM_PAGE_FREE_MIN */
212
2d21ac55
A
213#define VM_PAGE_FREE_MIN_LIMIT 1500
214#define VM_PAGE_FREE_TARGET_LIMIT 2000
215
216
1c79356b
A
217/*
218 * When vm_page_free_count falls below vm_page_free_reserved,
219 * only vm-privileged threads can allocate pages. vm-privilege
220 * allows the pageout daemon and default pager (and any other
221 * associated threads needed for default pageout) to continue
222 * operation by dipping into the reserved pool of pages.
223 */
224
225#ifndef VM_PAGE_FREE_RESERVED
91447636 226#define VM_PAGE_FREE_RESERVED(n) \
b0d623f7 227 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
1c79356b
A
228#endif /* VM_PAGE_FREE_RESERVED */
229
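/*
 * [Editor's note - illustrative only, not part of the original source]
 * With the default VM_PAGE_LAUNDRY_MAX of 16 defined above,
 * VM_PAGE_FREE_RESERVED(n) works out to (6 * 16) + n = 96 + n pages,
 * i.e. a fixed cushion of 96 pages plus a caller-supplied margin that
 * only vm-privileged threads may dip into.
 */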
2d21ac55
A
230/*
231 * When we dequeue pages from the inactive list, they are
232 * reactivated (ie, put back on the active queue) if referenced.
233 * However, it is possible to starve the free list if other
234 * processors are referencing pages faster than we can turn off
235 * the referenced bit. So we limit the number of reactivations
236 * we will make per call of vm_pageout_scan().
237 */
238#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
239#ifndef VM_PAGE_REACTIVATE_LIMIT
240#ifdef CONFIG_EMBEDDED
241#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
242#else
243#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
244#endif
245#endif /* VM_PAGE_REACTIVATE_LIMIT */
246#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
247
91447636
A
248
249/*
250 * must hold the page queues lock to
251 * manipulate this structure
252 */
253struct vm_pageout_queue {
254 queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */
255 unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */
256 unsigned int pgo_maxlaundry;
257
258 unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */
259 pgo_busy:1, /* iothread is currently processing request from pgo_pending */
260 pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
261 :0;
262};
263
264#define VM_PAGE_Q_THROTTLED(q) \
265 ((q)->pgo_laundry >= (q)->pgo_maxlaundry)
266
267
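/*
 * [Editor's note - illustrative only, not part of the original source]
 * Example of the throttle handshake: assuming pgo_maxlaundry were set
 * to VM_PAGE_LAUNDRY_MAX (16), VM_PAGE_Q_THROTTLED(q) becomes true once
 * 16 laundry pages are outstanding on q.  vm_pageout_scan() then sets
 * q->pgo_throttled and sleeps on &q->pgo_laundry; when a page comes
 * back from laundry, vm_pageout_throttle_up() decrements pgo_laundry
 * and issues the wakeup (see the FCS_* flow-control code below).
 */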
0b4e3aa0
A
268/*
269 * Exported variable used to broadcast the activation of the pageout scan.
270 * Working Set uses this to throttle its use of pmap removes. In this
271 * way, code which runs within memory in an uncontested context does
272 * not keep encountering soft faults.
273 */
274
275unsigned int vm_pageout_scan_event_counter = 0;
1c79356b
A
276
277/*
278 * Forward declarations for internal routines.
279 */
91447636
A
280
281static void vm_pageout_garbage_collect(int);
282static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
283static void vm_pageout_iothread_external(void);
284static void vm_pageout_iothread_internal(void);
91447636 285
1c79356b
A
286extern void vm_pageout_continue(void);
287extern void vm_pageout_scan(void);
1c79356b 288
2d21ac55
A
289static thread_t vm_pageout_external_iothread = THREAD_NULL;
290static thread_t vm_pageout_internal_iothread = THREAD_NULL;
291
1c79356b
A
292unsigned int vm_pageout_reserved_internal = 0;
293unsigned int vm_pageout_reserved_really = 0;
294
91447636 295unsigned int vm_pageout_idle_wait = 0; /* milliseconds */
55e303ae 296unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
91447636
A
297unsigned int vm_pageout_burst_wait = 0; /* milliseconds */
298unsigned int vm_pageout_deadlock_wait = 0; /* milliseconds */
299unsigned int vm_pageout_deadlock_relief = 0;
300unsigned int vm_pageout_inactive_relief = 0;
301unsigned int vm_pageout_burst_active_throttle = 0;
302unsigned int vm_pageout_burst_inactive_throttle = 0;
1c79356b 303
9bccf70c
A
304/*
305 * Protection against zero fill flushing live working sets derived
306 * from existing backing store and files
307 */
308unsigned int vm_accellerate_zf_pageout_trigger = 400;
2d21ac55 309unsigned int zf_queue_min_count = 100;
2d21ac55 310unsigned int vm_zf_queue_count = 0;
9bccf70c 311
b0d623f7
A
312#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
313unsigned int vm_zf_count = 0;
314#else
315uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
316#endif
317
1c79356b
A
318/*
319 * These variables record the pageout daemon's actions:
320 * how many pages it looks at and what happens to those pages.
321 * No locking needed because only one thread modifies the variables.
322 */
323
324unsigned int vm_pageout_active = 0; /* debugging */
325unsigned int vm_pageout_inactive = 0; /* debugging */
326unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
327unsigned int vm_pageout_inactive_forced = 0; /* debugging */
328unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
329unsigned int vm_pageout_inactive_avoid = 0; /* debugging */
330unsigned int vm_pageout_inactive_busy = 0; /* debugging */
331unsigned int vm_pageout_inactive_absent = 0; /* debugging */
332unsigned int vm_pageout_inactive_used = 0; /* debugging */
333unsigned int vm_pageout_inactive_clean = 0; /* debugging */
334unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
b0d623f7
A
335unsigned int vm_pageout_inactive_deactivated = 0; /* debugging */
336unsigned int vm_pageout_inactive_zf = 0; /* debugging */
1c79356b 337unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
91447636 338unsigned int vm_pageout_purged_objects = 0; /* debugging */
1c79356b
A
339unsigned int vm_stat_discard = 0; /* debugging */
340unsigned int vm_stat_discard_sent = 0; /* debugging */
341unsigned int vm_stat_discard_failure = 0; /* debugging */
342unsigned int vm_stat_discard_throttle = 0; /* debugging */
2d21ac55
A
343unsigned int vm_pageout_reactivation_limit_exceeded = 0; /* debugging */
344unsigned int vm_pageout_catch_ups = 0; /* debugging */
345unsigned int vm_pageout_inactive_force_reclaim = 0; /* debugging */
1c79356b 346
91447636
A
347unsigned int vm_pageout_scan_active_throttled = 0;
348unsigned int vm_pageout_scan_inactive_throttled = 0;
349unsigned int vm_pageout_scan_throttle = 0; /* debugging */
b0d623f7 350unsigned int vm_pageout_scan_throttle_aborted = 0; /* debugging */
91447636
A
351unsigned int vm_pageout_scan_burst_throttle = 0; /* debugging */
352unsigned int vm_pageout_scan_empty_throttle = 0; /* debugging */
353unsigned int vm_pageout_scan_deadlock_detected = 0; /* debugging */
354unsigned int vm_pageout_scan_active_throttle_success = 0; /* debugging */
355unsigned int vm_pageout_scan_inactive_throttle_success = 0; /* debugging */
b0d623f7
A
356
357unsigned int vm_page_speculative_count_drifts = 0;
358unsigned int vm_page_speculative_count_drift_max = 0;
359
55e303ae
A
360/*
361 * Backing store throttle when BS is exhausted
362 */
363unsigned int vm_backing_store_low = 0;
1c79356b
A
364
365unsigned int vm_pageout_out_of_line = 0;
366unsigned int vm_pageout_in_place = 0;
55e303ae 367
b0d623f7
A
368unsigned int vm_page_steal_pageout_page = 0;
369
91447636
A
370/*
371 * ENCRYPTED SWAP:
372 * counters and statistics...
373 */
374unsigned long vm_page_decrypt_counter = 0;
375unsigned long vm_page_decrypt_for_upl_counter = 0;
376unsigned long vm_page_encrypt_counter = 0;
377unsigned long vm_page_encrypt_abort_counter = 0;
378unsigned long vm_page_encrypt_already_encrypted_counter = 0;
379boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
380
91447636
A
381struct vm_pageout_queue vm_pageout_queue_internal;
382struct vm_pageout_queue vm_pageout_queue_external;
383
2d21ac55
A
384unsigned int vm_page_speculative_target = 0;
385
386vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
387
b0d623f7
A
388static boolean_t (* volatile consider_buffer_cache_collect)(void) = NULL;
389
390#if DEVELOPMENT || DEBUG
4a3eedf9 391unsigned long vm_cs_validated_resets = 0;
b0d623f7 392#endif
55e303ae
A
393
394/*
395 * Routine: vm_backing_store_disable
396 * Purpose:
397 * Suspend non-privileged threads wishing to extend
398 * backing store when we are low on backing store
399 * (Synchronized by caller)
400 */
401void
402vm_backing_store_disable(
403 boolean_t disable)
404{
405 if(disable) {
406 vm_backing_store_low = 1;
407 } else {
408 if(vm_backing_store_low) {
409 vm_backing_store_low = 0;
410 thread_wakeup((event_t) &vm_backing_store_low);
411 }
412 }
413}
414
415
1c79356b
A
416#if MACH_CLUSTER_STATS
417unsigned long vm_pageout_cluster_dirtied = 0;
418unsigned long vm_pageout_cluster_cleaned = 0;
419unsigned long vm_pageout_cluster_collisions = 0;
420unsigned long vm_pageout_cluster_clusters = 0;
421unsigned long vm_pageout_cluster_conversions = 0;
422unsigned long vm_pageout_target_collisions = 0;
423unsigned long vm_pageout_target_page_dirtied = 0;
424unsigned long vm_pageout_target_page_freed = 0;
1c79356b
A
425#define CLUSTER_STAT(clause) clause
426#else /* MACH_CLUSTER_STATS */
427#define CLUSTER_STAT(clause)
428#endif /* MACH_CLUSTER_STATS */
429
430/*
431 * Routine: vm_pageout_object_terminate
432 * Purpose:
2d21ac55 433 * Destroy the pageout_object, and perform all of the
1c79356b
A
434 * required cleanup actions.
435 *
436 * In/Out conditions:
437 * The object must be locked, and will be returned locked.
438 */
439void
440vm_pageout_object_terminate(
441 vm_object_t object)
442{
443 vm_object_t shadow_object;
444
445 /*
446 * Deal with the deallocation (last reference) of a pageout object
447 * (used for cleaning-in-place) by dropping the paging references/
448 * freeing pages in the original object.
449 */
450
451 assert(object->pageout);
452 shadow_object = object->shadow;
453 vm_object_lock(shadow_object);
454
455 while (!queue_empty(&object->memq)) {
456 vm_page_t p, m;
457 vm_object_offset_t offset;
458
459 p = (vm_page_t) queue_first(&object->memq);
460
461 assert(p->private);
462 assert(p->pageout);
463 p->pageout = FALSE;
464 assert(!p->cleaning);
465
466 offset = p->offset;
467 VM_PAGE_FREE(p);
468 p = VM_PAGE_NULL;
469
470 m = vm_page_lookup(shadow_object,
471 offset + object->shadow_offset);
472
473 if(m == VM_PAGE_NULL)
474 continue;
475 assert(m->cleaning);
0b4e3aa0
A
476 /* used as a trigger on upl_commit etc to recognize the */
477 /* pageout daemon's subsequent desire to pageout a cleaning */
478 /* page. When the bit is on, the upl commit code will */
479 /* respect the pageout bit in the target page over the */
480 /* caller's page list indication */
481 m->dump_cleaning = FALSE;
1c79356b 482
1c79356b
A
483 assert((m->dirty) || (m->precious) ||
484 (m->busy && m->cleaning));
485
486 /*
487 * Handle the trusted pager throttle.
55e303ae 488 * Also decrement the burst throttle (if external).
1c79356b
A
489 */
490 vm_page_lock_queues();
491 if (m->laundry) {
91447636 492 vm_pageout_throttle_up(m);
1c79356b
A
493 }
494
495 /*
496 * Handle the "target" page(s). These pages are to be freed if
497 * successfully cleaned. Target pages are always busy, and are
498 * wired exactly once. The initial target pages are not mapped,
499 * (so cannot be referenced or modified) but converted target
500 * pages may have been modified between the selection as an
501 * adjacent page and conversion to a target.
502 */
503 if (m->pageout) {
504 assert(m->busy);
505 assert(m->wire_count == 1);
506 m->cleaning = FALSE;
2d21ac55 507 m->encrypted_cleaning = FALSE;
1c79356b
A
508 m->pageout = FALSE;
509#if MACH_CLUSTER_STATS
510 if (m->wanted) vm_pageout_target_collisions++;
511#endif
512 /*
513 * Revoke all access to the page. Since the object is
514 * locked, and the page is busy, this prevents the page
91447636 515 * from being dirtied after the pmap_disconnect() call
1c79356b 516 * returns.
91447636 517 *
1c79356b
A
518 * Since the page is left "dirty" but "not modified", we
519 * can detect whether the page was redirtied during
520 * pageout by checking the modify state.
521 */
91447636
A
522 if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
523 m->dirty = TRUE;
524 else
525 m->dirty = FALSE;
1c79356b
A
526
527 if (m->dirty) {
528 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
529 vm_page_unwire(m);/* reactivates */
2d21ac55 530 VM_STAT_INCR(reactivations);
1c79356b 531 PAGE_WAKEUP_DONE(m);
1c79356b
A
532 } else {
533 CLUSTER_STAT(vm_pageout_target_page_freed++;)
534 vm_page_free(m);/* clears busy, etc. */
535 }
536 vm_page_unlock_queues();
537 continue;
538 }
539 /*
540 * Handle the "adjacent" pages. These pages were cleaned in
541 * place, and should be left alone.
542 * If prep_pin_count is nonzero, then someone is using the
543 * page, so make it active.
544 */
2d21ac55 545 if (!m->active && !m->inactive && !m->throttled && !m->private) {
0b4e3aa0 546 if (m->reference)
1c79356b
A
547 vm_page_activate(m);
548 else
549 vm_page_deactivate(m);
550 }
551 if((m->busy) && (m->cleaning)) {
552
553 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
554 m->busy = FALSE;
555
556 /* We do not re-set m->dirty ! */
557 /* The page was busy so no extraneous activity */
91447636 558 /* could have occurred. COPY_INTO is a read into the */
1c79356b
A
559 /* new pages. CLEAN_IN_PLACE does actually write */
560 /* out the pages but handling outside of this code */
561 /* will take care of resetting dirty. We clear the */
562 /* modify however for the Programmed I/O case. */
55e303ae 563 pmap_clear_modify(m->phys_page);
2d21ac55
A
564
565 m->absent = FALSE;
1c79356b
A
566 m->overwriting = FALSE;
567 } else if (m->overwriting) {
568 /* alternate request page list, write to page_list */
569 /* case. Occurs when the original page was wired */
570 /* at the time of the list request */
b0d623f7 571 assert(VM_PAGE_WIRED(m));
1c79356b
A
572 vm_page_unwire(m);/* reactivates */
573 m->overwriting = FALSE;
574 } else {
575 /*
576 * Set the dirty state according to whether or not the page was
577 * modified during the pageout. Note that we purposefully do
578 * NOT call pmap_clear_modify since the page is still mapped.
579 * If the page were to be dirtied between the 2 calls, this
580 * fact would be lost. This code is only necessary to
581 * maintain statistics, since the pmap module is always
582 * consulted if m->dirty is false.
583 */
584#if MACH_CLUSTER_STATS
55e303ae 585 m->dirty = pmap_is_modified(m->phys_page);
1c79356b
A
586
587 if (m->dirty) vm_pageout_cluster_dirtied++;
588 else vm_pageout_cluster_cleaned++;
589 if (m->wanted) vm_pageout_cluster_collisions++;
590#else
591 m->dirty = 0;
592#endif
593 }
594 m->cleaning = FALSE;
2d21ac55 595 m->encrypted_cleaning = FALSE;
1c79356b 596
1c79356b
A
597 /*
598 * Wakeup any thread waiting for the page to be un-cleaning.
599 */
600 PAGE_WAKEUP(m);
601 vm_page_unlock_queues();
602 }
603 /*
604 * Account for the paging reference taken in vm_paging_object_allocate.
605 */
b0d623f7 606 vm_object_activity_end(shadow_object);
1c79356b
A
607 vm_object_unlock(shadow_object);
608
609 assert(object->ref_count == 0);
610 assert(object->paging_in_progress == 0);
b0d623f7 611 assert(object->activity_in_progress == 0);
1c79356b
A
612 assert(object->resident_page_count == 0);
613 return;
614}
615
1c79356b
A
616/*
617 * Routine: vm_pageclean_setup
618 *
619 * Purpose: setup a page to be cleaned (made non-dirty), but not
620 * necessarily flushed from the VM page cache.
621 * This is accomplished by cleaning in place.
622 *
b0d623f7
A
623 * The page must not be busy, and new_object
624 * must be locked.
625 *
1c79356b
A
626 */
627void
628vm_pageclean_setup(
629 vm_page_t m,
630 vm_page_t new_m,
631 vm_object_t new_object,
632 vm_object_offset_t new_offset)
633{
1c79356b 634 assert(!m->busy);
2d21ac55 635#if 0
1c79356b 636 assert(!m->cleaning);
2d21ac55 637#endif
1c79356b
A
638
639 XPR(XPR_VM_PAGEOUT,
640 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
b0d623f7
A
641 m->object, m->offset, m,
642 new_m, new_offset);
1c79356b 643
55e303ae 644 pmap_clear_modify(m->phys_page);
1c79356b
A
645
646 /*
647 * Mark original page as cleaning in place.
648 */
649 m->cleaning = TRUE;
650 m->dirty = TRUE;
651 m->precious = FALSE;
652
653 /*
654 * Convert the fictitious page to a private shadow of
655 * the real page.
656 */
657 assert(new_m->fictitious);
2d21ac55 658 assert(new_m->phys_page == vm_page_fictitious_addr);
1c79356b
A
659 new_m->fictitious = FALSE;
660 new_m->private = TRUE;
661 new_m->pageout = TRUE;
55e303ae 662 new_m->phys_page = m->phys_page;
b0d623f7
A
663
664 vm_page_lockspin_queues();
1c79356b 665 vm_page_wire(new_m);
b0d623f7 666 vm_page_unlock_queues();
1c79356b
A
667
668 vm_page_insert(new_m, new_object, new_offset);
669 assert(!new_m->wanted);
670 new_m->busy = FALSE;
671}
672
1c79356b
A
673/*
674 * Routine: vm_pageout_initialize_page
675 * Purpose:
676 * Causes the specified page to be initialized in
677 * the appropriate memory object. This routine is used to push
678 * pages into a copy-object when they are modified in the
679 * permanent object.
680 *
681 * The page is moved to a temporary object and paged out.
682 *
683 * In/out conditions:
684 * The page in question must not be on any pageout queues.
685 * The object to which it belongs must be locked.
686 * The page must be busy, but not hold a paging reference.
687 *
688 * Implementation:
689 * Move this page to a completely new object.
690 */
691void
692vm_pageout_initialize_page(
693 vm_page_t m)
694{
1c79356b
A
695 vm_object_t object;
696 vm_object_offset_t paging_offset;
697 vm_page_t holding_page;
2d21ac55 698 memory_object_t pager;
1c79356b
A
699
700 XPR(XPR_VM_PAGEOUT,
701 "vm_pageout_initialize_page, page 0x%X\n",
b0d623f7 702 m, 0, 0, 0, 0);
1c79356b
A
703 assert(m->busy);
704
705 /*
706 * Verify that we really want to clean this page
707 */
708 assert(!m->absent);
709 assert(!m->error);
710 assert(m->dirty);
711
712 /*
713 * Create a paging reference to let us play with the object.
714 */
715 object = m->object;
716 paging_offset = m->offset + object->paging_offset;
2d21ac55
A
717
718 if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
1c79356b
A
719 VM_PAGE_FREE(m);
720 panic("reservation without pageout?"); /* alan */
2d21ac55
A
721 vm_object_unlock(object);
722
723 return;
724 }
725
726 /*
727 * If there's no pager, then we can't clean the page. This should
728 * never happen since this should be a copy object and therefore not
729 * an external object, so the pager should always be there.
730 */
731
732 pager = object->pager;
733
734 if (pager == MEMORY_OBJECT_NULL) {
735 VM_PAGE_FREE(m);
736 panic("missing pager for copy object");
1c79356b
A
737 return;
738 }
739
740 /* set the page for future call to vm_fault_list_request */
2d21ac55 741 vm_object_paging_begin(object);
1c79356b 742 holding_page = NULL;
b0d623f7 743
55e303ae 744 pmap_clear_modify(m->phys_page);
1c79356b 745 m->dirty = TRUE;
55e303ae
A
746 m->busy = TRUE;
747 m->list_req_pending = TRUE;
748 m->cleaning = TRUE;
1c79356b 749 m->pageout = TRUE;
b0d623f7
A
750
751 vm_page_lockspin_queues();
1c79356b 752 vm_page_wire(m);
55e303ae 753 vm_page_unlock_queues();
b0d623f7 754
55e303ae 755 vm_object_unlock(object);
1c79356b
A
756
757 /*
758 * Write the data to its pager.
759 * Note that the data is passed by naming the new object,
760 * not a virtual address; the pager interface has been
761 * manipulated to use the "internal memory" data type.
762 * [The object reference from its allocation is donated
763 * to the eventual recipient.]
764 */
2d21ac55 765 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
1c79356b
A
766
767 vm_object_lock(object);
2d21ac55 768 vm_object_paging_end(object);
1c79356b
A
769}
770
771#if MACH_CLUSTER_STATS
772#define MAXCLUSTERPAGES 16
773struct {
774 unsigned long pages_in_cluster;
775 unsigned long pages_at_higher_offsets;
776 unsigned long pages_at_lower_offsets;
777} cluster_stats[MAXCLUSTERPAGES];
778#endif /* MACH_CLUSTER_STATS */
779
1c79356b
A
780
781/*
782 * vm_pageout_cluster:
783 *
91447636
A
784 * Given a page, queue it to the appropriate I/O thread,
785 * which will page it out and attempt to clean adjacent pages
1c79356b
A
786 * in the same operation.
787 *
91447636 788 * The page must be busy, and the object and queues locked. We will take a
55e303ae 789 * paging reference to prevent deallocation or collapse when we
91447636
A
790 * release the object lock back at the call site. The I/O thread
791 * is responsible for consuming this reference.
55e303ae
A
792 *
793 * The page must not be on any pageout queue.
1c79356b 794 */
91447636 795
1c79356b 796void
91447636 797vm_pageout_cluster(vm_page_t m)
1c79356b
A
798{
799 vm_object_t object = m->object;
91447636
A
800 struct vm_pageout_queue *q;
801
1c79356b
A
802
803 XPR(XPR_VM_PAGEOUT,
804 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
b0d623f7
A
805 object, m->offset, m, 0, 0);
806
807 VM_PAGE_CHECK(m);
1c79356b 808
91447636
A
809 /*
810 * Only a certain kind of page is appreciated here.
811 */
b0d623f7 812 assert(m->busy && (m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
91447636 813 assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
2d21ac55 814 assert(!m->throttled);
55e303ae
A
815
816 /*
817 * protect the object from collapse -
818 * locking in the object's paging_offset.
819 */
820 vm_object_paging_begin(object);
55e303ae 821
1c79356b 822 /*
91447636
A
823 * set the page for future call to vm_fault_list_request
824 * page should already be marked busy
1c79356b 825 */
91447636 826 vm_page_wire(m);
55e303ae
A
827 m->list_req_pending = TRUE;
828 m->cleaning = TRUE;
1c79356b 829 m->pageout = TRUE;
91447636 830 m->laundry = TRUE;
1c79356b 831
91447636
A
832 if (object->internal == TRUE)
833 q = &vm_pageout_queue_internal;
834 else
835 q = &vm_pageout_queue_external;
836 q->pgo_laundry++;
1c79356b 837
91447636
A
838 m->pageout_queue = TRUE;
839 queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
840
841 if (q->pgo_idle == TRUE) {
842 q->pgo_idle = FALSE;
843 thread_wakeup((event_t) &q->pgo_pending);
1c79356b 844 }
b0d623f7
A
845
846 VM_PAGE_CHECK(m);
1c79356b
A
847}
848
55e303ae 849
91447636 850unsigned long vm_pageout_throttle_up_count = 0;
1c79356b
A
851
852/*
b0d623f7
A
853 * A page is back from laundry or we are stealing it back from
854 * the laundering state. See if there are some pages waiting to
91447636 855 * go to laundry and if we can let some of them go now.
1c79356b 856 *
91447636 857 * Object and page queues must be locked.
1c79356b 858 */
91447636
A
859void
860vm_pageout_throttle_up(
861 vm_page_t m)
1c79356b 862{
91447636 863 struct vm_pageout_queue *q;
1c79356b 864
91447636
A
865 assert(m->laundry);
866 assert(m->object != VM_OBJECT_NULL);
867 assert(m->object != kernel_object);
1c79356b 868
b0d623f7
A
869 vm_pageout_throttle_up_count++;
870
91447636
A
871 if (m->object->internal == TRUE)
872 q = &vm_pageout_queue_internal;
873 else
874 q = &vm_pageout_queue_external;
1c79356b 875
b0d623f7
A
876 if (m->pageout_queue == TRUE) {
877 m->pageout_queue = FALSE;
878
879 queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
880 m->pageq.next = NULL;
881 m->pageq.prev = NULL;
882
883 vm_object_paging_end(m->object);
884 }
91447636
A
885 m->laundry = FALSE;
886 q->pgo_laundry--;
1c79356b 887
91447636
A
888 if (q->pgo_throttled == TRUE) {
889 q->pgo_throttled = FALSE;
890 thread_wakeup((event_t) &q->pgo_laundry);
1c79356b 891 }
1c79356b
A
892}
893
91447636 894
1c79356b
A
895/*
896 * vm_pageout_scan does the dirty work for the pageout daemon.
897 * It returns with vm_page_queue_free_lock held and
898 * vm_page_free_wanted == 0.
899 */
1c79356b 900
2d21ac55 901#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT (3 * MAX_UPL_TRANSFER)
91447636
A
902
903#define FCS_IDLE 0
904#define FCS_DELAYED 1
905#define FCS_DEADLOCK_DETECTED 2
906
907struct flow_control {
908 int state;
909 mach_timespec_t ts;
910};
911
b0d623f7
A
912
913/*
914 * VM memory pressure monitoring.
915 *
916 * vm_pageout_scan() keeps track of the number of pages it considers and
917 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
918 *
919 * compute_memory_pressure() is called every second from compute_averages()
920 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
921 * of reclaimed pages in a new vm_pageout_stat[] bucket.
922 *
923 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
924 * The caller provides the number of seconds ("nsecs") worth of statistics
925 * it wants, up to 30 seconds.
926 * It computes the number of pages reclaimed in the past "nsecs" seconds and
927 * also returns the number of pages the system still needs to reclaim at this
928 * moment in time.
929 */
930#define VM_PAGEOUT_STAT_SIZE 31
931struct vm_pageout_stat {
932 unsigned int considered;
933 unsigned int reclaimed;
934} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
935unsigned int vm_pageout_stat_now = 0;
936unsigned int vm_memory_pressure = 0;
937
938#define VM_PAGEOUT_STAT_BEFORE(i) \
939 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
940#define VM_PAGEOUT_STAT_AFTER(i) \
941 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
942
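/*
 * [Editor's note - illustrative only, not part of the original source]
 * The two macros above implement a 31-entry ring: with
 * VM_PAGEOUT_STAT_SIZE == 31, VM_PAGEOUT_STAT_BEFORE(0) == 30 and
 * VM_PAGEOUT_STAT_AFTER(30) == 0.  Since compute_memory_pressure()
 * advances vm_pageout_stat_now once per second, the array holds about
 * 30 seconds of history plus the bucket currently being filled.
 */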
943/*
944 * Called from compute_averages().
945 */
946void
947compute_memory_pressure(
948 __unused void *arg)
949{
950 unsigned int vm_pageout_next;
951
952 vm_memory_pressure =
953 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
954
955 commpage_set_memory_pressure( vm_memory_pressure );
956
957 /* move "now" forward */
958 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
959 vm_pageout_stats[vm_pageout_next].considered = 0;
960 vm_pageout_stats[vm_pageout_next].reclaimed = 0;
961 vm_pageout_stat_now = vm_pageout_next;
962}
963
964unsigned int
965mach_vm_ctl_page_free_wanted(void)
966{
967 unsigned int page_free_target, page_free_count, page_free_wanted;
968
969 page_free_target = vm_page_free_target;
970 page_free_count = vm_page_free_count;
971 if (page_free_target > page_free_count) {
972 page_free_wanted = page_free_target - page_free_count;
973 } else {
974 page_free_wanted = 0;
975 }
976
977 return page_free_wanted;
978}
979
980kern_return_t
981mach_vm_pressure_monitor(
982 boolean_t wait_for_pressure,
983 unsigned int nsecs_monitored,
984 unsigned int *pages_reclaimed_p,
985 unsigned int *pages_wanted_p)
986{
987 wait_result_t wr;
988 unsigned int vm_pageout_then, vm_pageout_now;
989 unsigned int pages_reclaimed;
990
991 /*
992 * We don't take the vm_page_queue_lock here because we don't want
993 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
994 * thread when it's trying to reclaim memory. We don't need fully
995 * accurate monitoring anyway...
996 */
997
998 if (wait_for_pressure) {
999 /* wait until there's memory pressure */
1000 while (vm_page_free_count >= vm_page_free_target) {
1001 wr = assert_wait((event_t) &vm_page_free_wanted,
1002 THREAD_INTERRUPTIBLE);
1003 if (wr == THREAD_WAITING) {
1004 wr = thread_block(THREAD_CONTINUE_NULL);
1005 }
1006 if (wr == THREAD_INTERRUPTED) {
1007 return KERN_ABORTED;
1008 }
1009 if (wr == THREAD_AWAKENED) {
1010 /*
1011 * The memory pressure might have already
1012 * been relieved but let's not block again
1013 * and let's report that there was memory
1014 * pressure at some point.
1015 */
1016 break;
1017 }
1018 }
1019 }
1020
1021 /* provide the number of pages the system wants to reclaim */
1022 if (pages_wanted_p != NULL) {
1023 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1024 }
1025
1026 if (pages_reclaimed_p == NULL) {
1027 return KERN_SUCCESS;
1028 }
1029
1030 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1031 do {
1032 vm_pageout_now = vm_pageout_stat_now;
1033 pages_reclaimed = 0;
1034 for (vm_pageout_then =
1035 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1036 vm_pageout_then != vm_pageout_now &&
1037 nsecs_monitored-- != 0;
1038 vm_pageout_then =
1039 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1040 pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
1041 }
1042 } while (vm_pageout_now != vm_pageout_stat_now);
1043 *pages_reclaimed_p = pages_reclaimed;
1044
1045 return KERN_SUCCESS;
1046}
1047
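/*
 * [Editor's sketch - hypothetical, not part of the original source]
 * One way an in-kernel caller could sample the monitor defined above
 * without blocking.  The wrapper name is illustrative only; it is kept
 * compiled out since nothing in this file calls it.
 */
#if 0
static void
vm_pressure_sample_once(void)
{
	unsigned int	pages_reclaimed = 0;
	unsigned int	pages_wanted = 0;

	/* FALSE: do not wait for pressure; report the last 10 seconds */
	if (mach_vm_pressure_monitor(FALSE, 10,
				     &pages_reclaimed, &pages_wanted) == KERN_SUCCESS) {
		printf("vm pressure: %u pages reclaimed in the last 10s, "
		       "%u pages still wanted\n",
		       pages_reclaimed, pages_wanted);
	}
}
#endif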
1048/* Page States: Used below to maintain the page state
1049 before it is removed from its queue. This saved state
1050 helps us do the right accounting in certain cases
1051*/
1052
1053#define PAGE_STATE_SPECULATIVE 1
1054#define PAGE_STATE_THROTTLED 2
1055#define PAGE_STATE_ZEROFILL 3
1056#define PAGE_STATE_INACTIVE 4
1057
1058#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
1059 MACRO_BEGIN \
1060 /* \
1061 * If a "reusable" page somehow made it back into \
1062 * the active queue, it's been re-used and is not \
1063 * quite re-usable. \
1064 * If the VM object was "all_reusable", consider it \
1065 * as "all re-used" instead of converting it to \
1066 * "partially re-used", which could be expensive. \
1067 */ \
1068 if ((m)->reusable || \
1069 (m)->object->all_reusable) { \
1070 vm_object_reuse_pages((m)->object, \
1071 (m)->offset, \
1072 (m)->offset + PAGE_SIZE_64, \
1073 FALSE); \
1074 } \
1075 MACRO_END
1076
1c79356b
A
1077void
1078vm_pageout_scan(void)
1079{
91447636
A
1080 unsigned int loop_count = 0;
1081 unsigned int inactive_burst_count = 0;
1082 unsigned int active_burst_count = 0;
2d21ac55
A
1083 unsigned int reactivated_this_call;
1084 unsigned int reactivate_limit;
1085 vm_page_t local_freeq = NULL;
55e303ae 1086 int local_freed = 0;
2d21ac55 1087 int delayed_unlock;
91447636
A
1088 int refmod_state = 0;
1089 int vm_pageout_deadlock_target = 0;
1090 struct vm_pageout_queue *iq;
1091 struct vm_pageout_queue *eq;
2d21ac55 1092 struct vm_speculative_age_q *sq;
b0d623f7 1093 struct flow_control flow_control = { 0, { 0, 0 } };
91447636 1094 boolean_t inactive_throttled = FALSE;
2d21ac55 1095 boolean_t try_failed;
91447636
A
1096 mach_timespec_t ts;
1097 unsigned int msecs = 0;
1098 vm_object_t object;
2d21ac55 1099 vm_object_t last_object_tried;
b0d623f7
A
1100#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
1101 unsigned int zf_ratio;
1102 unsigned int zf_run_count;
1103#else
1104 uint64_t zf_ratio;
1105 uint64_t zf_run_count;
1106#endif
2d21ac55
A
1107 uint32_t catch_up_count = 0;
1108 uint32_t inactive_reclaim_run;
1109 boolean_t forced_reclaim;
b0d623f7 1110 int page_prev_state = 0;
91447636
A
1111
1112 flow_control.state = FCS_IDLE;
1113 iq = &vm_pageout_queue_internal;
1114 eq = &vm_pageout_queue_external;
2d21ac55
A
1115 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1116
1c79356b
A
1117
1118 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1119
2d21ac55
A
1120
1121 vm_page_lock_queues();
1122 delayed_unlock = 1; /* must be nonzero if Qs are locked, 0 if unlocked */
1123
1124 /*
1125 * Calculate the max number of referenced pages on the inactive
1126 * queue that we will reactivate.
1127 */
1128 reactivated_this_call = 0;
1129 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
1130 vm_page_inactive_count);
1131 inactive_reclaim_run = 0;
1132
1133
1c79356b
A
1134/*???*/ /*
1135 * We want to gradually dribble pages from the active queue
1136 * to the inactive queue. If we let the inactive queue get
1137 * very small, and then suddenly dump many pages into it,
1138 * those pages won't get a sufficient chance to be referenced
1139 * before we start taking them from the inactive queue.
1140 *
1141 * We must limit the rate at which we send pages to the pagers.
1142 * data_write messages consume memory, for message buffers and
1143 * for map-copy objects. If we get too far ahead of the pagers,
1144 * we can potentially run out of memory.
1145 *
1146 * We can use the laundry count to limit directly the number
1147 * of pages outstanding to the default pager. A similar
1148 * strategy for external pagers doesn't work, because
1149 * external pagers don't have to deallocate the pages sent them,
1150 * and because we might have to send pages to external pagers
1151 * even if they aren't processing writes. So we also
1152 * use a burst count to limit writes to external pagers.
1153 *
1154 * When memory is very tight, we can't rely on external pagers to
1155 * clean pages. They probably aren't running, because they
1156 * aren't vm-privileged. If we kept sending dirty pages to them,
55e303ae 1157 * we could exhaust the free list.
1c79356b 1158 */
91447636 1159
1c79356b 1160
91447636 1161Restart:
2d21ac55
A
1162 assert(delayed_unlock!=0);
1163
1164 /*
1165 * A page is "zero-filled" if it was not paged in from somewhere,
1166 * and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
1167 * Recalculate the zero-filled page ratio. We use this to apportion
1168 * victimized pages between the normal and zero-filled inactive
1169 * queues according to their relative abundance in memory. Thus if a task
1170 * is flooding memory with zf pages, we begin to hunt them down.
1171 * It would be better to throttle greedy tasks at a higher level,
1172 * but at the moment mach vm cannot do this.
1173 */
1174 {
b0d623f7
A
1175#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
1176 uint32_t total = vm_page_active_count + vm_page_inactive_count;
1177 uint32_t normal = total - vm_zf_count;
1178#else
1179 uint64_t total = vm_page_active_count + vm_page_inactive_count;
1180 uint64_t normal = total - vm_zf_count;
1181#endif
1182
2d21ac55
A
1183 /* zf_ratio is the number of zf pages we victimize per normal page */
1184
1185 if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
1186 zf_ratio = 0;
1187 else if ((vm_zf_count <= normal) || (normal == 0))
1188 zf_ratio = 1;
1189 else
1190 zf_ratio = vm_zf_count / normal;
1191
1192 zf_run_count = 0;
1193 }
1194
91447636
A
1195 /*
1196 * Recalculate vm_page_inactive_target.
1197 */
1198 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2d21ac55
A
1199 vm_page_inactive_count +
1200 vm_page_speculative_count);
1201 /*
1202 * don't want to wake the pageout_scan thread up every time we fall below
1203 * the targets... set a low water mark at 0.25% below the target
1204 */
1205 vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
1c79356b 1206
2d21ac55
A
1207 vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1208 vm_page_inactive_count);
1209 object = NULL;
1210 last_object_tried = NULL;
1211 try_failed = FALSE;
1212
1213 if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
1214 catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
1215 else
1216 catch_up_count = 0;
1217
55e303ae 1218 for (;;) {
91447636 1219 vm_page_t m;
1c79356b 1220
2d21ac55 1221 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
1c79356b 1222
2d21ac55
A
1223 if (delayed_unlock == 0) {
1224 vm_page_lock_queues();
1225 delayed_unlock = 1;
1226 }
91447636 1227
2d21ac55
A
1228 /*
1229 * Don't sweep through active queue more than the throttle
1230 * which should be kept relatively low
1231 */
b0d623f7
A
1232 active_burst_count = MIN(vm_pageout_burst_active_throttle,
1233 vm_page_active_count);
91447636 1234
1c79356b
A
1235 /*
1236 * Move pages from active to inactive.
1237 */
b0d623f7 1238 if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
2d21ac55
A
1239 goto done_moving_active_pages;
1240
b0d623f7 1241 while (!queue_empty(&vm_page_queue_active) && active_burst_count) {
2d21ac55
A
1242
1243 if (active_burst_count)
1244 active_burst_count--;
1c79356b 1245
1c79356b 1246 vm_pageout_active++;
55e303ae 1247
1c79356b 1248 m = (vm_page_t) queue_first(&vm_page_queue_active);
91447636
A
1249
1250 assert(m->active && !m->inactive);
1251 assert(!m->laundry);
1252 assert(m->object != kernel_object);
2d21ac55
A
1253 assert(m->phys_page != vm_page_guard_addr);
1254
1255 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1c79356b
A
1256
1257 /*
91447636
A
1258 * Try to lock object; since we've already got the
1259 * page queues lock, we can only 'try' for this one.
1260 * if the 'try' fails, we need to do a mutex_pause
1261 * to allow the owner of the object lock a chance to
1262 * run... otherwise, we're likely to trip over this
1263 * object in the same state as we work our way through
1264 * the queue... clumps of pages associated with the same
1265 * object are fairly typical on the inactive and active queues
1c79356b 1266 */
91447636
A
1267 if (m->object != object) {
1268 if (object != NULL) {
1269 vm_object_unlock(object);
1270 object = NULL;
2d21ac55 1271 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1c79356b 1272 }
2d21ac55 1273 if (!vm_object_lock_try_scan(m->object)) {
91447636
A
1274 /*
1275 * move page to end of active queue and continue
1276 */
1277 queue_remove(&vm_page_queue_active, m,
1278 vm_page_t, pageq);
1279 queue_enter(&vm_page_queue_active, m,
1280 vm_page_t, pageq);
2d21ac55
A
1281
1282 try_failed = TRUE;
55e303ae 1283
2d21ac55
A
1284 m = (vm_page_t) queue_first(&vm_page_queue_active);
1285 /*
1286 * this is the next object we're going to be interested in
b0d623f7 1287 * try to make sure it's available after the mutex_yield
2d21ac55
A
1288 * returns control
1289 */
1290 vm_pageout_scan_wants_object = m->object;
1291
91447636 1292 goto done_with_activepage;
55e303ae 1293 }
91447636 1294 object = m->object;
2d21ac55
A
1295
1296 try_failed = FALSE;
1c79356b 1297 }
2d21ac55 1298
1c79356b 1299 /*
91447636
A
1300 * if the page is BUSY, then we pull it
1301 * off the active queue and leave it alone.
1302 * when BUSY is cleared, it will get stuck
1303 * back on the appropriate queue
1c79356b 1304 */
1c79356b 1305 if (m->busy) {
1c79356b
A
1306 queue_remove(&vm_page_queue_active, m,
1307 vm_page_t, pageq);
91447636
A
1308 m->pageq.next = NULL;
1309 m->pageq.prev = NULL;
1310
1c79356b
A
1311 if (!m->fictitious)
1312 vm_page_active_count--;
91447636
A
1313 m->active = FALSE;
1314
1315 goto done_with_activepage;
1c79356b 1316 }
91447636 1317
b0d623f7
A
1318 /* deal with a rogue "reusable" page */
1319 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
1320
1c79356b
A
1321 /*
1322 * Deactivate the page while holding the object
1323 * locked, so we know the page is still not busy.
1324 * This should prevent races between pmap_enter
1325 * and pmap_clear_reference. The page might be
1326 * absent or fictitious, but vm_page_deactivate
1327 * can handle that.
1328 */
91447636 1329 vm_page_deactivate(m);
2d21ac55 1330
91447636 1331done_with_activepage:
2d21ac55 1332 if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
1c79356b 1333
91447636 1334 if (object != NULL) {
b0d623f7 1335 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
91447636
A
1336 vm_object_unlock(object);
1337 object = NULL;
1338 }
1339 if (local_freeq) {
b0d623f7
A
1340 vm_page_unlock_queues();
1341 vm_page_free_list(local_freeq, TRUE);
91447636 1342
2d21ac55 1343 local_freeq = NULL;
91447636 1344 local_freed = 0;
b0d623f7
A
1345 vm_page_lock_queues();
1346 } else
1347 lck_mtx_yield(&vm_page_queue_lock);
2d21ac55
A
1348
1349 delayed_unlock = 1;
91447636 1350
91447636
A
1351 /*
1352 * continue the while loop processing
1353 * the active queue... need to hold
1354 * the page queues lock
1355 */
55e303ae 1356 }
1c79356b 1357 }
91447636
A
1358
1359
1360
1361 /**********************************************************************
1362 * above this point we're playing with the active queue
1363 * below this point we're playing with the throttling mechanisms
1364 * and the inactive queue
1365 **********************************************************************/
1366
2d21ac55 1367done_moving_active_pages:
91447636 1368
1c79356b
A
1369 /*
1370 * We are done if we have met our target *and*
1371 * nobody is still waiting for a page.
1372 */
55e303ae 1373 if (vm_page_free_count + local_freed >= vm_page_free_target) {
91447636
A
1374 if (object != NULL) {
1375 vm_object_unlock(object);
1376 object = NULL;
1377 }
2d21ac55
A
1378 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1379
55e303ae 1380 if (local_freeq) {
b0d623f7
A
1381 vm_page_unlock_queues();
1382 vm_page_free_list(local_freeq, TRUE);
55e303ae 1383
2d21ac55 1384 local_freeq = NULL;
55e303ae 1385 local_freed = 0;
b0d623f7 1386 vm_page_lock_queues();
55e303ae 1387 }
2d21ac55
A
1388 /*
1389 * inactive target still not met... keep going
1390 * until we get the queues balanced
1391 */
593a1d5f
A
1392
1393 /*
1394 * Recalculate vm_page_inactive_target.
1395 */
1396 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1397 vm_page_inactive_count +
1398 vm_page_speculative_count);
1399
1400#ifndef CONFIG_EMBEDDED
1401 /*
1402 * XXX: if no active pages can be reclaimed, pageout scan can be stuck trying
1403 * to balance the queues
1404 */
2d21ac55
A
1405 if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
1406 !queue_empty(&vm_page_queue_active))
1407 continue;
593a1d5f 1408#endif
2d21ac55 1409
b0d623f7 1410 lck_mtx_lock(&vm_page_queue_free_lock);
55e303ae 1411
0b4e3aa0 1412 if ((vm_page_free_count >= vm_page_free_target) &&
2d21ac55 1413 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
55e303ae 1414
0b4e3aa0 1415 vm_page_unlock_queues();
91447636
A
1416
1417 thread_wakeup((event_t) &vm_pageout_garbage_collect);
2d21ac55
A
1418
1419 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
1420
91447636 1421 return;
0b4e3aa0 1422 }
b0d623f7 1423 lck_mtx_unlock(&vm_page_queue_free_lock);
1c79356b 1424 }
b0d623f7 1425
2d21ac55 1426 /*
b0d623f7
A
1427 * Before anything, we check if we have any ripe volatile
1428 * objects around. If so, try to purge the first object.
1429 * If the purge fails, fall through to reclaim a page instead.
1430 * If the purge succeeds, go back to the top and re-evaluate
1431 * the new memory situation.
2d21ac55
A
1432 */
1433 assert (available_for_purge>=0);
1434 if (available_for_purge)
1435 {
1436 if (object != NULL) {
1437 vm_object_unlock(object);
1438 object = NULL;
1439 }
b0d623f7
A
1440 if(TRUE == vm_purgeable_object_purge_one()) {
1441 continue;
1442 }
2d21ac55
A
1443 }
1444
1445 if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
1446 /*
1447 * try to pull pages from the aging bins
1448 * see vm_page.h for an explanation of how
1449 * this mechanism works
1450 */
1451 struct vm_speculative_age_q *aq;
1452 mach_timespec_t ts_fully_aged;
1453 boolean_t can_steal = FALSE;
b0d623f7 1454 int num_scanned_queues;
2d21ac55
A
1455
1456 aq = &vm_page_queue_speculative[speculative_steal_index];
1457
b0d623f7
A
1458 num_scanned_queues = 0;
1459 while (queue_empty(&aq->age_q) &&
1460 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2d21ac55
A
1461
1462 speculative_steal_index++;
1463
1464 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
1465 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1466
1467 aq = &vm_page_queue_speculative[speculative_steal_index];
1468 }
b0d623f7
A
1469
1470 if (num_scanned_queues ==
1471 VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
1472 /*
1473 * XXX We've scanned all the speculative
1474 * queues but still haven't found one
1475 * that is not empty, even though
1476 * vm_page_speculative_count is not 0.
1477 */
1478 /* report the anomaly... */
1479 printf("vm_pageout_scan: "
1480 "all speculative queues empty "
1481 "but count=%d. Re-adjusting.\n",
1482 vm_page_speculative_count);
1483 if (vm_page_speculative_count >
1484 vm_page_speculative_count_drift_max)
1485 vm_page_speculative_count_drift_max = vm_page_speculative_count;
1486 vm_page_speculative_count_drifts++;
1487#if 6553678
1488 Debugger("vm_pageout_scan: no speculative pages");
1489#endif
1490 /* readjust... */
1491 vm_page_speculative_count = 0;
1492 /* ... and continue */
1493 continue;
1494 }
1495
2d21ac55
A
1496 if (vm_page_speculative_count > vm_page_speculative_target)
1497 can_steal = TRUE;
1498 else {
1499 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) / 1000;
1500 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) % 1000)
1501 * 1000 * NSEC_PER_USEC;
1502
1503 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
55e303ae 1504
b0d623f7
A
1505 clock_sec_t sec;
1506 clock_nsec_t nsec;
1507 clock_get_system_nanotime(&sec, &nsec);
1508 ts.tv_sec = (unsigned int) sec;
1509 ts.tv_nsec = nsec;
2d21ac55
A
1510
1511 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
1512 can_steal = TRUE;
1513 }
1514 if (can_steal == TRUE)
1515 vm_page_speculate_ageit(aq);
1516 }
91447636 1517
1c79356b
A
1518 /*
1519 * Sometimes we have to pause:
1520 * 1) No inactive pages - nothing to do.
91447636
A
1521 * 2) Flow control - default pageout queue is full
1522 * 3) Loop control - no acceptable pages found on the inactive queue
1523 * within the last vm_pageout_burst_inactive_throttle iterations
1c79356b 1524 */
2d21ac55
A
1525 if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q) &&
1526 (VM_PAGE_Q_THROTTLED(iq) || queue_empty(&vm_page_queue_throttled))) {
91447636
A
1527 vm_pageout_scan_empty_throttle++;
1528 msecs = vm_pageout_empty_wait;
1529 goto vm_pageout_scan_delay;
1530
b0d623f7 1531 } else if (inactive_burst_count >=
593a1d5f
A
1532 MIN(vm_pageout_burst_inactive_throttle,
1533 (vm_page_inactive_count +
1534 vm_page_speculative_count))) {
91447636
A
1535 vm_pageout_scan_burst_throttle++;
1536 msecs = vm_pageout_burst_wait;
1537 goto vm_pageout_scan_delay;
1538
2d21ac55 1539 } else if (VM_PAGE_Q_THROTTLED(iq) && IP_VALID(memory_manager_default)) {
b0d623f7
A
1540 clock_sec_t sec;
1541 clock_nsec_t nsec;
91447636
A
1542
1543 switch (flow_control.state) {
1544
1545 case FCS_IDLE:
1546reset_deadlock_timer:
1547 ts.tv_sec = vm_pageout_deadlock_wait / 1000;
1548 ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
b0d623f7
A
1549 clock_get_system_nanotime(&sec, &nsec);
1550 flow_control.ts.tv_sec = (unsigned int) sec;
1551 flow_control.ts.tv_nsec = nsec;
91447636
A
1552 ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
1553
1554 flow_control.state = FCS_DELAYED;
1555 msecs = vm_pageout_deadlock_wait;
1c79356b 1556
91447636
A
1557 break;
1558
1559 case FCS_DELAYED:
b0d623f7
A
1560 clock_get_system_nanotime(&sec, &nsec);
1561 ts.tv_sec = (unsigned int) sec;
1562 ts.tv_nsec = nsec;
91447636
A
1563
1564 if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
1565 /*
1566 * the pageout thread for the default pager is potentially
1567 * deadlocked since the
1568 * default pager queue has been throttled for more than the
1569 * allowable time... we need to move some clean pages or dirty
1570 * pages belonging to the external pagers if they aren't throttled
1571 * vm_page_free_wanted represents the number of threads currently
1572 * blocked waiting for pages... we'll move one page for each of
1573 * these plus a fixed amount to break the logjam... once we're done
1574 * moving this number of pages, we'll re-enter the FCS_DELAYED state
1575 * with a new timeout target since we have no way of knowing
1576 * whether we've broken the deadlock except through observation
1577 * of the queue associated with the default pager... we need to
2d21ac55 1578 * stop moving pages and allow the system to run to see what
91447636
A
1579 * state it settles into.
1580 */
2d21ac55 1581 vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
91447636
A
1582 vm_pageout_scan_deadlock_detected++;
1583 flow_control.state = FCS_DEADLOCK_DETECTED;
55e303ae 1584
91447636
A
1585 thread_wakeup((event_t) &vm_pageout_garbage_collect);
1586 goto consider_inactive;
1587 }
1588 /*
1589 * just resniff instead of trying
1590 * to compute a new delay time... we're going to be
1591 * awakened immediately upon a laundry completion,
1592 * so we won't wait any longer than necessary
1593 */
1594 msecs = vm_pageout_idle_wait;
1595 break;
1c79356b 1596
91447636
A
1597 case FCS_DEADLOCK_DETECTED:
1598 if (vm_pageout_deadlock_target)
1599 goto consider_inactive;
1600 goto reset_deadlock_timer;
55e303ae 1601
91447636
A
1602 }
1603 vm_pageout_scan_throttle++;
1604 iq->pgo_throttled = TRUE;
1605vm_pageout_scan_delay:
1606 if (object != NULL) {
1607 vm_object_unlock(object);
1608 object = NULL;
1609 }
2d21ac55
A
1610 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1611
55e303ae 1612 if (local_freeq) {
b0d623f7
A
1613 vm_page_unlock_queues();
1614 vm_page_free_list(local_freeq, TRUE);
55e303ae 1615
2d21ac55 1616 local_freeq = NULL;
55e303ae 1617 local_freed = 0;
b0d623f7
A
1618 vm_page_lock_queues();
1619
1620 if (flow_control.state == FCS_DELAYED &&
1621 !VM_PAGE_Q_THROTTLED(iq)) {
1622 flow_control.state = FCS_IDLE;
1623 vm_pageout_scan_throttle_aborted++;
1624 goto consider_inactive;
1625 }
55e303ae 1626 }
2d21ac55
A
1627#if CONFIG_EMBEDDED
1628 {
1629 int percent_avail;
0b4e3aa0 1630
2d21ac55
A
1631 /*
1632 * Decide if we need to send a memory status notification.
1633 */
1634 percent_avail =
1635 (vm_page_active_count + vm_page_inactive_count +
1636 vm_page_speculative_count + vm_page_free_count +
cf7d32b8 1637 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
2d21ac55
A
1638 atop_64(max_mem);
1639 if (percent_avail >= (kern_memorystatus_level + 5) ||
1640 percent_avail <= (kern_memorystatus_level - 5)) {
1641 kern_memorystatus_level = percent_avail;
1642 thread_wakeup((event_t)&kern_memorystatus_wakeup);
1643 }
1644 }
1645#endif
1646 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
2d21ac55 1647 counter(c_vm_pageout_scan_block++);
1c79356b 1648
91447636 1649 vm_page_unlock_queues();
2d21ac55
A
1650
1651 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
b0d623f7 1652
91447636
A
1653 thread_block(THREAD_CONTINUE_NULL);
1654
1655 vm_page_lock_queues();
1656 delayed_unlock = 1;
1657
1658 iq->pgo_throttled = FALSE;
0b4e3aa0 1659
2d21ac55 1660 if (loop_count >= vm_page_inactive_count)
55e303ae 1661 loop_count = 0;
91447636
A
1662 inactive_burst_count = 0;
1663
1c79356b
A
1664 goto Restart;
1665 /*NOTREACHED*/
1666 }
1667
91447636
A
1668
1669 flow_control.state = FCS_IDLE;
1670consider_inactive:
1671 loop_count++;
1672 inactive_burst_count++;
1c79356b 1673 vm_pageout_inactive++;
9bccf70c 1674
2d21ac55
A
1675 /* Choose a victim. */
1676
1677 while (1) {
1678 m = NULL;
91447636 1679
b0d623f7
A
1680 if (IP_VALID(memory_manager_default)) {
1681 assert(vm_page_throttled_count == 0);
1682 assert(queue_empty(&vm_page_queue_throttled));
91447636 1683 }
2d21ac55
A
1684
1685 /*
b0d623f7 1686 * The most eligible pages are ones we paged in speculatively,
2d21ac55
A
1687 * but which have not yet been touched.
1688 */
1689 if ( !queue_empty(&sq->age_q) ) {
1690 m = (vm_page_t) queue_first(&sq->age_q);
1691 break;
9bccf70c 1692 }
2d21ac55
A
1693 /*
1694 * Time for a zero-filled inactive page?
1695 */
1696 if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
1697 queue_empty(&vm_page_queue_inactive)) {
1698 if ( !queue_empty(&vm_page_queue_zf) ) {
1699 m = (vm_page_t) queue_first(&vm_page_queue_zf);
1700 zf_run_count++;
1701 break;
1702 }
1703 }
1704 /*
1705 * It's either a normal inactive page or nothing.
1706 */
1707 if ( !queue_empty(&vm_page_queue_inactive) ) {
1708 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
1709 zf_run_count = 0;
1710 break;
1711 }
1712
1713 panic("vm_pageout: no victim");
9bccf70c 1714 }
2d21ac55
A
1715
1716 assert(!m->active && (m->inactive || m->speculative || m->throttled));
91447636
A
1717 assert(!m->laundry);
1718 assert(m->object != kernel_object);
2d21ac55
A
1719 assert(m->phys_page != vm_page_guard_addr);
1720
b0d623f7
A
1721 if (!m->speculative) {
1722 vm_pageout_stats[vm_pageout_stat_now].considered++;
1723 }
1724
2d21ac55 1725 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1c79356b 1726
91447636 1727 /*
2d21ac55
A
1728 * check to see if we currently are working
1729 * with the same object... if so, we've
1730 * already got the lock
91447636
A
1731 */
1732 if (m->object != object) {
2d21ac55
A
1733 /*
1734 * the object associated with candidate page is
1735 * different from the one we were just working
1736 * with... dump the lock if we still own it
1737 */
91447636
A
1738 if (object != NULL) {
1739 vm_object_unlock(object);
1740 object = NULL;
2d21ac55 1741 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
91447636 1742 }
2d21ac55
A
1743 /*
1744 * Try to lock object; since we've already got the
1745 * page queues lock, we can only 'try' for this one.
1746 * if the 'try' fails, we need to do a mutex_pause
1747 * to allow the owner of the object lock a chance to
1748 * run... otherwise, we're likely to trip over this
1749 * object in the same state as we work our way through
1750 * the queue... clumps of pages associated with the same
1751 * object are fairly typical on the inactive and active queues
1752 */
1753 if (!vm_object_lock_try_scan(m->object)) {
b0d623f7
A
1754 vm_pageout_inactive_nolock++;
1755
1756 requeue_page:
91447636
A
1757 /*
1758 * Move page to end and continue.
1759 * Don't re-issue ticket
1760 */
1761 if (m->zero_fill) {
b0d623f7
A
1762 if (m->speculative) {
1763 panic("vm_pageout_scan(): page %p speculative and zero-fill !?\n", m);
1764 }
1765 assert(!m->speculative);
91447636
A
1766 queue_remove(&vm_page_queue_zf, m,
1767 vm_page_t, pageq);
1768 queue_enter(&vm_page_queue_zf, m,
1769 vm_page_t, pageq);
2d21ac55
A
1770 } else if (m->speculative) {
1771 remque(&m->pageq);
1772 m->speculative = FALSE;
1773 vm_page_speculative_count--;
1774
1775 /*
b0d623f7 1776 * move to the head of the inactive queue
2d21ac55
A
1777 * to get it out of the way... the speculative
1778 * queue is generally too small to depend
1779 * on there being enough pages from other
1780 * objects to make cycling it back on the
1781 * same queue a winning proposition
1782 */
b0d623f7
A
1783 queue_enter_first(&vm_page_queue_inactive, m,
1784 vm_page_t, pageq);
2d21ac55
A
1785 m->inactive = TRUE;
1786 vm_page_inactive_count++;
1787 token_new_pagecount++;
1788 } else if (m->throttled) {
1789 queue_remove(&vm_page_queue_throttled, m,
1790 vm_page_t, pageq);
1791 m->throttled = FALSE;
1792 vm_page_throttled_count--;
cf7d32b8 1793
2d21ac55
A
1794 /*
1795 * not throttled any more, so can stick
1796 * it on the inactive queue.
1797 */
1798 queue_enter(&vm_page_queue_inactive, m,
1799 vm_page_t, pageq);
1800 m->inactive = TRUE;
1801 vm_page_inactive_count++;
1802 token_new_pagecount++;
91447636
A
1803 } else {
1804 queue_remove(&vm_page_queue_inactive, m,
1805 vm_page_t, pageq);
2d21ac55
A
1806#if MACH_ASSERT
1807 vm_page_inactive_count--; /* balance for purgeable queue asserts */
1808#endif
cf7d32b8 1809 vm_purgeable_q_advance_all();
2d21ac55 1810
91447636
A
1811 queue_enter(&vm_page_queue_inactive, m,
1812 vm_page_t, pageq);
2d21ac55
A
1813#if MACH_ASSERT
1814 vm_page_inactive_count++; /* balance for purgeable queue asserts */
1815#endif
1816 token_new_pagecount++;
55e303ae 1817 }
2d21ac55
A
1818 pmap_clear_reference(m->phys_page);
1819 m->reference = FALSE;
1820
2d21ac55
A
1821 if ( !queue_empty(&sq->age_q) )
1822 m = (vm_page_t) queue_first(&sq->age_q);
1823 else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
1824 queue_empty(&vm_page_queue_inactive)) {
1825 if ( !queue_empty(&vm_page_queue_zf) )
1826 m = (vm_page_t) queue_first(&vm_page_queue_zf);
1827 } else if ( !queue_empty(&vm_page_queue_inactive) ) {
1828 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
1829 }
1830 /*
1831 * this is the next object we're going to be interested in
1832 * try to make sure it's available after the mutex_yield
1833 * returns control
1834 */
1835 vm_pageout_scan_wants_object = m->object;
1836
91447636
A
1837 /*
1838 * force us to dump any collected free pages
1839 * and to pause before moving on
1840 */
2d21ac55 1841 try_failed = TRUE;
55e303ae 1842
91447636 1843 goto done_with_inactivepage;
1c79356b 1844 }
91447636 1845 object = m->object;
2d21ac55 1846 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
0b4e3aa0 1847
2d21ac55 1848 try_failed = FALSE;
1c79356b
A
1849 }
1850
1c79356b 1851 /*
55e303ae
A
1852 * Paging out pages of external objects which
1853 * are currently being created must be avoided.
1854 * The pager may claim memory, thus leading to a
1855 * possible deadlock between it and the pageout thread,
1856 * if such pages are finally chosen. The remaining assumption
1857 * is that there will finally be enough available pages in the
1858 * inactive pool to page out in order to satisfy all memory
1859 * claimed by the thread which concurrently creates the pager.
1c79356b 1860 */
1c79356b
A
1861 if (!object->pager_initialized && object->pager_created) {
1862 /*
1863 * Move page to end and continue, hoping that
1864 * there will be enough other inactive pages to
1865 * page out so that the thread which currently
1866 * initializes the pager will succeed.
0b4e3aa0
A
1867 * Don't re-grant the ticket; the page should
1868 * be pulled from the queue and paged out whenever
1869 * one of its logically adjacent fellows is
1870 * targeted.
1c79356b 1871 */
1c79356b 1872 vm_pageout_inactive_avoid++;
b0d623f7 1873 goto requeue_page;
91447636 1874 }
1c79356b 1875 /*
2d21ac55 1876 * Remove the page from its list.
1c79356b 1877 */
2d21ac55
A
1878 if (m->speculative) {
1879 remque(&m->pageq);
b0d623f7 1880 page_prev_state = PAGE_STATE_SPECULATIVE;
2d21ac55
A
1881 m->speculative = FALSE;
1882 vm_page_speculative_count--;
1883 } else if (m->throttled) {
1884 queue_remove(&vm_page_queue_throttled, m, vm_page_t, pageq);
b0d623f7 1885 page_prev_state = PAGE_STATE_THROTTLED;
2d21ac55
A
1886 m->throttled = FALSE;
1887 vm_page_throttled_count--;
9bccf70c 1888 } else {
2d21ac55
A
1889 if (m->zero_fill) {
1890 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
b0d623f7 1891 page_prev_state = PAGE_STATE_ZEROFILL;
2d21ac55
A
1892 vm_zf_queue_count--;
1893 } else {
b0d623f7 1894 page_prev_state = PAGE_STATE_INACTIVE;
2d21ac55
A
1895 queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
1896 }
1897 m->inactive = FALSE;
1898 if (!m->fictitious)
1899 vm_page_inactive_count--;
b0d623f7 1900 vm_purgeable_q_advance_all();
2d21ac55
A
1901 }
1902
91447636
A
1903 m->pageq.next = NULL;
1904 m->pageq.prev = NULL;
1c79356b 1905
2d21ac55
A
1906 if ( !m->fictitious && catch_up_count)
1907 catch_up_count--;
1908
1909 /*
1910 * ENCRYPTED SWAP:
1911 * if this page has already been picked up as part of a
1912 * page-out cluster, it will be busy because it is being
1913 * encrypted (see vm_object_upl_request()). But we still
1914 * want to demote it from "clean-in-place" (aka "adjacent")
1915 * to "clean-and-free" (aka "target"), so let's ignore its
1916 * "busy" bit here and proceed to check for "cleaning" a
1917 * little bit below...
1918 */
1919 if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
1c79356b
A
1920 /*
1921 * Somebody is already playing with this page.
1922 * Leave it off the pageout queues.
2d21ac55 1923 *
1c79356b 1924 */
1c79356b 1925 vm_pageout_inactive_busy++;
91447636
A
1926
1927 goto done_with_inactivepage;
1c79356b
A
1928 }
1929
1930 /*
1931 * If it's absent or in error, we can reclaim the page.
1932 */
1933
1934 if (m->absent || m->error) {
1935 vm_pageout_inactive_absent++;
91447636
A
1936reclaim_page:
1937 if (vm_pageout_deadlock_target) {
1938 vm_pageout_scan_inactive_throttle_success++;
1939 vm_pageout_deadlock_target--;
1940 }
2d21ac55
A
1941
1942 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
1943
b0d623f7 1944 if (object->internal) {
2d21ac55
A
1945 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
1946 } else {
1947 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
1948 }
b0d623f7 1949 vm_page_free_prepare_queues(m);
2d21ac55 1950
b0d623f7
A
1951 /*
1952 * remove page from object here since we're already
1953 * behind the object lock... defer the rest of the work
1954 * we'd normally do in vm_page_free_prepare_object
1955 * until 'vm_page_free_list' is called
1956 */
1957 if (m->tabled)
1958 vm_page_remove(m, TRUE);
55e303ae 1959
91447636
A
1960 assert(m->pageq.next == NULL &&
1961 m->pageq.prev == NULL);
55e303ae
A
1962 m->pageq.next = (queue_entry_t)local_freeq;
1963 local_freeq = m;
91447636 1964 local_freed++;
55e303ae 1965
91447636
A
1966 inactive_burst_count = 0;
1967
b0d623f7
A
1968 if(page_prev_state != PAGE_STATE_SPECULATIVE) {
1969 vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
1970 page_prev_state = 0;
1971 }
1972
91447636 1973 goto done_with_inactivepage;
1c79356b
A
1974 }
1975
1976 assert(!m->private);
1977 assert(!m->fictitious);
1978
1979 /*
1980 * If already cleaning this page in place, convert from
1981 * "adjacent" to "target". We can leave the page mapped,
1982 * and vm_pageout_object_terminate will determine whether
1983 * to free or reactivate.
1984 */
1985
1986 if (m->cleaning) {
0b4e3aa0
A
1987 m->busy = TRUE;
1988 m->pageout = TRUE;
1989 m->dump_cleaning = TRUE;
1990 vm_page_wire(m);
55e303ae 1991
91447636
A
1992 CLUSTER_STAT(vm_pageout_cluster_conversions++);
1993
1994 inactive_burst_count = 0;
1995
1996 goto done_with_inactivepage;
1c79356b
A
1997 }
1998
b0d623f7
A
1999 /*
2000 * If the object is empty, the page must be reclaimed even
2001 * if dirty or used.
2002 * If the page belongs to a volatile object, we stick it back
2003 * on.
2004 */
2005 if (object->copy == VM_OBJECT_NULL) {
2006 if (object->purgable == VM_PURGABLE_EMPTY) {
2007 m->busy = TRUE;
2008 if (m->pmapped == TRUE) {
2009 /* unmap the page */
2010 refmod_state = pmap_disconnect(m->phys_page);
2011 if (refmod_state & VM_MEM_MODIFIED) {
2012 m->dirty = TRUE;
2013 }
2014 }
2015 if (m->dirty || m->precious) {
2016 /* we saved the cost of cleaning this page ! */
2017 vm_page_purged_count++;
2018 }
2019 goto reclaim_page;
2020 }
2021 if (object->purgable == VM_PURGABLE_VOLATILE) {
2022 /* if it's wired, we can't put it on our queue */
2023 assert(!VM_PAGE_WIRED(m));
2024 /* just stick it back on! */
2025 goto reactivate_page;
2026 }
2027 }
2028
1c79356b
A
2029 /*
2030 * If it's being used, reactivate.
2031 * (Fictitious pages are either busy or absent.)
2d21ac55
A
2032 * First, update the reference and dirty bits
2033 * to make sure the page is unreferenced.
1c79356b 2034 */
2d21ac55
A
2035 refmod_state = -1;
2036
2037 if (m->reference == FALSE && m->pmapped == TRUE) {
91447636
A
2038 refmod_state = pmap_get_refmod(m->phys_page);
2039
2040 if (refmod_state & VM_MEM_REFERENCED)
2041 m->reference = TRUE;
2042 if (refmod_state & VM_MEM_MODIFIED)
2043 m->dirty = TRUE;
2044 }
b0d623f7
A
2045
2046 if (m->reference || m->dirty) {
2047 /* deal with a rogue "reusable" page */
2048 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
2049 }
2050
2d21ac55
A
2051 if (m->reference && !m->no_cache) {
2052 /*
2053 * The page we pulled off the inactive list has
2054 * been referenced. It is possible for other
2055 * processors to be touching pages faster than we
2056 * can clear the referenced bit and traverse the
2057 * inactive queue, so we limit the number of
2058 * reactivations.
2059 */
2060 if (++reactivated_this_call >= reactivate_limit) {
2061 vm_pageout_reactivation_limit_exceeded++;
2062 } else if (catch_up_count) {
2063 vm_pageout_catch_ups++;
2064 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
2065 vm_pageout_inactive_force_reclaim++;
2066 } else {
b0d623f7 2067 uint32_t isinuse;
2d21ac55 2068reactivate_page:
b0d623f7
A
2069 if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
2070 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
2071 /*
2072 * no explicit mappings of this object exist
2073 * and it's not open via the filesystem
2074 */
2075 vm_page_deactivate(m);
2076 vm_pageout_inactive_deactivated++;
2077 } else {
2078 /*
2079 * The page was/is being used, so put back on active list.
2080 */
2081 vm_page_activate(m);
2082 VM_STAT_INCR(reactivations);
2083 }
2d21ac55
A
2084 vm_pageout_inactive_used++;
2085 inactive_burst_count = 0;
55e303ae 2086
2d21ac55
A
2087 goto done_with_inactivepage;
2088 }
2089 /*
2090 * Make sure we call pmap_get_refmod() if it
2091 * wasn't already called just above, to update
2092 * the dirty bit.
2093 */
2094 if ((refmod_state == -1) && !m->dirty && m->pmapped) {
2095 refmod_state = pmap_get_refmod(m->phys_page);
2096 if (refmod_state & VM_MEM_MODIFIED)
2097 m->dirty = TRUE;
2098 }
2099 forced_reclaim = TRUE;
2100 } else {
2101 forced_reclaim = FALSE;
1c79356b
A
2102 }
2103
91447636
A
2104 XPR(XPR_VM_PAGEOUT,
2105 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
b0d623f7 2106 object, m->offset, m, 0,0);
0b4e3aa0 2107
91447636
A
2108 /*
2109 * we've got a candidate page to steal...
2110 *
2111 * m->dirty is up to date courtesy of the
2112 * preceding check for m->reference... if
2113 * we get here, then m->reference had to be
2d21ac55
A
2114 * FALSE (or possibly "reactivate_limit" was
2115 * exceeded), but in either case we called
2116 * pmap_get_refmod() and updated both
2117 * m->reference and m->dirty
91447636
A
2118 *
2119 * if it's dirty or precious we need to
2120 * see if the target queue is throttled
2121 * if it is, we need to skip over it by moving it back
2122 * to the end of the inactive queue
2123 */
b0d623f7 2124
91447636
A
2125 inactive_throttled = FALSE;
2126
2127 if (m->dirty || m->precious) {
2128 if (object->internal) {
2d21ac55 2129 if (VM_PAGE_Q_THROTTLED(iq))
91447636
A
2130 inactive_throttled = TRUE;
2131 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2d21ac55 2132 inactive_throttled = TRUE;
1c79356b 2133 }
91447636
A
2134 }
2135 if (inactive_throttled == TRUE) {
2d21ac55
A
2136throttle_inactive:
2137 if (!IP_VALID(memory_manager_default) &&
2138 object->internal &&
2139 (object->purgable == VM_PURGABLE_DENY ||
cf7d32b8
A
2140 object->purgable == VM_PURGABLE_NONVOLATILE ||
2141 object->purgable == VM_PURGABLE_VOLATILE )) {
2d21ac55 2142 queue_enter(&vm_page_queue_throttled, m,
91447636 2143 vm_page_t, pageq);
2d21ac55
A
2144 m->throttled = TRUE;
2145 vm_page_throttled_count++;
91447636 2146 } else {
2d21ac55
A
2147 if (m->zero_fill) {
2148 queue_enter(&vm_page_queue_zf, m,
2149 vm_page_t, pageq);
2150 vm_zf_queue_count++;
2151 } else
2152 queue_enter(&vm_page_queue_inactive, m,
2153 vm_page_t, pageq);
2154 m->inactive = TRUE;
2155 if (!m->fictitious) {
2156 vm_page_inactive_count++;
2157 token_new_pagecount++;
2158 }
1c79356b 2159 }
91447636 2160 vm_pageout_scan_inactive_throttled++;
91447636 2161 goto done_with_inactivepage;
1c79356b 2162 }
2d21ac55 2163
1c79356b 2164 /*
91447636
A
2165 * we've got a page that we can steal...
2166 * eliminate all mappings and make sure
2167 * we have the up-to-date modified state
2168 * first take the page BUSY, so that no new
2169 * mappings can be made
1c79356b 2170 */
1c79356b 2171 m->busy = TRUE;
55e303ae 2172
91447636
A
2173 /*
2174 * if we need to do a pmap_disconnect then we
2175 * need to re-evaluate m->dirty since the pmap_disconnect
2176 * provides the true state atomically... the
2177 * page was still mapped up to the pmap_disconnect
2178 * and may have been dirtied at the last microsecond
2179 *
2180 * we also check for the page being referenced 'late'
2181 * if it was, we first need to do a WAKEUP_DONE on it
2182 * since we already set m->busy = TRUE, before
2183 * going off to reactivate it
2184 *
2d21ac55
A
2185 * Note that if 'pmapped' is FALSE then the page is not,
2186 * and has not been, in any map, so there is no point calling
2187 * pmap_disconnect(). m->dirty and/or m->reference could
2188 * have been set in anticipation of likely usage of the page.
91447636 2189 */
2d21ac55 2190 if (m->pmapped == TRUE) {
91447636 2191 refmod_state = pmap_disconnect(m->phys_page);
0b4e3aa0 2192
91447636
A
2193 if (refmod_state & VM_MEM_MODIFIED)
2194 m->dirty = TRUE;
2195 if (refmod_state & VM_MEM_REFERENCED) {
2d21ac55
A
2196
2197 /* If m->reference is already set, this page must have
2198 * already failed the reactivate_limit test, so don't
2199 * bump the counts twice.
2200 */
2201 if ( ! m->reference ) {
2202 m->reference = TRUE;
2203 if (forced_reclaim ||
2204 ++reactivated_this_call >= reactivate_limit)
2205 vm_pageout_reactivation_limit_exceeded++;
2206 else {
2207 PAGE_WAKEUP_DONE(m);
2208 goto reactivate_page;
2209 }
2210 }
91447636
A
2211 }
2212 }
2d21ac55
A
2213 /*
2214 * reset our count of pages that have been reclaimed
2215 * since the last page was 'stolen'
2216 */
2217 inactive_reclaim_run = 0;
2218
1c79356b
A
2219 /*
2220 * If it's clean and not precious, we can free the page.
2221 */
1c79356b 2222 if (!m->dirty && !m->precious) {
b0d623f7
A
2223 if (m->zero_fill)
2224 vm_pageout_inactive_zf++;
1c79356b 2225 vm_pageout_inactive_clean++;
b0d623f7 2226
1c79356b
A
2227 goto reclaim_page;
2228 }
2d21ac55
A
2229
2230 /*
2231 * The page may have been dirtied since the last check
2232 * for a throttled target queue (which may have been skipped
2233 * if the page was clean then). With the dirty page
2234 * disconnected here, we can make one final check.
2235 */
2236 {
2237 boolean_t disconnect_throttled = FALSE;
2238 if (object->internal) {
2239 if (VM_PAGE_Q_THROTTLED(iq))
2240 disconnect_throttled = TRUE;
2241 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2242 disconnect_throttled = TRUE;
2243 }
2244
2245 if (disconnect_throttled == TRUE) {
2246 PAGE_WAKEUP_DONE(m);
2247 goto throttle_inactive;
2248 }
2249 }
2250
b0d623f7
A
2251 vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
2252
91447636 2253 vm_pageout_cluster(m);
1c79356b 2254
b0d623f7
A
2255 if (m->zero_fill)
2256 vm_pageout_inactive_zf++;
91447636 2257 vm_pageout_inactive_dirty++;
1c79356b 2258
91447636 2259 inactive_burst_count = 0;
1c79356b 2260
91447636 2261done_with_inactivepage:
2d21ac55 2262 if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
1c79356b 2263
91447636 2264 if (object != NULL) {
b0d623f7 2265 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
91447636
A
2266 vm_object_unlock(object);
2267 object = NULL;
2268 }
2269 if (local_freeq) {
b0d623f7
A
2270 vm_page_unlock_queues();
2271 vm_page_free_list(local_freeq, TRUE);
91447636 2272
2d21ac55 2273 local_freeq = NULL;
91447636 2274 local_freed = 0;
b0d623f7
A
2275 vm_page_lock_queues();
2276 } else
2277 lck_mtx_yield(&vm_page_queue_lock);
2d21ac55
A
2278
2279 delayed_unlock = 1;
1c79356b 2280 }
91447636
A
2281 /*
2282 * back to top of pageout scan loop
2283 */
1c79356b 2284 }
1c79356b
A
2285}
2286
1c79356b 2287
1c79356b
A
2288int vm_page_free_count_init;
2289
2290void
2291vm_page_free_reserve(
2292 int pages)
2293{
2294 int free_after_reserve;
2295
2296 vm_page_free_reserved += pages;
2297
2298 free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
2299
2300 vm_page_free_min = vm_page_free_reserved +
2301 VM_PAGE_FREE_MIN(free_after_reserve);
2302
2d21ac55
A
2303 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
2304 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
2305
1c79356b
A
2306 vm_page_free_target = vm_page_free_reserved +
2307 VM_PAGE_FREE_TARGET(free_after_reserve);
2308
2d21ac55
A
2309 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
2310 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
2311
1c79356b
A
2312 if (vm_page_free_target < vm_page_free_min + 5)
2313 vm_page_free_target = vm_page_free_min + 5;
2d21ac55 2314
b0d623f7
A
2315 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
2316 vm_page_creation_throttle = vm_page_free_target / 2;
1c79356b
A
2317}
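/*
 * Illustrative sketch (never compiled): a re-statement of the threshold
 * arithmetic in vm_page_free_reserve() above.  The 10% / 15% ratios below
 * are assumed stand-ins for the real VM_PAGE_FREE_MIN() /
 * VM_PAGE_FREE_TARGET() macros, whose exact definitions live elsewhere;
 * everything else mirrors the derivations in the function above.
 */
#if 0	/* illustrative sketch only */
struct example_thresholds {
	unsigned int	free_min;
	unsigned int	free_target;
	unsigned int	throttle_limit;
	unsigned int	creation_throttle;
};

static struct example_thresholds
example_free_thresholds(unsigned int free_count_init, unsigned int reserved)
{
	struct example_thresholds t;
	unsigned int free_after_reserve = free_count_init - reserved;

	/* assumed ratios standing in for VM_PAGE_FREE_MIN/TARGET */
	t.free_min    = reserved + (free_after_reserve * 10) / 100;
	t.free_target = reserved + (free_after_reserve * 15) / 100;

	if (t.free_target < t.free_min + 5)
		t.free_target = t.free_min + 5;

	/* same derivations as the tail of vm_page_free_reserve() */
	t.throttle_limit    = t.free_target - (t.free_target / 3);
	t.creation_throttle = t.free_target / 2;

	return t;
}
#endif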
2318
2319/*
2320 * vm_pageout is the high level pageout daemon.
2321 */
2322
55e303ae
A
2323void
2324vm_pageout_continue(void)
2325{
2d21ac55 2326 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
55e303ae
A
2327 vm_pageout_scan_event_counter++;
2328 vm_pageout_scan();
2329 /* we hold vm_page_queue_free_lock now */
2330 assert(vm_page_free_wanted == 0);
2d21ac55 2331 assert(vm_page_free_wanted_privileged == 0);
55e303ae 2332 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
b0d623f7 2333 lck_mtx_unlock(&vm_page_queue_free_lock);
55e303ae
A
2334
2335 counter(c_vm_pageout_block++);
91447636 2336 thread_block((thread_continue_t)vm_pageout_continue);
55e303ae
A
2337 /*NOTREACHED*/
2338}
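/*
 * Illustrative sketch (not compiled) of the assert_wait()/thread_block()
 * continuation idiom used by vm_pageout_continue() above: the thread arms
 * a wait on an event, then gives up its stack by blocking with a
 * continuation; when the event is posted the thread restarts at the top of
 * the continuation function rather than returning from thread_block().
 * The names my_event / my_worker_continue are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int	my_event;		/* hypothetical wait channel */

static void
my_worker_continue(void)
{
	/* ... perform one round of work ... */

	assert_wait((event_t) &my_event, THREAD_UNINT);
	thread_block((thread_continue_t) my_worker_continue);
	/* does not return; the thread resumes at my_worker_continue() */
	/*NOTREACHED*/
}
#endif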
1c79356b 2339
91447636 2340
91447636 2341#ifdef FAKE_DEADLOCK
1c79356b 2342
91447636
A
2343#define FAKE_COUNT 5000
2344
2345int internal_count = 0;
2346int fake_deadlock = 0;
2347
2348#endif
2349
2350static void
2351vm_pageout_iothread_continue(struct vm_pageout_queue *q)
2352{
2353 vm_page_t m = NULL;
2354 vm_object_t object;
2355 boolean_t need_wakeup;
2d21ac55
A
2356 memory_object_t pager;
2357 thread_t self = current_thread();
91447636 2358
2d21ac55
A
2359 if ((vm_pageout_internal_iothread != THREAD_NULL)
2360 && (self == vm_pageout_external_iothread )
2361 && (self->options & TH_OPT_VMPRIV))
2362 self->options &= ~TH_OPT_VMPRIV;
2363
2364 vm_page_lockspin_queues();
91447636
A
2365
2366 while ( !queue_empty(&q->pgo_pending) ) {
2367
2368 q->pgo_busy = TRUE;
2369 queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
b0d623f7 2370 VM_PAGE_CHECK(m);
91447636 2371 m->pageout_queue = FALSE;
91447636
A
2372 m->pageq.next = NULL;
2373 m->pageq.prev = NULL;
b0d623f7
A
2374 vm_page_unlock_queues();
2375
91447636
A
2376#ifdef FAKE_DEADLOCK
2377 if (q == &vm_pageout_queue_internal) {
2378 vm_offset_t addr;
2379 int pg_count;
2380
2381 internal_count++;
2382
2383 if ((internal_count == FAKE_COUNT)) {
2384
2385 pg_count = vm_page_free_count + vm_page_free_reserved;
2386
2387 if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
2388 kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
2389 }
2390 internal_count = 0;
2391 fake_deadlock++;
2392 }
2393 }
2394#endif
2395 object = m->object;
2396
2d21ac55
A
2397 vm_object_lock(object);
2398
91447636 2399 if (!object->pager_initialized) {
91447636
A
2400
2401 /*
2402 * If there is no memory object for the page, create
2403 * one and hand it to the default pager.
2404 */
2405
2406 if (!object->pager_initialized)
0c530ab8
A
2407 vm_object_collapse(object,
2408 (vm_object_offset_t) 0,
2409 TRUE);
91447636
A
2410 if (!object->pager_initialized)
2411 vm_object_pager_create(object);
2412 if (!object->pager_initialized) {
2413 /*
2414 * Still no pager for the object.
2415 * Reactivate the page.
2416 *
2417 * Should only happen if there is no
2418 * default pager.
2419 */
2d21ac55 2420 vm_page_lockspin_queues();
b0d623f7
A
2421
2422 vm_pageout_queue_steal(m, TRUE);
91447636
A
2423 vm_pageout_dirty_no_pager++;
2424 vm_page_activate(m);
b0d623f7 2425
91447636
A
2426 vm_page_unlock_queues();
2427
2428 /*
2429 * And we are done with it.
2430 */
2431 PAGE_WAKEUP_DONE(m);
2432
2433 vm_object_paging_end(object);
2434 vm_object_unlock(object);
2435
2d21ac55 2436 vm_page_lockspin_queues();
91447636 2437 continue;
2d21ac55
A
2438 }
2439 }
2440 pager = object->pager;
2441 if (pager == MEMORY_OBJECT_NULL) {
2442 /*
2443 * This pager has been destroyed by either
2444 * memory_object_destroy or vm_object_destroy, and
2445 * so there is nowhere for the page to go.
2446 * Just free the page... VM_PAGE_FREE takes
2447 * care of cleaning up all the state...
2448 * including doing the vm_pageout_throttle_up
2449 */
91447636 2450
2d21ac55 2451 VM_PAGE_FREE(m);
91447636 2452
2d21ac55 2453 vm_object_paging_end(object);
91447636 2454 vm_object_unlock(object);
2d21ac55
A
2455
2456 vm_page_lockspin_queues();
2457 continue;
91447636 2458 }
b0d623f7 2459 VM_PAGE_CHECK(m);
2d21ac55 2460 vm_object_unlock(object);
91447636
A
2461 /*
2462 * we expect the paging_in_progress reference to have
2463 * already been taken on the object before it was added
2464 * to the appropriate pageout I/O queue... this will
2465 * keep the object from being terminated and/or the
2466 * paging_offset from changing until the I/O has
2467 * completed... therefore no need to lock the object to
2468 * pull the paging_offset from it.
2469 *
2470 * Send the data to the pager.
2471 * any pageout clustering happens there
2472 */
2d21ac55 2473 memory_object_data_return(pager,
91447636
A
2474 m->offset + object->paging_offset,
2475 PAGE_SIZE,
2476 NULL,
2477 NULL,
2478 FALSE,
2479 FALSE,
2480 0);
2481
2482 vm_object_lock(object);
2483 vm_object_paging_end(object);
2484 vm_object_unlock(object);
2485
2d21ac55 2486 vm_page_lockspin_queues();
91447636
A
2487 }
2488 assert_wait((event_t) q, THREAD_UNINT);
2489
2490
2491 if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
2492 q->pgo_throttled = FALSE;
2493 need_wakeup = TRUE;
2494 } else
2495 need_wakeup = FALSE;
2496
2497 q->pgo_busy = FALSE;
2498 q->pgo_idle = TRUE;
2499 vm_page_unlock_queues();
2500
2501 if (need_wakeup == TRUE)
2502 thread_wakeup((event_t) &q->pgo_laundry);
2503
2504 thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
2505 /*NOTREACHED*/
2506}
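/*
 * Illustrative sketch (not compiled) of the drain pattern used by
 * vm_pageout_iothread_continue() above: dequeue one page while holding the
 * page-queues lock, drop the lock for the expensive per-page work, then
 * retake it before testing for more work.  example_drain is a hypothetical
 * name; the queue and locking primitives are the ones used above.
 */
#if 0	/* illustrative sketch only */
static void
example_drain(struct vm_pageout_queue *q)
{
	vm_page_t	m;

	vm_page_lockspin_queues();

	while ( !queue_empty(&q->pgo_pending) ) {
		queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
		vm_page_unlock_queues();

		/* ... per-page work runs with the queue lock dropped ... */

		vm_page_lockspin_queues();
	}
	vm_page_unlock_queues();
}
#endif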
2507
2508
2509static void
2510vm_pageout_iothread_external(void)
2511{
2d21ac55
A
2512 thread_t self = current_thread();
2513
2514 self->options |= TH_OPT_VMPRIV;
91447636
A
2515
2516 vm_pageout_iothread_continue(&vm_pageout_queue_external);
2517 /*NOTREACHED*/
2518}
2519
2520
2521static void
2522vm_pageout_iothread_internal(void)
2523{
2524 thread_t self = current_thread();
2525
2526 self->options |= TH_OPT_VMPRIV;
2527
2528 vm_pageout_iothread_continue(&vm_pageout_queue_internal);
2529 /*NOTREACHED*/
2530}
2531
b0d623f7
A
2532kern_return_t
2533vm_set_buffer_cleanup_callout(boolean_t (*func)(void))
2534{
2535 if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
2536 return KERN_SUCCESS;
2537 } else {
2538 return KERN_FAILURE; /* Already set */
2539 }
2540}
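/*
 * Hypothetical usage sketch (not compiled) for the registration routine
 * above: a buffer-cache layer registers its collect callout exactly once;
 * the compare-and-swap guarantees that only the first caller succeeds.
 * my_buffer_cache_collect / my_buffer_cache_init are assumed names, not
 * part of this file.
 */
#if 0	/* illustrative sketch only */
static boolean_t
my_buffer_cache_collect(void)
{
	/* return TRUE if a large zone element was freed */
	return FALSE;
}

static void
my_buffer_cache_init(void)
{
	if (vm_set_buffer_cleanup_callout(my_buffer_cache_collect) != KERN_SUCCESS) {
		/* some other subsystem registered first */
	}
}
#endif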
2541
91447636
A
2542static void
2543vm_pageout_garbage_collect(int collect)
2544{
2545 if (collect) {
b0d623f7 2546 boolean_t buf_large_zfree = FALSE;
91447636
A
2547 stack_collect();
2548
2549 /*
2550 * consider_zone_gc should be last, because the other operations
2551 * might return memory to zones.
2552 */
2553 consider_machine_collect();
b0d623f7
A
2554 if (consider_buffer_cache_collect != NULL) {
2555 buf_large_zfree = (*consider_buffer_cache_collect)();
2556 }
2557 consider_zone_gc(buf_large_zfree);
91447636
A
2558
2559 consider_machine_adjust();
2560 }
2561
2562 assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
2563
2564 thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
2565 /*NOTREACHED*/
2566}
2567
2568
2569
2570void
2571vm_pageout(void)
2572{
2573 thread_t self = current_thread();
2574 thread_t thread;
2575 kern_return_t result;
2576 spl_t s;
2577
2578 /*
2579 * Set thread privileges.
2580 */
2581 s = splsched();
2582 thread_lock(self);
2583 self->priority = BASEPRI_PREEMPT - 1;
2584 set_sched_pri(self, self->priority);
2585 thread_unlock(self);
2d21ac55
A
2586
2587 if (!self->reserved_stack)
2588 self->reserved_stack = self->kernel_stack;
2589
91447636
A
2590 splx(s);
2591
2592 /*
2593 * Initialize some paging parameters.
2594 */
2595
2596 if (vm_pageout_idle_wait == 0)
2597 vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
2598
2599 if (vm_pageout_burst_wait == 0)
2600 vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
2601
2602 if (vm_pageout_empty_wait == 0)
2603 vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
2604
2605 if (vm_pageout_deadlock_wait == 0)
2606 vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
2607
2608 if (vm_pageout_deadlock_relief == 0)
2609 vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
2610
2611 if (vm_pageout_inactive_relief == 0)
2612 vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
2613
2614 if (vm_pageout_burst_active_throttle == 0)
2615 vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
2616
2617 if (vm_pageout_burst_inactive_throttle == 0)
2618 vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
2619
2620 /*
2621 * Set kernel task to low backing store privileged
55e303ae
A
2622 * status
2623 */
2624 task_lock(kernel_task);
2625 kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
2626 task_unlock(kernel_task);
2627
1c79356b 2628 vm_page_free_count_init = vm_page_free_count;
2d21ac55 2629
1c79356b
A
2630 /*
2631 * even if we've already called vm_page_free_reserve
2632 * call it again here to ensure that the targets are
2633 * accurately calculated (it uses vm_page_free_count_init)
2634 * calling it with an arg of 0 will not change the reserve
2635 * but will re-calculate free_min and free_target
2636 */
91447636
A
2637 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
2638 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
55e303ae 2639 } else
1c79356b
A
2640 vm_page_free_reserve(0);
2641
55e303ae 2642
91447636
A
2643 queue_init(&vm_pageout_queue_external.pgo_pending);
2644 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
2645 vm_pageout_queue_external.pgo_laundry = 0;
2646 vm_pageout_queue_external.pgo_idle = FALSE;
2647 vm_pageout_queue_external.pgo_busy = FALSE;
2648 vm_pageout_queue_external.pgo_throttled = FALSE;
55e303ae 2649
91447636 2650 queue_init(&vm_pageout_queue_internal.pgo_pending);
2d21ac55 2651 vm_pageout_queue_internal.pgo_maxlaundry = 0;
91447636
A
2652 vm_pageout_queue_internal.pgo_laundry = 0;
2653 vm_pageout_queue_internal.pgo_idle = FALSE;
2654 vm_pageout_queue_internal.pgo_busy = FALSE;
2655 vm_pageout_queue_internal.pgo_throttled = FALSE;
9bccf70c 2656
55e303ae 2657
2d21ac55
A
2658 /* internal pageout thread started when default pager registered first time */
2659 /* external pageout and garbage collection threads started here */
55e303ae 2660
2d21ac55
A
2661 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
2662 BASEPRI_PREEMPT - 1,
2663 &vm_pageout_external_iothread);
91447636
A
2664 if (result != KERN_SUCCESS)
2665 panic("vm_pageout_iothread_external: create failed");
55e303ae 2666
2d21ac55 2667 thread_deallocate(vm_pageout_external_iothread);
9bccf70c 2668
2d21ac55
A
2669 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
2670 MINPRI_KERNEL,
2671 &thread);
91447636
A
2672 if (result != KERN_SUCCESS)
2673 panic("vm_pageout_garbage_collect: create failed");
55e303ae 2674
91447636 2675 thread_deallocate(thread);
55e303ae 2676
8f6c56a5
A
2677 vm_object_reaper_init();
2678
2d21ac55 2679
91447636 2680 vm_pageout_continue();
2d21ac55
A
2681
2682 /*
2683 * Unreached code!
2684 *
2685 * The vm_pageout_continue() call above never returns, so the code below is never
2686 * executed. We take advantage of this to declare several DTrace VM related probe
2687 * points that our kernel doesn't have an analog for. These are probe points that
2688 * exist in Solaris and are in the DTrace documentation, so people may have written
2689 * scripts that use them. Declaring the probe points here means their scripts will
2690 * compile and execute, which we want for portability of the scripts, but since this
2691 * section of code is never reached, the probe points will simply never fire. Yes,
2692 * this is basically a hack. The problem is the DTrace probe points were chosen with
2693 * Solaris specific VM events in mind, not portability to different VM implementations.
2694 */
2695
2696 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
2697 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
2698 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
2699 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
2700 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
2701 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
2702 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
91447636 2703 /*NOTREACHED*/
9bccf70c
A
2704}
2705
2d21ac55
A
2706kern_return_t
2707vm_pageout_internal_start(void)
2708{
2709 kern_return_t result;
2710
2711 vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
2712 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
2713 if (result == KERN_SUCCESS)
2714 thread_deallocate(vm_pageout_internal_iothread);
2715 return result;
2716}
2717
1c79356b 2718
b0d623f7
A
2719/*
2720 * when marshalling pages into a UPL and subsequently committing
2721 * or aborting them, it is necessary to hold
2722 * the vm_page_queue_lock (a hot global lock) for certain operations
2723 * on the page... however, the majority of the work can be done
2724 * while merely holding the object lock... in fact there are certain
2725 * collections of pages that don't require any work brokered by the
2726 * vm_page_queue_lock... to mitigate the time spent behind the global
2727 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
2728 * while doing all of the work that doesn't require the vm_page_queue_lock...
2729 * then call dw_do_work to acquire the vm_page_queue_lock and do the
2730 * necessary work for each page... we will grab the busy bit on the page
2731 * if it's not already held so that dw_do_work can drop the object lock
2732 * if it can't immediately take the vm_page_queue_lock in order to compete
2733 * for the locks in the same order that vm_pageout_scan takes them.
2734 * the operation names are modeled after the names of the routines that
2735 * need to be called in order to make the changes very obvious in the
2736 * original loop
2737 */
2738
2739#define DELAYED_WORK_LIMIT 32
2740
2741#define DW_vm_page_unwire 0x01
2742#define DW_vm_page_wire 0x02
2743#define DW_vm_page_free 0x04
2744#define DW_vm_page_activate 0x08
2745#define DW_vm_page_deactivate_internal 0x10
2746#define DW_vm_page_speculate 0x20
2747#define DW_vm_page_lru 0x40
2748#define DW_vm_pageout_throttle_up 0x80
2749#define DW_PAGE_WAKEUP 0x100
2750#define DW_clear_busy 0x200
2751#define DW_clear_reference 0x400
2752#define DW_set_reference 0x800
2753
2754struct dw {
2755 vm_page_t dw_m;
2756 int dw_mask;
2757};
2758
2759
2760static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
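/*
 * Illustrative sketch (not compiled) of the two-pass scheme described in
 * the comment above: work is recorded in the on-stack dw_array while only
 * the object lock is held, then flushed through dw_do_work() -- which
 * takes the vm_page_queue_lock -- once the batch fills or the loop ends.
 * example_batched_loop and its arguments are hypothetical; the dw
 * machinery is the one defined above.
 */
#if 0	/* illustrative sketch only */
static void
example_batched_loop(vm_object_t object, vm_page_t *pages, int npages)
{
	struct dw	dw_array[DELAYED_WORK_LIMIT];
	struct dw	*dwp = &dw_array[0];
	int		dw_count = 0;
	int		i;

	for (i = 0; i < npages; i++) {
		dwp->dw_mask = 0;

		/* pass 1: decide what the page needs; no page-queues lock yet */
		dwp->dw_mask |= DW_vm_page_activate | DW_clear_busy | DW_PAGE_WAKEUP;
		dwp->dw_m = pages[i];
		dwp++;

		if (++dw_count >= DELAYED_WORK_LIMIT) {
			/* pass 2: flush the batch under the vm_page_queue_lock */
			dw_do_work(object, &dw_array[0], dw_count);

			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)
		dw_do_work(object, &dw_array[0], dw_count);
}
#endif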
2761
2762
2763
2764static upl_t
2765upl_create(int type, int flags, upl_size_t size)
0b4e3aa0
A
2766{
2767 upl_t upl;
2d21ac55
A
2768 int page_field_size = 0;
2769 int upl_flags = 0;
2770 int upl_size = sizeof(struct upl);
0b4e3aa0 2771
b0d623f7
A
2772 size = round_page_32(size);
2773
2d21ac55 2774 if (type & UPL_CREATE_LITE) {
b0d623f7 2775 page_field_size = (atop(size) + 7) >> 3;
55e303ae 2776 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
2d21ac55
A
2777
2778 upl_flags |= UPL_LITE;
55e303ae 2779 }
2d21ac55 2780 if (type & UPL_CREATE_INTERNAL) {
b0d623f7 2781 upl_size += (int) sizeof(struct upl_page_info) * atop(size);
2d21ac55
A
2782
2783 upl_flags |= UPL_INTERNAL;
0b4e3aa0 2784 }
2d21ac55
A
2785 upl = (upl_t)kalloc(upl_size + page_field_size);
2786
2787 if (page_field_size)
2788 bzero((char *)upl + upl_size, page_field_size);
2789
2790 upl->flags = upl_flags | flags;
0b4e3aa0
A
2791 upl->src_object = NULL;
2792 upl->kaddr = (vm_offset_t)0;
2793 upl->size = 0;
2794 upl->map_object = NULL;
2795 upl->ref_count = 1;
0c530ab8 2796 upl->highest_page = 0;
0b4e3aa0 2797 upl_lock_init(upl);
b0d623f7
A
2798 upl->vector_upl = NULL;
2799#if UPL_DEBUG
0b4e3aa0
A
2800 upl->ubc_alias1 = 0;
2801 upl->ubc_alias2 = 0;
b0d623f7
A
2802
2803 upl->upl_creator = current_thread();
2804 upl->upl_state = 0;
2805 upl->upl_commit_index = 0;
2806 bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
2807
2808 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
91447636 2809#endif /* UPL_DEBUG */
b0d623f7 2810
0b4e3aa0
A
2811 return(upl);
2812}
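/*
 * Worked example (assumptions only, not compiled): the allocation sizing
 * performed by upl_create() above for an INTERNAL | LITE upl.  With an
 * assumed 4KB page size and a page-rounded 1MB request, atop(size) is 256
 * pages, so the lite bitmap is 32 bytes (kept 4-byte aligned) and 256
 * upl_page_info_t entries follow the struct upl in the same allocation.
 */
#if 0	/* illustrative sketch only */
static int
example_internal_lite_upl_bytes(upl_size_t size)	/* size assumed page-rounded */
{
	int page_field_size = (atop(size) + 7) >> 3;		/* one bit per page */
	page_field_size = (page_field_size + 3) & 0xFFFFFFFC;	/* 4-byte aligned   */

	int upl_size = sizeof(struct upl)
	    + (int) sizeof(struct upl_page_info) * atop(size);	/* internal page list */

	return upl_size + page_field_size;			/* bytes kalloc()ed */
}
#endif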
2813
2814static void
2d21ac55 2815upl_destroy(upl_t upl)
0b4e3aa0 2816{
55e303ae 2817 int page_field_size; /* bit field in word size buf */
2d21ac55 2818 int size;
0b4e3aa0 2819
b0d623f7 2820#if UPL_DEBUG
0b4e3aa0 2821 {
55e303ae 2822 vm_object_t object;
2d21ac55
A
2823
2824 if (upl->flags & UPL_SHADOWED) {
55e303ae
A
2825 object = upl->map_object->shadow;
2826 } else {
2827 object = upl->map_object;
2828 }
2829 vm_object_lock(object);
2d21ac55 2830 queue_remove(&object->uplq, upl, upl_t, uplq);
55e303ae 2831 vm_object_unlock(object);
0b4e3aa0 2832 }
91447636 2833#endif /* UPL_DEBUG */
2d21ac55
A
2834 /*
2835 * drop a reference on the map_object whether or
2836 * not a pageout object is inserted
2837 */
2838 if (upl->flags & UPL_SHADOWED)
0b4e3aa0 2839 vm_object_deallocate(upl->map_object);
55e303ae 2840
2d21ac55
A
2841 if (upl->flags & UPL_DEVICE_MEMORY)
2842 size = PAGE_SIZE;
2843 else
2844 size = upl->size;
55e303ae 2845 page_field_size = 0;
2d21ac55 2846
55e303ae 2847 if (upl->flags & UPL_LITE) {
2d21ac55 2848 page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
55e303ae
A
2849 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
2850 }
b0d623f7
A
2851 upl_lock_destroy(upl);
2852 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
2d21ac55 2853 if (upl->flags & UPL_INTERNAL) {
91447636
A
2854 kfree(upl,
2855 sizeof(struct upl) +
2d21ac55 2856 (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
91447636 2857 + page_field_size);
0b4e3aa0 2858 } else {
91447636 2859 kfree(upl, sizeof(struct upl) + page_field_size);
0b4e3aa0
A
2860 }
2861}
2862
91447636 2863void uc_upl_dealloc(upl_t upl);
0b4e3aa0 2864__private_extern__ void
2d21ac55 2865uc_upl_dealloc(upl_t upl)
1c79356b 2866{
2d21ac55 2867 if (--upl->ref_count == 0)
1c79356b 2868 upl_destroy(upl);
1c79356b
A
2869}
2870
0b4e3aa0 2871void
2d21ac55 2872upl_deallocate(upl_t upl)
0b4e3aa0 2873{
b0d623f7
A
2874 if (--upl->ref_count == 0) {
2875 if(vector_upl_is_valid(upl))
2876 vector_upl_deallocate(upl);
0b4e3aa0 2877 upl_destroy(upl);
b0d623f7 2878 }
0b4e3aa0 2879}
1c79356b 2880
b0d623f7
A
2881#if DEVELOPMENT || DEBUG
2882/*
91447636
A
2883 * Statistics about UPL enforcement of copy-on-write obligations.
2884 */
2885unsigned long upl_cow = 0;
2886unsigned long upl_cow_again = 0;
91447636
A
2887unsigned long upl_cow_pages = 0;
2888unsigned long upl_cow_again_pages = 0;
b0d623f7
A
2889
2890unsigned long iopl_cow = 0;
2891unsigned long iopl_cow_pages = 0;
2892#endif
91447636 2893
1c79356b 2894/*
0b4e3aa0 2895 * Routine: vm_object_upl_request
1c79356b
A
2896 * Purpose:
2897 * Cause the population of a portion of a vm_object.
2898 * Depending on the nature of the request, the pages
2900 * returned may contain valid data or be uninitialized.
2901 * A page list structure, listing the physical pages,
2901 * will be returned upon request.
2902 * This function is called by the file system or any other
2903 * supplier of backing store to a pager.
2904 * IMPORTANT NOTE: The caller must still respect the relationship
2905 * between the vm_object and its backing memory object. The
2906 * caller MUST NOT substitute changes in the backing file
2907 * without first doing a memory_object_lock_request on the
2908 * target range unless it is known that the pages are not
2909 * shared with another entity at the pager level.
2910 * Copy_in_to:
2911 * if a page list structure is present
2912 * return the mapped physical pages; where a
2913 * page is not present, return a non-initialized
2914 * one. If the no_sync bit is turned on, don't
2915 * call the pager unlock to synchronize with other
2916 * possible copies of the page. Leave pages busy
2917 * in the original object, if a page list structure
2918 * was specified. When a commit of the page list
2919 * pages is done, the dirty bit will be set for each one.
2920 * Copy_out_from:
2921 * If a page list structure is present, return
2922 * all mapped pages. Where a page does not exist
2923 * map a zero filled one. Leave pages busy in
2924 * the original object. If a page list structure
2925 * is not specified, this call is a no-op.
2926 *
2927 * Note: access of default pager objects has a rather interesting
2928 * twist. The caller of this routine, presumably the file system
2929 * page cache handling code, will never actually make a request
2930 * against a default pager backed object. Only the default
2931 * pager will make requests on backing store related vm_objects.
2932 * In this way the default pager can maintain the relationship
2933 * between backing store files (abstract memory objects) and
2934 * the vm_objects (cache objects) they support.
2935 *
2936 */
91447636 2937
0b4e3aa0
A
2938__private_extern__ kern_return_t
2939vm_object_upl_request(
1c79356b 2940 vm_object_t object,
91447636
A
2941 vm_object_offset_t offset,
2942 upl_size_t size,
1c79356b 2943 upl_t *upl_ptr,
0b4e3aa0
A
2944 upl_page_info_array_t user_page_list,
2945 unsigned int *page_list_count,
91447636 2946 int cntrl_flags)
1c79356b 2947{
91447636 2948 vm_page_t dst_page = VM_PAGE_NULL;
2d21ac55
A
2949 vm_object_offset_t dst_offset;
2950 upl_size_t xfer_size;
1c79356b 2951 boolean_t dirty;
55e303ae 2952 boolean_t hw_dirty;
1c79356b 2953 upl_t upl = NULL;
91447636
A
2954 unsigned int entry;
2955#if MACH_CLUSTER_STATS
1c79356b 2956 boolean_t encountered_lrp = FALSE;
91447636 2957#endif
1c79356b 2958 vm_page_t alias_page = NULL;
2d21ac55 2959 int refmod_state = 0;
91447636
A
2960 wpl_array_t lite_list = NULL;
2961 vm_object_t last_copy_object;
b0d623f7
A
2962 struct dw dw_array[DELAYED_WORK_LIMIT];
2963 struct dw *dwp;
2964 int dw_count;
91447636
A
2965
2966 if (cntrl_flags & ~UPL_VALID_FLAGS) {
2967 /*
2968 * For forward compatibility's sake,
2969 * reject any unknown flag.
2970 */
2971 return KERN_INVALID_VALUE;
2972 }
2d21ac55
A
2973 if ( (!object->internal) && (object->paging_offset != 0) )
2974 panic("vm_object_upl_request: external object with non-zero paging offset\n");
2975 if (object->phys_contiguous)
2976 panic("vm_object_upl_request: contiguous object specified\n");
0b4e3aa0 2977
0b4e3aa0 2978
cf7d32b8
A
2979 if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
2980 size = MAX_UPL_SIZE * PAGE_SIZE;
1c79356b 2981
2d21ac55 2982 if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
cf7d32b8 2983 *page_list_count = MAX_UPL_SIZE;
1c79356b 2984
2d21ac55
A
2985 if (cntrl_flags & UPL_SET_INTERNAL) {
2986 if (cntrl_flags & UPL_SET_LITE) {
55e303ae 2987
2d21ac55 2988 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
91447636 2989
2d21ac55
A
2990 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
2991 lite_list = (wpl_array_t)
91447636 2992 (((uintptr_t)user_page_list) +
2d21ac55 2993 ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
b0d623f7
A
2994 if (size == 0) {
2995 user_page_list = NULL;
2996 lite_list = NULL;
2997 }
1c79356b 2998 } else {
2d21ac55 2999 upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
55e303ae 3000
2d21ac55 3001 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
3002 if (size == 0) {
3003 user_page_list = NULL;
3004 }
55e303ae 3005 }
2d21ac55
A
3006 } else {
3007 if (cntrl_flags & UPL_SET_LITE) {
91447636 3008
2d21ac55 3009 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
55e303ae 3010
2d21ac55 3011 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
3012 if (size == 0) {
3013 lite_list = NULL;
3014 }
55e303ae 3015 } else {
2d21ac55 3016 upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
0b4e3aa0 3017 }
55e303ae 3018 }
2d21ac55
A
3019 *upl_ptr = upl;
3020
3021 if (user_page_list)
3022 user_page_list[0].device = FALSE;
91447636 3023
2d21ac55
A
3024 if (cntrl_flags & UPL_SET_LITE) {
3025 upl->map_object = object;
3026 } else {
3027 upl->map_object = vm_object_allocate(size);
3028 /*
3029 * No need to lock the new object: nobody else knows
3030 * about it yet, so it's all ours so far.
3031 */
3032 upl->map_object->shadow = object;
3033 upl->map_object->pageout = TRUE;
3034 upl->map_object->can_persist = FALSE;
3035 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
3036 upl->map_object->shadow_offset = offset;
3037 upl->map_object->wimg_bits = object->wimg_bits;
3038
3039 VM_PAGE_GRAB_FICTITIOUS(alias_page);
3040
3041 upl->flags |= UPL_SHADOWED;
3042 }
3043 /*
91447636
A
3044 * ENCRYPTED SWAP:
3045 * Just mark the UPL as "encrypted" here.
3046 * We'll actually encrypt the pages later,
3047 * in upl_encrypt(), when the caller has
3048 * selected which pages need to go to swap.
3049 */
2d21ac55 3050 if (cntrl_flags & UPL_ENCRYPT)
91447636 3051 upl->flags |= UPL_ENCRYPTED;
2d21ac55
A
3052
3053 if (cntrl_flags & UPL_FOR_PAGEOUT)
91447636 3054 upl->flags |= UPL_PAGEOUT;
2d21ac55 3055
55e303ae 3056 vm_object_lock(object);
b0d623f7 3057 vm_object_activity_begin(object);
2d21ac55
A
3058
3059 /*
3060 * we can lock in the paging_offset once paging_in_progress is set
3061 */
3062 upl->size = size;
3063 upl->offset = offset + object->paging_offset;
55e303ae 3064
b0d623f7 3065#if UPL_DEBUG
2d21ac55 3066 queue_enter(&object->uplq, upl, upl_t, uplq);
91447636 3067#endif /* UPL_DEBUG */
91447636 3068
2d21ac55 3069 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
91447636 3070 /*
2d21ac55
A
3071 * Honor copy-on-write obligations
3072 *
91447636
A
3073 * The caller is gathering these pages and
3074 * might modify their contents. We need to
3075 * make sure that the copy object has its own
3076 * private copies of these pages before we let
3077 * the caller modify them.
3078 */
3079 vm_object_update(object,
3080 offset,
3081 size,
3082 NULL,
3083 NULL,
3084 FALSE, /* should_return */
3085 MEMORY_OBJECT_COPY_SYNC,
3086 VM_PROT_NO_CHANGE);
b0d623f7 3087#if DEVELOPMENT || DEBUG
91447636
A
3088 upl_cow++;
3089 upl_cow_pages += size >> PAGE_SHIFT;
b0d623f7 3090#endif
55e303ae 3091 }
2d21ac55
A
3092 /*
3093 * remember which copy object we synchronized with
3094 */
91447636 3095 last_copy_object = object->copy;
1c79356b 3096 entry = 0;
55e303ae 3097
2d21ac55
A
3098 xfer_size = size;
3099 dst_offset = offset;
3100
b0d623f7
A
3101 dwp = &dw_array[0];
3102 dw_count = 0;
3103
2d21ac55
A
3104 while (xfer_size) {
3105
b0d623f7
A
3106 dwp->dw_mask = 0;
3107
2d21ac55 3108 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
2d21ac55
A
3109 vm_object_unlock(object);
3110 VM_PAGE_GRAB_FICTITIOUS(alias_page);
b0d623f7 3111 vm_object_lock(object);
4a3eedf9 3112 }
2d21ac55
A
3113 if (cntrl_flags & UPL_COPYOUT_FROM) {
3114 upl->flags |= UPL_PAGE_SYNC_DONE;
3115
91447636 3116 if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
1c79356b
A
3117 dst_page->fictitious ||
3118 dst_page->absent ||
3119 dst_page->error ||
b0d623f7 3120 (VM_PAGE_WIRED(dst_page) && !dst_page->pageout && !dst_page->list_req_pending)) {
91447636
A
3121
3122 if (user_page_list)
1c79356b 3123 user_page_list[entry].phys_addr = 0;
2d21ac55 3124
b0d623f7 3125 goto try_next_page;
2d21ac55
A
3126 }
3127 /*
3128 * grab this up front...
3130 * a high percentage of the time we're going to
3130 * need the hardware modification state a bit later
3131 * anyway... so we can eliminate an extra call into
3132 * the pmap layer by grabbing it here and recording it
3133 */
3134 if (dst_page->pmapped)
3135 refmod_state = pmap_get_refmod(dst_page->phys_page);
3136 else
3137 refmod_state = 0;
3138
3139 if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
91447636 3140 /*
2d21ac55
A
3141 * page is on inactive list and referenced...
3142 * reactivate it now... this gets it out of the
3143 * way of vm_pageout_scan which would have to
3144 * reactivate it upon tripping over it
91447636 3145 */
b0d623f7 3146 dwp->dw_mask |= DW_vm_page_activate;
2d21ac55
A
3147 }
3148 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
3149 /*
3150 * we're only asking for DIRTY pages to be returned
3151 */
3152 if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
91447636 3153 /*
2d21ac55
A
3154 * if we were the page stolen by vm_pageout_scan to be
3155 * cleaned (as opposed to a buddy being clustered in),
3156 * or this request is not being driven by a PAGEOUT cluster,
3157 * then we only need to check for the page being dirty or
3158 * precious to decide whether to return it
91447636 3159 */
2d21ac55 3160 if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
91447636 3161 goto check_busy;
2d21ac55 3162 goto dont_return;
1c79356b 3163 }
2d21ac55
A
3164 /*
3165 * this is a request for a PAGEOUT cluster and this page
3166 * is merely along for the ride as a 'buddy'... not only
3167 * does it have to be dirty to be returned, but it also
3168 * can't have been referenced recently... note that we've
3169 * already filtered above based on whether this page is
3170 * currently on the inactive queue or it meets the page
3171 * ticket (generation count) check
3172 */
3173 if ( !(refmod_state & VM_MEM_REFERENCED) &&
3174 ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
3175 goto check_busy;
1c79356b 3176 }
2d21ac55
A
3177dont_return:
3178 /*
3179 * if we reach here, we're not to return
3180 * the page... go on to the next one
3181 */
3182 if (user_page_list)
3183 user_page_list[entry].phys_addr = 0;
55e303ae 3184
b0d623f7 3185 goto try_next_page;
2d21ac55
A
3186 }
3187check_busy:
3188 if (dst_page->busy && (!(dst_page->list_req_pending && dst_page->pageout))) {
3189 if (cntrl_flags & UPL_NOBLOCK) {
3190 if (user_page_list)
3191 user_page_list[entry].phys_addr = 0;
55e303ae 3192
b0d623f7 3193 goto try_next_page;
1c79356b 3194 }
2d21ac55
A
3195 /*
3196 * someone else is playing with the
3197 * page. We will have to wait.
3198 */
2d21ac55 3199 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
1c79356b 3200
2d21ac55
A
3201 continue;
3202 }
3203 /*
3204 * Someone else already cleaning the page?
3205 */
b0d623f7 3206 if ((dst_page->cleaning || dst_page->absent || VM_PAGE_WIRED(dst_page)) && !dst_page->list_req_pending) {
2d21ac55
A
3207 if (user_page_list)
3208 user_page_list[entry].phys_addr = 0;
91447636 3209
b0d623f7 3210 goto try_next_page;
2d21ac55
A
3211 }
3212 /*
3213 * ENCRYPTED SWAP:
3214 * The caller is gathering this page and might
3215 * access its contents later on. Decrypt the
3216 * page before adding it to the UPL, so that
3217 * the caller never sees encrypted data.
3218 */
3219 if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
3220 int was_busy;
91447636
A
3221
3222 /*
2d21ac55
A
3223 * save the current state of busy
3224 * mark page as busy while decrypt
3225 * is in progress since it will drop
3226 * the object lock...
91447636 3227 */
2d21ac55
A
3228 was_busy = dst_page->busy;
3229 dst_page->busy = TRUE;
91447636 3230
2d21ac55
A
3231 vm_page_decrypt(dst_page, 0);
3232 vm_page_decrypt_for_upl_counter++;
3233 /*
3234 * restore to original busy state
3235 */
3236 dst_page->busy = was_busy;
b0d623f7
A
3237 }
3238 if (dst_page->pageout_queue == TRUE) {
91447636 3239
b0d623f7
A
3240 vm_page_lockspin_queues();
3241
3242 if (dst_page->pageout_queue == TRUE) {
3243 /*
3244 * we've buddied up a page for a clustered pageout
3245 * that has already been moved to the pageout
3246 * queue by pageout_scan... we need to remove
3247 * it from the queue and drop the laundry count
3248 * on that queue
3249 */
3250 vm_pageout_throttle_up(dst_page);
3251 }
3252 vm_page_unlock_queues();
91447636 3253 }
2d21ac55
A
3254#if MACH_CLUSTER_STATS
3255 /*
3256 * pageout statistics gathering. count
3257 * all the pages we will page out that
3258 * were not counted in the initial
3259 * vm_pageout_scan work
3260 */
3261 if (dst_page->list_req_pending)
3262 encountered_lrp = TRUE;
3263 if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
3264 if (encountered_lrp)
3265 CLUSTER_STAT(pages_at_higher_offsets++;)
3266 else
3267 CLUSTER_STAT(pages_at_lower_offsets++;)
3268 }
3269#endif
3270 /*
3271 * Turn off busy indication on pending
3272 * pageout. Note: we can only get here
3273 * in the request pending case.
3274 */
3275 dst_page->list_req_pending = FALSE;
3276 dst_page->busy = FALSE;
3277
3278 hw_dirty = refmod_state & VM_MEM_MODIFIED;
3279 dirty = hw_dirty ? TRUE : dst_page->dirty;
3280
3281 if (dst_page->phys_page > upl->highest_page)
3282 upl->highest_page = dst_page->phys_page;
3283
3284 if (cntrl_flags & UPL_SET_LITE) {
b0d623f7 3285 unsigned int pg_num;
2d21ac55 3286
b0d623f7
A
3287 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
3288 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
2d21ac55
A
3289 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
3290
3291 if (hw_dirty)
3292 pmap_clear_modify(dst_page->phys_page);
3293
3294 /*
3295 * Mark original page as cleaning
3296 * in place.
3297 */
3298 dst_page->cleaning = TRUE;
3299 dst_page->precious = FALSE;
3300 } else {
3301 /*
3302 * use pageclean setup, it is more
3303 * convenient even for the pageout
3304 * cases here
3305 */
3306 vm_object_lock(upl->map_object);
3307 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
3308 vm_object_unlock(upl->map_object);
3309
3310 alias_page->absent = FALSE;
3311 alias_page = NULL;
1c79356b 3312 }
2d21ac55
A
3313#if MACH_PAGEMAP
3314 /*
3315 * Record that this page has been
3316 * written out
3317 */
3318 vm_external_state_set(object->existence_map, dst_page->offset);
3319#endif /*MACH_PAGEMAP*/
3320 dst_page->dirty = dirty;
55e303ae 3321
2d21ac55
A
3322 if (!dirty)
3323 dst_page->precious = TRUE;
91447636 3324
2d21ac55
A
3325 if (dst_page->pageout)
3326 dst_page->busy = TRUE;
3327
3328 if ( (cntrl_flags & UPL_ENCRYPT) ) {
3329 /*
3330 * ENCRYPTED SWAP:
3331 * We want to deny access to the target page
3332 * because its contents are about to be
3333 * encrypted and the user would be very
3334 * confused to see encrypted data instead
3335 * of their data.
3336 * We also set "encrypted_cleaning" to allow
3337 * vm_pageout_scan() to demote that page
3338 * from "adjacent/clean-in-place" to
3339 * "target/clean-and-free" if it bumps into
3340 * this page during its scanning while we're
3341 * still processing this cluster.
3342 */
3343 dst_page->busy = TRUE;
3344 dst_page->encrypted_cleaning = TRUE;
3345 }
3346 if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
3347 /*
3348 * deny access to the target page
3349 * while it is being worked on
3350 */
b0d623f7 3351 if ((!dst_page->pageout) && ( !VM_PAGE_WIRED(dst_page))) {
2d21ac55
A
3352 dst_page->busy = TRUE;
3353 dst_page->pageout = TRUE;
b0d623f7
A
3354
3355 dwp->dw_mask |= DW_vm_page_wire;
2d21ac55
A
3356 }
3357 }
3358 } else {
3359 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
91447636 3360 /*
2d21ac55
A
3361 * Honor copy-on-write obligations
3362 *
91447636
A
3363 * The copy object has changed since we
3364 * last synchronized for copy-on-write.
3365 * Another copy object might have been
3366 * inserted while we released the object's
3367 * lock. Since someone could have seen the
3368 * original contents of the remaining pages
3369 * through that new object, we have to
3370 * synchronize with it again for the remaining
3371 * pages only. The previous pages are "busy"
3372 * so they can not be seen through the new
3373 * mapping. The new mapping will see our
3374 * upcoming changes for those previous pages,
3375 * but that's OK since they couldn't see what
3376 * was there before. It's just a race anyway
3377 * and there's no guarantee of consistency or
3378 * atomicity. We just don't want new mappings
3379 * to see both the *before* and *after* pages.
3380 */
3381 if (object->copy != VM_OBJECT_NULL) {
3382 vm_object_update(
3383 object,
3384 dst_offset,/* current offset */
3385 xfer_size, /* remaining size */
3386 NULL,
3387 NULL,
3388 FALSE, /* should_return */
3389 MEMORY_OBJECT_COPY_SYNC,
3390 VM_PROT_NO_CHANGE);
2d21ac55 3391
b0d623f7 3392#if DEVELOPMENT || DEBUG
91447636 3393 upl_cow_again++;
2d21ac55 3394 upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
b0d623f7 3395#endif
91447636 3396 }
2d21ac55
A
3397 /*
3398 * remember the copy object we synced with
3399 */
91447636
A
3400 last_copy_object = object->copy;
3401 }
91447636
A
3402 dst_page = vm_page_lookup(object, dst_offset);
3403
2d21ac55 3404 if (dst_page != VM_PAGE_NULL) {
b0d623f7
A
3405
3406 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
3407
3408 if ( !(dst_page->absent && dst_page->list_req_pending) ) {
3409 /*
2d21ac55
A
3410 * skip over pages already present in the cache
3411 */
b0d623f7
A
3412 if (user_page_list)
3413 user_page_list[entry].phys_addr = 0;
2d21ac55 3414
b0d623f7 3415 goto try_next_page;
55e303ae 3416 }
b0d623f7
A
3417 }
3418 if ( !(dst_page->list_req_pending) ) {
3419
2d21ac55
A
3420 if (dst_page->cleaning) {
3421 /*
3422 * someone else is writing to the page... wait...
3423 */
2d21ac55
A
3424 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3425
3426 continue;
3427 }
3428 } else {
3429 if (dst_page->fictitious &&
3430 dst_page->phys_page == vm_page_fictitious_addr) {
3431 assert( !dst_page->speculative);
3432 /*
3433 * dump the fictitious page
3434 */
3435 dst_page->list_req_pending = FALSE;
55e303ae 3436
b0d623f7 3437 VM_PAGE_FREE(dst_page);
2d21ac55
A
3438
3439 dst_page = NULL;
b0d623f7 3440
2d21ac55
A
3441 } else if (dst_page->absent) {
3442 /*
3443 * the default_pager case
3444 */
3445 dst_page->list_req_pending = FALSE;
3446 dst_page->busy = FALSE;
b0d623f7
A
3447
3448 } else if (dst_page->pageout) {
3449 /*
3450 * page was earmarked by vm_pageout_scan
3451 * to be cleaned and stolen... we're going
3452 * to take it back since we are not attempting
3453 * to read that page and we don't want to stall
3454 * waiting for it to be cleaned for 2 reasons...
3455 * 1 - no use paging it out and back in
 3456 * 2 - if we stall, we may cause a deadlock in
 3457 * the FS trying to acquire its locks
3458 * on the VNOP_PAGEOUT path presuming that
3459 * those locks are already held on the read
3460 * path before trying to create this UPL
3461 *
3462 * so undo all of the state that vm_pageout_scan
3463 * hung on this page
3464 */
3465 dst_page->busy = FALSE;
3466
3467 vm_pageout_queue_steal(dst_page, FALSE);
2d21ac55 3468 }
0b4e3aa0 3469 }
1c79356b 3470 }
2d21ac55
A
3471 if (dst_page == VM_PAGE_NULL) {
3472 if (object->private) {
0b4e3aa0
A
3473 /*
3474 * This is a nasty wrinkle for users
3475 * of upl who encounter device or
 3476 * private memory; however, it is
 3477 * unavoidable: only a fault can
2d21ac55 3478 * resolve the actual backing
0b4e3aa0
A
3479 * physical page by asking the
3480 * backing device.
3481 */
2d21ac55 3482 if (user_page_list)
55e303ae 3483 user_page_list[entry].phys_addr = 0;
2d21ac55 3484
b0d623f7 3485 goto try_next_page;
0b4e3aa0 3486 }
2d21ac55
A
3487 /*
3488 * need to allocate a page
2d21ac55 3489 */
4a3eedf9 3490 dst_page = vm_page_grab();
2d21ac55 3491
1c79356b 3492 if (dst_page == VM_PAGE_NULL) {
2d21ac55
A
3493 if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
3494 /*
3495 * we don't want to stall waiting for pages to come onto the free list
3496 * while we're already holding absent pages in this UPL
3497 * the caller will deal with the empty slots
3498 */
3499 if (user_page_list)
3500 user_page_list[entry].phys_addr = 0;
3501
3502 goto try_next_page;
3503 }
3504 /*
3505 * no pages available... wait
3506 * then try again for the same
3507 * offset...
3508 */
0b4e3aa0
A
3509 vm_object_unlock(object);
3510 VM_PAGE_WAIT();
b0d623f7 3511 vm_object_lock(object);
2d21ac55 3512
0b4e3aa0 3513 continue;
1c79356b 3514 }
b0d623f7 3515 vm_page_insert(dst_page, object, dst_offset);
4a3eedf9 3516
2d21ac55 3517 dst_page->absent = TRUE;
4a3eedf9 3518 dst_page->busy = FALSE;
2d21ac55
A
3519
3520 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
91447636
A
3521 /*
3522 * if UPL_RET_ONLY_ABSENT was specified,
 3523 * then we're definitely setting up a
 3524 * UPL for a clustered read/pagein
3525 * operation... mark the pages as clustered
2d21ac55
A
3526 * so upl_commit_range can put them on the
3527 * speculative list
91447636
A
3528 */
3529 dst_page->clustered = TRUE;
3530 }
1c79356b 3531 }
b0d623f7
A
3532 if (dst_page->fictitious) {
3533 panic("need corner case for fictitious page");
3534 }
3535 if (dst_page->busy) {
3536 /*
3537 * someone else is playing with the
3538 * page. We will have to wait.
3539 */
3540 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3541
3542 continue;
3543 }
91447636
A
3544 /*
3545 * ENCRYPTED SWAP:
3546 */
3547 if (cntrl_flags & UPL_ENCRYPT) {
3548 /*
3549 * The page is going to be encrypted when we
3550 * get it from the pager, so mark it so.
3551 */
3552 dst_page->encrypted = TRUE;
3553 } else {
3554 /*
3555 * Otherwise, the page will not contain
3556 * encrypted data.
3557 */
3558 dst_page->encrypted = FALSE;
3559 }
1c79356b 3560 dst_page->overwriting = TRUE;
2d21ac55 3561
2d21ac55
A
3562 if (dst_page->pmapped) {
3563 if ( !(cntrl_flags & UPL_FILE_IO))
3564 /*
3565 * eliminate all mappings from the
 3566 * original object and its progeny
55e303ae 3567 */
2d21ac55
A
3568 refmod_state = pmap_disconnect(dst_page->phys_page);
3569 else
3570 refmod_state = pmap_get_refmod(dst_page->phys_page);
3571 } else
3572 refmod_state = 0;
55e303ae 3573
2d21ac55
A
3574 hw_dirty = refmod_state & VM_MEM_MODIFIED;
3575 dirty = hw_dirty ? TRUE : dst_page->dirty;
1c79356b 3576
2d21ac55 3577 if (cntrl_flags & UPL_SET_LITE) {
b0d623f7 3578 unsigned int pg_num;
1c79356b 3579
b0d623f7
A
3580 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
3581 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
2d21ac55 3582 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
91447636 3583
2d21ac55
A
3584 if (hw_dirty)
3585 pmap_clear_modify(dst_page->phys_page);
0b4e3aa0 3586
2d21ac55
A
3587 /*
3588 * Mark original page as cleaning
3589 * in place.
3590 */
3591 dst_page->cleaning = TRUE;
3592 dst_page->precious = FALSE;
3593 } else {
3594 /*
3595 * use pageclean setup, it is more
3596 * convenient even for the pageout
3597 * cases here
3598 */
3599 vm_object_lock(upl->map_object);
3600 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
3601 vm_object_unlock(upl->map_object);
0b4e3aa0 3602
2d21ac55
A
3603 alias_page->absent = FALSE;
3604 alias_page = NULL;
3605 }
1c79356b 3606
2d21ac55
A
3607 if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
3608 /*
3609 * clean in place for read implies
3610 * that a write will be done on all
3611 * the pages that are dirty before
 3612 * a UPL commit is done. The caller
3613 * is obligated to preserve the
3614 * contents of all pages marked dirty
3615 */
3616 upl->flags |= UPL_CLEAR_DIRTY;
3617 }
3618 dst_page->dirty = dirty;
91447636 3619
2d21ac55
A
3620 if (!dirty)
3621 dst_page->precious = TRUE;
3622
b0d623f7 3623 if ( !VM_PAGE_WIRED(dst_page)) {
2d21ac55
A
3624 /*
3625 * deny access to the target page while
3626 * it is being worked on
3627 */
3628 dst_page->busy = TRUE;
3629 } else
b0d623f7 3630 dwp->dw_mask |= DW_vm_page_wire;
2d21ac55 3631
b0d623f7
A
3632 /*
3633 * We might be about to satisfy a fault which has been
3634 * requested. So no need for the "restart" bit.
3635 */
3636 dst_page->restart = FALSE;
3637 if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
2d21ac55
A
3638 /*
3639 * expect the page to be used
3640 */
b0d623f7 3641 dwp->dw_mask |= DW_set_reference;
2d21ac55
A
3642 }
3643 dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE;
3644 }
3645 if (dst_page->phys_page > upl->highest_page)
3646 upl->highest_page = dst_page->phys_page;
3647 if (user_page_list) {
3648 user_page_list[entry].phys_addr = dst_page->phys_page;
2d21ac55
A
3649 user_page_list[entry].pageout = dst_page->pageout;
3650 user_page_list[entry].absent = dst_page->absent;
593a1d5f 3651 user_page_list[entry].dirty = dst_page->dirty;
2d21ac55 3652 user_page_list[entry].precious = dst_page->precious;
593a1d5f 3653 user_page_list[entry].device = FALSE;
2d21ac55
A
3654 if (dst_page->clustered == TRUE)
3655 user_page_list[entry].speculative = dst_page->speculative;
3656 else
3657 user_page_list[entry].speculative = FALSE;
593a1d5f
A
3658 user_page_list[entry].cs_validated = dst_page->cs_validated;
3659 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
2d21ac55
A
3660 }
3661 /*
3662 * if UPL_RET_ONLY_ABSENT is set, then
3663 * we are working with a fresh page and we've
3664 * just set the clustered flag on it to
 3665 * indicate that it was dragged in as part of a
3666 * speculative cluster... so leave it alone
3667 */
3668 if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
3669 /*
3670 * someone is explicitly grabbing this page...
3671 * update clustered and speculative state
3672 *
3673 */
3674 VM_PAGE_CONSUME_CLUSTERED(dst_page);
3675 }
b0d623f7
A
3676try_next_page:
3677 if (dwp->dw_mask) {
3678 if (dwp->dw_mask & DW_vm_page_activate)
3679 VM_STAT_INCR(reactivations);
4a3eedf9 3680
b0d623f7
A
3681 if (dst_page->busy == FALSE) {
3682 /*
3683 * dw_do_work may need to drop the object lock
3684 * if it does, we need the pages it's looking at to
3685 * be held stable via the busy bit.
3686 */
3687 dst_page->busy = TRUE;
3688 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
3689 }
3690 dwp->dw_m = dst_page;
3691 dwp++;
3692 dw_count++;
3693
3694 if (dw_count >= DELAYED_WORK_LIMIT) {
3695 dw_do_work(object, &dw_array[0], dw_count);
3696
3697 dwp = &dw_array[0];
3698 dw_count = 0;
4a3eedf9 3699 }
2d21ac55 3700 }
2d21ac55
A
3701 entry++;
3702 dst_offset += PAGE_SIZE_64;
3703 xfer_size -= PAGE_SIZE;
3704 }
b0d623f7
A
3705 if (dw_count)
3706 dw_do_work(object, &dw_array[0], dw_count);
3707
2d21ac55 3708 if (alias_page != NULL) {
b0d623f7 3709 VM_PAGE_FREE(alias_page);
1c79356b 3710 }
91447636 3711
2d21ac55
A
3712 if (page_list_count != NULL) {
3713 if (upl->flags & UPL_INTERNAL)
3714 *page_list_count = 0;
3715 else if (*page_list_count > entry)
3716 *page_list_count = entry;
3717 }
b0d623f7
A
3718#if UPL_DEBUG
3719 upl->upl_state = 1;
3720#endif
1c79356b 3721 vm_object_unlock(object);
2d21ac55 3722
1c79356b
A
3723 return KERN_SUCCESS;
3724}
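
/*
 * A minimal illustrative sketch (not from the original source): the lite-list
 * entries used above pack one bit per page into 32-bit words, indexing with
 * pg_num >> 5 for the word and pg_num & 31 for the bit.  The helper names
 * below are hypothetical; they simply restate that arithmetic on a
 * uint32_t-based bitmap like wpl_array_t.
 */
#include <stdint.h>
#include <stdbool.h>

static inline void
lite_bit_set(uint32_t *lite_list, unsigned int pg_num)
{
	lite_list[pg_num >> 5] |= 1U << (pg_num & 31);     /* mark page present */
}

static inline void
lite_bit_clear(uint32_t *lite_list, unsigned int pg_num)
{
	lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));  /* drop the page */
}

static inline bool
lite_bit_test(const uint32_t *lite_list, unsigned int pg_num)
{
	return (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) != 0;
}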
3725
0b4e3aa0 3726/* JMM - Backward compatibility for now */
1c79356b 3727kern_return_t
91447636
A
3728vm_fault_list_request( /* forward */
3729 memory_object_control_t control,
3730 vm_object_offset_t offset,
3731 upl_size_t size,
3732 upl_t *upl_ptr,
3733 upl_page_info_t **user_page_list_ptr,
2d21ac55 3734 unsigned int page_list_count,
91447636
A
3735 int cntrl_flags);
3736kern_return_t
0b4e3aa0
A
3737vm_fault_list_request(
3738 memory_object_control_t control,
1c79356b 3739 vm_object_offset_t offset,
91447636 3740 upl_size_t size,
0b4e3aa0 3741 upl_t *upl_ptr,
1c79356b 3742 upl_page_info_t **user_page_list_ptr,
2d21ac55 3743 unsigned int page_list_count,
1c79356b
A
3744 int cntrl_flags)
3745{
0c530ab8 3746 unsigned int local_list_count;
0b4e3aa0
A
3747 upl_page_info_t *user_page_list;
3748 kern_return_t kr;
3749
b0d623f7
A
3750 if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
3751 return KERN_INVALID_ARGUMENT;
3752
0b4e3aa0
A
3753 if (user_page_list_ptr != NULL) {
3754 local_list_count = page_list_count;
3755 user_page_list = *user_page_list_ptr;
3756 } else {
3757 local_list_count = 0;
3758 user_page_list = NULL;
3759 }
3760 kr = memory_object_upl_request(control,
3761 offset,
3762 size,
3763 upl_ptr,
3764 user_page_list,
3765 &local_list_count,
3766 cntrl_flags);
3767
3768 if(kr != KERN_SUCCESS)
3769 return kr;
3770
3771 if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
3772 *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
3773 }
3774
3775 return KERN_SUCCESS;
3776}
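
/*
 * A hedged usage sketch (assumption, not original source): one way a caller
 * might use the compatibility wrapper above.  With UPL_INTERNAL set, the
 * wrapper hands back the page list embedded in the UPL via
 * UPL_GET_INTERNAL_PAGE_LIST, so the caller supplies no array of its own.
 * "control", "off" and "len" are hypothetical caller-provided values.
 */
static kern_return_t
example_fault_list_use(memory_object_control_t control,
		       vm_object_offset_t off, upl_size_t len)
{
	upl_t		upl = UPL_NULL;
	upl_page_info_t	*pl = NULL;
	kern_return_t	kr;

	kr = vm_fault_list_request(control, off, len, &upl, &pl,
				   len / PAGE_SIZE, UPL_INTERNAL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect pl[i].phys_addr for each page, then commit or abort ... */
	return KERN_SUCCESS;
}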
3777
3778
3779
3780/*
3781 * Routine: vm_object_super_upl_request
3782 * Purpose:
3783 * Cause the population of a portion of a vm_object
3784 * in much the same way as memory_object_upl_request.
3785 * Depending on the nature of the request, the pages
 3786 * returned may contain valid data or be uninitialized.
3787 * However, the region may be expanded up to the super
3788 * cluster size provided.
3789 */
3790
3791__private_extern__ kern_return_t
3792vm_object_super_upl_request(
3793 vm_object_t object,
3794 vm_object_offset_t offset,
91447636
A
3795 upl_size_t size,
3796 upl_size_t super_cluster,
0b4e3aa0
A
3797 upl_t *upl,
3798 upl_page_info_t *user_page_list,
3799 unsigned int *page_list_count,
3800 int cntrl_flags)
3801{
b0d623f7 3802 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
1c79356b 3803 return KERN_FAILURE;
0b4e3aa0 3804
55e303ae 3805 assert(object->paging_in_progress);
1c79356b 3806 offset = offset - object->paging_offset;
91447636 3807
91447636 3808 if (super_cluster > size) {
1c79356b
A
3809
3810 vm_object_offset_t base_offset;
91447636 3811 upl_size_t super_size;
b0d623f7 3812 vm_object_size_t super_size_64;
1c79356b 3813
2d21ac55
A
3814 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
3815 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
b0d623f7
A
3816 super_size_64 = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
3817 super_size = (upl_size_t) super_size_64;
3818 assert(super_size == super_size_64);
2d21ac55
A
3819
3820 if (offset > (base_offset + super_size)) {
3821 panic("vm_object_super_upl_request: Missed target pageout"
3822 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
3823 offset, base_offset, super_size, super_cluster,
3824 size, object->paging_offset);
3825 }
91447636
A
3826 /*
3827 * apparently there is a case where the vm requests a
 3828 * page to be written out whose offset is beyond the
3829 * object size
3830 */
b0d623f7
A
3831 if ((offset + size) > (base_offset + super_size)) {
3832 super_size_64 = (offset + size) - base_offset;
3833 super_size = (upl_size_t) super_size_64;
3834 assert(super_size == super_size_64);
3835 }
1c79356b
A
3836
3837 offset = base_offset;
3838 size = super_size;
3839 }
2d21ac55 3840 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
1c79356b
A
3841}
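
/*
 * A self-contained illustration (not from the original source) of the
 * super-cluster expansion arithmetic above, using plain integers.  It assumes
 * super_cluster is a power of two, which the mask computation requires; the
 * concrete values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset        = 0x3000;   /* requested start           */
	uint64_t size          = 0x2000;   /* requested length          */
	uint64_t super_cluster = 0x8000;   /* power-of-two cluster size */

	/* round the start down to a cluster boundary */
	uint64_t base_offset = offset & ~(super_cluster - 1);

	/* if the request spills past the first cluster, take two of them */
	uint64_t super_size = (offset + size) > (base_offset + super_cluster)
				? super_cluster << 1 : super_cluster;

	printf("expanded region: [0x%llx, 0x%llx)\n",
	       (unsigned long long)base_offset,
	       (unsigned long long)(base_offset + super_size));
	return 0;
}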
3842
b0d623f7 3843
91447636
A
3844kern_return_t
3845vm_map_create_upl(
3846 vm_map_t map,
3847 vm_map_address_t offset,
3848 upl_size_t *upl_size,
3849 upl_t *upl,
3850 upl_page_info_array_t page_list,
3851 unsigned int *count,
3852 int *flags)
3853{
3854 vm_map_entry_t entry;
3855 int caller_flags;
3856 int force_data_sync;
3857 int sync_cow_data;
3858 vm_object_t local_object;
3859 vm_map_offset_t local_offset;
3860 vm_map_offset_t local_start;
3861 kern_return_t ret;
3862
3863 caller_flags = *flags;
3864
3865 if (caller_flags & ~UPL_VALID_FLAGS) {
3866 /*
3867 * For forward compatibility's sake,
3868 * reject any unknown flag.
3869 */
3870 return KERN_INVALID_VALUE;
3871 }
91447636
A
3872 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
3873 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
3874
2d21ac55 3875 if (upl == NULL)
91447636
A
3876 return KERN_INVALID_ARGUMENT;
3877
91447636 3878REDISCOVER_ENTRY:
b0d623f7 3879 vm_map_lock_read(map);
2d21ac55 3880
91447636 3881 if (vm_map_lookup_entry(map, offset, &entry)) {
2d21ac55 3882
b0d623f7
A
3883 if ((entry->vme_end - offset) < *upl_size) {
3884 *upl_size = (upl_size_t) (entry->vme_end - offset);
3885 assert(*upl_size == entry->vme_end - offset);
3886 }
2d21ac55 3887
91447636 3888 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
2d21ac55
A
3889 *flags = 0;
3890
b0d623f7 3891 if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
2d21ac55
A
3892 if (entry->object.vm_object->private)
3893 *flags = UPL_DEV_MEMORY;
3894
3895 if (entry->object.vm_object->phys_contiguous)
91447636 3896 *flags |= UPL_PHYS_CONTIG;
91447636 3897 }
b0d623f7 3898 vm_map_unlock_read(map);
2d21ac55 3899
91447636
A
3900 return KERN_SUCCESS;
3901 }
2d21ac55 3902 if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
b0d623f7
A
3903 if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
3904 *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
2d21ac55 3905 }
91447636
A
3906 /*
3907 * Create an object if necessary.
3908 */
3909 if (entry->object.vm_object == VM_OBJECT_NULL) {
b0d623f7
A
3910
3911 if (vm_map_lock_read_to_write(map))
3912 goto REDISCOVER_ENTRY;
3913
2d21ac55 3914 entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
91447636 3915 entry->offset = 0;
b0d623f7
A
3916
3917 vm_map_lock_write_to_read(map);
91447636
A
3918 }
3919 if (!(caller_flags & UPL_COPYOUT_FROM)) {
3920 if (!(entry->protection & VM_PROT_WRITE)) {
b0d623f7 3921 vm_map_unlock_read(map);
91447636
A
3922 return KERN_PROTECTION_FAILURE;
3923 }
3924 if (entry->needs_copy) {
b0d623f7
A
3925 /*
3926 * Honor copy-on-write for COPY_SYMMETRIC
3927 * strategy.
3928 */
91447636
A
3929 vm_map_t local_map;
3930 vm_object_t object;
91447636
A
3931 vm_object_offset_t new_offset;
3932 vm_prot_t prot;
3933 boolean_t wired;
91447636
A
3934 vm_map_version_t version;
3935 vm_map_t real_map;
3936
3937 local_map = map;
2d21ac55
A
3938
3939 if (vm_map_lookup_locked(&local_map,
3940 offset, VM_PROT_WRITE,
3941 OBJECT_LOCK_EXCLUSIVE,
3942 &version, &object,
3943 &new_offset, &prot, &wired,
3944 NULL,
b0d623f7
A
3945 &real_map) != KERN_SUCCESS) {
3946 vm_map_unlock_read(local_map);
91447636
A
3947 return KERN_FAILURE;
3948 }
2d21ac55 3949 if (real_map != map)
91447636 3950 vm_map_unlock(real_map);
b0d623f7
A
3951 vm_map_unlock_read(local_map);
3952
91447636 3953 vm_object_unlock(object);
91447636
A
3954
3955 goto REDISCOVER_ENTRY;
3956 }
3957 }
3958 if (entry->is_sub_map) {
3959 vm_map_t submap;
3960
3961 submap = entry->object.sub_map;
3962 local_start = entry->vme_start;
3963 local_offset = entry->offset;
2d21ac55 3964
91447636 3965 vm_map_reference(submap);
b0d623f7 3966 vm_map_unlock_read(map);
91447636 3967
2d21ac55
A
3968 ret = vm_map_create_upl(submap,
3969 local_offset + (offset - local_start),
3970 upl_size, upl, page_list, count, flags);
91447636 3971 vm_map_deallocate(submap);
2d21ac55 3972
91447636
A
3973 return ret;
3974 }
91447636 3975 if (sync_cow_data) {
2d21ac55 3976 if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
91447636
A
3977 local_object = entry->object.vm_object;
3978 local_start = entry->vme_start;
3979 local_offset = entry->offset;
2d21ac55 3980
91447636 3981 vm_object_reference(local_object);
b0d623f7 3982 vm_map_unlock_read(map);
91447636 3983
b0d623f7 3984 if (local_object->shadow && local_object->copy) {
2d21ac55
A
3985 vm_object_lock_request(
3986 local_object->shadow,
3987 (vm_object_offset_t)
3988 ((offset - local_start) +
3989 local_offset) +
3990 local_object->shadow_offset,
3991 *upl_size, FALSE,
3992 MEMORY_OBJECT_DATA_SYNC,
3993 VM_PROT_NO_CHANGE);
91447636
A
3994 }
3995 sync_cow_data = FALSE;
3996 vm_object_deallocate(local_object);
2d21ac55 3997
91447636
A
3998 goto REDISCOVER_ENTRY;
3999 }
4000 }
91447636 4001 if (force_data_sync) {
91447636
A
4002 local_object = entry->object.vm_object;
4003 local_start = entry->vme_start;
4004 local_offset = entry->offset;
2d21ac55 4005
91447636 4006 vm_object_reference(local_object);
b0d623f7 4007 vm_map_unlock_read(map);
91447636
A
4008
4009 vm_object_lock_request(
2d21ac55
A
4010 local_object,
4011 (vm_object_offset_t)
4012 ((offset - local_start) + local_offset),
4013 (vm_object_size_t)*upl_size, FALSE,
4014 MEMORY_OBJECT_DATA_SYNC,
4015 VM_PROT_NO_CHANGE);
4016
91447636
A
4017 force_data_sync = FALSE;
4018 vm_object_deallocate(local_object);
2d21ac55 4019
91447636
A
4020 goto REDISCOVER_ENTRY;
4021 }
2d21ac55
A
4022 if (entry->object.vm_object->private)
4023 *flags = UPL_DEV_MEMORY;
4024 else
4025 *flags = 0;
4026
4027 if (entry->object.vm_object->phys_contiguous)
4028 *flags |= UPL_PHYS_CONTIG;
91447636 4029
91447636
A
4030 local_object = entry->object.vm_object;
4031 local_offset = entry->offset;
4032 local_start = entry->vme_start;
2d21ac55 4033
91447636 4034 vm_object_reference(local_object);
b0d623f7 4035 vm_map_unlock_read(map);
2d21ac55
A
4036
4037 ret = vm_object_iopl_request(local_object,
4038 (vm_object_offset_t) ((offset - local_start) + local_offset),
4039 *upl_size,
4040 upl,
4041 page_list,
4042 count,
4043 caller_flags);
91447636 4044 vm_object_deallocate(local_object);
2d21ac55 4045
91447636
A
4046 return(ret);
4047 }
b0d623f7 4048 vm_map_unlock_read(map);
1c79356b 4049
2d21ac55 4050 return(KERN_FAILURE);
91447636
A
4051}
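
/*
 * A hedged sketch (assumption, not original source): a two-step use of
 * vm_map_create_upl() suggested by the UPL_QUERY_OBJECT_TYPE path above -
 * probe the backing object first, then build the UPL.  The names "map",
 * "addr", "len" and the flag choice in step 2 are illustrative only.
 */
static kern_return_t
example_create_upl(vm_map_t map, vm_map_address_t addr, upl_size_t len,
		   upl_t *uplp, upl_page_info_array_t pl, unsigned int *cnt)
{
	upl_size_t	upl_size = len;
	int		flags = UPL_QUERY_OBJECT_TYPE;
	kern_return_t	kr;

	/* step 1: ask what backs this range; *flags comes back with
	 * UPL_DEV_MEMORY and/or UPL_PHYS_CONTIG, no UPL is created */
	kr = vm_map_create_upl(map, addr, &upl_size, uplp, NULL, NULL, &flags);
	if (kr != KERN_SUCCESS)
		return kr;
	if (flags & UPL_DEV_MEMORY)
		return KERN_FAILURE;	/* device memory: not handled here */

	/* step 2: create the UPL for a pageout-style operation; upl_size may
	 * come back trimmed to the map entry or to MAX_UPL_SIZE pages */
	upl_size = len;
	flags = UPL_COPYOUT_FROM;
	return vm_map_create_upl(map, addr, &upl_size, uplp, pl, cnt, &flags);
}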
4052
4053/*
4054 * Internal routine to enter a UPL into a VM map.
4055 *
4056 * JMM - This should just be doable through the standard
4057 * vm_map_enter() API.
4058 */
1c79356b 4059kern_return_t
91447636
A
4060vm_map_enter_upl(
4061 vm_map_t map,
4062 upl_t upl,
b0d623f7 4063 vm_map_offset_t *dst_addr)
1c79356b 4064{
91447636 4065 vm_map_size_t size;
1c79356b 4066 vm_object_offset_t offset;
91447636 4067 vm_map_offset_t addr;
1c79356b
A
4068 vm_page_t m;
4069 kern_return_t kr;
b0d623f7
A
4070 int isVectorUPL = 0, curr_upl=0;
4071 upl_t vector_upl = NULL;
4072 vm_offset_t vector_upl_dst_addr = 0;
4073 vm_map_t vector_upl_submap = NULL;
4074 upl_offset_t subupl_offset = 0;
4075 upl_size_t subupl_size = 0;
1c79356b 4076
0b4e3aa0
A
4077 if (upl == UPL_NULL)
4078 return KERN_INVALID_ARGUMENT;
4079
b0d623f7
A
4080 if((isVectorUPL = vector_upl_is_valid(upl))) {
4081 int mapped=0,valid_upls=0;
4082 vector_upl = upl;
4083
4084 upl_lock(vector_upl);
4085 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
4086 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
4087 if(upl == NULL)
4088 continue;
4089 valid_upls++;
4090 if (UPL_PAGE_LIST_MAPPED & upl->flags)
4091 mapped++;
4092 }
4093
4094 if(mapped) {
4095 if(mapped != valid_upls)
4096 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
4097 else {
4098 upl_unlock(vector_upl);
4099 return KERN_FAILURE;
4100 }
4101 }
4102
4103 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
4104 if( kr != KERN_SUCCESS )
4105 panic("Vector UPL submap allocation failed\n");
4106 map = vector_upl_submap;
4107 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
4108 curr_upl=0;
4109 }
4110 else
4111 upl_lock(upl);
4112
4113process_upl_to_enter:
4114 if(isVectorUPL){
4115 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
4116 *dst_addr = vector_upl_dst_addr;
4117 upl_unlock(vector_upl);
4118 return KERN_SUCCESS;
4119 }
4120 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
4121 if(upl == NULL)
4122 goto process_upl_to_enter;
4123 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
4124 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
4125 }
0b4e3aa0 4126
2d21ac55
A
4127 /*
4128 * check to see if already mapped
4129 */
4130 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
0b4e3aa0 4131 upl_unlock(upl);
1c79356b 4132 return KERN_FAILURE;
0b4e3aa0 4133 }
1c79356b 4134
2d21ac55
A
4135 if ((!(upl->flags & UPL_SHADOWED)) && !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
4136 (upl->map_object->phys_contiguous))) {
55e303ae
A
4137 vm_object_t object;
4138 vm_page_t alias_page;
4139 vm_object_offset_t new_offset;
b0d623f7 4140 unsigned int pg_num;
55e303ae
A
4141 wpl_array_t lite_list;
4142
2d21ac55 4143 if (upl->flags & UPL_INTERNAL) {
55e303ae 4144 lite_list = (wpl_array_t)
91447636 4145 ((((uintptr_t)upl) + sizeof(struct upl))
2d21ac55 4146 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
55e303ae 4147 } else {
2d21ac55 4148 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
55e303ae
A
4149 }
4150 object = upl->map_object;
4151 upl->map_object = vm_object_allocate(upl->size);
2d21ac55 4152
55e303ae 4153 vm_object_lock(upl->map_object);
2d21ac55 4154
55e303ae
A
4155 upl->map_object->shadow = object;
4156 upl->map_object->pageout = TRUE;
4157 upl->map_object->can_persist = FALSE;
2d21ac55
A
4158 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
4159 upl->map_object->shadow_offset = upl->offset - object->paging_offset;
55e303ae 4160 upl->map_object->wimg_bits = object->wimg_bits;
55e303ae
A
4161 offset = upl->map_object->shadow_offset;
4162 new_offset = 0;
4163 size = upl->size;
91447636 4164
2d21ac55 4165 upl->flags |= UPL_SHADOWED;
91447636 4166
2d21ac55 4167 while (size) {
b0d623f7
A
4168 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
4169 assert(pg_num == new_offset / PAGE_SIZE);
55e303ae 4170
2d21ac55 4171 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
55e303ae 4172
2d21ac55 4173 VM_PAGE_GRAB_FICTITIOUS(alias_page);
91447636 4174
2d21ac55 4175 vm_object_lock(object);
91447636 4176
2d21ac55
A
4177 m = vm_page_lookup(object, offset);
4178 if (m == VM_PAGE_NULL) {
4179 panic("vm_upl_map: page missing\n");
4180 }
55e303ae 4181
2d21ac55
A
4182 /*
4183 * Convert the fictitious page to a private
4184 * shadow of the real page.
4185 */
4186 assert(alias_page->fictitious);
4187 alias_page->fictitious = FALSE;
4188 alias_page->private = TRUE;
4189 alias_page->pageout = TRUE;
4190 /*
4191 * since m is a page in the upl it must
4192 * already be wired or BUSY, so it's
4193 * safe to assign the underlying physical
4194 * page to the alias
4195 */
4196 alias_page->phys_page = m->phys_page;
4197
4198 vm_object_unlock(object);
4199
4200 vm_page_lockspin_queues();
4201 vm_page_wire(alias_page);
4202 vm_page_unlock_queues();
4203
4204 /*
4205 * ENCRYPTED SWAP:
4206 * The virtual page ("m") has to be wired in some way
4207 * here or its physical page ("m->phys_page") could
4208 * be recycled at any time.
4209 * Assuming this is enforced by the caller, we can't
4210 * get an encrypted page here. Since the encryption
4211 * key depends on the VM page's "pager" object and
4212 * the "paging_offset", we couldn't handle 2 pageable
4213 * VM pages (with different pagers and paging_offsets)
4214 * sharing the same physical page: we could end up
4215 * encrypting with one key (via one VM page) and
4216 * decrypting with another key (via the alias VM page).
4217 */
4218 ASSERT_PAGE_DECRYPTED(m);
55e303ae 4219
2d21ac55
A
4220 vm_page_insert(alias_page, upl->map_object, new_offset);
4221
4222 assert(!alias_page->wanted);
4223 alias_page->busy = FALSE;
4224 alias_page->absent = FALSE;
4225 }
4226 size -= PAGE_SIZE;
4227 offset += PAGE_SIZE_64;
4228 new_offset += PAGE_SIZE_64;
55e303ae 4229 }
91447636 4230 vm_object_unlock(upl->map_object);
55e303ae
A
4231 }
4232 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous)
4233 offset = upl->offset - upl->map_object->paging_offset;
4234 else
4235 offset = 0;
1c79356b
A
4236 size = upl->size;
4237
2d21ac55 4238 vm_object_reference(upl->map_object);
1c79356b 4239
b0d623f7
A
4240 if(!isVectorUPL) {
4241 *dst_addr = 0;
4242 /*
4243 * NEED A UPL_MAP ALIAS
4244 */
4245 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
4246 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
4247 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
4248 }
4249 else {
4250 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
4251 VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
4252 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
4253 if(kr)
4254 panic("vm_map_enter failed for a Vector UPL\n");
4255 }
1c79356b 4256
0b4e3aa0
A
4257 if (kr != KERN_SUCCESS) {
4258 upl_unlock(upl);
1c79356b 4259 return(kr);
0b4e3aa0 4260 }
91447636
A
4261 vm_object_lock(upl->map_object);
4262
2d21ac55 4263 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
1c79356b 4264 m = vm_page_lookup(upl->map_object, offset);
2d21ac55
A
4265
4266 if (m) {
4267 unsigned int cache_attr;
4268 cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
4269
4270 m->pmapped = TRUE;
b0d623f7
A
4271
4272 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
4273 * but only in kernel space. If this was on a user map,
4274 * we'd have to set the wpmapped bit. */
4275 /* m->wpmapped = TRUE; */
4276 assert(map==kernel_map);
9bccf70c 4277
2d21ac55 4278 PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, cache_attr, TRUE);
1c79356b 4279 }
2d21ac55 4280 offset += PAGE_SIZE_64;
1c79356b 4281 }
91447636
A
4282 vm_object_unlock(upl->map_object);
4283
2d21ac55
A
4284 /*
4285 * hold a reference for the mapping
4286 */
4287 upl->ref_count++;
1c79356b 4288 upl->flags |= UPL_PAGE_LIST_MAPPED;
b0d623f7
A
4289 upl->kaddr = (vm_offset_t) *dst_addr;
4290 assert(upl->kaddr == *dst_addr);
4291
4292 if(!isVectorUPL)
4293 upl_unlock(upl);
4294 else
4295 goto process_upl_to_enter;
2d21ac55 4296
1c79356b
A
4297 return KERN_SUCCESS;
4298}
4299
91447636
A
4300/*
4301 * Internal routine to remove a UPL mapping from a VM map.
4302 *
4303 * XXX - This should just be doable through a standard
4304 * vm_map_remove() operation. Otherwise, implicit clean-up
4305 * of the target map won't be able to correctly remove
4306 * these (and release the reference on the UPL). Having
4307 * to do this means we can't map these into user-space
4308 * maps yet.
4309 */
1c79356b 4310kern_return_t
91447636 4311vm_map_remove_upl(
1c79356b
A
4312 vm_map_t map,
4313 upl_t upl)
4314{
0b4e3aa0 4315 vm_address_t addr;
91447636 4316 upl_size_t size;
b0d623f7
A
4317 int isVectorUPL = 0, curr_upl = 0;
4318 upl_t vector_upl = NULL;
1c79356b 4319
0b4e3aa0
A
4320 if (upl == UPL_NULL)
4321 return KERN_INVALID_ARGUMENT;
4322
b0d623f7
A
4323 if((isVectorUPL = vector_upl_is_valid(upl))) {
4324 int unmapped=0, valid_upls=0;
4325 vector_upl = upl;
4326 upl_lock(vector_upl);
4327 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
4328 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
4329 if(upl == NULL)
4330 continue;
4331 valid_upls++;
4332 if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
4333 unmapped++;
4334 }
4335
4336 if(unmapped) {
4337 if(unmapped != valid_upls)
4338 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
4339 else {
4340 upl_unlock(vector_upl);
4341 return KERN_FAILURE;
4342 }
4343 }
4344 curr_upl=0;
4345 }
4346 else
4347 upl_lock(upl);
4348
4349process_upl_to_remove:
4350 if(isVectorUPL) {
4351 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
4352 vm_map_t v_upl_submap;
4353 vm_offset_t v_upl_submap_dst_addr;
4354 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
4355
4356 vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
4357 vm_map_deallocate(v_upl_submap);
4358 upl_unlock(vector_upl);
4359 return KERN_SUCCESS;
4360 }
4361
4362 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
4363 if(upl == NULL)
4364 goto process_upl_to_remove;
4365 }
2d21ac55
A
4366
4367 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
0b4e3aa0 4368 addr = upl->kaddr;
1c79356b 4369 size = upl->size;
2d21ac55 4370
0b4e3aa0
A
4371 assert(upl->ref_count > 1);
4372 upl->ref_count--; /* removing mapping ref */
2d21ac55 4373
1c79356b
A
4374 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
4375 upl->kaddr = (vm_offset_t) 0;
b0d623f7
A
4376
4377 if(!isVectorUPL) {
4378 upl_unlock(upl);
4379
4380 vm_map_remove(map,
4381 vm_map_trunc_page(addr),
4382 vm_map_round_page(addr + size),
4383 VM_MAP_NO_FLAGS);
4384
4385 return KERN_SUCCESS;
4386 }
4387 else {
4388 /*
4389 * If it's a Vectored UPL, we'll be removing the entire
 4390 * submap anyway, so no need to remove individual UPL
4391 * element mappings from within the submap
4392 */
4393 goto process_upl_to_remove;
4394 }
1c79356b 4395 }
0b4e3aa0 4396 upl_unlock(upl);
2d21ac55 4397
0b4e3aa0 4398 return KERN_FAILURE;
1c79356b
A
4399}
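
/*
 * A hedged sketch (assumption, not original source): the expected pairing of
 * the two routines above.  vm_map_enter_upl() takes an extra reference on the
 * UPL and returns the address of the new mapping; vm_map_remove_upl() tears
 * the mapping down and drops that reference.  As the assert in the enter path
 * notes, only kernel_map is expected here today.
 */
static kern_return_t
example_map_and_unmap(upl_t upl)
{
	vm_map_offset_t	kaddr = 0;
	kern_return_t	kr;

	kr = vm_map_enter_upl(kernel_map, upl, &kaddr);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... access the wired pages through (void *)kaddr ... */

	return vm_map_remove_upl(kernel_map, upl);
}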
4400
b0d623f7
A
4401static void
4402dw_do_work(
4403 vm_object_t object,
4404 struct dw *dwp,
4405 int dw_count)
4406{
4407 int j;
4408 boolean_t held_as_spin = TRUE;
4409
4410 /*
4411 * pageout_scan takes the vm_page_lock_queues first
4412 * then tries for the object lock... to avoid what
4413 * is effectively a lock inversion, we'll go to the
4414 * trouble of taking them in that same order... otherwise
4415 * if this object contains the majority of the pages resident
4416 * in the UBC (or a small set of large objects actively being
4417 * worked on contain the majority of the pages), we could
4418 * cause the pageout_scan thread to 'starve' in its attempt
4419 * to find pages to move to the free queue, since it has to
4420 * successfully acquire the object lock of any candidate page
4421 * before it can steal/clean it.
4422 */
4423 if (!vm_page_trylockspin_queues()) {
4424 vm_object_unlock(object);
4425
4426 vm_page_lockspin_queues();
4427
4428 for (j = 0; ; j++) {
4429 if (!vm_object_lock_avoid(object) &&
4430 _vm_object_lock_try(object))
4431 break;
4432 vm_page_unlock_queues();
4433 mutex_pause(j);
4434 vm_page_lockspin_queues();
4435 }
4436 }
4437 for (j = 0; j < dw_count; j++, dwp++) {
4438
4439 if (dwp->dw_mask & DW_vm_pageout_throttle_up)
4440 vm_pageout_throttle_up(dwp->dw_m);
4441
4442 if (dwp->dw_mask & DW_vm_page_wire)
4443 vm_page_wire(dwp->dw_m);
4444 else if (dwp->dw_mask & DW_vm_page_unwire)
4445 vm_page_unwire(dwp->dw_m);
4446
4447 if (dwp->dw_mask & DW_vm_page_free) {
4448 if (held_as_spin == TRUE) {
4449 vm_page_lockconvert_queues();
4450 held_as_spin = FALSE;
4451 }
4452 vm_page_free(dwp->dw_m);
4453 } else {
4454 if (dwp->dw_mask & DW_vm_page_deactivate_internal)
4455 vm_page_deactivate_internal(dwp->dw_m, FALSE);
4456 else if (dwp->dw_mask & DW_vm_page_activate)
4457 vm_page_activate(dwp->dw_m);
4458 else if (dwp->dw_mask & DW_vm_page_speculate)
4459 vm_page_speculate(dwp->dw_m, TRUE);
4460 else if (dwp->dw_mask & DW_vm_page_lru)
4461 vm_page_lru(dwp->dw_m);
4462
4463 if (dwp->dw_mask & DW_set_reference)
4464 dwp->dw_m->reference = TRUE;
4465 else if (dwp->dw_mask & DW_clear_reference)
4466 dwp->dw_m->reference = FALSE;
4467
4468 if (dwp->dw_mask & DW_clear_busy)
4469 dwp->dw_m->busy = FALSE;
4470
4471 if (dwp->dw_mask & DW_PAGE_WAKEUP)
4472 PAGE_WAKEUP(dwp->dw_m);
4473 }
4474 }
4475 vm_page_unlock_queues();
4476}
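
/*
 * A generic illustration (not kernel code) of the lock-ordering dance
 * dw_do_work() performs above, restated with pthread mutexes.  Lock "a"
 * plays the role of the page-queues lock and "b" the object lock; if "a"
 * cannot be taken while "b" is already held, both are dropped and reacquired
 * in the canonical a-then-b order with a backoff, avoiding the inversion.
 */
#include <pthread.h>
#include <sched.h>

static void
lock_in_order(pthread_mutex_t *a /* queues */, pthread_mutex_t *b /* object */)
{
	/* we arrive already holding b; try to add a without blocking */
	if (pthread_mutex_trylock(a) == 0)
		return;				/* got both, order preserved   */

	pthread_mutex_unlock(b);		/* give up b to avoid inversion */
	pthread_mutex_lock(a);			/* take a first, as pageout does */

	while (pthread_mutex_trylock(b) != 0) {	/* now re-take b politely      */
		pthread_mutex_unlock(a);
		sched_yield();			/* stand-in for mutex_pause()  */
		pthread_mutex_lock(a);
	}
}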
4477
4478
4479
1c79356b 4480kern_return_t
0b4e3aa0 4481upl_commit_range(
1c79356b 4482 upl_t upl,
91447636
A
4483 upl_offset_t offset,
4484 upl_size_t size,
1c79356b 4485 int flags,
0b4e3aa0
A
4486 upl_page_info_t *page_list,
4487 mach_msg_type_number_t count,
4488 boolean_t *empty)
1c79356b 4489{
b0d623f7 4490 upl_size_t xfer_size, subupl_size = size;
55e303ae 4491 vm_object_t shadow_object;
2d21ac55 4492 vm_object_t object;
1c79356b 4493 vm_object_offset_t target_offset;
b0d623f7 4494 upl_offset_t subupl_offset = offset;
1c79356b 4495 int entry;
55e303ae
A
4496 wpl_array_t lite_list;
4497 int occupied;
91447636 4498 int clear_refmod = 0;
2d21ac55 4499 int pgpgout_count = 0;
b0d623f7
A
4500 struct dw dw_array[DELAYED_WORK_LIMIT];
4501 struct dw *dwp;
4502 int dw_count, isVectorUPL = 0;
4503 upl_t vector_upl = NULL;
1c79356b 4504
0b4e3aa0
A
4505 *empty = FALSE;
4506
4507 if (upl == UPL_NULL)
4508 return KERN_INVALID_ARGUMENT;
4509
4510 if (count == 0)
4511 page_list = NULL;
4512
b0d623f7
A
4513 if((isVectorUPL = vector_upl_is_valid(upl))) {
4514 vector_upl = upl;
4515 upl_lock(vector_upl);
4516 }
4517 else
4518 upl_lock(upl);
4519
4520process_upl_to_commit:
4521
4522 if(isVectorUPL) {
4523 size = subupl_size;
4524 offset = subupl_offset;
4525 if(size == 0) {
4526 upl_unlock(vector_upl);
4527 return KERN_SUCCESS;
4528 }
4529 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
4530 if(upl == NULL) {
4531 upl_unlock(vector_upl);
4532 return KERN_FAILURE;
4533 }
4534 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
4535 subupl_size -= size;
4536 subupl_offset += size;
4537 }
4538
4539#if UPL_DEBUG
4540 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
4541 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
4542
4543 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
4544 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
4545
4546 upl->upl_commit_index++;
4547 }
4548#endif
2d21ac55
A
4549 if (upl->flags & UPL_DEVICE_MEMORY)
4550 xfer_size = 0;
4551 else if ((offset + size) <= upl->size)
4552 xfer_size = size;
b0d623f7
A
4553 else {
4554 if(!isVectorUPL)
4555 upl_unlock(upl);
4556 else {
4557 upl_unlock(vector_upl);
4558 }
2d21ac55 4559 return KERN_FAILURE;
91447636 4560 }
55e303ae
A
4561 if (upl->flags & UPL_CLEAR_DIRTY)
4562 flags |= UPL_COMMIT_CLEAR_DIRTY;
4563
2d21ac55
A
4564 if (upl->flags & UPL_INTERNAL)
4565 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
4566 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
4567 else
4568 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
1c79356b 4569
2d21ac55
A
4570 object = upl->map_object;
4571
4572 if (upl->flags & UPL_SHADOWED) {
4573 vm_object_lock(object);
4574 shadow_object = object->shadow;
55e303ae 4575 } else {
2d21ac55 4576 shadow_object = object;
55e303ae 4577 }
1c79356b
A
4578 entry = offset/PAGE_SIZE;
4579 target_offset = (vm_object_offset_t)offset;
55e303ae 4580
b0d623f7
A
4581 if (upl->flags & UPL_KERNEL_OBJECT)
4582 vm_object_lock_shared(shadow_object);
4583 else
4584 vm_object_lock(shadow_object);
4a3eedf9 4585
b0d623f7
A
4586 if (upl->flags & UPL_ACCESS_BLOCKED) {
4587 assert(shadow_object->blocked_access);
4588 shadow_object->blocked_access = FALSE;
4589 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
4a3eedf9 4590 }
4a3eedf9 4591
593a1d5f
A
4592 if (shadow_object->code_signed) {
4593 /*
4594 * CODE SIGNING:
4595 * If the object is code-signed, do not let this UPL tell
4596 * us if the pages are valid or not. Let the pages be
4597 * validated by VM the normal way (when they get mapped or
4598 * copied).
4599 */
4600 flags &= ~UPL_COMMIT_CS_VALIDATED;
4601 }
4602 if (! page_list) {
4603 /*
4604 * No page list to get the code-signing info from !?
4605 */
4606 flags &= ~UPL_COMMIT_CS_VALIDATED;
4607 }
4608
b0d623f7
A
4609 dwp = &dw_array[0];
4610 dw_count = 0;
4611
91447636 4612 while (xfer_size) {
2d21ac55
A
4613 vm_page_t t, m;
4614
b0d623f7
A
4615 dwp->dw_mask = 0;
4616 clear_refmod = 0;
4617
55e303ae 4618 m = VM_PAGE_NULL;
d7e50217 4619
55e303ae 4620 if (upl->flags & UPL_LITE) {
b0d623f7 4621 unsigned int pg_num;
55e303ae 4622
b0d623f7
A
4623 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
4624 assert(pg_num == target_offset/PAGE_SIZE);
55e303ae
A
4625
4626 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
4627 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
2d21ac55 4628
b0d623f7
A
4629 if (!(upl->flags & UPL_KERNEL_OBJECT))
4630 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
55e303ae
A
4631 }
4632 }
2d21ac55
A
4633 if (upl->flags & UPL_SHADOWED) {
4634 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
4635
55e303ae
A
4636 t->pageout = FALSE;
4637
b0d623f7 4638 VM_PAGE_FREE(t);
55e303ae 4639
2d21ac55
A
4640 if (m == VM_PAGE_NULL)
4641 m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
55e303ae
A
4642 }
4643 }
b0d623f7 4644 if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
593a1d5f 4645 goto commit_next_page;
55e303ae 4646
593a1d5f
A
4647 if (flags & UPL_COMMIT_CS_VALIDATED) {
4648 /*
4649 * CODE SIGNING:
4650 * Set the code signing bits according to
4651 * what the UPL says they should be.
4652 */
4653 m->cs_validated = page_list[entry].cs_validated;
4654 m->cs_tainted = page_list[entry].cs_tainted;
4655 }
4656 if (upl->flags & UPL_IO_WIRE) {
55e303ae 4657
b0d623f7 4658 dwp->dw_mask |= DW_vm_page_unwire;
55e303ae 4659
593a1d5f
A
4660 if (page_list)
4661 page_list[entry].phys_addr = 0;
2d21ac55 4662
593a1d5f
A
4663 if (flags & UPL_COMMIT_SET_DIRTY)
4664 m->dirty = TRUE;
4665 else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
4666 m->dirty = FALSE;
b0d623f7 4667
593a1d5f
A
4668 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4669 m->cs_validated && !m->cs_tainted) {
4a3eedf9
A
4670 /*
4671 * CODE SIGNING:
4672 * This page is no longer dirty
4673 * but could have been modified,
4674 * so it will need to be
4675 * re-validated.
4676 */
4677 m->cs_validated = FALSE;
b0d623f7 4678#if DEVELOPMENT || DEBUG
4a3eedf9 4679 vm_cs_validated_resets++;
b0d623f7
A
4680#endif
4681 pmap_disconnect(m->phys_page);
4a3eedf9 4682 }
91447636 4683 clear_refmod |= VM_MEM_MODIFIED;
55e303ae 4684 }
b0d623f7
A
4685 if (flags & UPL_COMMIT_INACTIVATE) {
4686 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4687 clear_refmod |= VM_MEM_REFERENCED;
4688 }
4689 if (upl->flags & UPL_ACCESS_BLOCKED) {
593a1d5f
A
4690 /*
4691 * We blocked access to the pages in this UPL.
4692 * Clear the "busy" bit and wake up any waiter
4693 * for this page.
4694 */
b0d623f7 4695 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
593a1d5f
A
4696 }
4697 goto commit_next_page;
4698 }
4699 /*
4700 * make sure to clear the hardware
4701 * modify or reference bits before
4702 * releasing the BUSY bit on this page
4703 * otherwise we risk losing a legitimate
4704 * change of state
4705 */
4706 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
4707 m->dirty = FALSE;
2d21ac55 4708
593a1d5f
A
4709 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4710 m->cs_validated && !m->cs_tainted) {
4711 /*
4712 * CODE SIGNING:
4713 * This page is no longer dirty
4714 * but could have been modified,
4715 * so it will need to be
4716 * re-validated.
4717 */
4718 m->cs_validated = FALSE;
4719#if DEVELOPMENT || DEBUG
4720 vm_cs_validated_resets++;
4721#endif
b0d623f7 4722 pmap_disconnect(m->phys_page);
55e303ae 4723 }
593a1d5f
A
4724 clear_refmod |= VM_MEM_MODIFIED;
4725 }
593a1d5f
A
4726 if (page_list) {
4727 upl_page_info_t *p;
2d21ac55 4728
593a1d5f 4729 p = &(page_list[entry]);
b0d623f7 4730
593a1d5f
A
4731 if (p->phys_addr && p->pageout && !m->pageout) {
4732 m->busy = TRUE;
4733 m->pageout = TRUE;
b0d623f7
A
4734
4735 dwp->dw_mask |= DW_vm_page_wire;
4736
593a1d5f
A
4737 } else if (p->phys_addr &&
4738 !p->pageout && m->pageout &&
4739 !m->dump_cleaning) {
2d21ac55 4740 m->pageout = FALSE;
593a1d5f
A
4741 m->absent = FALSE;
4742 m->overwriting = FALSE;
b0d623f7
A
4743
4744 dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
593a1d5f
A
4745 }
4746 page_list[entry].phys_addr = 0;
4747 }
4748 m->dump_cleaning = FALSE;
2d21ac55 4749
593a1d5f 4750 if (m->laundry)
b0d623f7 4751 dwp->dw_mask |= DW_vm_pageout_throttle_up;
91447636 4752
593a1d5f
A
4753 if (m->pageout) {
4754 m->cleaning = FALSE;
4755 m->encrypted_cleaning = FALSE;
4756 m->pageout = FALSE;
1c79356b 4757#if MACH_CLUSTER_STATS
593a1d5f 4758 if (m->wanted) vm_pageout_target_collisions++;
1c79356b 4759#endif
2d21ac55 4760 m->dirty = FALSE;
b0d623f7 4761
593a1d5f
A
4762 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4763 m->cs_validated && !m->cs_tainted) {
4a3eedf9
A
4764 /*
4765 * CODE SIGNING:
4766 * This page is no longer dirty
4767 * but could have been modified,
4768 * so it will need to be
4769 * re-validated.
4770 */
4771 m->cs_validated = FALSE;
593a1d5f 4772#if DEVELOPMENT || DEBUG
4a3eedf9 4773 vm_cs_validated_resets++;
593a1d5f 4774#endif
b0d623f7 4775 pmap_disconnect(m->phys_page);
4a3eedf9 4776 }
b0d623f7
A
4777
4778 if ((flags & UPL_COMMIT_SET_DIRTY) ||
4779 (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))
593a1d5f 4780 m->dirty = TRUE;
b0d623f7 4781
593a1d5f
A
4782 if (m->dirty) {
4783 /*
4784 * page was re-dirtied after we started
4785 * the pageout... reactivate it since
4786 * we don't know whether the on-disk
4787 * copy matches what is now in memory
2d21ac55 4788 */
b0d623f7
A
4789 dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
4790
593a1d5f
A
4791 if (upl->flags & UPL_PAGEOUT) {
4792 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
4793 VM_STAT_INCR(reactivations);
4794 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
4795 }
593a1d5f
A
4796 } else {
4797 /*
4798 * page has been successfully cleaned
4799 * go ahead and free it for other use
2d21ac55 4800 */
b0d623f7 4801
593a1d5f
A
4802 if (m->object->internal) {
4803 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
4804 } else {
4805 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
4806 }
b0d623f7
A
4807 dwp->dw_mask |= DW_vm_page_free;
4808
593a1d5f
A
4809 if (upl->flags & UPL_PAGEOUT) {
4810 CLUSTER_STAT(vm_pageout_target_page_freed++;)
b0d623f7 4811
593a1d5f
A
4812 if (page_list[entry].dirty) {
4813 VM_STAT_INCR(pageouts);
4814 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
4815 pgpgout_count++;
4816 }
4817 }
de355530 4818 }
593a1d5f
A
4819 goto commit_next_page;
4820 }
4821#if MACH_CLUSTER_STATS
4822 if (m->wpmapped)
4823 m->dirty = pmap_is_modified(m->phys_page);
4824
4825 if (m->dirty) vm_pageout_cluster_dirtied++;
4826 else vm_pageout_cluster_cleaned++;
4827 if (m->wanted) vm_pageout_cluster_collisions++;
4828#endif
4829 m->dirty = FALSE;
91447636 4830
593a1d5f
A
4831 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
4832 m->cs_validated && !m->cs_tainted) {
2d21ac55 4833 /*
593a1d5f
A
4834 * CODE SIGNING:
4835 * This page is no longer dirty
4836 * but could have been modified,
4837 * so it will need to be
4838 * re-validated.
2d21ac55 4839 */
593a1d5f
A
4840 m->cs_validated = FALSE;
4841#if DEVELOPMENT || DEBUG
4842 vm_cs_validated_resets++;
4843#endif
b0d623f7 4844 pmap_disconnect(m->phys_page);
593a1d5f 4845 }
55e303ae 4846
593a1d5f
A
4847 if ((m->busy) && (m->cleaning)) {
4848 /*
4849 * the request_page_list case
4850 */
4851 m->absent = FALSE;
4852 m->overwriting = FALSE;
b0d623f7
A
4853
4854 dwp->dw_mask |= DW_clear_busy;
4855
593a1d5f
A
4856 } else if (m->overwriting) {
4857 /*
4858 * alternate request page list, write to
4859 * page_list case. Occurs when the original
4860 * page was wired at the time of the list
4861 * request
4862 */
b0d623f7 4863 assert(VM_PAGE_WIRED(m));
593a1d5f 4864 m->overwriting = FALSE;
b0d623f7
A
4865
4866 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
593a1d5f
A
4867 }
4868 m->cleaning = FALSE;
4869 m->encrypted_cleaning = FALSE;
b0d623f7 4870
593a1d5f
A
4871 /*
 4872 * It is part of the semantics of COPYOUT_FROM
 4873 * UPLs that a commit implies a cache sync
 4874 * between the vm page and the backing store;
 4875 * this can be used to strip the precious bit
 4876 * as well as clean
4877 */
b0d623f7 4878 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
593a1d5f 4879 m->precious = FALSE;
b0d623f7 4880
593a1d5f
A
4881 if (flags & UPL_COMMIT_SET_DIRTY)
4882 m->dirty = TRUE;
b0d623f7 4883
593a1d5f 4884 if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
b0d623f7
A
4885 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4886 clear_refmod |= VM_MEM_REFERENCED;
4887
593a1d5f 4888 } else if (!m->active && !m->inactive && !m->speculative) {
b0d623f7
A
4889
4890 if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
4891 dwp->dw_mask |= DW_vm_page_speculate;
593a1d5f 4892 else if (m->reference)
b0d623f7
A
4893 dwp->dw_mask |= DW_vm_page_activate;
4894 else {
4895 dwp->dw_mask |= DW_vm_page_deactivate_internal;
4896 clear_refmod |= VM_MEM_REFERENCED;
4897 }
593a1d5f 4898 }
b0d623f7 4899 if (upl->flags & UPL_ACCESS_BLOCKED) {
2d21ac55 4900 /*
593a1d5f
A
 4901 * We blocked access to the pages in this UPL.
4902 * Clear the "busy" bit on this page before we
4903 * wake up any waiter.
2d21ac55 4904 */
b0d623f7 4905 dwp->dw_mask |= DW_clear_busy;
1c79356b 4906 }
593a1d5f
A
4907 /*
4908 * Wakeup any thread waiting for the page to be un-cleaning.
4909 */
b0d623f7 4910 dwp->dw_mask |= DW_PAGE_WAKEUP;
593a1d5f 4911
2d21ac55 4912commit_next_page:
b0d623f7
A
4913 if (clear_refmod)
4914 pmap_clear_refmod(m->phys_page, clear_refmod);
4915
1c79356b
A
4916 target_offset += PAGE_SIZE_64;
4917 xfer_size -= PAGE_SIZE;
4918 entry++;
2d21ac55 4919
b0d623f7
A
4920 if (dwp->dw_mask) {
4921 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
4922 if (m->busy == FALSE) {
4923 /*
4924 * dw_do_work may need to drop the object lock
4925 * if it does, we need the pages it's looking at to
4926 * be held stable via the busy bit.
4927 */
4928 m->busy = TRUE;
4929 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
4930 }
4931 dwp->dw_m = m;
4932 dwp++;
4933 dw_count++;
4a3eedf9 4934
b0d623f7
A
4935 if (dw_count >= DELAYED_WORK_LIMIT) {
4936 dw_do_work(shadow_object, &dw_array[0], dw_count);
4937
4938 dwp = &dw_array[0];
4939 dw_count = 0;
4940 }
4941 } else {
4942 if (dwp->dw_mask & DW_clear_busy)
4943 m->busy = FALSE;
4944
4945 if (dwp->dw_mask & DW_PAGE_WAKEUP)
4946 PAGE_WAKEUP(m);
4a3eedf9 4947 }
2d21ac55 4948 }
1c79356b 4949 }
b0d623f7
A
4950 if (dw_count)
4951 dw_do_work(shadow_object, &dw_array[0], dw_count);
55e303ae
A
4952
4953 occupied = 1;
4954
4955 if (upl->flags & UPL_DEVICE_MEMORY) {
4956 occupied = 0;
4957 } else if (upl->flags & UPL_LITE) {
4958 int pg_num;
4959 int i;
2d21ac55 4960
55e303ae
A
4961 pg_num = upl->size/PAGE_SIZE;
4962 pg_num = (pg_num + 31) >> 5;
4963 occupied = 0;
2d21ac55
A
4964
4965 for (i = 0; i < pg_num; i++) {
4966 if (lite_list[i] != 0) {
55e303ae
A
4967 occupied = 1;
4968 break;
4969 }
4970 }
4971 } else {
2d21ac55 4972 if (queue_empty(&upl->map_object->memq))
55e303ae 4973 occupied = 0;
55e303ae 4974 }
2d21ac55 4975 if (occupied == 0) {
b0d623f7
A
4976 /*
4977 * If this UPL element belongs to a Vector UPL and is
4978 * empty, then this is the right function to deallocate
 4979 * it. So go ahead and set the *empty variable. The flag
4980 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
4981 * should be considered relevant for the Vector UPL and not
4982 * the internal UPLs.
4983 */
4984 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
0b4e3aa0 4985 *empty = TRUE;
2d21ac55 4986
b0d623f7 4987 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
2d21ac55
A
4988 /*
4989 * this is not a paging object
4990 * so we need to drop the paging reference
4991 * that was taken when we created the UPL
4992 * against this object
4993 */
b0d623f7 4994 vm_object_activity_end(shadow_object);
2d21ac55
A
4995 } else {
4996 /*
 4997 * we donated the paging reference to
4998 * the map object... vm_pageout_object_terminate
4999 * will drop this reference
5000 */
5001 }
1c79356b 5002 }
55e303ae 5003 vm_object_unlock(shadow_object);
91447636
A
5004 if (object != shadow_object)
5005 vm_object_unlock(object);
b0d623f7
A
5006
5007 if(!isVectorUPL)
5008 upl_unlock(upl);
5009 else {
5010 /*
 5011 * If we completed our operations on a UPL that is
5012 * part of a Vectored UPL and if empty is TRUE, then
5013 * we should go ahead and deallocate this UPL element.
5014 * Then we check if this was the last of the UPL elements
5015 * within that Vectored UPL. If so, set empty to TRUE
5016 * so that in ubc_upl_commit_range or ubc_upl_commit, we
5017 * can go ahead and deallocate the Vector UPL too.
5018 */
5019 if(*empty==TRUE) {
5020 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
5021 upl_deallocate(upl);
5022 }
5023 goto process_upl_to_commit;
5024 }
0b4e3aa0 5025
2d21ac55
A
5026 if (pgpgout_count) {
5027 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
5028 }
5029
1c79356b
A
5030 return KERN_SUCCESS;
5031}
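
/*
 * A hedged sketch (assumption, not original source): the commit-or-abort
 * pattern an I/O completion path might apply to a UPL, using the entry points
 * defined in this file.  "io_error" and "io_size" are hypothetical results
 * from the caller's I/O layer, and the flag choices are illustrative only.
 */
static void
example_complete_io(upl_t upl, upl_size_t io_size, int io_error,
		    upl_page_info_t *pl, mach_msg_type_number_t pl_count)
{
	boolean_t empty = FALSE;

	if (io_error)
		/* the I/O failed: release the pages without committing them */
		(void) upl_abort_range(upl, 0, io_size, UPL_ABORT_ERROR, &empty);
	else
		/* the I/O succeeded: commit the range and clear the dirty state */
		(void) upl_commit_range(upl, 0, io_size, UPL_COMMIT_CLEAR_DIRTY,
					pl, pl_count, &empty);

	/* deallocate only if the commit/abort reported the UPL empty */
	if (empty)
		upl_deallocate(upl);
}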
5032
0b4e3aa0
A
5033kern_return_t
5034upl_abort_range(
1c79356b 5035 upl_t upl,
91447636
A
5036 upl_offset_t offset,
5037 upl_size_t size,
0b4e3aa0
A
5038 int error,
5039 boolean_t *empty)
1c79356b 5040{
b0d623f7 5041 upl_size_t xfer_size, subupl_size = size;
55e303ae 5042 vm_object_t shadow_object;
2d21ac55 5043 vm_object_t object;
1c79356b 5044 vm_object_offset_t target_offset;
b0d623f7 5045 upl_offset_t subupl_offset = offset;
1c79356b 5046 int entry;
55e303ae
A
5047 wpl_array_t lite_list;
5048 int occupied;
b0d623f7
A
5049 struct dw dw_array[DELAYED_WORK_LIMIT];
5050 struct dw *dwp;
5051 int dw_count, isVectorUPL = 0;
5052 upl_t vector_upl = NULL;
1c79356b 5053
0b4e3aa0
A
5054 *empty = FALSE;
5055
5056 if (upl == UPL_NULL)
5057 return KERN_INVALID_ARGUMENT;
5058
2d21ac55
A
5059 if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
5060 return upl_commit_range(upl, offset, size, 0, NULL, 0, empty);
55e303ae 5061
b0d623f7
A
5062 if((isVectorUPL = vector_upl_is_valid(upl))) {
5063 vector_upl = upl;
5064 upl_lock(vector_upl);
5065 }
5066 else
5067 upl_lock(upl);
5068
5069process_upl_to_abort:
5070 if(isVectorUPL) {
5071 size = subupl_size;
5072 offset = subupl_offset;
5073 if(size == 0) {
5074 upl_unlock(vector_upl);
5075 return KERN_SUCCESS;
5076 }
5077 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
5078 if(upl == NULL) {
5079 upl_unlock(vector_upl);
5080 return KERN_FAILURE;
5081 }
5082 subupl_size -= size;
5083 subupl_offset += size;
5084 }
5085
5086 *empty = FALSE;
5087
5088#if UPL_DEBUG
5089 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
5090 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5091
5092 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
5093 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
5094 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
5095
5096 upl->upl_commit_index++;
5097 }
5098#endif
2d21ac55 5099 if (upl->flags & UPL_DEVICE_MEMORY)
1c79356b 5100 xfer_size = 0;
2d21ac55
A
5101 else if ((offset + size) <= upl->size)
5102 xfer_size = size;
b0d623f7
A
5103 else {
5104 if(!isVectorUPL)
5105 upl_unlock(upl);
5106 else {
5107 upl_unlock(vector_upl);
5108 }
55e303ae 5109
b0d623f7
A
5110 return KERN_FAILURE;
5111 }
2d21ac55 5112 if (upl->flags & UPL_INTERNAL) {
55e303ae 5113 lite_list = (wpl_array_t)
91447636 5114 ((((uintptr_t)upl) + sizeof(struct upl))
55e303ae
A
5115 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
5116 } else {
5117 lite_list = (wpl_array_t)
91447636 5118 (((uintptr_t)upl) + sizeof(struct upl));
55e303ae 5119 }
2d21ac55
A
5120 object = upl->map_object;
5121
5122 if (upl->flags & UPL_SHADOWED) {
5123 vm_object_lock(object);
5124 shadow_object = object->shadow;
5125 } else
5126 shadow_object = object;
5127
1c79356b
A
5128 entry = offset/PAGE_SIZE;
5129 target_offset = (vm_object_offset_t)offset;
2d21ac55 5130
b0d623f7
A
5131 if (upl->flags & UPL_KERNEL_OBJECT)
5132 vm_object_lock_shared(shadow_object);
5133 else
5134 vm_object_lock(shadow_object);
4a3eedf9 5135
b0d623f7
A
5136 if (upl->flags & UPL_ACCESS_BLOCKED) {
5137 assert(shadow_object->blocked_access);
5138 shadow_object->blocked_access = FALSE;
5139 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
4a3eedf9 5140 }
b0d623f7
A
5141
5142 dwp = &dw_array[0];
5143 dw_count = 0;
5144
5145 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
5146 panic("upl_abort_range: kernel_object being DUMPED");
4a3eedf9 5147
2d21ac55
A
5148 while (xfer_size) {
5149 vm_page_t t, m;
5150
b0d623f7
A
5151 dwp->dw_mask = 0;
5152
55e303ae 5153 m = VM_PAGE_NULL;
2d21ac55
A
5154
5155 if (upl->flags & UPL_LITE) {
b0d623f7
A
5156 unsigned int pg_num;
5157
5158 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
5159 assert(pg_num == target_offset/PAGE_SIZE);
5160
2d21ac55
A
5161
5162 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
55e303ae 5163 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
2d21ac55 5164
b0d623f7
A
5165 if ( !(upl->flags & UPL_KERNEL_OBJECT))
5166 m = vm_page_lookup(shadow_object, target_offset +
5167 (upl->offset - shadow_object->paging_offset));
55e303ae
A
5168 }
5169 }
2d21ac55
A
5170 if (upl->flags & UPL_SHADOWED) {
5171 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
5172 t->pageout = FALSE;
5173
b0d623f7 5174 VM_PAGE_FREE(t);
2d21ac55
A
5175
5176 if (m == VM_PAGE_NULL)
5177 m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
55e303ae
A
5178 }
5179 }
b0d623f7
A
5180 if ((upl->flags & UPL_KERNEL_OBJECT))
5181 goto abort_next_page;
5182
2d21ac55
A
5183 if (m != VM_PAGE_NULL) {
5184
5185 if (m->absent) {
91447636
A
5186 boolean_t must_free = TRUE;
5187
2d21ac55
A
5188 m->clustered = FALSE;
5189 /*
5190 * COPYOUT = FALSE case
5191 * check for error conditions which must
5192 * be passed back to the page's customer
5193 */
5194 if (error & UPL_ABORT_RESTART) {
1c79356b
A
5195 m->restart = TRUE;
5196 m->absent = FALSE;
2d21ac55 5197 m->unusual = TRUE;
91447636 5198 must_free = FALSE;
2d21ac55 5199 } else if (error & UPL_ABORT_UNAVAILABLE) {
1c79356b
A
5200 m->restart = FALSE;
5201 m->unusual = TRUE;
91447636 5202 must_free = FALSE;
2d21ac55 5203 } else if (error & UPL_ABORT_ERROR) {
1c79356b
A
5204 m->restart = FALSE;
5205 m->absent = FALSE;
1c79356b 5206 m->error = TRUE;
2d21ac55 5207 m->unusual = TRUE;
91447636 5208 must_free = FALSE;
1c79356b 5209 }
91447636
A
5210
5211 /*
5212 * ENCRYPTED SWAP:
5213 * If the page was already encrypted,
5214 * we don't really need to decrypt it
5215 * now. It will get decrypted later,
5216 * on demand, as soon as someone needs
5217 * to access its contents.
5218 */
1c79356b
A
5219
5220 m->cleaning = FALSE;
2d21ac55 5221 m->encrypted_cleaning = FALSE;
1c79356b 5222 m->overwriting = FALSE;
b0d623f7
A
5223
5224 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
91447636 5225
2d21ac55 5226 if (must_free == TRUE)
b0d623f7 5227 dwp->dw_mask |= DW_vm_page_free;
2d21ac55 5228 else
b0d623f7 5229 dwp->dw_mask |= DW_vm_page_activate;
2d21ac55
A
5230 } else {
5231 /*
5232 * Handle the trusted pager throttle.
5233 */
5234 if (m->laundry)
b0d623f7 5235 dwp->dw_mask |= DW_vm_pageout_throttle_up;
2d21ac55
A
5236
5237 if (m->pageout) {
5238 assert(m->busy);
5239 assert(m->wire_count == 1);
5240 m->pageout = FALSE;
b0d623f7
A
5241
5242 dwp->dw_mask |= DW_vm_page_unwire;
1c79356b 5243 }
2d21ac55
A
5244 m->dump_cleaning = FALSE;
5245 m->cleaning = FALSE;
5246 m->encrypted_cleaning = FALSE;
5247 m->overwriting = FALSE;
1c79356b 5248#if MACH_PAGEMAP
2d21ac55 5249 vm_external_state_clr(m->object->existence_map, m->offset);
1c79356b 5250#endif /* MACH_PAGEMAP */
2d21ac55
A
5251 if (error & UPL_ABORT_DUMP_PAGES) {
5252 pmap_disconnect(m->phys_page);
b0d623f7
A
5253
5254 dwp->dw_mask |= DW_vm_page_free;
2d21ac55
A
5255 } else {
5256 if (error & UPL_ABORT_REFERENCE) {
5257 /*
5258 * we've been told to explicitly
5259 * reference this page... for
5260 * file I/O, this is done by
5261 * implementing an LRU on the inactive q
5262 */
b0d623f7 5263 dwp->dw_mask |= DW_vm_page_lru;
2d21ac55 5264 }
b0d623f7 5265 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
2d21ac55 5266 }
1c79356b 5267 }
2d21ac55 5268 }
b0d623f7 5269abort_next_page:
55e303ae
A
5270 target_offset += PAGE_SIZE_64;
5271 xfer_size -= PAGE_SIZE;
5272 entry++;
b0d623f7
A
5273
5274 if (dwp->dw_mask) {
5275 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
5276 if (m->busy == FALSE) {
5277 /*
5278 * dw_do_work may need to drop the object lock
5279 * if it does, we need the pages it's looking at to
5280 * be held stable via the busy bit.
5281 */
5282 m->busy = TRUE;
5283 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
5284 }
5285 dwp->dw_m = m;
5286 dwp++;
5287 dw_count++;
5288
5289 if (dw_count >= DELAYED_WORK_LIMIT) {
5290 dw_do_work(shadow_object, &dw_array[0], dw_count);
5291
5292 dwp = &dw_array[0];
5293 dw_count = 0;
5294 }
5295 } else {
5296 if (dwp->dw_mask & DW_clear_busy)
5297 m->busy = FALSE;
5298
5299 if (dwp->dw_mask & DW_PAGE_WAKEUP)
5300 PAGE_WAKEUP(m);
5301 }
5302 }
d7e50217 5303 }
b0d623f7
A
5304 if (dw_count)
5305 dw_do_work(shadow_object, &dw_array[0], dw_count);
2d21ac55 5306
55e303ae 5307 occupied = 1;
2d21ac55 5308
55e303ae
A
5309 if (upl->flags & UPL_DEVICE_MEMORY) {
5310 occupied = 0;
5311 } else if (upl->flags & UPL_LITE) {
5312 int pg_num;
5313 int i;
2d21ac55 5314
55e303ae
A
5315 pg_num = upl->size/PAGE_SIZE;
5316 pg_num = (pg_num + 31) >> 5;
5317 occupied = 0;
2d21ac55
A
5318
5319 for (i = 0; i < pg_num; i++) {
5320 if (lite_list[i] != 0) {
55e303ae
A
5321 occupied = 1;
5322 break;
5323 }
5324 }
5325 } else {
2d21ac55 5326 if (queue_empty(&upl->map_object->memq))
55e303ae 5327 occupied = 0;
55e303ae 5328 }
2d21ac55 5329 if (occupied == 0) {
b0d623f7
A
5330 /*
5331 * If this UPL element belongs to a Vector UPL and is
5332 * empty, then this is the right function to deallocate
5333 * it. So go ahead and set the *empty variable. The flag
5334 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
5335 * should be considered relevant for the Vector UPL and
5336 * not the internal UPLs.
5337 */
5338 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
0b4e3aa0 5339 *empty = TRUE;
2d21ac55 5340
b0d623f7 5341 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
2d21ac55
A
5342 /*
5343 * this is not a paging object
5344 * so we need to drop the paging reference
5345 * that was taken when we created the UPL
5346 * against this object
5347 */
b0d623f7 5348 vm_object_activity_end(shadow_object);
2d21ac55
A
5349 } else {
5350 /*
5351 * we donated the paging reference to
5352 * the map object... vm_pageout_object_terminate
5353 * will drop this reference
5354 */
5355 }
1c79356b 5356 }
55e303ae 5357 vm_object_unlock(shadow_object);
91447636
A
5358 if (object != shadow_object)
5359 vm_object_unlock(object);
b0d623f7
A
5360
5361 if(!isVectorUPL)
5362 upl_unlock(upl);
5363 else {
5364 /*
5365 * If we completed our operations on an UPL that is
5366 * part of a Vectored UPL and if empty is TRUE, then
5367 * we should go ahead and deallocate this UPL element.
5368 * Then we check if this was the last of the UPL elements
5369 * within that Vectored UPL. If so, set empty to TRUE
5370 * so that in ubc_upl_abort_range or ubc_upl_abort, we
5371 * can go ahead and deallocate the Vector UPL too.
5372 */
5373 if(*empty == TRUE) {
5374 *empty = vector_upl_set_subupl(vector_upl, upl,0);
5375 upl_deallocate(upl);
5376 }
5377 goto process_upl_to_abort;
5378 }
55e303ae 5379
1c79356b
A
5380 return KERN_SUCCESS;
5381}
5382
2d21ac55 5383
1c79356b 5384kern_return_t
0b4e3aa0 5385upl_abort(
1c79356b
A
5386 upl_t upl,
5387 int error)
2d21ac55
A
5388{
5389 boolean_t empty;
5390
5391 return upl_abort_range(upl, 0, upl->size, error, &empty);
1c79356b
A
5392}
5393
55e303ae 5394
2d21ac55
A
5395/* an option on commit should be wire */
5396kern_return_t
5397upl_commit(
5398 upl_t upl,
5399 upl_page_info_t *page_list,
5400 mach_msg_type_number_t count)
5401{
5402 boolean_t empty;
5403
5404 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
5405}
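
/*
 * Illustrative sketch (not part of the original file): how a caller
 * typically finishes a UPL with the commit/abort entry points above.
 * The helper name and the io_failed parameter are hypothetical; the
 * functions and the UPL_ABORT_ERROR flag are the ones used in this
 * file.  Whether *empty gets reported as TRUE depends on how the UPL
 * was created (see the occupied / notify-empty handling above).
 */
#if 0
static void
example_finish_upl(upl_t upl, boolean_t io_failed)
{
	boolean_t	empty = FALSE;

	if (io_failed) {
		/* undo the cleaning/wired state and mark the pages in error */
		upl_abort_range(upl, 0, upl->size, UPL_ABORT_ERROR, &empty);
	} else {
		/* no per-page info to merge back, so pass NULL/0 */
		upl_commit_range(upl, 0, upl->size, 0, NULL, 0, &empty);
	}
	if (empty)
		upl_deallocate(upl);
}
#endif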
5406
55e303ae 5407
b0d623f7
A
5408unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
5409
55e303ae
A
5410kern_return_t
5411vm_object_iopl_request(
5412 vm_object_t object,
5413 vm_object_offset_t offset,
91447636 5414 upl_size_t size,
55e303ae
A
5415 upl_t *upl_ptr,
5416 upl_page_info_array_t user_page_list,
5417 unsigned int *page_list_count,
5418 int cntrl_flags)
5419{
5420 vm_page_t dst_page;
2d21ac55
A
5421 vm_object_offset_t dst_offset;
5422 upl_size_t xfer_size;
55e303ae 5423 upl_t upl = NULL;
91447636
A
5424 unsigned int entry;
5425 wpl_array_t lite_list = NULL;
91447636 5426 int no_zero_fill = FALSE;
2d21ac55 5427 u_int32_t psize;
55e303ae
A
5428 kern_return_t ret;
5429 vm_prot_t prot;
2d21ac55 5430 struct vm_object_fault_info fault_info;
b0d623f7
A
5431 struct dw dw_array[DELAYED_WORK_LIMIT];
5432 struct dw *dwp;
5433 int dw_count;
5434 int dw_index;
55e303ae 5435
91447636
A
5436 if (cntrl_flags & ~UPL_VALID_FLAGS) {
5437 /*
5438 * For forward compatibility's sake,
5439 * reject any unknown flag.
5440 */
5441 return KERN_INVALID_VALUE;
5442 }
0c530ab8
A
5443 if (vm_lopage_poolsize == 0)
5444 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
5445
5446 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
5447 if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
5448 return KERN_INVALID_VALUE;
5449
5450 if (object->phys_contiguous) {
5451 if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
5452 return KERN_INVALID_ADDRESS;
2d21ac55
A
5453
5454 if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
0c530ab8
A
5455 return KERN_INVALID_ADDRESS;
5456 }
5457 }
91447636
A
5458
5459 if (cntrl_flags & UPL_ENCRYPT) {
5460 /*
5461 * ENCRYPTED SWAP:
5462 * The paging path doesn't use this interface,
5463 * so we don't support the UPL_ENCRYPT flag
5464 * here. We won't encrypt the pages.
5465 */
5466 assert(! (cntrl_flags & UPL_ENCRYPT));
5467 }
91447636
A
5468 if (cntrl_flags & UPL_NOZEROFILL)
5469 no_zero_fill = TRUE;
5470
5471 if (cntrl_flags & UPL_COPYOUT_FROM)
55e303ae 5472 prot = VM_PROT_READ;
91447636 5473 else
55e303ae 5474 prot = VM_PROT_READ | VM_PROT_WRITE;
55e303ae 5475
b0d623f7
A
5476 if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
5477 size = MAX_UPL_SIZE * PAGE_SIZE;
55e303ae 5478
2d21ac55
A
5479 if (cntrl_flags & UPL_SET_INTERNAL) {
5480 if (page_list_count != NULL)
cf7d32b8 5481 *page_list_count = MAX_UPL_SIZE;
2d21ac55
A
5482 }
5483 if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
5484 ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
5485 return KERN_INVALID_ARGUMENT;
55e303ae 5486
2d21ac55
A
5487 if ((!object->internal) && (object->paging_offset != 0))
5488 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
5489
5490
5491 if (object->phys_contiguous)
5492 psize = PAGE_SIZE;
5493 else
5494 psize = size;
5495
5496 if (cntrl_flags & UPL_SET_INTERNAL) {
5497 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
5498
5499 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5500 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
5501 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
b0d623f7
A
5502 if (size == 0) {
5503 user_page_list = NULL;
5504 lite_list = NULL;
5505 }
2d21ac55
A
5506 } else {
5507 upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
55e303ae 5508
2d21ac55 5509 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
b0d623f7
A
5510 if (size == 0) {
5511 lite_list = NULL;
5512 }
55e303ae 5513 }
2d21ac55
A
5514 if (user_page_list)
5515 user_page_list[0].device = FALSE;
5516 *upl_ptr = upl;
55e303ae 5517
2d21ac55
A
5518 upl->map_object = object;
5519 upl->size = size;
5520
b0d623f7
A
5521 if (object == kernel_object &&
5522 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
5523 upl->flags |= UPL_KERNEL_OBJECT;
5524#if UPL_DEBUG
5525 vm_object_lock(object);
5526#else
5527 vm_object_lock_shared(object);
5528#endif
5529 } else {
5530 vm_object_lock(object);
5531 vm_object_activity_begin(object);
5532 }
2d21ac55
A
5533 /*
5534 * paging in progress also protects the paging_offset
5535 */
5536 upl->offset = offset + object->paging_offset;
55e303ae 5537
b0d623f7
A
5538 if (cntrl_flags & UPL_BLOCK_ACCESS) {
5539 /*
5540 * The user requested that access to the pages in this UPL
5541 * be blocked until the UPL is committed or aborted.
5542 */
5543 upl->flags |= UPL_ACCESS_BLOCKED;
5544 }
5545
2d21ac55 5546 if (object->phys_contiguous) {
b0d623f7 5547#if UPL_DEBUG
2d21ac55
A
5548 queue_enter(&object->uplq, upl, upl_t, uplq);
5549#endif /* UPL_DEBUG */
55e303ae 5550
b0d623f7
A
5551 if (upl->flags & UPL_ACCESS_BLOCKED) {
5552 assert(!object->blocked_access);
5553 object->blocked_access = TRUE;
5554 }
5555
2d21ac55 5556 vm_object_unlock(object);
55e303ae 5557
2d21ac55
A
5558 /*
5559 * don't need any shadow mappings for this one
5560 * since it is already I/O memory
5561 */
5562 upl->flags |= UPL_DEVICE_MEMORY;
55e303ae 5563
b0d623f7 5564 upl->highest_page = (ppnum_t) ((offset + object->shadow_offset + size - 1)>>PAGE_SHIFT);
2d21ac55
A
5565
5566 if (user_page_list) {
b0d623f7 5567 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->shadow_offset)>>PAGE_SHIFT);
2d21ac55 5568 user_page_list[0].device = TRUE;
55e303ae 5569 }
2d21ac55
A
5570 if (page_list_count != NULL) {
5571 if (upl->flags & UPL_INTERNAL)
5572 *page_list_count = 0;
5573 else
5574 *page_list_count = 1;
55e303ae 5575 }
2d21ac55 5576 return KERN_SUCCESS;
55e303ae 5577 }
b0d623f7
A
5578 if (object != kernel_object) {
5579 /*
5580 * Protect user space from future COW operations
5581 */
5582 object->true_share = TRUE;
55e303ae 5583
b0d623f7
A
5584 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
5585 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
5586 }
55e303ae 5587
b0d623f7 5588#if UPL_DEBUG
2d21ac55 5589 queue_enter(&object->uplq, upl, upl_t, uplq);
91447636 5590#endif /* UPL_DEBUG */
91447636 5591
b0d623f7
A
5592 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
5593 object->copy != VM_OBJECT_NULL) {
91447636 5594 /*
b0d623f7
A
5595 * Honor copy-on-write obligations
5596 *
5597 * The caller is gathering these pages and
5598 * might modify their contents. We need to
5599 * make sure that the copy object has its own
5600 * private copies of these pages before we let
5601 * the caller modify them.
5602 *
5603 * NOTE: someone else could map the original object
5604 * after we've done this copy-on-write here, and they
5605 * could then see an inconsistent picture of the memory
5606 * while it's being modified via the UPL. To prevent this,
5607 * we would have to block access to these pages until the
5608 * UPL is released. We could use the UPL_BLOCK_ACCESS
5609 * code path for that...
91447636 5610 */
b0d623f7
A
5611 vm_object_update(object,
5612 offset,
5613 size,
5614 NULL,
5615 NULL,
5616 FALSE, /* should_return */
5617 MEMORY_OBJECT_COPY_SYNC,
5618 VM_PROT_NO_CHANGE);
5619#if DEVELOPMENT || DEBUG
5620 iopl_cow++;
5621 iopl_cow_pages += size >> PAGE_SHIFT;
5622#endif
55e303ae 5623 }
b0d623f7
A
5624
5625
55e303ae 5626 entry = 0;
2d21ac55
A
5627
5628 xfer_size = size;
5629 dst_offset = offset;
5630
5631 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
5632 fault_info.user_tag = 0;
5633 fault_info.lo_offset = offset;
5634 fault_info.hi_offset = offset + xfer_size;
5635 fault_info.no_cache = FALSE;
b0d623f7
A
5636 fault_info.stealth = FALSE;
5637
5638 dwp = &dw_array[0];
5639 dw_count = 0;
2d21ac55 5640
55e303ae 5641 while (xfer_size) {
2d21ac55 5642 vm_fault_return_t result;
b0d623f7
A
5643 unsigned int pg_num;
5644
5645 dwp->dw_mask = 0;
2d21ac55 5646
55e303ae
A
5647 dst_page = vm_page_lookup(object, dst_offset);
5648
91447636
A
5649 /*
5650 * ENCRYPTED SWAP:
5651 * If the page is encrypted, we need to decrypt it,
5652 * so force a soft page fault.
5653 */
b0d623f7
A
5654 if (dst_page == VM_PAGE_NULL ||
5655 dst_page->busy ||
5656 dst_page->encrypted ||
5657 dst_page->error ||
5658 dst_page->restart ||
5659 dst_page->absent ||
5660 dst_page->fictitious) {
5661
5662 if (object == kernel_object)
5663 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
2d21ac55 5664
55e303ae
A
5665 do {
5666 vm_page_t top_page;
5667 kern_return_t error_code;
5668 int interruptible;
5669
2d21ac55 5670 if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
55e303ae 5671 interruptible = THREAD_ABORTSAFE;
2d21ac55 5672 else
55e303ae 5673 interruptible = THREAD_UNINT;
2d21ac55
A
5674
5675 fault_info.interruptible = interruptible;
5676 fault_info.cluster_size = xfer_size;
55e303ae 5677
b0d623f7
A
5678 vm_object_paging_begin(object);
5679
55e303ae 5680 result = vm_fault_page(object, dst_offset,
2d21ac55
A
5681 prot | VM_PROT_WRITE, FALSE,
5682 &prot, &dst_page, &top_page,
5683 (int *)0,
5684 &error_code, no_zero_fill,
5685 FALSE, &fault_info);
5686
5687 switch (result) {
5688
55e303ae
A
5689 case VM_FAULT_SUCCESS:
5690
5691 PAGE_WAKEUP_DONE(dst_page);
55e303ae
A
5692 /*
5693 * Release paging references and
5694 * top-level placeholder page, if any.
5695 */
2d21ac55 5696 if (top_page != VM_PAGE_NULL) {
55e303ae 5697 vm_object_t local_object;
2d21ac55
A
5698
5699 local_object = top_page->object;
5700
5701 if (top_page->object != dst_page->object) {
5702 vm_object_lock(local_object);
55e303ae 5703 VM_PAGE_FREE(top_page);
2d21ac55
A
5704 vm_object_paging_end(local_object);
5705 vm_object_unlock(local_object);
55e303ae
A
5706 } else {
5707 VM_PAGE_FREE(top_page);
2d21ac55 5708 vm_object_paging_end(local_object);
55e303ae
A
5709 }
5710 }
b0d623f7 5711 vm_object_paging_end(object);
55e303ae
A
5712 break;
5713
55e303ae
A
5714 case VM_FAULT_RETRY:
5715 vm_object_lock(object);
55e303ae
A
5716 break;
5717
5718 case VM_FAULT_FICTITIOUS_SHORTAGE:
5719 vm_page_more_fictitious();
2d21ac55 5720
55e303ae 5721 vm_object_lock(object);
55e303ae
A
5722 break;
5723
5724 case VM_FAULT_MEMORY_SHORTAGE:
5725 if (vm_page_wait(interruptible)) {
5726 vm_object_lock(object);
55e303ae
A
5727 break;
5728 }
5729 /* fall thru */
5730
5731 case VM_FAULT_INTERRUPTED:
5732 error_code = MACH_SEND_INTERRUPTED;
5733 case VM_FAULT_MEMORY_ERROR:
b0d623f7 5734 memory_error:
2d21ac55 5735 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
0c530ab8 5736
2d21ac55 5737 vm_object_lock(object);
0c530ab8 5738 goto return_err;
b0d623f7
A
5739
5740 case VM_FAULT_SUCCESS_NO_VM_PAGE:
5741 /* success but no page: fail */
5742 vm_object_paging_end(object);
5743 vm_object_unlock(object);
5744 goto memory_error;
5745
5746 default:
5747 panic("vm_object_iopl_request: unexpected error"
5748 " 0x%x from vm_fault_page()\n", result);
55e303ae 5749 }
2d21ac55 5750 } while (result != VM_FAULT_SUCCESS);
b0d623f7 5751
55e303ae 5752 }
0c530ab8 5753
b0d623f7
A
5754 if (upl->flags & UPL_KERNEL_OBJECT)
5755 goto record_phys_addr;
5756
5757 if (dst_page->cleaning) {
5758 /*
5759 * Someone else is cleaning this page in place.
5760 * In theory, we should be able to proceed and use this
5761 * page, but they'll probably end up clearing the "busy"
5762 * bit on it in upl_commit_range() even though they didn't set
5763 * it, so they would clear our "busy" bit and open
5764 * us to race conditions.
5765 * We'd better wait for the cleaning to complete and
5766 * then try again.
5767 */
5768 vm_object_iopl_request_sleep_for_cleaning++;
5769 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5770 continue;
5771 }
0c530ab8
A
5772 if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
5773 dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
5774 vm_page_t low_page;
5775 int refmod;
5776
5777 /*
5778 * support devices that can't DMA above 32 bits
5779 * by substituting pages from a pool of low address
5780 * memory for any pages we find above the 4G mark.
5781 * We can't substitute if the page is already wired because
5782 * we don't know whether that physical address has been
5783 * handed out to some other 64 bit capable DMA device to use
5784 */
b0d623f7 5785 if (VM_PAGE_WIRED(dst_page)) {
0c530ab8
A
5786 ret = KERN_PROTECTION_FAILURE;
5787 goto return_err;
5788 }
0c530ab8
A
5789 low_page = vm_page_grablo();
5790
5791 if (low_page == VM_PAGE_NULL) {
5792 ret = KERN_RESOURCE_SHORTAGE;
5793 goto return_err;
5794 }
5795 /*
5796 * from here until the vm_page_replace completes
5797 * we mustn't drop the object lock... we don't
5798 * want anyone refaulting this page in and using
5799 * it after we disconnect it... we want the fault
5800 * to find the new page being substituted.
5801 */
2d21ac55
A
5802 if (dst_page->pmapped)
5803 refmod = pmap_disconnect(dst_page->phys_page);
5804 else
5805 refmod = 0;
0c530ab8 5806 vm_page_copy(dst_page, low_page);
2d21ac55 5807
0c530ab8
A
5808 low_page->reference = dst_page->reference;
5809 low_page->dirty = dst_page->dirty;
5810
5811 if (refmod & VM_MEM_REFERENCED)
5812 low_page->reference = TRUE;
5813 if (refmod & VM_MEM_MODIFIED)
5814 low_page->dirty = TRUE;
5815
0c530ab8 5816 vm_page_replace(low_page, object, dst_offset);
0c530ab8
A
5817
5818 dst_page = low_page;
5819 /*
5820 * vm_page_grablo returned the page marked
5821 * BUSY... we don't need a PAGE_WAKEUP_DONE
5822 * here, because we've never dropped the object lock
5823 */
5824 dst_page->busy = FALSE;
5825 }
b0d623f7 5826 dwp->dw_mask |= DW_vm_page_wire;
55e303ae 5827
91447636
A
5828 if (cntrl_flags & UPL_BLOCK_ACCESS) {
5829 /*
5830 * Mark the page "busy" to block any future page fault
5831 * on this page. We'll also remove the mapping
5832 * of all these pages before leaving this routine.
5833 */
5834 assert(!dst_page->fictitious);
5835 dst_page->busy = TRUE;
5836 }
2d21ac55
A
5837 /*
5838 * expect the page to be used
5839 * page queues lock must be held to set 'reference'
5840 */
b0d623f7 5841 dwp->dw_mask |= DW_set_reference;
55e303ae 5842
2d21ac55
A
5843 if (!(cntrl_flags & UPL_COPYOUT_FROM))
5844 dst_page->dirty = TRUE;
b0d623f7
A
5845record_phys_addr:
5846 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
5847 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
5848 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
55e303ae 5849
2d21ac55
A
5850 if (dst_page->phys_page > upl->highest_page)
5851 upl->highest_page = dst_page->phys_page;
55e303ae 5852
2d21ac55
A
5853 if (user_page_list) {
5854 user_page_list[entry].phys_addr = dst_page->phys_page;
2d21ac55
A
5855 user_page_list[entry].pageout = dst_page->pageout;
5856 user_page_list[entry].absent = dst_page->absent;
593a1d5f 5857 user_page_list[entry].dirty = dst_page->dirty;
2d21ac55 5858 user_page_list[entry].precious = dst_page->precious;
593a1d5f 5859 user_page_list[entry].device = FALSE;
2d21ac55
A
5860 if (dst_page->clustered == TRUE)
5861 user_page_list[entry].speculative = dst_page->speculative;
5862 else
5863 user_page_list[entry].speculative = FALSE;
593a1d5f
A
5864 user_page_list[entry].cs_validated = dst_page->cs_validated;
5865 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
55e303ae 5866 }
b0d623f7
A
5867 if (object != kernel_object) {
5868 /*
5869 * someone is explicitly grabbing this page...
5870 * update clustered and speculative state
5871 *
5872 */
5873 VM_PAGE_CONSUME_CLUSTERED(dst_page);
55e303ae
A
5874 }
5875 entry++;
5876 dst_offset += PAGE_SIZE_64;
5877 xfer_size -= PAGE_SIZE;
b0d623f7
A
5878
5879 if (dwp->dw_mask) {
5880 if (dst_page->busy == FALSE) {
5881 /*
5882 * dw_do_work may need to drop the object lock
5883 * if it does, we need the pages it's looking at to
5884 * be held stable via the busy bit.
5885 */
5886 dst_page->busy = TRUE;
5887 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
5888 }
5889 dwp->dw_m = dst_page;
5890 dwp++;
5891 dw_count++;
5892
5893 if (dw_count >= DELAYED_WORK_LIMIT) {
5894 dw_do_work(object, &dw_array[0], dw_count);
5895
5896 dwp = &dw_array[0];
5897 dw_count = 0;
5898 }
5899 }
55e303ae 5900 }
b0d623f7
A
5901 if (dw_count)
5902 dw_do_work(object, &dw_array[0], dw_count);
55e303ae 5903
2d21ac55
A
5904 if (page_list_count != NULL) {
5905 if (upl->flags & UPL_INTERNAL)
55e303ae 5906 *page_list_count = 0;
2d21ac55 5907 else if (*page_list_count > entry)
55e303ae
A
5908 *page_list_count = entry;
5909 }
55e303ae 5910 vm_object_unlock(object);
55e303ae 5911
91447636
A
5912 if (cntrl_flags & UPL_BLOCK_ACCESS) {
5913 /*
5914 * We've marked all the pages "busy" so that future
5915 * page faults will block.
5916 * Now remove the mapping for these pages, so that they
5917 * can't be accessed without causing a page fault.
5918 */
5919 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
5920 PMAP_NULL, 0, VM_PROT_NONE);
b0d623f7
A
5921 assert(!object->blocked_access);
5922 object->blocked_access = TRUE;
91447636 5923 }
91447636 5924 return KERN_SUCCESS;
0c530ab8 5925
0c530ab8 5926return_err:
b0d623f7 5927 dw_index = 0;
0c530ab8
A
5928
5929 for (; offset < dst_offset; offset += PAGE_SIZE) {
5930 dst_page = vm_page_lookup(object, offset);
5931
5932 if (dst_page == VM_PAGE_NULL)
5933 panic("vm_object_iopl_request: Wired pages missing. \n");
2d21ac55 5934
b0d623f7
A
5935 if (dw_count) {
5936 if (dw_array[dw_index].dw_m == dst_page) {
5937 dw_index++;
5938 dw_count--;
5939 continue;
5940 }
5941 }
2d21ac55 5942 vm_page_lockspin_queues();
0c530ab8
A
5943 vm_page_unwire(dst_page);
5944 vm_page_unlock_queues();
2d21ac55
A
5945
5946 VM_STAT_INCR(reactivations);
0c530ab8 5947 }
b0d623f7
A
5948#if UPL_DEBUG
5949 upl->upl_state = 2;
5950#endif
5951 if (! (upl->flags & UPL_KERNEL_OBJECT)) {
5952 vm_object_activity_end(object);
5953 }
0c530ab8
A
5954 vm_object_unlock(object);
5955 upl_destroy(upl);
5956
5957 return ret;
1c79356b
A
5958}
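
/*
 * Illustrative sketch (not part of the original file): wiring a range
 * of a VM object for device I/O via vm_object_iopl_request() above.
 * The helper name is hypothetical; the flags are the UPL_SET_* values
 * already honored by the routine (internal page list, lite bitmap,
 * I/O wire).  The pages stay wired until the resulting UPL is
 * committed or aborted.
 */
#if 0
static kern_return_t
example_wire_for_io(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr)
{
	unsigned int	page_list_count = 0;	/* let the UPL size its internal list */

	return vm_object_iopl_request(object, offset, size,
				      upl_ptr,
				      NULL,		/* page list comes from the internal UPL */
				      &page_list_count,
				      UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE);
}
#endif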
5959
91447636
A
5960kern_return_t
5961upl_transpose(
5962 upl_t upl1,
5963 upl_t upl2)
1c79356b 5964{
91447636
A
5965 kern_return_t retval;
5966 boolean_t upls_locked;
5967 vm_object_t object1, object2;
1c79356b 5968
b0d623f7 5969 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
91447636
A
5970 return KERN_INVALID_ARGUMENT;
5971 }
5972
5973 upls_locked = FALSE;
1c79356b 5974
91447636
A
5975 /*
5976 * Since we need to lock both UPLs at the same time,
5977 * avoid deadlocks by always taking locks in the same order.
5978 */
5979 if (upl1 < upl2) {
5980 upl_lock(upl1);
5981 upl_lock(upl2);
5982 } else {
5983 upl_lock(upl2);
5984 upl_lock(upl1);
5985 }
5986 upls_locked = TRUE; /* the UPLs will need to be unlocked */
5987
5988 object1 = upl1->map_object;
5989 object2 = upl2->map_object;
5990
5991 if (upl1->offset != 0 || upl2->offset != 0 ||
5992 upl1->size != upl2->size) {
5993 /*
5994 * We deal only with full objects, not subsets.
5995 * That's because we exchange the entire backing store info
5996 * for the objects: pager, resident pages, etc... We can't do
5997 * only part of it.
5998 */
5999 retval = KERN_INVALID_VALUE;
6000 goto done;
6001 }
6002
6003 /*
6004 * Transpose the VM objects' backing store.
6005 */
6006 retval = vm_object_transpose(object1, object2,
6007 (vm_object_size_t) upl1->size);
6008
6009 if (retval == KERN_SUCCESS) {
6010 /*
6011 * Make each UPL point to the correct VM object, i.e. the
6012 * object holding the pages that the UPL refers to...
6013 */
b0d623f7 6014#if UPL_DEBUG
2d21ac55
A
6015 queue_remove(&object1->uplq, upl1, upl_t, uplq);
6016 queue_remove(&object2->uplq, upl2, upl_t, uplq);
6017#endif
91447636
A
6018 upl1->map_object = object2;
6019 upl2->map_object = object1;
b0d623f7 6020#if UPL_DEBUG
2d21ac55
A
6021 queue_enter(&object1->uplq, upl2, upl_t, uplq);
6022 queue_enter(&object2->uplq, upl1, upl_t, uplq);
6023#endif
91447636
A
6024 }
6025
6026done:
6027 /*
6028 * Cleanup.
6029 */
6030 if (upls_locked) {
6031 upl_unlock(upl1);
6032 upl_unlock(upl2);
6033 upls_locked = FALSE;
6034 }
6035
6036 return retval;
6037}
6038
6039/*
6040 * ENCRYPTED SWAP:
6041 *
6042 * Rationale: the user might have some encrypted data on disk (via
6043 * FileVault or any other mechanism). That data is then decrypted in
6044 * memory, which is safe as long as the machine is secure. But that
6045 * decrypted data in memory could be paged out to disk by the default
6046 * pager. The data would then be stored on disk in clear (not encrypted)
6047 * and it could be accessed by anyone who gets physical access to the
6048 * disk (if the laptop or the disk gets stolen for example). This weakens
6049 * the security offered by FileVault.
6050 *
6051 * Solution: the default pager will optionally request that all the
6052 * pages it gathers for pageout be encrypted, via the UPL interfaces,
6053 * before it sends this UPL to disk via the vnode_pageout() path.
6054 *
6055 * Notes:
6056 *
6057 * To avoid disrupting the VM LRU algorithms, we want to keep the
6058 * clean-in-place mechanisms, which allow us to send some extra pages to
6059 * swap (clustering) without actually removing them from the user's
6060 * address space. We don't want the user to unknowingly access encrypted
6061 * data, so we have to actually remove the encrypted pages from the page
6062 * table. When the user accesses the data, the hardware will fail to
6063 * locate the virtual page in its page table and will trigger a page
6064 * fault. We can then decrypt the page and enter it in the page table
6065 * again. Whenever we allow the user to access the contents of a page,
6066 * we have to make sure it's not encrypted.
6067 *
6068 *
6069 */
6070/*
6071 * ENCRYPTED SWAP:
6072 * Reserve of virtual addresses in the kernel address space.
6073 * We need to map the physical pages in the kernel, so that we
6074 * can call the encryption/decryption routines with a kernel
6075 * virtual address. We keep this pool of pre-allocated kernel
6076 * virtual addresses so that we don't have to scan the kernel's
6077 * virtual address space each time we need to encrypt or decrypt
6078 * a physical page.
6079 * It would be nice to be able to encrypt and decrypt in physical
6080 * mode but that might not always be more efficient...
6081 */
6082decl_simple_lock_data(,vm_paging_lock)
6083#define VM_PAGING_NUM_PAGES 64
6084vm_map_offset_t vm_paging_base_address = 0;
6085boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
6086int vm_paging_max_index = 0;
2d21ac55
A
6087int vm_paging_page_waiter = 0;
6088int vm_paging_page_waiter_total = 0;
91447636
A
6089unsigned long vm_paging_no_kernel_page = 0;
6090unsigned long vm_paging_objects_mapped = 0;
6091unsigned long vm_paging_pages_mapped = 0;
6092unsigned long vm_paging_objects_mapped_slow = 0;
6093unsigned long vm_paging_pages_mapped_slow = 0;
6094
2d21ac55
A
6095void
6096vm_paging_map_init(void)
6097{
6098 kern_return_t kr;
6099 vm_map_offset_t page_map_offset;
6100 vm_map_entry_t map_entry;
6101
6102 assert(vm_paging_base_address == 0);
6103
6104 /*
6105 * Initialize our pool of pre-allocated kernel
6106 * virtual addresses.
6107 */
6108 page_map_offset = 0;
6109 kr = vm_map_find_space(kernel_map,
6110 &page_map_offset,
6111 VM_PAGING_NUM_PAGES * PAGE_SIZE,
6112 0,
6113 0,
6114 &map_entry);
6115 if (kr != KERN_SUCCESS) {
6116 panic("vm_paging_map_init: kernel_map full\n");
6117 }
6118 map_entry->object.vm_object = kernel_object;
b0d623f7 6119 map_entry->offset = page_map_offset;
2d21ac55
A
6120 vm_object_reference(kernel_object);
6121 vm_map_unlock(kernel_map);
6122
6123 assert(vm_paging_base_address == 0);
6124 vm_paging_base_address = page_map_offset;
6125}
6126
91447636
A
6127/*
6128 * ENCRYPTED SWAP:
6129 * vm_paging_map_object:
6130 * Maps part of a VM object's pages in the kernel
6131 * virtual address space, using the pre-allocated
6132 * kernel virtual addresses, if possible.
6133 * Context:
6134 * The VM object is locked. This lock will get
2d21ac55
A
6135 * dropped and re-acquired though, so the caller
6136 * must make sure the VM object is kept alive
6137 * (by holding a VM map that has a reference
6138 * on it, for example, or taking an extra reference).
6139 * The page should also be kept busy to prevent
6140 * it from being reclaimed.
91447636
A
6141 */
6142kern_return_t
6143vm_paging_map_object(
6144 vm_map_offset_t *address,
6145 vm_page_t page,
6146 vm_object_t object,
6147 vm_object_offset_t offset,
2d21ac55 6148 vm_map_size_t *size,
593a1d5f 6149 vm_prot_t protection,
2d21ac55 6150 boolean_t can_unlock_object)
91447636
A
6151{
6152 kern_return_t kr;
6153 vm_map_offset_t page_map_offset;
6154 vm_map_size_t map_size;
6155 vm_object_offset_t object_offset;
91447636 6156 int i;
91447636 6157
593a1d5f 6158
91447636 6159 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
2d21ac55 6160 assert(page->busy);
91447636 6161 /*
91447636
A
6162 * Use one of the pre-allocated kernel virtual addresses
6163 * and just enter the VM page in the kernel address space
6164 * at that virtual address.
6165 */
91447636
A
6166 simple_lock(&vm_paging_lock);
6167
91447636
A
6168 /*
6169 * Try and find an available kernel virtual address
6170 * from our pre-allocated pool.
6171 */
6172 page_map_offset = 0;
2d21ac55
A
6173 for (;;) {
6174 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
6175 if (vm_paging_page_inuse[i] == FALSE) {
6176 page_map_offset =
6177 vm_paging_base_address +
6178 (i * PAGE_SIZE);
6179 break;
6180 }
6181 }
6182 if (page_map_offset != 0) {
6183 /* found a space to map our page ! */
6184 break;
6185 }
6186
6187 if (can_unlock_object) {
6188 /*
6189 * If we can afford to unlock the VM object,
6190 * let's take the slow path now...
6191 */
91447636
A
6192 break;
6193 }
2d21ac55
A
6194 /*
6195 * We can't afford to unlock the VM object, so
6196 * let's wait for a space to become available...
6197 */
6198 vm_paging_page_waiter_total++;
6199 vm_paging_page_waiter++;
6200 thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
6201 &vm_paging_lock,
6202 THREAD_UNINT);
6203 vm_paging_page_waiter--;
6204 /* ... and try again */
91447636
A
6205 }
6206
6207 if (page_map_offset != 0) {
6208 /*
6209 * We found a kernel virtual address;
6210 * map the physical page to that virtual address.
6211 */
6212 if (i > vm_paging_max_index) {
6213 vm_paging_max_index = i;
6214 }
6215 vm_paging_page_inuse[i] = TRUE;
6216 simple_unlock(&vm_paging_lock);
2d21ac55
A
6217
6218 if (page->pmapped == FALSE) {
0c530ab8
A
6219 pmap_sync_page_data_phys(page->phys_page);
6220 }
2d21ac55
A
6221 page->pmapped = TRUE;
6222
6223 /*
6224 * Keep the VM object locked over the PMAP_ENTER
6225 * and the actual use of the page by the kernel,
6226 * or this pmap mapping might get undone by a
6227 * vm_object_pmap_protect() call...
6228 */
0c530ab8
A
6229 PMAP_ENTER(kernel_pmap,
6230 page_map_offset,
6231 page,
593a1d5f 6232 protection,
0c530ab8
A
6233 ((int) page->object->wimg_bits &
6234 VM_WIMG_MASK),
6235 TRUE);
91447636
A
6236 vm_paging_objects_mapped++;
6237 vm_paging_pages_mapped++;
6238 *address = page_map_offset;
91447636
A
6239
6240 /* all done and mapped, ready to use ! */
6241 return KERN_SUCCESS;
6242 }
6243
6244 /*
6245 * We ran out of pre-allocated kernel virtual
6246 * addresses. Just map the page in the kernel
6247 * the slow and regular way.
6248 */
6249 vm_paging_no_kernel_page++;
6250 simple_unlock(&vm_paging_lock);
2d21ac55
A
6251 }
6252
6253 if (! can_unlock_object) {
6254 return KERN_NOT_SUPPORTED;
91447636 6255 }
91447636
A
6256
6257 object_offset = vm_object_trunc_page(offset);
6258 map_size = vm_map_round_page(*size);
6259
6260 /*
6261 * Try and map the required range of the object
6262 * in the kernel_map
6263 */
6264
91447636
A
6265 vm_object_reference_locked(object); /* for the map entry */
6266 vm_object_unlock(object);
6267
6268 kr = vm_map_enter(kernel_map,
6269 address,
6270 map_size,
6271 0,
6272 VM_FLAGS_ANYWHERE,
6273 object,
6274 object_offset,
6275 FALSE,
593a1d5f 6276 protection,
91447636
A
6277 VM_PROT_ALL,
6278 VM_INHERIT_NONE);
6279 if (kr != KERN_SUCCESS) {
6280 *address = 0;
6281 *size = 0;
6282 vm_object_deallocate(object); /* for the map entry */
2d21ac55 6283 vm_object_lock(object);
91447636
A
6284 return kr;
6285 }
6286
6287 *size = map_size;
6288
6289 /*
6290 * Enter the mapped pages in the page table now.
6291 */
6292 vm_object_lock(object);
2d21ac55
A
6293 /*
6294 * VM object must be kept locked from before PMAP_ENTER()
6295 * until after the kernel is done accessing the page(s).
6296 * Otherwise, the pmap mappings in the kernel could be
6297 * undone by a call to vm_object_pmap_protect().
6298 */
6299
91447636
A
6300 for (page_map_offset = 0;
6301 map_size != 0;
6302 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
6303 unsigned int cache_attr;
6304
6305 page = vm_page_lookup(object, offset + page_map_offset);
6306 if (page == VM_PAGE_NULL) {
2d21ac55
A
6307 printf("vm_paging_map_object: no page !?");
6308 vm_object_unlock(object);
6309 kr = vm_map_remove(kernel_map, *address, *size,
6310 VM_MAP_NO_FLAGS);
6311 assert(kr == KERN_SUCCESS);
6312 *address = 0;
6313 *size = 0;
6314 vm_object_lock(object);
6315 return KERN_MEMORY_ERROR;
91447636 6316 }
2d21ac55 6317 if (page->pmapped == FALSE) {
91447636
A
6318 pmap_sync_page_data_phys(page->phys_page);
6319 }
2d21ac55 6320 page->pmapped = TRUE;
91447636
A
6321 cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
6322
2d21ac55 6323 //assert(pmap_verify_free(page->phys_page));
91447636
A
6324 PMAP_ENTER(kernel_pmap,
6325 *address + page_map_offset,
6326 page,
593a1d5f 6327 protection,
91447636 6328 cache_attr,
0c530ab8 6329 TRUE);
91447636
A
6330 }
6331
6332 vm_paging_objects_mapped_slow++;
b0d623f7 6333 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
91447636
A
6334
6335 return KERN_SUCCESS;
6336}
6337
6338/*
6339 * ENCRYPTED SWAP:
6340 * vm_paging_unmap_object:
6341 * Unmaps part of a VM object's pages from the kernel
6342 * virtual address space.
6343 * Context:
6344 * The VM object is locked. This lock will get
6345 * dropped and re-acquired though.
6346 */
6347void
6348vm_paging_unmap_object(
6349 vm_object_t object,
6350 vm_map_offset_t start,
6351 vm_map_offset_t end)
6352{
6353 kern_return_t kr;
91447636 6354 int i;
91447636 6355
0c530ab8 6356 if ((vm_paging_base_address == 0) ||
8f6c56a5
A
6357 (start < vm_paging_base_address) ||
6358 (end > (vm_paging_base_address
2d21ac55 6359 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
91447636
A
6360 /*
6361 * We didn't use our pre-allocated pool of
6362 * kernel virtual address. Deallocate the
6363 * virtual memory.
6364 */
6365 if (object != VM_OBJECT_NULL) {
6366 vm_object_unlock(object);
6367 }
6368 kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
6369 if (object != VM_OBJECT_NULL) {
6370 vm_object_lock(object);
6371 }
6372 assert(kr == KERN_SUCCESS);
6373 } else {
6374 /*
6375 * We used a kernel virtual address from our
6376 * pre-allocated pool. Put it back in the pool
6377 * for next time.
6378 */
91447636 6379 assert(end - start == PAGE_SIZE);
b0d623f7
A
6380 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
6381 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
91447636
A
6382
6383 /* undo the pmap mapping */
0c530ab8 6384 pmap_remove(kernel_pmap, start, end);
91447636
A
6385
6386 simple_lock(&vm_paging_lock);
6387 vm_paging_page_inuse[i] = FALSE;
2d21ac55
A
6388 if (vm_paging_page_waiter) {
6389 thread_wakeup(&vm_paging_page_waiter);
6390 }
91447636 6391 simple_unlock(&vm_paging_lock);
91447636
A
6392 }
6393}
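
/*
 * Illustrative sketch (not part of the original file): the intended
 * map / access / unmap cycle around the pre-allocated kernel virtual
 * address pool managed by the two routines above.  vm_page_encrypt()
 * and vm_page_decrypt() below follow this same pattern; the helper
 * name and the bzero() access are hypothetical.
 */
#if 0
static void
example_touch_page_in_kernel(vm_page_t page)
{
	vm_map_offset_t	kernel_mapping_offset = 0;
	vm_map_size_t	kernel_mapping_size = PAGE_SIZE;
	vm_offset_t	kernel_vaddr;
	kern_return_t	kr;

	/* caller holds the page's object lock and keeps the page busy */
	kr = vm_paging_map_object(&kernel_mapping_offset,
				  page,
				  page->object,
				  page->offset,
				  &kernel_mapping_size,
				  VM_PROT_READ | VM_PROT_WRITE,
				  FALSE);	/* don't unlock the object; wait for a pool slot */
	if (kr != KERN_SUCCESS)
		return;

	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
	bzero((void *) kernel_vaddr, PAGE_SIZE);	/* hypothetical kernel access */

	vm_paging_unmap_object(page->object,
			       kernel_mapping_offset,
			       kernel_mapping_offset + kernel_mapping_size);
}
#endif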
6394
2d21ac55 6395#if CRYPTO
91447636
A
6396/*
6397 * Encryption data.
6398 * "iv" is the "initial vector". Ideally, we want to
6399 * have a different one for each page we encrypt, so that
6400 * crackers can't find encryption patterns too easily.
6401 */
6402#define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
6403boolean_t swap_crypt_ctx_initialized = FALSE;
6404aes_32t swap_crypt_key[8]; /* big enough for a 256 key */
6405aes_ctx swap_crypt_ctx;
6406const unsigned char swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
6407
6408#if DEBUG
6409boolean_t swap_crypt_ctx_tested = FALSE;
6410unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
6411unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
6412unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
6413#endif /* DEBUG */
6414
91447636
A
6415/*
6416 * Initialize the encryption context: key and key size.
6417 */
6418void swap_crypt_ctx_initialize(void); /* forward */
6419void
6420swap_crypt_ctx_initialize(void)
6421{
6422 unsigned int i;
6423
6424 /*
6425 * No need for locking to protect swap_crypt_ctx_initialized
6426 * because the first use of encryption will come from the
6427 * pageout thread (we won't pagein before there's been a pageout)
6428 * and there's only one pageout thread.
6429 */
6430 if (swap_crypt_ctx_initialized == FALSE) {
6431 for (i = 0;
6432 i < (sizeof (swap_crypt_key) /
6433 sizeof (swap_crypt_key[0]));
6434 i++) {
6435 swap_crypt_key[i] = random();
6436 }
6437 aes_encrypt_key((const unsigned char *) swap_crypt_key,
6438 SWAP_CRYPT_AES_KEY_SIZE,
6439 &swap_crypt_ctx.encrypt);
6440 aes_decrypt_key((const unsigned char *) swap_crypt_key,
6441 SWAP_CRYPT_AES_KEY_SIZE,
6442 &swap_crypt_ctx.decrypt);
6443 swap_crypt_ctx_initialized = TRUE;
6444 }
6445
6446#if DEBUG
6447 /*
6448 * Validate the encryption algorithms.
6449 */
6450 if (swap_crypt_ctx_tested == FALSE) {
6451 /* initialize */
6452 for (i = 0; i < 4096; i++) {
6453 swap_crypt_test_page_ref[i] = (char) i;
6454 }
6455 /* encrypt */
6456 aes_encrypt_cbc(swap_crypt_test_page_ref,
6457 swap_crypt_null_iv,
6458 PAGE_SIZE / AES_BLOCK_SIZE,
6459 swap_crypt_test_page_encrypt,
6460 &swap_crypt_ctx.encrypt);
6461 /* decrypt */
6462 aes_decrypt_cbc(swap_crypt_test_page_encrypt,
6463 swap_crypt_null_iv,
6464 PAGE_SIZE / AES_BLOCK_SIZE,
6465 swap_crypt_test_page_decrypt,
6466 &swap_crypt_ctx.decrypt);
6467 /* compare result with original */
6468 for (i = 0; i < 4096; i ++) {
6469 if (swap_crypt_test_page_decrypt[i] !=
6470 swap_crypt_test_page_ref[i]) {
6471 panic("encryption test failed");
6472 }
6473 }
6474
6475 /* encrypt again */
6476 aes_encrypt_cbc(swap_crypt_test_page_decrypt,
6477 swap_crypt_null_iv,
6478 PAGE_SIZE / AES_BLOCK_SIZE,
6479 swap_crypt_test_page_decrypt,
6480 &swap_crypt_ctx.encrypt);
6481 /* decrypt in place */
6482 aes_decrypt_cbc(swap_crypt_test_page_decrypt,
6483 swap_crypt_null_iv,
6484 PAGE_SIZE / AES_BLOCK_SIZE,
6485 swap_crypt_test_page_decrypt,
6486 &swap_crypt_ctx.decrypt);
6487 for (i = 0; i < 4096; i ++) {
6488 if (swap_crypt_test_page_decrypt[i] !=
6489 swap_crypt_test_page_ref[i]) {
6490 panic("in place encryption test failed");
6491 }
6492 }
6493
6494 swap_crypt_ctx_tested = TRUE;
6495 }
6496#endif /* DEBUG */
6497}
6498
6499/*
6500 * ENCRYPTED SWAP:
6501 * vm_page_encrypt:
6502 * Encrypt the given page, for secure paging.
6503 * The page might already be mapped at kernel virtual
6504 * address "kernel_mapping_offset". Otherwise, we need
6505 * to map it.
6506 *
6507 * Context:
6508 * The page's object is locked, but this lock will be released
6509 * and re-acquired.
6510 * The page is busy and not accessible by users (not entered in any pmap).
6511 */
6512void
6513vm_page_encrypt(
6514 vm_page_t page,
6515 vm_map_offset_t kernel_mapping_offset)
6516{
91447636 6517 kern_return_t kr;
91447636
A
6518 vm_map_size_t kernel_mapping_size;
6519 vm_offset_t kernel_vaddr;
6520 union {
6521 unsigned char aes_iv[AES_BLOCK_SIZE];
6522 struct {
6523 memory_object_t pager_object;
6524 vm_object_offset_t paging_offset;
6525 } vm;
6526 } encrypt_iv;
6527
6528 if (! vm_pages_encrypted) {
6529 vm_pages_encrypted = TRUE;
6530 }
6531
6532 assert(page->busy);
6533 assert(page->dirty || page->precious);
6534
6535 if (page->encrypted) {
6536 /*
6537 * Already encrypted: no need to do it again.
6538 */
6539 vm_page_encrypt_already_encrypted_counter++;
6540 return;
6541 }
6542 ASSERT_PAGE_DECRYPTED(page);
6543
6544 /*
2d21ac55
A
6545 * Take a paging-in-progress reference to keep the object
6546 * alive even if we have to unlock it (in vm_paging_map_object()
6547 * for example)...
91447636 6548 */
2d21ac55 6549 vm_object_paging_begin(page->object);
91447636
A
6550
6551 if (kernel_mapping_offset == 0) {
6552 /*
6553 * The page hasn't already been mapped in kernel space
6554 * by the caller. Map it now, so that we can access
6555 * its contents and encrypt them.
6556 */
6557 kernel_mapping_size = PAGE_SIZE;
6558 kr = vm_paging_map_object(&kernel_mapping_offset,
6559 page,
6560 page->object,
6561 page->offset,
2d21ac55 6562 &kernel_mapping_size,
593a1d5f 6563 VM_PROT_READ | VM_PROT_WRITE,
2d21ac55 6564 FALSE);
91447636
A
6565 if (kr != KERN_SUCCESS) {
6566 panic("vm_page_encrypt: "
6567 "could not map page in kernel: 0x%x\n",
6568 kr);
6569 }
6570 } else {
6571 kernel_mapping_size = 0;
6572 }
6573 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
6574
6575 if (swap_crypt_ctx_initialized == FALSE) {
6576 swap_crypt_ctx_initialize();
6577 }
6578 assert(swap_crypt_ctx_initialized);
6579
6580 /*
6581 * Prepare an "initial vector" for the encryption.
6582 * We use the "pager" and the "paging_offset" for that
6583 * page to obfuscate the encrypted data a bit more and
6584 * prevent crackers from finding patterns that they could
6585 * use to break the key.
6586 */
6587 bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
6588 encrypt_iv.vm.pager_object = page->object->pager;
6589 encrypt_iv.vm.paging_offset =
6590 page->object->paging_offset + page->offset;
6591
91447636
A
6592 /* encrypt the "initial vector" */
6593 aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
6594 swap_crypt_null_iv,
6595 1,
6596 &encrypt_iv.aes_iv[0],
6597 &swap_crypt_ctx.encrypt);
6598
6599 /*
6600 * Encrypt the page.
6601 */
6602 aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
6603 &encrypt_iv.aes_iv[0],
6604 PAGE_SIZE / AES_BLOCK_SIZE,
6605 (unsigned char *) kernel_vaddr,
6606 &swap_crypt_ctx.encrypt);
6607
6608 vm_page_encrypt_counter++;
6609
91447636
A
6610 /*
6611 * Unmap the page from the kernel's address space,
6612 * if we had to map it ourselves. Otherwise, let
6613 * the caller undo the mapping if needed.
6614 */
6615 if (kernel_mapping_size != 0) {
6616 vm_paging_unmap_object(page->object,
6617 kernel_mapping_offset,
6618 kernel_mapping_offset + kernel_mapping_size);
6619 }
6620
6621 /*
2d21ac55 6622 * Clear the "reference" and "modified" bits.
91447636
A
6623 * This should clean up any impact the encryption had
6624 * on them.
2d21ac55
A
6625 * The page was kept busy and disconnected from all pmaps,
6626 * so it can't have been referenced or modified from user
6627 * space.
6628 * The software bits will be reset later after the I/O
6629 * has completed (in upl_commit_range()).
91447636 6630 */
2d21ac55 6631 pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
91447636
A
6632
6633 page->encrypted = TRUE;
2d21ac55
A
6634
6635 vm_object_paging_end(page->object);
91447636
A
6636}
6637
6638/*
6639 * ENCRYPTED SWAP:
6640 * vm_page_decrypt:
6641 * Decrypt the given page.
6642 * The page might already be mapped at kernel virtual
6643 * address "kernel_mapping_offset". Otherwise, we need
6644 * to map it.
6645 *
6646 * Context:
6647 * The page's VM object is locked but will be unlocked and relocked.
6648 * The page is busy and not accessible by users (not entered in any pmap).
6649 */
6650void
6651vm_page_decrypt(
6652 vm_page_t page,
6653 vm_map_offset_t kernel_mapping_offset)
6654{
91447636
A
6655 kern_return_t kr;
6656 vm_map_size_t kernel_mapping_size;
6657 vm_offset_t kernel_vaddr;
91447636
A
6658 union {
6659 unsigned char aes_iv[AES_BLOCK_SIZE];
6660 struct {
6661 memory_object_t pager_object;
6662 vm_object_offset_t paging_offset;
6663 } vm;
6664 } decrypt_iv;
6665
6666 assert(page->busy);
6667 assert(page->encrypted);
6668
6669 /*
2d21ac55
A
6670 * Take a paging-in-progress reference to keep the object
6671 * alive even if we have to unlock it (in vm_paging_map_object()
6672 * for example)...
91447636 6673 */
2d21ac55 6674 vm_object_paging_begin(page->object);
91447636
A
6675
6676 if (kernel_mapping_offset == 0) {
6677 /*
6678 * The page hasn't already been mapped in kernel space
6679 * by the caller. Map it now, so that we can access
6680 * its contents and decrypt them.
6681 */
6682 kernel_mapping_size = PAGE_SIZE;
6683 kr = vm_paging_map_object(&kernel_mapping_offset,
6684 page,
6685 page->object,
6686 page->offset,
2d21ac55 6687 &kernel_mapping_size,
593a1d5f 6688 VM_PROT_READ | VM_PROT_WRITE,
2d21ac55 6689 FALSE);
91447636
A
6690 if (kr != KERN_SUCCESS) {
6691 panic("vm_page_decrypt: "
2d21ac55
A
6692 "could not map page in kernel: 0x%x\n",
6693 kr);
91447636
A
6694 }
6695 } else {
6696 kernel_mapping_size = 0;
6697 }
6698 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
6699
6700 assert(swap_crypt_ctx_initialized);
6701
6702 /*
6703 * Prepare an "initial vector" for the decryption.
6704 * It has to be the same as the "initial vector" we
6705 * used to encrypt that page.
6706 */
6707 bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
6708 decrypt_iv.vm.pager_object = page->object->pager;
6709 decrypt_iv.vm.paging_offset =
6710 page->object->paging_offset + page->offset;
6711
91447636
A
6712 /* encrypt the "initial vector" */
6713 aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
6714 swap_crypt_null_iv,
6715 1,
6716 &decrypt_iv.aes_iv[0],
6717 &swap_crypt_ctx.encrypt);
6718
6719 /*
6720 * Decrypt the page.
6721 */
6722 aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
6723 &decrypt_iv.aes_iv[0],
6724 PAGE_SIZE / AES_BLOCK_SIZE,
6725 (unsigned char *) kernel_vaddr,
6726 &swap_crypt_ctx.decrypt);
6727 vm_page_decrypt_counter++;
6728
91447636
A
6729 /*
6730 * Unmap the page from the kernel's address space,
6731 * if we had to map it ourselves. Otherwise, let
6732 * the caller undo the mapping if needed.
6733 */
6734 if (kernel_mapping_size != 0) {
6735 vm_paging_unmap_object(page->object,
6736 kernel_vaddr,
6737 kernel_vaddr + PAGE_SIZE);
6738 }
6739
6740 /*
6741 * After decryption, the page is actually clean.
6742 * It was encrypted as part of paging, which "cleans"
6743 * the "dirty" pages.
6744 * No one could access it after it was encrypted
6745 * and the decryption doesn't count.
6746 */
6747 page->dirty = FALSE;
b0d623f7 6748 assert (page->cs_validated == FALSE);
2d21ac55 6749 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
91447636
A
6750 page->encrypted = FALSE;
6751
6752 /*
6753 * We've just modified the page's contents via the data cache and part
6754 * of the new contents might still be in the cache and not yet in RAM.
6755 * Since the page is now available and might get gathered in a UPL to
6756 * be part of a DMA transfer from a driver that expects the memory to
6757 * be coherent at this point, we have to flush the data cache.
6758 */
0c530ab8 6759 pmap_sync_page_attributes_phys(page->phys_page);
91447636
A
6760 /*
6761 * Since the page is not mapped yet, some code might assume that it
6762 * doesn't need to invalidate the instruction cache when writing to
2d21ac55
A
6763 * that page. That code relies on "pmapped" being FALSE, so that the
6764 * caches get synchronized when the page is first mapped.
91447636 6765 */
2d21ac55
A
6766 assert(pmap_verify_free(page->phys_page));
6767 page->pmapped = FALSE;
4a3eedf9 6768 page->wpmapped = FALSE;
2d21ac55
A
6769
6770 vm_object_paging_end(page->object);
91447636
A
6771}
6772
b0d623f7 6773#if DEVELOPMENT || DEBUG
91447636
A
6774unsigned long upl_encrypt_upls = 0;
6775unsigned long upl_encrypt_pages = 0;
b0d623f7 6776#endif
91447636
A
6777
6778/*
6779 * ENCRYPTED SWAP:
6780 *
6781 * upl_encrypt:
6782 * Encrypts all the pages in the UPL, within the specified range.
6783 *
6784 */
6785void
6786upl_encrypt(
6787 upl_t upl,
6788 upl_offset_t crypt_offset,
6789 upl_size_t crypt_size)
6790{
b0d623f7
A
6791 upl_size_t upl_size, subupl_size=crypt_size;
6792 upl_offset_t offset_in_upl, subupl_offset=crypt_offset;
91447636 6793 vm_object_t upl_object;
b0d623f7 6794 vm_object_offset_t upl_offset;
91447636
A
6795 vm_page_t page;
6796 vm_object_t shadow_object;
6797 vm_object_offset_t shadow_offset;
6798 vm_object_offset_t paging_offset;
6799 vm_object_offset_t base_offset;
b0d623f7
A
6800 int isVectorUPL = 0;
6801 upl_t vector_upl = NULL;
6802
6803 if((isVectorUPL = vector_upl_is_valid(upl)))
6804 vector_upl = upl;
6805
6806process_upl_to_encrypt:
6807 if(isVectorUPL) {
6808 crypt_size = subupl_size;
6809 crypt_offset = subupl_offset;
6810 upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
6811 if(upl == NULL)
6812 panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
6813 subupl_size -= crypt_size;
6814 subupl_offset += crypt_size;
6815 }
91447636 6816
b0d623f7 6817#if DEVELOPMENT || DEBUG
91447636
A
6818 upl_encrypt_upls++;
6819 upl_encrypt_pages += crypt_size / PAGE_SIZE;
b0d623f7 6820#endif
91447636
A
6821 upl_object = upl->map_object;
6822 upl_offset = upl->offset;
6823 upl_size = upl->size;
6824
91447636
A
6825 vm_object_lock(upl_object);
6826
6827 /*
6828 * Find the VM object that contains the actual pages.
6829 */
6830 if (upl_object->pageout) {
6831 shadow_object = upl_object->shadow;
6832 /*
6833 * The offset in the shadow object is actually also
6834 * accounted for in upl->offset. It possibly shouldn't be
6835 * this way, but for now don't account for it twice.
6836 */
6837 shadow_offset = 0;
6838 assert(upl_object->paging_offset == 0); /* XXX ? */
6839 vm_object_lock(shadow_object);
6840 } else {
6841 shadow_object = upl_object;
6842 shadow_offset = 0;
6843 }
6844
6845 paging_offset = shadow_object->paging_offset;
6846 vm_object_paging_begin(shadow_object);
6847
2d21ac55
A
6848 if (shadow_object != upl_object)
6849 vm_object_unlock(upl_object);
6850
91447636
A
6851
6852 base_offset = shadow_offset;
6853 base_offset += upl_offset;
6854 base_offset += crypt_offset;
6855 base_offset -= paging_offset;
91447636 6856
2d21ac55 6857 assert(crypt_offset + crypt_size <= upl_size);
91447636 6858
b0d623f7
A
6859 for (offset_in_upl = 0;
6860 offset_in_upl < crypt_size;
6861 offset_in_upl += PAGE_SIZE) {
91447636 6862 page = vm_page_lookup(shadow_object,
b0d623f7 6863 base_offset + offset_in_upl);
91447636
A
6864 if (page == VM_PAGE_NULL) {
6865 panic("upl_encrypt: "
6866 "no page for (obj=%p,off=%lld+%d)!\n",
6867 shadow_object,
6868 base_offset,
b0d623f7 6869 offset_in_upl);
91447636 6870 }
2d21ac55
A
6871 /*
6872 * Disconnect the page from all pmaps, so that nobody can
6873 * access it while it's encrypted. After that point, all
6874 * accesses to this page will cause a page fault and block
6875 * while the page is busy being encrypted. After the
6876 * encryption completes, any access will cause a
6877 * page fault and the page gets decrypted at that time.
6878 */
6879 pmap_disconnect(page->phys_page);
91447636 6880 vm_page_encrypt(page, 0);
2d21ac55 6881
b0d623f7 6882 if (vm_object_lock_avoid(shadow_object)) {
2d21ac55
A
6883 /*
6884 * Give vm_pageout_scan() a chance to convert more
6885 * pages from "clean-in-place" to "clean-and-free",
6886 * if it's interested in the same pages we selected
6887 * in this cluster.
6888 */
6889 vm_object_unlock(shadow_object);
b0d623f7 6890 mutex_pause(2);
2d21ac55
A
6891 vm_object_lock(shadow_object);
6892 }
91447636
A
6893 }
6894
6895 vm_object_paging_end(shadow_object);
6896 vm_object_unlock(shadow_object);
b0d623f7
A
6897
6898 if(isVectorUPL && subupl_size)
6899 goto process_upl_to_encrypt;
91447636
A
6900}
6901
2d21ac55
A
6902#else /* CRYPTO */
6903void
6904upl_encrypt(
6905 __unused upl_t upl,
6906 __unused upl_offset_t crypt_offset,
6907 __unused upl_size_t crypt_size)
6908{
6909}
6910
6911void
6912vm_page_encrypt(
6913 __unused vm_page_t page,
6914 __unused vm_map_offset_t kernel_mapping_offset)
6915{
6916}
6917
6918void
6919vm_page_decrypt(
6920 __unused vm_page_t page,
6921 __unused vm_map_offset_t kernel_mapping_offset)
6922{
6923}
6924
6925#endif /* CRYPTO */
6926
b0d623f7
A
6927void
6928vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
6929{
6930 page->list_req_pending = FALSE;
6931 page->cleaning = FALSE;
6932 page->pageout = FALSE;
6933
6934 if (!queues_locked) {
6935 vm_page_lockspin_queues();
6936 }
6937
6938 /*
6939 * need to drop the laundry count...
6940 * we may also need to remove it
6941 * from the I/O paging queue...
6942 * vm_pageout_throttle_up handles both cases
6943 *
6944 * the laundry and pageout_queue flags are cleared...
6945 */
6946 vm_pageout_throttle_up(page);
6947
6948 /*
6949 * toss the wire count we picked up
6950 * when we initially set this page up
6951 * to be cleaned...
6952 */
6953 vm_page_unwire(page);
6954
6955 vm_page_steal_pageout_page++;
6956
6957 if (!queues_locked) {
6958 vm_page_unlock_queues();
6959 }
6960}
6961
6962upl_t
6963vector_upl_create(vm_offset_t upl_offset)
6964{
6965 int vector_upl_size = sizeof(struct _vector_upl);
6966 int i=0;
6967 upl_t upl;
6968 vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
6969
6970 upl = upl_create(0,UPL_VECTOR,0);
6971 upl->vector_upl = vector_upl;
6972 upl->offset = upl_offset;
6973 vector_upl->size = 0;
6974 vector_upl->offset = upl_offset;
6975 vector_upl->invalid_upls=0;
6976 vector_upl->num_upls=0;
6977 vector_upl->pagelist = NULL;
6978
6979 for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
6980 vector_upl->upl_iostates[i].size = 0;
6981 vector_upl->upl_iostates[i].offset = 0;
6982
6983 }
6984 return upl;
6985}
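
/*
 * Illustrative sketch (not part of the original file): assembling a
 * vectored UPL from already-created sub-UPLs with the vector_upl_*
 * helpers in this file.  The wrapper name and the two sub-UPL
 * arguments are hypothetical; the sub-UPLs are assumed to carry
 * internal page lists so that vector_upl_set_pagelist() can flatten
 * them into one list.
 */
#if 0
static upl_t
example_build_vector_upl(upl_t sub_upl1, uint32_t io_size1,
			 upl_t sub_upl2, uint32_t io_size2)
{
	upl_t	vector_upl;

	vector_upl = vector_upl_create(0);		/* vector starts at offset 0 */

	vector_upl_set_subupl(vector_upl, sub_upl1, io_size1);
	vector_upl_set_subupl(vector_upl, sub_upl2, io_size2);

	/* merge the per-page info of the sub-UPLs into one page list */
	vector_upl_set_pagelist(vector_upl);

	return vector_upl;
}
#endif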
6986
6987void
6988vector_upl_deallocate(upl_t upl)
6989{
6990 if(upl) {
6991 vector_upl_t vector_upl = upl->vector_upl;
6992 if(vector_upl) {
6993 if(vector_upl->invalid_upls != vector_upl->num_upls)
6994 panic("Deallocating non-empty Vectored UPL\n");
6995 kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
6996 vector_upl->invalid_upls=0;
6997 vector_upl->num_upls = 0;
6998 vector_upl->pagelist = NULL;
6999 vector_upl->size = 0;
7000 vector_upl->offset = 0;
7001 kfree(vector_upl, sizeof(struct _vector_upl));
7002 vector_upl = (vector_upl_t)0xdeadbeef;
7003 }
7004 else
7005 panic("vector_upl_deallocate was passed a non-vectored upl\n");
7006 }
7007 else
7008 panic("vector_upl_deallocate was passed a NULL upl\n");
7009}
7010
7011boolean_t
7012vector_upl_is_valid(upl_t upl)
7013{
7014 if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
7015 vector_upl_t vector_upl = upl->vector_upl;
7016 if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
7017 return FALSE;
7018 else
7019 return TRUE;
7020 }
7021 return FALSE;
7022}
7023
7024boolean_t
7025vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
7026{
7027 if(vector_upl_is_valid(upl)) {
7028 vector_upl_t vector_upl = upl->vector_upl;
7029
7030 if(vector_upl) {
7031 if(subupl) {
7032 if(io_size) {
7033 if(io_size < PAGE_SIZE)
7034 io_size = PAGE_SIZE;
7035 subupl->vector_upl = (void*)vector_upl;
7036 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
7037 vector_upl->size += io_size;
7038 upl->size += io_size;
7039 }
7040 else {
7041 uint32_t i=0,invalid_upls=0;
7042 for(i = 0; i < vector_upl->num_upls; i++) {
7043 if(vector_upl->upl_elems[i] == subupl)
7044 break;
7045 }
7046 if(i == vector_upl->num_upls)
7047 panic("Trying to remove sub-upl when none exists");
7048
7049 vector_upl->upl_elems[i] = NULL;
7050 invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
7051 if(invalid_upls == vector_upl->num_upls)
7052 return TRUE;
7053 else
7054 return FALSE;
7055 }
7056 }
7057 else
7058 panic("vector_upl_set_subupl was passed a NULL upl element\n");
7059 }
7060 else
7061 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
7062 }
7063 else
7064 panic("vector_upl_set_subupl was passed a NULL upl\n");
7065
7066 return FALSE;
7067}
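/*
 * Illustrative sketch (not part of the original source): the remove path of
 * vector_upl_set_subupl().  example_retire_subupl() is invented for
 * illustration; it mirrors the pattern used by the UPL commit/abort paths.
 */
#if 0
static void
example_retire_subupl(upl_t vupl, upl_t subupl)
{
	/*
	 * An io_size of 0 means "remove": the matching slot is cleared
	 * and invalid_upls is bumped atomically.  A TRUE return says
	 * every sub-UPL has now been retired, so the vector bookkeeping
	 * can be torn down (the wrapper upl itself is released through
	 * the normal UPL teardown path).
	 */
	if (vector_upl_set_subupl(vupl, subupl, 0) == TRUE)
		vector_upl_deallocate(vupl);
}
#endif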
7068
7069void
7070vector_upl_set_pagelist(upl_t upl)
7071{
7072 if(vector_upl_is_valid(upl)) {
7073 uint32_t i=0;
7074 vector_upl_t vector_upl = upl->vector_upl;
7075
7076 if(vector_upl) {
7077 vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
7078
7079 vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
7080
7081 for(i=0; i < vector_upl->num_upls; i++) {
7082 cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
7083 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
7084 pagelist_size += cur_upl_pagelist_size;
7085 if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
7086 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
7087 }
7088 assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
7089 }
7090 else
7091 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
7092 }
7093 else
7094 panic("vector_upl_set_pagelist was passed a NULL upl\n");
7095
7096}
7097
7098upl_t
7099vector_upl_subupl_byindex(upl_t upl, uint32_t index)
7100{
7101 if(vector_upl_is_valid(upl)) {
7102 vector_upl_t vector_upl = upl->vector_upl;
7103 if(vector_upl) {
7104 if(index < vector_upl->num_upls)
7105 return vector_upl->upl_elems[index];
7106 }
7107 else
7108 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
7109 }
7110 return NULL;
7111}
7112
7113upl_t
7114vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
7115{
7116 if(vector_upl_is_valid(upl)) {
7117 uint32_t i=0;
7118 vector_upl_t vector_upl = upl->vector_upl;
7119
7120 if(vector_upl) {
7121 upl_t subupl = NULL;
7122 vector_upl_iostates_t subupl_state;
7123
7124 for(i=0; i < vector_upl->num_upls; i++) {
7125 subupl = vector_upl->upl_elems[i];
7126 subupl_state = vector_upl->upl_iostates[i];
7127 if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
7128 /* We could have been passed an offset/size pair that belongs
 7129				 * to a UPL element that has already been committed/aborted.
7130 * If so, return NULL.
7131 */
7132 if(subupl == NULL)
7133 return NULL;
7134 if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
7135 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
7136 if(*upl_size > subupl_state.size)
7137 *upl_size = subupl_state.size;
7138 }
7139 if(*upl_offset >= subupl_state.offset)
7140 *upl_offset -= subupl_state.offset;
7141 else if(i)
7142 panic("Vector UPL offset miscalculation\n");
7143 return subupl;
7144 }
7145 }
7146 }
7147 else
7148 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
7149 }
7150 return NULL;
7151}
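/*
 * Illustrative sketch (not part of the original source): translating a
 * vector-relative I/O range into a sub-UPL plus a sub-UPL-relative range with
 * vector_upl_subupl_byoffset().  example_find_subupl() is invented for
 * illustration.
 */
#if 0
static upl_t
example_find_subupl(upl_t vupl, upl_offset_t io_offset, upl_size_t io_size)
{
	upl_offset_t	offset = io_offset;	/* in/out: rewritten relative to the sub-UPL */
	upl_size_t	size = io_size;		/* in/out: clipped to the sub-UPL's iostate */
	upl_t		subupl;

	subupl = vector_upl_subupl_byoffset(vupl, &offset, &size);
	if (subupl == NULL) {
		/* the covering sub-UPL was already committed/aborted */
		return NULL;
	}
	/* 'offset' and 'size' now describe the range within 'subupl' */
	return subupl;
}
#endif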
7152
7153void
7154vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
7155{
7156 *v_upl_submap = NULL;
7157
7158 if(vector_upl_is_valid(upl)) {
7159 vector_upl_t vector_upl = upl->vector_upl;
7160 if(vector_upl) {
7161 *v_upl_submap = vector_upl->submap;
7162 *submap_dst_addr = vector_upl->submap_dst_addr;
7163 }
7164 else
7165 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
7166 }
7167 else
7168 panic("vector_upl_get_submap was passed a null UPL\n");
7169}
7170
7171void
7172vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
7173{
7174 if(vector_upl_is_valid(upl)) {
7175 vector_upl_t vector_upl = upl->vector_upl;
7176 if(vector_upl) {
7177 vector_upl->submap = submap;
7178 vector_upl->submap_dst_addr = submap_dst_addr;
7179 }
7180 else
 7181			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
7182 }
7183 else
 7184		panic("vector_upl_set_submap was passed a NULL UPL\n");
7185}
7186
7187void
7188vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
7189{
7190 if(vector_upl_is_valid(upl)) {
7191 uint32_t i = 0;
7192 vector_upl_t vector_upl = upl->vector_upl;
7193
7194 if(vector_upl) {
7195 for(i = 0; i < vector_upl->num_upls; i++) {
7196 if(vector_upl->upl_elems[i] == subupl)
7197 break;
7198 }
7199
7200 if(i == vector_upl->num_upls)
7201 panic("setting sub-upl iostate when none exists");
7202
7203 vector_upl->upl_iostates[i].offset = offset;
7204 if(size < PAGE_SIZE)
7205 size = PAGE_SIZE;
7206 vector_upl->upl_iostates[i].size = size;
7207 }
7208 else
7209 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
7210 }
7211 else
7212 panic("vector_upl_set_iostate was passed a NULL UPL\n");
7213}
7214
7215void
7216vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
7217{
7218 if(vector_upl_is_valid(upl)) {
7219 uint32_t i = 0;
7220 vector_upl_t vector_upl = upl->vector_upl;
7221
7222 if(vector_upl) {
7223 for(i = 0; i < vector_upl->num_upls; i++) {
7224 if(vector_upl->upl_elems[i] == subupl)
7225 break;
7226 }
7227
7228 if(i == vector_upl->num_upls)
7229 panic("getting sub-upl iostate when none exists");
7230
7231 *offset = vector_upl->upl_iostates[i].offset;
7232 *size = vector_upl->upl_iostates[i].size;
7233 }
7234 else
7235 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
7236 }
7237 else
7238 panic("vector_upl_get_iostate was passed a NULL UPL\n");
7239}
7240
7241void
7242vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
7243{
7244 if(vector_upl_is_valid(upl)) {
7245 vector_upl_t vector_upl = upl->vector_upl;
7246 if(vector_upl) {
7247 if(index < vector_upl->num_upls) {
7248 *offset = vector_upl->upl_iostates[index].offset;
7249 *size = vector_upl->upl_iostates[index].size;
7250 }
7251 else
7252 *offset = *size = 0;
7253 }
7254 else
7255 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
7256 }
7257 else
7258 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
7259}
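/*
 * Illustrative sketch (not part of the original source): walking the elements
 * of a vector UPL by index with vector_upl_subupl_byindex() and
 * vector_upl_get_iostate_byindex().  example_walk_vector_upl() and its
 * caller-supplied element count are invented for illustration.
 */
#if 0
static void
example_walk_vector_upl(upl_t vupl, uint32_t nelems)
{
	uint32_t	i;
	upl_t		subupl;
	upl_offset_t	offset;
	upl_size_t	size;

	for (i = 0; i < nelems; i++) {
		subupl = vector_upl_subupl_byindex(vupl, i);
		vector_upl_get_iostate_byindex(vupl, i, &offset, &size);
		if (subupl == NULL)
			continue;	/* slot already committed/aborted, or out of range */
		/* 'subupl' covers [offset, offset + size) of the vector UPL */
	}
}
#endif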
7260
7261upl_page_info_t *
7262upl_get_internal_vectorupl_pagelist(upl_t upl)
7263{
7264 return ((vector_upl_t)(upl->vector_upl))->pagelist;
7265}
7266
7267void *
7268upl_get_internal_vectorupl(upl_t upl)
7269{
7270 return upl->vector_upl;
7271}
7272
7273vm_size_t
7274upl_get_internal_pagelist_offset(void)
7275{
7276 return sizeof(struct upl);
7277}
7278
7279void
7280upl_clear_dirty(
7281 upl_t upl,
7282 boolean_t value)
91447636 7283{
7284 if (value) {
7285 upl->flags |= UPL_CLEAR_DIRTY;
7286 } else {
7287 upl->flags &= ~UPL_CLEAR_DIRTY;
7288 }
7289}
7290
7291
7292#ifdef MACH_BSD
1c79356b 7293
7294boolean_t upl_device_page(upl_page_info_t *upl)
7295{
7296 return(UPL_DEVICE_PAGE(upl));
7297}
7298boolean_t upl_page_present(upl_page_info_t *upl, int index)
7299{
7300 return(UPL_PAGE_PRESENT(upl, index));
7301}
7302boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
7303{
7304 return(UPL_SPECULATIVE_PAGE(upl, index));
7305}
7306boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
7307{
7308 return(UPL_DIRTY_PAGE(upl, index));
7309}
7310boolean_t upl_valid_page(upl_page_info_t *upl, int index)
7311{
7312 return(UPL_VALID_PAGE(upl, index));
7313}
91447636 7314ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
1c79356b 7315{
91447636 7316 return(UPL_PHYS_PAGE(upl, index));
7317}
7318
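/*
 * Illustrative sketch (not part of the original source): scanning a UPL page
 * list with the accessors above.  example_count_dirty_pages() and its
 * arguments are invented for illustration; 'pl' would come from the UPL's
 * internal page list and 'page_count' from the UPL's size in pages.
 */
#if 0
static int
example_count_dirty_pages(upl_page_info_t *pl, int page_count)
{
	int	i, ndirty = 0;

	for (i = 0; i < page_count; i++) {
		if (!upl_page_present(pl, i))
			continue;
		if (upl_valid_page(pl, i) && upl_dirty_page(pl, i))
			ndirty++;
		/* upl_phys_page(pl, i) would return the physical page number */
	}
	return ndirty;
}
#endif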
2d21ac55 7319
7320void
7321vm_countdirtypages(void)
7322{
7323 vm_page_t m;
7324 int dpages;
7325 int pgopages;
7326 int precpages;
7327
7328
7329 dpages=0;
7330 pgopages=0;
7331 precpages=0;
7332
7333 vm_page_lock_queues();
7334 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
7335 do {
7336 if (m ==(vm_page_t )0) break;
7337
7338 if(m->dirty) dpages++;
7339 if(m->pageout) pgopages++;
7340 if(m->precious) precpages++;
7341
91447636 7342 assert(m->object != kernel_object);
7343 m = (vm_page_t) queue_next(&m->pageq);
7344 if (m ==(vm_page_t )0) break;
7345
7346 } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
7347 vm_page_unlock_queues();
9bccf70c 7348
7349 vm_page_lock_queues();
7350 m = (vm_page_t) queue_first(&vm_page_queue_throttled);
7351 do {
7352 if (m ==(vm_page_t )0) break;
7353
7354 dpages++;
7355 assert(m->dirty);
7356 assert(!m->pageout);
7357 assert(m->object != kernel_object);
7358 m = (vm_page_t) queue_next(&m->pageq);
7359 if (m ==(vm_page_t )0) break;
7360
7361 } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
7362 vm_page_unlock_queues();
7363
7364 vm_page_lock_queues();
7365 m = (vm_page_t) queue_first(&vm_page_queue_zf);
7366 do {
7367 if (m ==(vm_page_t )0) break;
7368
7369 if(m->dirty) dpages++;
7370 if(m->pageout) pgopages++;
7371 if(m->precious) precpages++;
7372
91447636 7373 assert(m->object != kernel_object);
7374 m = (vm_page_t) queue_next(&m->pageq);
7375 if (m ==(vm_page_t )0) break;
7376
7377 } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
7378 vm_page_unlock_queues();
7379
7380 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
7381
7382 dpages=0;
7383 pgopages=0;
7384 precpages=0;
7385
7386 vm_page_lock_queues();
7387 m = (vm_page_t) queue_first(&vm_page_queue_active);
7388
7389 do {
7390 if(m == (vm_page_t )0) break;
7391 if(m->dirty) dpages++;
7392 if(m->pageout) pgopages++;
7393 if(m->precious) precpages++;
7394
91447636 7395 assert(m->object != kernel_object);
7396 m = (vm_page_t) queue_next(&m->pageq);
7397 if(m == (vm_page_t )0) break;
7398
7399 } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
7400 vm_page_unlock_queues();
7401
7402 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
7403
7404}
7405#endif /* MACH_BSD */
7406
0c530ab8 7407ppnum_t upl_get_highest_page(
2d21ac55 7408 upl_t upl)
0c530ab8 7409{
2d21ac55 7410 return upl->highest_page;
7411}
7412
7413upl_size_t upl_get_size(
7414 upl_t upl)
7415{
7416 return upl->size;
7417}
7418
7419#if UPL_DEBUG
7420kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
7421{
7422 upl->ubc_alias1 = alias1;
7423 upl->ubc_alias2 = alias2;
7424 return KERN_SUCCESS;
7425}
b0d623f7 7426int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
7427{
7428 if(al)
7429 *al = upl->ubc_alias1;
7430 if(al2)
7431 *al2 = upl->ubc_alias2;
7432 return KERN_SUCCESS;
7433}
91447636 7434#endif /* UPL_DEBUG */
7435
7436
7437
7438#if MACH_KDB
7439#include <ddb/db_output.h>
7440#include <ddb/db_print.h>
7441#include <vm/vm_print.h>
7442
7443#define printf kdbprintf
7444void db_pageout(void);
7445
7446void
7447db_vm(void)
7448{
7449
7450 iprintf("VM Statistics:\n");
7451 db_indent += 2;
7452 iprintf("pages:\n");
7453 db_indent += 2;
7454 iprintf("activ %5d inact %5d free %5d",
7455 vm_page_active_count, vm_page_inactive_count,
7456 vm_page_free_count);
7457 printf(" wire %5d gobbl %5d\n",
7458 vm_page_wire_count, vm_page_gobble_count);
7459 db_indent -= 2;
7460 iprintf("target:\n");
7461 db_indent += 2;
7462 iprintf("min %5d inact %5d free %5d",
7463 vm_page_free_min, vm_page_inactive_target,
7464 vm_page_free_target);
7465 printf(" resrv %5d\n", vm_page_free_reserved);
7466 db_indent -= 2;
1c79356b 7467 iprintf("pause:\n");
7468 db_pageout();
7469 db_indent -= 2;
7470}
7471
1c79356b 7472#if MACH_COUNTERS
91447636 7473extern int c_laundry_pages_freed;
7474#endif /* MACH_COUNTERS */
7475
7476void
7477db_pageout(void)
7478{
7479 iprintf("Pageout Statistics:\n");
7480 db_indent += 2;
7481 iprintf("active %5d inactv %5d\n",
7482 vm_pageout_active, vm_pageout_inactive);
7483 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
7484 vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
7485 vm_pageout_inactive_busy, vm_pageout_inactive_absent);
7486 iprintf("used %5d clean %5d dirty %5d\n",
7487 vm_pageout_inactive_used, vm_pageout_inactive_clean,
7488 vm_pageout_inactive_dirty);
7489#if MACH_COUNTERS
7490 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
7491#endif /* MACH_COUNTERS */
7492#if MACH_CLUSTER_STATS
7493 iprintf("Cluster Statistics:\n");
7494 db_indent += 2;
7495 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
7496 vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
7497 vm_pageout_cluster_collisions);
7498 iprintf("clusters %5d conversions %5d\n",
7499 vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);
7500 db_indent -= 2;
7501 iprintf("Target Statistics:\n");
7502 db_indent += 2;
7503 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
7504 vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
7505 vm_pageout_target_page_freed);
7506 db_indent -= 2;
7507#endif /* MACH_CLUSTER_STATS */
7508 db_indent -= 2;
7509}
7510
1c79356b 7511#endif /* MACH_KDB */