1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
65
66 #include <stdint.h>
67
68 #include <debug.h>
69 #include <mach_pagemap.h>
70 #include <mach_cluster_stats.h>
71 #include <advisory_pageout.h>
72
73 #include <mach/mach_types.h>
74 #include <mach/memory_object.h>
75 #include <mach/memory_object_default.h>
76 #include <mach/memory_object_control_server.h>
77 #include <mach/mach_host_server.h>
78 #include <mach/upl.h>
79 #include <mach/vm_map.h>
80 #include <mach/vm_param.h>
81 #include <mach/vm_statistics.h>
82 #include <mach/sdt.h>
83
84 #include <kern/kern_types.h>
85 #include <kern/counters.h>
86 #include <kern/host_statistics.h>
87 #include <kern/machine.h>
88 #include <kern/misc_protos.h>
89 #include <kern/sched.h>
90 #include <kern/thread.h>
91 #include <kern/xpr.h>
92 #include <kern/kalloc.h>
93
94 #include <machine/vm_tuning.h>
95 #include <machine/commpage.h>
96
97 #include <vm/pmap.h>
98 #include <vm/vm_compressor_pager.h>
99 #include <vm/vm_fault.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_protos.h> /* must be last */
105 #include <vm/memory_object.h>
106 #include <vm/vm_purgeable_internal.h>
107 #include <vm/vm_shared_region.h>
108 #include <vm/vm_compressor.h>
109
110 /*
111 * ENCRYPTED SWAP:
112 */
113 #include <libkern/crypto/aes.h>
114 extern u_int32_t random(void); /* from <libkern/libkern.h> */
115
116 extern int cs_debug;
117
118 #if UPL_DEBUG
119 #include <libkern/OSDebug.h>
120 #endif
121
122 extern vm_pressure_level_t memorystatus_vm_pressure_level;
123 int memorystatus_purge_on_warning = 2;
124 int memorystatus_purge_on_urgent = 5;
125 int memorystatus_purge_on_critical = 8;
126
127 #if VM_PRESSURE_EVENTS
128 void vm_pressure_response(void);
129 boolean_t vm_pressure_thread_running = FALSE;
130 extern void consider_vm_pressure_events(void);
131 #endif
132 boolean_t vm_pressure_changed = FALSE;
133
134 #ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE /* maximum iterations of the active queue to move pages to inactive */
135 #define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
136 #endif
137
138 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
139 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
140 #endif
141
142 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
143 #define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
144 #endif
145
146 #ifndef VM_PAGEOUT_INACTIVE_RELIEF
147 #define VM_PAGEOUT_INACTIVE_RELIEF 50 /* minimum number of pages to move to the inactive q */
148 #endif
149
150 #ifndef VM_PAGE_LAUNDRY_MAX
151 #define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */
152 #endif /* VM_PAGE_LAUNDRY_MAX */
153
154 #ifndef VM_PAGEOUT_BURST_WAIT
155 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds */
156 #endif /* VM_PAGEOUT_BURST_WAIT */
157
158 #ifndef VM_PAGEOUT_EMPTY_WAIT
159 #define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
160 #endif /* VM_PAGEOUT_EMPTY_WAIT */
161
162 #ifndef VM_PAGEOUT_DEADLOCK_WAIT
163 #define VM_PAGEOUT_DEADLOCK_WAIT 300 /* milliseconds */
164 #endif /* VM_PAGEOUT_DEADLOCK_WAIT */
165
166 #ifndef VM_PAGEOUT_IDLE_WAIT
167 #define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
168 #endif /* VM_PAGEOUT_IDLE_WAIT */
169
170 #ifndef VM_PAGEOUT_SWAP_WAIT
171 #define VM_PAGEOUT_SWAP_WAIT 50 /* milliseconds */
172 #endif /* VM_PAGEOUT_SWAP_WAIT */
173
174 #ifndef VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED
175 #define VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED 1000 /* maximum pages considered before we issue a pressure event */
176 #endif /* VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED */
177
178 #ifndef VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS
179 #define VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS 5 /* seconds */
180 #endif /* VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS */
181
182 unsigned int vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
183 unsigned int vm_page_speculative_percentage = 5;
184
185 #ifndef VM_PAGE_SPECULATIVE_TARGET
186 #define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
187 #endif /* VM_PAGE_SPECULATIVE_TARGET */
188
189
190 #ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
191 #define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
192 #endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
193
194
195 /*
196 * To obtain a reasonable LRU approximation, the inactive queue
197 * needs to be large enough to give pages on it a chance to be
198 * referenced a second time. This macro defines the fraction
199 * of active+inactive pages that should be inactive.
200 * The pageout daemon uses it to update vm_page_inactive_target.
201 *
202 * If vm_page_free_count falls below vm_page_free_target and
203 * vm_page_inactive_count is below vm_page_inactive_target,
204 * then the pageout daemon starts running.
205 */
206
207 #ifndef VM_PAGE_INACTIVE_TARGET
208 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
209 #endif /* VM_PAGE_INACTIVE_TARGET */
210
211 /*
212 * Once the pageout daemon starts running, it keeps going
213 * until vm_page_free_count meets or exceeds vm_page_free_target.
214 */
215
216 #ifndef VM_PAGE_FREE_TARGET
217 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
218 #endif /* VM_PAGE_FREE_TARGET */
219
220
221 /*
222 * The pageout daemon always starts running once vm_page_free_count
223 * falls below vm_page_free_min.
224 */
225
226 #ifndef VM_PAGE_FREE_MIN
227 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
228 #endif /* VM_PAGE_FREE_MIN */
229
230 #define VM_PAGE_FREE_RESERVED_LIMIT 100
231 #define VM_PAGE_FREE_MIN_LIMIT 1500
232 #define VM_PAGE_FREE_TARGET_LIMIT 2000
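/*
 * Worked example (illustrative only, assuming a hypothetical machine with
 * 1,000,000 pageable pages and the default vm_page_speculative_percentage
 * of 5); the macros above evaluate to:
 *
 *	VM_PAGE_SPECULATIVE_TARGET(1000000)     = 1000000 / (100 / 5) = 50000
 *	VM_PAGE_INACTIVE_HEALTHY_LIMIT(1000000) = 1000000 / 200       = 5000
 *	VM_PAGE_INACTIVE_TARGET(1000000)        = 1000000 / 2         = 500000
 *	VM_PAGE_FREE_TARGET(1000000)            = 15 + 1000000 / 80   = 12515
 *	VM_PAGE_FREE_MIN(1000000)               = 10 + 1000000 / 100  = 10010
 *
 * The 1,000,000 figure is purely an assumption for illustration; the real
 * arguments are supplied at runtime from the vm_page_*_count globals.
 */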
233
234
235 /*
236 * When vm_page_free_count falls below vm_page_free_reserved,
237 * only vm-privileged threads can allocate pages. vm-privilege
238 * allows the pageout daemon and default pager (and any other
239 * associated threads needed for default pageout) to continue
240 * operation by dipping into the reserved pool of pages.
241 */
242
243 #ifndef VM_PAGE_FREE_RESERVED
244 #define VM_PAGE_FREE_RESERVED(n) \
245 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
246 #endif /* VM_PAGE_FREE_RESERVED */
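/*
 * Worked example (illustrative only): with VM_PAGE_LAUNDRY_MAX at its
 * default of 128, VM_PAGE_FREE_RESERVED(n) = (6 * 128) + n = 768 + n,
 * so a caller passing a hypothetical n of 4 would set aside 772 pages
 * for vm-privileged threads.
 */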
247
248 /*
249 * When we dequeue pages from the inactive list, they are
250 * reactivated (i.e., put back on the active queue) if referenced.
251 * However, it is possible to starve the free list if other
252 * processors are referencing pages faster than we can turn off
253 * the referenced bit. So we limit the number of reactivations
254 * we will make per call of vm_pageout_scan().
255 */
256 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
257 #ifndef VM_PAGE_REACTIVATE_LIMIT
258 #define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
259 #endif /* VM_PAGE_REACTIVATE_LIMIT */
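/*
 * Worked example (illustrative only): as defined above,
 * VM_PAGE_REACTIVATE_LIMIT(avail) is the larger of avail/20 and
 * VM_PAGE_REACTIVATE_LIMIT_MAX, e.g. for a hypothetical avail of
 * 1,000,000 pages it yields MAX(50000, 20000) = 50000 reactivations
 * per vm_pageout_scan() invocation.
 */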
260 #define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
261
262
263 extern boolean_t hibernate_cleaning_in_progress;
264
265 /*
266 * Exported variable used to broadcast the activation of the pageout scan.
267 * Working Set uses this to throttle its use of pmap removes. In this
268 * way, code which runs within memory in an uncontested context does
269 * not keep encountering soft faults.
270 */
271
272 unsigned int vm_pageout_scan_event_counter = 0;
273
274 /*
275 * Forward declarations for internal routines.
276 */
277 struct cq {
278 struct vm_pageout_queue *q;
279 void *current_chead;
280 char *scratch_buf;
281 };
282
283
284 #if VM_PRESSURE_EVENTS
285 void vm_pressure_thread(void);
286 #endif
287 static void vm_pageout_garbage_collect(int);
288 static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
289 static void vm_pageout_iothread_external(void);
290 static void vm_pageout_iothread_internal(struct cq *cq);
291 static void vm_pageout_adjust_io_throttles(struct vm_pageout_queue *, struct vm_pageout_queue *, boolean_t);
292
293 extern void vm_pageout_continue(void);
294 extern void vm_pageout_scan(void);
295
296 static thread_t vm_pageout_external_iothread = THREAD_NULL;
297 static thread_t vm_pageout_internal_iothread = THREAD_NULL;
298
299 unsigned int vm_pageout_reserved_internal = 0;
300 unsigned int vm_pageout_reserved_really = 0;
301
302 unsigned int vm_pageout_swap_wait = 0;
303 unsigned int vm_pageout_idle_wait = 0; /* milliseconds */
304 unsigned int vm_pageout_empty_wait = 0; /* milliseconds */
305 unsigned int vm_pageout_burst_wait = 0; /* milliseconds */
306 unsigned int vm_pageout_deadlock_wait = 0; /* milliseconds */
307 unsigned int vm_pageout_deadlock_relief = 0;
308 unsigned int vm_pageout_inactive_relief = 0;
309 unsigned int vm_pageout_burst_active_throttle = 0;
310 unsigned int vm_pageout_burst_inactive_throttle = 0;
311
312 int vm_upl_wait_for_pages = 0;
313
314
315 /*
316 * These variables record the pageout daemon's actions:
317 * how many pages it looks at and what happens to those pages.
318 * No locking needed because only one thread modifies the variables.
319 */
320
321 unsigned int vm_pageout_active = 0; /* debugging */
322 unsigned int vm_pageout_active_busy = 0; /* debugging */
323 unsigned int vm_pageout_inactive = 0; /* debugging */
324 unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
325 unsigned int vm_pageout_inactive_forced = 0; /* debugging */
326 unsigned int vm_pageout_inactive_nolock = 0; /* debugging */
327 unsigned int vm_pageout_inactive_avoid = 0; /* debugging */
328 unsigned int vm_pageout_inactive_busy = 0; /* debugging */
329 unsigned int vm_pageout_inactive_error = 0; /* debugging */
330 unsigned int vm_pageout_inactive_absent = 0; /* debugging */
331 unsigned int vm_pageout_inactive_notalive = 0; /* debugging */
332 unsigned int vm_pageout_inactive_used = 0; /* debugging */
333 unsigned int vm_pageout_cache_evicted = 0; /* debugging */
334 unsigned int vm_pageout_inactive_clean = 0; /* debugging */
335 unsigned int vm_pageout_speculative_clean = 0; /* debugging */
336
337 unsigned int vm_pageout_freed_from_cleaned = 0;
338 unsigned int vm_pageout_freed_from_speculative = 0;
339 unsigned int vm_pageout_freed_from_inactive_clean = 0;
340
341 unsigned int vm_pageout_enqueued_cleaned_from_inactive_clean = 0;
342 unsigned int vm_pageout_enqueued_cleaned_from_inactive_dirty = 0;
343
344 unsigned int vm_pageout_cleaned_reclaimed = 0; /* debugging; how many cleaned pages are reclaimed by the pageout scan */
345 unsigned int vm_pageout_cleaned_reactivated = 0; /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
346 unsigned int vm_pageout_cleaned_reference_reactivated = 0;
347 unsigned int vm_pageout_cleaned_volatile_reactivated = 0;
348 unsigned int vm_pageout_cleaned_fault_reactivated = 0;
349 unsigned int vm_pageout_cleaned_commit_reactivated = 0; /* debugging; how many cleaned pages are found to be referenced on commit (and are therefore reactivated) */
350 unsigned int vm_pageout_cleaned_busy = 0;
351 unsigned int vm_pageout_cleaned_nolock = 0;
352
353 unsigned int vm_pageout_inactive_dirty_internal = 0; /* debugging */
354 unsigned int vm_pageout_inactive_dirty_external = 0; /* debugging */
355 unsigned int vm_pageout_inactive_deactivated = 0; /* debugging */
356 unsigned int vm_pageout_inactive_anonymous = 0; /* debugging */
357 unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
358 unsigned int vm_pageout_purged_objects = 0; /* debugging */
359 unsigned int vm_stat_discard = 0; /* debugging */
360 unsigned int vm_stat_discard_sent = 0; /* debugging */
361 unsigned int vm_stat_discard_failure = 0; /* debugging */
362 unsigned int vm_stat_discard_throttle = 0; /* debugging */
363 unsigned int vm_pageout_reactivation_limit_exceeded = 0; /* debugging */
364 unsigned int vm_pageout_catch_ups = 0; /* debugging */
365 unsigned int vm_pageout_inactive_force_reclaim = 0; /* debugging */
366
367 unsigned int vm_pageout_scan_reclaimed_throttled = 0;
368 unsigned int vm_pageout_scan_active_throttled = 0;
369 unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
370 unsigned int vm_pageout_scan_inactive_throttled_external = 0;
371 unsigned int vm_pageout_scan_throttle = 0; /* debugging */
372 unsigned int vm_pageout_scan_burst_throttle = 0; /* debugging */
373 unsigned int vm_pageout_scan_empty_throttle = 0; /* debugging */
374 unsigned int vm_pageout_scan_swap_throttle = 0; /* debugging */
375 unsigned int vm_pageout_scan_deadlock_detected = 0; /* debugging */
376 unsigned int vm_pageout_scan_active_throttle_success = 0; /* debugging */
377 unsigned int vm_pageout_scan_inactive_throttle_success = 0; /* debugging */
378 unsigned int vm_pageout_inactive_external_forced_jetsam_count = 0; /* debugging */
379 unsigned int vm_page_speculative_count_drifts = 0;
380 unsigned int vm_page_speculative_count_drift_max = 0;
381
382
383 /*
384 * Backing store throttle when BS is exhausted
385 */
386 unsigned int vm_backing_store_low = 0;
387
388 unsigned int vm_pageout_out_of_line = 0;
389 unsigned int vm_pageout_in_place = 0;
390
391 unsigned int vm_page_steal_pageout_page = 0;
392
393 /*
394 * ENCRYPTED SWAP:
395 * counters and statistics...
396 */
397 unsigned long vm_page_decrypt_counter = 0;
398 unsigned long vm_page_decrypt_for_upl_counter = 0;
399 unsigned long vm_page_encrypt_counter = 0;
400 unsigned long vm_page_encrypt_abort_counter = 0;
401 unsigned long vm_page_encrypt_already_encrypted_counter = 0;
402 boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
403
404 struct vm_pageout_queue vm_pageout_queue_internal;
405 struct vm_pageout_queue vm_pageout_queue_external;
406
407 unsigned int vm_page_speculative_target = 0;
408
409 vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
410
411 boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;
412
413 #if DEVELOPMENT || DEBUG
414 unsigned long vm_cs_validated_resets = 0;
415 #endif
416
417 int vm_debug_events = 0;
418
419 #if CONFIG_MEMORYSTATUS
420 #if !CONFIG_JETSAM
421 extern boolean_t memorystatus_idle_exit_from_VM(void);
422 #endif
423 extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
424 extern void memorystatus_on_pageout_scan_end(void);
425 #endif
426
427 boolean_t vm_page_compressions_failing = FALSE;
428
429 /*
430 * Routine: vm_backing_store_disable
431 * Purpose:
432 * Suspend non-privileged threads wishing to extend
433 * backing store when we are low on backing store
434 * (Synchronized by caller)
435 */
436 void
437 vm_backing_store_disable(
438 boolean_t disable)
439 {
440 if(disable) {
441 vm_backing_store_low = 1;
442 } else {
443 if(vm_backing_store_low) {
444 vm_backing_store_low = 0;
445 thread_wakeup((event_t) &vm_backing_store_low);
446 }
447 }
448 }
449
450
451 #if MACH_CLUSTER_STATS
452 unsigned long vm_pageout_cluster_dirtied = 0;
453 unsigned long vm_pageout_cluster_cleaned = 0;
454 unsigned long vm_pageout_cluster_collisions = 0;
455 unsigned long vm_pageout_cluster_clusters = 0;
456 unsigned long vm_pageout_cluster_conversions = 0;
457 unsigned long vm_pageout_target_collisions = 0;
458 unsigned long vm_pageout_target_page_dirtied = 0;
459 unsigned long vm_pageout_target_page_freed = 0;
460 #define CLUSTER_STAT(clause) clause
461 #else /* MACH_CLUSTER_STATS */
462 #define CLUSTER_STAT(clause)
463 #endif /* MACH_CLUSTER_STATS */
464
465 /*
466 * Routine: vm_pageout_object_terminate
467 * Purpose:
468 * Destroy the pageout_object, and perform all of the
469 * required cleanup actions.
470 *
471 * In/Out conditions:
472 * The object must be locked, and will be returned locked.
473 */
474 void
475 vm_pageout_object_terminate(
476 vm_object_t object)
477 {
478 vm_object_t shadow_object;
479
480 /*
481 * Deal with the deallocation (last reference) of a pageout object
482 * (used for cleaning-in-place) by dropping the paging references/
483 * freeing pages in the original object.
484 */
485
486 assert(object->pageout);
487 shadow_object = object->shadow;
488 vm_object_lock(shadow_object);
489
490 while (!queue_empty(&object->memq)) {
491 vm_page_t p, m;
492 vm_object_offset_t offset;
493
494 p = (vm_page_t) queue_first(&object->memq);
495
496 assert(p->private);
497 assert(p->pageout);
498 p->pageout = FALSE;
499 assert(!p->cleaning);
500 assert(!p->laundry);
501
502 offset = p->offset;
503 VM_PAGE_FREE(p);
504 p = VM_PAGE_NULL;
505
506 m = vm_page_lookup(shadow_object,
507 offset + object->vo_shadow_offset);
508
509 if(m == VM_PAGE_NULL)
510 continue;
511
512 assert((m->dirty) || (m->precious) ||
513 (m->busy && m->cleaning));
514
515 /*
516 * Handle the trusted pager throttle.
517 * Also decrement the burst throttle (if external).
518 */
519 vm_page_lock_queues();
520 if (m->pageout_queue)
521 vm_pageout_throttle_up(m);
522
523 /*
524 * Handle the "target" page(s). These pages are to be freed if
525 * successfully cleaned. Target pages are always busy, and are
526 * wired exactly once. The initial target pages are not mapped,
527 * (so cannot be referenced or modified) but converted target
528 * pages may have been modified between the selection as an
529 * adjacent page and conversion to a target.
530 */
531 if (m->pageout) {
532 assert(m->busy);
533 assert(m->wire_count == 1);
534 m->cleaning = FALSE;
535 m->encrypted_cleaning = FALSE;
536 m->pageout = FALSE;
537 #if MACH_CLUSTER_STATS
538 if (m->wanted) vm_pageout_target_collisions++;
539 #endif
540 /*
541 * Revoke all access to the page. Since the object is
542 * locked, and the page is busy, this prevents the page
543 * from being dirtied after the pmap_disconnect() call
544 * returns.
545 *
546 * Since the page is left "dirty" but "not modified", we
547 * can detect whether the page was redirtied during
548 * pageout by checking the modify state.
549 */
550 if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
551 SET_PAGE_DIRTY(m, FALSE);
552 } else {
553 m->dirty = FALSE;
554 }
555
556 if (m->dirty) {
557 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
558 vm_page_unwire(m, TRUE); /* reactivates */
559 VM_STAT_INCR(reactivations);
560 PAGE_WAKEUP_DONE(m);
561 } else {
562 CLUSTER_STAT(vm_pageout_target_page_freed++;)
563 vm_page_free(m);/* clears busy, etc. */
564 }
565 vm_page_unlock_queues();
566 continue;
567 }
568 /*
569 * Handle the "adjacent" pages. These pages were cleaned in
570 * place, and should be left alone.
571 * If prep_pin_count is nonzero, then someone is using the
572 * page, so make it active.
573 */
574 if (!m->active && !m->inactive && !m->throttled && !m->private) {
575 if (m->reference)
576 vm_page_activate(m);
577 else
578 vm_page_deactivate(m);
579 }
580 if (m->overwriting) {
581 /*
582 * the (COPY_OUT_FROM == FALSE) request_page_list case
583 */
584 if (m->busy) {
585 /*
586 * We do not re-set m->dirty !
587 * The page was busy so no extraneous activity
588 * could have occurred. COPY_INTO is a read into the
589 * new pages. CLEAN_IN_PLACE does actually write
590 * out the pages but handling outside of this code
591 * will take care of resetting dirty. We clear the
592 * modify however for the Programmed I/O case.
593 */
594 pmap_clear_modify(m->phys_page);
595
596 m->busy = FALSE;
597 m->absent = FALSE;
598 } else {
599 /*
600 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
601 * Occurs when the original page was wired
602 * at the time of the list request
603 */
604 assert(VM_PAGE_WIRED(m));
605 vm_page_unwire(m, TRUE); /* reactivates */
606 }
607 m->overwriting = FALSE;
608 } else {
609 /*
610 * Set the dirty state according to whether or not the page was
611 * modified during the pageout. Note that we purposefully do
612 * NOT call pmap_clear_modify since the page is still mapped.
613 * If the page were to be dirtied between the 2 calls,
614 * this fact would be lost. This code is only necessary to
615 * maintain statistics, since the pmap module is always
616 * consulted if m->dirty is false.
617 */
618 #if MACH_CLUSTER_STATS
619 m->dirty = pmap_is_modified(m->phys_page);
620
621 if (m->dirty) vm_pageout_cluster_dirtied++;
622 else vm_pageout_cluster_cleaned++;
623 if (m->wanted) vm_pageout_cluster_collisions++;
624 #else
625 m->dirty = FALSE;
626 #endif
627 }
628 if (m->encrypted_cleaning == TRUE) {
629 m->encrypted_cleaning = FALSE;
630 m->busy = FALSE;
631 }
632 m->cleaning = FALSE;
633
634 /*
635 * Wakeup any thread waiting for the page to be un-cleaning.
636 */
637 PAGE_WAKEUP(m);
638 vm_page_unlock_queues();
639 }
640 /*
641 * Account for the paging reference taken in vm_paging_object_allocate.
642 */
643 vm_object_activity_end(shadow_object);
644 vm_object_unlock(shadow_object);
645
646 assert(object->ref_count == 0);
647 assert(object->paging_in_progress == 0);
648 assert(object->activity_in_progress == 0);
649 assert(object->resident_page_count == 0);
650 return;
651 }
652
653 /*
654 * Routine: vm_pageclean_setup
655 *
656 * Purpose: setup a page to be cleaned (made non-dirty), but not
657 * necessarily flushed from the VM page cache.
658 * This is accomplished by cleaning in place.
659 *
660 * The page must not be busy, and new_object
661 * must be locked.
662 *
663 */
664 void
665 vm_pageclean_setup(
666 vm_page_t m,
667 vm_page_t new_m,
668 vm_object_t new_object,
669 vm_object_offset_t new_offset)
670 {
671 assert(!m->busy);
672 #if 0
673 assert(!m->cleaning);
674 #endif
675
676 XPR(XPR_VM_PAGEOUT,
677 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
678 m->object, m->offset, m,
679 new_m, new_offset);
680
681 pmap_clear_modify(m->phys_page);
682
683 /*
684 * Mark original page as cleaning in place.
685 */
686 m->cleaning = TRUE;
687 SET_PAGE_DIRTY(m, FALSE);
688 m->precious = FALSE;
689
690 /*
691 * Convert the fictitious page to a private shadow of
692 * the real page.
693 */
694 assert(new_m->fictitious);
695 assert(new_m->phys_page == vm_page_fictitious_addr);
696 new_m->fictitious = FALSE;
697 new_m->private = TRUE;
698 new_m->pageout = TRUE;
699 new_m->phys_page = m->phys_page;
700
701 vm_page_lockspin_queues();
702 vm_page_wire(new_m);
703 vm_page_unlock_queues();
704
705 vm_page_insert(new_m, new_object, new_offset);
706 assert(!new_m->wanted);
707 new_m->busy = FALSE;
708 }
709
710 /*
711 * Routine: vm_pageout_initialize_page
712 * Purpose:
713 * Causes the specified page to be initialized in
714 * the appropriate memory object. This routine is used to push
715 * pages into a copy-object when they are modified in the
716 * permanent object.
717 *
718 * The page is moved to a temporary object and paged out.
719 *
720 * In/out conditions:
721 * The page in question must not be on any pageout queues.
722 * The object to which it belongs must be locked.
723 * The page must be busy, but not hold a paging reference.
724 *
725 * Implementation:
726 * Move this page to a completely new object.
727 */
728 void
729 vm_pageout_initialize_page(
730 vm_page_t m)
731 {
732 vm_object_t object;
733 vm_object_offset_t paging_offset;
734 memory_object_t pager;
735
736 XPR(XPR_VM_PAGEOUT,
737 "vm_pageout_initialize_page, page 0x%X\n",
738 m, 0, 0, 0, 0);
739 assert(m->busy);
740
741 /*
742 * Verify that we really want to clean this page
743 */
744 assert(!m->absent);
745 assert(!m->error);
746 assert(m->dirty);
747
748 /*
749 * Create a paging reference to let us play with the object.
750 */
751 object = m->object;
752 paging_offset = m->offset + object->paging_offset;
753
754 if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
755 VM_PAGE_FREE(m);
756 panic("reservation without pageout?"); /* alan */
757 vm_object_unlock(object);
758
759 return;
760 }
761
762 /*
763 * If there's no pager, then we can't clean the page. This should
764 * never happen since this should be a copy object and therefore not
765 * an external object, so the pager should always be there.
766 */
767
768 pager = object->pager;
769
770 if (pager == MEMORY_OBJECT_NULL) {
771 VM_PAGE_FREE(m);
772 panic("missing pager for copy object");
773 return;
774 }
775
776 /*
777 * set the page for future call to vm_fault_list_request
778 */
779 pmap_clear_modify(m->phys_page);
780 SET_PAGE_DIRTY(m, FALSE);
781 m->pageout = TRUE;
782
783 /*
784 * keep the object from collapsing or terminating
785 */
786 vm_object_paging_begin(object);
787 vm_object_unlock(object);
788
789 /*
790 * Write the data to its pager.
791 * Note that the data is passed by naming the new object,
792 * not a virtual address; the pager interface has been
793 * manipulated to use the "internal memory" data type.
794 * [The object reference from its allocation is donated
795 * to the eventual recipient.]
796 */
797 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
798
799 vm_object_lock(object);
800 vm_object_paging_end(object);
801 }
802
803 #if MACH_CLUSTER_STATS
804 #define MAXCLUSTERPAGES 16
805 struct {
806 unsigned long pages_in_cluster;
807 unsigned long pages_at_higher_offsets;
808 unsigned long pages_at_lower_offsets;
809 } cluster_stats[MAXCLUSTERPAGES];
810 #endif /* MACH_CLUSTER_STATS */
811
812
813 /*
814 * vm_pageout_cluster:
815 *
816 * Given a page, queue it to the appropriate I/O thread,
817 * which will page it out and attempt to clean adjacent pages
818 * in the same operation.
819 *
820 * The object and queues must be locked. We will take a
821 * paging reference to prevent deallocation or collapse when we
822 * release the object lock back at the call site. The I/O thread
823 * is responsible for consuming this reference.
824 *
825 * The page must not be on any pageout queue.
826 */
827
828 void
829 vm_pageout_cluster(vm_page_t m, boolean_t pageout)
830 {
831 vm_object_t object = m->object;
832 struct vm_pageout_queue *q;
833
834
835 XPR(XPR_VM_PAGEOUT,
836 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
837 object, m->offset, m, 0, 0);
838
839 VM_PAGE_CHECK(m);
840 #if DEBUG
841 lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
842 #endif
843 vm_object_lock_assert_exclusive(object);
844
845 /*
846 * Only a certain kind of page is appreciated here.
847 */
848 assert((m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
849 assert(!m->cleaning && !m->pageout && !m->laundry);
850 #ifndef CONFIG_FREEZE
851 assert(!m->inactive && !m->active);
852 assert(!m->throttled);
853 #endif
854
855 /*
856 * protect the object from collapse or termination
857 */
858 vm_object_activity_begin(object);
859
860 m->pageout = pageout;
861
862 if (object->internal == TRUE) {
863 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
864 m->busy = TRUE;
865
866 q = &vm_pageout_queue_internal;
867 } else
868 q = &vm_pageout_queue_external;
869
870 /*
871 * pgo_laundry count is tied to the laundry bit
872 */
873 m->laundry = TRUE;
874 q->pgo_laundry++;
875
876 m->pageout_queue = TRUE;
877 queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
878
879 if (q->pgo_idle == TRUE) {
880 q->pgo_idle = FALSE;
881 thread_wakeup((event_t) &q->pgo_pending);
882 }
883 VM_PAGE_CHECK(m);
884 }
885
886
887 unsigned long vm_pageout_throttle_up_count = 0;
888
889 /*
890 * A page is back from laundry or we are stealing it back from
891 * the laundering state. See if there are some pages waiting to
892 * go to laundry and if we can let some of them go now.
893 *
894 * Object and page queues must be locked.
895 */
896 void
897 vm_pageout_throttle_up(
898 vm_page_t m)
899 {
900 struct vm_pageout_queue *q;
901
902 assert(m->object != VM_OBJECT_NULL);
903 assert(m->object != kernel_object);
904
905 #if DEBUG
906 lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
907 vm_object_lock_assert_exclusive(m->object);
908 #endif
909
910 vm_pageout_throttle_up_count++;
911
912 if (m->object->internal == TRUE)
913 q = &vm_pageout_queue_internal;
914 else
915 q = &vm_pageout_queue_external;
916
917 if (m->pageout_queue == TRUE) {
918
919 queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
920 m->pageout_queue = FALSE;
921
922 m->pageq.next = NULL;
923 m->pageq.prev = NULL;
924
925 vm_object_activity_end(m->object);
926 }
927 if (m->laundry == TRUE) {
928
929 m->laundry = FALSE;
930 q->pgo_laundry--;
931
932 if (q->pgo_throttled == TRUE) {
933 q->pgo_throttled = FALSE;
934 thread_wakeup((event_t) &q->pgo_laundry);
935 }
936 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
937 q->pgo_draining = FALSE;
938 thread_wakeup((event_t) (&q->pgo_laundry+1));
939 }
940 }
941 }
942
943
944 static void
945 vm_pageout_throttle_up_batch(
946 struct vm_pageout_queue *q,
947 int batch_cnt)
948 {
949 #if DEBUG
950 lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
951 #endif
952
953 vm_pageout_throttle_up_count += batch_cnt;
954
955 q->pgo_laundry -= batch_cnt;
956
957 if (q->pgo_throttled == TRUE) {
958 q->pgo_throttled = FALSE;
959 thread_wakeup((event_t) &q->pgo_laundry);
960 }
961 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
962 q->pgo_draining = FALSE;
963 thread_wakeup((event_t) (&q->pgo_laundry+1));
964 }
965 }
966
967
968
969 /*
970 * VM memory pressure monitoring.
971 *
972 * vm_pageout_scan() keeps track of the number of pages it considers and
973 * reclaims, in the currently active vm_pageout_stats[vm_pageout_stat_now].
974 *
975 * compute_memory_pressure() is called every second from compute_averages()
976 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
977 * of reclaimed pages in a new vm_pageout_stats[] bucket.
978 *
979 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
980 * The caller provides the number of seconds ("nsecs") worth of statistics
981 * it wants, up to 30 seconds.
982 * It computes the number of pages reclaimed in the past "nsecs" seconds and
983 * also returns the number of pages the system still needs to reclaim at this
984 * moment in time.
985 */
986 #define VM_PAGEOUT_STAT_SIZE 31
987 struct vm_pageout_stat {
988 unsigned int considered;
989 unsigned int reclaimed;
990 } vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
991 unsigned int vm_pageout_stat_now = 0;
992 unsigned int vm_memory_pressure = 0;
993
994 #define VM_PAGEOUT_STAT_BEFORE(i) \
995 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
996 #define VM_PAGEOUT_STAT_AFTER(i) \
997 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
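/*
 * Worked example (illustrative only) of the circular indexing done by the
 * two macros above over the VM_PAGEOUT_STAT_SIZE (31) one-second buckets
 * in vm_pageout_stats[]:
 *
 *	VM_PAGEOUT_STAT_BEFORE(0)  == 30	(wraps back to the last bucket)
 *	VM_PAGEOUT_STAT_BEFORE(17) == 16
 *	VM_PAGEOUT_STAT_AFTER(30)  == 0		(wraps forward to the first bucket)
 *
 * compute_memory_pressure() advances vm_pageout_stat_now once per second
 * using _AFTER(), while mach_vm_pressure_monitor() walks backwards with
 * _BEFORE() to sum the pages reclaimed over the last "nsecs_monitored"
 * seconds.
 */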
998
999 /*
1000 * Called from compute_averages().
1001 */
1002 void
1003 compute_memory_pressure(
1004 __unused void *arg)
1005 {
1006 unsigned int vm_pageout_next;
1007
1008 vm_memory_pressure =
1009 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
1010
1011 commpage_set_memory_pressure( vm_memory_pressure );
1012
1013 /* move "now" forward */
1014 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
1015 vm_pageout_stats[vm_pageout_next].considered = 0;
1016 vm_pageout_stats[vm_pageout_next].reclaimed = 0;
1017 vm_pageout_stat_now = vm_pageout_next;
1018 }
1019
1020
1021 /*
1022 * IMPORTANT
1023 * mach_vm_ctl_page_free_wanted() is called indirectly, via
1024 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
1025 * it must be safe in the restricted stackshot context. Locks and/or
1026 * blocking are not allowable.
1027 */
1028 unsigned int
1029 mach_vm_ctl_page_free_wanted(void)
1030 {
1031 unsigned int page_free_target, page_free_count, page_free_wanted;
1032
1033 page_free_target = vm_page_free_target;
1034 page_free_count = vm_page_free_count;
1035 if (page_free_target > page_free_count) {
1036 page_free_wanted = page_free_target - page_free_count;
1037 } else {
1038 page_free_wanted = 0;
1039 }
1040
1041 return page_free_wanted;
1042 }
1043
1044
1045 /*
1046 * IMPORTANT:
1047 * mach_vm_pressure_monitor() is called when taking a stackshot, with
1048 * wait_for_pressure FALSE, so that code path must remain safe in the
1049 * restricted stackshot context. No blocking or locks are allowable
1050 * on that code path.
1051 */
1052
1053 kern_return_t
1054 mach_vm_pressure_monitor(
1055 boolean_t wait_for_pressure,
1056 unsigned int nsecs_monitored,
1057 unsigned int *pages_reclaimed_p,
1058 unsigned int *pages_wanted_p)
1059 {
1060 wait_result_t wr;
1061 unsigned int vm_pageout_then, vm_pageout_now;
1062 unsigned int pages_reclaimed;
1063
1064 /*
1065 * We don't take the vm_page_queue_lock here because we don't want
1066 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
1067 * thread when it's trying to reclaim memory. We don't need fully
1068 * accurate monitoring anyway...
1069 */
1070
1071 if (wait_for_pressure) {
1072 /* wait until there's memory pressure */
1073 while (vm_page_free_count >= vm_page_free_target) {
1074 wr = assert_wait((event_t) &vm_page_free_wanted,
1075 THREAD_INTERRUPTIBLE);
1076 if (wr == THREAD_WAITING) {
1077 wr = thread_block(THREAD_CONTINUE_NULL);
1078 }
1079 if (wr == THREAD_INTERRUPTED) {
1080 return KERN_ABORTED;
1081 }
1082 if (wr == THREAD_AWAKENED) {
1083 /*
1084 * The memory pressure might have already
1085 * been relieved but let's not block again
1086 * and let's report that there was memory
1087 * pressure at some point.
1088 */
1089 break;
1090 }
1091 }
1092 }
1093
1094 /* provide the number of pages the system wants to reclaim */
1095 if (pages_wanted_p != NULL) {
1096 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1097 }
1098
1099 if (pages_reclaimed_p == NULL) {
1100 return KERN_SUCCESS;
1101 }
1102
1103 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1104 do {
1105 vm_pageout_now = vm_pageout_stat_now;
1106 pages_reclaimed = 0;
1107 for (vm_pageout_then =
1108 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1109 vm_pageout_then != vm_pageout_now &&
1110 nsecs_monitored-- != 0;
1111 vm_pageout_then =
1112 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1113 pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
1114 }
1115 } while (vm_pageout_now != vm_pageout_stat_now);
1116 *pages_reclaimed_p = pages_reclaimed;
1117
1118 return KERN_SUCCESS;
1119 }
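/*
 * Minimal usage sketch for mach_vm_pressure_monitor(), illustrative only
 * and not compiled: a hypothetical caller samples the last 10 seconds of
 * reclaim activity without blocking (wait_for_pressure == FALSE), which is
 * the same non-blocking mode the stackshot path described above relies on.
 */
#if 0
static void
vm_pressure_monitor_example(void)
{
	unsigned int	pages_reclaimed = 0;
	unsigned int	pages_wanted = 0;
	kern_return_t	kr;

	kr = mach_vm_pressure_monitor(FALSE,		/* don't block for pressure */
				      10,		/* seconds of history to sum */
				      &pages_reclaimed,
				      &pages_wanted);
	if (kr == KERN_SUCCESS)
		printf("reclaimed %u pages in the last 10s, still want %u\n",
		       pages_reclaimed, pages_wanted);
}
#endif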
1120
1121
1122
1123 /*
1124 * function in BSD to apply I/O throttle to the pageout thread
1125 */
1126 extern void vm_pageout_io_throttle(void);
1127
1128
1129 #if LATENCY_JETSAM
1130 boolean_t jlp_init = FALSE;
1131 uint64_t jlp_time = 0, jlp_current = 0;
1132 struct vm_page jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
1133 unsigned int latency_jetsam_wakeup = 0;
1134 #endif /* LATENCY_JETSAM */
1135
1136 /*
1137 * Page States: Used below to maintain the page state
1138 * before it's removed from its Q. This saved state
1139 * helps us do the right accounting in certain cases
1140 */
1141 #define PAGE_STATE_SPECULATIVE 1
1142 #define PAGE_STATE_ANONYMOUS 2
1143 #define PAGE_STATE_INACTIVE 3
1144 #define PAGE_STATE_INACTIVE_FIRST 4
1145 #define PAGE_STATE_CLEAN 5
1146
1147
1148 #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
1149 MACRO_BEGIN \
1150 /* \
1151 * If a "reusable" page somehow made it back into \
1152 * the active queue, it's been re-used and is not \
1153 * quite re-usable. \
1154 * If the VM object was "all_reusable", consider it \
1155 * as "all re-used" instead of converting it to \
1156 * "partially re-used", which could be expensive. \
1157 */ \
1158 if ((m)->reusable || \
1159 (m)->object->all_reusable) { \
1160 vm_object_reuse_pages((m)->object, \
1161 (m)->offset, \
1162 (m)->offset + PAGE_SIZE_64, \
1163 FALSE); \
1164 } \
1165 MACRO_END
1166
1167
1168 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64
1169 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
1170
1171 #define FCS_IDLE 0
1172 #define FCS_DELAYED 1
1173 #define FCS_DEADLOCK_DETECTED 2
1174
1175 struct flow_control {
1176 int state;
1177 mach_timespec_t ts;
1178 };
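/*
 * Summary (for clarity) of how vm_pageout_scan() below drives this state
 * machine when the default (internal) pageout queue is throttled:
 *
 *	FCS_IDLE		arm a timer vm_pageout_deadlock_wait ms out
 *				and move to FCS_DELAYED
 *	FCS_DELAYED		if that timer expires while the queue is still
 *				throttled, assume a potential deadlock: compute
 *				vm_pageout_deadlock_target and move to
 *				FCS_DEADLOCK_DETECTED
 *	FCS_DEADLOCK_DETECTED	keep reclaiming pages (consider_inactive) while
 *				vm_pageout_deadlock_target remains non-zero
 */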
1179
1180 uint32_t vm_pageout_considered_page = 0;
1181 uint32_t vm_page_filecache_min = 0;
1182
1183 #define VM_PAGE_FILECACHE_MIN 50000
1184 #define ANONS_GRABBED_LIMIT 2
1185
1186 /*
1187 * vm_pageout_scan does the dirty work for the pageout daemon.
1188 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
1189 * held and vm_page_free_wanted == 0.
1190 */
1191 void
1192 vm_pageout_scan(void)
1193 {
1194 unsigned int loop_count = 0;
1195 unsigned int inactive_burst_count = 0;
1196 unsigned int active_burst_count = 0;
1197 unsigned int reactivated_this_call;
1198 unsigned int reactivate_limit;
1199 vm_page_t local_freeq = NULL;
1200 int local_freed = 0;
1201 int delayed_unlock;
1202 int delayed_unlock_limit = 0;
1203 int refmod_state = 0;
1204 int vm_pageout_deadlock_target = 0;
1205 struct vm_pageout_queue *iq;
1206 struct vm_pageout_queue *eq;
1207 struct vm_speculative_age_q *sq;
1208 struct flow_control flow_control = { 0, { 0, 0 } };
1209 boolean_t inactive_throttled = FALSE;
1210 boolean_t try_failed;
1211 mach_timespec_t ts;
1212 unsigned int msecs = 0;
1213 vm_object_t object;
1214 vm_object_t last_object_tried;
1215 uint32_t catch_up_count = 0;
1216 uint32_t inactive_reclaim_run;
1217 boolean_t forced_reclaim;
1218 boolean_t exceeded_burst_throttle;
1219 boolean_t grab_anonymous = FALSE;
1220 boolean_t force_anonymous = FALSE;
1221 int anons_grabbed = 0;
1222 int page_prev_state = 0;
1223 int cache_evict_throttle = 0;
1224 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
1225 vm_pressure_level_t pressure_level;
1226
1227 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
1228 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1229 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
1230
1231 #if LATENCY_JETSAM
1232 if (jlp_init == FALSE) {
1233 int i=0;
1234 vm_page_t jlp;
1235 for(; i < NUM_OF_JETSAM_LATENCY_TOKENS; i++) {
1236 jlp = &jetsam_latency_page[i];
1237 jlp->fictitious = TRUE;
1238 jlp->offset = 0;
1239
1240 }
1241 jlp = &jetsam_latency_page[0];
1242 queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);
1243 jlp->active = TRUE;
1244
1245 jlp->offset = mach_absolute_time();
1246 jlp_time = jlp->offset;
1247 jlp_current++;
1248 jlp_init = TRUE;
1249 }
1250 #endif /* LATENCY_JETSAM */
1251
1252 flow_control.state = FCS_IDLE;
1253 iq = &vm_pageout_queue_internal;
1254 eq = &vm_pageout_queue_external;
1255 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1256
1257
1258 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1259
1260
1261 vm_page_lock_queues();
1262 delayed_unlock = 1; /* must be nonzero if Qs are locked, 0 if unlocked */
1263
1264 /*
1265 * Calculate the max number of referenced pages on the inactive
1266 * queue that we will reactivate.
1267 */
1268 reactivated_this_call = 0;
1269 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
1270 vm_page_inactive_count);
1271 inactive_reclaim_run = 0;
1272
1273 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
1274
1275 /*
1276 * We want to gradually dribble pages from the active queue
1277 * to the inactive queue. If we let the inactive queue get
1278 * very small, and then suddenly dump many pages into it,
1279 * those pages won't get a sufficient chance to be referenced
1280 * before we start taking them from the inactive queue.
1281 *
1282 * We must limit the rate at which we send pages to the pagers
1283 * so that we don't tie up too many pages in the I/O queues.
1284 * We implement a throttling mechanism using the laundry count
1285 * to limit the number of pages outstanding to the default
1286 * and external pagers. We can bypass the throttles and look
1287 * for clean pages if the pageout queues don't drain in a timely
1288 * fashion since this may indicate that the pageout paths are
1289 * stalled waiting for memory, which only we can provide.
1290 */
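/*
 * Concretely, the throttling described above is parameterized by the
 * tunables defined at the top of this file: the waits taken below use
 * vm_pageout_burst_wait, vm_pageout_empty_wait, vm_pageout_swap_wait,
 * vm_pageout_deadlock_wait and vm_pageout_idle_wait (all in milliseconds),
 * and the per-queue laundry counts are presumably bounded by
 * VM_PAGE_LAUNDRY_MAX.
 */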
1291
1292
1293 Restart:
1294 assert(delayed_unlock!=0);
1295
1296 /*
1297 * Recalculate vm_page_inactive_target.
1298 */
1299 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1300 vm_page_inactive_count +
1301 vm_page_speculative_count);
1302
1303 vm_page_anonymous_min = vm_page_inactive_target / 20;
1304
1305
1306 /*
1307 * don't want to wake the pageout_scan thread up every time we fall below
1308 * the targets... set a low water mark at 0.25% below the target
1309 */
1310 vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
1311
1312 if (vm_page_speculative_percentage > 50)
1313 vm_page_speculative_percentage = 50;
1314 else if (vm_page_speculative_percentage <= 0)
1315 vm_page_speculative_percentage = 1;
1316
1317 vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1318 vm_page_inactive_count);
1319
1320 object = NULL;
1321 last_object_tried = NULL;
1322 try_failed = FALSE;
1323
1324 if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
1325 catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
1326 else
1327 catch_up_count = 0;
1328
1329 for (;;) {
1330 vm_page_t m;
1331
1332 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
1333
1334 if (delayed_unlock == 0) {
1335 vm_page_lock_queues();
1336 delayed_unlock = 1;
1337 }
1338 if (vm_upl_wait_for_pages < 0)
1339 vm_upl_wait_for_pages = 0;
1340
1341 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
1342
1343 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
1344 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
1345
1346 /*
1347 * Move pages from active to inactive if we're below the target
1348 */
1349 /* if we are trying to make clean, we need to make sure we actually have inactive - mj */
1350 if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
1351 goto done_moving_active_pages;
1352
1353 if (object != NULL) {
1354 vm_object_unlock(object);
1355 object = NULL;
1356 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1357 }
1358 /*
1359 * Don't sweep through active queue more than the throttle
1360 * which should be kept relatively low
1361 */
1362 active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);
1363
1364 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
1365 vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);
1366
1367 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
1368 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1369 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
1370 memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);
1371
1372
1373 while (!queue_empty(&vm_page_queue_active) && active_burst_count--) {
1374
1375 vm_pageout_active++;
1376
1377 m = (vm_page_t) queue_first(&vm_page_queue_active);
1378
1379 assert(m->active && !m->inactive);
1380 assert(!m->laundry);
1381 assert(m->object != kernel_object);
1382 assert(m->phys_page != vm_page_guard_addr);
1383
1384 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1385
1386 #if LATENCY_JETSAM
1387 if (m->fictitious) {
1388 const uint32_t FREE_TARGET_MULTIPLIER = 2;
1389
1390 uint64_t now = mach_absolute_time();
1391 uint64_t delta = now - m->offset;
1392 clock_sec_t jl_secs = 0;
1393 clock_usec_t jl_usecs = 0;
1394 boolean_t issue_jetsam = FALSE;
1395
1396 absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
1397 jl_usecs += jl_secs * USEC_PER_SEC;
1398
1399 /* Jetsam only if the token hasn't aged sufficiently and the free count is close to the target (avoiding spurious triggers) */
1400 if ((jl_usecs <= JETSAM_AGE_NOTIFY_CRITICAL) && (vm_page_free_count < (FREE_TARGET_MULTIPLIER * vm_page_free_target))) {
1401 issue_jetsam = TRUE;
1402 }
1403
1404 VM_DEBUG_EVENT(vm_pageout_page_token, VM_PAGEOUT_PAGE_TOKEN, DBG_FUNC_NONE,
1405 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, jl_usecs);
1406
1407 m->offset = 0;
1408 queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
1409 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1410
1411 m->offset = now;
1412 jlp_time = now;
1413
1414 if (issue_jetsam) {
1415 vm_page_unlock_queues();
1416
1417 if (local_freeq) {
1418 vm_page_free_list(local_freeq, TRUE);
1419 local_freeq = NULL;
1420 local_freed = 0;
1421 }
1422
1423 VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
1424 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, 0);
1425
1426 assert_wait_timeout(&latency_jetsam_wakeup, THREAD_INTERRUPTIBLE, 10 /* msecs */, 1000*NSEC_PER_USEC);
1427 /* Kill the top process asynchronously */
1428 memorystatus_kill_on_VM_page_shortage(TRUE);
1429 thread_block(THREAD_CONTINUE_NULL);
1430
1431 VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
1432
1433 vm_page_lock_queues();
1434 }
1435 } else {
1436 #endif /* LATENCY_JETSAM */
1437 /*
1438 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
1439 *
1440 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
1441 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
1442 * new reference happens. If no further references happen on the page after that remote TLB flushes
1443 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
1444 * by pageout_scan, which is just fine since the last reference would have happened quite far
1445 * in the past (TLB caches don't hang around for very long), and of course could just as easily
1446 * have happened before we moved the page
1447 */
1448 pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1449
1450 /*
1451 * The page might be absent or busy,
1452 * but vm_page_deactivate can handle that.
1453 * FALSE indicates that we don't want a H/W clear reference
1454 */
1455 vm_page_deactivate_internal(m, FALSE);
1456
1457 if (delayed_unlock++ > delayed_unlock_limit) {
1458
1459 if (local_freeq) {
1460 vm_page_unlock_queues();
1461
1462 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1463 vm_page_free_count, local_freed, delayed_unlock_limit, 1);
1464
1465 vm_page_free_list(local_freeq, TRUE);
1466
1467 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1468 vm_page_free_count, 0, 0, 1);
1469
1470 local_freeq = NULL;
1471 local_freed = 0;
1472 vm_page_lock_queues();
1473 } else
1474 lck_mtx_yield(&vm_page_queue_lock);
1475
1476 delayed_unlock = 1;
1477
1478 /*
1479 * continue the while loop processing
1480 * the active queue... need to hold
1481 * the page queues lock
1482 */
1483 }
1484 #if LATENCY_JETSAM
1485 }
1486 #endif /* LATENCY_JETSAM */
1487 }
1488
1489 VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
1490 vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
1491 memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_END);
1492
1493 /**********************************************************************
1494 * above this point we're playing with the active queue
1495 * below this point we're playing with the throttling mechanisms
1496 * and the inactive queue
1497 **********************************************************************/
1498
1499 done_moving_active_pages:
1500
1501 if (vm_page_free_count + local_freed >= vm_page_free_target) {
1502 if (object != NULL) {
1503 vm_object_unlock(object);
1504 object = NULL;
1505 }
1506 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1507
1508 if (local_freeq) {
1509 vm_page_unlock_queues();
1510
1511 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1512 vm_page_free_count, local_freed, delayed_unlock_limit, 2);
1513
1514 vm_page_free_list(local_freeq, TRUE);
1515
1516 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1517 vm_page_free_count, local_freed, 0, 2);
1518
1519 local_freeq = NULL;
1520 local_freed = 0;
1521 vm_page_lock_queues();
1522 }
1523 /*
1524 * make sure the pageout I/O threads are running
1525 * throttled in case there are still requests
1526 * in the laundry... since we have met our targets
1527 * we don't need the laundry to be cleaned in a timely
1528 * fashion... so let's avoid interfering with foreground
1529 * activity
1530 */
1531 vm_pageout_adjust_io_throttles(iq, eq, TRUE);
1532
1533 /*
1534 * recalculate vm_page_inactive_target
1535 */
1536 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1537 vm_page_inactive_count +
1538 vm_page_speculative_count);
1539 if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
1540 !queue_empty(&vm_page_queue_active)) {
1541 /*
1542 * inactive target still not met... keep going
1543 * until we get the queues balanced...
1544 */
1545 continue;
1546 }
1547 lck_mtx_lock(&vm_page_queue_free_lock);
1548
1549 if ((vm_page_free_count >= vm_page_free_target) &&
1550 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
1551 /*
1552 * done - we have met our target *and*
1553 * there is no one waiting for a page.
1554 */
1555 return_from_scan:
1556 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
1557
1558 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
1559 vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
1560 VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
1561 vm_pageout_speculative_clean, vm_pageout_inactive_clean,
1562 vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
1563
1564 return;
1565 }
1566 lck_mtx_unlock(&vm_page_queue_free_lock);
1567 }
1568
1569 /*
1570 * Before anything, we check if we have any ripe volatile
1571 * objects around. If so, try to purge the first object.
1572 * If the purge fails, fall through to reclaim a page instead.
1573 * If the purge succeeds, go back to the top and reevaluate
1574 * the new memory situation.
1575 */
1576 pressure_level = memorystatus_vm_pressure_level;
1577 assert (available_for_purge>=0);
1578
1579 if (available_for_purge
1580 || pressure_level > kVMPressureNormal
1581 ) {
1582 int force_purge;
1583
1584 if (object != NULL) {
1585 vm_object_unlock(object);
1586 object = NULL;
1587 }
1588
1589 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1590 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
1591
1592 force_purge = 0; /* no force-purging */
1593 if (pressure_level >= kVMPressureCritical) {
1594 force_purge = memorystatus_purge_on_critical;
1595 } else if (pressure_level >= kVMPressureUrgent) {
1596 force_purge = memorystatus_purge_on_urgent;
1597 } else if (pressure_level >= kVMPressureWarning) {
1598 force_purge = memorystatus_purge_on_warning;
1599 } else {
1600 force_purge = 0;
1601 }
1602 if (vm_purgeable_object_purge_one(force_purge)) {
1603
1604 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1605 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1606 continue;
1607 }
1608 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
1609 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1610 }
1611 if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
1612 /*
1613 * try to pull pages from the aging bins...
1614 * see vm_page.h for an explanation of how
1615 * this mechanism works
1616 */
1617 struct vm_speculative_age_q *aq;
1618 mach_timespec_t ts_fully_aged;
1619 boolean_t can_steal = FALSE;
1620 int num_scanned_queues;
1621
1622 aq = &vm_page_queue_speculative[speculative_steal_index];
1623
1624 num_scanned_queues = 0;
1625 while (queue_empty(&aq->age_q) &&
1626 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1627
1628 speculative_steal_index++;
1629
1630 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
1631 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1632
1633 aq = &vm_page_queue_speculative[speculative_steal_index];
1634 }
1635
1636 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
1637 /*
1638 * XXX We've scanned all the speculative
1639 * queues but still haven't found one
1640 * that is not empty, even though
1641 * vm_page_speculative_count is not 0.
1642 *
1643 * report the anomaly...
1644 */
1645 printf("vm_pageout_scan: "
1646 "all speculative queues empty "
1647 "but count=%d. Re-adjusting.\n",
1648 vm_page_speculative_count);
1649 if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
1650 vm_page_speculative_count_drift_max = vm_page_speculative_count;
1651 vm_page_speculative_count_drifts++;
1652 #if 6553678
1653 Debugger("vm_pageout_scan: no speculative pages");
1654 #endif
1655 /* readjust... */
1656 vm_page_speculative_count = 0;
1657 /* ... and continue */
1658 continue;
1659 }
1660
1661 if (vm_page_speculative_count > vm_page_speculative_target)
1662 can_steal = TRUE;
1663 else {
1664 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
1665 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
1666 * 1000 * NSEC_PER_USEC;
1667
1668 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
1669
1670 clock_sec_t sec;
1671 clock_nsec_t nsec;
1672 clock_get_system_nanotime(&sec, &nsec);
1673 ts.tv_sec = (unsigned int) sec;
1674 ts.tv_nsec = nsec;
1675
1676 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
1677 can_steal = TRUE;
1678 }
1679 if (can_steal == TRUE)
1680 vm_page_speculate_ageit(aq);
1681 }
1682 if (queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
1683 int pages_evicted;
1684
1685 if (object != NULL) {
1686 vm_object_unlock(object);
1687 object = NULL;
1688 }
1689 pages_evicted = vm_object_cache_evict(100, 10);
1690
1691 if (pages_evicted) {
1692
1693 vm_pageout_cache_evicted += pages_evicted;
1694
1695 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
1696 vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
1697 memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
1698
1699 /*
1700 * we just freed up to 100 pages,
1701 * so go back to the top of the main loop
1702 * and re-evaluate the memory situation
1703 */
1704 continue;
1705 } else
1706 cache_evict_throttle = 100;
1707 }
1708 if (cache_evict_throttle)
1709 cache_evict_throttle--;
1710
1711
1712 exceeded_burst_throttle = FALSE;
1713 /*
1714 * Sometimes we have to pause:
1715 * 1) No inactive pages - nothing to do.
1716 * 2) Loop control - no acceptable pages found on the inactive queue
1717 * within the last vm_pageout_burst_inactive_throttle iterations
1718 * 3) Flow control - default pageout queue is full
1719 */
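		/*
		 * Added summary of the checks below: case 1 sleeps for
		 * vm_pageout_empty_wait, case 2 for vm_pageout_burst_wait, the
		 * swap-throttle check for vm_pageout_swap_wait, and the flow
		 * control state machine selects vm_pageout_deadlock_wait or
		 * vm_pageout_idle_wait before falling into vm_pageout_scan_delay.
		 */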
1720 if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_anonymous) && queue_empty(&sq->age_q)) {
1721 vm_pageout_scan_empty_throttle++;
1722 msecs = vm_pageout_empty_wait;
1723 goto vm_pageout_scan_delay;
1724
1725 } else if (inactive_burst_count >=
1726 MIN(vm_pageout_burst_inactive_throttle,
1727 (vm_page_inactive_count +
1728 vm_page_speculative_count))) {
1729 vm_pageout_scan_burst_throttle++;
1730 msecs = vm_pageout_burst_wait;
1731
1732 exceeded_burst_throttle = TRUE;
1733 goto vm_pageout_scan_delay;
1734
1735 } else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
1736 VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
1737 vm_pageout_scan_swap_throttle++;
1738 msecs = vm_pageout_swap_wait;
1739 goto vm_pageout_scan_delay;
1740
1741 } else if (VM_PAGE_Q_THROTTLED(iq) &&
1742 VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
1743 clock_sec_t sec;
1744 clock_nsec_t nsec;
1745
1746 switch (flow_control.state) {
1747
1748 case FCS_IDLE:
1749 if ((vm_page_free_count + local_freed) < vm_page_free_target) {
1750
1751 if (vm_page_pageable_external_count > vm_page_filecache_min && !queue_empty(&vm_page_queue_inactive)) {
1752 anons_grabbed = ANONS_GRABBED_LIMIT;
1753 goto consider_inactive;
1754 }
1755 if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
1756 continue;
1757 }
1758 reset_deadlock_timer:
1759 ts.tv_sec = vm_pageout_deadlock_wait / 1000;
1760 ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
1761 clock_get_system_nanotime(&sec, &nsec);
1762 flow_control.ts.tv_sec = (unsigned int) sec;
1763 flow_control.ts.tv_nsec = nsec;
1764 ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
1765
1766 flow_control.state = FCS_DELAYED;
1767 msecs = vm_pageout_deadlock_wait;
1768
1769 break;
1770
1771 case FCS_DELAYED:
1772 clock_get_system_nanotime(&sec, &nsec);
1773 ts.tv_sec = (unsigned int) sec;
1774 ts.tv_nsec = nsec;
1775
1776 if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
1777 /*
1778 * the pageout thread for the default pager is potentially
1779 * deadlocked since the
1780 * default pager queue has been throttled for more than the
1781 * allowable time... we need to move some clean pages or dirty
1782 * pages belonging to the external pagers if they aren't throttled
1783 * vm_page_free_wanted represents the number of threads currently
1784 * blocked waiting for pages... we'll move one page for each of
1785 * these plus a fixed amount to break the logjam... once we're done
1786				 * moving this number of pages, we'll re-enter the FCS_DELAYED state
1787 * with a new timeout target since we have no way of knowing
1788 * whether we've broken the deadlock except through observation
1789 * of the queue associated with the default pager... we need to
1790 * stop moving pages and allow the system to run to see what
1791 * state it settles into.
1792 */
1793 vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
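					/*
					 * Added worked example (hypothetical numbers): with a
					 * relief value of 100 pages, 3 waiters counted in
					 * vm_page_free_wanted and 1 privileged waiter, the
					 * target becomes 100 + 3 + 1 = 104 pages, which the
					 * reclaim path counts down before this state machine
					 * re-arms the timer via reset_deadlock_timer.
					 */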
1794 vm_pageout_scan_deadlock_detected++;
1795 flow_control.state = FCS_DEADLOCK_DETECTED;
1796 thread_wakeup((event_t) &vm_pageout_garbage_collect);
1797 goto consider_inactive;
1798 }
1799 /*
1800 * just resniff instead of trying
1801 * to compute a new delay time... we're going to be
1802 * awakened immediately upon a laundry completion,
1803 * so we won't wait any longer than necessary
1804 */
1805 msecs = vm_pageout_idle_wait;
1806 break;
1807
1808 case FCS_DEADLOCK_DETECTED:
1809 if (vm_pageout_deadlock_target)
1810 goto consider_inactive;
1811 goto reset_deadlock_timer;
1812
1813 }
1814 vm_pageout_scan_delay:
1815 if (object != NULL) {
1816 vm_object_unlock(object);
1817 object = NULL;
1818 }
1819 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1820
1821 if (local_freeq) {
1822 vm_page_unlock_queues();
1823
1824 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1825 vm_page_free_count, local_freed, delayed_unlock_limit, 3);
1826
1827 vm_page_free_list(local_freeq, TRUE);
1828
1829 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1830 vm_page_free_count, local_freed, 0, 3);
1831
1832 local_freeq = NULL;
1833 local_freed = 0;
1834 vm_page_lock_queues();
1835
1836 if (flow_control.state == FCS_DELAYED &&
1837 !VM_PAGE_Q_THROTTLED(iq)) {
1838 flow_control.state = FCS_IDLE;
1839 goto consider_inactive;
1840 }
1841 }
1842
1843 if (vm_page_free_count >= vm_page_free_target) {
1844 /*
1845			 * we're here because someone else freed up some pages
1846			 * while we had the queues unlocked above, and we've hit
1847			 * one of the 3 conditions that cause us to pause the
1848			 * pageout scan thread
1850 *
1851 * since we already have enough free pages,
1852 * let's avoid stalling and return normally
1853 *
1854 * before we return, make sure the pageout I/O threads
1855 * are running throttled in case there are still requests
1856 * in the laundry... since we have enough free pages
1857 * we don't need the laundry to be cleaned in a timely
1858 * fashion... so let's avoid interfering with foreground
1859 * activity
1860 *
1861 * we don't want to hold vm_page_queue_free_lock when
1862 * calling vm_pageout_adjust_io_throttles (since it
1863			 * may cause other locks to be taken), we do the initial
1864 * check outside of the lock. Once we take the lock,
1865 * we recheck the condition since it may have changed.
1866 * if it has, no problem, we will make the threads
1867 * non-throttled before actually blocking
1868 */
1869 vm_pageout_adjust_io_throttles(iq, eq, TRUE);
1870 }
1871 lck_mtx_lock(&vm_page_queue_free_lock);
1872
1873 if (vm_page_free_count >= vm_page_free_target &&
1874 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
1875 goto return_from_scan;
1876 }
1877 lck_mtx_unlock(&vm_page_queue_free_lock);
1878
1879 if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
1880 /*
1881 * we're most likely about to block due to one of
1882 * the 3 conditions that cause vm_pageout_scan to
1883			 * not be able to make forward progress with respect
1884 * to providing new pages to the free queue,
1885 * so unthrottle the I/O threads in case we
1886 * have laundry to be cleaned... it needs
1887 * to be completed ASAP.
1888 *
1889 * even if we don't block, we want the io threads
1890 * running unthrottled since the sum of free +
1891 * clean pages is still under our free target
1892 */
1893 vm_pageout_adjust_io_throttles(iq, eq, FALSE);
1894 }
1895 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
1896 /*
1897 * if we get here we're below our free target and
1898 * we're stalling due to a full laundry queue or
1899			 * we don't have any inactive pages other than
1900 * those in the clean queue...
1901 * however, we have pages on the clean queue that
1902 * can be moved to the free queue, so let's not
1903 * stall the pageout scan
1904 */
1905 flow_control.state = FCS_IDLE;
1906 goto consider_inactive;
1907 }
1908 VM_CHECK_MEMORYSTATUS;
1909
1910 if (flow_control.state != FCS_IDLE)
1911 vm_pageout_scan_throttle++;
1912 iq->pgo_throttled = TRUE;
1913
1914 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
1915 vm_consider_waking_compactor_swapper();
1916
1917 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
1918 counter(c_vm_pageout_scan_block++);
1919
1920 vm_page_unlock_queues();
1921
1922 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
1923
1924 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
1925 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
1926 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
1927
1928 thread_block(THREAD_CONTINUE_NULL);
1929
1930 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
1931 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
1932 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
1933
1934 vm_page_lock_queues();
1935 delayed_unlock = 1;
1936
1937 iq->pgo_throttled = FALSE;
1938
1939 if (loop_count >= vm_page_inactive_count)
1940 loop_count = 0;
1941 inactive_burst_count = 0;
1942
1943 goto Restart;
1944 /*NOTREACHED*/
1945 }
1946
1947
1948 flow_control.state = FCS_IDLE;
1949 consider_inactive:
1950 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
1951 vm_pageout_inactive_external_forced_reactivate_limit);
1952 loop_count++;
1953 inactive_burst_count++;
1954 vm_pageout_inactive++;
1955
1956
1957 /*
1958 * Choose a victim.
1959 */
1960 while (1) {
1961 m = NULL;
1962
1963 if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
1964 assert(vm_page_throttled_count == 0);
1965 assert(queue_empty(&vm_page_queue_throttled));
1966 }
1967 /*
1968 * The most eligible pages are ones we paged in speculatively,
1969 * but which have not yet been touched.
1970 */
1971 if (!queue_empty(&sq->age_q) ) {
1972 m = (vm_page_t) queue_first(&sq->age_q);
1973
1974 page_prev_state = PAGE_STATE_SPECULATIVE;
1975
1976 break;
1977 }
1978 /*
1979 * Try a clean-queue inactive page.
1980 */
1981 if (!queue_empty(&vm_page_queue_cleaned)) {
1982 m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
1983
1984 page_prev_state = PAGE_STATE_CLEAN;
1985
1986 break;
1987 }
1988
1989 grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
1990
1991 if (vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) {
1992 grab_anonymous = TRUE;
1993 anons_grabbed = 0;
1994 }
1995
1996 if (grab_anonymous == TRUE && vm_compression_available() == FALSE)
1997 grab_anonymous = FALSE;
1998
1999 if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous)) {
2000
2001 if ( !queue_empty(&vm_page_queue_inactive) ) {
2002 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
2003
2004 page_prev_state = PAGE_STATE_INACTIVE;
2005 anons_grabbed = 0;
2006
2007 break;
2008 }
2009 }
2010 if ( !queue_empty(&vm_page_queue_anonymous) ) {
2011 m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
2012
2013 page_prev_state = PAGE_STATE_ANONYMOUS;
2014 anons_grabbed++;
2015
2016 break;
2017 }
2018
2019 /*
2020 * if we've gotten here, we have no victim page.
2021 * if making clean, free the local freed list and return.
2022 * if making free, check to see if we've finished balancing the queues
2023			 * yet... if we haven't, just continue; else panic
2024 */
2025 vm_page_unlock_queues();
2026
2027 if (object != NULL) {
2028 vm_object_unlock(object);
2029 object = NULL;
2030 }
2031 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2032
2033 if (local_freeq) {
2034 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
2035 vm_page_free_count, local_freed, delayed_unlock_limit, 5);
2036
2037 vm_page_free_list(local_freeq, TRUE);
2038
2039 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
2040 vm_page_free_count, local_freed, 0, 5);
2041
2042 local_freeq = NULL;
2043 local_freed = 0;
2044 }
2045 vm_page_lock_queues();
2046 delayed_unlock = 1;
2047
2048 if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
2049 goto Restart;
2050
2051 panic("vm_pageout: no victim");
2052
2053 /* NOTREACHED */
2054 }
2055 force_anonymous = FALSE;
2056
2057 /*
2058 * we just found this page on one of our queues...
2059 * it can't also be on the pageout queue, so safe
2060 * to call VM_PAGE_QUEUES_REMOVE
2061 */
2062 assert(!m->pageout_queue);
2063
2064 VM_PAGE_QUEUES_REMOVE(m);
2065
2066 assert(!m->laundry);
2067 assert(!m->private);
2068 assert(!m->fictitious);
2069 assert(m->object != kernel_object);
2070 assert(m->phys_page != vm_page_guard_addr);
2071
2072
2073 if (page_prev_state != PAGE_STATE_SPECULATIVE)
2074 vm_pageout_stats[vm_pageout_stat_now].considered++;
2075
2076 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2077
2078 /*
2079 * check to see if we currently are working
2080 * with the same object... if so, we've
2081 * already got the lock
2082 */
2083 if (m->object != object) {
2084 /*
2085 * the object associated with candidate page is
2086 * different from the one we were just working
2087 * with... dump the lock if we still own it
2088 */
2089 if (object != NULL) {
2090 vm_object_unlock(object);
2091 object = NULL;
2092 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2093 }
2094 /*
2095			 * Try to lock object; since we've already got the
2096 * page queues lock, we can only 'try' for this one.
2097 * if the 'try' fails, we need to do a mutex_pause
2098 * to allow the owner of the object lock a chance to
2099 * run... otherwise, we're likely to trip over this
2100 * object in the same state as we work our way through
2101 * the queue... clumps of pages associated with the same
2102 * object are fairly typical on the inactive and active queues
2103 */
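			/*
			 * Added sketch of the failure path below: clear the page's
			 * hardware reference bit, bump the object's scan_collisions
			 * hint, record the next likely victim's object in
			 * vm_pageout_scan_wants_object, then requeue this page;
			 * try_failed makes the bottom of the loop flush local_freeq
			 * (or yield the page-queue lock) before moving on.
			 */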
2104 if (!vm_object_lock_try_scan(m->object)) {
2105 vm_page_t m_want = NULL;
2106
2107 vm_pageout_inactive_nolock++;
2108
2109 if (page_prev_state == PAGE_STATE_CLEAN)
2110 vm_pageout_cleaned_nolock++;
2111
2112 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2113 page_prev_state = PAGE_STATE_INACTIVE_FIRST;
2114
2115 pmap_clear_reference(m->phys_page);
2116 m->reference = FALSE;
2117
2118 /*
2119 * m->object must be stable since we hold the page queues lock...
2120 * we can update the scan_collisions field sans the object lock
2121 * since it is a separate field and this is the only spot that does
2122 * a read-modify-write operation and it is never executed concurrently...
2123 * we can asynchronously set this field to 0 when creating a UPL, so it
2124				 * is possible for the value to be a bit non-deterministic, but that's ok
2125 * since it's only used as a hint
2126 */
2127 m->object->scan_collisions++;
2128
2129 if ( !queue_empty(&sq->age_q) )
2130 m_want = (vm_page_t) queue_first(&sq->age_q);
2131 else if ( !queue_empty(&vm_page_queue_cleaned))
2132 m_want = (vm_page_t) queue_first(&vm_page_queue_cleaned);
2133 else if (anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous))
2134 m_want = (vm_page_t) queue_first(&vm_page_queue_inactive);
2135 else if ( !queue_empty(&vm_page_queue_anonymous))
2136 m_want = (vm_page_t) queue_first(&vm_page_queue_anonymous);
2137
2138 /*
2139				 * this is the next object we're going to be interested in...
2140				 * try to make sure it's available after the mutex_yield
2141 * returns control
2142 */
2143 if (m_want)
2144 vm_pageout_scan_wants_object = m_want->object;
2145
2146 /*
2147 * force us to dump any collected free pages
2148 * and to pause before moving on
2149 */
2150 try_failed = TRUE;
2151
2152 goto requeue_page;
2153 }
2154 object = m->object;
2155 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2156
2157 try_failed = FALSE;
2158 }
2159 if (catch_up_count)
2160 catch_up_count--;
2161
2162 if (m->busy) {
2163 if (m->encrypted_cleaning) {
2164 /*
2165 * ENCRYPTED SWAP:
2166 * if this page has already been picked up as
2167 * part of a page-out cluster, it will be busy
2168 * because it is being encrypted (see
2169 * vm_object_upl_request()). But we still
2170 * want to demote it from "clean-in-place"
2171 * (aka "adjacent") to "clean-and-free" (aka
2172 * "target"), so let's ignore its "busy" bit
2173 * here and proceed to check for "cleaning" a
2174 * little bit below...
2175 *
2176 * CAUTION CAUTION:
2177 * A "busy" page should still be left alone for
2178 * most purposes, so we have to be very careful
2179 * not to process that page too much.
2180 */
2181 assert(m->cleaning);
2182 goto consider_inactive_page;
2183 }
2184
2185 /*
2186 * Somebody is already playing with this page.
2187 * Put it back on the appropriate queue
2188 *
2189 */
2190 vm_pageout_inactive_busy++;
2191
2192 if (page_prev_state == PAGE_STATE_CLEAN)
2193 vm_pageout_cleaned_busy++;
2194
2195 requeue_page:
2196 switch (page_prev_state) {
2197
2198 case PAGE_STATE_SPECULATIVE:
2199 vm_page_speculate(m, FALSE);
2200 break;
2201
2202 case PAGE_STATE_ANONYMOUS:
2203 case PAGE_STATE_CLEAN:
2204 case PAGE_STATE_INACTIVE:
2205 VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
2206 break;
2207
2208 case PAGE_STATE_INACTIVE_FIRST:
2209 VM_PAGE_ENQUEUE_INACTIVE(m, TRUE);
2210 break;
2211 }
2212 goto done_with_inactivepage;
2213 }
2214
2215
2216 /*
2217 * If it's absent, in error or the object is no longer alive,
2218 * we can reclaim the page... in the no longer alive case,
2219 * there are 2 states the page can be in that preclude us
2220 * from reclaiming it - busy or cleaning - that we've already
2221 * dealt with
2222 */
2223 if (m->absent || m->error || !object->alive) {
2224
2225 if (m->absent)
2226 vm_pageout_inactive_absent++;
2227 else if (!object->alive)
2228 vm_pageout_inactive_notalive++;
2229 else
2230 vm_pageout_inactive_error++;
2231 reclaim_page:
2232 if (vm_pageout_deadlock_target) {
2233 vm_pageout_scan_inactive_throttle_success++;
2234 vm_pageout_deadlock_target--;
2235 }
2236
2237 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
2238
2239 if (object->internal) {
2240 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
2241 } else {
2242 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
2243 }
2244 assert(!m->cleaning);
2245 assert(!m->laundry);
2246
2247 m->busy = TRUE;
2248
2249 /*
2250 * remove page from object here since we're already
2251 * behind the object lock... defer the rest of the work
2252 * we'd normally do in vm_page_free_prepare_object
2253 * until 'vm_page_free_list' is called
2254 */
2255 if (m->tabled)
2256 vm_page_remove(m, TRUE);
2257
2258 assert(m->pageq.next == NULL &&
2259 m->pageq.prev == NULL);
2260 m->pageq.next = (queue_entry_t)local_freeq;
2261 local_freeq = m;
2262 local_freed++;
2263
2264 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2265 vm_pageout_freed_from_speculative++;
2266 else if (page_prev_state == PAGE_STATE_CLEAN)
2267 vm_pageout_freed_from_cleaned++;
2268 else
2269 vm_pageout_freed_from_inactive_clean++;
2270
2271 if (page_prev_state != PAGE_STATE_SPECULATIVE)
2272 vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
2273
2274 goto done_with_inactivepage;
2275 }
2276 /*
2277 * If the object is empty, the page must be reclaimed even
2278 * if dirty or used.
2279		 * If the page belongs to a volatile object, we stick it back
2280		 * on the queue instead of cleaning it.
2281 */
2282 if (object->copy == VM_OBJECT_NULL) {
2283 if (object->purgable == VM_PURGABLE_EMPTY) {
2284 if (m->pmapped == TRUE) {
2285 /* unmap the page */
2286 refmod_state = pmap_disconnect(m->phys_page);
2287 if (refmod_state & VM_MEM_MODIFIED) {
2288 SET_PAGE_DIRTY(m, FALSE);
2289 }
2290 }
2291 if (m->dirty || m->precious) {
2292 /* we saved the cost of cleaning this page ! */
2293 vm_page_purged_count++;
2294 }
2295 goto reclaim_page;
2296 }
2297
2298 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
2299 /*
2300 * With the VM compressor, the cost of
2301 * reclaiming a page is much lower (no I/O),
2302 * so if we find a "volatile" page, it's better
2303 * to let it get compressed rather than letting
2304 * it occupy a full page until it gets purged.
2305 * So no need to check for "volatile" here.
2306 */
2307 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
2308 /*
2309 * Avoid cleaning a "volatile" page which might
2310 * be purged soon.
2311 */
2312
2313 /* if it's wired, we can't put it on our queue */
2314 assert(!VM_PAGE_WIRED(m));
2315
2316 /* just stick it back on! */
2317 reactivated_this_call++;
2318
2319 if (page_prev_state == PAGE_STATE_CLEAN)
2320 vm_pageout_cleaned_volatile_reactivated++;
2321
2322 goto reactivate_page;
2323 }
2324 }
2325
2326 consider_inactive_page:
2327 if (m->busy) {
2328 /*
2329 * CAUTION CAUTION:
2330 * A "busy" page should always be left alone, except...
2331 */
2332 if (m->cleaning && m->encrypted_cleaning) {
2333 /*
2334 * ENCRYPTED_SWAP:
2335 * We could get here with a "busy" page
2336 * if it's being encrypted during a
2337 * "clean-in-place" operation. We'll deal
2338 * with it right away by testing if it has been
2339 * referenced and either reactivating it or
2340 * promoting it from "clean-in-place" to
2341 * "clean-and-free".
2342 */
2343 } else {
2344 panic("\"busy\" page considered for pageout\n");
2345 }
2346 }
2347
2348 /*
2349 * If it's being used, reactivate.
2350 * (Fictitious pages are either busy or absent.)
2351 * First, update the reference and dirty bits
2352 * to make sure the page is unreferenced.
2353 */
2354 refmod_state = -1;
2355
2356 if (m->reference == FALSE && m->pmapped == TRUE) {
2357 refmod_state = pmap_get_refmod(m->phys_page);
2358
2359 if (refmod_state & VM_MEM_REFERENCED)
2360 m->reference = TRUE;
2361 if (refmod_state & VM_MEM_MODIFIED) {
2362 SET_PAGE_DIRTY(m, FALSE);
2363 }
2364 }
2365
2366 /*
2367 * if (m->cleaning && !m->pageout)
2368 * If already cleaning this page in place and it hasn't
2369 * been recently referenced, just pull off the queue.
2370 * We can leave the page mapped, and upl_commit_range
2371 * will put it on the clean queue.
2372 *
2373 * note: if m->encrypted_cleaning == TRUE, then
2374 * m->cleaning == TRUE
2375 * and we'll handle it here
2376 *
2377 * if (m->pageout && !m->cleaning)
2378 * an msync INVALIDATE is in progress...
2379 * this page has been marked for destruction
2380 * after it has been cleaned,
2381 * but not yet gathered into a UPL
2382 * where 'cleaning' will be set...
2383 * just leave it off the paging queues
2384 *
2385		 * if (m->pageout && m->cleaning)
2386 * an msync INVALIDATE is in progress
2387 * and the UPL has already gathered this page...
2388 * just leave it off the paging queues
2389 */
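		/*
		 * Added compact restatement of the cases above:
		 *
		 *	m->pageout                  -> leave it off the paging queues
		 *	m->cleaning && referenced   -> reactivate_page
		 *	m->cleaning && !referenced  -> done_with_inactivepage
		 *
		 * which is what the two checks that follow implement.
		 */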
2390
2391 /*
2392 * page with m->pageout and still on the queues means that an
2393 * MS_INVALIDATE is in progress on this page... leave it alone
2394 */
2395 if (m->pageout) {
2396 goto done_with_inactivepage;
2397 }
2398
2399 /* if cleaning, reactivate if referenced. otherwise, just pull off queue */
2400 if (m->cleaning) {
2401 if (m->reference == TRUE) {
2402 reactivated_this_call++;
2403 goto reactivate_page;
2404 } else {
2405 goto done_with_inactivepage;
2406 }
2407 }
2408
2409 if (m->reference || m->dirty) {
2410 /* deal with a rogue "reusable" page */
2411 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
2412 }
2413
2414 if (m->reference && !m->no_cache) {
2415 /*
2416 * The page we pulled off the inactive list has
2417 * been referenced. It is possible for other
2418 * processors to be touching pages faster than we
2419 * can clear the referenced bit and traverse the
2420 * inactive queue, so we limit the number of
2421 * reactivations.
2422 */
2423 if (++reactivated_this_call >= reactivate_limit) {
2424 vm_pageout_reactivation_limit_exceeded++;
2425 } else if (catch_up_count) {
2426 vm_pageout_catch_ups++;
2427 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
2428 vm_pageout_inactive_force_reclaim++;
2429 } else {
2430 uint32_t isinuse;
2431
2432 if (page_prev_state == PAGE_STATE_CLEAN)
2433 vm_pageout_cleaned_reference_reactivated++;
2434
2435 reactivate_page:
2436 if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
2437 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
2438 /*
2439				 * no explicit mappings of this object exist
2440 * and it's not open via the filesystem
2441 */
2442 vm_page_deactivate(m);
2443 vm_pageout_inactive_deactivated++;
2444 } else {
2445 /*
2446 * The page was/is being used, so put back on active list.
2447 */
2448 vm_page_activate(m);
2449 VM_STAT_INCR(reactivations);
2450 }
2451
2452 if (page_prev_state == PAGE_STATE_CLEAN)
2453 vm_pageout_cleaned_reactivated++;
2454
2455 vm_pageout_inactive_used++;
2456
2457 goto done_with_inactivepage;
2458 }
2459 /*
2460 * Make sure we call pmap_get_refmod() if it
2461 * wasn't already called just above, to update
2462 * the dirty bit.
2463 */
2464 if ((refmod_state == -1) && !m->dirty && m->pmapped) {
2465 refmod_state = pmap_get_refmod(m->phys_page);
2466 if (refmod_state & VM_MEM_MODIFIED) {
2467 SET_PAGE_DIRTY(m, FALSE);
2468 }
2469 }
2470 forced_reclaim = TRUE;
2471 } else {
2472 forced_reclaim = FALSE;
2473 }
2474
2475 XPR(XPR_VM_PAGEOUT,
2476 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
2477 object, m->offset, m, 0,0);
2478
2479 /*
2480 * we've got a candidate page to steal...
2481 *
2482 * m->dirty is up to date courtesy of the
2483 * preceding check for m->reference... if
2484 * we get here, then m->reference had to be
2485 * FALSE (or possibly "reactivate_limit" was
2486 * exceeded), but in either case we called
2487 * pmap_get_refmod() and updated both
2488 * m->reference and m->dirty
2489 *
2490 * if it's dirty or precious we need to
2491	 * see if the target queue is throttled...
2492	 * if it is, we need to skip over it by moving it back
2493 * to the end of the inactive queue
2494 */
2495
2496 inactive_throttled = FALSE;
2497
2498 if (m->dirty || m->precious) {
2499 if (object->internal) {
2500 if (VM_PAGE_Q_THROTTLED(iq))
2501 inactive_throttled = TRUE;
2502 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2503 inactive_throttled = TRUE;
2504 }
2505 }
2506 throttle_inactive:
2507 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
2508 object->internal && m->dirty &&
2509 (object->purgable == VM_PURGABLE_DENY ||
2510 object->purgable == VM_PURGABLE_NONVOLATILE ||
2511 object->purgable == VM_PURGABLE_VOLATILE)) {
2512 queue_enter(&vm_page_queue_throttled, m,
2513 vm_page_t, pageq);
2514 m->throttled = TRUE;
2515 vm_page_throttled_count++;
2516
2517 vm_pageout_scan_reclaimed_throttled++;
2518
2519 goto done_with_inactivepage;
2520 }
2521 if (inactive_throttled == TRUE) {
2522
2523 if (object->internal == FALSE) {
2524 /*
2525 * we need to break up the following potential deadlock case...
2526 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2527 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2528 * c) Most of the pages in the inactive queue belong to this file.
2529 *
2530 * we are potentially in this deadlock because...
2531 * a) the external pageout queue is throttled
2532 * b) we're done with the active queue and moved on to the inactive queue
2533 * c) we've got a dirty external page
2534 *
2535 * since we don't know the reason for the external pageout queue being throttled we
2536 * must suspect that we are deadlocked, so move the current page onto the active queue
2537 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2538 *
2539 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2540 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2541 * pool the next time we select a victim page... if we can make enough new free pages,
2542 * the deadlock will break, the external pageout queue will empty and it will no longer
2543 * be throttled
2544 *
2545			 * if we have jetsam configured, keep a count of the pages reactivated this way so
2546 * that we can try to find clean pages in the active/inactive queues before
2547 * deciding to jetsam a process
2548 */
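				/*
				 * Added note: in short, the page goes back on the active
				 * queue and the I/O threads are unthrottled; with jetsam
				 * configured we also count down
				 * vm_pageout_inactive_external_forced_reactivate_limit and
				 * kill a process when it reaches zero, otherwise we just
				 * set force_anonymous for the next victim selection.
				 */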
2549 vm_pageout_scan_inactive_throttled_external++;
2550
2551 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
2552 m->active = TRUE;
2553 vm_page_active_count++;
2554 if (m->object->internal) {
2555 vm_page_pageable_internal_count++;
2556 } else {
2557 vm_page_pageable_external_count++;
2558 }
2559
2560 vm_pageout_adjust_io_throttles(iq, eq, FALSE);
2561
2562 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2563 vm_pageout_inactive_external_forced_reactivate_limit--;
2564
2565 if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
2566 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2567 /*
2568 * Possible deadlock scenario so request jetsam action
2569 */
2570 assert(object);
2571 vm_object_unlock(object);
2572 object = VM_OBJECT_NULL;
2573 vm_page_unlock_queues();
2574
2575 VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
2576 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2577
2578 /* Kill first suitable process */
2579 if (memorystatus_kill_on_VM_page_shortage(FALSE) == FALSE) {
2580 panic("vm_pageout_scan: Jetsam request failed\n");
2581 }
2582
2583 VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
2584
2585 vm_pageout_inactive_external_forced_jetsam_count++;
2586 vm_page_lock_queues();
2587 delayed_unlock = 1;
2588 }
2589 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2590 force_anonymous = TRUE;
2591 #endif
2592 goto done_with_inactivepage;
2593 } else {
2594 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2595 page_prev_state = PAGE_STATE_INACTIVE;
2596
2597 vm_pageout_scan_inactive_throttled_internal++;
2598
2599 goto requeue_page;
2600 }
2601 }
2602
2603 /*
2604 * we've got a page that we can steal...
2605 * eliminate all mappings and make sure
2606 * we have the up-to-date modified state
2607 *
2608 * if we need to do a pmap_disconnect then we
2609 * need to re-evaluate m->dirty since the pmap_disconnect
2610 * provides the true state atomically... the
2611 * page was still mapped up to the pmap_disconnect
2612 * and may have been dirtied at the last microsecond
2613 *
2614	 * Note that if 'pmapped' is FALSE then the page is not currently
2615	 * in any map and never has been, so there is no point calling
2616 * pmap_disconnect(). m->dirty could have been set in anticipation
2617 * of likely usage of the page.
2618 */
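	/*
	 * Added note: the branch below passes PMAP_OPTIONS_COMPRESSOR to
	 * pmap_disconnect_options() only when the object is internal and
	 * neither the default pager nor the freezer's default path is active
	 * (the case where the compressor, rather than the default pager, will
	 * back this page); otherwise no options are passed.  Either way
	 * m->dirty is refreshed from the returned refmod state.
	 */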
2619 if (m->pmapped == TRUE) {
2620
2621 if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || object->internal == FALSE) {
2622 refmod_state = pmap_disconnect_options(m->phys_page, 0, NULL);
2623 } else {
2624 refmod_state = pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
2625 }
2626
2627 if (refmod_state & VM_MEM_MODIFIED) {
2628 SET_PAGE_DIRTY(m, FALSE);
2629 }
2630 }
2631 /*
2632 * reset our count of pages that have been reclaimed
2633 * since the last page was 'stolen'
2634 */
2635 inactive_reclaim_run = 0;
2636
2637 /*
2638 * If it's clean and not precious, we can free the page.
2639 */
2640 if (!m->dirty && !m->precious) {
2641
2642 if (page_prev_state == PAGE_STATE_SPECULATIVE)
2643 vm_pageout_speculative_clean++;
2644 else {
2645 if (page_prev_state == PAGE_STATE_ANONYMOUS)
2646 vm_pageout_inactive_anonymous++;
2647 else if (page_prev_state == PAGE_STATE_CLEAN)
2648 vm_pageout_cleaned_reclaimed++;
2649
2650 if (m->was_dirty) {
2651 /* page on clean queue used to be dirty; we should increment the vm_stat pageout count here */
2652 VM_STAT_INCR(pageouts);
2653 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
2654 }
2655 vm_pageout_inactive_clean++;
2656 }
2657
2658 /*
2659 * OK, at this point we have found a page we are going to free.
2660 */
2661 goto reclaim_page;
2662 }
2663
2664 /*
2665 * The page may have been dirtied since the last check
2666 * for a throttled target queue (which may have been skipped
2667 * if the page was clean then). With the dirty page
2668 * disconnected here, we can make one final check.
2669 */
2670 if (object->internal) {
2671 if (VM_PAGE_Q_THROTTLED(iq))
2672 inactive_throttled = TRUE;
2673 } else if (VM_PAGE_Q_THROTTLED(eq)) {
2674 inactive_throttled = TRUE;
2675 }
2676
2677 if (inactive_throttled == TRUE)
2678 goto throttle_inactive;
2679
2680 #if VM_PRESSURE_EVENTS
2681 vm_pressure_response();
2682 #endif /* VM_PRESSURE_EVENTS */
2683
2684 /*
2685 * do NOT set the pageout bit!
2686 * sure, we might need free pages, but this page is going to take time to become free
2687 * anyway, so we may as well put it on the clean queue first and take it from there later
2688 * if necessary. that way, we'll ensure we don't free up too much. -mj
2689 */
2690 vm_pageout_cluster(m, FALSE);
2691
2692 if (page_prev_state == PAGE_STATE_ANONYMOUS)
2693 vm_pageout_inactive_anonymous++;
2694 if (object->internal)
2695 vm_pageout_inactive_dirty_internal++;
2696 else
2697 vm_pageout_inactive_dirty_external++;
2698
2699
2700 done_with_inactivepage:
2701 inactive_burst_count = 0;
2702
2703 if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
2704
2705 if (object != NULL) {
2706 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2707 vm_object_unlock(object);
2708 object = NULL;
2709 }
2710 if (local_freeq) {
2711 vm_page_unlock_queues();
2712
2713 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
2714 vm_page_free_count, local_freed, delayed_unlock_limit, 4);
2715
2716 vm_page_free_list(local_freeq, TRUE);
2717
2718 VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
2719 vm_page_free_count, local_freed, 0, 4);
2720
2721 local_freeq = NULL;
2722 local_freed = 0;
2723 vm_page_lock_queues();
2724 } else
2725 lck_mtx_yield(&vm_page_queue_lock);
2726
2727 delayed_unlock = 1;
2728 }
2729 vm_pageout_considered_page++;
2730
2731 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
2732 vm_consider_waking_compactor_swapper();
2733
2734 /*
2735 * back to top of pageout scan loop
2736 */
2737 }
2738 }
2739
2740
2741 int vm_page_free_count_init;
2742
2743 void
2744 vm_page_free_reserve(
2745 int pages)
2746 {
2747 int free_after_reserve;
2748
2749 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
2750
2751 if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
2752 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
2753 else
2754 vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
2755
2756 } else {
2757 if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
2758 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
2759 else
2760 vm_page_free_reserved += pages;
2761 }
2762 free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
2763
2764 vm_page_free_min = vm_page_free_reserved +
2765 VM_PAGE_FREE_MIN(free_after_reserve);
2766
2767 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
2768 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
2769
2770 vm_page_free_target = vm_page_free_reserved +
2771 VM_PAGE_FREE_TARGET(free_after_reserve);
2772
2773 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
2774 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
2775
2776 if (vm_page_free_target < vm_page_free_min + 5)
2777 vm_page_free_target = vm_page_free_min + 5;
2778
2779 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
2780 vm_page_creation_throttle = vm_page_free_target * 3;
2781 }
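/*
 * Added worked example for vm_page_free_reserve() (all numbers hypothetical):
 * if vm_page_free_count_init were 100000 pages and the reserve worked out to
 * 1000, then free_after_reserve is 99000 and the watermarks become
 * 1000 + VM_PAGE_FREE_MIN(99000) and 1000 + VM_PAGE_FREE_TARGET(99000), each
 * clipped to its *_LIMIT, with the target forced to at least min + 5.  The
 * derived throttle points are then vm_page_throttle_limit = target - target/3
 * (roughly 2/3 of the target) and vm_page_creation_throttle = 3 * target.
 */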
2782
2783 /*
2784 * vm_pageout is the high level pageout daemon.
2785 */
2786
2787 void
2788 vm_pageout_continue(void)
2789 {
2790 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
2791 vm_pageout_scan_event_counter++;
2792
2793 vm_pageout_scan();
2794 /*
2795 * we hold both the vm_page_queue_free_lock
2796 * and the vm_page_queues_lock at this point
2797 */
2798 assert(vm_page_free_wanted == 0);
2799 assert(vm_page_free_wanted_privileged == 0);
2800 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
2801
2802 lck_mtx_unlock(&vm_page_queue_free_lock);
2803 vm_page_unlock_queues();
2804
2805 counter(c_vm_pageout_block++);
2806 thread_block((thread_continue_t)vm_pageout_continue);
2807 /*NOTREACHED*/
2808 }
2809
2810
2811 #ifdef FAKE_DEADLOCK
2812
2813 #define FAKE_COUNT 5000
2814
2815 int internal_count = 0;
2816 int fake_deadlock = 0;
2817
2818 #endif
2819
2820 static void
2821 vm_pageout_iothread_continue(struct vm_pageout_queue *q)
2822 {
2823 vm_page_t m = NULL;
2824 vm_object_t object;
2825 vm_object_offset_t offset;
2826 memory_object_t pager;
2827 thread_t self = current_thread();
2828
2829 if ((vm_pageout_internal_iothread != THREAD_NULL)
2830 && (self == vm_pageout_external_iothread )
2831 && (self->options & TH_OPT_VMPRIV))
2832 self->options &= ~TH_OPT_VMPRIV;
2833
2834 vm_page_lockspin_queues();
2835
2836 while ( !queue_empty(&q->pgo_pending) ) {
2837
2838 q->pgo_busy = TRUE;
2839 queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
2840 if (m->object->object_slid) {
2841 panic("slid page %p not allowed on this path\n", m);
2842 }
2843 VM_PAGE_CHECK(m);
2844 m->pageout_queue = FALSE;
2845 m->pageq.next = NULL;
2846 m->pageq.prev = NULL;
2847
2848 /*
2849 * grab a snapshot of the object and offset this
2850 * page is tabled in so that we can relookup this
2851 * page after we've taken the object lock - these
2852 * fields are stable while we hold the page queues lock
2853 * but as soon as we drop it, there is nothing to keep
2854 * this page in this object... we hold an activity_in_progress
2855 * on this object which will keep it from terminating
2856 */
2857 object = m->object;
2858 offset = m->offset;
2859
2860 vm_page_unlock_queues();
2861
2862 #ifdef FAKE_DEADLOCK
2863 if (q == &vm_pageout_queue_internal) {
2864 vm_offset_t addr;
2865 int pg_count;
2866
2867 internal_count++;
2868
2869 if ((internal_count == FAKE_COUNT)) {
2870
2871 pg_count = vm_page_free_count + vm_page_free_reserved;
2872
2873 if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
2874 kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
2875 }
2876 internal_count = 0;
2877 fake_deadlock++;
2878 }
2879 }
2880 #endif
2881 vm_object_lock(object);
2882
2883 m = vm_page_lookup(object, offset);
2884
2885 if (m == NULL ||
2886 m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
2887 /*
2888 * it's either the same page that someone else has
2889 * started cleaning (or it's finished cleaning or
2890 * been put back on the pageout queue), or
2891 * the page has been freed or we have found a
2892 * new page at this offset... in all of these cases
2893 * we merely need to release the activity_in_progress
2894 * we took when we put the page on the pageout queue
2895 */
2896 vm_object_activity_end(object);
2897 vm_object_unlock(object);
2898
2899 vm_page_lockspin_queues();
2900 continue;
2901 }
2902 if (!object->pager_initialized) {
2903
2904 /*
2905 * If there is no memory object for the page, create
2906 * one and hand it to the default pager.
2907 */
2908
2909 if (!object->pager_initialized)
2910 vm_object_collapse(object,
2911 (vm_object_offset_t) 0,
2912 TRUE);
2913 if (!object->pager_initialized)
2914 vm_object_pager_create(object);
2915 if (!object->pager_initialized) {
2916 /*
2917 * Still no pager for the object.
2918 * Reactivate the page.
2919 *
2920 * Should only happen if there is no
2921 * default pager.
2922 */
2923 m->pageout = FALSE;
2924
2925 vm_page_lockspin_queues();
2926
2927 vm_pageout_throttle_up(m);
2928 vm_page_activate(m);
2929 vm_pageout_dirty_no_pager++;
2930
2931 vm_page_unlock_queues();
2932
2933 /*
2934 * And we are done with it.
2935 */
2936 vm_object_activity_end(object);
2937 vm_object_unlock(object);
2938
2939 vm_page_lockspin_queues();
2940 continue;
2941 }
2942 }
2943 pager = object->pager;
2944
2945 if (pager == MEMORY_OBJECT_NULL) {
2946 /*
2947 * This pager has been destroyed by either
2948 * memory_object_destroy or vm_object_destroy, and
2949 * so there is nowhere for the page to go.
2950 */
2951 if (m->pageout) {
2952 /*
2953 * Just free the page... VM_PAGE_FREE takes
2954 * care of cleaning up all the state...
2955 * including doing the vm_pageout_throttle_up
2956 */
2957 VM_PAGE_FREE(m);
2958 } else {
2959 vm_page_lockspin_queues();
2960
2961 vm_pageout_throttle_up(m);
2962 vm_page_activate(m);
2963
2964 vm_page_unlock_queues();
2965
2966 /*
2967 * And we are done with it.
2968 */
2969 }
2970 vm_object_activity_end(object);
2971 vm_object_unlock(object);
2972
2973 vm_page_lockspin_queues();
2974 continue;
2975 }
2976 #if 0
2977 /*
2978 * we don't hold the page queue lock
2979 * so this check isn't safe to make
2980 */
2981 VM_PAGE_CHECK(m);
2982 #endif
2983 /*
2984 * give back the activity_in_progress reference we
2985		 * took when we queued up this page and replace it
2986		 * with a paging_in_progress reference that will
2987		 * also keep the paging offset from changing and
2988 * prevent the object from terminating
2989 */
2990 vm_object_activity_end(object);
2991 vm_object_paging_begin(object);
2992 vm_object_unlock(object);
2993
2994 /*
2995 * Send the data to the pager.
2996 * any pageout clustering happens there
2997 */
2998 memory_object_data_return(pager,
2999 m->offset + object->paging_offset,
3000 PAGE_SIZE,
3001 NULL,
3002 NULL,
3003 FALSE,
3004 FALSE,
3005 0);
3006
3007 vm_object_lock(object);
3008 vm_object_paging_end(object);
3009 vm_object_unlock(object);
3010
3011 vm_pageout_io_throttle();
3012
3013 vm_page_lockspin_queues();
3014 }
3015 q->pgo_busy = FALSE;
3016 q->pgo_idle = TRUE;
3017
3018 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3019 vm_page_unlock_queues();
3020
3021 thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) q);
3022 /*NOTREACHED*/
3023 }
3024
3025
3026 static void
3027 vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
3028 {
3029 vm_page_t m = NULL;
3030 vm_object_t object;
3031 vm_object_offset_t offset;
3032 memory_object_t pager;
3033
3034
3035 if (vm_pageout_internal_iothread != THREAD_NULL)
3036 current_thread()->options &= ~TH_OPT_VMPRIV;
3037
3038 vm_page_lockspin_queues();
3039
3040 while ( !queue_empty(&q->pgo_pending) ) {
3041
3042 q->pgo_busy = TRUE;
3043 queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
3044 if (m->object->object_slid) {
3045 panic("slid page %p not allowed on this path\n", m);
3046 }
3047 VM_PAGE_CHECK(m);
3048 m->pageout_queue = FALSE;
3049 m->pageq.next = NULL;
3050 m->pageq.prev = NULL;
3051
3052 /*
3053 * grab a snapshot of the object and offset this
3054 * page is tabled in so that we can relookup this
3055 * page after we've taken the object lock - these
3056 * fields are stable while we hold the page queues lock
3057 * but as soon as we drop it, there is nothing to keep
3058 * this page in this object... we hold an activity_in_progress
3059 * on this object which will keep it from terminating
3060 */
3061 object = m->object;
3062 offset = m->offset;
3063
3064 vm_page_unlock_queues();
3065
3066 vm_object_lock(object);
3067
3068 m = vm_page_lookup(object, offset);
3069
3070 if (m == NULL ||
3071 m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
3072 /*
3073 * it's either the same page that someone else has
3074 * started cleaning (or it's finished cleaning or
3075 * been put back on the pageout queue), or
3076 * the page has been freed or we have found a
3077 * new page at this offset... in all of these cases
3078 * we merely need to release the activity_in_progress
3079 * we took when we put the page on the pageout queue
3080 */
3081 vm_object_activity_end(object);
3082 vm_object_unlock(object);
3083
3084 vm_page_lockspin_queues();
3085 continue;
3086 }
3087 pager = object->pager;
3088
3089 if (pager == MEMORY_OBJECT_NULL) {
3090 /*
3091 * This pager has been destroyed by either
3092 * memory_object_destroy or vm_object_destroy, and
3093 * so there is nowhere for the page to go.
3094 */
3095 if (m->pageout) {
3096 /*
3097 * Just free the page... VM_PAGE_FREE takes
3098 * care of cleaning up all the state...
3099 * including doing the vm_pageout_throttle_up
3100 */
3101 VM_PAGE_FREE(m);
3102 } else {
3103 vm_page_lockspin_queues();
3104
3105 vm_pageout_throttle_up(m);
3106 vm_page_activate(m);
3107
3108 vm_page_unlock_queues();
3109
3110 /*
3111 * And we are done with it.
3112 */
3113 }
3114 vm_object_activity_end(object);
3115 vm_object_unlock(object);
3116
3117 vm_page_lockspin_queues();
3118 continue;
3119 }
3120 #if 0
3121 /*
3122 * we don't hold the page queue lock
3123 * so this check isn't safe to make
3124 */
3125 VM_PAGE_CHECK(m);
3126 #endif
3127 /*
3128 * give back the activity_in_progress reference we
3129		 * took when we queued up this page and replace it
3130		 * with a paging_in_progress reference that will
3131		 * also keep the paging offset from changing and
3132 * prevent the object from terminating
3133 */
3134 vm_object_activity_end(object);
3135 vm_object_paging_begin(object);
3136 vm_object_unlock(object);
3137
3138 /*
3139 * Send the data to the pager.
3140 * any pageout clustering happens there
3141 */
3142 memory_object_data_return(pager,
3143 m->offset + object->paging_offset,
3144 PAGE_SIZE,
3145 NULL,
3146 NULL,
3147 FALSE,
3148 FALSE,
3149 0);
3150
3151 vm_object_lock(object);
3152 vm_object_paging_end(object);
3153 vm_object_unlock(object);
3154
3155 vm_pageout_io_throttle();
3156
3157 vm_page_lockspin_queues();
3158 }
3159 q->pgo_busy = FALSE;
3160 q->pgo_idle = TRUE;
3161
3162 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3163 vm_page_unlock_queues();
3164
3165 thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
3166 /*NOTREACHED*/
3167 }
3168
3169
3170 uint32_t vm_compressor_failed;
3171
3172 static void
3173 vm_pageout_iothread_internal_continue(struct cq *cq)
3174 {
3175 struct vm_pageout_queue *q;
3176 vm_page_t m = NULL;
3177 vm_object_t object;
3178 memory_object_t pager;
3179 boolean_t pgo_draining;
3180 vm_page_t local_q;
3181 int local_cnt;
3182 vm_page_t local_freeq = NULL;
3183 int local_freed = 0;
3184 int local_batch_size;
3185 kern_return_t retval;
3186
3187
3188 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
3189
3190 q = cq->q;
3191 local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 4);
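	/*
	 * Added note (hypothetical numbers): each compressor thread takes at
	 * most a quarter of its share of the queue per pass, e.g. a
	 * pgo_maxlaundry of 128 with 2 compressor threads gives
	 * 128 / (2 * 4) = 16 pages per batch.
	 */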
3192
3193 while (TRUE) {
3194
3195 local_cnt = 0;
3196 local_q = NULL;
3197
3198 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3199
3200 vm_page_lock_queues();
3201
3202 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3203
3204 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3205
3206 while ( !queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
3207
3208 queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
3209
3210 VM_PAGE_CHECK(m);
3211
3212 m->pageout_queue = FALSE;
3213 m->pageq.prev = NULL;
3214
3215 m->pageq.next = (queue_entry_t)local_q;
3216 local_q = m;
3217 local_cnt++;
3218 }
3219 if (local_q == NULL)
3220 break;
3221
3222 q->pgo_busy = TRUE;
3223
3224 if ((pgo_draining = q->pgo_draining) == FALSE)
3225 vm_pageout_throttle_up_batch(q, local_cnt);
3226
3227 vm_page_unlock_queues();
3228
3229 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3230
3231 while (local_q) {
3232
3233 m = local_q;
3234 local_q = (vm_page_t)m->pageq.next;
3235 m->pageq.next = NULL;
3236
3237 if (m->object->object_slid) {
3238 panic("slid page %p not allowed on this path\n", m);
3239 }
3240
3241 object = m->object;
3242 pager = object->pager;
3243
3244 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
3245
3246 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
3247
3248 vm_object_lock(object);
3249
3250 /*
3251 * If there is no memory object for the page, create
3252 * one and hand it to the compression pager.
3253 */
3254
3255 if (!object->pager_initialized)
3256 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
3257 if (!object->pager_initialized)
3258 vm_object_compressor_pager_create(object);
3259
3260 if (!object->pager_initialized) {
3261 /*
3262 * Still no pager for the object.
3263 * Reactivate the page.
3264 *
3265 * Should only happen if there is no
3266 * compression pager
3267 */
3268 m->pageout = FALSE;
3269 m->laundry = FALSE;
3270 PAGE_WAKEUP_DONE(m);
3271
3272 vm_page_lockspin_queues();
3273 vm_page_activate(m);
3274 vm_pageout_dirty_no_pager++;
3275 vm_page_unlock_queues();
3276
3277 /*
3278 * And we are done with it.
3279 */
3280 vm_object_activity_end(object);
3281 vm_object_unlock(object);
3282
3283 continue;
3284 }
3285 pager = object->pager;
3286
3287 if (pager == MEMORY_OBJECT_NULL) {
3288 /*
3289 * This pager has been destroyed by either
3290 * memory_object_destroy or vm_object_destroy, and
3291 * so there is nowhere for the page to go.
3292 */
3293 if (m->pageout) {
3294 /*
3295 * Just free the page... VM_PAGE_FREE takes
3296 * care of cleaning up all the state...
3297 * including doing the vm_pageout_throttle_up
3298 */
3299 VM_PAGE_FREE(m);
3300 } else {
3301 m->laundry = FALSE;
3302 PAGE_WAKEUP_DONE(m);
3303
3304 vm_page_lockspin_queues();
3305 vm_page_activate(m);
3306 vm_page_unlock_queues();
3307
3308 /*
3309 * And we are done with it.
3310 */
3311 }
3312 vm_object_activity_end(object);
3313 vm_object_unlock(object);
3314
3315 continue;
3316 }
3317 vm_object_unlock(object);
3318
3319 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
3320 }
3321 while (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {
3322 kern_return_t wait_result;
3323 int need_wakeup = 0;
3324
3325 if (local_freeq) {
3326 vm_page_free_list(local_freeq, TRUE);
3327
3328 local_freeq = NULL;
3329 local_freed = 0;
3330
3331 continue;
3332 }
3333 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3334
3335 if (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {
3336
3337 if (vm_page_free_wanted_privileged++ == 0)
3338 need_wakeup = 1;
3339 wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
3340
3341 lck_mtx_unlock(&vm_page_queue_free_lock);
3342
3343 if (need_wakeup)
3344 thread_wakeup((event_t)&vm_page_free_wanted);
3345
3346 if (wait_result == THREAD_WAITING)
3347 thread_block(THREAD_CONTINUE_NULL);
3348 } else
3349 lck_mtx_unlock(&vm_page_queue_free_lock);
3350 }
3351 retval = vm_compressor_pager_put(pager, m->offset + object->paging_offset, m->phys_page, &cq->current_chead, cq->scratch_buf);
3352
3353 vm_object_lock(object);
3354 m->laundry = FALSE;
3355 m->pageout = FALSE;
3356
3357 if (retval == KERN_SUCCESS) {
3358
3359 vm_page_compressions_failing = FALSE;
3360
3361 VM_STAT_INCR(compressions);
3362
3363 if (m->tabled)
3364 vm_page_remove(m, TRUE);
3365 vm_object_activity_end(object);
3366 vm_object_unlock(object);
3367
3368 m->pageq.next = (queue_entry_t)local_freeq;
3369 local_freeq = m;
3370 local_freed++;
3371
3372 } else {
3373 PAGE_WAKEUP_DONE(m);
3374
3375 vm_page_lockspin_queues();
3376
3377 vm_page_activate(m);
3378 vm_compressor_failed++;
3379
3380 vm_page_compressions_failing = TRUE;
3381
3382 vm_page_unlock_queues();
3383
3384 vm_object_activity_end(object);
3385 vm_object_unlock(object);
3386 }
3387 }
3388 if (local_freeq) {
3389 vm_page_free_list(local_freeq, TRUE);
3390
3391 local_freeq = NULL;
3392 local_freed = 0;
3393 }
3394 if (pgo_draining == TRUE) {
3395 vm_page_lockspin_queues();
3396 vm_pageout_throttle_up_batch(q, local_cnt);
3397 vm_page_unlock_queues();
3398 }
3399 }
3400 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
3401
3402 /*
3403 * queue lock is held and our q is empty
3404 */
3405 q->pgo_busy = FALSE;
3406 q->pgo_idle = TRUE;
3407
3408 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3409 vm_page_unlock_queues();
3410
3411 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3412
3413 thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
3414 /*NOTREACHED*/
3415 }
3416
3417
3418
3419 static void
3420 vm_pageout_adjust_io_throttles(struct vm_pageout_queue *iq, struct vm_pageout_queue *eq, boolean_t req_lowpriority)
3421 {
3422 uint32_t policy;
3423 boolean_t set_iq = FALSE;
3424 boolean_t set_eq = FALSE;
3425
3426 if (hibernate_cleaning_in_progress == TRUE)
3427 req_lowpriority = FALSE;
3428
3429 if ((DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) && iq->pgo_inited == TRUE && iq->pgo_lowpriority != req_lowpriority)
3430 set_iq = TRUE;
3431
3432 if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority)
3433 set_eq = TRUE;
3434
3435 if (set_iq == TRUE || set_eq == TRUE) {
3436
3437 vm_page_unlock_queues();
3438
3439 if (req_lowpriority == TRUE) {
3440 policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
3441 DTRACE_VM(laundrythrottle);
3442 } else {
3443 policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
3444 DTRACE_VM(laundryunthrottle);
3445 }
3446 if (set_iq == TRUE) {
3447 proc_set_task_policy_thread(kernel_task, iq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
3448
3449 iq->pgo_lowpriority = req_lowpriority;
3450 }
3451 if (set_eq == TRUE) {
3452 proc_set_task_policy_thread(kernel_task, eq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
3453
3454 eq->pgo_lowpriority = req_lowpriority;
3455 }
3456 vm_page_lock_queues();
3457 }
3458 }
3459
3460
3461 static void
3462 vm_pageout_iothread_external(void)
3463 {
3464 thread_t self = current_thread();
3465
3466 self->options |= TH_OPT_VMPRIV;
3467
3468 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
3469
3470 proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
3471 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
3472
3473 vm_page_lock_queues();
3474
3475 vm_pageout_queue_external.pgo_tid = self->thread_id;
3476 vm_pageout_queue_external.pgo_lowpriority = TRUE;
3477 vm_pageout_queue_external.pgo_inited = TRUE;
3478
3479 vm_page_unlock_queues();
3480
3481 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
3482 vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
3483 else
3484 vm_pageout_iothread_continue(&vm_pageout_queue_external);
3485
3486 /*NOTREACHED*/
3487 }
3488
3489
3490 static void
3491 vm_pageout_iothread_internal(struct cq *cq)
3492 {
3493 thread_t self = current_thread();
3494
3495 self->options |= TH_OPT_VMPRIV;
3496
3497 if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
3498 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
3499
3500 proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
3501 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
3502 }
3503 vm_page_lock_queues();
3504
3505 vm_pageout_queue_internal.pgo_tid = self->thread_id;
3506 vm_pageout_queue_internal.pgo_lowpriority = TRUE;
3507 vm_pageout_queue_internal.pgo_inited = TRUE;
3508
3509 vm_page_unlock_queues();
3510
3511 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
3512 cq->q = &vm_pageout_queue_internal;
3513 cq->current_chead = NULL;
3514 cq->scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
3515
3516 vm_pageout_iothread_internal_continue(cq);
3517 } else
3518 vm_pageout_iothread_continue(&vm_pageout_queue_internal);
3519
3520 /*NOTREACHED*/
3521 }
3522
3523 kern_return_t
3524 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
3525 {
3526 if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
3527 return KERN_SUCCESS;
3528 } else {
3529 return KERN_FAILURE; /* Already set */
3530 }
3531 }
3532
3533
3534 extern boolean_t memorystatus_manual_testing_on;
3535 extern unsigned int memorystatus_level;
3536
3537
3538
3539 #if VM_PRESSURE_EVENTS
3540
3541 void
3542 vm_pressure_response(void)
3543 {
3544
3545
3546 vm_pressure_level_t old_level = kVMPressureNormal;
3547 int new_level = -1;
3548
3549 uint64_t available_memory = (((uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY) * 100);
3550
3551 memorystatus_level = (unsigned int) (available_memory / atop_64(max_mem));
3552
3553 if (memorystatus_manual_testing_on) {
3554 return;
3555 }
3556
3557 old_level = memorystatus_vm_pressure_level;
3558
3559 switch (memorystatus_vm_pressure_level) {
3560
3561 case kVMPressureNormal:
3562 {
3563 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
3564 new_level = kVMPressureCritical;
3565 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
3566 new_level = kVMPressureWarning;
3567 }
3568 break;
3569 }
3570
3571 case kVMPressureWarning:
3572 case kVMPressureUrgent:
3573 {
3574 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
3575 new_level = kVMPressureNormal;
3576 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
3577 new_level = kVMPressureCritical;
3578 }
3579 break;
3580 }
3581
3582 case kVMPressureCritical:
3583 {
3584 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
3585 new_level = kVMPressureNormal;
3586 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
3587 new_level = kVMPressureWarning;
3588 }
3589 break;
3590 }
3591
3592 default:
3593 return;
3594 }
3595
3596 if (new_level != -1) {
3597 memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
3598
3599 if (old_level != new_level) {
3600 if (vm_pressure_thread_running == FALSE) {
3601 thread_wakeup(&vm_pressure_thread);
3602 }
3603 thread_wakeup(&vm_pressure_changed);
3604 }
3605 }
3606
3607 }
3608 #endif /* VM_PRESSURE_EVENTS */
3609
3610 kern_return_t
3611 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) {
3612
3613 #if !VM_PRESSURE_EVENTS
3614
3615 return KERN_FAILURE;
3616
3617 #else /* VM_PRESSURE_EVENTS */
3618
3619 kern_return_t kr = KERN_SUCCESS;
3620
3621 if (pressure_level != NULL) {
3622
3623 vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
3624
3625 if (wait_for_pressure == TRUE) {
3626 wait_result_t wr = 0;
3627
3628 while (old_level == *pressure_level) {
3629 wr = assert_wait((event_t) &vm_pressure_changed,
3630 THREAD_INTERRUPTIBLE);
3631 if (wr == THREAD_WAITING) {
3632 wr = thread_block(THREAD_CONTINUE_NULL);
3633 }
3634 if (wr == THREAD_INTERRUPTED) {
3635 return KERN_ABORTED;
3636 }
3637 if (wr == THREAD_AWAKENED) {
3638
3639 old_level = memorystatus_vm_pressure_level;
3640
3641 if (old_level != *pressure_level) {
3642 break;
3643 }
3644 }
3645 }
3646 }
3647
3648 *pressure_level = old_level;
3649 kr = KERN_SUCCESS;
3650 } else {
3651 kr = KERN_INVALID_ARGUMENT;
3652 }
3653
3654 return kr;
3655 #endif /* VM_PRESSURE_EVENTS */
3656 }
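/*
 * Editor's note: a hedged example (not in the original source, compiled
 * out) showing the blocking mode of mach_vm_pressure_level_monitor()
 * implemented above.  The caller passes in the level it last observed;
 * with wait_for_pressure == TRUE the call sleeps on vm_pressure_changed
 * until the level differs, and returns KERN_ABORTED if the wait is
 * interrupted.  The function name is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void
example_watch_vm_pressure(void)
{
	unsigned int	level = kVMPressureNormal;
	kern_return_t	kr;

	for (;;) {
		kr = mach_vm_pressure_level_monitor(TRUE, &level);

		if (kr == KERN_ABORTED)
			continue;		/* interrupted wait... retry */
		if (kr != KERN_SUCCESS)
			break;
		/* 'level' now holds the new memorystatus_vm_pressure_level */
	}
}
#endif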
3657
3658 #if VM_PRESSURE_EVENTS
3659 void
3660 vm_pressure_thread(void) {
3661 static boolean_t set_up_thread = FALSE;
3662
3663 if (set_up_thread) {
3664 vm_pressure_thread_running = TRUE;
3665 consider_vm_pressure_events();
3666 vm_pressure_thread_running = FALSE;
3667 }
3668
3669 set_up_thread = TRUE;
3670 assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
3671 thread_block((thread_continue_t)vm_pressure_thread);
3672 }
3673 #endif /* VM_PRESSURE_EVENTS */
3674
3675
3676 uint32_t vm_pageout_considered_page_last = 0;
3677
3678 /*
3679 * called once per second via "compute_averages"
3680 */
3681 void
3682 compute_pageout_gc_throttle()
3683 {
3684 if (vm_pageout_considered_page != vm_pageout_considered_page_last) {
3685
3686 vm_pageout_considered_page_last = vm_pageout_considered_page;
3687
3688 thread_wakeup((event_t) &vm_pageout_garbage_collect);
3689 }
3690 }
3691
3692
3693 static void
3694 vm_pageout_garbage_collect(int collect)
3695 {
3696
3697 if (collect) {
3698 boolean_t buf_large_zfree = FALSE;
3699 boolean_t first_try = TRUE;
3700
3701 stack_collect();
3702
3703 consider_machine_collect();
3704
3705 do {
3706 if (consider_buffer_cache_collect != NULL) {
3707 buf_large_zfree = (*consider_buffer_cache_collect)(0);
3708 }
3709 if (first_try == TRUE || buf_large_zfree == TRUE) {
3710 /*
3711 * consider_zone_gc should be last, because the other operations
3712 * might return memory to zones.
3713 */
3714 consider_zone_gc(buf_large_zfree);
3715 }
3716 first_try = FALSE;
3717
3718 } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
3719
3720 consider_machine_adjust();
3721 }
3722 assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
3723
3724 thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
3725 /*NOTREACHED*/
3726 }
3727
3728
3729
3730 void
3731 vm_pageout(void)
3732 {
3733 thread_t self = current_thread();
3734 thread_t thread;
3735 kern_return_t result;
3736 spl_t s;
3737
3738 /*
3739 * Set thread privileges.
3740 */
3741 s = splsched();
3742 thread_lock(self);
3743 self->priority = BASEPRI_PREEMPT - 1;
3744 set_sched_pri(self, self->priority);
3745 thread_unlock(self);
3746
3747 if (!self->reserved_stack)
3748 self->reserved_stack = self->kernel_stack;
3749
3750 splx(s);
3751
3752 /*
3753 * Initialize some paging parameters.
3754 */
3755
3756 if (vm_pageout_swap_wait == 0)
3757 vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
3758
3759 if (vm_pageout_idle_wait == 0)
3760 vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
3761
3762 if (vm_pageout_burst_wait == 0)
3763 vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
3764
3765 if (vm_pageout_empty_wait == 0)
3766 vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
3767
3768 if (vm_pageout_deadlock_wait == 0)
3769 vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
3770
3771 if (vm_pageout_deadlock_relief == 0)
3772 vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
3773
3774 if (vm_pageout_inactive_relief == 0)
3775 vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
3776
3777 if (vm_pageout_burst_active_throttle == 0)
3778 vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
3779
3780 if (vm_pageout_burst_inactive_throttle == 0)
3781 vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
3782
3783 #if !CONFIG_JETSAM
3784 vm_page_filecache_min = (uint32_t) (max_mem / PAGE_SIZE) / 20;
3785 if (vm_page_filecache_min < VM_PAGE_FILECACHE_MIN)
3786 vm_page_filecache_min = VM_PAGE_FILECACHE_MIN;
3787 #endif
3788
3789 /*
3790 * Set kernel task to low backing store privileged
3791 * status
3792 */
3793 task_lock(kernel_task);
3794 kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
3795 task_unlock(kernel_task);
3796
3797 vm_page_free_count_init = vm_page_free_count;
3798
3799 /*
3800 * even if we've already called vm_page_free_reserve,
3801 * call it again here to ensure that the targets are
3802 * accurately calculated (it uses vm_page_free_count_init).
3803 * Calling it with an arg of 0 will not change the reserve
3804 * but will re-calculate free_min and free_target.
3805 */
3806 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
3807 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
3808 } else
3809 vm_page_free_reserve(0);
3810
3811
3812 queue_init(&vm_pageout_queue_external.pgo_pending);
3813 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
3814 vm_pageout_queue_external.pgo_laundry = 0;
3815 vm_pageout_queue_external.pgo_idle = FALSE;
3816 vm_pageout_queue_external.pgo_busy = FALSE;
3817 vm_pageout_queue_external.pgo_throttled = FALSE;
3818 vm_pageout_queue_external.pgo_draining = FALSE;
3819 vm_pageout_queue_external.pgo_lowpriority = FALSE;
3820 vm_pageout_queue_external.pgo_tid = -1;
3821 vm_pageout_queue_external.pgo_inited = FALSE;
3822
3823
3824 queue_init(&vm_pageout_queue_internal.pgo_pending);
3825 vm_pageout_queue_internal.pgo_maxlaundry = 0;
3826 vm_pageout_queue_internal.pgo_laundry = 0;
3827 vm_pageout_queue_internal.pgo_idle = FALSE;
3828 vm_pageout_queue_internal.pgo_busy = FALSE;
3829 vm_pageout_queue_internal.pgo_throttled = FALSE;
3830 vm_pageout_queue_internal.pgo_draining = FALSE;
3831 vm_pageout_queue_internal.pgo_lowpriority = FALSE;
3832 vm_pageout_queue_internal.pgo_tid = -1;
3833 vm_pageout_queue_internal.pgo_inited = FALSE;
3834
3835 /* internal pageout thread started when default pager registered first time */
3836 /* external pageout and garbage collection threads started here */
3837
3838 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
3839 BASEPRI_PREEMPT - 1,
3840 &vm_pageout_external_iothread);
3841 if (result != KERN_SUCCESS)
3842 panic("vm_pageout_iothread_external: create failed");
3843
3844 thread_deallocate(vm_pageout_external_iothread);
3845
3846 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
3847 BASEPRI_DEFAULT,
3848 &thread);
3849 if (result != KERN_SUCCESS)
3850 panic("vm_pageout_garbage_collect: create failed");
3851
3852 thread_deallocate(thread);
3853
3854 #if VM_PRESSURE_EVENTS
3855 result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
3856 BASEPRI_DEFAULT,
3857 &thread);
3858
3859 if (result != KERN_SUCCESS)
3860 panic("vm_pressure_thread: create failed");
3861
3862 thread_deallocate(thread);
3863 #endif
3864
3865 vm_object_reaper_init();
3866
3867 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
3868 vm_compressor_pager_init();
3869
3870 vm_pageout_continue();
3871
3872 /*
3873 * Unreached code!
3874 *
3875 * The vm_pageout_continue() call above never returns, so the code below is never
3876 * executed. We take advantage of this to declare several DTrace VM related probe
3877 * points that our kernel doesn't have an analog for. These are probe points that
3878 * exist in Solaris and are in the DTrace documentation, so people may have written
3879 * scripts that use them. Declaring the probe points here means their scripts will
3880 * compile and execute which we want for portability of the scripts, but since this
3881 * section of code is never reached, the probe points will simply never fire. Yes,
3882 * this is basically a hack. The problem is the DTrace probe points were chosen with
3883 * Solaris specific VM events in mind, not portability to different VM implementations.
3884 */
3885
3886 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
3887 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
3888 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
3889 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
3890 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
3891 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
3892 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
3893 /*NOTREACHED*/
3894 }
3895
3896
3897
3898 #define MAX_COMPRESSOR_THREAD_COUNT 8
3899
3900 struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
3901
3902 int vm_compressor_thread_count = 2;
3903
3904 kern_return_t
3905 vm_pageout_internal_start(void)
3906 {
3907 kern_return_t result;
3908 int i;
3909 host_basic_info_data_t hinfo;
3910
3911 if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
3912 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
3913 #define BSD_HOST 1
3914 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
3915
3916 assert(hinfo.max_cpus > 0);
3917
3918 if (vm_compressor_thread_count >= hinfo.max_cpus)
3919 vm_compressor_thread_count = hinfo.max_cpus - 1;
3920 if (vm_compressor_thread_count <= 0)
3921 vm_compressor_thread_count = 1;
3922 else if (vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT)
3923 vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
3924
3925 vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
3926 } else {
3927 vm_compressor_thread_count = 1;
3928 vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
3929 }
3930
3931 for (i = 0; i < vm_compressor_thread_count; i++) {
3932
3933 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
3934 if (result == KERN_SUCCESS)
3935 thread_deallocate(vm_pageout_internal_iothread);
3936 else
3937 break;
3938 }
3939 return result;
3940 }
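/*
 * Editor's note (worked example, not in the original source): with the
 * compressor active on an 8-CPU machine, the default
 * vm_compressor_thread_count of 2 survives the clamps above (it is only
 * reduced when it reaches max_cpus, and it is capped at the 8-thread
 * limit), so the internal queue gets
 * pgo_maxlaundry = (2 * 4) * VM_PAGE_LAUNDRY_MAX.  On a single-CPU
 * machine the count collapses to 1 and
 * pgo_maxlaundry = (1 * 4) * VM_PAGE_LAUNDRY_MAX.
 */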
3941
3942
3943 static upl_t
3944 upl_create(int type, int flags, upl_size_t size)
3945 {
3946 upl_t upl;
3947 vm_size_t page_field_size = 0;
3948 int upl_flags = 0;
3949 vm_size_t upl_size = sizeof(struct upl);
3950
3951 size = round_page_32(size);
3952
3953 if (type & UPL_CREATE_LITE) {
3954 page_field_size = (atop(size) + 7) >> 3;
3955 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
3956
3957 upl_flags |= UPL_LITE;
3958 }
3959 if (type & UPL_CREATE_INTERNAL) {
3960 upl_size += sizeof(struct upl_page_info) * atop(size);
3961
3962 upl_flags |= UPL_INTERNAL;
3963 }
3964 upl = (upl_t)kalloc(upl_size + page_field_size);
3965
3966 if (page_field_size)
3967 bzero((char *)upl + upl_size, page_field_size);
3968
3969 upl->flags = upl_flags | flags;
3970 upl->src_object = NULL;
3971 upl->kaddr = (vm_offset_t)0;
3972 upl->size = 0;
3973 upl->map_object = NULL;
3974 upl->ref_count = 1;
3975 upl->ext_ref_count = 0;
3976 upl->highest_page = 0;
3977 upl_lock_init(upl);
3978 upl->vector_upl = NULL;
3979 #if UPL_DEBUG
3980 upl->ubc_alias1 = 0;
3981 upl->ubc_alias2 = 0;
3982
3983 upl->upl_creator = current_thread();
3984 upl->upl_state = 0;
3985 upl->upl_commit_index = 0;
3986 bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
3987
3988 upl->uplq.next = 0;
3989 upl->uplq.prev = 0;
3990
3991 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
3992 #endif /* UPL_DEBUG */
3993
3994 return(upl);
3995 }
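/*
 * Editor's note (worked example, not in the original source): for a
 * 1 MB UPL with 4 KB pages, atop(size) is 256, so a UPL_CREATE_LITE
 * request reserves (256 + 7) >> 3 = 32 bytes of lite-list bitmap
 * (already a multiple of 4, so the rounding to a 4-byte boundary is a
 * no-op), one bit per page.  A UPL_CREATE_INTERNAL request additionally
 * appends 256 * sizeof(struct upl_page_info) entries after the struct
 * upl itself, which is where vm_object_upl_request() later points
 * user_page_list and lite_list.
 */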
3996
3997 static void
3998 upl_destroy(upl_t upl)
3999 {
4000 int page_field_size; /* bit field in word size buf */
4001 int size;
4002
4003 if (upl->ext_ref_count) {
4004 panic("upl(%p) ext_ref_count", upl);
4005 }
4006
4007 #if UPL_DEBUG
4008 if ( !(upl->flags & UPL_VECTOR)) {
4009 vm_object_t object;
4010
4011 if (upl->flags & UPL_SHADOWED) {
4012 object = upl->map_object->shadow;
4013 } else {
4014 object = upl->map_object;
4015 }
4016 vm_object_lock(object);
4017 queue_remove(&object->uplq, upl, upl_t, uplq);
4018 vm_object_activity_end(object);
4019 vm_object_collapse(object, 0, TRUE);
4020 vm_object_unlock(object);
4021 }
4022 #endif /* UPL_DEBUG */
4023 /*
4024 * drop a reference on the map_object whether or
4025 * not a pageout object is inserted
4026 */
4027 if (upl->flags & UPL_SHADOWED)
4028 vm_object_deallocate(upl->map_object);
4029
4030 if (upl->flags & UPL_DEVICE_MEMORY)
4031 size = PAGE_SIZE;
4032 else
4033 size = upl->size;
4034 page_field_size = 0;
4035
4036 if (upl->flags & UPL_LITE) {
4037 page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
4038 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
4039 }
4040 upl_lock_destroy(upl);
4041 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
4042
4043 if (upl->flags & UPL_INTERNAL) {
4044 kfree(upl,
4045 sizeof(struct upl) +
4046 (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
4047 + page_field_size);
4048 } else {
4049 kfree(upl, sizeof(struct upl) + page_field_size);
4050 }
4051 }
4052
4053 void
4054 upl_deallocate(upl_t upl)
4055 {
4056 if (--upl->ref_count == 0) {
4057 if(vector_upl_is_valid(upl))
4058 vector_upl_deallocate(upl);
4059 upl_destroy(upl);
4060 }
4061 }
4062
4063 #if DEVELOPMENT || DEBUG
4064 /*
4065 * Statistics about UPL enforcement of copy-on-write obligations.
4066 */
4067 unsigned long upl_cow = 0;
4068 unsigned long upl_cow_again = 0;
4069 unsigned long upl_cow_pages = 0;
4070 unsigned long upl_cow_again_pages = 0;
4071
4072 unsigned long iopl_cow = 0;
4073 unsigned long iopl_cow_pages = 0;
4074 #endif
4075
4076 /*
4077 * Routine: vm_object_upl_request
4078 * Purpose:
4079 * Cause the population of a portion of a vm_object.
4080 * Depending on the nature of the request, the pages
4081 * returned may contain valid data or be uninitialized.
4082 * A page list structure, listing the physical pages,
4083 * will be returned upon request.
4084 * This function is called by the file system or any other
4085 * supplier of backing store to a pager.
4086 * IMPORTANT NOTE: The caller must still respect the relationship
4087 * between the vm_object and its backing memory object. The
4088 * caller MUST NOT substitute changes in the backing file
4089 * without first doing a memory_object_lock_request on the
4090 * target range unless it is know that the pages are not
4091 * shared with another entity at the pager level.
4092 * Copy_in_to:
4093 * if a page list structure is present
4094 * return the mapped physical pages, where a
4095 * page is not present, return a non-initialized
4096 * one. If the no_sync bit is turned on, don't
4097 * call the pager unlock to synchronize with other
4098 * possible copies of the page. Leave pages busy
4099 * in the original object, if a page list structure
4100 * was specified. When a commit of the page list
4101 * pages is done, the dirty bit will be set for each one.
4102 * Copy_out_from:
4103 * If a page list structure is present, return
4104 * all mapped pages. Where a page does not exist
4105 * map a zero filled one. Leave pages busy in
4106 * the original object. If a page list structure
4107 * is not specified, this call is a no-op.
4108 *
4109 * Note: access of default pager objects has a rather interesting
4110 * twist. The caller of this routine, presumably the file system
4111 * page cache handling code, will never actually make a request
4112 * against a default pager backed object. Only the default
4113 * pager will make requests on backing store related vm_objects.
4114 * In this way the default pager can maintain the relationship
4115 * between backing store files (abstract memory objects) and
4116 * the vm_objects (cache objects) they support.
4117 *
4118 */
4119
4120 __private_extern__ kern_return_t
4121 vm_object_upl_request(
4122 vm_object_t object,
4123 vm_object_offset_t offset,
4124 upl_size_t size,
4125 upl_t *upl_ptr,
4126 upl_page_info_array_t user_page_list,
4127 unsigned int *page_list_count,
4128 int cntrl_flags)
4129 {
4130 vm_page_t dst_page = VM_PAGE_NULL;
4131 vm_object_offset_t dst_offset;
4132 upl_size_t xfer_size;
4133 unsigned int size_in_pages;
4134 boolean_t dirty;
4135 boolean_t hw_dirty;
4136 upl_t upl = NULL;
4137 unsigned int entry;
4138 #if MACH_CLUSTER_STATS
4139 boolean_t encountered_lrp = FALSE;
4140 #endif
4141 vm_page_t alias_page = NULL;
4142 int refmod_state = 0;
4143 wpl_array_t lite_list = NULL;
4144 vm_object_t last_copy_object;
4145 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
4146 struct vm_page_delayed_work *dwp;
4147 int dw_count;
4148 int dw_limit;
4149
4150 if (cntrl_flags & ~UPL_VALID_FLAGS) {
4151 /*
4152 * For forward compatibility's sake,
4153 * reject any unknown flag.
4154 */
4155 return KERN_INVALID_VALUE;
4156 }
4157 if ( (!object->internal) && (object->paging_offset != 0) )
4158 panic("vm_object_upl_request: external object with non-zero paging offset\n");
4159 if (object->phys_contiguous)
4160 panic("vm_object_upl_request: contiguous object specified\n");
4161
4162
4163 if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
4164 size = MAX_UPL_SIZE * PAGE_SIZE;
4165
4166 if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
4167 *page_list_count = MAX_UPL_SIZE;
4168
4169 if (cntrl_flags & UPL_SET_INTERNAL) {
4170 if (cntrl_flags & UPL_SET_LITE) {
4171
4172 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
4173
4174 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
4175 lite_list = (wpl_array_t)
4176 (((uintptr_t)user_page_list) +
4177 ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
4178 if (size == 0) {
4179 user_page_list = NULL;
4180 lite_list = NULL;
4181 }
4182 } else {
4183 upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
4184
4185 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
4186 if (size == 0) {
4187 user_page_list = NULL;
4188 }
4189 }
4190 } else {
4191 if (cntrl_flags & UPL_SET_LITE) {
4192
4193 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
4194
4195 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
4196 if (size == 0) {
4197 lite_list = NULL;
4198 }
4199 } else {
4200 upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
4201 }
4202 }
4203 *upl_ptr = upl;
4204
4205 if (user_page_list)
4206 user_page_list[0].device = FALSE;
4207
4208 if (cntrl_flags & UPL_SET_LITE) {
4209 upl->map_object = object;
4210 } else {
4211 upl->map_object = vm_object_allocate(size);
4212 /*
4213 * No need to lock the new object: nobody else knows
4214 * about it yet, so it's all ours so far.
4215 */
4216 upl->map_object->shadow = object;
4217 upl->map_object->pageout = TRUE;
4218 upl->map_object->can_persist = FALSE;
4219 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
4220 upl->map_object->vo_shadow_offset = offset;
4221 upl->map_object->wimg_bits = object->wimg_bits;
4222
4223 VM_PAGE_GRAB_FICTITIOUS(alias_page);
4224
4225 upl->flags |= UPL_SHADOWED;
4226 }
4227 /*
4228 * ENCRYPTED SWAP:
4229 * Just mark the UPL as "encrypted" here.
4230 * We'll actually encrypt the pages later,
4231 * in upl_encrypt(), when the caller has
4232 * selected which pages need to go to swap.
4233 */
4234 if (cntrl_flags & UPL_ENCRYPT)
4235 upl->flags |= UPL_ENCRYPTED;
4236
4237 if (cntrl_flags & UPL_FOR_PAGEOUT)
4238 upl->flags |= UPL_PAGEOUT;
4239
4240 vm_object_lock(object);
4241 vm_object_activity_begin(object);
4242
4243 /*
4244 * we can lock in the paging_offset once paging_in_progress is set
4245 */
4246 upl->size = size;
4247 upl->offset = offset + object->paging_offset;
4248
4249 #if UPL_DEBUG
4250 vm_object_activity_begin(object);
4251 queue_enter(&object->uplq, upl, upl_t, uplq);
4252 #endif /* UPL_DEBUG */
4253
4254 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
4255 /*
4256 * Honor copy-on-write obligations
4257 *
4258 * The caller is gathering these pages and
4259 * might modify their contents. We need to
4260 * make sure that the copy object has its own
4261 * private copies of these pages before we let
4262 * the caller modify them.
4263 */
4264 vm_object_update(object,
4265 offset,
4266 size,
4267 NULL,
4268 NULL,
4269 FALSE, /* should_return */
4270 MEMORY_OBJECT_COPY_SYNC,
4271 VM_PROT_NO_CHANGE);
4272 #if DEVELOPMENT || DEBUG
4273 upl_cow++;
4274 upl_cow_pages += size >> PAGE_SHIFT;
4275 #endif
4276 }
4277 /*
4278 * remember which copy object we synchronized with
4279 */
4280 last_copy_object = object->copy;
4281 entry = 0;
4282
4283 xfer_size = size;
4284 dst_offset = offset;
4285 size_in_pages = size / PAGE_SIZE;
4286
4287 dwp = &dw_array[0];
4288 dw_count = 0;
4289 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
4290
4291 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
4292 object->resident_page_count < (MAX_UPL_SIZE * 2))
4293 object->scan_collisions = 0;
4294
4295 while (xfer_size) {
4296
4297 dwp->dw_mask = 0;
4298
4299 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
4300 vm_object_unlock(object);
4301 VM_PAGE_GRAB_FICTITIOUS(alias_page);
4302 vm_object_lock(object);
4303 }
4304 if (cntrl_flags & UPL_COPYOUT_FROM) {
4305 upl->flags |= UPL_PAGE_SYNC_DONE;
4306
4307 if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
4308 dst_page->fictitious ||
4309 dst_page->absent ||
4310 dst_page->error ||
4311 dst_page->cleaning ||
4312 (VM_PAGE_WIRED(dst_page))) {
4313
4314 if (user_page_list)
4315 user_page_list[entry].phys_addr = 0;
4316
4317 goto try_next_page;
4318 }
4319 /*
4320 * grab this up front...
4321 * a high percentage of the time we're going to
4322 * need the hardware modification state a bit later
4323 * anyway... so we can eliminate an extra call into
4324 * the pmap layer by grabbing it here and recording it
4325 */
4326 if (dst_page->pmapped)
4327 refmod_state = pmap_get_refmod(dst_page->phys_page);
4328 else
4329 refmod_state = 0;
4330
4331 if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
4332 /*
4333 * page is on inactive list and referenced...
4334 * reactivate it now... this gets it out of the
4335 * way of vm_pageout_scan which would have to
4336 * reactivate it upon tripping over it
4337 */
4338 dwp->dw_mask |= DW_vm_page_activate;
4339 }
4340 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
4341 /*
4342 * we're only asking for DIRTY pages to be returned
4343 */
4344 if (dst_page->laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
4345 /*
4346 * if we were the page stolen by vm_pageout_scan to be
4347 * cleaned (as opposed to a buddy being clustered in
4348 * or this request is not being driven by a PAGEOUT cluster
4349 * then we only need to check for the page being dirty or
4350 * precious to decide whether to return it
4351 */
4352 if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
4353 goto check_busy;
4354 goto dont_return;
4355 }
4356 /*
4357 * this is a request for a PAGEOUT cluster and this page
4358 * is merely along for the ride as a 'buddy'... not only
4359 * does it have to be dirty to be returned, but it also
4360 * can't have been referenced recently...
4361 */
4362 if ( (hibernate_cleaning_in_progress == TRUE ||
4363 (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) || dst_page->throttled)) &&
4364 ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
4365 goto check_busy;
4366 }
4367 dont_return:
4368 /*
4369 * if we reach here, we're not to return
4370 * the page... go on to the next one
4371 */
4372 if (dst_page->laundry == TRUE) {
4373 /*
4374 * if we get here, the page is not 'cleaning' (filtered out above).
4375 * since it has been referenced, remove it from the laundry
4376 * so we don't pay the cost of an I/O to clean a page
4377 * we're just going to take back
4378 */
4379 vm_page_lockspin_queues();
4380
4381 vm_pageout_steal_laundry(dst_page, TRUE);
4382 vm_page_activate(dst_page);
4383
4384 vm_page_unlock_queues();
4385 }
4386 if (user_page_list)
4387 user_page_list[entry].phys_addr = 0;
4388
4389 goto try_next_page;
4390 }
4391 check_busy:
4392 if (dst_page->busy) {
4393 if (cntrl_flags & UPL_NOBLOCK) {
4394 if (user_page_list)
4395 user_page_list[entry].phys_addr = 0;
4396
4397 goto try_next_page;
4398 }
4399 /*
4400 * someone else is playing with the
4401 * page. We will have to wait.
4402 */
4403 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
4404
4405 continue;
4406 }
4407 /*
4408 * ENCRYPTED SWAP:
4409 * The caller is gathering this page and might
4410 * access its contents later on. Decrypt the
4411 * page before adding it to the UPL, so that
4412 * the caller never sees encrypted data.
4413 */
4414 if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
4415 int was_busy;
4416
4417 /*
4418 * save the current state of busy
4419 * mark page as busy while decrypt
4420 * is in progress since it will drop
4421 * the object lock...
4422 */
4423 was_busy = dst_page->busy;
4424 dst_page->busy = TRUE;
4425
4426 vm_page_decrypt(dst_page, 0);
4427 vm_page_decrypt_for_upl_counter++;
4428 /*
4429 * restore to original busy state
4430 */
4431 dst_page->busy = was_busy;
4432 }
4433 if (dst_page->pageout_queue == TRUE) {
4434
4435 vm_page_lockspin_queues();
4436
4437 if (dst_page->pageout_queue == TRUE) {
4438 /*
4439 * we've buddied up a page for a clustered pageout
4440 * that has already been moved to the pageout
4441 * queue by pageout_scan... we need to remove
4442 * it from the queue and drop the laundry count
4443 * on that queue
4444 */
4445 vm_pageout_throttle_up(dst_page);
4446 }
4447 vm_page_unlock_queues();
4448 }
4449 #if MACH_CLUSTER_STATS
4450 /*
4451 * pageout statistics gathering. count
4452 * all the pages we will page out that
4453 * were not counted in the initial
4454 * vm_pageout_scan work
4455 */
4456 if (dst_page->pageout)
4457 encountered_lrp = TRUE;
4458 if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious))) {
4459 if (encountered_lrp)
4460 CLUSTER_STAT(pages_at_higher_offsets++;)
4461 else
4462 CLUSTER_STAT(pages_at_lower_offsets++;)
4463 }
4464 #endif
4465 hw_dirty = refmod_state & VM_MEM_MODIFIED;
4466 dirty = hw_dirty ? TRUE : dst_page->dirty;
4467
4468 if (dst_page->phys_page > upl->highest_page)
4469 upl->highest_page = dst_page->phys_page;
4470
4471 if (cntrl_flags & UPL_SET_LITE) {
4472 unsigned int pg_num;
4473
4474 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
4475 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
4476 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
4477
4478 if (hw_dirty)
4479 pmap_clear_modify(dst_page->phys_page);
4480
4481 /*
4482 * Mark original page as cleaning
4483 * in place.
4484 */
4485 dst_page->cleaning = TRUE;
4486 dst_page->precious = FALSE;
4487 } else {
4488 /*
4489 * use pageclean setup; it is more
4490 * convenient even for the pageout
4491 * cases here
4492 */
4493 vm_object_lock(upl->map_object);
4494 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
4495 vm_object_unlock(upl->map_object);
4496
4497 alias_page->absent = FALSE;
4498 alias_page = NULL;
4499 }
4500 #if MACH_PAGEMAP
4501 /*
4502 * Record that this page has been
4503 * written out
4504 */
4505 vm_external_state_set(object->existence_map, dst_page->offset);
4506 #endif /*MACH_PAGEMAP*/
4507 if (dirty) {
4508 SET_PAGE_DIRTY(dst_page, FALSE);
4509 } else {
4510 dst_page->dirty = FALSE;
4511 }
4512
4513 if (!dirty)
4514 dst_page->precious = TRUE;
4515
4516 if ( (cntrl_flags & UPL_ENCRYPT) ) {
4517 /*
4518 * ENCRYPTED SWAP:
4519 * We want to deny access to the target page
4520 * because its contents are about to be
4521 * encrypted and the user would be very
4522 * confused to see encrypted data instead
4523 * of their data.
4524 * We also set "encrypted_cleaning" to allow
4525 * vm_pageout_scan() to demote that page
4526 * from "adjacent/clean-in-place" to
4527 * "target/clean-and-free" if it bumps into
4528 * this page during its scanning while we're
4529 * still processing this cluster.
4530 */
4531 dst_page->busy = TRUE;
4532 dst_page->encrypted_cleaning = TRUE;
4533 }
4534 if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
4535 if ( !VM_PAGE_WIRED(dst_page))
4536 dst_page->pageout = TRUE;
4537 }
4538 } else {
4539 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
4540 /*
4541 * Honor copy-on-write obligations
4542 *
4543 * The copy object has changed since we
4544 * last synchronized for copy-on-write.
4545 * Another copy object might have been
4546 * inserted while we released the object's
4547 * lock. Since someone could have seen the
4548 * original contents of the remaining pages
4549 * through that new object, we have to
4550 * synchronize with it again for the remaining
4551 * pages only. The previous pages are "busy"
4552 * so they can not be seen through the new
4553 * mapping. The new mapping will see our
4554 * upcoming changes for those previous pages,
4555 * but that's OK since they couldn't see what
4556 * was there before. It's just a race anyway
4557 * and there's no guarantee of consistency or
4558 * atomicity. We just don't want new mappings
4559 * to see both the *before* and *after* pages.
4560 */
4561 if (object->copy != VM_OBJECT_NULL) {
4562 vm_object_update(
4563 object,
4564 dst_offset,/* current offset */
4565 xfer_size, /* remaining size */
4566 NULL,
4567 NULL,
4568 FALSE, /* should_return */
4569 MEMORY_OBJECT_COPY_SYNC,
4570 VM_PROT_NO_CHANGE);
4571
4572 #if DEVELOPMENT || DEBUG
4573 upl_cow_again++;
4574 upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
4575 #endif
4576 }
4577 /*
4578 * remember the copy object we synced with
4579 */
4580 last_copy_object = object->copy;
4581 }
4582 dst_page = vm_page_lookup(object, dst_offset);
4583
4584 if (dst_page != VM_PAGE_NULL) {
4585
4586 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
4587 /*
4588 * skip over pages already present in the cache
4589 */
4590 if (user_page_list)
4591 user_page_list[entry].phys_addr = 0;
4592
4593 goto try_next_page;
4594 }
4595 if (dst_page->fictitious) {
4596 panic("need corner case for fictitious page");
4597 }
4598
4599 if (dst_page->busy || dst_page->cleaning) {
4600 /*
4601 * someone else is playing with the
4602 * page. We will have to wait.
4603 */
4604 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
4605
4606 continue;
4607 }
4608 if (dst_page->laundry) {
4609 dst_page->pageout = FALSE;
4610
4611 vm_pageout_steal_laundry(dst_page, FALSE);
4612 }
4613 } else {
4614 if (object->private) {
4615 /*
4616 * This is a nasty wrinkle for users
4617 * of upl who encounter device or
4618 * private memory however, it is
4619 * unavoidable, only a fault can
4620 * resolve the actual backing
4621 * physical page by asking the
4622 * backing device.
4623 */
4624 if (user_page_list)
4625 user_page_list[entry].phys_addr = 0;
4626
4627 goto try_next_page;
4628 }
4629 if (object->scan_collisions) {
4630 /*
4631 * the pageout_scan thread is trying to steal
4632 * pages from this object, but has run into our
4633 * lock... grab 2 pages from the head of the object...
4634 * the first is freed on behalf of pageout_scan, the
4635 * 2nd is for our own use... we use vm_object_page_grab
4636 * in both cases to avoid taking pages from the free
4637 * list since we are under memory pressure and our
4638 * lock on this object is getting in the way of
4639 * relieving it
4640 */
4641 dst_page = vm_object_page_grab(object);
4642
4643 if (dst_page != VM_PAGE_NULL)
4644 vm_page_release(dst_page);
4645
4646 dst_page = vm_object_page_grab(object);
4647 }
4648 if (dst_page == VM_PAGE_NULL) {
4649 /*
4650 * need to allocate a page
4651 */
4652 dst_page = vm_page_grab();
4653 }
4654 if (dst_page == VM_PAGE_NULL) {
4655 if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
4656 /*
4657 * we don't want to stall waiting for pages to come onto the free list
4658 * while we're already holding absent pages in this UPL
4659 * the caller will deal with the empty slots
4660 */
4661 if (user_page_list)
4662 user_page_list[entry].phys_addr = 0;
4663
4664 goto try_next_page;
4665 }
4666 /*
4667 * no pages available... wait
4668 * then try again for the same
4669 * offset...
4670 */
4671 vm_object_unlock(object);
4672
4673 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
4674
4675 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
4676
4677 VM_PAGE_WAIT();
4678 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
4679
4680 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
4681
4682 vm_object_lock(object);
4683
4684 continue;
4685 }
4686 vm_page_insert(dst_page, object, dst_offset);
4687
4688 dst_page->absent = TRUE;
4689 dst_page->busy = FALSE;
4690
4691 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
4692 /*
4693 * if UPL_RET_ONLY_ABSENT was specified,
4694 * then we're definitely setting up a
4695 * upl for a clustered read/pagein
4696 * operation... mark the pages as clustered
4697 * so upl_commit_range can put them on the
4698 * speculative list
4699 */
4700 dst_page->clustered = TRUE;
4701 }
4702 }
4703 /*
4704 * ENCRYPTED SWAP:
4705 */
4706 if (cntrl_flags & UPL_ENCRYPT) {
4707 /*
4708 * The page is going to be encrypted when we
4709 * get it from the pager, so mark it so.
4710 */
4711 dst_page->encrypted = TRUE;
4712 } else {
4713 /*
4714 * Otherwise, the page will not contain
4715 * encrypted data.
4716 */
4717 dst_page->encrypted = FALSE;
4718 }
4719 dst_page->overwriting = TRUE;
4720
4721 if (dst_page->pmapped) {
4722 if ( !(cntrl_flags & UPL_FILE_IO))
4723 /*
4724 * eliminate all mappings from the
4725 * original object and its progeny
4726 */
4727 refmod_state = pmap_disconnect(dst_page->phys_page);
4728 else
4729 refmod_state = pmap_get_refmod(dst_page->phys_page);
4730 } else
4731 refmod_state = 0;
4732
4733 hw_dirty = refmod_state & VM_MEM_MODIFIED;
4734 dirty = hw_dirty ? TRUE : dst_page->dirty;
4735
4736 if (cntrl_flags & UPL_SET_LITE) {
4737 unsigned int pg_num;
4738
4739 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
4740 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
4741 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
4742
4743 if (hw_dirty)
4744 pmap_clear_modify(dst_page->phys_page);
4745
4746 /*
4747 * Mark original page as cleaning
4748 * in place.
4749 */
4750 dst_page->cleaning = TRUE;
4751 dst_page->precious = FALSE;
4752 } else {
4753 /*
4754 * use pageclean setup; it is more
4755 * convenient even for the pageout
4756 * cases here
4757 */
4758 vm_object_lock(upl->map_object);
4759 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
4760 vm_object_unlock(upl->map_object);
4761
4762 alias_page->absent = FALSE;
4763 alias_page = NULL;
4764 }
4765
4766 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
4767 upl->flags &= ~UPL_CLEAR_DIRTY;
4768 upl->flags |= UPL_SET_DIRTY;
4769 dirty = TRUE;
4771 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
4772 /*
4773 * clean in place for read implies
4774 * that a write will be done on all
4775 * the pages that are dirty before
4776 * a upl commit is done. The caller
4777 * is obligated to preserve the
4778 * contents of all pages marked dirty
4779 */
4780 upl->flags |= UPL_CLEAR_DIRTY;
4781 }
4782 dst_page->dirty = dirty;
4783
4784 if (!dirty)
4785 dst_page->precious = TRUE;
4786
4787 if ( !VM_PAGE_WIRED(dst_page)) {
4788 /*
4789 * deny access to the target page while
4790 * it is being worked on
4791 */
4792 dst_page->busy = TRUE;
4793 } else
4794 dwp->dw_mask |= DW_vm_page_wire;
4795
4796 /*
4797 * We might be about to satisfy a fault which has been
4798 * requested. So no need for the "restart" bit.
4799 */
4800 dst_page->restart = FALSE;
4801 if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
4802 /*
4803 * expect the page to be used
4804 */
4805 dwp->dw_mask |= DW_set_reference;
4806 }
4807 if (cntrl_flags & UPL_PRECIOUS) {
4808 if (dst_page->object->internal) {
4809 SET_PAGE_DIRTY(dst_page, FALSE);
4810 dst_page->precious = FALSE;
4811 } else {
4812 dst_page->precious = TRUE;
4813 }
4814 } else {
4815 dst_page->precious = FALSE;
4816 }
4817 }
4818 if (dst_page->busy)
4819 upl->flags |= UPL_HAS_BUSY;
4820
4821 if (dst_page->phys_page > upl->highest_page)
4822 upl->highest_page = dst_page->phys_page;
4823 if (user_page_list) {
4824 user_page_list[entry].phys_addr = dst_page->phys_page;
4825 user_page_list[entry].pageout = dst_page->pageout;
4826 user_page_list[entry].absent = dst_page->absent;
4827 user_page_list[entry].dirty = dst_page->dirty;
4828 user_page_list[entry].precious = dst_page->precious;
4829 user_page_list[entry].device = FALSE;
4830 user_page_list[entry].needed = FALSE;
4831 if (dst_page->clustered == TRUE)
4832 user_page_list[entry].speculative = dst_page->speculative;
4833 else
4834 user_page_list[entry].speculative = FALSE;
4835 user_page_list[entry].cs_validated = dst_page->cs_validated;
4836 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
4837 }
4838 /*
4839 * if UPL_RET_ONLY_ABSENT is set, then
4840 * we are working with a fresh page and we've
4841 * just set the clustered flag on it to
4842 * indicate that it was dragged in as part of a
4843 * speculative cluster... so leave it alone
4844 */
4845 if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
4846 /*
4847 * someone is explicitly grabbing this page...
4848 * update clustered and speculative state
4849 *
4850 */
4851 VM_PAGE_CONSUME_CLUSTERED(dst_page);
4852 }
4853 try_next_page:
4854 if (dwp->dw_mask) {
4855 if (dwp->dw_mask & DW_vm_page_activate)
4856 VM_STAT_INCR(reactivations);
4857
4858 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
4859
4860 if (dw_count >= dw_limit) {
4861 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
4862
4863 dwp = &dw_array[0];
4864 dw_count = 0;
4865 }
4866 }
4867 entry++;
4868 dst_offset += PAGE_SIZE_64;
4869 xfer_size -= PAGE_SIZE;
4870 }
4871 if (dw_count)
4872 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
4873
4874 if (alias_page != NULL) {
4875 VM_PAGE_FREE(alias_page);
4876 }
4877
4878 if (page_list_count != NULL) {
4879 if (upl->flags & UPL_INTERNAL)
4880 *page_list_count = 0;
4881 else if (*page_list_count > entry)
4882 *page_list_count = entry;
4883 }
4884 #if UPL_DEBUG
4885 upl->upl_state = 1;
4886 #endif
4887 vm_object_unlock(object);
4888
4889 return KERN_SUCCESS;
4890 }
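/*
 * Editor's note: a reduced sketch (not in the original source, compiled
 * out) of the delayed-work idiom used by vm_object_upl_request() above.
 * Page-queue updates are staged in a small on-stack array and flushed
 * in batches by vm_page_do_delayed_work(), so the page-queue lock is
 * taken once per batch instead of once per page while the object lock
 * is held.  The function name and the DW_set_reference choice are
 * illustrative only.
 */
#if 0	/* illustrative sketch only */
static void
example_delayed_work_batch(vm_object_t object, vm_object_offset_t offset, int npages)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	vm_page_t			m;
	int				i;

	vm_object_lock(object);

	for (i = 0; i < npages; i++, offset += PAGE_SIZE_64) {

		if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			continue;

		dwp->dw_mask = DW_set_reference;	/* stage the page-queue work */

		VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

		if (dw_count >= dw_limit) {		/* flush a full batch */
			vm_page_do_delayed_work(object, &dw_array[0], dw_count);

			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)					/* flush the remainder */
		vm_page_do_delayed_work(object, &dw_array[0], dw_count);

	vm_object_unlock(object);
}
#endif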
4891
4892 /* JMM - Backward compatibility for now */
4893 kern_return_t
4894 vm_fault_list_request( /* forward */
4895 memory_object_control_t control,
4896 vm_object_offset_t offset,
4897 upl_size_t size,
4898 upl_t *upl_ptr,
4899 upl_page_info_t **user_page_list_ptr,
4900 unsigned int page_list_count,
4901 int cntrl_flags);
4902 kern_return_t
4903 vm_fault_list_request(
4904 memory_object_control_t control,
4905 vm_object_offset_t offset,
4906 upl_size_t size,
4907 upl_t *upl_ptr,
4908 upl_page_info_t **user_page_list_ptr,
4909 unsigned int page_list_count,
4910 int cntrl_flags)
4911 {
4912 unsigned int local_list_count;
4913 upl_page_info_t *user_page_list;
4914 kern_return_t kr;
4915
4916 if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
4917 return KERN_INVALID_ARGUMENT;
4918
4919 if (user_page_list_ptr != NULL) {
4920 local_list_count = page_list_count;
4921 user_page_list = *user_page_list_ptr;
4922 } else {
4923 local_list_count = 0;
4924 user_page_list = NULL;
4925 }
4926 kr = memory_object_upl_request(control,
4927 offset,
4928 size,
4929 upl_ptr,
4930 user_page_list,
4931 &local_list_count,
4932 cntrl_flags);
4933
4934 if(kr != KERN_SUCCESS)
4935 return kr;
4936
4937 if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
4938 *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
4939 }
4940
4941 return KERN_SUCCESS;
4942 }
4943
4944
4945
4946 /*
4947 * Routine: vm_object_super_upl_request
4948 * Purpose:
4949 * Cause the population of a portion of a vm_object
4950 * in much the same way as memory_object_upl_request.
4951 * Depending on the nature of the request, the pages
4952 * returned may contain valid data or be uninitialized.
4953 * However, the region may be expanded up to the super
4954 * cluster size provided.
4955 */
4956
4957 __private_extern__ kern_return_t
4958 vm_object_super_upl_request(
4959 vm_object_t object,
4960 vm_object_offset_t offset,
4961 upl_size_t size,
4962 upl_size_t super_cluster,
4963 upl_t *upl,
4964 upl_page_info_t *user_page_list,
4965 unsigned int *page_list_count,
4966 int cntrl_flags)
4967 {
4968 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
4969 return KERN_FAILURE;
4970
4971 assert(object->paging_in_progress);
4972 offset = offset - object->paging_offset;
4973
4974 if (super_cluster > size) {
4975
4976 vm_object_offset_t base_offset;
4977 upl_size_t super_size;
4978 vm_object_size_t super_size_64;
4979
4980 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
4981 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
4982 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
4983 super_size = (upl_size_t) super_size_64;
4984 assert(super_size == super_size_64);
4985
4986 if (offset > (base_offset + super_size)) {
4987 panic("vm_object_super_upl_request: Missed target pageout"
4988 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
4989 offset, base_offset, super_size, super_cluster,
4990 size, object->paging_offset);
4991 }
4992 /*
4993 * apparently there is a case where the vm requests a
4994 * page to be written out whose offset is beyond the
4995 * object size
4996 */
4997 if ((offset + size) > (base_offset + super_size)) {
4998 super_size_64 = (offset + size) - base_offset;
4999 super_size = (upl_size_t) super_size_64;
5000 assert(super_size == super_size_64);
5001 }
5002
5003 offset = base_offset;
5004 size = super_size;
5005 }
5006 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
5007 }
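/*
 * Editor's note (worked example, not in the original source): assume a
 * hypothetical 64 KB super_cluster, offset 0x1f000 and size 0x3000
 * (after the paging_offset adjustment).  base_offset rounds down to
 * 0x10000; since 0x1f000 + 0x3000 = 0x22000 spills past
 * base_offset + 64 KB = 0x20000, super_size doubles to 128 KB, is then
 * clamped to the end of the object if necessary, and the underlying
 * vm_object_upl_request() is issued for the expanded range
 * [0x10000, 0x30000) rather than the original [0x1f000, 0x22000).
 */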
5008
5009
5010 kern_return_t
5011 vm_map_create_upl(
5012 vm_map_t map,
5013 vm_map_address_t offset,
5014 upl_size_t *upl_size,
5015 upl_t *upl,
5016 upl_page_info_array_t page_list,
5017 unsigned int *count,
5018 int *flags)
5019 {
5020 vm_map_entry_t entry;
5021 int caller_flags;
5022 int force_data_sync;
5023 int sync_cow_data;
5024 vm_object_t local_object;
5025 vm_map_offset_t local_offset;
5026 vm_map_offset_t local_start;
5027 kern_return_t ret;
5028
5029 caller_flags = *flags;
5030
5031 if (caller_flags & ~UPL_VALID_FLAGS) {
5032 /*
5033 * For forward compatibility's sake,
5034 * reject any unknown flag.
5035 */
5036 return KERN_INVALID_VALUE;
5037 }
5038 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
5039 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
5040
5041 if (upl == NULL)
5042 return KERN_INVALID_ARGUMENT;
5043
5044 REDISCOVER_ENTRY:
5045 vm_map_lock_read(map);
5046
5047 if (vm_map_lookup_entry(map, offset, &entry)) {
5048
5049 if ((entry->vme_end - offset) < *upl_size) {
5050 *upl_size = (upl_size_t) (entry->vme_end - offset);
5051 assert(*upl_size == entry->vme_end - offset);
5052 }
5053
5054 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
5055 *flags = 0;
5056
5057 if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
5058 if (entry->object.vm_object->private)
5059 *flags = UPL_DEV_MEMORY;
5060
5061 if (entry->object.vm_object->phys_contiguous)
5062 *flags |= UPL_PHYS_CONTIG;
5063 }
5064 vm_map_unlock_read(map);
5065
5066 return KERN_SUCCESS;
5067 }
5068
5069 if (entry->is_sub_map) {
5070 vm_map_t submap;
5071
5072 submap = entry->object.sub_map;
5073 local_start = entry->vme_start;
5074 local_offset = entry->offset;
5075
5076 vm_map_reference(submap);
5077 vm_map_unlock_read(map);
5078
5079 ret = vm_map_create_upl(submap,
5080 local_offset + (offset - local_start),
5081 upl_size, upl, page_list, count, flags);
5082 vm_map_deallocate(submap);
5083
5084 return ret;
5085 }
5086
5087 if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
5088 if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
5089 *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
5090 }
5091 /*
5092 * Create an object if necessary.
5093 */
5094 if (entry->object.vm_object == VM_OBJECT_NULL) {
5095
5096 if (vm_map_lock_read_to_write(map))
5097 goto REDISCOVER_ENTRY;
5098
5099 entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
5100 entry->offset = 0;
5101
5102 vm_map_lock_write_to_read(map);
5103 }
5104 if (!(caller_flags & UPL_COPYOUT_FROM)) {
5105 if (!(entry->protection & VM_PROT_WRITE)) {
5106 vm_map_unlock_read(map);
5107 return KERN_PROTECTION_FAILURE;
5108 }
5109
5110 local_object = entry->object.vm_object;
5111 if (vm_map_entry_should_cow_for_true_share(entry) &&
5112 local_object->vo_size > *upl_size &&
5113 *upl_size != 0) {
5114 vm_prot_t prot;
5115
5116 /*
5117 * Set up the targeted range for copy-on-write to avoid
5118 * applying true_share/copy_delay to the entire object.
5119 */
5120
5121 if (vm_map_lock_read_to_write(map)) {
5122 goto REDISCOVER_ENTRY;
5123 }
5124
5125 vm_map_clip_start(map,
5126 entry,
5127 vm_map_trunc_page(offset,
5128 VM_MAP_PAGE_MASK(map)));
5129 vm_map_clip_end(map,
5130 entry,
5131 vm_map_round_page(offset + *upl_size,
5132 VM_MAP_PAGE_MASK(map)));
5133 prot = entry->protection & ~VM_PROT_WRITE;
5134 if (override_nx(map, entry->alias) && prot)
5135 prot |= VM_PROT_EXECUTE;
5136 vm_object_pmap_protect(local_object,
5137 entry->offset,
5138 entry->vme_end - entry->vme_start,
5139 ((entry->is_shared || map->mapped_in_other_pmaps)
5140 ? PMAP_NULL
5141 : map->pmap),
5142 entry->vme_start,
5143 prot);
5144 entry->needs_copy = TRUE;
5145
5146 vm_map_lock_write_to_read(map);
5147 }
5148
5149 if (entry->needs_copy) {
5150 /*
5151 * Honor copy-on-write for COPY_SYMMETRIC
5152 * strategy.
5153 */
5154 vm_map_t local_map;
5155 vm_object_t object;
5156 vm_object_offset_t new_offset;
5157 vm_prot_t prot;
5158 boolean_t wired;
5159 vm_map_version_t version;
5160 vm_map_t real_map;
5161
5162 local_map = map;
5163
5164 if (vm_map_lookup_locked(&local_map,
5165 offset, VM_PROT_WRITE,
5166 OBJECT_LOCK_EXCLUSIVE,
5167 &version, &object,
5168 &new_offset, &prot, &wired,
5169 NULL,
5170 &real_map) != KERN_SUCCESS) {
5171 vm_map_unlock_read(local_map);
5172 return KERN_FAILURE;
5173 }
5174 if (real_map != map)
5175 vm_map_unlock(real_map);
5176 vm_map_unlock_read(local_map);
5177
5178 vm_object_unlock(object);
5179
5180 goto REDISCOVER_ENTRY;
5181 }
5182 }
5183 if (sync_cow_data) {
5184 if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
5185 local_object = entry->object.vm_object;
5186 local_start = entry->vme_start;
5187 local_offset = entry->offset;
5188
5189 vm_object_reference(local_object);
5190 vm_map_unlock_read(map);
5191
5192 if (local_object->shadow && local_object->copy) {
5193 vm_object_lock_request(
5194 local_object->shadow,
5195 (vm_object_offset_t)
5196 ((offset - local_start) +
5197 local_offset) +
5198 local_object->vo_shadow_offset,
5199 *upl_size, FALSE,
5200 MEMORY_OBJECT_DATA_SYNC,
5201 VM_PROT_NO_CHANGE);
5202 }
5203 sync_cow_data = FALSE;
5204 vm_object_deallocate(local_object);
5205
5206 goto REDISCOVER_ENTRY;
5207 }
5208 }
5209 if (force_data_sync) {
5210 local_object = entry->object.vm_object;
5211 local_start = entry->vme_start;
5212 local_offset = entry->offset;
5213
5214 vm_object_reference(local_object);
5215 vm_map_unlock_read(map);
5216
5217 vm_object_lock_request(
5218 local_object,
5219 (vm_object_offset_t)
5220 ((offset - local_start) + local_offset),
5221 (vm_object_size_t)*upl_size, FALSE,
5222 MEMORY_OBJECT_DATA_SYNC,
5223 VM_PROT_NO_CHANGE);
5224
5225 force_data_sync = FALSE;
5226 vm_object_deallocate(local_object);
5227
5228 goto REDISCOVER_ENTRY;
5229 }
5230 if (entry->object.vm_object->private)
5231 *flags = UPL_DEV_MEMORY;
5232 else
5233 *flags = 0;
5234
5235 if (entry->object.vm_object->phys_contiguous)
5236 *flags |= UPL_PHYS_CONTIG;
5237
5238 local_object = entry->object.vm_object;
5239 local_offset = entry->offset;
5240 local_start = entry->vme_start;
5241
5242 vm_object_reference(local_object);
5243 vm_map_unlock_read(map);
5244
5245 ret = vm_object_iopl_request(local_object,
5246 (vm_object_offset_t) ((offset - local_start) + local_offset),
5247 *upl_size,
5248 upl,
5249 page_list,
5250 count,
5251 caller_flags);
5252 vm_object_deallocate(local_object);
5253
5254 return(ret);
5255 }
5256 vm_map_unlock_read(map);
5257
5258 return(KERN_FAILURE);
5259 }
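/*
 * Editor's note: a hedged end-to-end sketch (not in the original source,
 * compiled out) of the UPL path implemented above: create a UPL over a
 * range of a map, map its pages into the kernel with vm_map_enter_upl(),
 * then tear the mapping down, commit the range and drop the UPL.  The
 * function name, the buffer parameters and the particular flag
 * combination are assumptions for illustration only.
 */
#if 0	/* illustrative sketch only */
static kern_return_t
example_upl_roundtrip(vm_map_t map, vm_map_address_t buf, upl_size_t len)
{
	upl_t		upl = UPL_NULL;
	upl_size_t	upl_size = len;
	unsigned int	count = 0;
	int		flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
	vm_map_offset_t	kaddr = 0;
	boolean_t	empty = FALSE;
	kern_return_t	kr;

	kr = vm_map_create_upl(map, buf, &upl_size, &upl, NULL, &count, &flags);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_enter_upl(kernel_map, upl, &kaddr);
	if (kr == KERN_SUCCESS) {
		/* ... operate on [kaddr, kaddr + upl_size) ... */
		(void) vm_map_remove_upl(kernel_map, upl);	/* drops the mapping reference */
	}
	(void) upl_commit_range(upl, 0, upl_size, 0, NULL, 0, &empty);
	upl_deallocate(upl);		/* drop the creation reference */

	return kr;
}
#endif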
5260
5261 /*
5262 * Internal routine to enter a UPL into a VM map.
5263 *
5264 * JMM - This should just be doable through the standard
5265 * vm_map_enter() API.
5266 */
5267 kern_return_t
5268 vm_map_enter_upl(
5269 vm_map_t map,
5270 upl_t upl,
5271 vm_map_offset_t *dst_addr)
5272 {
5273 vm_map_size_t size;
5274 vm_object_offset_t offset;
5275 vm_map_offset_t addr;
5276 vm_page_t m;
5277 kern_return_t kr;
5278 int isVectorUPL = 0, curr_upl=0;
5279 upl_t vector_upl = NULL;
5280 vm_offset_t vector_upl_dst_addr = 0;
5281 vm_map_t vector_upl_submap = NULL;
5282 upl_offset_t subupl_offset = 0;
5283 upl_size_t subupl_size = 0;
5284
5285 if (upl == UPL_NULL)
5286 return KERN_INVALID_ARGUMENT;
5287
5288 if((isVectorUPL = vector_upl_is_valid(upl))) {
5289 int mapped=0,valid_upls=0;
5290 vector_upl = upl;
5291
5292 upl_lock(vector_upl);
5293 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
5294 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
5295 if(upl == NULL)
5296 continue;
5297 valid_upls++;
5298 if (UPL_PAGE_LIST_MAPPED & upl->flags)
5299 mapped++;
5300 }
5301
5302 if(mapped) {
5303 if(mapped != valid_upls)
5304 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
5305 else {
5306 upl_unlock(vector_upl);
5307 return KERN_FAILURE;
5308 }
5309 }
5310
5311 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
5312 if( kr != KERN_SUCCESS )
5313 panic("Vector UPL submap allocation failed\n");
5314 map = vector_upl_submap;
5315 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
5316 curr_upl=0;
5317 }
5318 else
5319 upl_lock(upl);
5320
5321 process_upl_to_enter:
5322 if(isVectorUPL){
5323 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
5324 *dst_addr = vector_upl_dst_addr;
5325 upl_unlock(vector_upl);
5326 return KERN_SUCCESS;
5327 }
5328 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
5329 if(upl == NULL)
5330 goto process_upl_to_enter;
5331
5332 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
5333 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
5334 } else {
5335 /*
5336 * check to see if already mapped
5337 */
5338 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
5339 upl_unlock(upl);
5340 return KERN_FAILURE;
5341 }
5342 }
5343 if ((!(upl->flags & UPL_SHADOWED)) &&
5344 ((upl->flags & UPL_HAS_BUSY) ||
5345 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
5346
5347 vm_object_t object;
5348 vm_page_t alias_page;
5349 vm_object_offset_t new_offset;
5350 unsigned int pg_num;
5351 wpl_array_t lite_list;
5352
5353 if (upl->flags & UPL_INTERNAL) {
5354 lite_list = (wpl_array_t)
5355 ((((uintptr_t)upl) + sizeof(struct upl))
5356 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
5357 } else {
5358 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
5359 }
5360 object = upl->map_object;
5361 upl->map_object = vm_object_allocate(upl->size);
5362
5363 vm_object_lock(upl->map_object);
5364
5365 upl->map_object->shadow = object;
5366 upl->map_object->pageout = TRUE;
5367 upl->map_object->can_persist = FALSE;
5368 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5369 upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
5370 upl->map_object->wimg_bits = object->wimg_bits;
5371 offset = upl->map_object->vo_shadow_offset;
5372 new_offset = 0;
5373 size = upl->size;
5374
5375 upl->flags |= UPL_SHADOWED;
5376
5377 while (size) {
5378 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
5379 assert(pg_num == new_offset / PAGE_SIZE);
5380
5381 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
5382
5383 VM_PAGE_GRAB_FICTITIOUS(alias_page);
5384
5385 vm_object_lock(object);
5386
5387 m = vm_page_lookup(object, offset);
5388 if (m == VM_PAGE_NULL) {
5389 panic("vm_upl_map: page missing\n");
5390 }
5391
5392 /*
5393 * Convert the fictitious page to a private
5394 * shadow of the real page.
5395 */
5396 assert(alias_page->fictitious);
5397 alias_page->fictitious = FALSE;
5398 alias_page->private = TRUE;
5399 alias_page->pageout = TRUE;
5400 /*
5401 * since m is a page in the upl it must
5402 * already be wired or BUSY, so it's
5403 * safe to assign the underlying physical
5404 * page to the alias
5405 */
5406 alias_page->phys_page = m->phys_page;
5407
5408 vm_object_unlock(object);
5409
5410 vm_page_lockspin_queues();
5411 vm_page_wire(alias_page);
5412 vm_page_unlock_queues();
5413
5414 /*
5415 * ENCRYPTED SWAP:
5416 * The virtual page ("m") has to be wired in some way
5417 * here or its physical page ("m->phys_page") could
5418 * be recycled at any time.
5419 * Assuming this is enforced by the caller, we can't
5420 * get an encrypted page here. Since the encryption
5421 * key depends on the VM page's "pager" object and
5422 * the "paging_offset", we couldn't handle 2 pageable
5423 * VM pages (with different pagers and paging_offsets)
5424 * sharing the same physical page: we could end up
5425 * encrypting with one key (via one VM page) and
5426 * decrypting with another key (via the alias VM page).
5427 */
5428 ASSERT_PAGE_DECRYPTED(m);
5429
5430 vm_page_insert(alias_page, upl->map_object, new_offset);
5431
5432 assert(!alias_page->wanted);
5433 alias_page->busy = FALSE;
5434 alias_page->absent = FALSE;
5435 }
5436 size -= PAGE_SIZE;
5437 offset += PAGE_SIZE_64;
5438 new_offset += PAGE_SIZE_64;
5439 }
5440 vm_object_unlock(upl->map_object);
5441 }
5442 if (upl->flags & UPL_SHADOWED)
5443 offset = 0;
5444 else
5445 offset = upl->offset - upl->map_object->paging_offset;
5446
5447 size = upl->size;
5448
5449 vm_object_reference(upl->map_object);
5450
5451 if(!isVectorUPL) {
5452 *dst_addr = 0;
5453 /*
5454 * NEED A UPL_MAP ALIAS
5455 */
5456 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
5457 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
5458 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
5459
5460 if (kr != KERN_SUCCESS) {
5461 upl_unlock(upl);
5462 return(kr);
5463 }
5464 }
5465 else {
5466 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
5467 VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
5468 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
5469 if(kr)
5470 panic("vm_map_enter failed for a Vector UPL\n");
5471 }
5472 vm_object_lock(upl->map_object);
5473
5474 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
5475 m = vm_page_lookup(upl->map_object, offset);
5476
5477 if (m) {
5478 m->pmapped = TRUE;
5479
5480 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
5481 * but only in kernel space. If this was on a user map,
5482 * we'd have to set the wpmapped bit. */
5483 /* m->wpmapped = TRUE; */
5484 assert(map==kernel_map);
5485
5486 PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, VM_PROT_NONE, 0, TRUE);
5487 }
5488 offset += PAGE_SIZE_64;
5489 }
5490 vm_object_unlock(upl->map_object);
5491
5492 /*
5493 * hold a reference for the mapping
5494 */
5495 upl->ref_count++;
5496 upl->flags |= UPL_PAGE_LIST_MAPPED;
5497 upl->kaddr = (vm_offset_t) *dst_addr;
5498 assert(upl->kaddr == *dst_addr);
5499
5500 if(isVectorUPL)
5501 goto process_upl_to_enter;
5502
5503 upl_unlock(upl);
5504
5505 return KERN_SUCCESS;
5506 }
5507
5508 /*
5509 * Internal routine to remove a UPL mapping from a VM map.
5510 *
5511 * XXX - This should just be doable through a standard
5512 * vm_map_remove() operation. Otherwise, implicit clean-up
5513 * of the target map won't be able to correctly remove
5514 * these (and release the reference on the UPL). Having
5515 * to do this means we can't map these into user-space
5516 * maps yet.
5517 */
5518 kern_return_t
5519 vm_map_remove_upl(
5520 vm_map_t map,
5521 upl_t upl)
5522 {
5523 vm_address_t addr;
5524 upl_size_t size;
5525 int isVectorUPL = 0, curr_upl = 0;
5526 upl_t vector_upl = NULL;
5527
5528 if (upl == UPL_NULL)
5529 return KERN_INVALID_ARGUMENT;
5530
5531 if((isVectorUPL = vector_upl_is_valid(upl))) {
5532 int unmapped=0, valid_upls=0;
5533 vector_upl = upl;
5534 upl_lock(vector_upl);
5535 for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
5536 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
5537 if(upl == NULL)
5538 continue;
5539 valid_upls++;
5540 if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
5541 unmapped++;
5542 }
5543
5544 if(unmapped) {
5545 if(unmapped != valid_upls)
5546 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
5547 else {
5548 upl_unlock(vector_upl);
5549 return KERN_FAILURE;
5550 }
5551 }
5552 curr_upl=0;
5553 }
5554 else
5555 upl_lock(upl);
5556
5557 process_upl_to_remove:
5558 if(isVectorUPL) {
5559 if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
5560 vm_map_t v_upl_submap;
5561 vm_offset_t v_upl_submap_dst_addr;
5562 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
5563
5564 vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
5565 vm_map_deallocate(v_upl_submap);
5566 upl_unlock(vector_upl);
5567 return KERN_SUCCESS;
5568 }
5569
5570 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
5571 if(upl == NULL)
5572 goto process_upl_to_remove;
5573 }
5574
5575 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
5576 addr = upl->kaddr;
5577 size = upl->size;
5578
5579 assert(upl->ref_count > 1);
5580 upl->ref_count--; /* removing mapping ref */
5581
5582 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
5583 upl->kaddr = (vm_offset_t) 0;
5584
5585 if(!isVectorUPL) {
5586 upl_unlock(upl);
5587
5588 vm_map_remove(
5589 map,
5590 vm_map_trunc_page(addr,
5591 VM_MAP_PAGE_MASK(map)),
5592 vm_map_round_page(addr + size,
5593 VM_MAP_PAGE_MASK(map)),
5594 VM_MAP_NO_FLAGS);
5595
5596 return KERN_SUCCESS;
5597 }
5598 else {
5599 /*
5600 * If it's a Vectored UPL, we'll be removing the entire
5601 * submap anyway, so no need to remove individual UPL
5602 * element mappings from within the submap
5603 */
5604 goto process_upl_to_remove;
5605 }
5606 }
5607 upl_unlock(upl);
5608
5609 return KERN_FAILURE;
5610 }
5611
5612
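/*
 * upl_commit_range:
 *
 * Commit the pages covered by [offset, offset + size) of the given UPL
 * (or of each sub-UPL, when handed a Vector UPL) back to their VM object:
 * clear the lite-list bits, update dirty/reference/code-signing state as
 * directed by "flags" and by the UPL's own flags, and batch the per-page
 * work through the delayed-work machinery.  "*empty" is set once the UPL
 * no longer covers any pages, so the caller knows it can be deallocated.
 */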
5613 kern_return_t
5614 upl_commit_range(
5615 upl_t upl,
5616 upl_offset_t offset,
5617 upl_size_t size,
5618 int flags,
5619 upl_page_info_t *page_list,
5620 mach_msg_type_number_t count,
5621 boolean_t *empty)
5622 {
5623 upl_size_t xfer_size, subupl_size = size;
5624 vm_object_t shadow_object;
5625 vm_object_t object;
5626 vm_object_offset_t target_offset;
5627 upl_offset_t subupl_offset = offset;
5628 int entry;
5629 wpl_array_t lite_list;
5630 int occupied;
5631 int clear_refmod = 0;
5632 int pgpgout_count = 0;
5633 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
5634 struct vm_page_delayed_work *dwp;
5635 int dw_count;
5636 int dw_limit;
5637 int isVectorUPL = 0;
5638 upl_t vector_upl = NULL;
5639 boolean_t should_be_throttled = FALSE;
5640
5641 *empty = FALSE;
5642
5643 if (upl == UPL_NULL)
5644 return KERN_INVALID_ARGUMENT;
5645
5646 if (count == 0)
5647 page_list = NULL;
5648
5649 if((isVectorUPL = vector_upl_is_valid(upl))) {
5650 vector_upl = upl;
5651 upl_lock(vector_upl);
5652 }
5653 else
5654 upl_lock(upl);
5655
5656 process_upl_to_commit:
5657
5658 if(isVectorUPL) {
5659 size = subupl_size;
5660 offset = subupl_offset;
5661 if(size == 0) {
5662 upl_unlock(vector_upl);
5663 return KERN_SUCCESS;
5664 }
5665 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
5666 if(upl == NULL) {
5667 upl_unlock(vector_upl);
5668 return KERN_FAILURE;
5669 }
5670 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
5671 subupl_size -= size;
5672 subupl_offset += size;
5673 }
5674
5675 #if UPL_DEBUG
5676 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
5677 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5678
5679 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
5680 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
5681
5682 upl->upl_commit_index++;
5683 }
5684 #endif
5685 if (upl->flags & UPL_DEVICE_MEMORY)
5686 xfer_size = 0;
5687 else if ((offset + size) <= upl->size)
5688 xfer_size = size;
5689 else {
5690 if(!isVectorUPL)
5691 upl_unlock(upl);
5692 else {
5693 upl_unlock(vector_upl);
5694 }
5695 return KERN_FAILURE;
5696 }
5697 if (upl->flags & UPL_SET_DIRTY)
5698 flags |= UPL_COMMIT_SET_DIRTY;
5699 if (upl->flags & UPL_CLEAR_DIRTY)
5700 flags |= UPL_COMMIT_CLEAR_DIRTY;
5701
5702 if (upl->flags & UPL_INTERNAL)
5703 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
5704 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
5705 else
5706 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
5707
5708 object = upl->map_object;
5709
5710 if (upl->flags & UPL_SHADOWED) {
5711 vm_object_lock(object);
5712 shadow_object = object->shadow;
5713 } else {
5714 shadow_object = object;
5715 }
5716 entry = offset/PAGE_SIZE;
5717 target_offset = (vm_object_offset_t)offset;
5718
5719 if (upl->flags & UPL_KERNEL_OBJECT)
5720 vm_object_lock_shared(shadow_object);
5721 else
5722 vm_object_lock(shadow_object);
5723
5724 if (upl->flags & UPL_ACCESS_BLOCKED) {
5725 assert(shadow_object->blocked_access);
5726 shadow_object->blocked_access = FALSE;
5727 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
5728 }
5729
5730 if (shadow_object->code_signed) {
5731 /*
5732 * CODE SIGNING:
5733 * If the object is code-signed, do not let this UPL tell
5734 * us if the pages are valid or not. Let the pages be
5735 * validated by VM the normal way (when they get mapped or
5736 * copied).
5737 */
5738 flags &= ~UPL_COMMIT_CS_VALIDATED;
5739 }
5740 if (! page_list) {
5741 /*
5742 * No page list to get the code-signing info from !?
5743 */
5744 flags &= ~UPL_COMMIT_CS_VALIDATED;
5745 }
5746 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
5747 should_be_throttled = TRUE;
5748
5749 dwp = &dw_array[0];
5750 dw_count = 0;
5751 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5752
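/*
 * Per-page state changes are not applied one at a time: each pass
 * through the loop below accumulates a DW_* mask in "dwp", and up to
 * dw_limit entries are batched in dw_array[] before being applied in
 * one shot by vm_page_do_delayed_work(), which avoids taking the page
 * queues lock separately for every page.
 */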
5753 while (xfer_size) {
5754 vm_page_t t, m;
5755
5756 dwp->dw_mask = 0;
5757 clear_refmod = 0;
5758
5759 m = VM_PAGE_NULL;
5760
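/*
 * For a "lite" UPL the covered pages are tracked in a bitmap: bit
 * (pg_num & 31) of the 32-bit word lite_list[pg_num >> 5] is set for
 * each page the UPL holds.  Committing a page clears its bit; once the
 * bitmap is entirely clear the UPL is considered empty (see the
 * "occupied" check after this loop).
 */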
5761 if (upl->flags & UPL_LITE) {
5762 unsigned int pg_num;
5763
5764 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
5765 assert(pg_num == target_offset/PAGE_SIZE);
5766
5767 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
5768 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
5769
5770 if (!(upl->flags & UPL_KERNEL_OBJECT))
5771 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
5772 }
5773 }
5774 if (upl->flags & UPL_SHADOWED) {
5775 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
5776
5777 t->pageout = FALSE;
5778
5779 VM_PAGE_FREE(t);
5780
5781 if (m == VM_PAGE_NULL)
5782 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
5783 }
5784 }
5785 if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
5786 goto commit_next_page;
5787
5788 if (m->compressor) {
5789 assert(m->busy);
5790
5791 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
5792 goto commit_next_page;
5793 }
5794
5795 if (flags & UPL_COMMIT_CS_VALIDATED) {
5796 /*
5797 * CODE SIGNING:
5798 * Set the code signing bits according to
5799 * what the UPL says they should be.
5800 */
5801 m->cs_validated = page_list[entry].cs_validated;
5802 m->cs_tainted = page_list[entry].cs_tainted;
5803 }
5804 if (upl->flags & UPL_IO_WIRE) {
5805
5806 if (page_list)
5807 page_list[entry].phys_addr = 0;
5808
5809 if (flags & UPL_COMMIT_SET_DIRTY) {
5810 SET_PAGE_DIRTY(m, FALSE);
5811 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
5812 m->dirty = FALSE;
5813
5814 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
5815 m->cs_validated && !m->cs_tainted) {
5816 /*
5817 * CODE SIGNING:
5818 * This page is no longer dirty
5819 * but could have been modified,
5820 * so it will need to be
5821 * re-validated.
5822 */
5823 m->cs_validated = FALSE;
5824 #if DEVELOPMENT || DEBUG
5825 vm_cs_validated_resets++;
5826 #endif
5827 pmap_disconnect(m->phys_page);
5828 }
5829 clear_refmod |= VM_MEM_MODIFIED;
5830 }
5831 if (flags & UPL_COMMIT_INACTIVATE) {
5832 dwp->dw_mask |= DW_vm_page_deactivate_internal;
5833 clear_refmod |= VM_MEM_REFERENCED;
5834 }
5835 if (upl->flags & UPL_ACCESS_BLOCKED) {
5836 /*
5837 * We blocked access to the pages in this UPL.
5838 * Clear the "busy" bit and wake up any waiter
5839 * for this page.
5840 */
5841 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
5842 }
5843 if (m->absent) {
5844 if (flags & UPL_COMMIT_FREE_ABSENT)
5845 dwp->dw_mask |= DW_vm_page_free;
5846 else {
5847 m->absent = FALSE;
5848 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
5849
5850 if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
5851 dwp->dw_mask |= DW_vm_page_activate;
5852 }
5853 } else
5854 dwp->dw_mask |= DW_vm_page_unwire;
5855
5856 goto commit_next_page;
5857 }
5858 assert(!m->compressor);
5859
5860 if (page_list)
5861 page_list[entry].phys_addr = 0;
5862
5863 /*
5864 * make sure to clear the hardware
5865 * modify or reference bits before
5866 * releasing the BUSY bit on this page;
5867 * otherwise we risk losing a legitimate
5868 * change of state
5869 */
5870 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
5871 m->dirty = FALSE;
5872
5873 clear_refmod |= VM_MEM_MODIFIED;
5874 }
5875 if (m->laundry)
5876 dwp->dw_mask |= DW_vm_pageout_throttle_up;
5877
5878 if (VM_PAGE_WIRED(m))
5879 m->pageout = FALSE;
5880
5881 if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
5882 m->cs_validated && !m->cs_tainted) {
5883 /*
5884 * CODE SIGNING:
5885 * This page is no longer dirty
5886 * but could have been modified,
5887 * so it will need to be
5888 * re-validated.
5889 */
5890 m->cs_validated = FALSE;
5891 #if DEVELOPMENT || DEBUG
5892 vm_cs_validated_resets++;
5893 #endif
5894 pmap_disconnect(m->phys_page);
5895 }
5896 if (m->overwriting) {
5897 /*
5898 * the (COPY_OUT_FROM == FALSE) request_page_list case
5899 */
5900 if (m->busy) {
5901 m->absent = FALSE;
5902
5903 dwp->dw_mask |= DW_clear_busy;
5904 } else {
5905 /*
5906 * alternate (COPY_OUT_FROM == FALSE) page_list case
5907 * Occurs when the original page was wired
5908 * at the time of the list request
5909 */
5910 assert(VM_PAGE_WIRED(m));
5911
5912 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
5913 }
5914 m->overwriting = FALSE;
5915 }
5916 if (m->encrypted_cleaning == TRUE) {
5917 m->encrypted_cleaning = FALSE;
5918
5919 dwp->dw_mask |= DW_clear_busy | DW_PAGE_WAKEUP;
5920 }
5921 m->cleaning = FALSE;
5922
5923 if (m->pageout) {
5924 /*
5925 * With the clean queue enabled, UPL_PAGEOUT should
5926 * no longer set the pageout bit. It's pages now go
5927 * to the clean queue.
5928 */
5929 assert(!(flags & UPL_PAGEOUT));
5930
5931 m->pageout = FALSE;
5932 #if MACH_CLUSTER_STATS
5933 if (m->wanted) vm_pageout_target_collisions++;
5934 #endif
5935 if ((flags & UPL_COMMIT_SET_DIRTY) ||
5936 (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))) {
5937 /*
5938 * page was re-dirtied after we started
5939 * the pageout... reactivate it since
5940 * we don't know whether the on-disk
5941 * copy matches what is now in memory
5942 */
5943 SET_PAGE_DIRTY(m, FALSE);
5944
5945 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
5946
5947 if (upl->flags & UPL_PAGEOUT) {
5948 CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
5949 VM_STAT_INCR(reactivations);
5950 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5951 }
5952 } else {
5953 /*
5954 * page has been successfully cleaned
5955 * go ahead and free it for other use
5956 */
5957 if (m->object->internal) {
5958 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
5959 } else {
5960 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
5961 }
5962 m->dirty = FALSE;
5963 m->busy = TRUE;
5964
5965 dwp->dw_mask |= DW_vm_page_free;
5966 }
5967 goto commit_next_page;
5968 }
5969 #if MACH_CLUSTER_STATS
5970 if (m->wpmapped)
5971 m->dirty = pmap_is_modified(m->phys_page);
5972
5973 if (m->dirty) vm_pageout_cluster_dirtied++;
5974 else vm_pageout_cluster_cleaned++;
5975 if (m->wanted) vm_pageout_cluster_collisions++;
5976 #endif
5977 /*
5978 * It is a part of the semantic of COPYOUT_FROM
5979 * UPLs that a commit implies cache sync
5980 * between the vm page and the backing store
5981 * this can be used to strip the precious bit
5982 * as well as clean
5983 */
5984 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
5985 m->precious = FALSE;
5986
5987 if (flags & UPL_COMMIT_SET_DIRTY) {
5988 SET_PAGE_DIRTY(m, FALSE);
5989 } else {
5990 m->dirty = FALSE;
5991 }
5992
5993 /* with the clean queue on, move *all* cleaned pages to the clean queue */
5994 if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
5995 pgpgout_count++;
5996
5997 /* this page used to be dirty; now it's on the clean queue. */
5998 m->was_dirty = TRUE;
5999
6000 dwp->dw_mask |= DW_enqueue_cleaned;
6001 vm_pageout_enqueued_cleaned_from_inactive_dirty++;
6002 } else if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
6003 /*
6004 * page coming back in from being 'frozen'...
6005 * it was dirty before it was frozen, so keep it dirty so
6006 * that vm_page_activate will notice that it really belongs
6007 * on the throttle queue and put it there
6008 */
6009 SET_PAGE_DIRTY(m, FALSE);
6010 dwp->dw_mask |= DW_vm_page_activate;
6011
6012 } else {
6013 if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
6014 dwp->dw_mask |= DW_vm_page_deactivate_internal;
6015 clear_refmod |= VM_MEM_REFERENCED;
6016 } else if (!m->active && !m->inactive && !m->speculative) {
6017
6018 if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
6019 dwp->dw_mask |= DW_vm_page_speculate;
6020 else if (m->reference)
6021 dwp->dw_mask |= DW_vm_page_activate;
6022 else {
6023 dwp->dw_mask |= DW_vm_page_deactivate_internal;
6024 clear_refmod |= VM_MEM_REFERENCED;
6025 }
6026 }
6027 }
6028 if (upl->flags & UPL_ACCESS_BLOCKED) {
6029 /*
6030 * We blocked access to the pages in this UPL.
6031 * Clear the "busy" bit on this page before we
6032 * wake up any waiter.
6033 */
6034 dwp->dw_mask |= DW_clear_busy;
6035 }
6036 /*
6037 * Wake up any thread waiting for this page to be done cleaning.
6038 */
6039 dwp->dw_mask |= DW_PAGE_WAKEUP;
6040
6041 commit_next_page:
6042 if (clear_refmod)
6043 pmap_clear_refmod(m->phys_page, clear_refmod);
6044
6045 target_offset += PAGE_SIZE_64;
6046 xfer_size -= PAGE_SIZE;
6047 entry++;
6048
6049 if (dwp->dw_mask) {
6050 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
6051 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
6052
6053 if (dw_count >= dw_limit) {
6054 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
6055
6056 dwp = &dw_array[0];
6057 dw_count = 0;
6058 }
6059 } else {
6060 if (dwp->dw_mask & DW_clear_busy)
6061 m->busy = FALSE;
6062
6063 if (dwp->dw_mask & DW_PAGE_WAKEUP)
6064 PAGE_WAKEUP(m);
6065 }
6066 }
6067 }
6068 if (dw_count)
6069 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
6070
6071 occupied = 1;
6072
6073 if (upl->flags & UPL_DEVICE_MEMORY) {
6074 occupied = 0;
6075 } else if (upl->flags & UPL_LITE) {
6076 int pg_num;
6077 int i;
6078
6079 pg_num = upl->size/PAGE_SIZE;
6080 pg_num = (pg_num + 31) >> 5;
6081 occupied = 0;
6082
6083 for (i = 0; i < pg_num; i++) {
6084 if (lite_list[i] != 0) {
6085 occupied = 1;
6086 break;
6087 }
6088 }
6089 } else {
6090 if (queue_empty(&upl->map_object->memq))
6091 occupied = 0;
6092 }
6093 if (occupied == 0) {
6094 /*
6095 * If this UPL element belongs to a Vector UPL and is
6096 * empty, then this is the right function to deallocate
6097 * it. So go ahead and set the *empty variable. The flag
6098 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
6099 * should be considered relevant for the Vector UPL and not
6100 * the internal UPLs.
6101 */
6102 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
6103 *empty = TRUE;
6104
6105 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
6106 /*
6107 * this is not a paging object
6108 * so we need to drop the paging reference
6109 * that was taken when we created the UPL
6110 * against this object
6111 */
6112 vm_object_activity_end(shadow_object);
6113 vm_object_collapse(shadow_object, 0, TRUE);
6114 } else {
6115 /*
6116 * we donated the paging reference to
6117 * the map object... vm_pageout_object_terminate
6118 * will drop this reference
6119 */
6120 }
6121 }
6122 vm_object_unlock(shadow_object);
6123 if (object != shadow_object)
6124 vm_object_unlock(object);
6125
6126 if(!isVectorUPL)
6127 upl_unlock(upl);
6128 else {
6129 /*
6130 * If we completed our operations on a UPL that is
6131 * part of a Vectored UPL and if empty is TRUE, then
6132 * we should go ahead and deallocate this UPL element.
6133 * Then we check if this was the last of the UPL elements
6134 * within that Vectored UPL. If so, set empty to TRUE
6135 * so that in ubc_upl_commit_range or ubc_upl_commit, we
6136 * can go ahead and deallocate the Vector UPL too.
6137 */
6138 if(*empty==TRUE) {
6139 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
6140 upl_deallocate(upl);
6141 }
6142 goto process_upl_to_commit;
6143 }
6144
6145 if (pgpgout_count) {
6146 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
6147 }
6148
6149 return KERN_SUCCESS;
6150 }
6151
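/*
 * upl_abort_range:
 *
 * Abort the pages covered by [offset, offset + size) of the given UPL
 * (or of each sub-UPL of a Vector UPL).  Depending on the "error" bits,
 * absent pages are freed, marked for restart, or flagged unusual/in
 * error, and UPL_ABORT_DUMP_PAGES discards pages outright.  As with
 * upl_commit_range, "*empty" reports when the UPL no longer covers any
 * pages.
 */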
6152 kern_return_t
6153 upl_abort_range(
6154 upl_t upl,
6155 upl_offset_t offset,
6156 upl_size_t size,
6157 int error,
6158 boolean_t *empty)
6159 {
6160 upl_page_info_t *user_page_list = NULL;
6161 upl_size_t xfer_size, subupl_size = size;
6162 vm_object_t shadow_object;
6163 vm_object_t object;
6164 vm_object_offset_t target_offset;
6165 upl_offset_t subupl_offset = offset;
6166 int entry;
6167 wpl_array_t lite_list;
6168 int occupied;
6169 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
6170 struct vm_page_delayed_work *dwp;
6171 int dw_count;
6172 int dw_limit;
6173 int isVectorUPL = 0;
6174 upl_t vector_upl = NULL;
6175
6176 *empty = FALSE;
6177
6178 if (upl == UPL_NULL)
6179 return KERN_INVALID_ARGUMENT;
6180
6181 if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
6182 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
6183
6184 if((isVectorUPL = vector_upl_is_valid(upl))) {
6185 vector_upl = upl;
6186 upl_lock(vector_upl);
6187 }
6188 else
6189 upl_lock(upl);
6190
6191 process_upl_to_abort:
6192 if(isVectorUPL) {
6193 size = subupl_size;
6194 offset = subupl_offset;
6195 if(size == 0) {
6196 upl_unlock(vector_upl);
6197 return KERN_SUCCESS;
6198 }
6199 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
6200 if(upl == NULL) {
6201 upl_unlock(vector_upl);
6202 return KERN_FAILURE;
6203 }
6204 subupl_size -= size;
6205 subupl_offset += size;
6206 }
6207
6208 *empty = FALSE;
6209
6210 #if UPL_DEBUG
6211 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
6212 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
6213
6214 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
6215 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
6216 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
6217
6218 upl->upl_commit_index++;
6219 }
6220 #endif
6221 if (upl->flags & UPL_DEVICE_MEMORY)
6222 xfer_size = 0;
6223 else if ((offset + size) <= upl->size)
6224 xfer_size = size;
6225 else {
6226 if(!isVectorUPL)
6227 upl_unlock(upl);
6228 else {
6229 upl_unlock(vector_upl);
6230 }
6231
6232 return KERN_FAILURE;
6233 }
6234 if (upl->flags & UPL_INTERNAL) {
6235 lite_list = (wpl_array_t)
6236 ((((uintptr_t)upl) + sizeof(struct upl))
6237 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
6238
6239 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
6240 } else {
6241 lite_list = (wpl_array_t)
6242 (((uintptr_t)upl) + sizeof(struct upl));
6243 }
6244 object = upl->map_object;
6245
6246 if (upl->flags & UPL_SHADOWED) {
6247 vm_object_lock(object);
6248 shadow_object = object->shadow;
6249 } else
6250 shadow_object = object;
6251
6252 entry = offset/PAGE_SIZE;
6253 target_offset = (vm_object_offset_t)offset;
6254
6255 if (upl->flags & UPL_KERNEL_OBJECT)
6256 vm_object_lock_shared(shadow_object);
6257 else
6258 vm_object_lock(shadow_object);
6259
6260 if (upl->flags & UPL_ACCESS_BLOCKED) {
6261 assert(shadow_object->blocked_access);
6262 shadow_object->blocked_access = FALSE;
6263 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
6264 }
6265
6266 dwp = &dw_array[0];
6267 dw_count = 0;
6268 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
6269
6270 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
6271 panic("upl_abort_range: kernel_object being DUMPED");
6272
6273 while (xfer_size) {
6274 vm_page_t t, m;
6275 unsigned int pg_num;
6276 boolean_t needed;
6277
6278 pg_num = (unsigned int) (target_offset/PAGE_SIZE);
6279 assert(pg_num == target_offset/PAGE_SIZE);
6280
6281 needed = FALSE;
6282
6283 if (user_page_list)
6284 needed = user_page_list[pg_num].needed;
6285
6286 dwp->dw_mask = 0;
6287 m = VM_PAGE_NULL;
6288
6289 if (upl->flags & UPL_LITE) {
6290
6291 if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
6292 lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
6293
6294 if ( !(upl->flags & UPL_KERNEL_OBJECT))
6295 m = vm_page_lookup(shadow_object, target_offset +
6296 (upl->offset - shadow_object->paging_offset));
6297 }
6298 }
6299 if (upl->flags & UPL_SHADOWED) {
6300 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
6301 t->pageout = FALSE;
6302
6303 VM_PAGE_FREE(t);
6304
6305 if (m == VM_PAGE_NULL)
6306 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
6307 }
6308 }
6309 if ((upl->flags & UPL_KERNEL_OBJECT))
6310 goto abort_next_page;
6311
6312 if (m != VM_PAGE_NULL) {
6313
6314 assert(!m->compressor);
6315
6316 if (m->absent) {
6317 boolean_t must_free = TRUE;
6318
6319 /*
6320 * COPYOUT = FALSE case
6321 * check for error conditions which must
6322 * be passed back to the pages customer
6323 */
6324 if (error & UPL_ABORT_RESTART) {
6325 m->restart = TRUE;
6326 m->absent = FALSE;
6327 m->unusual = TRUE;
6328 must_free = FALSE;
6329 } else if (error & UPL_ABORT_UNAVAILABLE) {
6330 m->restart = FALSE;
6331 m->unusual = TRUE;
6332 must_free = FALSE;
6333 } else if (error & UPL_ABORT_ERROR) {
6334 m->restart = FALSE;
6335 m->absent = FALSE;
6336 m->error = TRUE;
6337 m->unusual = TRUE;
6338 must_free = FALSE;
6339 }
6340 if (m->clustered && needed == FALSE) {
6341 /*
6342 * This page was a part of a speculative
6343 * read-ahead initiated by the kernel
6344 * itself. No one is expecting this
6345 * page and no one will clean up its
6346 * error state if it ever becomes valid
6347 * in the future.
6348 * We have to free it here.
6349 */
6350 must_free = TRUE;
6351 }
6352
6353 /*
6354 * ENCRYPTED SWAP:
6355 * If the page was already encrypted,
6356 * we don't really need to decrypt it
6357 * now. It will get decrypted later,
6358 * on demand, as soon as someone needs
6359 * to access its contents.
6360 */
6361
6362 m->cleaning = FALSE;
6363 m->encrypted_cleaning = FALSE;
6364
6365 if (m->overwriting && !m->busy) {
6366 /*
6367 * this shouldn't happen since
6368 * this is an 'absent' page, but
6369 * it doesn't hurt to check for
6370 * the 'alternate' method of
6371 * stabilizing the page...
6372 * we will mark 'busy' to be cleared
6373 * in the following code which will
6374 * take care of the primary stabilization
6375 * method (i.e. setting 'busy' to TRUE)
6376 */
6377 dwp->dw_mask |= DW_vm_page_unwire;
6378 }
6379 m->overwriting = FALSE;
6380
6381 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
6382
6383 if (must_free == TRUE)
6384 dwp->dw_mask |= DW_vm_page_free;
6385 else
6386 dwp->dw_mask |= DW_vm_page_activate;
6387 } else {
6388 /*
6389 * Handle the trusted pager throttle.
6390 */
6391 if (m->laundry)
6392 dwp->dw_mask |= DW_vm_pageout_throttle_up;
6393
6394 if (upl->flags & UPL_ACCESS_BLOCKED) {
6395 /*
6396 * We blocked access to the pages in this UPL.
6397 * Clear the "busy" bit and wake up any waiter
6398 * for this page.
6399 */
6400 dwp->dw_mask |= DW_clear_busy;
6401 }
6402 if (m->overwriting) {
6403 if (m->busy)
6404 dwp->dw_mask |= DW_clear_busy;
6405 else {
6406 /*
6407 * deal with the 'alternate' method
6408 * of stabilizing the page...
6409 * we will either free the page
6410 * or mark 'busy' to be cleared
6411 * in the following code which will
6412 * take care of the primary stabilization
6413 * method (i.e. setting 'busy' to TRUE)
6414 */
6415 dwp->dw_mask |= DW_vm_page_unwire;
6416 }
6417 m->overwriting = FALSE;
6418 }
6419 if (m->encrypted_cleaning == TRUE) {
6420 m->encrypted_cleaning = FALSE;
6421
6422 dwp->dw_mask |= DW_clear_busy;
6423 }
6424 m->pageout = FALSE;
6425 m->cleaning = FALSE;
6426 #if MACH_PAGEMAP
6427 vm_external_state_clr(m->object->existence_map, m->offset);
6428 #endif /* MACH_PAGEMAP */
6429 if (error & UPL_ABORT_DUMP_PAGES) {
6430 pmap_disconnect(m->phys_page);
6431
6432 dwp->dw_mask |= DW_vm_page_free;
6433 } else {
6434 if (!(dwp->dw_mask & DW_vm_page_unwire)) {
6435 if (error & UPL_ABORT_REFERENCE) {
6436 /*
6437 * we've been told to explicitly
6438 * reference this page... for
6439 * file I/O, this is done by
6440 * implementing an LRU on the inactive q
6441 */
6442 dwp->dw_mask |= DW_vm_page_lru;
6443
6444 } else if (!m->active && !m->inactive && !m->speculative)
6445 dwp->dw_mask |= DW_vm_page_deactivate_internal;
6446 }
6447 dwp->dw_mask |= DW_PAGE_WAKEUP;
6448 }
6449 }
6450 }
6451 abort_next_page:
6452 target_offset += PAGE_SIZE_64;
6453 xfer_size -= PAGE_SIZE;
6454 entry++;
6455
6456 if (dwp->dw_mask) {
6457 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
6458 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
6459
6460 if (dw_count >= dw_limit) {
6461 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
6462
6463 dwp = &dw_array[0];
6464 dw_count = 0;
6465 }
6466 } else {
6467 if (dwp->dw_mask & DW_clear_busy)
6468 m->busy = FALSE;
6469
6470 if (dwp->dw_mask & DW_PAGE_WAKEUP)
6471 PAGE_WAKEUP(m);
6472 }
6473 }
6474 }
6475 if (dw_count)
6476 vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
6477
6478 occupied = 1;
6479
6480 if (upl->flags & UPL_DEVICE_MEMORY) {
6481 occupied = 0;
6482 } else if (upl->flags & UPL_LITE) {
6483 int pg_num;
6484 int i;
6485
6486 pg_num = upl->size/PAGE_SIZE;
6487 pg_num = (pg_num + 31) >> 5;
6488 occupied = 0;
6489
6490 for (i = 0; i < pg_num; i++) {
6491 if (lite_list[i] != 0) {
6492 occupied = 1;
6493 break;
6494 }
6495 }
6496 } else {
6497 if (queue_empty(&upl->map_object->memq))
6498 occupied = 0;
6499 }
6500 if (occupied == 0) {
6501 /*
6502 * If this UPL element belongs to a Vector UPL and is
6503 * empty, then this is the right function to deallocate
6504 * it. So go ahead and set the *empty variable. The flag
6505 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
6506 * should be considered relevant for the Vector UPL and
6507 * not the internal UPLs.
6508 */
6509 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
6510 *empty = TRUE;
6511
6512 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
6513 /*
6514 * this is not a paging object
6515 * so we need to drop the paging reference
6516 * that was taken when we created the UPL
6517 * against this object
6518 */
6519 vm_object_activity_end(shadow_object);
6520 vm_object_collapse(shadow_object, 0, TRUE);
6521 } else {
6522 /*
6523 * we donated the paging reference to
6524 * the map object... vm_pageout_object_terminate
6525 * will drop this reference
6526 */
6527 }
6528 }
6529 vm_object_unlock(shadow_object);
6530 if (object != shadow_object)
6531 vm_object_unlock(object);
6532
6533 if(!isVectorUPL)
6534 upl_unlock(upl);
6535 else {
6536 /*
6537 * If we completed our operations on a UPL that is
6538 * part of a Vectored UPL and if empty is TRUE, then
6539 * we should go ahead and deallocate this UPL element.
6540 * Then we check if this was the last of the UPL elements
6541 * within that Vectored UPL. If so, set empty to TRUE
6542 * so that in ubc_upl_abort_range or ubc_upl_abort, we
6543 * can go ahead and deallocate the Vector UPL too.
6544 */
6545 if(*empty == TRUE) {
6546 *empty = vector_upl_set_subupl(vector_upl, upl,0);
6547 upl_deallocate(upl);
6548 }
6549 goto process_upl_to_abort;
6550 }
6551
6552 return KERN_SUCCESS;
6553 }
6554
6555
6556 kern_return_t
6557 upl_abort(
6558 upl_t upl,
6559 int error)
6560 {
6561 boolean_t empty;
6562
6563 return upl_abort_range(upl, 0, upl->size, error, &empty);
6564 }
6565
6566
6567 /* an option on commit should be wire */
6568 kern_return_t
6569 upl_commit(
6570 upl_t upl,
6571 upl_page_info_t *page_list,
6572 mach_msg_type_number_t count)
6573 {
6574 boolean_t empty;
6575
6576 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
6577 }
6578
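/*
 * vm_object_set_pmap_cache_attr:
 *
 * Propagate the object's WIMG caching attributes to the pages named in
 * "user_page_list" via PMAP_BATCH_SET_CACHE_ATTR.  Nothing is done when
 * the object uses the default attributes.
 */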
6579 void
6580 vm_object_set_pmap_cache_attr(
6581 vm_object_t object,
6582 upl_page_info_array_t user_page_list,
6583 unsigned int num_pages,
6584 boolean_t batch_pmap_op)
6585 {
6586 unsigned int cache_attr = 0;
6587
6588 cache_attr = object->wimg_bits & VM_WIMG_MASK;
6589 assert(user_page_list);
6590 if (cache_attr != VM_WIMG_USE_DEFAULT) {
6591 PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
6592 }
6593 }
6594
6595 unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
6596
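/*
 * vm_object_iopl_request:
 *
 * Build an I/O UPL directly against "object" for [offset, offset + size):
 * look up or fault in each page, wire it, record its physical page number
 * in the lite list (and the optional page list), and, for UPL_BLOCK_ACCESS,
 * mark it busy and strip its pmap mappings so it cannot be touched while
 * the I/O is in flight.  With UPL_NEED_32BIT_ADDR, pages above the 4G
 * boundary are replaced with pages from the low-memory pool.
 */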
6597 kern_return_t
6598 vm_object_iopl_request(
6599 vm_object_t object,
6600 vm_object_offset_t offset,
6601 upl_size_t size,
6602 upl_t *upl_ptr,
6603 upl_page_info_array_t user_page_list,
6604 unsigned int *page_list_count,
6605 int cntrl_flags)
6606 {
6607 vm_page_t dst_page;
6608 vm_object_offset_t dst_offset;
6609 upl_size_t xfer_size;
6610 upl_t upl = NULL;
6611 unsigned int entry;
6612 wpl_array_t lite_list = NULL;
6613 int no_zero_fill = FALSE;
6614 unsigned int size_in_pages;
6615 u_int32_t psize;
6616 kern_return_t ret;
6617 vm_prot_t prot;
6618 struct vm_object_fault_info fault_info;
6619 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
6620 struct vm_page_delayed_work *dwp;
6621 int dw_count;
6622 int dw_limit;
6623 int dw_index;
6624 boolean_t caller_lookup;
6625
6626 if (cntrl_flags & ~UPL_VALID_FLAGS) {
6627 /*
6628 * For forward compatibility's sake,
6629 * reject any unknown flag.
6630 */
6631 return KERN_INVALID_VALUE;
6632 }
6633 if (vm_lopage_needed == FALSE)
6634 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
6635
6636 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
6637 if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
6638 return KERN_INVALID_VALUE;
6639
6640 if (object->phys_contiguous) {
6641 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
6642 return KERN_INVALID_ADDRESS;
6643
6644 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
6645 return KERN_INVALID_ADDRESS;
6646 }
6647 }
6648
6649 if (cntrl_flags & UPL_ENCRYPT) {
6650 /*
6651 * ENCRYPTED SWAP:
6652 * The paging path doesn't use this interface,
6653 * so we don't support the UPL_ENCRYPT flag
6654 * here. We won't encrypt the pages.
6655 */
6656 assert(! (cntrl_flags & UPL_ENCRYPT));
6657 }
6658 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
6659 no_zero_fill = TRUE;
6660
6661 if (cntrl_flags & UPL_COPYOUT_FROM)
6662 prot = VM_PROT_READ;
6663 else
6664 prot = VM_PROT_READ | VM_PROT_WRITE;
6665
6666 if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
6667 size = MAX_UPL_SIZE * PAGE_SIZE;
6668
6669 if (cntrl_flags & UPL_SET_INTERNAL) {
6670 if (page_list_count != NULL)
6671 *page_list_count = MAX_UPL_SIZE;
6672 }
6673 if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
6674 ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
6675 return KERN_INVALID_ARGUMENT;
6676
6677 if ((!object->internal) && (object->paging_offset != 0))
6678 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
6679
6680
6681 if (object->phys_contiguous)
6682 psize = PAGE_SIZE;
6683 else
6684 psize = size;
6685
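/*
 * For an internal UPL all the metadata lives in a single allocation:
 * the struct upl is followed by the upl_page_info_t array (one entry
 * per page), which is in turn followed by the lite-list bitmap.  The
 * pointer arithmetic below simply carves those regions out of that
 * allocation.
 */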
6686 if (cntrl_flags & UPL_SET_INTERNAL) {
6687 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
6688
6689 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
6690 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
6691 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
6692 if (size == 0) {
6693 user_page_list = NULL;
6694 lite_list = NULL;
6695 }
6696 } else {
6697 upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
6698
6699 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
6700 if (size == 0) {
6701 lite_list = NULL;
6702 }
6703 }
6704 if (user_page_list)
6705 user_page_list[0].device = FALSE;
6706 *upl_ptr = upl;
6707
6708 upl->map_object = object;
6709 upl->size = size;
6710
6711 size_in_pages = size / PAGE_SIZE;
6712
6713 if (object == kernel_object &&
6714 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
6715 upl->flags |= UPL_KERNEL_OBJECT;
6716 #if UPL_DEBUG
6717 vm_object_lock(object);
6718 #else
6719 vm_object_lock_shared(object);
6720 #endif
6721 } else {
6722 vm_object_lock(object);
6723 vm_object_activity_begin(object);
6724 }
6725 /*
6726 * paging in progress also protects the paging_offset
6727 */
6728 upl->offset = offset + object->paging_offset;
6729
6730 if (cntrl_flags & UPL_BLOCK_ACCESS) {
6731 /*
6732 * The user requested that access to the pages in this UPL
6733 * be blocked until the UPL is committed or aborted.
6734 */
6735 upl->flags |= UPL_ACCESS_BLOCKED;
6736 }
6737
6738 if (object->phys_contiguous) {
6739 #if UPL_DEBUG
6740 vm_object_activity_begin(object);
6741 queue_enter(&object->uplq, upl, upl_t, uplq);
6742 #endif /* UPL_DEBUG */
6743
6744 if (upl->flags & UPL_ACCESS_BLOCKED) {
6745 assert(!object->blocked_access);
6746 object->blocked_access = TRUE;
6747 }
6748
6749 vm_object_unlock(object);
6750
6751 /*
6752 * don't need any shadow mappings for this one
6753 * since it is already I/O memory
6754 */
6755 upl->flags |= UPL_DEVICE_MEMORY;
6756
6757 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);
6758
6759 if (user_page_list) {
6760 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
6761 user_page_list[0].device = TRUE;
6762 }
6763 if (page_list_count != NULL) {
6764 if (upl->flags & UPL_INTERNAL)
6765 *page_list_count = 0;
6766 else
6767 *page_list_count = 1;
6768 }
6769 return KERN_SUCCESS;
6770 }
6771 if (object != kernel_object && object != compressor_object) {
6772 /*
6773 * Protect user space from future COW operations
6774 */
6775 object->true_share = TRUE;
6776
6777 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
6778 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6779 }
6780
6781 #if UPL_DEBUG
6782 vm_object_activity_begin(object);
6783 queue_enter(&object->uplq, upl, upl_t, uplq);
6784 #endif /* UPL_DEBUG */
6785
6786 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
6787 object->copy != VM_OBJECT_NULL) {
6788 /*
6789 * Honor copy-on-write obligations
6790 *
6791 * The caller is gathering these pages and
6792 * might modify their contents. We need to
6793 * make sure that the copy object has its own
6794 * private copies of these pages before we let
6795 * the caller modify them.
6796 *
6797 * NOTE: someone else could map the original object
6798 * after we've done this copy-on-write here, and they
6799 * could then see an inconsistent picture of the memory
6800 * while it's being modified via the UPL. To prevent this,
6801 * we would have to block access to these pages until the
6802 * UPL is released. We could use the UPL_BLOCK_ACCESS
6803 * code path for that...
6804 */
6805 vm_object_update(object,
6806 offset,
6807 size,
6808 NULL,
6809 NULL,
6810 FALSE, /* should_return */
6811 MEMORY_OBJECT_COPY_SYNC,
6812 VM_PROT_NO_CHANGE);
6813 #if DEVELOPMENT || DEBUG
6814 iopl_cow++;
6815 iopl_cow_pages += size >> PAGE_SHIFT;
6816 #endif
6817 }
6818
6819
6820 entry = 0;
6821
6822 xfer_size = size;
6823 dst_offset = offset;
6824
6825 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
6826 fault_info.user_tag = 0;
6827 fault_info.lo_offset = offset;
6828 fault_info.hi_offset = offset + xfer_size;
6829 fault_info.no_cache = FALSE;
6830 fault_info.stealth = FALSE;
6831 fault_info.io_sync = FALSE;
6832 fault_info.cs_bypass = FALSE;
6833 fault_info.mark_zf_absent = (0 == (cntrl_flags & UPL_NOZEROFILLIO));
6834
6835 dwp = &dw_array[0];
6836 dw_count = 0;
6837 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
6838
6839 while (xfer_size) {
6840 vm_fault_return_t result;
6841 unsigned int pg_num;
6842
6843 dwp->dw_mask = 0;
6844
6845 dst_page = vm_page_lookup(object, dst_offset);
6846
6847 /*
6848 * ENCRYPTED SWAP:
6849 * If the page is encrypted, we need to decrypt it,
6850 * so force a soft page fault.
6851 */
6852 if (dst_page == VM_PAGE_NULL ||
6853 dst_page->busy ||
6854 dst_page->encrypted ||
6855 dst_page->error ||
6856 dst_page->restart ||
6857 dst_page->absent ||
6858 dst_page->fictitious) {
6859
6860 if (object == kernel_object)
6861 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
6862 if (object == compressor_object)
6863 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
6864
6865 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
6866 ret = KERN_MEMORY_ERROR;
6867 goto return_err;
6868 }
6869
6870 /*
6871 * We just looked up the page and the result remains valid
6872 * until the object lock is released, so send it to
6873 * vm_fault_page() (as "dst_page"), to avoid having to
6874 * look it up again there.
6875 */
6876 caller_lookup = TRUE;
6877
6878 do {
6879 vm_page_t top_page;
6880 kern_return_t error_code;
6881 int interruptible;
6882
6883 if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
6884 interruptible = THREAD_ABORTSAFE;
6885 else
6886 interruptible = THREAD_UNINT;
6887
6888 fault_info.interruptible = interruptible;
6889 fault_info.cluster_size = xfer_size;
6890 fault_info.batch_pmap_op = TRUE;
6891
6892 vm_object_paging_begin(object);
6893
6894 result = vm_fault_page(object, dst_offset,
6895 prot | VM_PROT_WRITE, FALSE,
6896 caller_lookup,
6897 &prot, &dst_page, &top_page,
6898 (int *)0,
6899 &error_code, no_zero_fill,
6900 FALSE, &fault_info);
6901
6902 /* our lookup is no longer valid at this point */
6903 caller_lookup = FALSE;
6904
6905 switch (result) {
6906
6907 case VM_FAULT_SUCCESS:
6908
6909 if ( !dst_page->absent) {
6910 PAGE_WAKEUP_DONE(dst_page);
6911 } else {
6912 /*
6913 * we only get back an absent page if we
6914 * requested that it not be zero-filled
6915 * because we are about to fill it via I/O
6916 *
6917 * absent pages should be left BUSY
6918 * to prevent them from being faulted
6919 * into an address space before we've
6920 * had a chance to complete the I/O on
6921 * them since they may contain info that
6922 * shouldn't be seen by the faulting task
6923 */
6924 }
6925 /*
6926 * Release paging references and
6927 * top-level placeholder page, if any.
6928 */
6929 if (top_page != VM_PAGE_NULL) {
6930 vm_object_t local_object;
6931
6932 local_object = top_page->object;
6933
6934 if (top_page->object != dst_page->object) {
6935 vm_object_lock(local_object);
6936 VM_PAGE_FREE(top_page);
6937 vm_object_paging_end(local_object);
6938 vm_object_unlock(local_object);
6939 } else {
6940 VM_PAGE_FREE(top_page);
6941 vm_object_paging_end(local_object);
6942 }
6943 }
6944 vm_object_paging_end(object);
6945 break;
6946
6947 case VM_FAULT_RETRY:
6948 vm_object_lock(object);
6949 break;
6950
6951 case VM_FAULT_MEMORY_SHORTAGE:
6952 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
6953
6954 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
6955
6956 if (vm_page_wait(interruptible)) {
6957 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
6958
6959 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
6960 vm_object_lock(object);
6961
6962 break;
6963 }
6964 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
6965
6966 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
6967
6968 /* fall thru */
6969
6970 case VM_FAULT_INTERRUPTED:
6971 error_code = MACH_SEND_INTERRUPTED;
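/* fall through to the memory-error handling below */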
6972 case VM_FAULT_MEMORY_ERROR:
6973 memory_error:
6974 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
6975
6976 vm_object_lock(object);
6977 goto return_err;
6978
6979 case VM_FAULT_SUCCESS_NO_VM_PAGE:
6980 /* success but no page: fail */
6981 vm_object_paging_end(object);
6982 vm_object_unlock(object);
6983 goto memory_error;
6984
6985 default:
6986 panic("vm_object_iopl_request: unexpected error"
6987 " 0x%x from vm_fault_page()\n", result);
6988 }
6989 } while (result != VM_FAULT_SUCCESS);
6990
6991 }
6992 if (upl->flags & UPL_KERNEL_OBJECT)
6993 goto record_phys_addr;
6994
6995 if (dst_page->compressor) {
6996 dst_page->busy = TRUE;
6997 goto record_phys_addr;
6998 }
6999
7000 if (dst_page->cleaning) {
7001 /*
7002 * Someone else is cleaning this page in place.
7003 * In theory, we should be able to proceed and use this
7004 * page but they'll probably end up clearing the "busy"
7005 * bit on it in upl_commit_range() but they didn't set
7006 * it, so they would clear our "busy" bit and open
7007 * us to race conditions.
7008 * We'd better wait for the cleaning to complete and
7009 * then try again.
7010 */
7011 vm_object_iopl_request_sleep_for_cleaning++;
7012 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7013 continue;
7014 }
7015 if (dst_page->laundry) {
7016 dst_page->pageout = FALSE;
7017
7018 vm_pageout_steal_laundry(dst_page, FALSE);
7019 }
7020 if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
7021 dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
7022 vm_page_t low_page;
7023 int refmod;
7024
7025 /*
7026 * support devices that can't DMA above 32 bits
7027 * by substituting pages from a pool of low address
7028 * memory for any pages we find above the 4G mark.
7029 * We can't substitute if the page is already wired because
7030 * we don't know whether that physical address has been
7031 * handed out to some other 64 bit capable DMA device to use
7032 */
7033 if (VM_PAGE_WIRED(dst_page)) {
7034 ret = KERN_PROTECTION_FAILURE;
7035 goto return_err;
7036 }
7037 low_page = vm_page_grablo();
7038
7039 if (low_page == VM_PAGE_NULL) {
7040 ret = KERN_RESOURCE_SHORTAGE;
7041 goto return_err;
7042 }
7043 /*
7044 * from here until the vm_page_replace completes
7045 * we mustn't drop the object lock... we don't
7046 * want anyone refaulting this page in and using
7047 * it after we disconnect it... we want the fault
7048 * to find the new page being substituted.
7049 */
7050 if (dst_page->pmapped)
7051 refmod = pmap_disconnect(dst_page->phys_page);
7052 else
7053 refmod = 0;
7054
7055 if (!dst_page->absent)
7056 vm_page_copy(dst_page, low_page);
7057
7058 low_page->reference = dst_page->reference;
7059 low_page->dirty = dst_page->dirty;
7060 low_page->absent = dst_page->absent;
7061
7062 if (refmod & VM_MEM_REFERENCED)
7063 low_page->reference = TRUE;
7064 if (refmod & VM_MEM_MODIFIED) {
7065 SET_PAGE_DIRTY(low_page, FALSE);
7066 }
7067
7068 vm_page_replace(low_page, object, dst_offset);
7069
7070 dst_page = low_page;
7071 /*
7072 * vm_page_grablo returned the page marked
7073 * BUSY... we don't need a PAGE_WAKEUP_DONE
7074 * here, because we've never dropped the object lock
7075 */
7076 if ( !dst_page->absent)
7077 dst_page->busy = FALSE;
7078 }
7079 if ( !dst_page->busy)
7080 dwp->dw_mask |= DW_vm_page_wire;
7081
7082 if (cntrl_flags & UPL_BLOCK_ACCESS) {
7083 /*
7084 * Mark the page "busy" to block any future page fault
7085 * on this page in addition to wiring it.
7086 * We'll also remove the mapping
7087 * of all these pages before leaving this routine.
7088 */
7089 assert(!dst_page->fictitious);
7090 dst_page->busy = TRUE;
7091 }
7092 /*
7093 * expect the page to be used
7094 * page queues lock must be held to set 'reference'
7095 */
7096 dwp->dw_mask |= DW_set_reference;
7097
7098 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
7099 SET_PAGE_DIRTY(dst_page, TRUE);
7100 }
7101 record_phys_addr:
7102 if (dst_page->busy)
7103 upl->flags |= UPL_HAS_BUSY;
7104
7105 pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
7106 assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
7107 lite_list[pg_num>>5] |= 1 << (pg_num & 31);
7108
7109 if (dst_page->phys_page > upl->highest_page)
7110 upl->highest_page = dst_page->phys_page;
7111
7112 if (user_page_list) {
7113 user_page_list[entry].phys_addr = dst_page->phys_page;
7114 user_page_list[entry].pageout = dst_page->pageout;
7115 user_page_list[entry].absent = dst_page->absent;
7116 user_page_list[entry].dirty = dst_page->dirty;
7117 user_page_list[entry].precious = dst_page->precious;
7118 user_page_list[entry].device = FALSE;
7119 user_page_list[entry].needed = FALSE;
7120 if (dst_page->clustered == TRUE)
7121 user_page_list[entry].speculative = dst_page->speculative;
7122 else
7123 user_page_list[entry].speculative = FALSE;
7124 user_page_list[entry].cs_validated = dst_page->cs_validated;
7125 user_page_list[entry].cs_tainted = dst_page->cs_tainted;
7126 }
7127 if (object != kernel_object && object != compressor_object) {
7128 /*
7129 * someone is explicitly grabbing this page...
7130 * update clustered and speculative state
7131 *
7132 */
7133 VM_PAGE_CONSUME_CLUSTERED(dst_page);
7134 }
7135 entry++;
7136 dst_offset += PAGE_SIZE_64;
7137 xfer_size -= PAGE_SIZE;
7138
7139 if (dwp->dw_mask) {
7140 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
7141
7142 if (dw_count >= dw_limit) {
7143 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
7144
7145 dwp = &dw_array[0];
7146 dw_count = 0;
7147 }
7148 }
7149 }
7150 if (dw_count)
7151 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
7152
7153 vm_object_set_pmap_cache_attr(object, user_page_list, entry, TRUE);
7154
7155 if (page_list_count != NULL) {
7156 if (upl->flags & UPL_INTERNAL)
7157 *page_list_count = 0;
7158 else if (*page_list_count > entry)
7159 *page_list_count = entry;
7160 }
7161 vm_object_unlock(object);
7162
7163 if (cntrl_flags & UPL_BLOCK_ACCESS) {
7164 /*
7165 * We've marked all the pages "busy" so that future
7166 * page faults will block.
7167 * Now remove the mapping for these pages, so that they
7168 * can't be accessed without causing a page fault.
7169 */
7170 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
7171 PMAP_NULL, 0, VM_PROT_NONE);
7172 assert(!object->blocked_access);
7173 object->blocked_access = TRUE;
7174 }
7175 return KERN_SUCCESS;
7176
7177 return_err:
7178 dw_index = 0;
7179
7180 for (; offset < dst_offset; offset += PAGE_SIZE) {
7181 boolean_t need_unwire;
7182
7183 dst_page = vm_page_lookup(object, offset);
7184
7185 if (dst_page == VM_PAGE_NULL)
7186 panic("vm_object_iopl_request: Wired page missing. \n");
7187
7188 /*
7189 * if we've already processed this page in an earlier
7190 * dw_do_work, we need to undo the wiring... we will
7191 * leave the dirty and reference bits on if they
7192 * were set, since we don't have a good way of knowing
7193 * what the previous state was and we won't get here
7194 * under any normal circumstances... we will always
7195 * clear BUSY and wakeup any waiters via vm_page_free
7196 * or PAGE_WAKEUP_DONE
7197 */
7198 need_unwire = TRUE;
7199
7200 if (dw_count) {
7201 if (dw_array[dw_index].dw_m == dst_page) {
7202 /*
7203 * still in the deferred work list
7204 * which means we haven't yet called
7205 * vm_page_wire on this page
7206 */
7207 need_unwire = FALSE;
7208
7209 dw_index++;
7210 dw_count--;
7211 }
7212 }
7213 vm_page_lock_queues();
7214
7215 if (dst_page->absent) {
7216 vm_page_free(dst_page);
7217
7218 need_unwire = FALSE;
7219 } else {
7220 if (need_unwire == TRUE)
7221 vm_page_unwire(dst_page, TRUE);
7222
7223 PAGE_WAKEUP_DONE(dst_page);
7224 }
7225 vm_page_unlock_queues();
7226
7227 if (need_unwire == TRUE)
7228 VM_STAT_INCR(reactivations);
7229 }
7230 #if UPL_DEBUG
7231 upl->upl_state = 2;
7232 #endif
7233 if (! (upl->flags & UPL_KERNEL_OBJECT)) {
7234 vm_object_activity_end(object);
7235 vm_object_collapse(object, 0, TRUE);
7236 }
7237 vm_object_unlock(object);
7238 upl_destroy(upl);
7239
7240 return ret;
7241 }
7242
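/*
 * upl_transpose:
 *
 * Exchange the backing VM objects of two equally-sized, full-object UPLs
 * via vm_object_transpose(), then repoint each UPL at the object that now
 * holds its pages.  Vector UPLs and partial-object UPLs are rejected.
 */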
7243 kern_return_t
7244 upl_transpose(
7245 upl_t upl1,
7246 upl_t upl2)
7247 {
7248 kern_return_t retval;
7249 boolean_t upls_locked;
7250 vm_object_t object1, object2;
7251
7252 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
7253 return KERN_INVALID_ARGUMENT;
7254 }
7255
7256 upls_locked = FALSE;
7257
7258 /*
7259 * Since we need to lock both UPLs at the same time,
7260 * avoid deadlocks by always taking locks in the same order.
7261 */
7262 if (upl1 < upl2) {
7263 upl_lock(upl1);
7264 upl_lock(upl2);
7265 } else {
7266 upl_lock(upl2);
7267 upl_lock(upl1);
7268 }
7269 upls_locked = TRUE; /* the UPLs will need to be unlocked */
7270
7271 object1 = upl1->map_object;
7272 object2 = upl2->map_object;
7273
7274 if (upl1->offset != 0 || upl2->offset != 0 ||
7275 upl1->size != upl2->size) {
7276 /*
7277 * We deal only with full objects, not subsets.
7278 * That's because we exchange the entire backing store info
7279 * for the objects: pager, resident pages, etc... We can't do
7280 * only part of it.
7281 */
7282 retval = KERN_INVALID_VALUE;
7283 goto done;
7284 }
7285
7286 /*
7287 * Transpose the VM objects' backing store.
7288 */
7289 retval = vm_object_transpose(object1, object2,
7290 (vm_object_size_t) upl1->size);
7291
7292 if (retval == KERN_SUCCESS) {
7293 /*
7294 * Make each UPL point to the correct VM object, i.e. the
7295 * object holding the pages that the UPL refers to...
7296 */
7297 #if UPL_DEBUG
7298 queue_remove(&object1->uplq, upl1, upl_t, uplq);
7299 queue_remove(&object2->uplq, upl2, upl_t, uplq);
7300 #endif
7301 upl1->map_object = object2;
7302 upl2->map_object = object1;
7303 #if UPL_DEBUG
7304 queue_enter(&object1->uplq, upl2, upl_t, uplq);
7305 queue_enter(&object2->uplq, upl1, upl_t, uplq);
7306 #endif
7307 }
7308
7309 done:
7310 /*
7311 * Cleanup.
7312 */
7313 if (upls_locked) {
7314 upl_unlock(upl1);
7315 upl_unlock(upl2);
7316 upls_locked = FALSE;
7317 }
7318
7319 return retval;
7320 }
7321
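/*
 * upl_range_needed:
 *
 * Mark "count" pages of an internal UPL, starting at page "index", as
 * "needed" in its page list.  A later upl_abort_range will then keep
 * such speculative read-ahead pages around instead of freeing them
 * (see the m->clustered handling there).
 */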
7322 void
7323 upl_range_needed(
7324 upl_t upl,
7325 int index,
7326 int count)
7327 {
7328 upl_page_info_t *user_page_list;
7329 int size_in_pages;
7330
7331 if ( !(upl->flags & UPL_INTERNAL) || count <= 0)
7332 return;
7333
7334 size_in_pages = upl->size / PAGE_SIZE;
7335
7336 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
7337
7338 while (count-- && index < size_in_pages)
7339 user_page_list[index++].needed = TRUE;
7340 }
7341
7342
7343 /*
7344 * ENCRYPTED SWAP:
7345 *
7346 * Rationale: the user might have some encrypted data on disk (via
7347 * FileVault or any other mechanism). That data is then decrypted in
7348 * memory, which is safe as long as the machine is secure. But that
7349 * decrypted data in memory could be paged out to disk by the default
7350 * pager. The data would then be stored on disk in the clear (not encrypted)
7351 * and it could be accessed by anyone who gets physical access to the
7352 * disk (if the laptop or the disk gets stolen for example). This weakens
7353 * the security offered by FileVault.
7354 *
7355 * Solution: the default pager will optionally request that all the
7356 * pages it gathers for pageout be encrypted, via the UPL interfaces,
7357 * before it sends this UPL to disk via the vnode_pageout() path.
7358 *
7359 * Notes:
7360 *
7361 * To avoid disrupting the VM LRU algorithms, we want to keep the
7362 * clean-in-place mechanisms, which allow us to send some extra pages to
7363 * swap (clustering) without actually removing them from the user's
7364 * address space. We don't want the user to unknowingly access encrypted
7365 * data, so we have to actually remove the encrypted pages from the page
7366 * table. When the user accesses the data, the hardware will fail to
7367 * locate the virtual page in its page table and will trigger a page
7368 * fault. We can then decrypt the page and enter it in the page table
7369 * again. Whenever we allow the user to access the contents of a page,
7370 * we have to make sure it's not encrypted.
7371 *
7372 *
7373 */
7374 /*
7375 * ENCRYPTED SWAP:
7376 * Reserve of virtual addresses in the kernel address space.
7377 * We need to map the physical pages in the kernel, so that we
7378 * can call the encryption/decryption routines with a kernel
7379 * virtual address. We keep this pool of pre-allocated kernel
7380 * virtual addresses so that we don't have to scan the kernel's
7381 * virtual address space each time we need to encrypt or decrypt
7382 * a physical page.
7383 * It would be nice to be able to encrypt and decrypt in physical
7384 * mode but that might not always be more efficient...
7385 */
7386 decl_simple_lock_data(,vm_paging_lock)
7387 #define VM_PAGING_NUM_PAGES 64
7388 vm_map_offset_t vm_paging_base_address = 0;
7389 boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
7390 int vm_paging_max_index = 0;
7391 int vm_paging_page_waiter = 0;
7392 int vm_paging_page_waiter_total = 0;
7393 unsigned long vm_paging_no_kernel_page = 0;
7394 unsigned long vm_paging_objects_mapped = 0;
7395 unsigned long vm_paging_pages_mapped = 0;
7396 unsigned long vm_paging_objects_mapped_slow = 0;
7397 unsigned long vm_paging_pages_mapped_slow = 0;
7398
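/*
 * vm_paging_map_init:
 *
 * Reserve VM_PAGING_NUM_PAGES pages of kernel virtual address space out
 * of the kernel map (a permanent, VM_PROT_NONE entry backed by
 * kernel_object) to serve as the pre-allocated window described above.
 */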
7399 void
7400 vm_paging_map_init(void)
7401 {
7402 kern_return_t kr;
7403 vm_map_offset_t page_map_offset;
7404 vm_map_entry_t map_entry;
7405
7406 assert(vm_paging_base_address == 0);
7407
7408 /*
7409 * Initialize our pool of pre-allocated kernel
7410 * virtual addresses.
7411 */
7412 page_map_offset = 0;
7413 kr = vm_map_find_space(kernel_map,
7414 &page_map_offset,
7415 VM_PAGING_NUM_PAGES * PAGE_SIZE,
7416 0,
7417 0,
7418 &map_entry);
7419 if (kr != KERN_SUCCESS) {
7420 panic("vm_paging_map_init: kernel_map full\n");
7421 }
7422 map_entry->object.vm_object = kernel_object;
7423 map_entry->offset = page_map_offset;
7424 map_entry->protection = VM_PROT_NONE;
7425 map_entry->max_protection = VM_PROT_NONE;
7426 map_entry->permanent = TRUE;
7427 vm_object_reference(kernel_object);
7428 vm_map_unlock(kernel_map);
7429
7430 assert(vm_paging_base_address == 0);
7431 vm_paging_base_address = page_map_offset;
7432 }
7433
7434 /*
7435 * ENCRYPTED SWAP:
7436 * vm_paging_map_object:
7437 * Maps part of a VM object's pages in the kernel
7438 * virtual address space, using the pre-allocated
7439 * kernel virtual addresses, if possible.
7440 * Context:
7441 * The VM object is locked. This lock will get
7442 * dropped and re-acquired though, so the caller
7443 * must make sure the VM object is kept alive
7444 * (by holding a VM map that has a reference
7445 * on it, for example, or taking an extra reference).
7446 * The page should also be kept busy to prevent
7447 * it from being reclaimed.
7448 */
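/*
 * A minimal usage sketch (not taken from this file; the caller-side
 * names are illustrative): map one busy page, touch it through the
 * kernel window, then tear the mapping down if one was created.
 *
 *	vm_map_size_t	map_size = PAGE_SIZE;
 *	vm_map_offset_t	kernel_addr;
 *	boolean_t	need_unmap;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *				  FALSE, &map_size, &kernel_addr,
 *				  &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... access the page contents via kernel_addr ...
 *		if (need_unmap)
 *			vm_paging_unmap_object(object, kernel_addr,
 *					       kernel_addr + map_size);
 *	}
 */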
7449 kern_return_t
7450 vm_paging_map_object(
7451 vm_page_t page,
7452 vm_object_t object,
7453 vm_object_offset_t offset,
7454 vm_prot_t protection,
7455 boolean_t can_unlock_object,
7456 vm_map_size_t *size, /* IN/OUT */
7457 vm_map_offset_t *address, /* OUT */
7458 boolean_t *need_unmap) /* OUT */
7459 {
7460 kern_return_t kr;
7461 vm_map_offset_t page_map_offset;
7462 vm_map_size_t map_size;
7463 vm_object_offset_t object_offset;
7464 int i;
7465
7466 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
7467 /* use permanent 1-to-1 kernel mapping of physical memory ? */
7468 #if __x86_64__
7469 *address = (vm_map_offset_t)
7470 PHYSMAP_PTOV((pmap_paddr_t)page->phys_page <<
7471 PAGE_SHIFT);
7472 *need_unmap = FALSE;
7473 return KERN_SUCCESS;
7474 #else
7475 #warning "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
7476 #endif
7477
7478 assert(page->busy);
7479 /*
7480 * Use one of the pre-allocated kernel virtual addresses
7481 * and just enter the VM page in the kernel address space
7482 * at that virtual address.
7483 */
7484 simple_lock(&vm_paging_lock);
7485
7486 /*
7487 * Try and find an available kernel virtual address
7488 * from our pre-allocated pool.
7489 */
7490 page_map_offset = 0;
7491 for (;;) {
7492 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
7493 if (vm_paging_page_inuse[i] == FALSE) {
7494 page_map_offset =
7495 vm_paging_base_address +
7496 (i * PAGE_SIZE);
7497 break;
7498 }
7499 }
7500 if (page_map_offset != 0) {
7501 /* found a space to map our page ! */
7502 break;
7503 }
7504
7505 if (can_unlock_object) {
7506 /*
7507 * If we can afford to unlock the VM object,
7508 * let's take the slow path now...
7509 */
7510 break;
7511 }
7512 /*
7513 * We can't afford to unlock the VM object, so
7514 * let's wait for a space to become available...
7515 */
7516 vm_paging_page_waiter_total++;
7517 vm_paging_page_waiter++;
7518 thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
7519 &vm_paging_lock,
7520 THREAD_UNINT);
7521 vm_paging_page_waiter--;
7522 /* ... and try again */
7523 }
7524
7525 if (page_map_offset != 0) {
7526 /*
7527 * We found a kernel virtual address;
7528 * map the physical page to that virtual address.
7529 */
7530 if (i > vm_paging_max_index) {
7531 vm_paging_max_index = i;
7532 }
7533 vm_paging_page_inuse[i] = TRUE;
7534 simple_unlock(&vm_paging_lock);
7535
7536 page->pmapped = TRUE;
7537
7538 /*
7539 * Keep the VM object locked over the PMAP_ENTER
7540 * and the actual use of the page by the kernel,
7541 * or this pmap mapping might get undone by a
7542 * vm_object_pmap_protect() call...
7543 */
7544 PMAP_ENTER(kernel_pmap,
7545 page_map_offset,
7546 page,
7547 protection,
7548 VM_PROT_NONE,
7549 0,
7550 TRUE);
7551 vm_paging_objects_mapped++;
7552 vm_paging_pages_mapped++;
7553 *address = page_map_offset;
7554 *need_unmap = TRUE;
7555
7556 /* all done and mapped, ready to use ! */
7557 return KERN_SUCCESS;
7558 }
7559
7560 /*
7561 * We ran out of pre-allocated kernel virtual
7562 * addresses. Just map the page in the kernel
7563 * the slow and regular way.
7564 */
7565 vm_paging_no_kernel_page++;
7566 simple_unlock(&vm_paging_lock);
7567 }
7568
7569 if (! can_unlock_object) {
7570 *address = 0;
7571 *size = 0;
7572 *need_unmap = FALSE;
7573 return KERN_NOT_SUPPORTED;
7574 }
7575
7576 object_offset = vm_object_trunc_page(offset);
7577 map_size = vm_map_round_page(*size,
7578 VM_MAP_PAGE_MASK(kernel_map));
7579
7580 /*
7581 * Try to map the required range of the object
7582 * in the kernel_map
7583 */
7584
7585 vm_object_reference_locked(object); /* for the map entry */
7586 vm_object_unlock(object);
7587
7588 kr = vm_map_enter(kernel_map,
7589 address,
7590 map_size,
7591 0,
7592 VM_FLAGS_ANYWHERE,
7593 object,
7594 object_offset,
7595 FALSE,
7596 protection,
7597 VM_PROT_ALL,
7598 VM_INHERIT_NONE);
7599 if (kr != KERN_SUCCESS) {
7600 *address = 0;
7601 *size = 0;
7602 *need_unmap = FALSE;
7603 vm_object_deallocate(object); /* for the map entry */
7604 vm_object_lock(object);
7605 return kr;
7606 }
7607
7608 *size = map_size;
7609
7610 /*
7611 * Enter the mapped pages in the page table now.
7612 */
7613 vm_object_lock(object);
7614 /*
7615 * VM object must be kept locked from before PMAP_ENTER()
7616 * until after the kernel is done accessing the page(s).
7617 * Otherwise, the pmap mappings in the kernel could be
7618 * undone by a call to vm_object_pmap_protect().
7619 */
7620
7621 for (page_map_offset = 0;
7622 map_size != 0;
7623 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
7624
7625 page = vm_page_lookup(object, offset + page_map_offset);
7626 if (page == VM_PAGE_NULL) {
7627 printf("vm_paging_map_object: no page !?");
7628 vm_object_unlock(object);
7629 kr = vm_map_remove(kernel_map, *address, *size,
7630 VM_MAP_NO_FLAGS);
7631 assert(kr == KERN_SUCCESS);
7632 *address = 0;
7633 *size = 0;
7634 *need_unmap = FALSE;
7635 vm_object_lock(object);
7636 return KERN_MEMORY_ERROR;
7637 }
7638 page->pmapped = TRUE;
7639
7640 //assert(pmap_verify_free(page->phys_page));
7641 PMAP_ENTER(kernel_pmap,
7642 *address + page_map_offset,
7643 page,
7644 protection,
7645 VM_PROT_NONE,
7646 0,
7647 TRUE);
7648 }
7649
7650 vm_paging_objects_mapped_slow++;
7651 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
7652
7653 *need_unmap = TRUE;
7654
7655 return KERN_SUCCESS;
7656 }
7657
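/*
 * Illustrative sketch: the calling pattern that vm_page_encrypt(),
 * vm_page_decrypt() and vm_page_slide() below all follow.  The caller
 * holds the VM object lock and keeps the page busy; "consume_mapping"
 * stands in for whatever the caller does with the kernel mapping and is
 * purely hypothetical.
 */
#if 0	/* illustrative sketch only */
static kern_return_t
vm_paging_map_object_example(
	vm_page_t	page,
	vm_object_t	object)
{
	kern_return_t	kr;
	vm_map_size_t	kernel_mapping_size;
	vm_map_offset_t	kernel_mapping_offset;
	boolean_t	kernel_mapping_needs_unmap;

	kernel_mapping_size = PAGE_SIZE;
	kernel_mapping_needs_unmap = FALSE;
	kr = vm_paging_map_object(page,
				  object,
				  page->offset,
				  VM_PROT_READ | VM_PROT_WRITE,
				  FALSE,	/* don't unlock the object */
				  &kernel_mapping_size,		/* IN/OUT */
				  &kernel_mapping_offset,	/* OUT */
				  &kernel_mapping_needs_unmap);	/* OUT */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	consume_mapping(CAST_DOWN(vm_offset_t, kernel_mapping_offset),
			kernel_mapping_size);

	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(object,
				       kernel_mapping_offset,
				       kernel_mapping_offset +
				       kernel_mapping_size);
	}
	return KERN_SUCCESS;
}
#endif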
7658 /*
7659 * ENCRYPTED SWAP:
7660 * vm_paging_unmap_object:
7661 * Unmaps part of a VM object's pages from the kernel
7662 * virtual address space.
7663 * Context:
7664 * The VM object is locked. This lock will get
7665 * dropped and re-acquired though.
7666 */
7667 void
7668 vm_paging_unmap_object(
7669 vm_object_t object,
7670 vm_map_offset_t start,
7671 vm_map_offset_t end)
7672 {
7673 kern_return_t kr;
7674 int i;
7675
7676 if ((vm_paging_base_address == 0) ||
7677 (start < vm_paging_base_address) ||
7678 (end > (vm_paging_base_address
7679 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
7680 /*
7681 * We didn't use our pre-allocated pool of
7682 * kernel virtual address. Deallocate the
7683 * virtual memory.
7684 */
7685 if (object != VM_OBJECT_NULL) {
7686 vm_object_unlock(object);
7687 }
7688 kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
7689 if (object != VM_OBJECT_NULL) {
7690 vm_object_lock(object);
7691 }
7692 assert(kr == KERN_SUCCESS);
7693 } else {
7694 /*
7695 * We used a kernel virtual address from our
7696 * pre-allocated pool. Put it back in the pool
7697 * for next time.
7698 */
7699 assert(end - start == PAGE_SIZE);
7700 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
7701 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
7702
7703 /* undo the pmap mapping */
7704 pmap_remove(kernel_pmap, start, end);
7705
7706 simple_lock(&vm_paging_lock);
7707 vm_paging_page_inuse[i] = FALSE;
7708 if (vm_paging_page_waiter) {
7709 thread_wakeup(&vm_paging_page_waiter);
7710 }
7711 simple_unlock(&vm_paging_lock);
7712 }
7713 }
7714
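/*
 * Illustrative sketch: the range check at the top of
 * vm_paging_unmap_object() above, factored out for clarity.  A mapping
 * [start, end) came from the pre-allocated pool only if the pool was
 * initialized and the range lies entirely within it.
 * "vm_paging_range_is_pooled" is a hypothetical name.
 */
#if 0	/* illustrative sketch only */
static boolean_t
vm_paging_range_is_pooled(
	vm_map_offset_t	start,
	vm_map_offset_t	end)
{
	if (vm_paging_base_address == 0) {
		/* the pool was never set up */
		return FALSE;
	}
	return (start >= vm_paging_base_address &&
		end <= (vm_paging_base_address +
			(VM_PAGING_NUM_PAGES * PAGE_SIZE)));
}
#endif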
7715 #if CRYPTO
7716 /*
7717 * Encryption data.
7718 * "iv" is the "initial vector". Ideally, we want to
7719 * have a different one for each page we encrypt, so that
7720 * crackers can't find encryption patterns too easily.
7721 */
7722 #define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
7723 boolean_t swap_crypt_ctx_initialized = FALSE;
7724 uint32_t swap_crypt_key[8]; /* big enough for a 256-bit key */
7725 aes_ctx swap_crypt_ctx;
7726 const unsigned char swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };
7727
7728 #if DEBUG
7729 boolean_t swap_crypt_ctx_tested = FALSE;
7730 unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
7731 unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
7732 unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
7733 #endif /* DEBUG */
7734
7735 /*
7736 * Initialize the encryption context: key and key size.
7737 */
7738 void swap_crypt_ctx_initialize(void); /* forward */
7739 void
7740 swap_crypt_ctx_initialize(void)
7741 {
7742 unsigned int i;
7743
7744 /*
7745 * No need for locking to protect swap_crypt_ctx_initialized
7746 * because the first use of encryption will come from the
7747 * pageout thread (we won't pagein before there's been a pageout)
7748 * and there's only one pageout thread.
7749 */
7750 if (swap_crypt_ctx_initialized == FALSE) {
7751 for (i = 0;
7752 i < (sizeof (swap_crypt_key) /
7753 sizeof (swap_crypt_key[0]));
7754 i++) {
7755 swap_crypt_key[i] = random();
7756 }
7757 aes_encrypt_key((const unsigned char *) swap_crypt_key,
7758 SWAP_CRYPT_AES_KEY_SIZE,
7759 &swap_crypt_ctx.encrypt);
7760 aes_decrypt_key((const unsigned char *) swap_crypt_key,
7761 SWAP_CRYPT_AES_KEY_SIZE,
7762 &swap_crypt_ctx.decrypt);
7763 swap_crypt_ctx_initialized = TRUE;
7764 }
7765
7766 #if DEBUG
7767 /*
7768 * Validate the encryption algorithms.
7769 */
7770 if (swap_crypt_ctx_tested == FALSE) {
7771 /* initialize */
7772 for (i = 0; i < 4096; i++) {
7773 swap_crypt_test_page_ref[i] = (char) i;
7774 }
7775 /* encrypt */
7776 aes_encrypt_cbc(swap_crypt_test_page_ref,
7777 swap_crypt_null_iv,
7778 PAGE_SIZE / AES_BLOCK_SIZE,
7779 swap_crypt_test_page_encrypt,
7780 &swap_crypt_ctx.encrypt);
7781 /* decrypt */
7782 aes_decrypt_cbc(swap_crypt_test_page_encrypt,
7783 swap_crypt_null_iv,
7784 PAGE_SIZE / AES_BLOCK_SIZE,
7785 swap_crypt_test_page_decrypt,
7786 &swap_crypt_ctx.decrypt);
7787 /* compare result with original */
7788 for (i = 0; i < 4096; i ++) {
7789 if (swap_crypt_test_page_decrypt[i] !=
7790 swap_crypt_test_page_ref[i]) {
7791 panic("encryption test failed");
7792 }
7793 }
7794
7795 /* encrypt again */
7796 aes_encrypt_cbc(swap_crypt_test_page_decrypt,
7797 swap_crypt_null_iv,
7798 PAGE_SIZE / AES_BLOCK_SIZE,
7799 swap_crypt_test_page_decrypt,
7800 &swap_crypt_ctx.encrypt);
7801 /* decrypt in place */
7802 aes_decrypt_cbc(swap_crypt_test_page_decrypt,
7803 swap_crypt_null_iv,
7804 PAGE_SIZE / AES_BLOCK_SIZE,
7805 swap_crypt_test_page_decrypt,
7806 &swap_crypt_ctx.decrypt);
7807 for (i = 0; i < 4096; i ++) {
7808 if (swap_crypt_test_page_decrypt[i] !=
7809 swap_crypt_test_page_ref[i]) {
7810 panic("in place encryption test failed");
7811 }
7812 }
7813
7814 swap_crypt_ctx_tested = TRUE;
7815 }
7816 #endif /* DEBUG */
7817 }
7818
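/*
 * Illustrative sketch: how vm_page_encrypt() and vm_page_decrypt() below
 * derive the per-page "initial vector".  The page's pager and paging
 * offset are packed into a single AES block and encrypted with the
 * constant null IV, which yields a distinct but reproducible IV for each
 * page.  "swap_crypt_derive_iv" is a hypothetical name; the real routines
 * do this inline.
 */
#if 0	/* illustrative sketch only */
static void
swap_crypt_derive_iv(
	vm_page_t	page,
	unsigned char	iv[AES_BLOCK_SIZE])
{
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} raw_iv;

	bzero(&raw_iv.aes_iv[0], sizeof (raw_iv.aes_iv));
	raw_iv.vm.pager_object = page->object->pager;
	raw_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* one AES block, chained off the constant null IV */
	aes_encrypt_cbc((const unsigned char *) &raw_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			iv,
			&swap_crypt_ctx.encrypt);
}
#endif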
7819 /*
7820 * ENCRYPTED SWAP:
7821 * vm_page_encrypt:
7822 * Encrypt the given page, for secure paging.
7823 * The page might already be mapped at kernel virtual
7824 * address "kernel_mapping_offset". Otherwise, we need
7825 * to map it.
7826 *
7827 * Context:
7828 * The page's object is locked, but this lock will be released
7829 * and re-acquired.
7830 * The page is busy and not accessible by users (not entered in any pmap).
7831 */
7832 void
7833 vm_page_encrypt(
7834 vm_page_t page,
7835 vm_map_offset_t kernel_mapping_offset)
7836 {
7837 kern_return_t kr;
7838 vm_map_size_t kernel_mapping_size;
7839 boolean_t kernel_mapping_needs_unmap;
7840 vm_offset_t kernel_vaddr;
7841 union {
7842 unsigned char aes_iv[AES_BLOCK_SIZE];
7843 struct {
7844 memory_object_t pager_object;
7845 vm_object_offset_t paging_offset;
7846 } vm;
7847 } encrypt_iv;
7848
7849 if (! vm_pages_encrypted) {
7850 vm_pages_encrypted = TRUE;
7851 }
7852
7853 assert(page->busy);
7854
7855 if (page->encrypted) {
7856 /*
7857 * Already encrypted: no need to do it again.
7858 */
7859 vm_page_encrypt_already_encrypted_counter++;
7860 return;
7861 }
7862 assert(page->dirty || page->precious);
7863
7864 ASSERT_PAGE_DECRYPTED(page);
7865
7866 /*
7867 * Take a paging-in-progress reference to keep the object
7868 * alive even if we have to unlock it (in vm_paging_map_object()
7869 * for example)...
7870 */
7871 vm_object_paging_begin(page->object);
7872
7873 if (kernel_mapping_offset == 0) {
7874 /*
7875 * The page hasn't already been mapped in kernel space
7876 * by the caller. Map it now, so that we can access
7877 * its contents and encrypt them.
7878 */
7879 kernel_mapping_size = PAGE_SIZE;
7880 kernel_mapping_needs_unmap = FALSE;
7881 kr = vm_paging_map_object(page,
7882 page->object,
7883 page->offset,
7884 VM_PROT_READ | VM_PROT_WRITE,
7885 FALSE,
7886 &kernel_mapping_size,
7887 &kernel_mapping_offset,
7888 &kernel_mapping_needs_unmap);
7889 if (kr != KERN_SUCCESS) {
7890 panic("vm_page_encrypt: "
7891 "could not map page in kernel: 0x%x\n",
7892 kr);
7893 }
7894 } else {
7895 kernel_mapping_size = 0;
7896 kernel_mapping_needs_unmap = FALSE;
7897 }
7898 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
7899
7900 if (swap_crypt_ctx_initialized == FALSE) {
7901 swap_crypt_ctx_initialize();
7902 }
7903 assert(swap_crypt_ctx_initialized);
7904
7905 /*
7906 * Prepare an "initial vector" for the encryption.
7907 * We use the "pager" and the "paging_offset" for that
7908 * page to obfuscate the encrypted data a bit more and
7909 * prevent crackers from finding patterns that they could
7910 * use to break the key.
7911 */
7912 bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
7913 encrypt_iv.vm.pager_object = page->object->pager;
7914 encrypt_iv.vm.paging_offset =
7915 page->object->paging_offset + page->offset;
7916
7917 /* encrypt the "initial vector" */
7918 aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
7919 swap_crypt_null_iv,
7920 1,
7921 &encrypt_iv.aes_iv[0],
7922 &swap_crypt_ctx.encrypt);
7923
7924 /*
7925 * Encrypt the page.
7926 */
7927 aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
7928 &encrypt_iv.aes_iv[0],
7929 PAGE_SIZE / AES_BLOCK_SIZE,
7930 (unsigned char *) kernel_vaddr,
7931 &swap_crypt_ctx.encrypt);
7932
7933 vm_page_encrypt_counter++;
7934
7935 /*
7936 * Unmap the page from the kernel's address space,
7937 * if we had to map it ourselves. Otherwise, let
7938 * the caller undo the mapping if needed.
7939 */
7940 if (kernel_mapping_needs_unmap) {
7941 vm_paging_unmap_object(page->object,
7942 kernel_mapping_offset,
7943 kernel_mapping_offset + kernel_mapping_size);
7944 }
7945
7946 /*
7947 * Clear the "reference" and "modified" bits.
7948 * This should clean up any impact the encryption had
7949 * on them.
7950 * The page was kept busy and disconnected from all pmaps,
7951 * so it can't have been referenced or modified from user
7952 * space.
7953 * The software bits will be reset later after the I/O
7954 * has completed (in upl_commit_range()).
7955 */
7956 pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
7957
7958 page->encrypted = TRUE;
7959
7960 vm_object_paging_end(page->object);
7961 }
7962
7963 /*
7964 * ENCRYPTED SWAP:
7965 * vm_page_decrypt:
7966 * Decrypt the given page.
7967 * The page might already be mapped at kernel virtual
7968 * address "kernel_mapping_offset". Otherwise, we need
7969 * to map it.
7970 *
7971 * Context:
7972 * The page's VM object is locked but will be unlocked and relocked.
7973 * The page is busy and not accessible by users (not entered in any pmap).
7974 */
7975 void
7976 vm_page_decrypt(
7977 vm_page_t page,
7978 vm_map_offset_t kernel_mapping_offset)
7979 {
7980 kern_return_t kr;
7981 vm_map_size_t kernel_mapping_size;
7982 vm_offset_t kernel_vaddr;
7983 boolean_t kernel_mapping_needs_unmap;
7984 union {
7985 unsigned char aes_iv[AES_BLOCK_SIZE];
7986 struct {
7987 memory_object_t pager_object;
7988 vm_object_offset_t paging_offset;
7989 } vm;
7990 } decrypt_iv;
7991 boolean_t was_dirty;
7992
7993 assert(page->busy);
7994 assert(page->encrypted);
7995
7996 was_dirty = page->dirty;
7997
7998 /*
7999 * Take a paging-in-progress reference to keep the object
8000 * alive even if we have to unlock it (in vm_paging_map_object()
8001 * for example)...
8002 */
8003 vm_object_paging_begin(page->object);
8004
8005 if (kernel_mapping_offset == 0) {
8006 /*
8007 * The page hasn't already been mapped in kernel space
8008 * by the caller. Map it now, so that we can access
8009 * its contents and decrypt them.
8010 */
8011 kernel_mapping_size = PAGE_SIZE;
8012 kernel_mapping_needs_unmap = FALSE;
8013 kr = vm_paging_map_object(page,
8014 page->object,
8015 page->offset,
8016 VM_PROT_READ | VM_PROT_WRITE,
8017 FALSE,
8018 &kernel_mapping_size,
8019 &kernel_mapping_offset,
8020 &kernel_mapping_needs_unmap);
8021 if (kr != KERN_SUCCESS) {
8022 panic("vm_page_decrypt: "
8023 "could not map page in kernel: 0x%x\n",
8024 kr);
8025 }
8026 } else {
8027 kernel_mapping_size = 0;
8028 kernel_mapping_needs_unmap = FALSE;
8029 }
8030 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
8031
8032 assert(swap_crypt_ctx_initialized);
8033
8034 /*
8035 * Prepare an "initial vector" for the decryption.
8036 * It has to be the same as the "initial vector" we
8037 * used to encrypt that page.
8038 */
8039 bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
8040 decrypt_iv.vm.pager_object = page->object->pager;
8041 decrypt_iv.vm.paging_offset =
8042 page->object->paging_offset + page->offset;
8043
8044 /* encrypt the "initial vector" */
8045 aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
8046 swap_crypt_null_iv,
8047 1,
8048 &decrypt_iv.aes_iv[0],
8049 &swap_crypt_ctx.encrypt);
8050
8051 /*
8052 * Decrypt the page.
8053 */
8054 aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
8055 &decrypt_iv.aes_iv[0],
8056 PAGE_SIZE / AES_BLOCK_SIZE,
8057 (unsigned char *) kernel_vaddr,
8058 &swap_crypt_ctx.decrypt);
8059 vm_page_decrypt_counter++;
8060
8061 /*
8062 * Unmap the page from the kernel's address space,
8063 * if we had to map it ourselves. Otherwise, let
8064 * the caller undo the mapping if needed.
8065 */
8066 if (kernel_mapping_needs_unmap) {
8067 vm_paging_unmap_object(page->object,
8068 kernel_vaddr,
8069 kernel_vaddr + PAGE_SIZE);
8070 }
8071
8072 if (was_dirty) {
8073 /*
8074 * The pager did not specify that the page would be
8075 * clean when it got paged in, so let's not clean it here
8076 * either.
8077 */
8078 } else {
8079 /*
8080 * After decryption, the page is actually still clean.
8081 * It was encrypted as part of paging, which "cleans"
8082 * the "dirty" pages.
8083 * No one could access it after it was encrypted
8084 * and the decryption doesn't count.
8085 */
8086 page->dirty = FALSE;
8087 assert (page->cs_validated == FALSE);
8088 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
8089 }
8090 page->encrypted = FALSE;
8091
8092 /*
8093 * We've just modified the page's contents via the data cache and part
8094 * of the new contents might still be in the cache and not yet in RAM.
8095 * Since the page is now available and might get gathered in a UPL to
8096 * be part of a DMA transfer from a driver that expects the memory to
8097 * be coherent at this point, we have to flush the data cache.
8098 */
8099 pmap_sync_page_attributes_phys(page->phys_page);
8100 /*
8101 * Since the page is not mapped yet, some code might assume that it
8102 * doesn't need to invalidate the instruction cache when writing to
8103 * that page. That code relies on "pmapped" being FALSE, so that the
8104 * caches get synchronized when the page is first mapped.
8105 */
8106 assert(pmap_verify_free(page->phys_page));
8107 page->pmapped = FALSE;
8108 page->wpmapped = FALSE;
8109
8110 vm_object_paging_end(page->object);
8111 }
8112
8113 #if DEVELOPMENT || DEBUG
8114 unsigned long upl_encrypt_upls = 0;
8115 unsigned long upl_encrypt_pages = 0;
8116 #endif
8117
8118 /*
8119 * ENCRYPTED SWAP:
8120 *
8121 * upl_encrypt:
8122 * Encrypts all the pages in the UPL, within the specified range.
8123 *
8124 */
8125 void
8126 upl_encrypt(
8127 upl_t upl,
8128 upl_offset_t crypt_offset,
8129 upl_size_t crypt_size)
8130 {
8131 upl_size_t upl_size, subupl_size=crypt_size;
8132 upl_offset_t offset_in_upl, subupl_offset=crypt_offset;
8133 vm_object_t upl_object;
8134 vm_object_offset_t upl_offset;
8135 vm_page_t page;
8136 vm_object_t shadow_object;
8137 vm_object_offset_t shadow_offset;
8138 vm_object_offset_t paging_offset;
8139 vm_object_offset_t base_offset;
8140 int isVectorUPL = 0;
8141 upl_t vector_upl = NULL;
8142
8143 if((isVectorUPL = vector_upl_is_valid(upl)))
8144 vector_upl = upl;
8145
8146 process_upl_to_encrypt:
8147 if(isVectorUPL) {
8148 crypt_size = subupl_size;
8149 crypt_offset = subupl_offset;
8150 upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
8151 if(upl == NULL)
8152 panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
8153 subupl_size -= crypt_size;
8154 subupl_offset += crypt_size;
8155 }
8156
8157 #if DEVELOPMENT || DEBUG
8158 upl_encrypt_upls++;
8159 upl_encrypt_pages += crypt_size / PAGE_SIZE;
8160 #endif
8161 upl_object = upl->map_object;
8162 upl_offset = upl->offset;
8163 upl_size = upl->size;
8164
8165 vm_object_lock(upl_object);
8166
8167 /*
8168 * Find the VM object that contains the actual pages.
8169 */
8170 if (upl_object->pageout) {
8171 shadow_object = upl_object->shadow;
8172 /*
8173 * The offset in the shadow object is actually also
8174 * accounted for in upl->offset. It possibly shouldn't be
8175 * this way, but for now don't account for it twice.
8176 */
8177 shadow_offset = 0;
8178 assert(upl_object->paging_offset == 0); /* XXX ? */
8179 vm_object_lock(shadow_object);
8180 } else {
8181 shadow_object = upl_object;
8182 shadow_offset = 0;
8183 }
8184
8185 paging_offset = shadow_object->paging_offset;
8186 vm_object_paging_begin(shadow_object);
8187
8188 if (shadow_object != upl_object)
8189 vm_object_unlock(upl_object);
8190
8191
8192 base_offset = shadow_offset;
8193 base_offset += upl_offset;
8194 base_offset += crypt_offset;
8195 base_offset -= paging_offset;
8196
8197 assert(crypt_offset + crypt_size <= upl_size);
8198
8199 for (offset_in_upl = 0;
8200 offset_in_upl < crypt_size;
8201 offset_in_upl += PAGE_SIZE) {
8202 page = vm_page_lookup(shadow_object,
8203 base_offset + offset_in_upl);
8204 if (page == VM_PAGE_NULL) {
8205 panic("upl_encrypt: "
8206 "no page for (obj=%p,off=0x%llx+0x%x)!\n",
8207 shadow_object,
8208 base_offset,
8209 offset_in_upl);
8210 }
8211 /*
8212 * Disconnect the page from all pmaps, so that nobody can
8213 * access it while it's encrypted. After that point, all
8214 * accesses to this page will cause a page fault and block
8215 * while the page is busy being encrypted. After the
8216 * encryption completes, any access will cause a
8217 * page fault and the page gets decrypted at that time.
8218 */
8219 pmap_disconnect(page->phys_page);
8220 vm_page_encrypt(page, 0);
8221
8222 if (vm_object_lock_avoid(shadow_object)) {
8223 /*
8224 * Give vm_pageout_scan() a chance to convert more
8225 * pages from "clean-in-place" to "clean-and-free",
8226 * if it's interested in the same pages we selected
8227 * in this cluster.
8228 */
8229 vm_object_unlock(shadow_object);
8230 mutex_pause(2);
8231 vm_object_lock(shadow_object);
8232 }
8233 }
8234
8235 vm_object_paging_end(shadow_object);
8236 vm_object_unlock(shadow_object);
8237
8238 if(isVectorUPL && subupl_size)
8239 goto process_upl_to_encrypt;
8240 }
8241
8242 #else /* CRYPTO */
8243 void
8244 upl_encrypt(
8245 __unused upl_t upl,
8246 __unused upl_offset_t crypt_offset,
8247 __unused upl_size_t crypt_size)
8248 {
8249 }
8250
8251 void
8252 vm_page_encrypt(
8253 __unused vm_page_t page,
8254 __unused vm_map_offset_t kernel_mapping_offset)
8255 {
8256 }
8257
8258 void
8259 vm_page_decrypt(
8260 __unused vm_page_t page,
8261 __unused vm_map_offset_t kernel_mapping_offset)
8262 {
8263 }
8264
8265 #endif /* CRYPTO */
8266
8267 /*
8268 * page->object must be locked
8269 */
8270 void
8271 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
8272 {
8273 if (!queues_locked) {
8274 vm_page_lockspin_queues();
8275 }
8276
8277 /*
8278 * need to drop the laundry count...
8279 * we may also need to remove it
8280 * from the I/O paging queue...
8281 * vm_pageout_throttle_up handles both cases
8282 *
8283 * the laundry and pageout_queue flags are cleared...
8284 */
8285 vm_pageout_throttle_up(page);
8286
8287 vm_page_steal_pageout_page++;
8288
8289 if (!queues_locked) {
8290 vm_page_unlock_queues();
8291 }
8292 }
8293
8294 upl_t
8295 vector_upl_create(vm_offset_t upl_offset)
8296 {
8297 int vector_upl_size = sizeof(struct _vector_upl);
8298 int i=0;
8299 upl_t upl;
8300 vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
8301
8302 upl = upl_create(0,UPL_VECTOR,0);
8303 upl->vector_upl = vector_upl;
8304 upl->offset = upl_offset;
8305 vector_upl->size = 0;
8306 vector_upl->offset = upl_offset;
8307 vector_upl->invalid_upls=0;
8308 vector_upl->num_upls=0;
8309 vector_upl->pagelist = NULL;
8310
8311 for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
8312 vector_upl->upl_iostates[i].size = 0;
8313 vector_upl->upl_iostates[i].offset = 0;
8314
8315 }
8316 return upl;
8317 }
8318
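/*
 * Illustrative sketch: the life cycle of a vectored UPL, pieced together
 * from the routines below.  Sub-UPLs are attached with their I/O size and
 * iostate, the combined page list is built, and each sub-UPL is detached
 * again (io_size == 0) as it is committed or aborted; once every sub-UPL
 * has been invalidated the vector itself can be deallocated.  The two
 * sub-UPLs and their sizes are hypothetical arguments.
 */
#if 0	/* illustrative sketch only */
static void
vector_upl_lifecycle_example(
	upl_t		subupl1,
	uint32_t	io_size1,
	upl_t		subupl2,
	uint32_t	io_size2)
{
	upl_t	vector_upl;

	vector_upl = vector_upl_create(0);

	vector_upl_set_subupl(vector_upl, subupl1, io_size1);
	vector_upl_set_iostate(vector_upl, subupl1, 0, io_size1);

	vector_upl_set_subupl(vector_upl, subupl2, io_size2);
	vector_upl_set_iostate(vector_upl, subupl2, io_size1, io_size2);

	vector_upl_set_pagelist(vector_upl);

	/* ... issue the I/O, then commit or abort each sub-UPL ... */

	/* io_size == 0 detaches a sub-UPL and marks it invalid */
	vector_upl_set_subupl(vector_upl, subupl1, 0);
	if (vector_upl_set_subupl(vector_upl, subupl2, 0) == TRUE) {
		/* every sub-UPL is now invalid: safe to tear down */
		vector_upl_deallocate(vector_upl);
	}
}
#endif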
8319 void
8320 vector_upl_deallocate(upl_t upl)
8321 {
8322 if(upl) {
8323 vector_upl_t vector_upl = upl->vector_upl;
8324 if(vector_upl) {
8325 if(vector_upl->invalid_upls != vector_upl->num_upls)
8326 panic("Deallocating non-empty Vectored UPL\n");
8327 kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
8328 vector_upl->invalid_upls=0;
8329 vector_upl->num_upls = 0;
8330 vector_upl->pagelist = NULL;
8331 vector_upl->size = 0;
8332 vector_upl->offset = 0;
8333 kfree(vector_upl, sizeof(struct _vector_upl));
8334 vector_upl = (vector_upl_t)0xfeedfeed;
8335 }
8336 else
8337 panic("vector_upl_deallocate was passed a non-vectored upl\n");
8338 }
8339 else
8340 panic("vector_upl_deallocate was passed a NULL upl\n");
8341 }
8342
8343 boolean_t
8344 vector_upl_is_valid(upl_t upl)
8345 {
8346 if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
8347 vector_upl_t vector_upl = upl->vector_upl;
8348 if(vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef)
8349 return FALSE;
8350 else
8351 return TRUE;
8352 }
8353 return FALSE;
8354 }
8355
8356 boolean_t
8357 vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
8358 {
8359 if(vector_upl_is_valid(upl)) {
8360 vector_upl_t vector_upl = upl->vector_upl;
8361
8362 if(vector_upl) {
8363 if(subupl) {
8364 if(io_size) {
8365 if(io_size < PAGE_SIZE)
8366 io_size = PAGE_SIZE;
8367 subupl->vector_upl = (void*)vector_upl;
8368 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
8369 vector_upl->size += io_size;
8370 upl->size += io_size;
8371 }
8372 else {
8373 uint32_t i=0,invalid_upls=0;
8374 for(i = 0; i < vector_upl->num_upls; i++) {
8375 if(vector_upl->upl_elems[i] == subupl)
8376 break;
8377 }
8378 if(i == vector_upl->num_upls)
8379 panic("Trying to remove sub-upl when none exists");
8380
8381 vector_upl->upl_elems[i] = NULL;
8382 invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
8383 if(invalid_upls == vector_upl->num_upls)
8384 return TRUE;
8385 else
8386 return FALSE;
8387 }
8388 }
8389 else
8390 panic("vector_upl_set_subupl was passed a NULL upl element\n");
8391 }
8392 else
8393 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
8394 }
8395 else
8396 panic("vector_upl_set_subupl was passed a NULL upl\n");
8397
8398 return FALSE;
8399 }
8400
8401 void
8402 vector_upl_set_pagelist(upl_t upl)
8403 {
8404 if(vector_upl_is_valid(upl)) {
8405 uint32_t i=0;
8406 vector_upl_t vector_upl = upl->vector_upl;
8407
8408 if(vector_upl) {
8409 vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
8410
8411 vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
8412
8413 for(i=0; i < vector_upl->num_upls; i++) {
8414 cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
8415 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
8416 pagelist_size += cur_upl_pagelist_size;
8417 if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
8418 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
8419 }
8420 assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
8421 }
8422 else
8423 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
8424 }
8425 else
8426 panic("vector_upl_set_pagelist was passed a NULL upl\n");
8427
8428 }
8429
8430 upl_t
8431 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
8432 {
8433 if(vector_upl_is_valid(upl)) {
8434 vector_upl_t vector_upl = upl->vector_upl;
8435 if(vector_upl) {
8436 if(index < vector_upl->num_upls)
8437 return vector_upl->upl_elems[index];
8438 }
8439 else
8440 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
8441 }
8442 return NULL;
8443 }
8444
8445 upl_t
8446 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
8447 {
8448 if(vector_upl_is_valid(upl)) {
8449 uint32_t i=0;
8450 vector_upl_t vector_upl = upl->vector_upl;
8451
8452 if(vector_upl) {
8453 upl_t subupl = NULL;
8454 vector_upl_iostates_t subupl_state;
8455
8456 for(i=0; i < vector_upl->num_upls; i++) {
8457 subupl = vector_upl->upl_elems[i];
8458 subupl_state = vector_upl->upl_iostates[i];
8459 if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
8460 /* We could have been passed an offset/size pair that belongs
8461 * to a UPL element that has already been committed/aborted.
8462 * If so, return NULL.
8463 */
8464 if(subupl == NULL)
8465 return NULL;
8466 if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
8467 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
8468 if(*upl_size > subupl_state.size)
8469 *upl_size = subupl_state.size;
8470 }
8471 if(*upl_offset >= subupl_state.offset)
8472 *upl_offset -= subupl_state.offset;
8473 else if(i)
8474 panic("Vector UPL offset miscalculation\n");
8475 return subupl;
8476 }
8477 }
8478 }
8479 else
8480 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
8481 }
8482 return NULL;
8483 }
8484
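/*
 * Illustrative sketch: walking a vectored UPL sub-UPL by sub-UPL, in the
 * same style as the process_upl_to_encrypt loop in upl_encrypt() above.
 * vector_upl_subupl_byoffset() clips the (offset, size) pair to the
 * sub-UPL that covers it and rebases the offset, so the caller advances
 * through the vector by the clipped size.  "process_one_subupl" is a
 * hypothetical per-sub-UPL handler.
 */
#if 0	/* illustrative sketch only */
static void
vector_upl_walk_example(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size)
{
	upl_t		subupl;
	upl_offset_t	cur_offset;
	upl_size_t	cur_size;

	if (!vector_upl_is_valid(upl)) {
		/* not a vectored UPL: handle it as a single UPL */
		process_one_subupl(upl, offset, size);
		return;
	}
	while (size) {
		cur_offset = offset;
		cur_size = size;
		subupl = vector_upl_subupl_byoffset(upl, &cur_offset, &cur_size);
		if (subupl == NULL)
			panic("vector_upl_walk_example: no sub-upl for offset");
		process_one_subupl(subupl, cur_offset, cur_size);
		offset += cur_size;
		size -= cur_size;
	}
}
#endif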
8485 void
8486 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
8487 {
8488 *v_upl_submap = NULL;
8489
8490 if(vector_upl_is_valid(upl)) {
8491 vector_upl_t vector_upl = upl->vector_upl;
8492 if(vector_upl) {
8493 *v_upl_submap = vector_upl->submap;
8494 *submap_dst_addr = vector_upl->submap_dst_addr;
8495 }
8496 else
8497 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
8498 }
8499 else
8500 panic("vector_upl_get_submap was passed a null UPL\n");
8501 }
8502
8503 void
8504 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
8505 {
8506 if(vector_upl_is_valid(upl)) {
8507 vector_upl_t vector_upl = upl->vector_upl;
8508 if(vector_upl) {
8509 vector_upl->submap = submap;
8510 vector_upl->submap_dst_addr = submap_dst_addr;
8511 }
8512 else
8513 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
8514 }
8515 else
8516 panic("vector_upl_get_submap was passed a NULL UPL\n");
8517 }
8518
8519 void
8520 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
8521 {
8522 if(vector_upl_is_valid(upl)) {
8523 uint32_t i = 0;
8524 vector_upl_t vector_upl = upl->vector_upl;
8525
8526 if(vector_upl) {
8527 for(i = 0; i < vector_upl->num_upls; i++) {
8528 if(vector_upl->upl_elems[i] == subupl)
8529 break;
8530 }
8531
8532 if(i == vector_upl->num_upls)
8533 panic("setting sub-upl iostate when none exists");
8534
8535 vector_upl->upl_iostates[i].offset = offset;
8536 if(size < PAGE_SIZE)
8537 size = PAGE_SIZE;
8538 vector_upl->upl_iostates[i].size = size;
8539 }
8540 else
8541 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
8542 }
8543 else
8544 panic("vector_upl_set_iostate was passed a NULL UPL\n");
8545 }
8546
8547 void
8548 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
8549 {
8550 if(vector_upl_is_valid(upl)) {
8551 uint32_t i = 0;
8552 vector_upl_t vector_upl = upl->vector_upl;
8553
8554 if(vector_upl) {
8555 for(i = 0; i < vector_upl->num_upls; i++) {
8556 if(vector_upl->upl_elems[i] == subupl)
8557 break;
8558 }
8559
8560 if(i == vector_upl->num_upls)
8561 panic("getting sub-upl iostate when none exists");
8562
8563 *offset = vector_upl->upl_iostates[i].offset;
8564 *size = vector_upl->upl_iostates[i].size;
8565 }
8566 else
8567 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
8568 }
8569 else
8570 panic("vector_upl_get_iostate was passed a NULL UPL\n");
8571 }
8572
8573 void
8574 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
8575 {
8576 if(vector_upl_is_valid(upl)) {
8577 vector_upl_t vector_upl = upl->vector_upl;
8578 if(vector_upl) {
8579 if(index < vector_upl->num_upls) {
8580 *offset = vector_upl->upl_iostates[index].offset;
8581 *size = vector_upl->upl_iostates[index].size;
8582 }
8583 else
8584 *offset = *size = 0;
8585 }
8586 else
8587 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
8588 }
8589 else
8590 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
8591 }
8592
8593 upl_page_info_t *
8594 upl_get_internal_vectorupl_pagelist(upl_t upl)
8595 {
8596 return ((vector_upl_t)(upl->vector_upl))->pagelist;
8597 }
8598
8599 void *
8600 upl_get_internal_vectorupl(upl_t upl)
8601 {
8602 return upl->vector_upl;
8603 }
8604
8605 vm_size_t
8606 upl_get_internal_pagelist_offset(void)
8607 {
8608 return sizeof(struct upl);
8609 }
8610
8611 void
8612 upl_clear_dirty(
8613 upl_t upl,
8614 boolean_t value)
8615 {
8616 if (value) {
8617 upl->flags |= UPL_CLEAR_DIRTY;
8618 } else {
8619 upl->flags &= ~UPL_CLEAR_DIRTY;
8620 }
8621 }
8622
8623 void
8624 upl_set_referenced(
8625 upl_t upl,
8626 boolean_t value)
8627 {
8628 upl_lock(upl);
8629 if (value) {
8630 upl->ext_ref_count++;
8631 } else {
8632 if (!upl->ext_ref_count) {
8633 panic("upl_set_referenced not %p\n", upl);
8634 }
8635 upl->ext_ref_count--;
8636 }
8637 upl_unlock(upl);
8638 }
8639
8640 boolean_t
8641 vm_page_is_slideable(vm_page_t m)
8642 {
8643 boolean_t result = FALSE;
8644 vm_shared_region_slide_info_t si;
8645
8646 vm_object_lock_assert_held(m->object);
8647
8648 /* make sure our page belongs to the one object allowed to do this */
8649 if (!m->object->object_slid) {
8650 goto done;
8651 }
8652
8653 si = m->object->vo_slide_info;
8654 if (si == NULL) {
8655 goto done;
8656 }
8657
8658 if(!m->slid && (si->start <= m->offset && si->end > m->offset)) {
8659 result = TRUE;
8660 }
8661
8662 done:
8663 return result;
8664 }
8665
8666 int vm_page_slide_counter = 0;
8667 int vm_page_slide_errors = 0;
8668 kern_return_t
8669 vm_page_slide(
8670 vm_page_t page,
8671 vm_map_offset_t kernel_mapping_offset)
8672 {
8673 kern_return_t kr;
8674 vm_map_size_t kernel_mapping_size;
8675 boolean_t kernel_mapping_needs_unmap;
8676 vm_offset_t kernel_vaddr;
8677 uint32_t pageIndex = 0;
8678
8679 assert(!page->slid);
8680 assert(page->object->object_slid);
8681 vm_object_lock_assert_exclusive(page->object);
8682
8683 if (page->error)
8684 return KERN_FAILURE;
8685
8686 /*
8687 * Take a paging-in-progress reference to keep the object
8688 * alive even if we have to unlock it (in vm_paging_map_object()
8689 * for example)...
8690 */
8691 vm_object_paging_begin(page->object);
8692
8693 if (kernel_mapping_offset == 0) {
8694 /*
8695 * The page hasn't already been mapped in kernel space
8696 * by the caller. Map it now, so that we can access
8697 * its contents and slide them.
8698 */
8699 kernel_mapping_size = PAGE_SIZE;
8700 kernel_mapping_needs_unmap = FALSE;
8701 kr = vm_paging_map_object(page,
8702 page->object,
8703 page->offset,
8704 VM_PROT_READ | VM_PROT_WRITE,
8705 FALSE,
8706 &kernel_mapping_size,
8707 &kernel_mapping_offset,
8708 &kernel_mapping_needs_unmap);
8709 if (kr != KERN_SUCCESS) {
8710 panic("vm_page_slide: "
8711 "could not map page in kernel: 0x%x\n",
8712 kr);
8713 }
8714 } else {
8715 kernel_mapping_size = 0;
8716 kernel_mapping_needs_unmap = FALSE;
8717 }
8718 kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);
8719
8720 /*
8721 * Slide the pointers on the page.
8722 */
8723
8724 /*assert that slide_file_info.start/end are page-aligned?*/
8725
8726 assert(!page->slid);
8727 assert(page->object->object_slid);
8728
8729 pageIndex = (uint32_t)((page->offset - page->object->vo_slide_info->start)/PAGE_SIZE);
8730 kr = vm_shared_region_slide_page(page->object->vo_slide_info, kernel_vaddr, pageIndex);
8731 vm_page_slide_counter++;
8732
8733 /*
8734 * Unmap the page from the kernel's address space,
8735 */
8736 if (kernel_mapping_needs_unmap) {
8737 vm_paging_unmap_object(page->object,
8738 kernel_vaddr,
8739 kernel_vaddr + PAGE_SIZE);
8740 }
8741
8742 page->dirty = FALSE;
8743 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
8744
8745 if (kr != KERN_SUCCESS || cs_debug > 1) {
8746 printf("vm_page_slide(%p): "
8747 "obj %p off 0x%llx mobj %p moff 0x%llx\n",
8748 page,
8749 page->object, page->offset,
8750 page->object->pager,
8751 page->offset + page->object->paging_offset);
8752 }
8753
8754 if (kr == KERN_SUCCESS) {
8755 page->slid = TRUE;
8756 } else {
8757 page->error = TRUE;
8758 vm_page_slide_errors++;
8759 }
8760
8761 vm_object_paging_end(page->object);
8762
8763 return kr;
8764 }
8765
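/*
 * Illustrative sketch: sliding a page only when it qualifies.
 * vm_page_is_slideable() needs the page's object lock held;
 * vm_page_slide() additionally requires the lock to be exclusive and will
 * map the page into the kernel itself when a zero kernel mapping offset
 * is passed.  "vm_page_slide_if_needed" is a hypothetical name.
 */
#if 0	/* illustrative sketch only */
static kern_return_t
vm_page_slide_if_needed(
	vm_page_t	page)
{
	vm_object_lock_assert_exclusive(page->object);

	if (!vm_page_is_slideable(page)) {
		/* not in the slid object, or already slid */
		return KERN_SUCCESS;
	}
	return vm_page_slide(page, 0);
}
#endif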
8766 void inline memoryshot(unsigned int event, unsigned int control)
8767 {
8768 if (vm_debug_events) {
8769 KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
8770 vm_page_active_count, vm_page_inactive_count,
8771 vm_page_free_count, vm_page_speculative_count,
8772 vm_page_throttled_count);
8773 } else {
8774 (void) event;
8775 (void) control;
8776 }
8777
8778 }
8779
8780 #ifdef MACH_BSD
8781
8782 boolean_t upl_device_page(upl_page_info_t *upl)
8783 {
8784 return(UPL_DEVICE_PAGE(upl));
8785 }
8786 boolean_t upl_page_present(upl_page_info_t *upl, int index)
8787 {
8788 return(UPL_PAGE_PRESENT(upl, index));
8789 }
8790 boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
8791 {
8792 return(UPL_SPECULATIVE_PAGE(upl, index));
8793 }
8794 boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
8795 {
8796 return(UPL_DIRTY_PAGE(upl, index));
8797 }
8798 boolean_t upl_valid_page(upl_page_info_t *upl, int index)
8799 {
8800 return(UPL_VALID_PAGE(upl, index));
8801 }
8802 ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
8803 {
8804 return(UPL_PHYS_PAGE(upl, index));
8805 }
8806
8807
8808 void
8809 vm_countdirtypages(void)
8810 {
8811 vm_page_t m;
8812 int dpages;
8813 int pgopages;
8814 int precpages;
8815
8816
8817 dpages=0;
8818 pgopages=0;
8819 precpages=0;
8820
8821 vm_page_lock_queues();
8822 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
8823 do {
8824 if (m ==(vm_page_t )0) break;
8825
8826 if(m->dirty) dpages++;
8827 if(m->pageout) pgopages++;
8828 if(m->precious) precpages++;
8829
8830 assert(m->object != kernel_object);
8831 m = (vm_page_t) queue_next(&m->pageq);
8832 if (m ==(vm_page_t )0) break;
8833
8834 } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
8835 vm_page_unlock_queues();
8836
8837 vm_page_lock_queues();
8838 m = (vm_page_t) queue_first(&vm_page_queue_throttled);
8839 do {
8840 if (m ==(vm_page_t )0) break;
8841
8842 dpages++;
8843 assert(m->dirty);
8844 assert(!m->pageout);
8845 assert(m->object != kernel_object);
8846 m = (vm_page_t) queue_next(&m->pageq);
8847 if (m ==(vm_page_t )0) break;
8848
8849 } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
8850 vm_page_unlock_queues();
8851
8852 vm_page_lock_queues();
8853 m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
8854 do {
8855 if (m ==(vm_page_t )0) break;
8856
8857 if(m->dirty) dpages++;
8858 if(m->pageout) pgopages++;
8859 if(m->precious) precpages++;
8860
8861 assert(m->object != kernel_object);
8862 m = (vm_page_t) queue_next(&m->pageq);
8863 if (m ==(vm_page_t )0) break;
8864
8865 } while (!queue_end(&vm_page_queue_anonymous,(queue_entry_t) m));
8866 vm_page_unlock_queues();
8867
8868 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
8869
8870 dpages=0;
8871 pgopages=0;
8872 precpages=0;
8873
8874 vm_page_lock_queues();
8875 m = (vm_page_t) queue_first(&vm_page_queue_active);
8876
8877 do {
8878 if(m == (vm_page_t )0) break;
8879 if(m->dirty) dpages++;
8880 if(m->pageout) pgopages++;
8881 if(m->precious) precpages++;
8882
8883 assert(m->object != kernel_object);
8884 m = (vm_page_t) queue_next(&m->pageq);
8885 if(m == (vm_page_t )0) break;
8886
8887 } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
8888 vm_page_unlock_queues();
8889
8890 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
8891
8892 }
8893 #endif /* MACH_BSD */
8894
8895 ppnum_t upl_get_highest_page(
8896 upl_t upl)
8897 {
8898 return upl->highest_page;
8899 }
8900
8901 upl_size_t upl_get_size(
8902 upl_t upl)
8903 {
8904 return upl->size;
8905 }
8906
8907 #if UPL_DEBUG
8908 kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
8909 {
8910 upl->ubc_alias1 = alias1;
8911 upl->ubc_alias2 = alias2;
8912 return KERN_SUCCESS;
8913 }
8914 int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
8915 {
8916 if(al)
8917 *al = upl->ubc_alias1;
8918 if(al2)
8919 *al2 = upl->ubc_alias2;
8920 return KERN_SUCCESS;
8921 }
8922 #endif /* UPL_DEBUG */