1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
65
66 #include <stdint.h>
67
68 #include <debug.h>
69 #include <mach_pagemap.h>
70 #include <mach_cluster_stats.h>
71
72 #include <mach/mach_types.h>
73 #include <mach/memory_object.h>
74 #include <mach/memory_object_default.h>
75 #include <mach/memory_object_control_server.h>
76 #include <mach/mach_host_server.h>
77 #include <mach/upl.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_param.h>
80 #include <mach/vm_statistics.h>
81 #include <mach/sdt.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/counters.h>
85 #include <kern/host_statistics.h>
86 #include <kern/machine.h>
87 #include <kern/misc_protos.h>
88 #include <kern/sched.h>
89 #include <kern/thread.h>
90 #include <kern/kalloc.h>
91 #include <kern/policy_internal.h>
92 #include <kern/thread_group.h>
93
94 #include <machine/vm_tuning.h>
95 #include <machine/commpage.h>
96
97 #include <vm/pmap.h>
98 #include <vm/vm_compressor_pager.h>
99 #include <vm/vm_fault.h>
100 #include <vm/vm_map.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_protos.h> /* must be last */
105 #include <vm/memory_object.h>
106 #include <vm/vm_purgeable_internal.h>
107 #include <vm/vm_shared_region.h>
108 #include <vm/vm_compressor.h>
109
110 #include <san/kasan.h>
111
112 #if CONFIG_PHANTOM_CACHE
113 #include <vm/vm_phantom_cache.h>
114 #endif
115
116 #if UPL_DEBUG
117 #include <libkern/OSDebug.h>
118 #endif
119
120 extern int cs_debug;
121
122 extern void mbuf_drain(boolean_t);
123
124 #if VM_PRESSURE_EVENTS
125 #if CONFIG_JETSAM
126 extern unsigned int memorystatus_available_pages;
127 extern unsigned int memorystatus_available_pages_pressure;
128 extern unsigned int memorystatus_available_pages_critical;
129 #else /* CONFIG_JETSAM */
130 extern uint64_t memorystatus_available_pages;
131 extern uint64_t memorystatus_available_pages_pressure;
132 extern uint64_t memorystatus_available_pages_critical;
133 #endif /* CONFIG_JETSAM */
134
135 extern unsigned int memorystatus_frozen_count;
136 extern unsigned int memorystatus_suspended_count;
137 extern vm_pressure_level_t memorystatus_vm_pressure_level;
138
139 extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
140 extern uint32_t memorystatus_jetsam_fg_band_waiters;
141
142 void vm_pressure_response(void);
143 extern void consider_vm_pressure_events(void);
144
145 #define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
146 #endif /* VM_PRESSURE_EVENTS */
147
148 thread_t vm_pageout_scan_thread = THREAD_NULL;
149 boolean_t vps_dynamic_priority_enabled = FALSE;
150
151 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
152 #ifdef CONFIG_EMBEDDED
153 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
154 #else
155 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
156 #endif
157 #endif
158
159 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
160 #define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
161 #endif
162
163 #ifndef VM_PAGE_LAUNDRY_MAX
164 #define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */
165 #endif /* VM_PAGE_LAUNDRY_MAX */
166
167 #ifndef VM_PAGEOUT_BURST_WAIT
168 #define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */
169 #endif /* VM_PAGEOUT_BURST_WAIT */
170
171 #ifndef VM_PAGEOUT_EMPTY_WAIT
172 #define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */
173 #endif /* VM_PAGEOUT_EMPTY_WAIT */
174
175 #ifndef VM_PAGEOUT_DEADLOCK_WAIT
176 #define VM_PAGEOUT_DEADLOCK_WAIT 100 /* milliseconds */
177 #endif /* VM_PAGEOUT_DEADLOCK_WAIT */
178
179 #ifndef VM_PAGEOUT_IDLE_WAIT
180 #define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
181 #endif /* VM_PAGEOUT_IDLE_WAIT */
182
183 #ifndef VM_PAGEOUT_SWAP_WAIT
184 #define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */
185 #endif /* VM_PAGEOUT_SWAP_WAIT */
186
187
188 #ifndef VM_PAGE_SPECULATIVE_TARGET
189 #define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
190 #endif /* VM_PAGE_SPECULATIVE_TARGET */
191
192
193 /*
194 * To obtain a reasonable LRU approximation, the inactive queue
195 * needs to be large enough to give pages on it a chance to be
196 * referenced a second time. This macro defines the fraction
197 * of active+inactive pages that should be inactive.
198 * The pageout daemon uses it to update vm_page_inactive_target.
199 *
200 * If vm_page_free_count falls below vm_page_free_target and
201 * vm_page_inactive_count is below vm_page_inactive_target,
202 * then the pageout daemon starts running.
203 */
204
205 #ifndef VM_PAGE_INACTIVE_TARGET
206 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
207 #endif /* VM_PAGE_INACTIVE_TARGET */
208
209 /*
210 * Once the pageout daemon starts running, it keeps going
211 * until vm_page_free_count meets or exceeds vm_page_free_target.
212 */
213
214 #ifndef VM_PAGE_FREE_TARGET
215 #ifdef CONFIG_EMBEDDED
216 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
217 #else
218 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
219 #endif
220 #endif /* VM_PAGE_FREE_TARGET */
221
222
223 /*
224 * The pageout daemon always starts running once vm_page_free_count
225 * falls below vm_page_free_min.
226 */
227
228 #ifndef VM_PAGE_FREE_MIN
229 #ifdef CONFIG_EMBEDDED
230 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
231 #else
232 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
233 #endif
234 #endif /* VM_PAGE_FREE_MIN */
235
236 #ifdef CONFIG_EMBEDDED
237 #define VM_PAGE_FREE_RESERVED_LIMIT 100
238 #define VM_PAGE_FREE_MIN_LIMIT 1500
239 #define VM_PAGE_FREE_TARGET_LIMIT 2000
240 #else
241 #define VM_PAGE_FREE_RESERVED_LIMIT 1700
242 #define VM_PAGE_FREE_MIN_LIMIT 3500
243 #define VM_PAGE_FREE_TARGET_LIMIT 4000
244 #endif
245
246 /*
247 * When vm_page_free_count falls below vm_page_free_reserved,
248 * only vm-privileged threads can allocate pages. vm-privilege
249 * allows the pageout daemon and default pager (and any other
250 * associated threads needed for default pageout) to continue
251 * operation by dipping into the reserved pool of pages.
252 */
253
254 #ifndef VM_PAGE_FREE_RESERVED
255 #define VM_PAGE_FREE_RESERVED(n) \
256 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
257 #endif /* VM_PAGE_FREE_RESERVED */
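/*
 * Illustrative sketch (not part of the original source, not compiled):
 * rough numbers showing how the threshold macros above relate for a
 * hypothetical page pool.  The input value and the helper name are made
 * up for illustration only.
 */
#if 0
static void
vm_pageout_threshold_sketch(void)
{
	unsigned int avail = 100000;    /* hypothetical active+inactive page count */

	unsigned int inactive_target = VM_PAGE_INACTIVE_TARGET(avail);   /* 50000: half of avail */
	unsigned int free_target     = VM_PAGE_FREE_TARGET(avail);       /* 15 + avail/80 (avail/100 on embedded) */
	unsigned int free_min        = VM_PAGE_FREE_MIN(avail);          /* 10 + avail/100 (avail/200 on embedded) */
	unsigned int free_reserved   = VM_PAGE_FREE_RESERVED(4);         /* 6 * VM_PAGE_LAUNDRY_MAX + 4 */

	/*
	 * The daemon relies on free_reserved < free_min < free_target:
	 * it starts when vm_page_free_count drops below free_target with
	 * too few inactive pages, always starts below free_min, and only
	 * vm-privileged threads may allocate below free_reserved.
	 */
	(void)inactive_target; (void)free_target;
	(void)free_min; (void)free_reserved;
}
#endif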
258
259 /*
260 * When we dequeue pages from the inactive list, they are
261 * reactivated (i.e., put back on the active queue) if referenced.
262 * However, it is possible to starve the free list if other
263 * processors are referencing pages faster than we can turn off
264 * the referenced bit. So we limit the number of reactivations
265 * we will make per call of vm_pageout_scan().
266 */
267 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
268
269 #ifndef VM_PAGE_REACTIVATE_LIMIT
270 #ifdef CONFIG_EMBEDDED
271 #define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
272 #else
273 #define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
274 #endif
275 #endif /* VM_PAGE_REACTIVATE_LIMIT */
276 #define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
277
278 extern boolean_t hibernate_cleaning_in_progress;
279
280 /*
281 * Forward declarations for internal routines.
282 */
283 struct cq {
284 struct vm_pageout_queue *q;
285 void *current_chead;
286 char *scratch_buf;
287 int id;
288 };
289
290 struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
291
292
293 #if VM_PRESSURE_EVENTS
294 void vm_pressure_thread(void);
295
296 boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
297 boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
298
299 boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
300 boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
301 #endif
302
303 void vm_pageout_garbage_collect(int);
304 static void vm_pageout_iothread_external(void);
305 static void vm_pageout_iothread_internal(struct cq *cq);
306 static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);
307
308 extern void vm_pageout_continue(void);
309 extern void vm_pageout_scan(void);
310
311 void vm_tests(void); /* forward */
312
313 boolean_t vm_pageout_running = FALSE;
314
315 uint32_t vm_page_upl_tainted = 0;
316 uint32_t vm_page_iopl_tainted = 0;
317
318 #if !CONFIG_EMBEDDED
319 static boolean_t vm_pageout_waiter = FALSE;
320 #endif /* !CONFIG_EMBEDDED */
321
322
323 #if DEVELOPMENT || DEBUG
324 struct vm_pageout_debug vm_pageout_debug;
325 #endif
326 struct vm_pageout_vminfo vm_pageout_vminfo;
327 struct vm_pageout_state vm_pageout_state;
328 struct vm_config vm_config;
329
330 struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
331 struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
332
333 int vm_upl_wait_for_pages = 0;
334 vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
335
336 boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;
337
338 int vm_debug_events = 0;
339
340 lck_grp_t vm_pageout_lck_grp;
341
342 #if CONFIG_MEMORYSTATUS
343 extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
344
345 uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
346 uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
347
348 #endif
349
350
351
352 /*
353 * Routine: vm_pageout_object_terminate
354 * Purpose:
355 * Destroy the pageout_object, and perform all of the
356 * required cleanup actions.
357 *
358 * In/Out conditions:
359 * The object must be locked, and will be returned locked.
360 */
361 void
362 vm_pageout_object_terminate(
363 vm_object_t object)
364 {
365 vm_object_t shadow_object;
366
367 /*
368 * Deal with the deallocation (last reference) of a pageout object
369 * (used for cleaning-in-place) by dropping the paging references/
370 * freeing pages in the original object.
371 */
372
373 assert(object->pageout);
374 shadow_object = object->shadow;
375 vm_object_lock(shadow_object);
376
377 while (!vm_page_queue_empty(&object->memq)) {
378 vm_page_t p, m;
379 vm_object_offset_t offset;
380
381 p = (vm_page_t) vm_page_queue_first(&object->memq);
382
383 assert(p->vmp_private);
384 assert(p->vmp_free_when_done);
385 p->vmp_free_when_done = FALSE;
386 assert(!p->vmp_cleaning);
387 assert(!p->vmp_laundry);
388
389 offset = p->vmp_offset;
390 VM_PAGE_FREE(p);
391 p = VM_PAGE_NULL;
392
393 m = vm_page_lookup(shadow_object,
394 offset + object->vo_shadow_offset);
395
396 if (m == VM_PAGE_NULL) {
397 continue;
398 }
399
400 assert((m->vmp_dirty) || (m->vmp_precious) ||
401 (m->vmp_busy && m->vmp_cleaning));
402
403 /*
404 * Handle the trusted pager throttle.
405 * Also decrement the burst throttle (if external).
406 */
407 vm_page_lock_queues();
408 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
409 vm_pageout_throttle_up(m);
410 }
411
412 /*
413 * Handle the "target" page(s). These pages are to be freed if
414 * successfully cleaned. Target pages are always busy, and are
415 * wired exactly once. The initial target pages are not mapped,
416 * (so cannot be referenced or modified) but converted target
417 * pages may have been modified between the selection as an
418 * adjacent page and conversion to a target.
419 */
420 if (m->vmp_free_when_done) {
421 assert(m->vmp_busy);
422 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
423 assert(m->vmp_wire_count == 1);
424 m->vmp_cleaning = FALSE;
425 m->vmp_free_when_done = FALSE;
426 /*
427 * Revoke all access to the page. Since the object is
428 * locked, and the page is busy, this prevents the page
429 * from being dirtied after the pmap_disconnect() call
430 * returns.
431 *
432 * Since the page is left "dirty" but "not modified", we
433 * can detect whether the page was redirtied during
434 * pageout by checking the modify state.
435 */
436 if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
437 SET_PAGE_DIRTY(m, FALSE);
438 } else {
439 m->vmp_dirty = FALSE;
440 }
441
442 if (m->vmp_dirty) {
443 vm_page_unwire(m, TRUE); /* reactivates */
444 VM_STAT_INCR(reactivations);
445 PAGE_WAKEUP_DONE(m);
446 } else {
447 vm_page_free(m); /* clears busy, etc. */
448 }
449 vm_page_unlock_queues();
450 continue;
451 }
452 /*
453 * Handle the "adjacent" pages. These pages were cleaned in
454 * place, and should be left alone.
455 * If prep_pin_count is nonzero, then someone is using the
456 * page, so make it active.
457 */
458 if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
459 if (m->vmp_reference) {
460 vm_page_activate(m);
461 } else {
462 vm_page_deactivate(m);
463 }
464 }
465 if (m->vmp_overwriting) {
466 /*
467 * the (COPY_OUT_FROM == FALSE) request_page_list case
468 */
469 if (m->vmp_busy) {
470 /*
471 * We do not re-set m->vmp_dirty !
472 * The page was busy so no extraneous activity
473 * could have occurred. COPY_INTO is a read into the
474 * new pages. CLEAN_IN_PLACE does actually write
475 * out the pages but handling outside of this code
476 * will take care of resetting dirty. We clear the
477 * modify however for the Programmed I/O case.
478 */
479 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
480
481 m->vmp_busy = FALSE;
482 m->vmp_absent = FALSE;
483 } else {
484 /*
485 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
486 * Occurs when the original page was wired
487 * at the time of the list request
488 */
489 assert(VM_PAGE_WIRED(m));
490 vm_page_unwire(m, TRUE); /* reactivates */
491 }
492 m->vmp_overwriting = FALSE;
493 } else {
494 m->vmp_dirty = FALSE;
495 }
496 m->vmp_cleaning = FALSE;
497
498 /*
499 * Wakeup any thread waiting for the page to be un-cleaning.
500 */
501 PAGE_WAKEUP(m);
502 vm_page_unlock_queues();
503 }
504 /*
505 * Account for the paging reference taken in vm_paging_object_allocate.
506 */
507 vm_object_activity_end(shadow_object);
508 vm_object_unlock(shadow_object);
509
510 assert(object->ref_count == 0);
511 assert(object->paging_in_progress == 0);
512 assert(object->activity_in_progress == 0);
513 assert(object->resident_page_count == 0);
514 return;
515 }
516
517 /*
518 * Routine: vm_pageclean_setup
519 *
520 * Purpose: setup a page to be cleaned (made non-dirty), but not
521 * necessarily flushed from the VM page cache.
522 * This is accomplished by cleaning in place.
523 *
524 * The page must not be busy, and new_object
525 * must be locked.
526 *
527 */
528 static void
529 vm_pageclean_setup(
530 vm_page_t m,
531 vm_page_t new_m,
532 vm_object_t new_object,
533 vm_object_offset_t new_offset)
534 {
535 assert(!m->vmp_busy);
536 #if 0
537 assert(!m->vmp_cleaning);
538 #endif
539
540 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
541
542 /*
543 * Mark original page as cleaning in place.
544 */
545 m->vmp_cleaning = TRUE;
546 SET_PAGE_DIRTY(m, FALSE);
547 m->vmp_precious = FALSE;
548
549 /*
550 * Convert the fictitious page to a private shadow of
551 * the real page.
552 */
553 assert(new_m->vmp_fictitious);
554 assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
555 new_m->vmp_fictitious = FALSE;
556 new_m->vmp_private = TRUE;
557 new_m->vmp_free_when_done = TRUE;
558 VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
559
560 vm_page_lockspin_queues();
561 vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
562 vm_page_unlock_queues();
563
564 vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
565 assert(!new_m->vmp_wanted);
566 new_m->vmp_busy = FALSE;
567 }
568
569 /*
570 * Routine: vm_pageout_initialize_page
571 * Purpose:
572 * Causes the specified page to be initialized in
573 * the appropriate memory object. This routine is used to push
574 * pages into a copy-object when they are modified in the
575 * permanent object.
576 *
577 * The page is moved to a temporary object and paged out.
578 *
579 * In/out conditions:
580 * The page in question must not be on any pageout queues.
581 * The object to which it belongs must be locked.
582 * The page must be busy, but not hold a paging reference.
583 *
584 * Implementation:
585 * Move this page to a completely new object.
586 */
587 void
588 vm_pageout_initialize_page(
589 vm_page_t m)
590 {
591 vm_object_t object;
592 vm_object_offset_t paging_offset;
593 memory_object_t pager;
594
595 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
596
597 object = VM_PAGE_OBJECT(m);
598
599 assert(m->vmp_busy);
600 assert(object->internal);
601
602 /*
603 * Verify that we really want to clean this page
604 */
605 assert(!m->vmp_absent);
606 assert(!m->vmp_error);
607 assert(m->vmp_dirty);
608
609 /*
610 * Create a paging reference to let us play with the object.
611 */
612 paging_offset = m->vmp_offset + object->paging_offset;
613
614 if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
615 panic("reservation without pageout?"); /* alan */
616
617 VM_PAGE_FREE(m);
618 vm_object_unlock(object);
619
620 return;
621 }
622
623 /*
624 * If there's no pager, then we can't clean the page. This should
625 * never happen since this should be a copy object and therefore not
626 * an external object, so the pager should always be there.
627 */
628
629 pager = object->pager;
630
631 if (pager == MEMORY_OBJECT_NULL) {
632 panic("missing pager for copy object");
633
634 VM_PAGE_FREE(m);
635 return;
636 }
637
638 /*
639 * set the page for future call to vm_fault_list_request
640 */
641 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
642 SET_PAGE_DIRTY(m, FALSE);
643
644 /*
645 * keep the object from collapsing or terminating
646 */
647 vm_object_paging_begin(object);
648 vm_object_unlock(object);
649
650 /*
651 * Write the data to its pager.
652 * Note that the data is passed by naming the new object,
653 * not a virtual address; the pager interface has been
654 * manipulated to use the "internal memory" data type.
655 * [The object reference from its allocation is donated
656 * to the eventual recipient.]
657 */
658 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
659
660 vm_object_lock(object);
661 vm_object_paging_end(object);
662 }
663
664
665 /*
666 * vm_pageout_cluster:
667 *
668 * Given a page, queue it to the appropriate I/O thread,
669 * which will page it out and attempt to clean adjacent pages
670 * in the same operation.
671 *
672 * The object and queues must be locked. We will take a
673 * paging reference to prevent deallocation or collapse when we
674 * release the object lock back at the call site. The I/O thread
675 * is responsible for consuming this reference
676 *
677 * The page must not be on any pageout queue.
678 */
679 #if DEVELOPMENT || DEBUG
680 vmct_stats_t vmct_stats;
681
682 int32_t vmct_active = 0;
683 uint64_t vm_compressor_epoch_start = 0;
684 uint64_t vm_compressor_epoch_stop = 0;
685
686 typedef enum vmct_state_t {
687 VMCT_IDLE,
688 VMCT_AWAKENED,
689 VMCT_ACTIVE,
690 } vmct_state_t;
691 vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
692 #endif
693
694
695 void
696 vm_pageout_cluster(vm_page_t m)
697 {
698 vm_object_t object = VM_PAGE_OBJECT(m);
699 struct vm_pageout_queue *q;
700
701 VM_PAGE_CHECK(m);
702 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
703 vm_object_lock_assert_exclusive(object);
704
705 /*
706 * Only a certain kind of page is appreciated here.
707 */
708 assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
709 assert(!m->vmp_cleaning && !m->vmp_laundry);
710 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
711
712 /*
713 * protect the object from collapse or termination
714 */
715 vm_object_activity_begin(object);
716
717 if (object->internal == TRUE) {
718 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
719
720 m->vmp_busy = TRUE;
721
722 q = &vm_pageout_queue_internal;
723 } else {
724 q = &vm_pageout_queue_external;
725 }
726
727 /*
728 * pgo_laundry count is tied to the laundry bit
729 */
730 m->vmp_laundry = TRUE;
731 q->pgo_laundry++;
732
733 m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
734 vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);
735
736 if (q->pgo_idle == TRUE) {
737 q->pgo_idle = FALSE;
738 thread_wakeup((event_t) &q->pgo_pending);
739 }
740 VM_PAGE_CHECK(m);
741 }
742
743
744 /*
745 * A page is back from laundry or we are stealing it back from
746 * the laundering state. See if there are some pages waiting to
747 * go to laundry and if we can let some of them go now.
748 *
749 * Object and page queues must be locked.
750 */
751 void
752 vm_pageout_throttle_up(
753 vm_page_t m)
754 {
755 struct vm_pageout_queue *q;
756 vm_object_t m_object;
757
758 m_object = VM_PAGE_OBJECT(m);
759
760 assert(m_object != VM_OBJECT_NULL);
761 assert(m_object != kernel_object);
762
763 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
764 vm_object_lock_assert_exclusive(m_object);
765
766 if (m_object->internal == TRUE) {
767 q = &vm_pageout_queue_internal;
768 } else {
769 q = &vm_pageout_queue_external;
770 }
771
772 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
773 vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
774 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
775
776 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
777
778 vm_object_activity_end(m_object);
779
780 VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
781 }
782 if (m->vmp_laundry == TRUE) {
783 m->vmp_laundry = FALSE;
784 q->pgo_laundry--;
785
786 if (q->pgo_throttled == TRUE) {
787 q->pgo_throttled = FALSE;
788 thread_wakeup((event_t) &q->pgo_laundry);
789 }
790 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
791 q->pgo_draining = FALSE;
792 thread_wakeup((event_t) (&q->pgo_laundry + 1));
793 }
794 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
795 }
796 }
797
798
799 static void
800 vm_pageout_throttle_up_batch(
801 struct vm_pageout_queue *q,
802 int batch_cnt)
803 {
804 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
805
806 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);
807
808 q->pgo_laundry -= batch_cnt;
809
810 if (q->pgo_throttled == TRUE) {
811 q->pgo_throttled = FALSE;
812 thread_wakeup((event_t) &q->pgo_laundry);
813 }
814 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
815 q->pgo_draining = FALSE;
816 thread_wakeup((event_t) (&q->pgo_laundry + 1));
817 }
818 }
819
820
821
822 /*
823 * VM memory pressure monitoring.
824 *
825 * vm_pageout_scan() keeps track of the number of pages it considers and
826 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
827 *
828 * compute_memory_pressure() is called every second from compute_averages()
829 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
830 * of reclaimed pages in a new vm_pageout_stat[] bucket.
831 *
832 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
833 * The caller provides the number of seconds ("nsecs") worth of statistics
834 * it wants, up to 30 seconds.
835 * It computes the number of pages reclaimed in the past "nsecs" seconds and
836 * also returns the number of pages the system still needs to reclaim at this
837 * moment in time.
838 */
839 #if DEVELOPMENT || DEBUG
840 #define VM_PAGEOUT_STAT_SIZE ((30 * 8) + 1)
841 #else
842 #define VM_PAGEOUT_STAT_SIZE ((1 * 8) + 1)
843 #endif
844 struct vm_pageout_stat {
845 unsigned long vm_page_active_count;
846 unsigned long vm_page_speculative_count;
847 unsigned long vm_page_inactive_count;
848 unsigned long vm_page_anonymous_count;
849
850 unsigned long vm_page_free_count;
851 unsigned long vm_page_wire_count;
852 unsigned long vm_page_compressor_count;
853
854 unsigned long vm_page_pages_compressed;
855 unsigned long vm_page_pageable_internal_count;
856 unsigned long vm_page_pageable_external_count;
857 unsigned long vm_page_xpmapped_external_count;
858
859 unsigned int pages_grabbed;
860 unsigned int pages_freed;
861
862 unsigned int pages_compressed;
863 unsigned int pages_grabbed_by_compressor;
864 unsigned int failed_compressions;
865
866 unsigned int pages_evicted;
867 unsigned int pages_purged;
868
869 unsigned int considered;
870 unsigned int considered_bq_internal;
871 unsigned int considered_bq_external;
872
873 unsigned int skipped_external;
874 unsigned int filecache_min_reactivations;
875
876 unsigned int freed_speculative;
877 unsigned int freed_cleaned;
878 unsigned int freed_internal;
879 unsigned int freed_external;
880
881 unsigned int cleaned_dirty_external;
882 unsigned int cleaned_dirty_internal;
883
884 unsigned int inactive_referenced;
885 unsigned int inactive_nolock;
886 unsigned int reactivation_limit_exceeded;
887 unsigned int forced_inactive_reclaim;
888
889 unsigned int throttled_internal_q;
890 unsigned int throttled_external_q;
891
892 unsigned int phantom_ghosts_found;
893 unsigned int phantom_ghosts_added;
894 } vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
895
896 unsigned int vm_pageout_stat_now = 0;
897
898 #define VM_PAGEOUT_STAT_BEFORE(i) \
899 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
900 #define VM_PAGEOUT_STAT_AFTER(i) \
901 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
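/*
 * Illustrative sketch (not part of the original source, not compiled):
 * VM_PAGEOUT_STAT_BEFORE()/AFTER() implement a ring-buffer index over
 * vm_pageout_stats[].  Walking backwards from vm_pageout_stat_now with
 * VM_PAGEOUT_STAT_BEFORE() visits the most recently completed sampling
 * buckets and wraps at index 0, the same pattern used by
 * mach_vm_pressure_monitor() below.  The helper name is made up.
 */
#if 0
static unsigned int
vm_pageout_stats_sum_considered(unsigned int buckets)
{
	unsigned int i = vm_pageout_stat_now;
	unsigned int total = 0;

	while (buckets-- != 0) {
		i = VM_PAGEOUT_STAT_BEFORE(i);
		if (i == vm_pageout_stat_now) {
			break;  /* wrapped all the way around the ring */
		}
		total += vm_pageout_stats[i].considered;
	}
	return total;
}
#endif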
902
903 #if VM_PAGE_BUCKETS_CHECK
904 int vm_page_buckets_check_interval = 80; /* in eighths of a second */
905 #endif /* VM_PAGE_BUCKETS_CHECK */
906
907
908 void
909 record_memory_pressure(void);
910 void
911 record_memory_pressure(void)
912 {
913 unsigned int vm_pageout_next;
914
915 #if VM_PAGE_BUCKETS_CHECK
916 /* check the consistency of VM page buckets at regular interval */
917 static int counter = 0;
918 if ((++counter % vm_page_buckets_check_interval) == 0) {
919 vm_page_buckets_check();
920 }
921 #endif /* VM_PAGE_BUCKETS_CHECK */
922
923 vm_pageout_state.vm_memory_pressure =
924 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
925 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
926 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
927 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;
928
929 commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure );
930
931 /* move "now" forward */
932 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
933
934 bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));
935
936 vm_pageout_stat_now = vm_pageout_next;
937 }
938
939
940 /*
941 * IMPORTANT
942 * mach_vm_ctl_page_free_wanted() is called indirectly, via
943 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
944 * it must be safe in the restricted stackshot context. Locks and/or
945 * blocking are not allowable.
946 */
947 unsigned int
948 mach_vm_ctl_page_free_wanted(void)
949 {
950 unsigned int page_free_target, page_free_count, page_free_wanted;
951
952 page_free_target = vm_page_free_target;
953 page_free_count = vm_page_free_count;
954 if (page_free_target > page_free_count) {
955 page_free_wanted = page_free_target - page_free_count;
956 } else {
957 page_free_wanted = 0;
958 }
959
960 return page_free_wanted;
961 }
962
963
964 /*
965 * IMPORTANT:
966 * mach_vm_pressure_monitor() is called when taking a stackshot, with
967 * wait_for_pressure FALSE, so that code path must remain safe in the
968 * restricted stackshot context. No blocking or locks are allowable
969 * on that code path.
970 */
971
972 kern_return_t
973 mach_vm_pressure_monitor(
974 boolean_t wait_for_pressure,
975 unsigned int nsecs_monitored,
976 unsigned int *pages_reclaimed_p,
977 unsigned int *pages_wanted_p)
978 {
979 wait_result_t wr;
980 unsigned int vm_pageout_then, vm_pageout_now;
981 unsigned int pages_reclaimed;
982 unsigned int units_of_monitor;
983
984 units_of_monitor = 8 * nsecs_monitored;
985 /*
986 * We don't take the vm_page_queue_lock here because we don't want
987 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
988 * thread when it's trying to reclaim memory. We don't need fully
989 * accurate monitoring anyway...
990 */
991
992 if (wait_for_pressure) {
993 /* wait until there's memory pressure */
994 while (vm_page_free_count >= vm_page_free_target) {
995 wr = assert_wait((event_t) &vm_page_free_wanted,
996 THREAD_INTERRUPTIBLE);
997 if (wr == THREAD_WAITING) {
998 wr = thread_block(THREAD_CONTINUE_NULL);
999 }
1000 if (wr == THREAD_INTERRUPTED) {
1001 return KERN_ABORTED;
1002 }
1003 if (wr == THREAD_AWAKENED) {
1004 /*
1005 * The memory pressure might have already
1006 * been relieved but let's not block again
1007 * and let's report that there was memory
1008 * pressure at some point.
1009 */
1010 break;
1011 }
1012 }
1013 }
1014
1015 /* provide the number of pages the system wants to reclaim */
1016 if (pages_wanted_p != NULL) {
1017 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1018 }
1019
1020 if (pages_reclaimed_p == NULL) {
1021 return KERN_SUCCESS;
1022 }
1023
1024 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1025 vm_pageout_now = vm_pageout_stat_now;
1026 pages_reclaimed = 0;
1027 for (vm_pageout_then =
1028 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1029 vm_pageout_then != vm_pageout_now &&
1030 units_of_monitor-- != 0;
1031 vm_pageout_then =
1032 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1033 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
1034 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
1035 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
1036 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
1037 }
1038 *pages_reclaimed_p = pages_reclaimed;
1039
1040 return KERN_SUCCESS;
1041 }
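/*
 * Illustrative sketch (not part of the original source, not compiled):
 * a non-blocking caller of mach_vm_pressure_monitor().  With
 * wait_for_pressure == FALSE (as on the stackshot path) the routine
 * never blocks; it reports pages reclaimed over roughly the last
 * "nsecs_monitored" seconds plus the current shortfall.  The wrapper
 * name is made up.
 */
#if 0
static void
vm_pressure_snapshot_sketch(void)
{
	unsigned int reclaimed = 0;
	unsigned int wanted = 0;

	if (mach_vm_pressure_monitor(FALSE,     /* wait_for_pressure */
	    10,                                 /* nsecs_monitored */
	    &reclaimed, &wanted) == KERN_SUCCESS) {
		/*
		 * reclaimed: pages freed in the last ~10 seconds
		 * wanted:    pages still needed to reach vm_page_free_target
		 */
	}
}
#endif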
1042
1043
1044
1045 #if DEVELOPMENT || DEBUG
1046
1047 static void
1048 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
1049
1050 /*
1051 * condition variable used to make sure there is
1052 * only a single sweep going on at a time
1053 */
1054 boolean_t vm_pageout_disconnect_all_pages_active = FALSE;
1055
1056
1057 void
1058 vm_pageout_disconnect_all_pages()
1059 {
1060 vm_page_lock_queues();
1061
1062 if (vm_pageout_disconnect_all_pages_active == TRUE) {
1063 vm_page_unlock_queues();
1064 return;
1065 }
1066 vm_pageout_disconnect_all_pages_active = TRUE;
1067 vm_page_unlock_queues();
1068
1069 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1070 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1071 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
1072
1073 vm_pageout_disconnect_all_pages_active = FALSE;
1074 }
1075
1076
1077 void
1078 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
1079 {
1080 vm_page_t m;
1081 vm_object_t t_object = NULL;
1082 vm_object_t l_object = NULL;
1083 vm_object_t m_object = NULL;
1084 int delayed_unlock = 0;
1085 int try_failed_count = 0;
1086 int disconnected_count = 0;
1087 int paused_count = 0;
1088 int object_locked_count = 0;
1089
1090 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
1091 q, qcount, 0, 0, 0);
1092
1093 vm_page_lock_queues();
1094
1095 while (qcount && !vm_page_queue_empty(q)) {
1096 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1097
1098 m = (vm_page_t) vm_page_queue_first(q);
1099 m_object = VM_PAGE_OBJECT(m);
1100
1101 /*
1102 * check to see if we currently are working
1103 * with the same object... if so, we've
1104 * already got the lock
1105 */
1106 if (m_object != l_object) {
1107 /*
1108 * the object associated with candidate page is
1109 * different from the one we were just working
1110 * with... dump the lock if we still own it
1111 */
1112 if (l_object != NULL) {
1113 vm_object_unlock(l_object);
1114 l_object = NULL;
1115 }
1116 if (m_object != t_object) {
1117 try_failed_count = 0;
1118 }
1119
1120 /*
1121 * Try to lock object; since we've already got the
1122 * page queues lock, we can only 'try' for this one.
1123 * if the 'try' fails, we need to do a mutex_pause
1124 * to allow the owner of the object lock a chance to
1125 * run...
1126 */
1127 if (!vm_object_lock_try_scan(m_object)) {
1128 if (try_failed_count > 20) {
1129 goto reenter_pg_on_q;
1130 }
1131 vm_page_unlock_queues();
1132 mutex_pause(try_failed_count++);
1133 vm_page_lock_queues();
1134 delayed_unlock = 0;
1135
1136 paused_count++;
1137
1138 t_object = m_object;
1139 continue;
1140 }
1141 object_locked_count++;
1142
1143 l_object = m_object;
1144 }
1145 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1146 /*
1147 * put it back on the head of its queue
1148 */
1149 goto reenter_pg_on_q;
1150 }
1151 if (m->vmp_pmapped == TRUE) {
1152 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1153
1154 disconnected_count++;
1155 }
1156 reenter_pg_on_q:
1157 vm_page_queue_remove(q, m, vmp_pageq);
1158 vm_page_queue_enter(q, m, vmp_pageq);
1159
1160 qcount--;
1161 try_failed_count = 0;
1162
1163 if (delayed_unlock++ > 128) {
1164 if (l_object != NULL) {
1165 vm_object_unlock(l_object);
1166 l_object = NULL;
1167 }
1168 lck_mtx_yield(&vm_page_queue_lock);
1169 delayed_unlock = 0;
1170 }
1171 }
1172 if (l_object != NULL) {
1173 vm_object_unlock(l_object);
1174 l_object = NULL;
1175 }
1176 vm_page_unlock_queues();
1177
1178 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
1179 q, disconnected_count, object_locked_count, paused_count, 0);
1180 }
1181
1182 #endif
1183
1184
1185 static void
1186 vm_pageout_page_queue(vm_page_queue_head_t *, int);
1187
1188 /*
1189 * condition variable used to make sure there is
1190 * only a single sweep going on at a time
1191 */
1192 boolean_t vm_pageout_anonymous_pages_active = FALSE;
1193
1194
1195 void
1196 vm_pageout_anonymous_pages()
1197 {
1198 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
1199 vm_page_lock_queues();
1200
1201 if (vm_pageout_anonymous_pages_active == TRUE) {
1202 vm_page_unlock_queues();
1203 return;
1204 }
1205 vm_pageout_anonymous_pages_active = TRUE;
1206 vm_page_unlock_queues();
1207
1208 vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1209 vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1210 vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
1211
1212 if (VM_CONFIG_SWAP_IS_PRESENT) {
1213 vm_consider_swapping();
1214 }
1215
1216 vm_page_lock_queues();
1217 vm_pageout_anonymous_pages_active = FALSE;
1218 vm_page_unlock_queues();
1219 }
1220 }
1221
1222
1223 void
1224 vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
1225 {
1226 vm_page_t m;
1227 vm_object_t t_object = NULL;
1228 vm_object_t l_object = NULL;
1229 vm_object_t m_object = NULL;
1230 int delayed_unlock = 0;
1231 int try_failed_count = 0;
1232 int refmod_state;
1233 int pmap_options;
1234 struct vm_pageout_queue *iq;
1235 ppnum_t phys_page;
1236
1237
1238 iq = &vm_pageout_queue_internal;
1239
1240 vm_page_lock_queues();
1241
1242 while (qcount && !vm_page_queue_empty(q)) {
1243 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1244
1245 if (VM_PAGE_Q_THROTTLED(iq)) {
1246 if (l_object != NULL) {
1247 vm_object_unlock(l_object);
1248 l_object = NULL;
1249 }
1250 iq->pgo_draining = TRUE;
1251
1252 assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
1253 vm_page_unlock_queues();
1254
1255 thread_block(THREAD_CONTINUE_NULL);
1256
1257 vm_page_lock_queues();
1258 delayed_unlock = 0;
1259 continue;
1260 }
1261 m = (vm_page_t) vm_page_queue_first(q);
1262 m_object = VM_PAGE_OBJECT(m);
1263
1264 /*
1265 * check to see if we currently are working
1266 * with the same object... if so, we've
1267 * already got the lock
1268 */
1269 if (m_object != l_object) {
1270 if (!m_object->internal) {
1271 goto reenter_pg_on_q;
1272 }
1273
1274 /*
1275 * the object associated with candidate page is
1276 * different from the one we were just working
1277 * with... dump the lock if we still own it
1278 */
1279 if (l_object != NULL) {
1280 vm_object_unlock(l_object);
1281 l_object = NULL;
1282 }
1283 if (m_object != t_object) {
1284 try_failed_count = 0;
1285 }
1286
1287 /*
1288 * Try to lock object; since we've already got the
1289 * page queues lock, we can only 'try' for this one.
1290 * if the 'try' fails, we need to do a mutex_pause
1291 * to allow the owner of the object lock a chance to
1292 * run...
1293 */
1294 if (!vm_object_lock_try_scan(m_object)) {
1295 if (try_failed_count > 20) {
1296 goto reenter_pg_on_q;
1297 }
1298 vm_page_unlock_queues();
1299 mutex_pause(try_failed_count++);
1300 vm_page_lock_queues();
1301 delayed_unlock = 0;
1302
1303 t_object = m_object;
1304 continue;
1305 }
1306 l_object = m_object;
1307 }
1308 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1309 /*
1310 * page is not to be cleaned
1311 * put it back on the head of its queue
1312 */
1313 goto reenter_pg_on_q;
1314 }
1315 phys_page = VM_PAGE_GET_PHYS_PAGE(m);
1316
1317 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
1318 refmod_state = pmap_get_refmod(phys_page);
1319
1320 if (refmod_state & VM_MEM_REFERENCED) {
1321 m->vmp_reference = TRUE;
1322 }
1323 if (refmod_state & VM_MEM_MODIFIED) {
1324 SET_PAGE_DIRTY(m, FALSE);
1325 }
1326 }
1327 if (m->vmp_reference == TRUE) {
1328 m->vmp_reference = FALSE;
1329 pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1330 goto reenter_pg_on_q;
1331 }
1332 if (m->vmp_pmapped == TRUE) {
1333 if (m->vmp_dirty || m->vmp_precious) {
1334 pmap_options = PMAP_OPTIONS_COMPRESSOR;
1335 } else {
1336 pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1337 }
1338 refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
1339 if (refmod_state & VM_MEM_MODIFIED) {
1340 SET_PAGE_DIRTY(m, FALSE);
1341 }
1342 }
1343
1344 if (!m->vmp_dirty && !m->vmp_precious) {
1345 vm_page_unlock_queues();
1346 VM_PAGE_FREE(m);
1347 vm_page_lock_queues();
1348 delayed_unlock = 0;
1349
1350 goto next_pg;
1351 }
1352 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1353 if (!m_object->pager_initialized) {
1354 vm_page_unlock_queues();
1355
1356 vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
1357
1358 if (!m_object->pager_initialized) {
1359 vm_object_compressor_pager_create(m_object);
1360 }
1361
1362 vm_page_lock_queues();
1363 delayed_unlock = 0;
1364 }
1365 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1366 goto reenter_pg_on_q;
1367 }
1368 /*
1369 * vm_object_compressor_pager_create will drop the object lock
1370 * which means 'm' may no longer be valid to use
1371 */
1372 continue;
1373 }
1374 /*
1375 * we've already factored out pages in the laundry which
1376 * means this page can't be on the pageout queue so it's
1377 * safe to do the vm_page_queues_remove
1378 */
1379 vm_page_queues_remove(m, TRUE);
1380
1381 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1382
1383 vm_pageout_cluster(m);
1384
1385 goto next_pg;
1386
1387 reenter_pg_on_q:
1388 vm_page_queue_remove(q, m, vmp_pageq);
1389 vm_page_queue_enter(q, m, vmp_pageq);
1390 next_pg:
1391 qcount--;
1392 try_failed_count = 0;
1393
1394 if (delayed_unlock++ > 128) {
1395 if (l_object != NULL) {
1396 vm_object_unlock(l_object);
1397 l_object = NULL;
1398 }
1399 lck_mtx_yield(&vm_page_queue_lock);
1400 delayed_unlock = 0;
1401 }
1402 }
1403 if (l_object != NULL) {
1404 vm_object_unlock(l_object);
1405 l_object = NULL;
1406 }
1407 vm_page_unlock_queues();
1408 }
1409
1410
1411
1412 /*
1413 * function in BSD to apply I/O throttle to the pageout thread
1414 */
1415 extern void vm_pageout_io_throttle(void);
1416
1417 #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \
1418 MACRO_BEGIN \
1419 /* \
1420 * If a "reusable" page somehow made it back into \
1421 * the active queue, it's been re-used and is not \
1422 * quite re-usable. \
1423 * If the VM object was "all_reusable", consider it \
1424 * as "all re-used" instead of converting it to \
1425 * "partially re-used", which could be expensive. \
1426 */ \
1427 assert(VM_PAGE_OBJECT((m)) == (obj)); \
1428 if ((m)->vmp_reusable || \
1429 (obj)->all_reusable) { \
1430 vm_object_reuse_pages((obj), \
1431 (m)->vmp_offset, \
1432 (m)->vmp_offset + PAGE_SIZE_64, \
1433 FALSE); \
1434 } \
1435 MACRO_END
1436
1437
1438 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64
1439 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
1440
1441 #define FCS_IDLE 0
1442 #define FCS_DELAYED 1
1443 #define FCS_DEADLOCK_DETECTED 2
1444
1445 struct flow_control {
1446 int state;
1447 mach_timespec_t ts;
1448 };
1449
1450
1451 #if CONFIG_BACKGROUND_QUEUE
1452 uint64_t vm_pageout_rejected_bq_internal = 0;
1453 uint64_t vm_pageout_rejected_bq_external = 0;
1454 uint64_t vm_pageout_skipped_bq_internal = 0;
1455 #endif
1456
1457 #define ANONS_GRABBED_LIMIT 2
1458
1459
1460 #if 0
1461 static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
1462 #endif
1463 static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
1464
1465 #define VM_PAGEOUT_PB_NO_ACTION 0
1466 #define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
1467 #define VM_PAGEOUT_PB_THREAD_YIELD 2
1468
1469
1470 #if 0
1471 static void
1472 vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
1473 {
1474 if (*local_freeq) {
1475 vm_page_unlock_queues();
1476
1477 VM_DEBUG_CONSTANT_EVENT(
1478 vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1479 vm_page_free_count, 0, 0, 1);
1480
1481 vm_page_free_list(*local_freeq, TRUE);
1482
1483 VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1484 vm_page_free_count, *local_freed, 0, 1);
1485
1486 *local_freeq = NULL;
1487 *local_freed = 0;
1488
1489 vm_page_lock_queues();
1490 } else {
1491 lck_mtx_yield(&vm_page_queue_lock);
1492 }
1493 *delayed_unlock = 1;
1494 }
1495 #endif
1496
1497
1498 static void
1499 vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
1500 vm_page_t *local_freeq, int *local_freed, int action)
1501 {
1502 vm_page_unlock_queues();
1503
1504 if (*object != NULL) {
1505 vm_object_unlock(*object);
1506 *object = NULL;
1507 }
1508 if (*local_freeq) {
1509 vm_page_free_list(*local_freeq, TRUE);
1510
1511 *local_freeq = NULL;
1512 *local_freed = 0;
1513 }
1514 *delayed_unlock = 1;
1515
1516 switch (action) {
1517 case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
1518 vm_consider_waking_compactor_swapper();
1519 break;
1520 case VM_PAGEOUT_PB_THREAD_YIELD:
1521 thread_yield_internal(1);
1522 break;
1523 case VM_PAGEOUT_PB_NO_ACTION:
1524 default:
1525 break;
1526 }
1527 vm_page_lock_queues();
1528 }
1529
1530
1531 static struct vm_pageout_vminfo last;
1532
1533 uint64_t last_vm_page_pages_grabbed = 0;
1534
1535 extern uint32_t c_segment_pages_compressed;
1536
1537 extern uint64_t shared_region_pager_reclaimed;
1538 extern struct memory_object_pager_ops shared_region_pager_ops;
1539
1540 void
1541 update_vm_info(void)
1542 {
1543 uint64_t tmp;
1544
1545 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
1546 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
1547 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
1548 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;
1549
1550 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
1551 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
1552 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;
1553
1554 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
1555 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
1556 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
1557 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
1558
1559
1560 tmp = vm_pageout_vminfo.vm_pageout_considered_page;
1561 vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
1562 last.vm_pageout_considered_page = tmp;
1563
1564 tmp = vm_pageout_vminfo.vm_pageout_compressions;
1565 vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions);
1566 last.vm_pageout_compressions = tmp;
1567
1568 tmp = vm_pageout_vminfo.vm_compressor_failed;
1569 vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
1570 last.vm_compressor_failed = tmp;
1571
1572 tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed;
1573 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed);
1574 last.vm_compressor_pages_grabbed = tmp;
1575
1576 tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
1577 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
1578 last.vm_phantom_cache_found_ghost = tmp;
1579
1580 tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
1581 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
1582 last.vm_phantom_cache_added_ghost = tmp;
1583
1584 tmp = get_pages_grabbed_count();
1585 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed);
1586 last_vm_page_pages_grabbed = tmp;
1587
1588 tmp = vm_pageout_vminfo.vm_page_pages_freed;
1589 vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
1590 last.vm_page_pages_freed = tmp;
1591
1592
1593 if (vm_pageout_stats[vm_pageout_stat_now].considered) {
1594 tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
1595 vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
1596 last.vm_pageout_pages_evicted = tmp;
1597
1598 tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
1599 vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
1600 last.vm_pageout_pages_purged = tmp;
1601
1602 tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
1603 vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
1604 last.vm_pageout_freed_speculative = tmp;
1605
1606 tmp = vm_pageout_vminfo.vm_pageout_freed_external;
1607 vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
1608 last.vm_pageout_freed_external = tmp;
1609
1610 tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
1611 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
1612 last.vm_pageout_inactive_referenced = tmp;
1613
1614 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
1615 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
1616 last.vm_pageout_scan_inactive_throttled_external = tmp;
1617
1618 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
1619 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
1620 last.vm_pageout_inactive_dirty_external = tmp;
1621
1622 tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
1623 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
1624 last.vm_pageout_freed_cleaned = tmp;
1625
1626 tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
1627 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
1628 last.vm_pageout_inactive_nolock = tmp;
1629
1630 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
1631 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
1632 last.vm_pageout_scan_inactive_throttled_internal = tmp;
1633
1634 tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
1635 vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
1636 last.vm_pageout_skipped_external = tmp;
1637
1638 tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
1639 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
1640 last.vm_pageout_reactivation_limit_exceeded = tmp;
1641
1642 tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
1643 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
1644 last.vm_pageout_inactive_force_reclaim = tmp;
1645
1646 tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
1647 vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
1648 last.vm_pageout_freed_internal = tmp;
1649
1650 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
1651 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
1652 last.vm_pageout_considered_bq_internal = tmp;
1653
1654 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
1655 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
1656 last.vm_pageout_considered_bq_external = tmp;
1657
1658 tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
1659 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
1660 last.vm_pageout_filecache_min_reactivated = tmp;
1661
1662 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
1663 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
1664 last.vm_pageout_inactive_dirty_internal = tmp;
1665 }
1666
1667 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
1668 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
1669 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
1670 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
1671 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
1672 0);
1673
1674 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
1675 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
1676 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
1677 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
1678 0,
1679 0);
1680
1681 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
1682 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
1683 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
1684 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
1685 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
1686 0);
1687
1688 if (vm_pageout_stats[vm_pageout_stat_now].considered ||
1689 vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
1690 vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
1691 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
1692 vm_pageout_stats[vm_pageout_stat_now].considered,
1693 vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
1694 vm_pageout_stats[vm_pageout_stat_now].freed_external,
1695 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
1696 0);
1697
1698 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
1699 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
1700 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
1701 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
1702 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
1703 0);
1704
1705 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
1706 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
1707 vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
1708 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
1709 vm_pageout_stats[vm_pageout_stat_now].skipped_external,
1710 0);
1711
1712 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
1713 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
1714 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
1715 vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
1716 vm_pageout_stats[vm_pageout_stat_now].freed_internal,
1717 0);
1718
1719 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
1720 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
1721 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
1722 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
1723 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
1724 0);
1725 }
1726 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
1727 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
1728 vm_pageout_stats[vm_pageout_stat_now].pages_freed,
1729 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
1730 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
1731 0);
1732
1733 record_memory_pressure();
1734 }
1735
1736 extern boolean_t hibernation_vmqueues_inspection;
1737
1738 /*
1739 * Return values for functions called by vm_pageout_scan
1740 * that control its flow.
1741 *
1742 * PROCEED -- vm_pageout_scan will keep making forward progress.
1743 * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
1744 * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
1745 */
1746
1747 #define VM_PAGEOUT_SCAN_PROCEED (0)
1748 #define VM_PAGEOUT_SCAN_DONE_RETURN (1)
1749 #define VM_PAGEOUT_SCAN_NEXT_ITERATION (2)
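/*
 * Illustrative sketch (not part of the original source, not compiled):
 * the typical shape of the loop in vm_pageout_scan() that consumes the
 * return codes above.  "vps_some_helper" is a made-up stand-in for the
 * vps_* helper routines defined below.
 */
#if 0
static void
vps_control_flow_sketch(void)
{
	for (;;) {
		int retval = vps_some_helper();         /* hypothetical helper */

		if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
			return;                         /* page demand satisfied */
		}
		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			continue;                       /* restart the loop from the top */
		}
		/* VM_PAGEOUT_SCAN_PROCEED: keep making forward progress */
	}
}
#endif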
1750
1751 /*
1752 * This function is called only from vm_pageout_scan and
1753 * it moves overflow secluded pages (one-at-a-time) to the
1754 * batched 'local' free Q or active Q.
1755 */
1756 static void
1757 vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
1758 {
1759 #if CONFIG_SECLUDED_MEMORY
1760 /*
1761 * Deal with secluded_q overflow.
1762 */
1763 if (vm_page_secluded_count > vm_page_secluded_target) {
1764 vm_page_t secluded_page;
1765
1766 /*
1767 * SECLUDED_AGING_BEFORE_ACTIVE:
1768 * Excess secluded pages go to the active queue and
1769 * will later go to the inactive queue.
1770 */
1771 assert((vm_page_secluded_count_free +
1772 vm_page_secluded_count_inuse) ==
1773 vm_page_secluded_count);
1774 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
1775 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
1776
1777 vm_page_queues_remove(secluded_page, FALSE);
1778 assert(!secluded_page->vmp_fictitious);
1779 assert(!VM_PAGE_WIRED(secluded_page));
1780
1781 if (secluded_page->vmp_object == 0) {
1782 /* transfer to free queue */
1783 assert(secluded_page->vmp_busy);
1784 secluded_page->vmp_snext = *local_freeq;
1785 *local_freeq = secluded_page;
1786 *local_freed += 1;
1787 } else {
1788 /* transfer to head of active queue */
1789 vm_page_enqueue_active(secluded_page, FALSE);
1790 secluded_page = VM_PAGE_NULL;
1791 }
1792 }
1793 #else /* CONFIG_SECLUDED_MEMORY */
1794
1795 #pragma unused(local_freeq)
1796 #pragma unused(local_freed)
1797
1798 return;
1799
1800 #endif /* CONFIG_SECLUDED_MEMORY */
1801 }
1802
1803 /*
1804 * This function is called only from vm_pageout_scan and
1805 * it initializes the loop targets for vm_pageout_scan().
1806 */
1807 static void
1808 vps_init_page_targets(void)
1809 {
1810 /*
1811 * LD TODO: Other page targets should be calculated here too.
1812 */
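/*
 * i.e. roughly 5% of the inactive target; vps_choose_victim_page() only
 * prefers stealing anonymous pages while vm_page_anonymous_count stays
 * above this floor (unless it is forced to grab anonymous pages).
 */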
1813 vm_page_anonymous_min = vm_page_inactive_target / 20;
1814
1815 if (vm_pageout_state.vm_page_speculative_percentage > 50) {
1816 vm_pageout_state.vm_page_speculative_percentage = 50;
1817 } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
1818 vm_pageout_state.vm_page_speculative_percentage = 1;
1819 }
1820
1821 vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1822 vm_page_inactive_count);
1823 }
1824
1825 /*
1826 * This function is called only from vm_pageout_scan and
1827 * it purges a single VM object at a time and will either
1828 * make vm_pageout_scan() restart the loop or keep moving forward.
1829 */
1830 static int
1831 vps_purge_object()
1832 {
1833 int force_purge;
1834
1835 assert(available_for_purge >= 0);
1836 force_purge = 0; /* no force-purging */
1837
1838 #if VM_PRESSURE_EVENTS
1839 vm_pressure_level_t pressure_level;
1840
1841 pressure_level = memorystatus_vm_pressure_level;
1842
1843 if (pressure_level > kVMPressureNormal) {
1844 if (pressure_level >= kVMPressureCritical) {
1845 force_purge = vm_pageout_state.memorystatus_purge_on_critical;
1846 } else if (pressure_level >= kVMPressureUrgent) {
1847 force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
1848 } else if (pressure_level >= kVMPressureWarning) {
1849 force_purge = vm_pageout_state.memorystatus_purge_on_warning;
1850 }
1851 }
1852 #endif /* VM_PRESSURE_EVENTS */
1853
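/*
 * Purge a single volatile object if anything is ripe, or if the pressure
 * level above justifies forcing a purge; how aggressive a forced purge may
 * be is left to vm_purgeable_object_purge_one().
 */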
1854 if (available_for_purge || force_purge) {
1855 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
1856
1857 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
1858 if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
1859 VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
1860 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
1861 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1862
1863 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1864 }
1865 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
1866 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
1867 }
1868
1869 return VM_PAGEOUT_SCAN_PROCEED;
1870 }
1871
1872 /*
1873 * This function is called only from vm_pageout_scan and
1874 * it will try to age the next speculative Q if the oldest
1875 * one is empty.
1876 */
1877 static int
1878 vps_age_speculative_queue(boolean_t force_speculative_aging)
1879 {
1880 #define DELAY_SPECULATIVE_AGE 1000
1881
1882 /*
1883 * try to pull pages from the aging bins...
1884 * see vm_page.h for an explanation of how
1885 * this mechanism works
1886 */
1887 boolean_t can_steal = FALSE;
1888 int num_scanned_queues;
1889 static int delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop. */
1890 mach_timespec_t ts;
1891 struct vm_speculative_age_q *aq;
1892 struct vm_speculative_age_q *sq;
1893
1894 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1895
1896 aq = &vm_page_queue_speculative[speculative_steal_index];
1897
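/*
 * Rotate through the aging bins looking for a non-empty one, wrapping
 * from VM_PAGE_MAX_SPECULATIVE_AGE_Q back to VM_PAGE_MIN_SPECULATIVE_AGE_Q.
 */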
1898 num_scanned_queues = 0;
1899 while (vm_page_queue_empty(&aq->age_q) &&
1900 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1901 speculative_steal_index++;
1902
1903 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
1904 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
1905 }
1906
1907 aq = &vm_page_queue_speculative[speculative_steal_index];
1908 }
1909
1910 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
1911 /*
1912 * XXX We've scanned all the speculative
1913 * queues but still haven't found one
1914 * that is not empty, even though
1915 * vm_page_speculative_count is not 0.
1916 */
1917 if (!vm_page_queue_empty(&sq->age_q)) {
1918 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1919 }
1920 #if DEVELOPMENT || DEBUG
1921 panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
1922 #endif
1923 /* readjust... */
1924 vm_page_speculative_count = 0;
1925 /* ... and continue */
1926 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
1927 }
1928
1929 if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
1930 can_steal = TRUE;
1931 } else {
1932 if (!delay_speculative_age) {
1933 mach_timespec_t ts_fully_aged;
1934
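/*
 * The fully-aged interval is expressed in milliseconds and split into
 * seconds + nanoseconds.  As a worked example with hypothetical values:
 * 10 age queues * 30 ms per queue = 300 ms, i.e. tv_sec = 0 and
 * tv_nsec = (300 % 1000) * 1000 * NSEC_PER_USEC = 300,000,000.
 */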
1935 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
1936 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
1937 * 1000 * NSEC_PER_USEC;
1938
1939 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
1940
1941 clock_sec_t sec;
1942 clock_nsec_t nsec;
1943 clock_get_system_nanotime(&sec, &nsec);
1944 ts.tv_sec = (unsigned int) sec;
1945 ts.tv_nsec = nsec;
1946
1947 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
1948 can_steal = TRUE;
1949 } else {
1950 delay_speculative_age++;
1951 }
1952 } else {
1953 delay_speculative_age++;
1954 if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
1955 delay_speculative_age = 0;
1956 }
1957 }
1958 }
1959 if (can_steal == TRUE) {
1960 vm_page_speculate_ageit(aq);
1961 }
1962
1963 return VM_PAGEOUT_SCAN_PROCEED;
1964 }
1965
1966 /*
1967 * This function is called only from vm_pageout_scan and
1968 * it evicts a single VM object from the cache.
1969 */
1970 static inline int
1971 vps_object_cache_evict(vm_object_t *object_to_unlock)
1972 {
1973 static int cache_evict_throttle = 0;
1974 struct vm_speculative_age_q *sq;
1975
1976 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1977
1978 if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
1979 int pages_evicted;
1980
1981 if (*object_to_unlock != NULL) {
1982 vm_object_unlock(*object_to_unlock);
1983 *object_to_unlock = NULL;
1984 }
1985 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1986
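/*
 * Evict up to 100 pages from cached objects; the second argument bounds
 * how much work a single call is allowed to do (its exact meaning is
 * defined by vm_object_cache_evict()).
 */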
1987 pages_evicted = vm_object_cache_evict(100, 10);
1988
1989 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
1990
1991 if (pages_evicted) {
1992 vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
1993
1994 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
1995 vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
1996 memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
1997
1998 /*
1999 * we just freed up to 100 pages,
2000 * so go back to the top of the main loop
2001 * and re-evaluate the memory situation
2002 */
2003 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2004 } else {
2005 cache_evict_throttle = 1000;
2006 }
2007 }
2008 if (cache_evict_throttle) {
2009 cache_evict_throttle--;
2010 }
2011
2012 return VM_PAGEOUT_SCAN_PROCEED;
2013 }
2014
2015
2016 /*
2017 * This function is called only from vm_pageout_scan and
2018 * it calculates the filecache minimum that needs to be maintained
2019 * as we start to steal pages.
2020 */
2021 static void
2022 vps_calculate_filecache_min(void)
2023 {
2024 int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
2025
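/*
 * The divisor expresses the floor as the fraction 10/divisor of
 * AVAILABLE_NON_COMPRESSED_MEMORY: e.g. a divisor of 66 keeps roughly
 * 15% (10/66) of it as the filecache minimum, while a divisor of 100
 * keeps 10%.
 */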
2026 #if CONFIG_JETSAM
2027 /*
2028 * don't let the filecache_min fall below 15% of available memory
2029 * on systems with an active compressor that isn't nearing its
2030 * limits w/r to accepting new data
2031 *
2032 * on systems w/o the compressor/swapper, the filecache is always
2033 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2034 * since most (if not all) of the anonymous pages are in the
2035 * throttled queue (which isn't counted as available) which
2036 * effectively disables this filter
2037 */
2038 if (vm_compressor_low_on_space() || divisor == 0) {
2039 vm_pageout_state.vm_page_filecache_min = 0;
2040 } else {
2041 vm_pageout_state.vm_page_filecache_min =
2042 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2043 }
2044 #else
2045 if (vm_compressor_out_of_space() || divisor == 0) {
2046 vm_pageout_state.vm_page_filecache_min = 0;
2047 } else {
2048 /*
2049 * don't let the filecache_min fall below the specified critical level
2050 */
2051 vm_pageout_state.vm_page_filecache_min =
2052 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2053 }
2054 #endif
2055 if (vm_page_free_count < (vm_page_free_reserved / 4)) {
2056 vm_pageout_state.vm_page_filecache_min = 0;
2057 }
2058 }
2059
2060 /*
2061 * This function is called only from vm_pageout_scan and
2062 * it updates the flow control time to detect if VM pageout scan
2063 * isn't making progress.
2064 */
2065 static void
2066 vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
2067 {
2068 mach_timespec_t ts;
2069 clock_sec_t sec;
2070 clock_nsec_t nsec;
2071
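/*
 * Arm a deadline vm_pageout_deadlock_wait milliseconds from now.
 * FCS_DELAYED tells vps_flow_control() to compare against this deadline
 * on its next pass and escalate to FCS_DEADLOCK_DETECTED if the internal
 * pageout queue is still throttled when it expires.
 */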
2072 ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
2073 ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
2074 clock_get_system_nanotime(&sec, &nsec);
2075 flow_control->ts.tv_sec = (unsigned int) sec;
2076 flow_control->ts.tv_nsec = nsec;
2077 ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
2078
2079 flow_control->state = FCS_DELAYED;
2080
2081 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
2082 }
2083
2084 /*
2085 * This function is called only from vm_pageout_scan and
2086 * it is the flow control logic of VM pageout scan which
2087 * controls whether it should block and for how long.
2088 * Any blocking of vm_pageout_scan happens ONLY in this function.
2089 */
2090 static int
2091 vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
2092 vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
2093 {
2094 boolean_t exceeded_burst_throttle = FALSE;
2095 unsigned int msecs = 0;
2096 uint32_t inactive_external_count;
2097 mach_timespec_t ts;
2098 struct vm_pageout_queue *iq;
2099 struct vm_pageout_queue *eq;
2100 struct vm_speculative_age_q *sq;
2101
2102 iq = &vm_pageout_queue_internal;
2103 eq = &vm_pageout_queue_external;
2104 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2105
2106 /*
2107 * Sometimes we have to pause:
2108 * 1) No inactive pages - nothing to do.
2109 * 2) Loop control - no acceptable pages found on the inactive queue
2110 * within the last vm_pageout_burst_inactive_throttle iterations
2111 * 3) Flow control - default pageout queue is full
2112 */
2113 if (vm_page_queue_empty(&vm_page_queue_inactive) &&
2114 vm_page_queue_empty(&vm_page_queue_anonymous) &&
2115 vm_page_queue_empty(&vm_page_queue_cleaned) &&
2116 vm_page_queue_empty(&sq->age_q)) {
2117 VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
2118 msecs = vm_pageout_state.vm_pageout_empty_wait;
2119 } else if (inactive_burst_count >=
2120 MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
2121 (vm_page_inactive_count +
2122 vm_page_speculative_count))) {
2123 VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
2124 msecs = vm_pageout_state.vm_pageout_burst_wait;
2125
2126 exceeded_burst_throttle = TRUE;
2127 } else if (VM_PAGE_Q_THROTTLED(iq) &&
2128 VM_DYNAMIC_PAGING_ENABLED()) {
2129 clock_sec_t sec;
2130 clock_nsec_t nsec;
2131
2132 switch (flow_control->state) {
2133 case FCS_IDLE:
2134 if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
2135 vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2136 /*
2137 * since the compressor is running independently of vm_pageout_scan
2138 * let's not wait for it just yet... as long as we have a healthy supply
2139 * of filecache pages to work with, let's keep stealing those.
2140 */
2141 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2142
2143 if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
2144 (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2145 *anons_grabbed = ANONS_GRABBED_LIMIT;
2146 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
2147 return VM_PAGEOUT_SCAN_PROCEED;
2148 }
2149 }
2150
2151 vps_flow_control_reset_deadlock_timer(flow_control);
2152 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2153
2154 break;
2155
2156 case FCS_DELAYED:
2157 clock_get_system_nanotime(&sec, &nsec);
2158 ts.tv_sec = (unsigned int) sec;
2159 ts.tv_nsec = nsec;
2160
2161 if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
2162 /*
2163 * the pageout thread for the default pager is potentially
2164 * deadlocked since the
2165 * default pager queue has been throttled for more than the
2166 * allowable time... we need to move some clean pages or dirty
2167 * pages belonging to the external pagers if they aren't throttled
2168 * vm_page_free_wanted represents the number of threads currently
2169 * blocked waiting for pages... we'll move one page for each of
2170 * these plus a fixed amount to break the logjam... once we're done
2171 * moving this number of pages, we'll re-enter the FCS_DELAYED state
2172 * with a new timeout target since we have no way of knowing
2173 * whether we've broken the deadlock except through observation
2174 * of the queue associated with the default pager... we need to
2175 * stop moving pages and allow the system to run to see what
2176 * state it settles into.
2177 */
2178
2179 *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
2180 vm_page_free_wanted + vm_page_free_wanted_privileged;
2181 VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
2182 flow_control->state = FCS_DEADLOCK_DETECTED;
2183 thread_wakeup((event_t) &vm_pageout_garbage_collect);
2184 return VM_PAGEOUT_SCAN_PROCEED;
2185 }
2186 /*
2187 * just resniff instead of trying
2188 * to compute a new delay time... we're going to be
2189 * awakened immediately upon a laundry completion,
2190 * so we won't wait any longer than necessary
2191 */
2192 msecs = vm_pageout_state.vm_pageout_idle_wait;
2193 break;
2194
2195 case FCS_DEADLOCK_DETECTED:
2196 if (*vm_pageout_deadlock_target) {
2197 return VM_PAGEOUT_SCAN_PROCEED;
2198 }
2199
2200 vps_flow_control_reset_deadlock_timer(flow_control);
2201 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2202
2203 break;
2204 }
2205 } else {
2206 /*
2207 * No need to pause...
2208 */
2209 return VM_PAGEOUT_SCAN_PROCEED;
2210 }
2211
2212 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2213
2214 vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
2215 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2216
2217 if (vm_page_free_count >= vm_page_free_target) {
2218 /*
2219 * we're here because
2220 * 1) someone else freed up some pages while we had
2221 * the queues unlocked above
2222 * and we've hit one of the 3 conditions that
2223 * cause us to pause the pageout scan thread
2224 *
2225 * since we already have enough free pages,
2226 * let's avoid stalling and return normally
2227 *
2228 * before we return, make sure the pageout I/O threads
2229 * are running throttled in case there are still requests
2230 * in the laundry... since we have enough free pages
2231 * we don't need the laundry to be cleaned in a timely
2232 * fashion... so let's avoid interfering with foreground
2233 * activity
2234 *
2235 * we don't want to hold vm_page_queue_free_lock when
2236 * calling vm_pageout_adjust_eq_iothrottle (since it
2237 * may cause other locks to be taken), we do the initial
2238 * check outside of the lock. Once we take the lock,
2239 * we recheck the condition since it may have changed.
2240 * if it has, no problem, we will make the threads
2241 * non-throttled before actually blocking
2242 */
2243 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2244 }
2245 lck_mtx_lock(&vm_page_queue_free_lock);
2246
2247 if (vm_page_free_count >= vm_page_free_target &&
2248 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2249 return VM_PAGEOUT_SCAN_DONE_RETURN;
2250 }
2251 lck_mtx_unlock(&vm_page_queue_free_lock);
2252
2253 if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
2254 /*
2255 * we're most likely about to block due to one of
2256 * the 3 conditions that cause vm_pageout_scan to
2257 * not be able to make forward progress w/r
2258 * to providing new pages to the free queue,
2259 * so unthrottle the I/O threads in case we
2260 * have laundry to be cleaned... it needs
2261 * to be completed ASAP.
2262 *
2263 * even if we don't block, we want the io threads
2264 * running unthrottled since the sum of free +
2265 * clean pages is still under our free target
2266 */
2267 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2268 }
2269 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
2270 /*
2271 * if we get here we're below our free target and
2272 * we're stalling due to a full laundry queue or
2273 * we don't have any inactive pages other than
2274 * those in the clean queue...
2275 * however, we have pages on the clean queue that
2276 * can be moved to the free queue, so let's not
2277 * stall the pageout scan
2278 */
2279 flow_control->state = FCS_IDLE;
2280 return VM_PAGEOUT_SCAN_PROCEED;
2281 }
2282 if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
2283 flow_control->state = FCS_IDLE;
2284 return VM_PAGEOUT_SCAN_PROCEED;
2285 }
2286
2287 VM_CHECK_MEMORYSTATUS;
2288
2289 if (flow_control->state != FCS_IDLE) {
2290 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
2291 }
2292
2293 iq->pgo_throttled = TRUE;
2294 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
2295
2296 counter(c_vm_pageout_scan_block++);
2297
2298 vm_page_unlock_queues();
2299
2300 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2301
2302 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
2303 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2304 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
2305
2306 thread_block(THREAD_CONTINUE_NULL);
2307
2308 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
2309 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2310 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
2311
2312 vm_page_lock_queues();
2313
2314 iq->pgo_throttled = FALSE;
2315
2316 vps_init_page_targets();
2317
2318 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2319 }
2320
2321 /*
2322 * This function is called only from vm_pageout_scan and
2323 * it will find and return the most appropriate page to be
2324 * reclaimed.
2325 */
2326 static int
2327 vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
2328 boolean_t *is_page_from_bg_q, unsigned int reactivated_this_call)
2329 {
2330 vm_page_t m = NULL;
2331 vm_object_t m_object = VM_OBJECT_NULL;
2332 uint32_t inactive_external_count;
2333 struct vm_speculative_age_q *sq;
2334 struct vm_pageout_queue *iq;
2335 int retval = VM_PAGEOUT_SCAN_PROCEED;
2336
2337 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2338 iq = &vm_pageout_queue_internal;
2339
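/*
 * Victim preference order, as implemented below: the cleaned queue first,
 * then the aged speculative queue, then the background queue (when enabled
 * and over its target), and finally a choice between file-backed (inactive)
 * and anonymous pages driven by vm_page_filecache_min and
 * ANONS_GRABBED_LIMIT.
 */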
2340 while (1) {
2341 *is_page_from_bg_q = FALSE;
2342
2343 m = NULL;
2344 m_object = VM_OBJECT_NULL;
2345
2346 if (VM_DYNAMIC_PAGING_ENABLED()) {
2347 assert(vm_page_throttled_count == 0);
2348 assert(vm_page_queue_empty(&vm_page_queue_throttled));
2349 }
2350
2351 /*
2352 * Try for a clean-queue inactive page.
2353 * These are pages that vm_pageout_scan tried to steal earlier, but
2354 * were dirty and had to be cleaned. Pick them up now that they are clean.
2355 */
2356 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2357 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2358
2359 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
2360
2361 break;
2362 }
2363
2364 /*
2365 * The next most eligible pages are ones we paged in speculatively,
2366 * but which have not yet been touched and have been aged out.
2367 */
2368 if (!vm_page_queue_empty(&sq->age_q)) {
2369 m = (vm_page_t) vm_page_queue_first(&sq->age_q);
2370
2371 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
2372
2373 if (!m->vmp_dirty || force_anonymous == FALSE) {
2374 break;
2375 } else {
2376 m = NULL;
2377 }
2378 }
2379
2380 #if CONFIG_BACKGROUND_QUEUE
2381 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
2382 vm_object_t bg_m_object = NULL;
2383
2384 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2385
2386 bg_m_object = VM_PAGE_OBJECT(m);
2387
2388 if (!VM_PAGE_PAGEABLE(m)) {
2389 /*
2390 * This page is on the background queue
2391 * but not on a pageable queue. This is
2392 * likely a transient state and whoever
2393 * took it out of its pageable queue
2394 * will likely put it back on a pageable
2395 * queue soon but we can't deal with it
2396 * at this point, so let's ignore this
2397 * page.
2398 */
2399 } else if (force_anonymous == FALSE || bg_m_object->internal) {
2400 if (bg_m_object->internal &&
2401 (VM_PAGE_Q_THROTTLED(iq) ||
2402 vm_compressor_out_of_space() == TRUE ||
2403 vm_page_free_count < (vm_page_free_reserved / 4))) {
2404 vm_pageout_skipped_bq_internal++;
2405 } else {
2406 *is_page_from_bg_q = TRUE;
2407
2408 if (bg_m_object->internal) {
2409 vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
2410 } else {
2411 vm_pageout_vminfo.vm_pageout_considered_bq_external++;
2412 }
2413 break;
2414 }
2415 }
2416 }
2417 #endif /* CONFIG_BACKGROUND_QUEUE */
2418
2419 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2420
2421 if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
2422 (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2423 *grab_anonymous = TRUE;
2424 *anons_grabbed = 0;
2425
2426 vm_pageout_vminfo.vm_pageout_skipped_external++;
2427 goto want_anonymous;
2428 }
2429 *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
2430
2431 #if CONFIG_JETSAM
2432 /* If the file-backed pool has accumulated
2433 * significantly more pages than the jetsam
2434 * threshold, prefer to reclaim those
2435 * inline to minimise compute overhead of reclaiming
2436 * anonymous pages.
2437 * This calculation does not account for the CPU local
2438 * external page queues, as those are expected to be
2439 * much smaller relative to the global pools.
2440 */
2441
2442 struct vm_pageout_queue *eq = &vm_pageout_queue_external;
2443
2444 if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
2445 if (vm_page_pageable_external_count >
2446 vm_pageout_state.vm_page_filecache_min) {
2447 if ((vm_page_pageable_external_count *
2448 vm_pageout_memorystatus_fb_factor_dr) >
2449 (memorystatus_available_pages_critical *
2450 vm_pageout_memorystatus_fb_factor_nr)) {
2451 *grab_anonymous = FALSE;
2452
2453 VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
2454 }
2455 }
2456 if (*grab_anonymous) {
2457 VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
2458 }
2459 }
2460 #endif /* CONFIG_JETSAM */
2461
2462 want_anonymous:
2463 if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
2464 if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
2465 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2466
2467 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
2468 *anons_grabbed = 0;
2469
2470 if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
2471 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2472 if ((++reactivated_this_call % 100)) {
2473 vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
2474
2475 vm_page_activate(m);
2476 VM_STAT_INCR(reactivations);
2477 #if CONFIG_BACKGROUND_QUEUE
2478 #if DEVELOPMENT || DEBUG
2479 if (*is_page_from_bg_q == TRUE) {
2480 if (m_object->internal) {
2481 vm_pageout_rejected_bq_internal++;
2482 } else {
2483 vm_pageout_rejected_bq_external++;
2484 }
2485 }
2486 #endif /* DEVELOPMENT || DEBUG */
2487 #endif /* CONFIG_BACKGROUND_QUEUE */
2488 vm_pageout_state.vm_pageout_inactive_used++;
2489
2490 m = NULL;
2491 retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
2492
2493 break;
2494 }
2495
2496 /*
2497 * steal 1% of the file backed pages even if
2498 * we are under the limit that has been set
2499 * for a healthy filecache
2500 */
2501 }
2502 }
2503 break;
2504 }
2505 }
2506 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2507 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2508
2509 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
2510 *anons_grabbed += 1;
2511
2512 break;
2513 }
2514
2515 m = NULL;
2516 }
2517
2518 *victim_page = m;
2519
2520 return retval;
2521 }
2522
2523 /*
2524 * This function is called only from vm_pageout_scan and
2525 * it will put a page back on the active/inactive queue
2526 * if we can't reclaim it for some reason.
2527 */
2528 static void
2529 vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
2530 {
2531 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2532 vm_page_enqueue_inactive(m, FALSE);
2533 } else {
2534 vm_page_activate(m);
2535 }
2536
2537 #if CONFIG_BACKGROUND_QUEUE
2538 #if DEVELOPMENT || DEBUG
2539 vm_object_t m_object = VM_PAGE_OBJECT(m);
2540
2541 if (page_from_bg_q == TRUE) {
2542 if (m_object->internal) {
2543 vm_pageout_rejected_bq_internal++;
2544 } else {
2545 vm_pageout_rejected_bq_external++;
2546 }
2547 }
2548 #endif /* DEVELOPMENT || DEBUG */
2549 #endif /* CONFIG_BACKGROUND_QUEUE */
2550 }
2551
2552 /*
2553 * This function is called only from vm_pageout_scan and
2554 * it will try to grab the victim page's VM object (m_object)
2555 * which differs from the previous victim page's object (object).
2556 */
2557 static int
2558 vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
2559 {
2560 struct vm_speculative_age_q *sq;
2561
2562 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2563
2564 /*
2565 * the object associated with candidate page is
2566 * different from the one we were just working
2567 * with... dump the lock if we still own it
2568 */
2569 if (*object != NULL) {
2570 vm_object_unlock(*object);
2571 *object = NULL;
2572 }
2573 /*
2574 * Try to lock object; since we've already got the
2575 * page queues lock, we can only 'try' for this one.
2576 * if the 'try' fails, we need to do a mutex_pause
2577 * to allow the owner of the object lock a chance to
2578 * run... otherwise, we're likely to trip over this
2579 * object in the same state as we work our way through
2580 * the queue... clumps of pages associated with the same
2581 * object are fairly typical on the inactive and active queues
2582 */
2583 if (!vm_object_lock_try_scan(m_object)) {
2584 vm_page_t m_want = NULL;
2585
2586 vm_pageout_vminfo.vm_pageout_inactive_nolock++;
2587
2588 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2589 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
2590 }
2591
2592 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
2593
2594 m->vmp_reference = FALSE;
2595
2596 if (!m_object->object_is_shared_cache) {
2597 /*
2598 * don't apply this optimization if this is the shared cache
2599 * object, it's too easy to get rid of very hot and important
2600 * pages...
2601 * m->vmp_object must be stable since we hold the page queues lock...
2602 * we can update the scan_collisions field sans the object lock
2603 * since it is a separate field and this is the only spot that does
2604 * a read-modify-write operation and it is never executed concurrently...
2605 * we can asynchronously set this field to 0 when creating a UPL, so it
2606 * is possible for the value to be a bit non-deterministic, but that's ok
2607 * since it's only used as a hint
2608 */
2609 m_object->scan_collisions = 1;
2610 }
2611 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2612 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2613 } else if (!vm_page_queue_empty(&sq->age_q)) {
2614 m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
2615 } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
2616 !vm_page_queue_empty(&vm_page_queue_inactive)) {
2617 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2618 } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2619 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2620 }
2621
2622 /*
2623 * this is the next object we're going to be interested in
2624 * try to make sure its available after the mutex_pause
2625 * returns control
2626 */
2627 if (m_want) {
2628 vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
2629 }
2630
2631 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
2632
2633 return VM_PAGEOUT_SCAN_NEXT_ITERATION;
2634 } else {
2635 *object = m_object;
2636 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2637 }
2638
2639 return VM_PAGEOUT_SCAN_PROCEED;
2640 }
2641
2642 /*
2643 * This function is called only from vm_pageout_scan and
2644 * it notices that pageout scan may be rendered ineffective
2645 * due to an FS deadlock and will jetsam a process if possible.
2646 * If jetsam isn't supported, it'll move the page to the active
2647 * queue to try and get some different pages pushed onwards so
2648 * we can try to get out of this scenario.
2649 */
2650 static void
2651 vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
2652 int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
2653 {
2654 struct vm_pageout_queue *eq;
2655 vm_object_t cur_object = VM_OBJECT_NULL;
2656
2657 cur_object = *object;
2658
2659 eq = &vm_pageout_queue_external;
2660
2661 if (cur_object->internal == FALSE) {
2662 /*
2663 * we need to break up the following potential deadlock case...
2664 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2665 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2666 * c) Most of the pages in the inactive queue belong to this file.
2667 *
2668 * we are potentially in this deadlock because...
2669 * a) the external pageout queue is throttled
2670 * b) we're done with the active queue and moved on to the inactive queue
2671 * c) we've got a dirty external page
2672 *
2673 * since we don't know the reason for the external pageout queue being throttled we
2674 * must suspect that we are deadlocked, so move the current page onto the active queue
2675 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2676 *
2677 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2678 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2679 * pool the next time we select a victim page... if we can make enough new free pages,
2680 * the deadlock will break, the external pageout queue will empty and it will no longer
2681 * be throttled
2682 *
2683 * if we have jetsam configured, keep a count of the pages reactivated this way so
2684 * that we can try to find clean pages in the active/inactive queues before
2685 * deciding to jetsam a process
2686 */
2687 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
2688
2689 vm_page_check_pageable_safe(m);
2690 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
2691 vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
2692 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
2693 vm_page_active_count++;
2694 vm_page_pageable_external_count++;
2695
2696 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2697
2698 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2699
2700 #pragma unused(force_anonymous)
2701
2702 *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
2703
2704 if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
2705 *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2706 /*
2707 * Possible deadlock scenario so request jetsam action
2708 */
2709
2710 assert(cur_object);
2711 vm_object_unlock(cur_object);
2712
2713 cur_object = VM_OBJECT_NULL;
2714
2715 /*
2716 * VM pageout scan needs to know we have dropped this lock and so set the
2717 * object variable we got passed in to NULL.
2718 */
2719 *object = VM_OBJECT_NULL;
2720
2721 vm_page_unlock_queues();
2722
2723 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
2724 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2725
2726 /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
2727 if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
2728 VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
2729 }
2730
2731 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
2732 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
2733
2734 vm_page_lock_queues();
2735 *delayed_unlock = 1;
2736 }
2737 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2738
2739 #pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
2740 #pragma unused(delayed_unlock)
2741
2742 *force_anonymous = TRUE;
2743 #endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2744 } else {
2745 vm_page_activate(m);
2746 VM_STAT_INCR(reactivations);
2747
2748 #if CONFIG_BACKGROUND_QUEUE
2749 #if DEVELOPMENT || DEBUG
2750 if (is_page_from_bg_q == TRUE) {
2751 if (cur_object->internal) {
2752 vm_pageout_rejected_bq_internal++;
2753 } else {
2754 vm_pageout_rejected_bq_external++;
2755 }
2756 }
2757 #endif /* DEVELOPMENT || DEBUG */
2758 #endif /* CONFIG_BACKGROUND_QUEUE */
2759
2760 vm_pageout_state.vm_pageout_inactive_used++;
2761 }
2762 }
2763
2764
2765 void
2766 vm_page_balance_inactive(int max_to_move)
2767 {
2768 vm_page_t m;
2769
2770 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2771
2772 if (hibernation_vmqueues_inspection == TRUE) {
2773 /*
2774 * It is likely that the hibernation code path is
2775 * dealing with these very queues as we are about
2776 * to move pages around in/from them and completely
2777 * change the linkage of the pages.
2778 *
2779 * And so we skip the rebalancing of these queues.
2780 */
2781 return;
2782 }
2783 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
2784 vm_page_inactive_count +
2785 vm_page_speculative_count);
2786
2787 while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
2788 VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
2789
2790 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
2791
2792 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
2793 assert(!m->vmp_laundry);
2794 assert(VM_PAGE_OBJECT(m) != kernel_object);
2795 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
2796
2797 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2798
2799 /*
2800 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
2801 *
2802 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
2803 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
2804 * new reference happens. If no further references happen on the page after that remote TLB flushes
2805 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
2806 * by pageout_scan, which is just fine since the last reference would have happened quite far
2807 * in the past (TLB caches don't hang around for very long), and of course could just as easily
2808 * have happened before we moved the page
2809 */
2810 if (m->vmp_pmapped == TRUE) {
2811 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
2812 }
2813
2814 /*
2815 * The page might be absent or busy,
2816 * but vm_page_deactivate can handle that.
2817 * FALSE indicates that we don't want a H/W clear reference
2818 */
2819 vm_page_deactivate_internal(m, FALSE);
2820 }
2821 }
2822
2823
2824 /*
2825 * vm_pageout_scan does the dirty work for the pageout daemon.
2826 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
2827 * held and vm_page_free_wanted == 0.
2828 */
2829 void
2830 vm_pageout_scan(void)
2831 {
2832 unsigned int loop_count = 0;
2833 unsigned int inactive_burst_count = 0;
2834 unsigned int reactivated_this_call;
2835 unsigned int reactivate_limit;
2836 vm_page_t local_freeq = NULL;
2837 int local_freed = 0;
2838 int delayed_unlock;
2839 int delayed_unlock_limit = 0;
2840 int refmod_state = 0;
2841 int vm_pageout_deadlock_target = 0;
2842 struct vm_pageout_queue *iq;
2843 struct vm_pageout_queue *eq;
2844 struct vm_speculative_age_q *sq;
2845 struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
2846 boolean_t inactive_throttled = FALSE;
2847 vm_object_t object = NULL;
2848 uint32_t inactive_reclaim_run;
2849 boolean_t grab_anonymous = FALSE;
2850 boolean_t force_anonymous = FALSE;
2851 boolean_t force_speculative_aging = FALSE;
2852 int anons_grabbed = 0;
2853 int page_prev_q_state = 0;
2854 boolean_t page_from_bg_q = FALSE;
2855 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
2856 vm_object_t m_object = VM_OBJECT_NULL;
2857 int retval = 0;
2858 boolean_t lock_yield_check = FALSE;
2859
2860
2861 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
2862 vm_pageout_vminfo.vm_pageout_freed_speculative,
2863 vm_pageout_state.vm_pageout_inactive_clean,
2864 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
2865 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
2866
2867 flow_control.state = FCS_IDLE;
2868 iq = &vm_pageout_queue_internal;
2869 eq = &vm_pageout_queue_external;
2870 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
2871
2872 /* Ask the pmap layer to return any pages it no longer needs. */
2873 uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
2874
2875 vm_page_lock_queues();
2876
2877 vm_page_wire_count -= pmap_wired_pages_freed;
2878
2879 delayed_unlock = 1;
2880
2881 /*
2882 * Calculate the max number of referenced pages on the inactive
2883 * queue that we will reactivate.
2884 */
2885 reactivated_this_call = 0;
2886 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
2887 vm_page_inactive_count);
2888 inactive_reclaim_run = 0;
2889
2890 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
2891
2892 /*
2893 * We must limit the rate at which we send pages to the pagers
2894 * so that we don't tie up too many pages in the I/O queues.
2895 * We implement a throttling mechanism using the laundry count
2896 * to limit the number of pages outstanding to the default
2897 * and external pagers. We can bypass the throttles and look
2898 * for clean pages if the pageout queues don't drain in a timely
2899 * fashion since this may indicate that the pageout paths are
2900 * stalled waiting for memory, which only we can provide.
2901 */
2902
2903 vps_init_page_targets();
2904 assert(object == NULL);
2905 assert(delayed_unlock != 0);
2906
2907 for (;;) {
2908 vm_page_t m;
2909
2910 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
2911
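/*
 * lock_yield_check is set by the paths below that requeue, reactivate or
 * free a page.  After delayed_unlock_limit such iterations we flush the
 * local free list and, if nothing was freed, yield the page-queues lock;
 * if vm_pageout_scan_wants_object is set (we failed to try-lock an object
 * in vps_switch_object()), we briefly drop the lock and pause to give the
 * object lock's holder a chance to run.
 */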
2912 if (lock_yield_check) {
2913 lock_yield_check = FALSE;
2914
2915 if (delayed_unlock++ > delayed_unlock_limit) {
2916 int freed = local_freed;
2917
2918 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2919 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2920 if (freed == 0) {
2921 lck_mtx_yield(&vm_page_queue_lock);
2922 }
2923 } else if (vm_pageout_scan_wants_object) {
2924 vm_page_unlock_queues();
2925 mutex_pause(0);
2926 vm_page_lock_queues();
2927 }
2928 }
2929
2930 if (vm_upl_wait_for_pages < 0) {
2931 vm_upl_wait_for_pages = 0;
2932 }
2933
2934 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
2935
2936 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
2937 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
2938 }
2939
2940 vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
2941
2942 assert(delayed_unlock);
2943
2944 /*
2945 * maintain our balance
2946 */
2947 vm_page_balance_inactive(1);
2948
2949
2950 /**********************************************************************
2951 * above this point we're playing with the active and secluded queues
2952 * below this point we're playing with the throttling mechanisms
2953 * and the inactive queue
2954 **********************************************************************/
2955
2956 if (vm_page_free_count + local_freed >= vm_page_free_target) {
2957 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2958
2959 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2960 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2961 /*
2962 * make sure the pageout I/O threads are running
2963 * throttled in case there are still requests
2964 * in the laundry... since we have met our targets
2965 * we don't need the laundry to be cleaned in a timely
2966 * fashion... so let's avoid interfering with foreground
2967 * activity
2968 */
2969 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2970
2971 lck_mtx_lock(&vm_page_queue_free_lock);
2972
2973 if ((vm_page_free_count >= vm_page_free_target) &&
2974 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2975 /*
2976 * done - we have met our target *and*
2977 * there is no one waiting for a page.
2978 */
2979 return_from_scan:
2980 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2981
2982 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
2983 vm_pageout_state.vm_pageout_inactive,
2984 vm_pageout_state.vm_pageout_inactive_used, 0, 0);
2985 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
2986 vm_pageout_vminfo.vm_pageout_freed_speculative,
2987 vm_pageout_state.vm_pageout_inactive_clean,
2988 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
2989 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
2990
2991 return;
2992 }
2993 lck_mtx_unlock(&vm_page_queue_free_lock);
2994 }
2995
2996 /*
2997 * Before anything, we check if we have any ripe volatile
2998 * objects around. If so, try to purge the first object.
2999 * If the purge fails, fall through to reclaim a page instead.
3000 * If the purge succeeds, go back to the top and reevaluate
3001 * the new memory situation.
3002 */
3003 retval = vps_purge_object();
3004
3005 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3006 /*
3007 * Success
3008 */
3009 if (object != NULL) {
3010 vm_object_unlock(object);
3011 object = NULL;
3012 }
3013
3014 lock_yield_check = FALSE;
3015 continue;
3016 }
3017
3018 /*
3019 * If our 'aged' queue is empty and we have some speculative pages
3020 * in the other queues, let's go through and see if we need to age
3021 * them.
3022 *
3023 * If we succeeded in aging a speculative Q or just that everything
3024 * looks normal w.r.t queue age and queue counts, we keep going onward.
3025 *
3026 * If, for some reason, we seem to have a mismatch between the spec.
3027 * page count and the page queues, we reset those variables and
3028 * restart the loop (LD TODO: Track this better?).
3029 */
3030 if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
3031 retval = vps_age_speculative_queue(force_speculative_aging);
3032
3033 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3034 lock_yield_check = FALSE;
3035 continue;
3036 }
3037 }
3038 force_speculative_aging = FALSE;
3039
3040 /*
3041 * Check to see if we need to evict objects from the cache.
3042 *
3043 * Note: 'object' here doesn't have anything to do with
3044 * the eviction part. We just need to make sure we have dropped
3045 * any object lock we might be holding if we need to go down
3046 * into the eviction logic.
3047 */
3048 retval = vps_object_cache_evict(&object);
3049
3050 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3051 lock_yield_check = FALSE;
3052 continue;
3053 }
3054
3055
3056 /*
3057 * Calculate our filecache_min that will affect the loop
3058 * going forward.
3059 */
3060 vps_calculate_filecache_min();
3061
3062 /*
3063 * LD TODO: Use a structure to hold all state variables for a single
3064 * vm_pageout_scan iteration and pass that structure to this function instead.
3065 */
3066 retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
3067 &delayed_unlock, &local_freeq, &local_freed,
3068 &vm_pageout_deadlock_target, inactive_burst_count);
3069
3070 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3071 if (loop_count >= vm_page_inactive_count) {
3072 loop_count = 0;
3073 }
3074
3075 inactive_burst_count = 0;
3076
3077 assert(object == NULL);
3078 assert(delayed_unlock != 0);
3079
3080 lock_yield_check = FALSE;
3081 continue;
3082 } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
3083 goto return_from_scan;
3084 }
3085
3086 flow_control.state = FCS_IDLE;
3087
3088 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
3089 vm_pageout_inactive_external_forced_reactivate_limit);
3090 loop_count++;
3091 inactive_burst_count++;
3092 vm_pageout_state.vm_pageout_inactive++;
3093
3094 /*
3095 * Choose a victim.
3096 */
3097
3098 m = NULL;
3099 retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, reactivated_this_call);
3100
3101 if (m == NULL) {
3102 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3103 reactivated_this_call++;
3104
3105 inactive_burst_count = 0;
3106
3107 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3108 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3109 }
3110
3111 lock_yield_check = TRUE;
3112 continue;
3113 }
3114
3115 /*
3116 * if we've gotten here, we have no victim page.
3117 * check to see if we've not finished balancing the queues
3118 * or we have a page on the aged speculative queue that we
3119 * skipped due to force_anonymous == TRUE... or we have
3120 * speculative pages that we can prematurely age... if
3121 * any of these cases holds we'll keep going, else panic
3122 */
3123 force_anonymous = FALSE;
3124 VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
3125
3126 if (!vm_page_queue_empty(&sq->age_q)) {
3127 lock_yield_check = TRUE;
3128 continue;
3129 }
3130
3131 if (vm_page_speculative_count) {
3132 force_speculative_aging = TRUE;
3133 lock_yield_check = TRUE;
3134 continue;
3135 }
3136 panic("vm_pageout: no victim");
3137
3138 /* NOTREACHED */
3139 }
3140
3141 assert(VM_PAGE_PAGEABLE(m));
3142 m_object = VM_PAGE_OBJECT(m);
3143 force_anonymous = FALSE;
3144
3145 page_prev_q_state = m->vmp_q_state;
3146 /*
3147 * we just found this page on one of our queues...
3148 * it can't also be on the pageout queue, so safe
3149 * to call vm_page_queues_remove
3150 */
3151 vm_page_queues_remove(m, TRUE);
3152
3153 assert(!m->vmp_laundry);
3154 assert(!m->vmp_private);
3155 assert(!m->vmp_fictitious);
3156 assert(m_object != kernel_object);
3157 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
3158
3159 vm_pageout_vminfo.vm_pageout_considered_page++;
3160
3161 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
3162
3163 /*
3164 * check to see if we currently are working
3165 * with the same object... if so, we've
3166 * already got the lock
3167 */
3168 if (m_object != object) {
3169 boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
3170
3171 /*
3172 * vps_switch_object() will always drop the 'object' lock first
3173 * and then try to acquire the 'm_object' lock. So 'object' has to point to
3174 * either 'm_object' or NULL.
3175 */
3176 retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
3177
3178 if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
3179 lock_yield_check = TRUE;
3180 continue;
3181 }
3182 }
3183 assert(m_object == object);
3184 assert(VM_PAGE_OBJECT(m) == m_object);
3185
3186 if (m->vmp_busy) {
3187 /*
3188 * Somebody is already playing with this page.
3189 * Put it back on the appropriate queue
3190 *
3191 */
3192 VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
3193
3194 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3195 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
3196 }
3197
3198 vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
3199
3200 lock_yield_check = TRUE;
3201 continue;
3202 }
3203
3204 /*
3205 * if (m->vmp_cleaning && !m->vmp_free_when_done)
3206 * If already cleaning this page in place
3207 * just leave it off the paging queues.
3208 * We can leave the page mapped, and upl_commit_range
3209 * will put it on the clean queue.
3210 *
3211 * if (m->vmp_free_when_done && !m->vmp_cleaning)
3212 * an msync INVALIDATE is in progress...
3213 * this page has been marked for destruction
3214 * after it has been cleaned,
3215 * but not yet gathered into a UPL
3216 * where 'cleaning' will be set...
3217 * just leave it off the paging queues
3218 *
3219 * if (m->vmp_free_when_done && m->vmp_cleaning)
3220 * an msync INVALIDATE is in progress
3221 * and the UPL has already gathered this page...
3222 * just leave it off the paging queues
3223 */
3224 if (m->vmp_free_when_done || m->vmp_cleaning) {
3225 lock_yield_check = TRUE;
3226 continue;
3227 }
3228
3229
3230 /*
3231 * If it's absent, in error or the object is no longer alive,
3232 * we can reclaim the page... in the no longer alive case,
3233 * there are 2 states the page can be in that preclude us
3234 * from reclaiming it - busy or cleaning - that we've already
3235 * dealt with
3236 */
3237 if (m->vmp_absent || m->vmp_error || !object->alive) {
3238 if (m->vmp_absent) {
3239 VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
3240 } else if (!object->alive) {
3241 VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
3242 } else {
3243 VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
3244 }
3245 reclaim_page:
3246 if (vm_pageout_deadlock_target) {
3247 VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
3248 vm_pageout_deadlock_target--;
3249 }
3250
3251 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
3252
3253 if (object->internal) {
3254 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
3255 } else {
3256 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
3257 }
3258 assert(!m->vmp_cleaning);
3259 assert(!m->vmp_laundry);
3260
3261 if (!object->internal &&
3262 object->pager != NULL &&
3263 object->pager->mo_pager_ops == &shared_region_pager_ops) {
3264 shared_region_pager_reclaimed++;
3265 }
3266
3267 m->vmp_busy = TRUE;
3268
3269 /*
3270 * remove page from object here since we're already
3271 * behind the object lock... defer the rest of the work
3272 * we'd normally do in vm_page_free_prepare_object
3273 * until 'vm_page_free_list' is called
3274 */
3275 if (m->vmp_tabled) {
3276 vm_page_remove(m, TRUE);
3277 }
3278
3279 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
3280 m->vmp_snext = local_freeq;
3281 local_freeq = m;
3282 local_freed++;
3283
3284 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3285 vm_pageout_vminfo.vm_pageout_freed_speculative++;
3286 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3287 vm_pageout_vminfo.vm_pageout_freed_cleaned++;
3288 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
3289 vm_pageout_vminfo.vm_pageout_freed_internal++;
3290 } else {
3291 vm_pageout_vminfo.vm_pageout_freed_external++;
3292 }
3293
3294 inactive_burst_count = 0;
3295
3296 lock_yield_check = TRUE;
3297 continue;
3298 }
3299 if (object->copy == VM_OBJECT_NULL) {
3300 /*
3301 * No one else can have any interest in this page.
3302 * If this is an empty purgable object, the page can be
3303 * reclaimed even if dirty.
3304 * If the page belongs to a volatile purgable object, we
3305 * reactivate it if the compressor isn't active.
3306 */
3307 if (object->purgable == VM_PURGABLE_EMPTY) {
3308 if (m->vmp_pmapped == TRUE) {
3309 /* unmap the page */
3310 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3311 if (refmod_state & VM_MEM_MODIFIED) {
3312 SET_PAGE_DIRTY(m, FALSE);
3313 }
3314 }
3315 if (m->vmp_dirty || m->vmp_precious) {
3316 /* we saved the cost of cleaning this page ! */
3317 vm_page_purged_count++;
3318 }
3319 goto reclaim_page;
3320 }
3321
3322 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
3323 /*
3324 * With the VM compressor, the cost of
3325 * reclaiming a page is much lower (no I/O),
3326 * so if we find a "volatile" page, it's better
3327 * to let it get compressed rather than letting
3328 * it occupy a full page until it gets purged.
3329 * So no need to check for "volatile" here.
3330 */
3331 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
3332 /*
3333 * Avoid cleaning a "volatile" page which might
3334 * be purged soon.
3335 */
3336
3337 /* if it's wired, we can't put it on our queue */
3338 assert(!VM_PAGE_WIRED(m));
3339
3340 /* just stick it back on! */
3341 reactivated_this_call++;
3342
3343 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3344 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
3345 }
3346
3347 goto reactivate_page;
3348 }
3349 }
3350 /*
3351 * If it's being used, reactivate.
3352 * (Fictitious pages are either busy or absent.)
3353 * First, update the reference and dirty bits
3354 * to make sure the page is unreferenced.
3355 */
3356 refmod_state = -1;
3357
3358 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
3359 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3360
3361 if (refmod_state & VM_MEM_REFERENCED) {
3362 m->vmp_reference = TRUE;
3363 }
3364 if (refmod_state & VM_MEM_MODIFIED) {
3365 SET_PAGE_DIRTY(m, FALSE);
3366 }
3367 }
3368
3369 if (m->vmp_reference || m->vmp_dirty) {
3370 /* deal with a rogue "reusable" page */
3371 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
3372 }
3373
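/*
 * Like vm_page_filecache_min, this is the fraction 10/divisor, here of
 * vm_page_external_count: e.g. a (hypothetical) divisor of 200 keeps
 * vm_page_xpmapped_min at 5% of the external page count.
 */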
3374 if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
3375 vm_pageout_state.vm_page_xpmapped_min = 0;
3376 } else {
3377 vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
3378 }
3379
3380 if (!m->vmp_no_cache &&
3381 page_from_bg_q == FALSE &&
3382 (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
3383 (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
3384 /*
3385 * The page we pulled off the inactive list has
3386 * been referenced. It is possible for other
3387 * processors to be touching pages faster than we
3388 * can clear the referenced bit and traverse the
3389 * inactive queue, so we limit the number of
3390 * reactivations.
3391 */
3392 if (++reactivated_this_call >= reactivate_limit) {
3393 vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
3394 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
3395 vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
3396 } else {
3397 uint32_t isinuse;
3398
3399 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3400 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
3401 }
3402
3403 vm_pageout_vminfo.vm_pageout_inactive_referenced++;
3404 reactivate_page:
3405 if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
3406 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
3407 /*
3408 * no explicit mappings of this object exist
3409 * and it's not open via the filesystem
3410 */
3411 vm_page_deactivate(m);
3412 VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
3413 } else {
3414 /*
3415 * The page was/is being used, so put back on active list.
3416 */
3417 vm_page_activate(m);
3418 VM_STAT_INCR(reactivations);
3419 inactive_burst_count = 0;
3420 }
3421 #if CONFIG_BACKGROUND_QUEUE
3422 #if DEVELOPMENT || DEBUG
3423 if (page_from_bg_q == TRUE) {
3424 if (m_object->internal) {
3425 vm_pageout_rejected_bq_internal++;
3426 } else {
3427 vm_pageout_rejected_bq_external++;
3428 }
3429 }
3430 #endif /* DEVELOPMENT || DEBUG */
3431 #endif /* CONFIG_BACKGROUND_QUEUE */
3432
3433 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3434 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3435 }
3436 vm_pageout_state.vm_pageout_inactive_used++;
3437
3438 lock_yield_check = TRUE;
3439 continue;
3440 }
3441 /*
3442 * Make sure we call pmap_get_refmod() if it
3443 * wasn't already called just above, to update
3444 * the dirty bit.
3445 */
3446 if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
3447 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
3448 if (refmod_state & VM_MEM_MODIFIED) {
3449 SET_PAGE_DIRTY(m, FALSE);
3450 }
3451 }
3452 }
3453
3454 /*
3455 * we've got a candidate page to steal...
3456 *
3457 * m->vmp_dirty is up to date courtesy of the
3458 * preceding check for m->vmp_reference... if
3459 * we get here, then m->vmp_reference had to be
3460 * FALSE (or possibly "reactivate_limit" was
3461 * exceeded), but in either case we called
3462 * pmap_get_refmod() and updated both
3463 * m->vmp_reference and m->vmp_dirty
3464 *
3465 * if it's dirty or precious we need to
3466 * see if the target queue is throttled
3467 * if it is, we need to skip over it by moving it back
3468 * to the end of the inactive queue
3469 */
3470
3471 inactive_throttled = FALSE;
3472
3473 if (m->vmp_dirty || m->vmp_precious) {
3474 if (object->internal) {
3475 if (VM_PAGE_Q_THROTTLED(iq)) {
3476 inactive_throttled = TRUE;
3477 }
3478 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3479 inactive_throttled = TRUE;
3480 }
3481 }
3482 throttle_inactive:
3483 if (!VM_DYNAMIC_PAGING_ENABLED() &&
3484 object->internal && m->vmp_dirty &&
3485 (object->purgable == VM_PURGABLE_DENY ||
3486 object->purgable == VM_PURGABLE_NONVOLATILE ||
3487 object->purgable == VM_PURGABLE_VOLATILE)) {
3488 vm_page_check_pageable_safe(m);
3489 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3490 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
3491 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
3492 vm_page_throttled_count++;
3493
3494 VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
3495
3496 inactive_burst_count = 0;
3497
3498 lock_yield_check = TRUE;
3499 continue;
3500 }
3501 if (inactive_throttled == TRUE) {
3502 vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
3503 &delayed_unlock, &force_anonymous, page_from_bg_q);
3504
3505 inactive_burst_count = 0;
3506
3507 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3508 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3509 }
3510
3511 lock_yield_check = TRUE;
3512 continue;
3513 }
3514
3515 /*
3516 * we've got a page that we can steal...
3517 * eliminate all mappings and make sure
3518 * we have the up-to-date modified state
3519 *
3520 * if we need to do a pmap_disconnect then we
3521 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3522 * provides the true state atomically... the
3523 * page was still mapped up to the pmap_disconnect
3524 * and may have been dirtied at the last microsecond
3525 *
3526 * Note that if 'pmapped' is FALSE then the page is not currently
3527 * mapped and has never been in any pmap, so there is no point calling
3528 * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
3529 * of likely usage of the page.
3530 */
3531 if (m->vmp_pmapped == TRUE) {
3532 int pmap_options;
3533
3534 /*
3535 * Don't count this page as going into the compressor
3536 * if any of these are true:
3537 * 1) compressed pager isn't enabled
3538 * 2) Freezer-enabled device with compressed pager
3539 * backend (exclusive use) i.e. most of the VM system
3540 * (including vm_pageout_scan) has no knowledge of
3541 * the compressor
3542 * 3) This page belongs to a file and hence will not be
3543 * sent into the compressor
3544 */
3545 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
3546 object->internal == FALSE) {
3547 pmap_options = 0;
3548 } else if (m->vmp_dirty || m->vmp_precious) {
3549 /*
3550 * VM knows that this page is dirty (or
3551 * precious) and needs to be compressed
3552 * rather than freed.
3553 * Tell the pmap layer to count this page
3554 * as "compressed".
3555 */
3556 pmap_options = PMAP_OPTIONS_COMPRESSOR;
3557 } else {
3558 /*
3559 * VM does not know if the page needs to
3560 * be preserved but the pmap layer might tell
3561 * us if any mapping has "modified" it.
3562 * Let the pmap layer count this page
3563 * as compressed if and only if it has been
3564 * modified.
3565 */
3566 pmap_options =
3567 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
3568 }
3569 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
3570 pmap_options,
3571 NULL);
3572 if (refmod_state & VM_MEM_MODIFIED) {
3573 SET_PAGE_DIRTY(m, FALSE);
3574 }
3575 }
3576
3577 /*
3578 * reset our count of pages that have been reclaimed
3579 * since the last page was 'stolen'
3580 */
3581 inactive_reclaim_run = 0;
3582
3583 /*
3584 * If it's clean and not precious, we can free the page.
3585 */
3586 if (!m->vmp_dirty && !m->vmp_precious) {
3587 vm_pageout_state.vm_pageout_inactive_clean++;
3588
3589 /*
3590 * OK, at this point we have found a page we are going to free.
3591 */
3592 #if CONFIG_PHANTOM_CACHE
3593 if (!object->internal) {
3594 vm_phantom_cache_add_ghost(m);
3595 }
3596 #endif
3597 goto reclaim_page;
3598 }
3599
3600 /*
3601 * The page may have been dirtied since the last check
3602 * for a throttled target queue (which may have been skipped
3603 * if the page was clean then). With the dirty page
3604 * disconnected here, we can make one final check.
3605 */
3606 if (object->internal) {
3607 if (VM_PAGE_Q_THROTTLED(iq)) {
3608 inactive_throttled = TRUE;
3609 }
3610 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3611 inactive_throttled = TRUE;
3612 }
3613
3614 if (inactive_throttled == TRUE) {
3615 goto throttle_inactive;
3616 }
3617
3618 #if VM_PRESSURE_EVENTS
3619 #if CONFIG_JETSAM
3620
3621 /*
3622 * If Jetsam is enabled, then the sending
3623 * of memory pressure notifications is handled
3624 * from the same thread that takes care of high-water
3625 * and other jetsams i.e. the memorystatus_thread.
3626 */
3627
3628 #else /* CONFIG_JETSAM */
3629
3630 vm_pressure_response();
3631
3632 #endif /* CONFIG_JETSAM */
3633 #endif /* VM_PRESSURE_EVENTS */
3634
3635 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3636 VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
3637 }
3638
3639 if (object->internal) {
3640 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
3641 } else {
3642 vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
3643 }
3644
3645 /*
3646 * internal pages will go to the compressor...
3647 * external pages will go to the appropriate pager to be cleaned
3648 * and upon completion will end up on 'vm_page_queue_cleaned' which
3649 * is a preferred queue to steal from
3650 */
3651 vm_pageout_cluster(m);
3652 inactive_burst_count = 0;
3653
3654 /*
3655 * back to top of pageout scan loop
3656 */
3657 }
3658 }
3659
3660
3661 void
3662 vm_page_free_reserve(
3663 int pages)
3664 {
3665 int free_after_reserve;
3666
3667 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3668 if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
3669 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
3670 } else {
3671 vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
3672 }
3673 } else {
3674 if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
3675 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
3676 } else {
3677 vm_page_free_reserved += pages;
3678 }
3679 }
3680 free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
3681
3682 vm_page_free_min = vm_page_free_reserved +
3683 VM_PAGE_FREE_MIN(free_after_reserve);
3684
3685 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
3686 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
3687 }
3688
3689 vm_page_free_target = vm_page_free_reserved +
3690 VM_PAGE_FREE_TARGET(free_after_reserve);
3691
3692 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
3693 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
3694 }
3695
3696 if (vm_page_free_target < vm_page_free_min + 5) {
3697 vm_page_free_target = vm_page_free_min + 5;
3698 }
3699
3700 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
3701 }
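/*
 * Sketch of the resulting thresholds (illustrative only; the actual values
 * depend on the platform's VM_PAGE_FREE_* tunables and on
 * vm_page_free_count_init):
 *
 *	vm_page_free_reserved  <  vm_page_free_min  <  vm_page_free_target
 *
 * with vm_page_throttle_limit ending up at roughly half of
 * vm_page_free_target, since target - (target / 2) rounds up.
 */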
3702
3703 /*
3704 * vm_pageout is the high level pageout daemon.
3705 */
3706
3707 void
3708 vm_pageout_continue(void)
3709 {
3710 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
3711 VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
3712
3713 lck_mtx_lock(&vm_page_queue_free_lock);
3714 vm_pageout_running = TRUE;
3715 lck_mtx_unlock(&vm_page_queue_free_lock);
3716
3717 vm_pageout_scan();
3718 /*
3719 * we hold both the vm_page_queue_free_lock
3720 * and the vm_page_queues_lock at this point
3721 */
3722 assert(vm_page_free_wanted == 0);
3723 assert(vm_page_free_wanted_privileged == 0);
3724 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
3725
3726 vm_pageout_running = FALSE;
3727 #if !CONFIG_EMBEDDED
3728 if (vm_pageout_waiter) {
3729 vm_pageout_waiter = FALSE;
3730 thread_wakeup((event_t)&vm_pageout_waiter);
3731 }
3732 #endif /* !CONFIG_EMBEDDED */
3733
3734 lck_mtx_unlock(&vm_page_queue_free_lock);
3735 vm_page_unlock_queues();
3736
3737 counter(c_vm_pageout_block++);
3738 thread_block((thread_continue_t)vm_pageout_continue);
3739 /*NOTREACHED*/
3740 }
3741
3742 #if !CONFIG_EMBEDDED
3743 kern_return_t
3744 vm_pageout_wait(uint64_t deadline)
3745 {
3746 kern_return_t kr;
3747
3748 lck_mtx_lock(&vm_page_queue_free_lock);
3749 for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
3750 vm_pageout_waiter = TRUE;
3751 if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
3752 &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
3753 (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
3754 kr = KERN_OPERATION_TIMED_OUT;
3755 }
3756 }
3757 lck_mtx_unlock(&vm_page_queue_free_lock);
3758
3759 return kr;
3760 }
3761 #endif /* !CONFIG_EMBEDDED */
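/*
 * A minimal usage sketch for vm_pageout_wait() (hypothetical caller, not
 * present in this file): wait up to 100ms for the current pageout pass to
 * finish before proceeding.  The 100ms interval is arbitrary.
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	if (vm_pageout_wait(deadline) == KERN_OPERATION_TIMED_OUT) {
 *		// pageout was still running when the deadline expired
 *	}
 */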
3762
3763
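/*
 * Main loop of the external pageout thread: pull pages off the external
 * pageout queue one at a time, re-look each page up under its object lock,
 * and hand it to the object's pager via memory_object_data_return(),
 * throttling between requests with vm_pageout_io_throttle().
 */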
3764 static void
3765 vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
3766 {
3767 vm_page_t m = NULL;
3768 vm_object_t object;
3769 vm_object_offset_t offset;
3770 memory_object_t pager;
3771
3772 /* On systems with a compressor, the external IO thread clears its
3773 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3774 * creation)
3775 */
3776 if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
3777 current_thread()->options &= ~TH_OPT_VMPRIV;
3778 }
3779
3780 vm_page_lockspin_queues();
3781
3782 while (!vm_page_queue_empty(&q->pgo_pending)) {
3783 q->pgo_busy = TRUE;
3784 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3785
3786 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3787 VM_PAGE_CHECK(m);
3788 /*
3789 * grab a snapshot of the object and offset this
3790 * page is tabled in so that we can relookup this
3791 * page after we've taken the object lock - these
3792 * fields are stable while we hold the page queues lock
3793 * but as soon as we drop it, there is nothing to keep
3794 * this page in this object... we hold an activity_in_progress
3795 * on this object which will keep it from terminating
3796 */
3797 object = VM_PAGE_OBJECT(m);
3798 offset = m->vmp_offset;
3799
3800 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3801 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3802
3803 vm_page_unlock_queues();
3804
3805 vm_object_lock(object);
3806
3807 m = vm_page_lookup(object, offset);
3808
3809 if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
3810 !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
3811 /*
3812 * it's either the same page that someone else has
3813 * started cleaning (or it's finished cleaning or
3814 * been put back on the pageout queue), or
3815 * the page has been freed or we have found a
3816 * new page at this offset... in all of these cases
3817 * we merely need to release the activity_in_progress
3818 * we took when we put the page on the pageout queue
3819 */
3820 vm_object_activity_end(object);
3821 vm_object_unlock(object);
3822
3823 vm_page_lockspin_queues();
3824 continue;
3825 }
3826 pager = object->pager;
3827
3828 if (pager == MEMORY_OBJECT_NULL) {
3829 /*
3830 * This pager has been destroyed by either
3831 * memory_object_destroy or vm_object_destroy, and
3832 * so there is nowhere for the page to go.
3833 */
3834 if (m->vmp_free_when_done) {
3835 /*
3836 * Just free the page... VM_PAGE_FREE takes
3837 * care of cleaning up all the state...
3838 * including doing the vm_pageout_throttle_up
3839 */
3840 VM_PAGE_FREE(m);
3841 } else {
3842 vm_page_lockspin_queues();
3843
3844 vm_pageout_throttle_up(m);
3845 vm_page_activate(m);
3846
3847 vm_page_unlock_queues();
3848
3849 /*
3850 * And we are done with it.
3851 */
3852 }
3853 vm_object_activity_end(object);
3854 vm_object_unlock(object);
3855
3856 vm_page_lockspin_queues();
3857 continue;
3858 }
3859 #if 0
3860 /*
3861 * we don't hold the page queue lock
3862 * so this check isn't safe to make
3863 */
3864 VM_PAGE_CHECK(m);
3865 #endif
3866 /*
3867 * give back the activity_in_progress reference we
3868 * took when we queued up this page and replace
3869 * it with a paging_in_progress reference that will
3870 * also keep the paging offset from changing and
3871 * prevent the object from terminating
3872 */
3873 vm_object_activity_end(object);
3874 vm_object_paging_begin(object);
3875 vm_object_unlock(object);
3876
3877 /*
3878 * Send the data to the pager.
3879 * any pageout clustering happens there
3880 */
3881 memory_object_data_return(pager,
3882 m->vmp_offset + object->paging_offset,
3883 PAGE_SIZE,
3884 NULL,
3885 NULL,
3886 FALSE,
3887 FALSE,
3888 0);
3889
3890 vm_object_lock(object);
3891 vm_object_paging_end(object);
3892 vm_object_unlock(object);
3893
3894 vm_pageout_io_throttle();
3895
3896 vm_page_lockspin_queues();
3897 }
3898 q->pgo_busy = FALSE;
3899 q->pgo_idle = TRUE;
3900
3901 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3902 vm_page_unlock_queues();
3903
3904 thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
3905 /*NOTREACHED*/
3906 }
3907
3908
3909 #define MAX_FREE_BATCH 32
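/*
 * MAX_FREE_BATCH bounds how many freshly compressed pages the internal
 * (compressor) iothread accumulates on its local free list before handing
 * them back in one call to vm_page_free_list().
 */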
3910 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
3911 * this thread.
3912 */
3913
3914
3915 void
3916 vm_pageout_iothread_internal_continue(struct cq *);
3917 void
3918 vm_pageout_iothread_internal_continue(struct cq *cq)
3919 {
3920 struct vm_pageout_queue *q;
3921 vm_page_t m = NULL;
3922 boolean_t pgo_draining;
3923 vm_page_t local_q;
3924 int local_cnt;
3925 vm_page_t local_freeq = NULL;
3926 int local_freed = 0;
3927 int local_batch_size;
3928 #if DEVELOPMENT || DEBUG
3929 int ncomps = 0;
3930 boolean_t marked_active = FALSE;
3931 #endif
3932 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
3933
3934 q = cq->q;
3935 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3936
3937 #if RECORD_THE_COMPRESSED_DATA
3938 if (q->pgo_laundry) {
3939 c_compressed_record_init();
3940 }
3941 #endif
3942 while (TRUE) {
3943 int pages_left_on_q = 0;
3944
3945 local_cnt = 0;
3946 local_q = NULL;
3947
3948 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3949
3950 vm_page_lock_queues();
3951 #if DEVELOPMENT || DEBUG
3952 if (marked_active == FALSE) {
3953 vmct_active++;
3954 vmct_state[cq->id] = VMCT_ACTIVE;
3955 marked_active = TRUE;
3956 if (vmct_active == 1) {
3957 vm_compressor_epoch_start = mach_absolute_time();
3958 }
3959 }
3960 #endif
3961 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3962
3963 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
3964
3965 while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
3966 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3967 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3968 VM_PAGE_CHECK(m);
3969
3970 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3971 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3972 m->vmp_laundry = FALSE;
3973
3974 m->vmp_snext = local_q;
3975 local_q = m;
3976 local_cnt++;
3977 }
3978 if (local_q == NULL) {
3979 break;
3980 }
3981
3982 q->pgo_busy = TRUE;
3983
3984 if ((pgo_draining = q->pgo_draining) == FALSE) {
3985 vm_pageout_throttle_up_batch(q, local_cnt);
3986 pages_left_on_q = q->pgo_laundry;
3987 } else {
3988 pages_left_on_q = q->pgo_laundry - local_cnt;
3989 }
3990
3991 vm_page_unlock_queues();
3992
3993 #if !RECORD_THE_COMPRESSED_DATA
3994 if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
3995 thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
3996 }
3997 #endif
3998 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
3999
4000 while (local_q) {
4001 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
4002
4003 m = local_q;
4004 local_q = m->vmp_snext;
4005 m->vmp_snext = NULL;
4006
4007 if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
4008 #if DEVELOPMENT || DEBUG
4009 ncomps++;
4010 #endif
4011 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
4012
4013 m->vmp_snext = local_freeq;
4014 local_freeq = m;
4015 local_freed++;
4016
4017 if (local_freed >= MAX_FREE_BATCH) {
4018 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4019
4020 vm_page_free_list(local_freeq, TRUE);
4021
4022 local_freeq = NULL;
4023 local_freed = 0;
4024 }
4025 }
4026 #if !CONFIG_JETSAM
4027 while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4028 kern_return_t wait_result;
4029 int need_wakeup = 0;
4030
4031 if (local_freeq) {
4032 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4033
4034 vm_page_free_list(local_freeq, TRUE);
4035 local_freeq = NULL;
4036 local_freed = 0;
4037
4038 continue;
4039 }
4040 lck_mtx_lock_spin(&vm_page_queue_free_lock);
4041
4042 if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
4043 if (vm_page_free_wanted_privileged++ == 0) {
4044 need_wakeup = 1;
4045 }
4046 wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
4047
4048 lck_mtx_unlock(&vm_page_queue_free_lock);
4049
4050 if (need_wakeup) {
4051 thread_wakeup((event_t)&vm_page_free_wanted);
4052 }
4053
4054 if (wait_result == THREAD_WAITING) {
4055 thread_block(THREAD_CONTINUE_NULL);
4056 }
4057 } else {
4058 lck_mtx_unlock(&vm_page_queue_free_lock);
4059 }
4060 }
4061 #endif
4062 }
4063 if (local_freeq) {
4064 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
4065
4066 vm_page_free_list(local_freeq, TRUE);
4067 local_freeq = NULL;
4068 local_freed = 0;
4069 }
4070 if (pgo_draining == TRUE) {
4071 vm_page_lockspin_queues();
4072 vm_pageout_throttle_up_batch(q, local_cnt);
4073 vm_page_unlock_queues();
4074 }
4075 }
4076 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
4077
4078 /*
4079 * queue lock is held and our q is empty
4080 */
4081 q->pgo_busy = FALSE;
4082 q->pgo_idle = TRUE;
4083
4084 assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
4085 #if DEVELOPMENT || DEBUG
4086 if (marked_active == TRUE) {
4087 vmct_active--;
4088 vmct_state[cq->id] = VMCT_IDLE;
4089
4090 if (vmct_active == 0) {
4091 vm_compressor_epoch_stop = mach_absolute_time();
4092 assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
4093 "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
4094 vm_compressor_epoch_start, vm_compressor_epoch_stop);
4095 /* This interval includes intervals where one or more
4096 * compressor threads were pre-empted
4097 */
4098 vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
4099 }
4100 }
4101 #endif
4102 vm_page_unlock_queues();
4103 #if DEVELOPMENT || DEBUG
4104 if (__improbable(vm_compressor_time_thread)) {
4105 vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
4106 vmct_stats.vmct_pages[cq->id] += ncomps;
4107 vmct_stats.vmct_iterations[cq->id]++;
4108 if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
4109 vmct_stats.vmct_maxpages[cq->id] = ncomps;
4110 }
4111 if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
4112 vmct_stats.vmct_minpages[cq->id] = ncomps;
4113 }
4114 }
4115 #endif
4116
4117 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
4118
4119 thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
4120 /*NOTREACHED*/
4121 }
4122
4123
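/*
 * Compress a single page into its object's compressor pager.
 *
 * On KERN_SUCCESS the page's contents now live in the compressor, the
 * owner's compressed-page accounting has been updated, and the page has
 * been removed from its object (if it was tabled).  On failure the page
 * is reactivated.  Called with an activity reference held on the object;
 * that reference is dropped here via vm_object_activity_end().
 */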
4124 kern_return_t
4125 vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
4126 {
4127 vm_object_t object;
4128 memory_object_t pager;
4129 int compressed_count_delta;
4130 kern_return_t retval;
4131
4132 object = VM_PAGE_OBJECT(m);
4133
4134 assert(!m->vmp_free_when_done);
4135 assert(!m->vmp_laundry);
4136
4137 pager = object->pager;
4138
4139 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4140 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
4141
4142 vm_object_lock(object);
4143
4144 /*
4145 * If there is no memory object for the page, create
4146 * one and hand it to the compression pager.
4147 */
4148
4149 if (!object->pager_initialized) {
4150 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
4151 }
4152 if (!object->pager_initialized) {
4153 vm_object_compressor_pager_create(object);
4154 }
4155
4156 pager = object->pager;
4157
4158 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
4159 /*
4160 * Still no pager for the object,
4161 * or the pager has been destroyed.
4162 * Reactivate the page.
4163 *
4164 * Should only happen if there is no
4165 * compression pager
4166 */
4167 PAGE_WAKEUP_DONE(m);
4168
4169 vm_page_lockspin_queues();
4170 vm_page_activate(m);
4171 VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
4172 vm_page_unlock_queues();
4173
4174 /*
4175 * And we are done with it.
4176 */
4177 vm_object_activity_end(object);
4178 vm_object_unlock(object);
4179
4180 return KERN_FAILURE;
4181 }
4182 vm_object_unlock(object);
4183
4184 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
4185 }
4186 assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
4187 assert(object->activity_in_progress > 0);
4188
4189 retval = vm_compressor_pager_put(
4190 pager,
4191 m->vmp_offset + object->paging_offset,
4192 VM_PAGE_GET_PHYS_PAGE(m),
4193 current_chead,
4194 scratch_buf,
4195 &compressed_count_delta);
4196
4197 vm_object_lock(object);
4198
4199 assert(object->activity_in_progress > 0);
4200 assert(VM_PAGE_OBJECT(m) == object);
4201 assert( !VM_PAGE_WIRED(m));
4202
4203 vm_compressor_pager_count(pager,
4204 compressed_count_delta,
4205 FALSE, /* shared_lock */
4206 object);
4207
4208 if (retval == KERN_SUCCESS) {
4209 /*
4210 * If the object is purgeable, its owner's
4211 * purgeable ledgers will be updated in
4212 * vm_page_remove() but the page still
4213 * contributes to the owner's memory footprint,
4214 * so account for it as such.
4215 */
4216 if ((object->purgable != VM_PURGABLE_DENY ||
4217 object->vo_ledger_tag) &&
4218 object->vo_owner != NULL) {
4219 /* one more compressed purgeable/tagged page */
4220 vm_object_owner_compressed_update(object,
4221 +1);
4222 }
4223 VM_STAT_INCR(compressions);
4224
4225 if (m->vmp_tabled) {
4226 vm_page_remove(m, TRUE);
4227 }
4228 } else {
4229 PAGE_WAKEUP_DONE(m);
4230
4231 vm_page_lockspin_queues();
4232
4233 vm_page_activate(m);
4234 vm_pageout_vminfo.vm_compressor_failed++;
4235
4236 vm_page_unlock_queues();
4237 }
4238 vm_object_activity_end(object);
4239 vm_object_unlock(object);
4240
4241 return retval;
4242 }
4243
4244
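/*
 * Switch the external pageout thread between throttled and unthrottled
 * I/O policy; hibernation cleaning forces the unthrottled policy.  The
 * page queues lock is dropped around the proc_set_thread_policy_with_tid()
 * call and re-taken before returning, so callers must hold it.
 */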
4245 static void
4246 vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
4247 {
4248 uint32_t policy;
4249
4250 if (hibernate_cleaning_in_progress == TRUE) {
4251 req_lowpriority = FALSE;
4252 }
4253
4254 if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
4255 vm_page_unlock_queues();
4256
4257 if (req_lowpriority == TRUE) {
4258 policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
4259 DTRACE_VM(laundrythrottle);
4260 } else {
4261 policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
4262 DTRACE_VM(laundryunthrottle);
4263 }
4264 proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
4265 TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
4266
4267 eq->pgo_lowpriority = req_lowpriority;
4268
4269 vm_page_lock_queues();
4270 }
4271 }
4272
4273
4274 static void
4275 vm_pageout_iothread_external(void)
4276 {
4277 thread_t self = current_thread();
4278
4279 self->options |= TH_OPT_VMPRIV;
4280
4281 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
4282
4283 proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
4284 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
4285
4286 vm_page_lock_queues();
4287
4288 vm_pageout_queue_external.pgo_tid = self->thread_id;
4289 vm_pageout_queue_external.pgo_lowpriority = TRUE;
4290 vm_pageout_queue_external.pgo_inited = TRUE;
4291
4292 vm_page_unlock_queues();
4293
4294 vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
4295
4296 /*NOTREACHED*/
4297 }
4298
4299
4300 static void
4301 vm_pageout_iothread_internal(struct cq *cq)
4302 {
4303 thread_t self = current_thread();
4304
4305 self->options |= TH_OPT_VMPRIV;
4306
4307 vm_page_lock_queues();
4308
4309 vm_pageout_queue_internal.pgo_tid = self->thread_id;
4310 vm_pageout_queue_internal.pgo_lowpriority = TRUE;
4311 vm_pageout_queue_internal.pgo_inited = TRUE;
4312
4313 vm_page_unlock_queues();
4314
4315 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4316 thread_vm_bind_group_add();
4317 }
4318
4319
4320
4321 thread_set_thread_name(current_thread(), "VM_compressor");
4322 #if DEVELOPMENT || DEBUG
4323 vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
4324 #endif
4325 vm_pageout_iothread_internal_continue(cq);
4326
4327 /*NOTREACHED*/
4328 }
4329
4330 kern_return_t
4331 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
4332 {
4333 if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
4334 return KERN_SUCCESS;
4335 } else {
4336 return KERN_FAILURE; /* Already set */
4337 }
4338 }
4339
4340 extern boolean_t memorystatus_manual_testing_on;
4341 extern unsigned int memorystatus_level;
4342
4343
4344 #if VM_PRESSURE_EVENTS
4345
4346 boolean_t vm_pressure_events_enabled = FALSE;
4347
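/*
 * Recompute memorystatus_level as the percentage of memory considered
 * available (memorystatus_available_pages on embedded configurations,
 * AVAILABLE_NON_COMPRESSED_MEMORY otherwise) and walk the pressure-level
 * state machine, waking vm_pressure_thread and any level-change waiters
 * on a transition.
 *
 * Illustrative arithmetic only: with max_mem of 4GB (~1M 4K pages) and
 * ~250K available pages, memorystatus_level = (250000 * 100) / 1000000,
 * i.e. roughly 25.
 */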
4348 void
4349 vm_pressure_response(void)
4350 {
4351 vm_pressure_level_t old_level = kVMPressureNormal;
4352 int new_level = -1;
4353 unsigned int total_pages;
4354 uint64_t available_memory = 0;
4355
4356 if (vm_pressure_events_enabled == FALSE) {
4357 return;
4358 }
4359
4360 #if CONFIG_EMBEDDED
4361
4362 available_memory = (uint64_t) memorystatus_available_pages;
4363
4364 #else /* CONFIG_EMBEDDED */
4365
4366 available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4367 memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
4368
4369 #endif /* CONFIG_EMBEDDED */
4370
4371 total_pages = (unsigned int) atop_64(max_mem);
4372 #if CONFIG_SECLUDED_MEMORY
4373 total_pages -= vm_page_secluded_count;
4374 #endif /* CONFIG_SECLUDED_MEMORY */
4375 memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
4376
4377 if (memorystatus_manual_testing_on) {
4378 return;
4379 }
4380
4381 old_level = memorystatus_vm_pressure_level;
4382
4383 switch (memorystatus_vm_pressure_level) {
4384 case kVMPressureNormal:
4385 {
4386 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4387 new_level = kVMPressureCritical;
4388 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
4389 new_level = kVMPressureWarning;
4390 }
4391 break;
4392 }
4393
4394 case kVMPressureWarning:
4395 case kVMPressureUrgent:
4396 {
4397 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4398 new_level = kVMPressureNormal;
4399 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4400 new_level = kVMPressureCritical;
4401 }
4402 break;
4403 }
4404
4405 case kVMPressureCritical:
4406 {
4407 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4408 new_level = kVMPressureNormal;
4409 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
4410 new_level = kVMPressureWarning;
4411 }
4412 break;
4413 }
4414
4415 default:
4416 return;
4417 }
4418
4419 if (new_level != -1) {
4420 memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
4421
4422 if (new_level != (int) old_level) {
4423 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4424 new_level, old_level, 0, 0);
4425 }
4426
4427 if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
4428 if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
4429 thread_wakeup(&vm_pressure_thread);
4430 }
4431
4432 if (old_level != memorystatus_vm_pressure_level) {
4433 thread_wakeup(&vm_pageout_state.vm_pressure_changed);
4434 }
4435 }
4436 }
4437 }
4438 #endif /* VM_PRESSURE_EVENTS */
4439
4440 /*
4441 * Function called by a kernel thread to either get the current pressure level or
4442 * wait until memory pressure changes from a given level.
4443 */
4444 kern_return_t
4445 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
4446 {
4447 #if !VM_PRESSURE_EVENTS
4448
4449 return KERN_FAILURE;
4450
4451 #else /* VM_PRESSURE_EVENTS */
4452
4453 wait_result_t wr = 0;
4454 vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
4455
4456 if (pressure_level == NULL) {
4457 return KERN_INVALID_ARGUMENT;
4458 }
4459
4460 if (*pressure_level == kVMPressureJetsam) {
4461 if (!wait_for_pressure) {
4462 return KERN_INVALID_ARGUMENT;
4463 }
4464
4465 lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
4466 wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
4467 THREAD_INTERRUPTIBLE);
4468 if (wr == THREAD_WAITING) {
4469 ++memorystatus_jetsam_fg_band_waiters;
4470 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4471 wr = thread_block(THREAD_CONTINUE_NULL);
4472 } else {
4473 lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
4474 }
4475 if (wr != THREAD_AWAKENED) {
4476 return KERN_ABORTED;
4477 }
4478 *pressure_level = kVMPressureJetsam;
4479 return KERN_SUCCESS;
4480 }
4481
4482 if (wait_for_pressure == TRUE) {
4483 while (old_level == *pressure_level) {
4484 wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
4485 THREAD_INTERRUPTIBLE);
4486 if (wr == THREAD_WAITING) {
4487 wr = thread_block(THREAD_CONTINUE_NULL);
4488 }
4489 if (wr == THREAD_INTERRUPTED) {
4490 return KERN_ABORTED;
4491 }
4492
4493 if (wr == THREAD_AWAKENED) {
4494 old_level = memorystatus_vm_pressure_level;
4495 }
4496 }
4497 }
4498
4499 *pressure_level = old_level;
4500 return KERN_SUCCESS;
4501 #endif /* VM_PRESSURE_EVENTS */
4502 }
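/*
 * Minimal usage sketch (hypothetical in-kernel caller): poll the current
 * level, then block until it changes from that value.
 *
 *	unsigned int level = 0;
 *
 *	if (mach_vm_pressure_level_monitor(FALSE, &level) == KERN_SUCCESS) {
 *		// 'level' now holds the current vm_pressure_level_t value
 *		(void) mach_vm_pressure_level_monitor(TRUE, &level);
 *	}
 */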
4503
4504 #if VM_PRESSURE_EVENTS
4505 void
4506 vm_pressure_thread(void)
4507 {
4508 static boolean_t thread_initialized = FALSE;
4509
4510 if (thread_initialized == TRUE) {
4511 vm_pageout_state.vm_pressure_thread_running = TRUE;
4512 consider_vm_pressure_events();
4513 vm_pageout_state.vm_pressure_thread_running = FALSE;
4514 }
4515
4516 thread_set_thread_name(current_thread(), "VM_pressure");
4517 thread_initialized = TRUE;
4518 assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
4519 thread_block((thread_continue_t)vm_pressure_thread);
4520 }
4521 #endif /* VM_PRESSURE_EVENTS */
4522
4523
4524 /*
4525 * called once per second via "compute_averages"
4526 */
4527 void
4528 compute_pageout_gc_throttle(__unused void *arg)
4529 {
4530 if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
4531 vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
4532
4533 thread_wakeup((event_t) &vm_pageout_garbage_collect);
4534 }
4535 }
4536
4537 /*
4538 * vm_pageout_garbage_collect can also be called when the zone allocator needs
4539 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
4540 * jetsams. We need to check if the zone map size is above its jetsam limit to
4541 * decide if this was indeed the case.
4542 *
4543 * We need to do this on a different thread because of the following reasons:
4544 *
4545 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
4546 * itself causing the system to hang. We perform synchronous jetsams if we're
4547 * leaking in the VM map entries zone, so the leaking process could be doing a
4548 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
4549 * jetsam itself. We also need the vm_map lock on the process termination path,
4550 * which would now lead the dying process to deadlock against itself.
4551 *
4552 * 2. The jetsam path might need to allocate zone memory itself. We could try
4553 * using the non-blocking variant of zalloc for this path, but we can still
4554 * end up trying to do a kernel_memory_allocate when the zone_map is almost
4555 * full.
4556 */
4557
4558 extern boolean_t is_zone_map_nearing_exhaustion(void);
4559
4560 void
4561 vm_pageout_garbage_collect(int collect)
4562 {
4563 if (collect) {
4564 if (is_zone_map_nearing_exhaustion()) {
4565 /*
4566 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
4567 *
4568 * Bail out after calling zone_gc (which triggers the
4569 * zone-map-exhaustion jetsams). If we fall through, the subsequent
4570 * operations that clear out a bunch of caches might allocate zone
4571 * memory themselves (e.g. vm_map operations would need VM map
4572 * entries). Since the zone map is almost full at this point, we
4573 * could end up with a panic. We just need to quickly jetsam a
4574 * process and exit here.
4575 *
4576 * It could so happen that we were woken up to relieve memory
4577 * pressure and the zone map also happened to be near its limit at
4578 * the time, in which case we'll skip out early. But that should be
4579 * ok; if memory pressure persists, the thread will simply be woken
4580 * up again.
4581 */
4582 consider_zone_gc(TRUE);
4583 } else {
4584 /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
4585 boolean_t buf_large_zfree = FALSE;
4586 boolean_t first_try = TRUE;
4587
4588 stack_collect();
4589
4590 consider_machine_collect();
4591 mbuf_drain(FALSE);
4592
4593 do {
4594 if (consider_buffer_cache_collect != NULL) {
4595 buf_large_zfree = (*consider_buffer_cache_collect)(0);
4596 }
4597 if (first_try == TRUE || buf_large_zfree == TRUE) {
4598 /*
4599 * consider_zone_gc should be last, because the other operations
4600 * might return memory to zones.
4601 */
4602 consider_zone_gc(FALSE);
4603 }
4604 first_try = FALSE;
4605 } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
4606
4607 consider_machine_adjust();
4608 }
4609 }
4610
4611 assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
4612
4613 thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
4614 /*NOTREACHED*/
4615 }
4616
4617
4618 #if VM_PAGE_BUCKETS_CHECK
4619 #if VM_PAGE_FAKE_BUCKETS
4620 extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
4621 #endif /* VM_PAGE_FAKE_BUCKETS */
4622 #endif /* VM_PAGE_BUCKETS_CHECK */
4623
4624
4625
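/*
 * Decide (once, at startup) whether the major memory-freeing threads
 * should be bound to a single processor.  The default is TRUE only on
 * systems with 3 or fewer CPUs; the "vm_restricted_to_single_processor"
 * boot-arg overrides the default either way.
 */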
4626 void
4627 vm_set_restrictions()
4628 {
4629 int vm_restricted_to_single_processor = 0;
4630
4631 if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
4632 kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
4633 vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
4634 } else {
4635 host_basic_info_data_t hinfo;
4636 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
4637
4638 #define BSD_HOST 1
4639 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
4640
4641 assert(hinfo.max_cpus > 0);
4642
4643 if (hinfo.max_cpus <= 3) {
4644 /*
4645 * on systems with a limited number of CPUs, bind the
4646 * 4 major threads that can free memory and that tend to use
4647 * a fair bit of CPU under pressured conditions to a single processor.
4648 * This ensures that these threads don't hog all of the available CPUs
4649 * (important for camera launch), while allowing them to run independently
4650 * with respect to locks... the 4 threads are
4651 * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
4652 * vm_compressor_swap_trigger_thread (minor and major compactions),
4653 * memorystatus_thread (jetsams).
4654 *
4655 * the first time the thread is run, it is responsible for checking the
4656 * state of vm_restricted_to_single_processor, and if TRUE it calls
4657 * thread_bind_master... someday this should be replaced with a group
4658 * scheduling mechanism and KPI.
4659 */
4660 vm_pageout_state.vm_restricted_to_single_processor = TRUE;
4661 } else {
4662 vm_pageout_state.vm_restricted_to_single_processor = FALSE;
4663 }
4664 }
4665 }
4666
4667 void
4668 vm_pageout(void)
4669 {
4670 thread_t self = current_thread();
4671 thread_t thread;
4672 kern_return_t result;
4673 spl_t s;
4674
4675 /*
4676 * Set thread privileges.
4677 */
4678 s = splsched();
4679
4680 vm_pageout_scan_thread = self;
4681
4682 #if CONFIG_VPS_DYNAMIC_PRIO
4683
4684 int vps_dynprio_bootarg = 0;
4685
4686 if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
4687 vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
4688 kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
4689 } else {
4690 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4691 vps_dynamic_priority_enabled = TRUE;
4692 } else {
4693 vps_dynamic_priority_enabled = FALSE;
4694 }
4695 }
4696
4697 if (vps_dynamic_priority_enabled) {
4698 sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
4699 thread_set_eager_preempt(self);
4700 } else {
4701 sched_set_kernel_thread_priority(self, BASEPRI_VM);
4702 }
4703
4704 #else /* CONFIG_VPS_DYNAMIC_PRIO */
4705
4706 vps_dynamic_priority_enabled = FALSE;
4707 sched_set_kernel_thread_priority(self, BASEPRI_VM);
4708
4709 #endif /* CONFIG_VPS_DYNAMIC_PRIO */
4710
4711 thread_lock(self);
4712 self->options |= TH_OPT_VMPRIV;
4713 thread_unlock(self);
4714
4715 if (!self->reserved_stack) {
4716 self->reserved_stack = self->kernel_stack;
4717 }
4718
4719 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
4720 vps_dynamic_priority_enabled == FALSE) {
4721 thread_vm_bind_group_add();
4722 }
4723
4724
4725
4726
4727 splx(s);
4728
4729 thread_set_thread_name(current_thread(), "VM_pageout_scan");
4730
4731 /*
4732 * Initialize some paging parameters.
4733 */
4734
4735 vm_pageout_state.vm_pressure_thread_running = FALSE;
4736 vm_pageout_state.vm_pressure_changed = FALSE;
4737 vm_pageout_state.memorystatus_purge_on_warning = 2;
4738 vm_pageout_state.memorystatus_purge_on_urgent = 5;
4739 vm_pageout_state.memorystatus_purge_on_critical = 8;
4740 vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
4741 vm_pageout_state.vm_page_speculative_percentage = 5;
4742 vm_pageout_state.vm_page_speculative_target = 0;
4743
4744 vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
4745 vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
4746
4747 vm_pageout_state.vm_pageout_swap_wait = 0;
4748 vm_pageout_state.vm_pageout_idle_wait = 0;
4749 vm_pageout_state.vm_pageout_empty_wait = 0;
4750 vm_pageout_state.vm_pageout_burst_wait = 0;
4751 vm_pageout_state.vm_pageout_deadlock_wait = 0;
4752 vm_pageout_state.vm_pageout_deadlock_relief = 0;
4753 vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
4754
4755 vm_pageout_state.vm_pageout_inactive = 0;
4756 vm_pageout_state.vm_pageout_inactive_used = 0;
4757 vm_pageout_state.vm_pageout_inactive_clean = 0;
4758
4759 vm_pageout_state.vm_memory_pressure = 0;
4760 vm_pageout_state.vm_page_filecache_min = 0;
4761 #if CONFIG_JETSAM
4762 vm_pageout_state.vm_page_filecache_min_divisor = 70;
4763 vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
4764 #else
4765 vm_pageout_state.vm_page_filecache_min_divisor = 27;
4766 vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
4767 #endif
4768 vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
4769
4770 vm_pageout_state.vm_pageout_considered_page_last = 0;
4771
4772 if (vm_pageout_state.vm_pageout_swap_wait == 0) {
4773 vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
4774 }
4775
4776 if (vm_pageout_state.vm_pageout_idle_wait == 0) {
4777 vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
4778 }
4779
4780 if (vm_pageout_state.vm_pageout_burst_wait == 0) {
4781 vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
4782 }
4783
4784 if (vm_pageout_state.vm_pageout_empty_wait == 0) {
4785 vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
4786 }
4787
4788 if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
4789 vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
4790 }
4791
4792 if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
4793 vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
4794 }
4795
4796 if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
4797 vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
4798 }
4799 /*
4800 * even if we've already called vm_page_free_reserve,
4801 * call it again here to ensure that the targets are
4802 * accurately calculated (it uses vm_page_free_count_init)...
4803 * calling it with an arg of 0 will not change the reserve
4804 * but will re-calculate free_min and free_target
4805 */
4806 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
4807 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
4808 } else {
4809 vm_page_free_reserve(0);
4810 }
4811
4812
4813 vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
4814 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
4815 vm_pageout_queue_external.pgo_laundry = 0;
4816 vm_pageout_queue_external.pgo_idle = FALSE;
4817 vm_pageout_queue_external.pgo_busy = FALSE;
4818 vm_pageout_queue_external.pgo_throttled = FALSE;
4819 vm_pageout_queue_external.pgo_draining = FALSE;
4820 vm_pageout_queue_external.pgo_lowpriority = FALSE;
4821 vm_pageout_queue_external.pgo_tid = -1;
4822 vm_pageout_queue_external.pgo_inited = FALSE;
4823
4824 vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
4825 vm_pageout_queue_internal.pgo_maxlaundry = 0;
4826 vm_pageout_queue_internal.pgo_laundry = 0;
4827 vm_pageout_queue_internal.pgo_idle = FALSE;
4828 vm_pageout_queue_internal.pgo_busy = FALSE;
4829 vm_pageout_queue_internal.pgo_throttled = FALSE;
4830 vm_pageout_queue_internal.pgo_draining = FALSE;
4831 vm_pageout_queue_internal.pgo_lowpriority = FALSE;
4832 vm_pageout_queue_internal.pgo_tid = -1;
4833 vm_pageout_queue_internal.pgo_inited = FALSE;
4834
4835 /* internal pageout thread started when default pager registered first time */
4836 /* external pageout and garbage collection threads started here */
4837
4838 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
4839 BASEPRI_VM,
4840 &vm_pageout_state.vm_pageout_external_iothread);
4841 if (result != KERN_SUCCESS) {
4842 panic("vm_pageout_iothread_external: create failed");
4843 }
4844 thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
4845 thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
4846
4847 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
4848 BASEPRI_DEFAULT,
4849 &thread);
4850 if (result != KERN_SUCCESS) {
4851 panic("vm_pageout_garbage_collect: create failed");
4852 }
4853 thread_set_thread_name(thread, "VM_pageout_garbage_collect");
4854 thread_deallocate(thread);
4855
4856 #if VM_PRESSURE_EVENTS
4857 result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
4858 BASEPRI_DEFAULT,
4859 &thread);
4860
4861 if (result != KERN_SUCCESS) {
4862 panic("vm_pressure_thread: create failed");
4863 }
4864
4865 thread_deallocate(thread);
4866 #endif
4867
4868 vm_object_reaper_init();
4869
4870
4871 bzero(&vm_config, sizeof(vm_config));
4872
4873 switch (vm_compressor_mode) {
4874 case VM_PAGER_DEFAULT:
4875 printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
4876
4877 case VM_PAGER_COMPRESSOR_WITH_SWAP:
4878 vm_config.compressor_is_present = TRUE;
4879 vm_config.swap_is_present = TRUE;
4880 vm_config.compressor_is_active = TRUE;
4881 vm_config.swap_is_active = TRUE;
4882 break;
4883
4884 case VM_PAGER_COMPRESSOR_NO_SWAP:
4885 vm_config.compressor_is_present = TRUE;
4886 vm_config.swap_is_present = TRUE;
4887 vm_config.compressor_is_active = TRUE;
4888 break;
4889
4890 case VM_PAGER_FREEZER_DEFAULT:
4891 printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
4892
4893 case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
4894 vm_config.compressor_is_present = TRUE;
4895 vm_config.swap_is_present = TRUE;
4896 break;
4897
4898 case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
4899 vm_config.compressor_is_present = TRUE;
4900 vm_config.swap_is_present = TRUE;
4901 vm_config.compressor_is_active = TRUE;
4902 vm_config.freezer_swap_is_active = TRUE;
4903 break;
4904
4905 case VM_PAGER_NOT_CONFIGURED:
4906 break;
4907
4908 default:
4909 printf("unknown compressor mode - %x\n", vm_compressor_mode);
4910 break;
4911 }
4912 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
4913 vm_compressor_pager_init();
4914 }
4915
4916 #if VM_PRESSURE_EVENTS
4917 vm_pressure_events_enabled = TRUE;
4918 #endif /* VM_PRESSURE_EVENTS */
4919
4920 #if CONFIG_PHANTOM_CACHE
4921 vm_phantom_cache_init();
4922 #endif
4923 #if VM_PAGE_BUCKETS_CHECK
4924 #if VM_PAGE_FAKE_BUCKETS
4925 printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
4926 (uint64_t) vm_page_fake_buckets_start,
4927 (uint64_t) vm_page_fake_buckets_end);
4928 pmap_protect(kernel_pmap,
4929 vm_page_fake_buckets_start,
4930 vm_page_fake_buckets_end,
4931 VM_PROT_READ);
4932 // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
4933 #endif /* VM_PAGE_FAKE_BUCKETS */
4934 #endif /* VM_PAGE_BUCKETS_CHECK */
4935
4936 #if VM_OBJECT_TRACKING
4937 vm_object_tracking_init();
4938 #endif /* VM_OBJECT_TRACKING */
4939
4940 vm_tests();
4941
4942 vm_pageout_continue();
4943
4944 /*
4945 * Unreached code!
4946 *
4947 * The vm_pageout_continue() call above never returns, so the code below is never
4948 * executed. We take advantage of this to declare several DTrace VM related probe
4949 * points that our kernel doesn't have an analog for. These are probe points that
4950 * exist in Solaris and are in the DTrace documentation, so people may have written
4951 * scripts that use them. Declaring the probe points here means their scripts will
4952 * compile and execute which we want for portability of the scripts, but since this
4953 * section of code is never reached, the probe points will simply never fire. Yes,
4954 * this is basically a hack. The problem is the DTrace probe points were chosen with
4955 * Solaris specific VM events in mind, not portability to different VM implementations.
4956 */
4957
4958 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
4959 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
4960 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
4961 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
4962 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
4963 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
4964 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
4965 /*NOTREACHED*/
4966 }
4967
4968
4969
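/*
 * Start the internal (compressor) pageout threads.  The thread count
 * defaults to 1 on embedded configurations, or 1-2 based on CPU count
 * otherwise, clamped to [1, MAX_COMPRESSOR_THREAD_COUNT] and to fewer
 * than the number of CPUs; the "vmcomp_threads" boot-arg overrides it.
 * The internal queue's pgo_maxlaundry scales with the thread count and
 * can itself be overridden with the "vmpgoi_maxlaundry" boot-arg.
 */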
4970 kern_return_t
4971 vm_pageout_internal_start(void)
4972 {
4973 kern_return_t result;
4974 int i;
4975 host_basic_info_data_t hinfo;
4976
4977 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4978
4979 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
4980 #define BSD_HOST 1
4981 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
4982
4983 assert(hinfo.max_cpus > 0);
4984
4985 lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL);
4986
4987 #if CONFIG_EMBEDDED
4988 vm_pageout_state.vm_compressor_thread_count = 1;
4989 #else
4990 if (hinfo.max_cpus > 4) {
4991 vm_pageout_state.vm_compressor_thread_count = 2;
4992 } else {
4993 vm_pageout_state.vm_compressor_thread_count = 1;
4994 }
4995 #endif
4996 PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
4997 sizeof(vm_pageout_state.vm_compressor_thread_count));
4998
4999 if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
5000 vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
5001 }
5002 if (vm_pageout_state.vm_compressor_thread_count <= 0) {
5003 vm_pageout_state.vm_compressor_thread_count = 1;
5004 } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
5005 vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
5006 }
5007
5008 vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
5009
5010 PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
5011
5012 for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
5013 ciq[i].id = i;
5014 ciq[i].q = &vm_pageout_queue_internal;
5015 ciq[i].current_chead = NULL;
5016 ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
5017
5018 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i],
5019 BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread);
5020
5021 if (result == KERN_SUCCESS) {
5022 thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
5023 } else {
5024 break;
5025 }
5026 }
5027 return result;
5028 }
5029
5030 #if CONFIG_IOSCHED
5031 /*
5032 * To support I/O Expedite for compressed files we mark the upls with special flags.
5033 * The way decmpfs works is that we create a big upl which marks all the pages needed to
5034 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
5035 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
5036 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
5037 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
5038 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
5039 * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
5040 * unless the real I/O upl is being destroyed).
5041 */
5042
5043
5044 static void
5045 upl_set_decmp_info(upl_t upl, upl_t src_upl)
5046 {
5047 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5048
5049 upl_lock(src_upl);
5050 if (src_upl->decmp_io_upl) {
5051 /*
5052 * If there is already a live real I/O UPL, ignore this new UPL.
5053 * This case should rarely happen and even if it does, it just means
5054 * that we might issue a spurious expedite which the driver is expected
5055 * to handle.
5056 */
5057 upl_unlock(src_upl);
5058 return;
5059 }
5060 src_upl->decmp_io_upl = (void *)upl;
5061 src_upl->ref_count++;
5062
5063 upl->flags |= UPL_DECMP_REAL_IO;
5064 upl->decmp_io_upl = (void *)src_upl;
5065 upl_unlock(src_upl);
5066 }
5067 #endif /* CONFIG_IOSCHED */
5068
5069 #if UPL_DEBUG
5070 int upl_debug_enabled = 1;
5071 #else
5072 int upl_debug_enabled = 0;
5073 #endif
5074
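/*
 * Allocate and initialize a upl.  For UPL_CREATE_INTERNAL the
 * upl_page_info array covering 'size' is allocated inline after the upl
 * structure; for UPL_CREATE_LITE a bitmap with one bit per page (rounded
 * up to a multiple of 4 bytes) is appended and zeroed.  For example, a
 * 1MB lite upl covers 256 pages (with 4K pages) and so carries a 32 byte
 * bitmap.
 */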
5075 static upl_t
5076 upl_create(int type, int flags, upl_size_t size)
5077 {
5078 upl_t upl;
5079 vm_size_t page_field_size = 0;
5080 int upl_flags = 0;
5081 vm_size_t upl_size = sizeof(struct upl);
5082
5083 size = round_page_32(size);
5084
5085 if (type & UPL_CREATE_LITE) {
5086 page_field_size = (atop(size) + 7) >> 3;
5087 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5088
5089 upl_flags |= UPL_LITE;
5090 }
5091 if (type & UPL_CREATE_INTERNAL) {
5092 upl_size += sizeof(struct upl_page_info) * atop(size);
5093
5094 upl_flags |= UPL_INTERNAL;
5095 }
5096 upl = (upl_t)kalloc(upl_size + page_field_size);
5097
5098 if (page_field_size) {
5099 bzero((char *)upl + upl_size, page_field_size);
5100 }
5101
5102 upl->flags = upl_flags | flags;
5103 upl->kaddr = (vm_offset_t)0;
5104 upl->size = 0;
5105 upl->map_object = NULL;
5106 upl->ref_count = 1;
5107 upl->ext_ref_count = 0;
5108 upl->highest_page = 0;
5109 upl_lock_init(upl);
5110 upl->vector_upl = NULL;
5111 upl->associated_upl = NULL;
5112 upl->upl_iodone = NULL;
5113 #if CONFIG_IOSCHED
5114 if (type & UPL_CREATE_IO_TRACKING) {
5115 upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
5116 }
5117
5118 upl->upl_reprio_info = 0;
5119 upl->decmp_io_upl = 0;
5120 if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
5121 /* Only support expedite on internal UPLs */
5122 thread_t curthread = current_thread();
5123 upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
5124 bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
5125 upl->flags |= UPL_EXPEDITE_SUPPORTED;
5126 if (curthread->decmp_upl != NULL) {
5127 upl_set_decmp_info(upl, curthread->decmp_upl);
5128 }
5129 }
5130 #endif
5131 #if CONFIG_IOSCHED || UPL_DEBUG
5132 if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
5133 upl->upl_creator = current_thread();
5134 upl->uplq.next = 0;
5135 upl->uplq.prev = 0;
5136 upl->flags |= UPL_TRACKED_BY_OBJECT;
5137 }
5138 #endif
5139
5140 #if UPL_DEBUG
5141 upl->ubc_alias1 = 0;
5142 upl->ubc_alias2 = 0;
5143
5144 upl->upl_state = 0;
5145 upl->upl_commit_index = 0;
5146 bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
5147
5148 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
5149 #endif /* UPL_DEBUG */
5150
5151 return upl;
5152 }
5153
5154 static void
5155 upl_destroy(upl_t upl)
5156 {
5157 int page_field_size; /* bit field in word size buf */
5158 int size;
5159
5160 if (upl->ext_ref_count) {
5161 panic("upl(%p) ext_ref_count", upl);
5162 }
5163
5164 #if CONFIG_IOSCHED
5165 if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
5166 upl_t src_upl;
5167 src_upl = upl->decmp_io_upl;
5168 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
5169 upl_lock(src_upl);
5170 src_upl->decmp_io_upl = NULL;
5171 upl_unlock(src_upl);
5172 upl_deallocate(src_upl);
5173 }
5174 #endif /* CONFIG_IOSCHED */
5175
5176 #if CONFIG_IOSCHED || UPL_DEBUG
5177 if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
5178 vm_object_t object;
5179
5180 if (upl->flags & UPL_SHADOWED) {
5181 object = upl->map_object->shadow;
5182 } else {
5183 object = upl->map_object;
5184 }
5185
5186 vm_object_lock(object);
5187 queue_remove(&object->uplq, upl, upl_t, uplq);
5188 vm_object_activity_end(object);
5189 vm_object_collapse(object, 0, TRUE);
5190 vm_object_unlock(object);
5191 }
5192 #endif
5193 /*
5194 * drop a reference on the map_object whether or
5195 * not a pageout object is inserted
5196 */
5197 if (upl->flags & UPL_SHADOWED) {
5198 vm_object_deallocate(upl->map_object);
5199 }
5200
5201 if (upl->flags & UPL_DEVICE_MEMORY) {
5202 size = PAGE_SIZE;
5203 } else {
5204 size = upl->size;
5205 }
5206 page_field_size = 0;
5207
5208 if (upl->flags & UPL_LITE) {
5209 page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
5210 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
5211 }
5212 upl_lock_destroy(upl);
5213 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
5214
5215 #if CONFIG_IOSCHED
5216 if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
5217 kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
5218 }
5219 #endif
5220
5221 if (upl->flags & UPL_INTERNAL) {
5222 kfree(upl,
5223 sizeof(struct upl) +
5224 (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
5225 + page_field_size);
5226 } else {
5227 kfree(upl, sizeof(struct upl) + page_field_size);
5228 }
5229 }
5230
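/*
 * Drop a reference on a upl.  When the last reference goes away, any
 * vector upl state is torn down, the iodone callout (if registered)
 * fires, and the upl is destroyed.
 */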
5231 void
5232 upl_deallocate(upl_t upl)
5233 {
5234 upl_lock(upl);
5235
5236 if (--upl->ref_count == 0) {
5237 if (vector_upl_is_valid(upl)) {
5238 vector_upl_deallocate(upl);
5239 }
5240 upl_unlock(upl);
5241
5242 if (upl->upl_iodone) {
5243 upl_callout_iodone(upl);
5244 }
5245
5246 upl_destroy(upl);
5247 } else {
5248 upl_unlock(upl);
5249 }
5250 }
5251
5252 #if CONFIG_IOSCHED
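/*
 * upl_mark_decmp / upl_unmark_decmp:
 * Record (or clear) the creating thread's current decompression UPL,
 * so that a subsequent I/O UPL created by the same thread can be
 * linked back to it (see the UPL_CREATE_EXPEDITE_SUP path in
 * upl_create) for expedite/reprioritization purposes.
 */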
5253 void
5254 upl_mark_decmp(upl_t upl)
5255 {
5256 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
5257 upl->flags |= UPL_DECMP_REQ;
5258 upl->upl_creator->decmp_upl = (void *)upl;
5259 }
5260 }
5261
5262 void
5263 upl_unmark_decmp(upl_t upl)
5264 {
5265 if (upl && (upl->flags & UPL_DECMP_REQ)) {
5266 upl->upl_creator->decmp_upl = NULL;
5267 }
5268 }
5269
5270 #endif /* CONFIG_IOSCHED */
5271
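/*
 * A pageout queue is considered to be backing up once its laundry
 * count reaches 80% of its configured maximum; for example, with
 * pgo_maxlaundry == 128 the threshold is (128 * 8) / 10 == 102
 * laundry pages.
 */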
5272 #define VM_PAGE_Q_BACKING_UP(q) \
5273 ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
5274
5275 boolean_t must_throttle_writes(void);
5276
5277 boolean_t
5278 must_throttle_writes()
5279 {
5280 if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
5281 vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
5282 return TRUE;
5283 }
5284
5285 return FALSE;
5286 }
5287
5288
5289 /*
5290 * Routine: vm_object_upl_request
5291 * Purpose:
5292 * Cause the population of a portion of a vm_object.
5293 * Depending on the nature of the request, the pages
5294 * returned may contain valid data or be uninitialized.
5295 * A page list structure, listing the physical pages,
5296 * will be returned upon request.
5297 * This function is called by the file system or any other
5298 * supplier of backing store to a pager.
5299 * IMPORTANT NOTE: The caller must still respect the relationship
5300 * between the vm_object and its backing memory object. The
5301 * caller MUST NOT substitute changes in the backing file
5302 * without first doing a memory_object_lock_request on the
5303 * target range unless it is known that the pages are not
5304 * shared with another entity at the pager level.
5305 * Copy_in_to:
5306 * if a page list structure is present
5307 * return the mapped physical pages, where a
5308 * page is not present, return a non-initialized
5309 * one. If the no_sync bit is turned on, don't
5310 * call the pager unlock to synchronize with other
5311 * possible copies of the page. Leave pages busy
5312 * in the original object, if a page list structure
5313 * was specified. When a commit of the page list
5314 * pages is done, the dirty bit will be set for each one.
5315 * Copy_out_from:
5316 * If a page list structure is present, return
5317 * all mapped pages. Where a page does not exist
5318 * map a zero filled one. Leave pages busy in
5319 * the original object. If a page list structure
5320 * is not specified, this call is a no-op.
5321 *
5322 * Note: access of default pager objects has a rather interesting
5323 * twist. The caller of this routine, presumably the file system
5324 * page cache handling code, will never actually make a request
5325 * against a default pager backed object. Only the default
5326 * pager will make requests on backing store related vm_objects.
5327 * In this way the default pager can maintain the relationship
5328 * between backing store files (abstract memory objects) and
5329 * the vm_objects (cache objects) they support.
5330 *
5331 */
5332
5333 __private_extern__ kern_return_t
5334 vm_object_upl_request(
5335 vm_object_t object,
5336 vm_object_offset_t offset,
5337 upl_size_t size,
5338 upl_t *upl_ptr,
5339 upl_page_info_array_t user_page_list,
5340 unsigned int *page_list_count,
5341 upl_control_flags_t cntrl_flags,
5342 vm_tag_t tag)
5343 {
5344 vm_page_t dst_page = VM_PAGE_NULL;
5345 vm_object_offset_t dst_offset;
5346 upl_size_t xfer_size;
5347 unsigned int size_in_pages;
5348 boolean_t dirty;
5349 boolean_t hw_dirty;
5350 upl_t upl = NULL;
5351 unsigned int entry;
5352 vm_page_t alias_page = NULL;
5353 int refmod_state = 0;
5354 wpl_array_t lite_list = NULL;
5355 vm_object_t last_copy_object;
5356 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
5357 struct vm_page_delayed_work *dwp;
5358 int dw_count;
5359 int dw_limit;
5360 int io_tracking_flag = 0;
5361 int grab_options;
5362 int page_grab_count = 0;
5363 ppnum_t phys_page;
5364 pmap_flush_context pmap_flush_context_storage;
5365 boolean_t pmap_flushes_delayed = FALSE;
5366 #if DEVELOPMENT || DEBUG
5367 task_t task = current_task();
5368 #endif /* DEVELOPMENT || DEBUG */
5369
5370 if (cntrl_flags & ~UPL_VALID_FLAGS) {
5371 /*
5372 * For forward compatibility's sake,
5373 * reject any unknown flag.
5374 */
5375 return KERN_INVALID_VALUE;
5376 }
5377 if ((!object->internal) && (object->paging_offset != 0)) {
5378 panic("vm_object_upl_request: external object with non-zero paging offset\n");
5379 }
5380 if (object->phys_contiguous) {
5381 panic("vm_object_upl_request: contiguous object specified\n");
5382 }
5383
5384 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
5385
5386 if (size > MAX_UPL_SIZE_BYTES) {
5387 size = MAX_UPL_SIZE_BYTES;
5388 }
5389
5390 if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
5391 *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
5392 }
5393
5394 #if CONFIG_IOSCHED || UPL_DEBUG
5395 if (object->io_tracking || upl_debug_enabled) {
5396 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
5397 }
5398 #endif
5399 #if CONFIG_IOSCHED
5400 if (object->io_tracking) {
5401 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
5402 }
5403 #endif
5404
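/*
 * upl_create lays the upl_page_info array (for internal UPLs) and the
 * "lite" working-page bitmap out immediately after the upl structure
 * itself, so the pointers below are computed from the upl address
 * rather than allocated separately; zero-size requests simply leave
 * them NULL.
 */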
5405 if (cntrl_flags & UPL_SET_INTERNAL) {
5406 if (cntrl_flags & UPL_SET_LITE) {
5407 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5408
5409 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5410 lite_list = (wpl_array_t)
5411 (((uintptr_t)user_page_list) +
5412 ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
5413 if (size == 0) {
5414 user_page_list = NULL;
5415 lite_list = NULL;
5416 }
5417 } else {
5418 upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
5419
5420 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
5421 if (size == 0) {
5422 user_page_list = NULL;
5423 }
5424 }
5425 } else {
5426 if (cntrl_flags & UPL_SET_LITE) {
5427 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
5428
5429 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
5430 if (size == 0) {
5431 lite_list = NULL;
5432 }
5433 } else {
5434 upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
5435 }
5436 }
5437 *upl_ptr = upl;
5438
5439 if (user_page_list) {
5440 user_page_list[0].device = FALSE;
5441 }
5442
5443 if (cntrl_flags & UPL_SET_LITE) {
5444 upl->map_object = object;
5445 } else {
5446 upl->map_object = vm_object_allocate(size);
5447 /*
5448 * No need to lock the new object: nobody else knows
5449 * about it yet, so it's all ours so far.
5450 */
5451 upl->map_object->shadow = object;
5452 upl->map_object->pageout = TRUE;
5453 upl->map_object->can_persist = FALSE;
5454 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5455 upl->map_object->vo_shadow_offset = offset;
5456 upl->map_object->wimg_bits = object->wimg_bits;
5457
5458 VM_PAGE_GRAB_FICTITIOUS(alias_page);
5459
5460 upl->flags |= UPL_SHADOWED;
5461 }
5462 if (cntrl_flags & UPL_FOR_PAGEOUT) {
5463 upl->flags |= UPL_PAGEOUT;
5464 }
5465
5466 vm_object_lock(object);
5467 vm_object_activity_begin(object);
5468
5469 grab_options = 0;
5470 #if CONFIG_SECLUDED_MEMORY
5471 if (object->can_grab_secluded) {
5472 grab_options |= VM_PAGE_GRAB_SECLUDED;
5473 }
5474 #endif /* CONFIG_SECLUDED_MEMORY */
5475
5476 /*
5477 * we can lock in the paging_offset once paging_in_progress is set
5478 */
5479 upl->size = size;
5480 upl->offset = offset + object->paging_offset;
5481
5482 #if CONFIG_IOSCHED || UPL_DEBUG
5483 if (object->io_tracking || upl_debug_enabled) {
5484 vm_object_activity_begin(object);
5485 queue_enter(&object->uplq, upl, upl_t, uplq);
5486 }
5487 #endif
5488 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
5489 /*
5490 * Honor copy-on-write obligations
5491 *
5492 * The caller is gathering these pages and
5493 * might modify their contents. We need to
5494 * make sure that the copy object has its own
5495 * private copies of these pages before we let
5496 * the caller modify them.
5497 */
5498 vm_object_update(object,
5499 offset,
5500 size,
5501 NULL,
5502 NULL,
5503 FALSE, /* should_return */
5504 MEMORY_OBJECT_COPY_SYNC,
5505 VM_PROT_NO_CHANGE);
5506
5507 VM_PAGEOUT_DEBUG(upl_cow, 1);
5508 VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
5509 }
5510 /*
5511 * remember which copy object we synchronized with
5512 */
5513 last_copy_object = object->copy;
5514 entry = 0;
5515
5516 xfer_size = size;
5517 dst_offset = offset;
5518 size_in_pages = size / PAGE_SIZE;
5519
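/*
 * Page-queue work (activation, wiring, reference hints, etc.) is
 * batched into dw_array and flushed through vm_page_do_delayed_work
 * once dw_limit entries accumulate, so the page-queue lock is taken
 * once per batch rather than once per page.
 */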
5520 dwp = &dw_array[0];
5521 dw_count = 0;
5522 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5523
5524 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
5525 object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
5526 object->scan_collisions = 0;
5527 }
5528
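/*
 * If the caller intends to modify these pages and the external
 * pageout queue is backing up (see must_throttle_writes above),
 * briefly stall the requester so the pageout threads can catch up.
 * The delay scales with the number of pages in this request and is
 * shorter for SSD-backed stores (embedded configs treat all storage
 * as SSD).
 */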
5529 if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
5530 boolean_t isSSD = FALSE;
5531
5532 #if CONFIG_EMBEDDED
5533 isSSD = TRUE;
5534 #else
5535 vnode_pager_get_isSSD(object->pager, &isSSD);
5536 #endif
5537 vm_object_unlock(object);
5538
5539 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5540
5541 if (isSSD == TRUE) {
5542 delay(1000 * size_in_pages);
5543 } else {
5544 delay(5000 * size_in_pages);
5545 }
5546 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5547
5548 vm_object_lock(object);
5549 }
5550
5551 while (xfer_size) {
5552 dwp->dw_mask = 0;
5553
5554 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
5555 vm_object_unlock(object);
5556 VM_PAGE_GRAB_FICTITIOUS(alias_page);
5557 vm_object_lock(object);
5558 }
5559 if (cntrl_flags & UPL_COPYOUT_FROM) {
5560 upl->flags |= UPL_PAGE_SYNC_DONE;
5561
5562 if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
5563 dst_page->vmp_fictitious ||
5564 dst_page->vmp_absent ||
5565 dst_page->vmp_error ||
5566 dst_page->vmp_cleaning ||
5567 (VM_PAGE_WIRED(dst_page))) {
5568 if (user_page_list) {
5569 user_page_list[entry].phys_addr = 0;
5570 }
5571
5572 goto try_next_page;
5573 }
5574 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5575
5576 /*
5577 * grab this up front...
5578 * a high percentage of the time we're going to
5579 * need the hardware modification state a bit later
5580 * anyway... so we can eliminate an extra call into
5581 * the pmap layer by grabbing it here and recording it
5582 */
5583 if (dst_page->vmp_pmapped) {
5584 refmod_state = pmap_get_refmod(phys_page);
5585 } else {
5586 refmod_state = 0;
5587 }
5588
5589 if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
5590 /*
5591 * page is on inactive list and referenced...
5592 * reactivate it now... this gets it out of the
5593 * way of vm_pageout_scan which would have to
5594 * reactivate it upon tripping over it
5595 */
5596 dwp->dw_mask |= DW_vm_page_activate;
5597 }
5598 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
5599 /*
5600 * we're only asking for DIRTY pages to be returned
5601 */
5602 if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
5603 /*
5604 * if this page was stolen by vm_pageout_scan to be
5605 * cleaned (as opposed to a buddy being clustered in),
5606 * or this request is not being driven by a PAGEOUT cluster,
5607 * then we only need to check for the page being dirty or
5608 * precious to decide whether to return it
5609 */
5610 if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
5611 goto check_busy;
5612 }
5613 goto dont_return;
5614 }
5615 /*
5616 * this is a request for a PAGEOUT cluster and this page
5617 * is merely along for the ride as a 'buddy'... not only
5618 * does it have to be dirty to be returned, but it also
5619 * can't have been referenced recently...
5620 */
5621 if ((hibernate_cleaning_in_progress == TRUE ||
5622 (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
5623 (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
5624 ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
5625 goto check_busy;
5626 }
5627 dont_return:
5628 /*
5629 * if we reach here, we're not to return
5630 * the page... go on to the next one
5631 */
5632 if (dst_page->vmp_laundry == TRUE) {
5633 /*
5634 * if we get here, the page is not 'cleaning' (filtered out above).
5635 * since it has been referenced, remove it from the laundry
5636 * so we don't pay the cost of an I/O to clean a page
5637 * we're just going to take back
5638 */
5639 vm_page_lockspin_queues();
5640
5641 vm_pageout_steal_laundry(dst_page, TRUE);
5642 vm_page_activate(dst_page);
5643
5644 vm_page_unlock_queues();
5645 }
5646 if (user_page_list) {
5647 user_page_list[entry].phys_addr = 0;
5648 }
5649
5650 goto try_next_page;
5651 }
5652 check_busy:
5653 if (dst_page->vmp_busy) {
5654 if (cntrl_flags & UPL_NOBLOCK) {
5655 if (user_page_list) {
5656 user_page_list[entry].phys_addr = 0;
5657 }
5658 dwp->dw_mask = 0;
5659
5660 goto try_next_page;
5661 }
5662 /*
5663 * someone else is playing with the
5664 * page. We will have to wait.
5665 */
5666 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5667
5668 continue;
5669 }
5670 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5671 vm_page_lockspin_queues();
5672
5673 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5674 /*
5675 * we've buddied up a page for a clustered pageout
5676 * that has already been moved to the pageout
5677 * queue by pageout_scan... we need to remove
5678 * it from the queue and drop the laundry count
5679 * on that queue
5680 */
5681 vm_pageout_throttle_up(dst_page);
5682 }
5683 vm_page_unlock_queues();
5684 }
5685 hw_dirty = refmod_state & VM_MEM_MODIFIED;
5686 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5687
5688 if (phys_page > upl->highest_page) {
5689 upl->highest_page = phys_page;
5690 }
5691
5692 assert(!pmap_is_noencrypt(phys_page));
5693
5694 if (cntrl_flags & UPL_SET_LITE) {
5695 unsigned int pg_num;
5696
5697 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5698 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
5699 lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
5700
5701 if (hw_dirty) {
5702 if (pmap_flushes_delayed == FALSE) {
5703 pmap_flush_context_init(&pmap_flush_context_storage);
5704 pmap_flushes_delayed = TRUE;
5705 }
5706 pmap_clear_refmod_options(phys_page,
5707 VM_MEM_MODIFIED,
5708 PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
5709 &pmap_flush_context_storage);
5710 }
5711
5712 /*
5713 * Mark original page as cleaning
5714 * in place.
5715 */
5716 dst_page->vmp_cleaning = TRUE;
5717 dst_page->vmp_precious = FALSE;
5718 } else {
5719 /*
5720 * use pageclean setup, it is more
5721 * convenient even for the pageout
5722 * cases here
5723 */
5724 vm_object_lock(upl->map_object);
5725 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5726 vm_object_unlock(upl->map_object);
5727
5728 alias_page->vmp_absent = FALSE;
5729 alias_page = NULL;
5730 }
5731 if (dirty) {
5732 SET_PAGE_DIRTY(dst_page, FALSE);
5733 } else {
5734 dst_page->vmp_dirty = FALSE;
5735 }
5736
5737 if (!dirty) {
5738 dst_page->vmp_precious = TRUE;
5739 }
5740
5741 if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
5742 if (!VM_PAGE_WIRED(dst_page)) {
5743 dst_page->vmp_free_when_done = TRUE;
5744 }
5745 }
5746 } else {
5747 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
5748 /*
5749 * Honor copy-on-write obligations
5750 *
5751 * The copy object has changed since we
5752 * last synchronized for copy-on-write.
5753 * Another copy object might have been
5754 * inserted while we released the object's
5755 * lock. Since someone could have seen the
5756 * original contents of the remaining pages
5757 * through that new object, we have to
5758 * synchronize with it again for the remaining
5759 * pages only. The previous pages are "busy"
5760 * so they can not be seen through the new
5761 * mapping. The new mapping will see our
5762 * upcoming changes for those previous pages,
5763 * but that's OK since they couldn't see what
5764 * was there before. It's just a race anyway
5765 * and there's no guarantee of consistency or
5766 * atomicity. We just don't want new mappings
5767 * to see both the *before* and *after* pages.
5768 */
5769 if (object->copy != VM_OBJECT_NULL) {
5770 vm_object_update(
5771 object,
5772 dst_offset,/* current offset */
5773 xfer_size, /* remaining size */
5774 NULL,
5775 NULL,
5776 FALSE, /* should_return */
5777 MEMORY_OBJECT_COPY_SYNC,
5778 VM_PROT_NO_CHANGE);
5779
5780 VM_PAGEOUT_DEBUG(upl_cow_again, 1);
5781 VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
5782 }
5783 /*
5784 * remember the copy object we synced with
5785 */
5786 last_copy_object = object->copy;
5787 }
5788 dst_page = vm_page_lookup(object, dst_offset);
5789
5790 if (dst_page != VM_PAGE_NULL) {
5791 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
5792 /*
5793 * skip over pages already present in the cache
5794 */
5795 if (user_page_list) {
5796 user_page_list[entry].phys_addr = 0;
5797 }
5798
5799 goto try_next_page;
5800 }
5801 if (dst_page->vmp_fictitious) {
5802 panic("need corner case for fictitious page");
5803 }
5804
5805 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
5806 /*
5807 * someone else is playing with the
5808 * page. We will have to wait.
5809 */
5810 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5811
5812 continue;
5813 }
5814 if (dst_page->vmp_laundry) {
5815 vm_pageout_steal_laundry(dst_page, FALSE);
5816 }
5817 } else {
5818 if (object->private) {
5819 /*
5820 * This is a nasty wrinkle for users
5821 * of upl who encounter device or
5822 * private memory; however, it is
5823 * unavoidable, since only a fault can
5824 * resolve the actual backing
5825 * physical page by asking the
5826 * backing device.
5827 */
5828 if (user_page_list) {
5829 user_page_list[entry].phys_addr = 0;
5830 }
5831
5832 goto try_next_page;
5833 }
5834 if (object->scan_collisions) {
5835 /*
5836 * the pageout_scan thread is trying to steal
5837 * pages from this object, but has run into our
5838 * lock... grab 2 pages from the head of the object...
5839 * the first is freed on behalf of pageout_scan, the
5840 * 2nd is for our own use... we use vm_object_page_grab
5841 * in both cases to avoid taking pages from the free
5842 * list since we are under memory pressure and our
5843 * lock on this object is getting in the way of
5844 * relieving it
5845 */
5846 dst_page = vm_object_page_grab(object);
5847
5848 if (dst_page != VM_PAGE_NULL) {
5849 vm_page_release(dst_page,
5850 FALSE);
5851 }
5852
5853 dst_page = vm_object_page_grab(object);
5854 }
5855 if (dst_page == VM_PAGE_NULL) {
5856 /*
5857 * need to allocate a page
5858 */
5859 dst_page = vm_page_grab_options(grab_options);
5860 if (dst_page != VM_PAGE_NULL) {
5861 page_grab_count++;
5862 }
5863 }
5864 if (dst_page == VM_PAGE_NULL) {
5865 if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
5866 /*
5867 * we don't want to stall waiting for pages to come onto the free list
5868 * while we're already holding absent pages in this UPL...
5869 * the caller will deal with the empty slots
5870 */
5871 if (user_page_list) {
5872 user_page_list[entry].phys_addr = 0;
5873 }
5874
5875 goto try_next_page;
5876 }
5877 /*
5878 * no pages available... wait
5879 * then try again for the same
5880 * offset...
5881 */
5882 vm_object_unlock(object);
5883
5884 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5885
5886 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
5887
5888 VM_PAGE_WAIT();
5889 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5890
5891 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
5892
5893 vm_object_lock(object);
5894
5895 continue;
5896 }
5897 vm_page_insert(dst_page, object, dst_offset);
5898
5899 dst_page->vmp_absent = TRUE;
5900 dst_page->vmp_busy = FALSE;
5901
5902 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
5903 /*
5904 * if UPL_RET_ONLY_ABSENT was specified,
5905 * then we're definitely setting up a
5906 * UPL for a clustered read/pagein
5907 * operation... mark the pages as clustered
5908 * so upl_commit_range can put them on the
5909 * speculative list
5910 */
5911 dst_page->vmp_clustered = TRUE;
5912
5913 if (!(cntrl_flags & UPL_FILE_IO)) {
5914 VM_STAT_INCR(pageins);
5915 }
5916 }
5917 }
5918 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5919
5920 dst_page->vmp_overwriting = TRUE;
5921
5922 if (dst_page->vmp_pmapped) {
5923 if (!(cntrl_flags & UPL_FILE_IO)) {
5924 /*
5925 * eliminate all mappings from the
5926 * original object and its progeny
5927 */
5928 refmod_state = pmap_disconnect(phys_page);
5929 } else {
5930 refmod_state = pmap_get_refmod(phys_page);
5931 }
5932 } else {
5933 refmod_state = 0;
5934 }
5935
5936 hw_dirty = refmod_state & VM_MEM_MODIFIED;
5937 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5938
5939 if (cntrl_flags & UPL_SET_LITE) {
5940 unsigned int pg_num;
5941
5942 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5943 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
5944 lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
5945
5946 if (hw_dirty) {
5947 pmap_clear_modify(phys_page);
5948 }
5949
5950 /*
5951 * Mark original page as cleaning
5952 * in place.
5953 */
5954 dst_page->vmp_cleaning = TRUE;
5955 dst_page->vmp_precious = FALSE;
5956 } else {
5957 /*
5958 * use pageclean setup, it is more
5959 * convenient even for the pageout
5960 * cases here
5961 */
5962 vm_object_lock(upl->map_object);
5963 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5964 vm_object_unlock(upl->map_object);
5965
5966 alias_page->vmp_absent = FALSE;
5967 alias_page = NULL;
5968 }
5969
5970 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
5971 upl->flags &= ~UPL_CLEAR_DIRTY;
5972 upl->flags |= UPL_SET_DIRTY;
5973 dirty = TRUE;
5974 /*
5975 * Page belonging to a code-signed object is about to
5976 * be written. Mark it tainted and disconnect it from
5977 * all pmaps so processes have to fault it back in and
5978 * deal with the tainted bit.
5979 */
5980 if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) {
5981 dst_page->vmp_cs_tainted = TRUE;
5982 vm_page_upl_tainted++;
5983 if (dst_page->vmp_pmapped) {
5984 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
5985 if (refmod_state & VM_MEM_REFERENCED) {
5986 dst_page->vmp_reference = TRUE;
5987 }
5988 }
5989 }
5990 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
5991 /*
5992 * clean in place for read implies
5993 * that a write will be done on all
5994 * the pages that are dirty before
5995 * a UPL commit is done. The caller
5996 * is obligated to preserve the
5997 * contents of all pages marked dirty
5998 */
5999 upl->flags |= UPL_CLEAR_DIRTY;
6000 }
6001 dst_page->vmp_dirty = dirty;
6002
6003 if (!dirty) {
6004 dst_page->vmp_precious = TRUE;
6005 }
6006
6007 if (!VM_PAGE_WIRED(dst_page)) {
6008 /*
6009 * deny access to the target page while
6010 * it is being worked on
6011 */
6012 dst_page->vmp_busy = TRUE;
6013 } else {
6014 dwp->dw_mask |= DW_vm_page_wire;
6015 }
6016
6017 /*
6018 * We might be about to satisfy a fault which has been
6019 * requested. So no need for the "restart" bit.
6020 */
6021 dst_page->vmp_restart = FALSE;
6022 if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
6023 /*
6024 * expect the page to be used
6025 */
6026 dwp->dw_mask |= DW_set_reference;
6027 }
6028 if (cntrl_flags & UPL_PRECIOUS) {
6029 if (object->internal) {
6030 SET_PAGE_DIRTY(dst_page, FALSE);
6031 dst_page->vmp_precious = FALSE;
6032 } else {
6033 dst_page->vmp_precious = TRUE;
6034 }
6035 } else {
6036 dst_page->vmp_precious = FALSE;
6037 }
6038 }
6039 if (dst_page->vmp_busy) {
6040 upl->flags |= UPL_HAS_BUSY;
6041 }
6042
6043 if (phys_page > upl->highest_page) {
6044 upl->highest_page = phys_page;
6045 }
6046 assert(!pmap_is_noencrypt(phys_page));
6047 if (user_page_list) {
6048 user_page_list[entry].phys_addr = phys_page;
6049 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
6050 user_page_list[entry].absent = dst_page->vmp_absent;
6051 user_page_list[entry].dirty = dst_page->vmp_dirty;
6052 user_page_list[entry].precious = dst_page->vmp_precious;
6053 user_page_list[entry].device = FALSE;
6054 user_page_list[entry].needed = FALSE;
6055 if (dst_page->vmp_clustered == TRUE) {
6056 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
6057 } else {
6058 user_page_list[entry].speculative = FALSE;
6059 }
6060 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
6061 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
6062 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
6063 user_page_list[entry].mark = FALSE;
6064 }
6065 /*
6066 * if UPL_RET_ONLY_ABSENT is set, then
6067 * we are working with a fresh page and we've
6068 * just set the clustered flag on it to
6069 * indicate that it was dragged in as part of a
6070 * speculative cluster... so leave it alone
6071 */
6072 if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
6073 /*
6074 * someone is explicitly grabbing this page...
6075 * update clustered and speculative state
6076 *
6077 */
6078 if (dst_page->vmp_clustered) {
6079 VM_PAGE_CONSUME_CLUSTERED(dst_page);
6080 }
6081 }
6082 try_next_page:
6083 if (dwp->dw_mask) {
6084 if (dwp->dw_mask & DW_vm_page_activate) {
6085 VM_STAT_INCR(reactivations);
6086 }
6087
6088 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
6089
6090 if (dw_count >= dw_limit) {
6091 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
6092
6093 dwp = &dw_array[0];
6094 dw_count = 0;
6095 }
6096 }
6097 entry++;
6098 dst_offset += PAGE_SIZE_64;
6099 xfer_size -= PAGE_SIZE;
6100 }
6101 if (dw_count) {
6102 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
6103 }
6104
6105 if (alias_page != NULL) {
6106 VM_PAGE_FREE(alias_page);
6107 }
6108 if (pmap_flushes_delayed == TRUE) {
6109 pmap_flush(&pmap_flush_context_storage);
6110 }
6111
6112 if (page_list_count != NULL) {
6113 if (upl->flags & UPL_INTERNAL) {
6114 *page_list_count = 0;
6115 } else if (*page_list_count > entry) {
6116 *page_list_count = entry;
6117 }
6118 }
6119 #if UPL_DEBUG
6120 upl->upl_state = 1;
6121 #endif
6122 vm_object_unlock(object);
6123
6124 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
6125 #if DEVELOPMENT || DEBUG
6126 if (task != NULL) {
6127 ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
6128 }
6129 #endif /* DEVELOPMENT || DEBUG */
6130
6131 return KERN_SUCCESS;
6132 }
6133
6134 /*
6135 * Routine: vm_object_super_upl_request
6136 * Purpose:
6137 * Cause the population of a portion of a vm_object
6138 * in much the same way as memory_object_upl_request.
6139 * Depending on the nature of the request, the pages
6140 * returned may contain valid data or be uninitialized.
6141 * However, the region may be expanded up to the super
6142 * cluster size provided.
6143 */
6144
6145 __private_extern__ kern_return_t
6146 vm_object_super_upl_request(
6147 vm_object_t object,
6148 vm_object_offset_t offset,
6149 upl_size_t size,
6150 upl_size_t super_cluster,
6151 upl_t *upl,
6152 upl_page_info_t *user_page_list,
6153 unsigned int *page_list_count,
6154 upl_control_flags_t cntrl_flags,
6155 vm_tag_t tag)
6156 {
6157 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
6158 return KERN_FAILURE;
6159 }
6160
6161 assert(object->paging_in_progress);
6162 offset = offset - object->paging_offset;
6163
6164 if (super_cluster > size) {
6165 vm_object_offset_t base_offset;
6166 upl_size_t super_size;
6167 vm_object_size_t super_size_64;
6168
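/*
 * Round the request out to a super-cluster-aligned range.  For
 * example, with super_cluster == 0x10000, offset == 0x11000 and
 * size == 0x2000, base_offset becomes 0x10000 and super_size stays
 * 0x10000; super_size is doubled only when the original request
 * would spill past the first super cluster.  The range is then
 * trimmed back so it doesn't extend beyond the object's size.
 */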
6169 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
6170 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
6171 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
6172 super_size = (upl_size_t) super_size_64;
6173 assert(super_size == super_size_64);
6174
6175 if (offset > (base_offset + super_size)) {
6176 panic("vm_object_super_upl_request: Missed target pageout"
6177 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
6178 offset, base_offset, super_size, super_cluster,
6179 size, object->paging_offset);
6180 }
6181 /*
6182 * apparently there is a case where the vm requests a
6183 * page to be written out whose offset is beyond the
6184 * object size
6185 */
6186 if ((offset + size) > (base_offset + super_size)) {
6187 super_size_64 = (offset + size) - base_offset;
6188 super_size = (upl_size_t) super_size_64;
6189 assert(super_size == super_size_64);
6190 }
6191
6192 offset = base_offset;
6193 size = super_size;
6194 }
6195 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
6196 }
6197
6198 #if CONFIG_EMBEDDED
6199 int cs_executable_create_upl = 0;
6200 extern int proc_selfpid(void);
6201 extern char *proc_name_address(void *p);
6202 #endif /* CONFIG_EMBEDDED */
6203
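/*
 * vm_map_create_upl:
 * Resolve a VM map range down to its backing VM object (recursing
 * through submaps, honoring copy-on-write and any requested data
 * syncs along the way) and then build the UPL against that object
 * via vm_object_iopl_request.  The request may be trimmed to the
 * containing map entry, in which case *upl_size is reduced to match.
 */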
6204 kern_return_t
6205 vm_map_create_upl(
6206 vm_map_t map,
6207 vm_map_address_t offset,
6208 upl_size_t *upl_size,
6209 upl_t *upl,
6210 upl_page_info_array_t page_list,
6211 unsigned int *count,
6212 upl_control_flags_t *flags,
6213 vm_tag_t tag)
6214 {
6215 vm_map_entry_t entry;
6216 upl_control_flags_t caller_flags;
6217 int force_data_sync;
6218 int sync_cow_data;
6219 vm_object_t local_object;
6220 vm_map_offset_t local_offset;
6221 vm_map_offset_t local_start;
6222 kern_return_t ret;
6223
6224 assert(page_aligned(offset));
6225
6226 caller_flags = *flags;
6227
6228 if (caller_flags & ~UPL_VALID_FLAGS) {
6229 /*
6230 * For forward compatibility's sake,
6231 * reject any unknown flag.
6232 */
6233 return KERN_INVALID_VALUE;
6234 }
6235 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
6236 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
6237
6238 if (upl == NULL) {
6239 return KERN_INVALID_ARGUMENT;
6240 }
6241
6242 REDISCOVER_ENTRY:
6243 vm_map_lock_read(map);
6244
6245 if (!vm_map_lookup_entry(map, offset, &entry)) {
6246 vm_map_unlock_read(map);
6247 return KERN_FAILURE;
6248 }
6249
6250 if ((entry->vme_end - offset) < *upl_size) {
6251 *upl_size = (upl_size_t) (entry->vme_end - offset);
6252 assert(*upl_size == entry->vme_end - offset);
6253 }
6254
6255 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
6256 *flags = 0;
6257
6258 if (!entry->is_sub_map &&
6259 VME_OBJECT(entry) != VM_OBJECT_NULL) {
6260 if (VME_OBJECT(entry)->private) {
6261 *flags = UPL_DEV_MEMORY;
6262 }
6263
6264 if (VME_OBJECT(entry)->phys_contiguous) {
6265 *flags |= UPL_PHYS_CONTIG;
6266 }
6267 }
6268 vm_map_unlock_read(map);
6269 return KERN_SUCCESS;
6270 }
6271
6272 if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
6273 !VME_OBJECT(entry)->phys_contiguous) {
6274 if (*upl_size > MAX_UPL_SIZE_BYTES) {
6275 *upl_size = MAX_UPL_SIZE_BYTES;
6276 }
6277 }
6278
6279 /*
6280 * Create an object if necessary.
6281 */
6282 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
6283 if (vm_map_lock_read_to_write(map)) {
6284 goto REDISCOVER_ENTRY;
6285 }
6286
6287 VME_OBJECT_SET(entry,
6288 vm_object_allocate((vm_size_t)
6289 (entry->vme_end -
6290 entry->vme_start)));
6291 VME_OFFSET_SET(entry, 0);
6292 assert(entry->use_pmap);
6293
6294 vm_map_lock_write_to_read(map);
6295 }
6296
6297 if (!(caller_flags & UPL_COPYOUT_FROM) &&
6298 !entry->is_sub_map &&
6299 !(entry->protection & VM_PROT_WRITE)) {
6300 vm_map_unlock_read(map);
6301 return KERN_PROTECTION_FAILURE;
6302 }
6303
6304 #if CONFIG_EMBEDDED
6305 if (map->pmap != kernel_pmap &&
6306 (caller_flags & UPL_COPYOUT_FROM) &&
6307 (entry->protection & VM_PROT_EXECUTE) &&
6308 !(entry->protection & VM_PROT_WRITE)) {
6309 vm_offset_t kaddr;
6310 vm_size_t ksize;
6311
6312 /*
6313 * We're about to create a read-only UPL backed by
6314 * memory from an executable mapping.
6315 * Wiring the pages would result in the pages being copied
6316 * (due to the "MAP_PRIVATE" mapping) and no longer
6317 * code-signed, so no longer eligible for execution.
6318 * Instead, let's copy the data into a kernel buffer and
6319 * create the UPL from this kernel buffer.
6320 * The kernel buffer is then freed, leaving the UPL holding
6321 * the last reference on the VM object, so the memory will
6322 * be released when the UPL is committed.
6323 */
6324
6325 vm_map_unlock_read(map);
6326 /* allocate kernel buffer */
6327 ksize = round_page(*upl_size);
6328 kaddr = 0;
6329 ret = kmem_alloc_pageable(kernel_map,
6330 &kaddr,
6331 ksize,
6332 tag);
6333 if (ret == KERN_SUCCESS) {
6334 /* copyin the user data */
6335 assert(page_aligned(offset));
6336 ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
6337 }
6338 if (ret == KERN_SUCCESS) {
6339 if (ksize > *upl_size) {
6340 /* zero out the extra space in kernel buffer */
6341 memset((void *)(kaddr + *upl_size),
6342 0,
6343 ksize - *upl_size);
6344 }
6345 /* create the UPL from the kernel buffer */
6346 ret = vm_map_create_upl(kernel_map, kaddr, upl_size,
6347 upl, page_list, count, flags, tag);
6348 }
6349 if (kaddr != 0) {
6350 /* free the kernel buffer */
6351 kmem_free(kernel_map, kaddr, ksize);
6352 kaddr = 0;
6353 ksize = 0;
6354 }
6355 #if DEVELOPMENT || DEBUG
6356 DTRACE_VM4(create_upl_from_executable,
6357 vm_map_t, map,
6358 vm_map_address_t, offset,
6359 upl_size_t, *upl_size,
6360 kern_return_t, ret);
6361 #endif /* DEVELOPMENT || DEBUG */
6362 return ret;
6363 }
6364 #endif /* CONFIG_EMBEDDED */
6365
6366 local_object = VME_OBJECT(entry);
6367 assert(local_object != VM_OBJECT_NULL);
6368
6369 if (!entry->is_sub_map &&
6370 !entry->needs_copy &&
6371 *upl_size != 0 &&
6372 local_object->vo_size > *upl_size && /* partial UPL */
6373 entry->wired_count == 0 && /* No COW for entries that are wired */
6374 (map->pmap != kernel_pmap) && /* alias checks */
6375 (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
6376 ||
6377 ( /* case 2 */
6378 local_object->internal &&
6379 (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
6380 local_object->ref_count > 1))) {
6381 vm_prot_t prot;
6382
6383 /*
6384 * Case 1:
6385 * Set up the targeted range for copy-on-write to avoid
6386 * applying true_share/copy_delay to the entire object.
6387 *
6388 * Case 2:
6389 * This map entry covers only part of an internal
6390 * object. There could be other map entries covering
6391 * other areas of this object and some of these map
6392 * entries could be marked as "needs_copy", which
6393 * assumes that the object is COPY_SYMMETRIC.
6394 * To avoid marking this object as COPY_DELAY and
6395 * "true_share", let's shadow it and mark the new
6396 * (smaller) object as "true_share" and COPY_DELAY.
6397 */
6398
6399 if (vm_map_lock_read_to_write(map)) {
6400 goto REDISCOVER_ENTRY;
6401 }
6402 vm_map_lock_assert_exclusive(map);
6403 assert(VME_OBJECT(entry) == local_object);
6404
6405 vm_map_clip_start(map,
6406 entry,
6407 vm_map_trunc_page(offset,
6408 VM_MAP_PAGE_MASK(map)));
6409 vm_map_clip_end(map,
6410 entry,
6411 vm_map_round_page(offset + *upl_size,
6412 VM_MAP_PAGE_MASK(map)));
6413 if ((entry->vme_end - offset) < *upl_size) {
6414 *upl_size = (upl_size_t) (entry->vme_end - offset);
6415 assert(*upl_size == entry->vme_end - offset);
6416 }
6417
6418 prot = entry->protection & ~VM_PROT_WRITE;
6419 if (override_nx(map, VME_ALIAS(entry)) && prot) {
6420 prot |= VM_PROT_EXECUTE;
6421 }
6422 vm_object_pmap_protect(local_object,
6423 VME_OFFSET(entry),
6424 entry->vme_end - entry->vme_start,
6425 ((entry->is_shared ||
6426 map->mapped_in_other_pmaps)
6427 ? PMAP_NULL
6428 : map->pmap),
6429 entry->vme_start,
6430 prot);
6431
6432 assert(entry->wired_count == 0);
6433
6434 /*
6435 * Lock the VM object and re-check its status: if it's mapped
6436 * in another address space, we could still be racing with
6437 * another thread holding that other VM map exclusively.
6438 */
6439 vm_object_lock(local_object);
6440 if (local_object->true_share) {
6441 /* object is already in proper state: no COW needed */
6442 assert(local_object->copy_strategy !=
6443 MEMORY_OBJECT_COPY_SYMMETRIC);
6444 } else {
6445 /* not true_share: ask for copy-on-write below */
6446 assert(local_object->copy_strategy ==
6447 MEMORY_OBJECT_COPY_SYMMETRIC);
6448 entry->needs_copy = TRUE;
6449 }
6450 vm_object_unlock(local_object);
6451
6452 vm_map_lock_write_to_read(map);
6453 }
6454
6455 if (entry->needs_copy) {
6456 /*
6457 * Honor copy-on-write for COPY_SYMMETRIC
6458 * strategy.
6459 */
6460 vm_map_t local_map;
6461 vm_object_t object;
6462 vm_object_offset_t new_offset;
6463 vm_prot_t prot;
6464 boolean_t wired;
6465 vm_map_version_t version;
6466 vm_map_t real_map;
6467 vm_prot_t fault_type;
6468
6469 local_map = map;
6470
6471 if (caller_flags & UPL_COPYOUT_FROM) {
6472 fault_type = VM_PROT_READ | VM_PROT_COPY;
6473 vm_counters.create_upl_extra_cow++;
6474 vm_counters.create_upl_extra_cow_pages +=
6475 (entry->vme_end - entry->vme_start) / PAGE_SIZE;
6476 } else {
6477 fault_type = VM_PROT_WRITE;
6478 }
6479 if (vm_map_lookup_locked(&local_map,
6480 offset, fault_type,
6481 OBJECT_LOCK_EXCLUSIVE,
6482 &version, &object,
6483 &new_offset, &prot, &wired,
6484 NULL,
6485 &real_map) != KERN_SUCCESS) {
6486 if (fault_type == VM_PROT_WRITE) {
6487 vm_counters.create_upl_lookup_failure_write++;
6488 } else {
6489 vm_counters.create_upl_lookup_failure_copy++;
6490 }
6491 vm_map_unlock_read(local_map);
6492 return KERN_FAILURE;
6493 }
6494 if (real_map != map) {
6495 vm_map_unlock(real_map);
6496 }
6497 vm_map_unlock_read(local_map);
6498
6499 vm_object_unlock(object);
6500
6501 goto REDISCOVER_ENTRY;
6502 }
6503
6504 if (entry->is_sub_map) {
6505 vm_map_t submap;
6506
6507 submap = VME_SUBMAP(entry);
6508 local_start = entry->vme_start;
6509 local_offset = VME_OFFSET(entry);
6510
6511 vm_map_reference(submap);
6512 vm_map_unlock_read(map);
6513
6514 ret = vm_map_create_upl(submap,
6515 local_offset + (offset - local_start),
6516 upl_size, upl, page_list, count, flags, tag);
6517 vm_map_deallocate(submap);
6518
6519 return ret;
6520 }
6521
6522 if (sync_cow_data &&
6523 (VME_OBJECT(entry)->shadow ||
6524 VME_OBJECT(entry)->copy)) {
6525 local_object = VME_OBJECT(entry);
6526 local_start = entry->vme_start;
6527 local_offset = VME_OFFSET(entry);
6528
6529 vm_object_reference(local_object);
6530 vm_map_unlock_read(map);
6531
6532 if (local_object->shadow && local_object->copy) {
6533 vm_object_lock_request(local_object->shadow,
6534 ((vm_object_offset_t)
6535 ((offset - local_start) +
6536 local_offset) +
6537 local_object->vo_shadow_offset),
6538 *upl_size, FALSE,
6539 MEMORY_OBJECT_DATA_SYNC,
6540 VM_PROT_NO_CHANGE);
6541 }
6542 sync_cow_data = FALSE;
6543 vm_object_deallocate(local_object);
6544
6545 goto REDISCOVER_ENTRY;
6546 }
6547 if (force_data_sync) {
6548 local_object = VME_OBJECT(entry);
6549 local_start = entry->vme_start;
6550 local_offset = VME_OFFSET(entry);
6551
6552 vm_object_reference(local_object);
6553 vm_map_unlock_read(map);
6554
6555 vm_object_lock_request(local_object,
6556 ((vm_object_offset_t)
6557 ((offset - local_start) +
6558 local_offset)),
6559 (vm_object_size_t)*upl_size,
6560 FALSE,
6561 MEMORY_OBJECT_DATA_SYNC,
6562 VM_PROT_NO_CHANGE);
6563
6564 force_data_sync = FALSE;
6565 vm_object_deallocate(local_object);
6566
6567 goto REDISCOVER_ENTRY;
6568 }
6569 if (VME_OBJECT(entry)->private) {
6570 *flags = UPL_DEV_MEMORY;
6571 } else {
6572 *flags = 0;
6573 }
6574
6575 if (VME_OBJECT(entry)->phys_contiguous) {
6576 *flags |= UPL_PHYS_CONTIG;
6577 }
6578
6579 local_object = VME_OBJECT(entry);
6580 local_offset = VME_OFFSET(entry);
6581 local_start = entry->vme_start;
6582
6583 #if CONFIG_EMBEDDED
6584 /*
6585 * Wiring will copy the pages to the shadow object.
6586 * The shadow object will not be code-signed so
6587 * attempting to execute code from these copied pages
6588 * would trigger a code-signing violation.
6589 */
6590 if (entry->protection & VM_PROT_EXECUTE) {
6591 #if MACH_ASSERT
6592 printf("pid %d[%s] create_upl out of executable range from "
6593 "0x%llx to 0x%llx: side effects may include "
6594 "code-signing violations later on\n",
6595 proc_selfpid(),
6596 (current_task()->bsd_info
6597 ? proc_name_address(current_task()->bsd_info)
6598 : "?"),
6599 (uint64_t) entry->vme_start,
6600 (uint64_t) entry->vme_end);
6601 #endif /* MACH_ASSERT */
6602 DTRACE_VM2(cs_executable_create_upl,
6603 uint64_t, (uint64_t)entry->vme_start,
6604 uint64_t, (uint64_t)entry->vme_end);
6605 cs_executable_create_upl++;
6606 }
6607 #endif /* CONFIG_EMBEDDED */
6608
6609 vm_object_lock(local_object);
6610
6611 /*
6612 * Ensure that this object is "true_share" and "copy_delay" now,
6613 * while we're still holding the VM map lock. After we unlock the map,
6614 * anything could happen to that mapping, including some copy-on-write
6615 * activity. We need to make sure that the IOPL will point at the
6616 * same memory as the mapping.
6617 */
6618 if (local_object->true_share) {
6619 assert(local_object->copy_strategy !=
6620 MEMORY_OBJECT_COPY_SYMMETRIC);
6621 } else if (local_object != kernel_object &&
6622 local_object != compressor_object &&
6623 !local_object->phys_contiguous) {
6624 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6625 if (!local_object->true_share &&
6626 vm_object_tracking_inited) {
6627 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
6628 int num = 0;
6629 num = OSBacktrace(bt,
6630 VM_OBJECT_TRACKING_BTDEPTH);
6631 btlog_add_entry(vm_object_tracking_btlog,
6632 local_object,
6633 VM_OBJECT_TRACKING_OP_TRUESHARE,
6634 bt,
6635 num);
6636 }
6637 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6638 local_object->true_share = TRUE;
6639 if (local_object->copy_strategy ==
6640 MEMORY_OBJECT_COPY_SYMMETRIC) {
6641 local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6642 }
6643 }
6644
6645 vm_object_reference_locked(local_object);
6646 vm_object_unlock(local_object);
6647
6648 vm_map_unlock_read(map);
6649
6650 ret = vm_object_iopl_request(local_object,
6651 ((vm_object_offset_t)
6652 ((offset - local_start) + local_offset)),
6653 *upl_size,
6654 upl,
6655 page_list,
6656 count,
6657 caller_flags,
6658 tag);
6659 vm_object_deallocate(local_object);
6660
6661 return ret;
6662 }
6663
6664 /*
6665 * Internal routine to enter a UPL into a VM map.
6666 *
6667 * JMM - This should just be doable through the standard
6668 * vm_map_enter() API.
6669 */
6670 kern_return_t
6671 vm_map_enter_upl(
6672 vm_map_t map,
6673 upl_t upl,
6674 vm_map_offset_t *dst_addr)
6675 {
6676 vm_map_size_t size;
6677 vm_object_offset_t offset;
6678 vm_map_offset_t addr;
6679 vm_page_t m;
6680 kern_return_t kr;
6681 int isVectorUPL = 0, curr_upl = 0;
6682 upl_t vector_upl = NULL;
6683 vm_offset_t vector_upl_dst_addr = 0;
6684 vm_map_t vector_upl_submap = NULL;
6685 upl_offset_t subupl_offset = 0;
6686 upl_size_t subupl_size = 0;
6687
6688 if (upl == UPL_NULL) {
6689 return KERN_INVALID_ARGUMENT;
6690 }
6691
6692 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6693 int mapped = 0, valid_upls = 0;
6694 vector_upl = upl;
6695
6696 upl_lock(vector_upl);
6697 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6698 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
6699 if (upl == NULL) {
6700 continue;
6701 }
6702 valid_upls++;
6703 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6704 mapped++;
6705 }
6706 }
6707
6708 if (mapped) {
6709 if (mapped != valid_upls) {
6710 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
6711 } else {
6712 upl_unlock(vector_upl);
6713 return KERN_FAILURE;
6714 }
6715 }
6716
6717 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE,
6718 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
6719 &vector_upl_submap);
6720 if (kr != KERN_SUCCESS) {
6721 panic("Vector UPL submap allocation failed\n");
6722 }
6723 map = vector_upl_submap;
6724 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
6725 curr_upl = 0;
6726 } else {
6727 upl_lock(upl);
6728 }
6729
6730 process_upl_to_enter:
6731 if (isVectorUPL) {
6732 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
6733 *dst_addr = vector_upl_dst_addr;
6734 upl_unlock(vector_upl);
6735 return KERN_SUCCESS;
6736 }
6737 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
6738 if (upl == NULL) {
6739 goto process_upl_to_enter;
6740 }
6741
6742 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
6743 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
6744 } else {
6745 /*
6746 * check to see if already mapped
6747 */
6748 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6749 upl_unlock(upl);
6750 return KERN_FAILURE;
6751 }
6752 }
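/*
 * If this UPL isn't already shadowed and it either contains busy
 * pages or isn't the device-memory / IO-wire / physically-contiguous
 * kind, build a shadow object populated with private alias pages
 * that reference the same physical pages, so the mapping established
 * below doesn't hand out the original (possibly busy) pages directly.
 */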
6753 if ((!(upl->flags & UPL_SHADOWED)) &&
6754 ((upl->flags & UPL_HAS_BUSY) ||
6755 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
6756 vm_object_t object;
6757 vm_page_t alias_page;
6758 vm_object_offset_t new_offset;
6759 unsigned int pg_num;
6760 wpl_array_t lite_list;
6761
6762 if (upl->flags & UPL_INTERNAL) {
6763 lite_list = (wpl_array_t)
6764 ((((uintptr_t)upl) + sizeof(struct upl))
6765 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
6766 } else {
6767 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
6768 }
6769 object = upl->map_object;
6770 upl->map_object = vm_object_allocate(upl->size);
6771
6772 vm_object_lock(upl->map_object);
6773
6774 upl->map_object->shadow = object;
6775 upl->map_object->pageout = TRUE;
6776 upl->map_object->can_persist = FALSE;
6777 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
6778 upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
6779 upl->map_object->wimg_bits = object->wimg_bits;
6780 offset = upl->map_object->vo_shadow_offset;
6781 new_offset = 0;
6782 size = upl->size;
6783
6784 upl->flags |= UPL_SHADOWED;
6785
6786 while (size) {
6787 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
6788 assert(pg_num == new_offset / PAGE_SIZE);
6789
6790 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
6791 VM_PAGE_GRAB_FICTITIOUS(alias_page);
6792
6793 vm_object_lock(object);
6794
6795 m = vm_page_lookup(object, offset);
6796 if (m == VM_PAGE_NULL) {
6797 panic("vm_upl_map: page missing\n");
6798 }
6799
6800 /*
6801 * Convert the fictitious page to a private
6802 * shadow of the real page.
6803 */
6804 assert(alias_page->vmp_fictitious);
6805 alias_page->vmp_fictitious = FALSE;
6806 alias_page->vmp_private = TRUE;
6807 alias_page->vmp_free_when_done = TRUE;
6808 /*
6809 * since m is a page in the upl it must
6810 * already be wired or BUSY, so it's
6811 * safe to assign the underlying physical
6812 * page to the alias
6813 */
6814 VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
6815
6816 vm_object_unlock(object);
6817
6818 vm_page_lockspin_queues();
6819 vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
6820 vm_page_unlock_queues();
6821
6822 vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
6823
6824 assert(!alias_page->vmp_wanted);
6825 alias_page->vmp_busy = FALSE;
6826 alias_page->vmp_absent = FALSE;
6827 }
6828 size -= PAGE_SIZE;
6829 offset += PAGE_SIZE_64;
6830 new_offset += PAGE_SIZE_64;
6831 }
6832 vm_object_unlock(upl->map_object);
6833 }
6834 if (upl->flags & UPL_SHADOWED) {
6835 offset = 0;
6836 } else {
6837 offset = upl->offset - upl->map_object->paging_offset;
6838 }
6839
6840 size = upl->size;
6841
6842 vm_object_reference(upl->map_object);
6843
6844 if (!isVectorUPL) {
6845 *dst_addr = 0;
6846 /*
6847 * NEED A UPL_MAP ALIAS
6848 */
6849 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
6850 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
6851 upl->map_object, offset, FALSE,
6852 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
6853
6854 if (kr != KERN_SUCCESS) {
6855 vm_object_deallocate(upl->map_object);
6856 upl_unlock(upl);
6857 return kr;
6858 }
6859 } else {
6860 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
6861 VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
6862 upl->map_object, offset, FALSE,
6863 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
6864 if (kr) {
6865 panic("vm_map_enter failed for a Vector UPL\n");
6866 }
6867 }
6868 vm_object_lock(upl->map_object);
6869
6870 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
6871 m = vm_page_lookup(upl->map_object, offset);
6872
6873 if (m) {
6874 m->vmp_pmapped = TRUE;
6875
6876 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
6877 * but only in kernel space. If this was on a user map,
6878 * we'd have to set the wpmapped bit. */
6879 /* m->vmp_wpmapped = TRUE; */
6880 assert(map->pmap == kernel_pmap);
6881
6882 PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE, kr);
6883
6884 assert(kr == KERN_SUCCESS);
6885 #if KASAN
6886 kasan_notify_address(addr, PAGE_SIZE_64);
6887 #endif
6888 }
6889 offset += PAGE_SIZE_64;
6890 }
6891 vm_object_unlock(upl->map_object);
6892
6893 /*
6894 * hold a reference for the mapping
6895 */
6896 upl->ref_count++;
6897 upl->flags |= UPL_PAGE_LIST_MAPPED;
6898 upl->kaddr = (vm_offset_t) *dst_addr;
6899 assert(upl->kaddr == *dst_addr);
6900
6901 if (isVectorUPL) {
6902 goto process_upl_to_enter;
6903 }
6904
6905 upl_unlock(upl);
6906
6907 return KERN_SUCCESS;
6908 }
6909
6910 /*
6911 * Internal routine to remove a UPL mapping from a VM map.
6912 *
6913 * XXX - This should just be doable through a standard
6914 * vm_map_remove() operation. Otherwise, implicit clean-up
6915 * of the target map won't be able to correctly remove
6916 * these (and release the reference on the UPL). Having
6917 * to do this means we can't map these into user-space
6918 * maps yet.
6919 */
6920 kern_return_t
6921 vm_map_remove_upl(
6922 vm_map_t map,
6923 upl_t upl)
6924 {
6925 vm_address_t addr;
6926 upl_size_t size;
6927 int isVectorUPL = 0, curr_upl = 0;
6928 upl_t vector_upl = NULL;
6929
6930 if (upl == UPL_NULL) {
6931 return KERN_INVALID_ARGUMENT;
6932 }
6933
6934 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6935 int unmapped = 0, valid_upls = 0;
6936 vector_upl = upl;
6937 upl_lock(vector_upl);
6938 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6939 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
6940 if (upl == NULL) {
6941 continue;
6942 }
6943 valid_upls++;
6944 if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
6945 unmapped++;
6946 }
6947 }
6948
6949 if (unmapped) {
6950 if (unmapped != valid_upls) {
6951 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
6952 } else {
6953 upl_unlock(vector_upl);
6954 return KERN_FAILURE;
6955 }
6956 }
6957 curr_upl = 0;
6958 } else {
6959 upl_lock(upl);
6960 }
6961
6962 process_upl_to_remove:
6963 if (isVectorUPL) {
6964 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
6965 vm_map_t v_upl_submap;
6966 vm_offset_t v_upl_submap_dst_addr;
6967 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
6968
6969 vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_REMOVE_NO_FLAGS);
6970 vm_map_deallocate(v_upl_submap);
6971 upl_unlock(vector_upl);
6972 return KERN_SUCCESS;
6973 }
6974
6975 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
6976 if (upl == NULL) {
6977 goto process_upl_to_remove;
6978 }
6979 }
6980
6981 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
6982 addr = upl->kaddr;
6983 size = upl->size;
6984
6985 assert(upl->ref_count > 1);
6986 upl->ref_count--; /* removing mapping ref */
6987
6988 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
6989 upl->kaddr = (vm_offset_t) 0;
6990
6991 if (!isVectorUPL) {
6992 upl_unlock(upl);
6993
6994 vm_map_remove(
6995 map,
6996 vm_map_trunc_page(addr,
6997 VM_MAP_PAGE_MASK(map)),
6998 vm_map_round_page(addr + size,
6999 VM_MAP_PAGE_MASK(map)),
7000 VM_MAP_REMOVE_NO_FLAGS);
7001 return KERN_SUCCESS;
7002 } else {
7003 /*
7004 * If it's a Vectored UPL, we'll be removing the entire
7005 * submap anyway, so no need to remove individual UPL
7006 * element mappings from within the submap
7007 */
7008 goto process_upl_to_remove;
7009 }
7010 }
7011 upl_unlock(upl);
7012
7013 return KERN_FAILURE;
7014 }
7015
7016
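/*
 * upl_commit_range:
 * Commit a sub-range of a UPL (or of each sub-UPL of a vector UPL):
 * clear the lite-list/shadow bookkeeping for each page, apply the
 * commit flags (dirty/clean, code-signing state, etc.) and release
 * the pages back to the appropriate queues.  On return, *empty is
 * set when no uncommitted pages remain in the UPL.
 */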
7017 kern_return_t
7018 upl_commit_range(
7019 upl_t upl,
7020 upl_offset_t offset,
7021 upl_size_t size,
7022 int flags,
7023 upl_page_info_t *page_list,
7024 mach_msg_type_number_t count,
7025 boolean_t *empty)
7026 {
7027 upl_size_t xfer_size, subupl_size = size;
7028 vm_object_t shadow_object;
7029 vm_object_t object;
7030 vm_object_t m_object;
7031 vm_object_offset_t target_offset;
7032 upl_offset_t subupl_offset = offset;
7033 int entry;
7034 wpl_array_t lite_list;
7035 int occupied;
7036 int clear_refmod = 0;
7037 int pgpgout_count = 0;
7038 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
7039 struct vm_page_delayed_work *dwp;
7040 int dw_count;
7041 int dw_limit;
7042 int isVectorUPL = 0;
7043 upl_t vector_upl = NULL;
7044 boolean_t should_be_throttled = FALSE;
7045
7046 vm_page_t nxt_page = VM_PAGE_NULL;
7047 int fast_path_possible = 0;
7048 int fast_path_full_commit = 0;
7049 int throttle_page = 0;
7050 int unwired_count = 0;
7051 int local_queue_count = 0;
7052 vm_page_t first_local, last_local;
7053
7054 *empty = FALSE;
7055
7056 if (upl == UPL_NULL) {
7057 return KERN_INVALID_ARGUMENT;
7058 }
7059
7060 if (count == 0) {
7061 page_list = NULL;
7062 }
7063
7064 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7065 vector_upl = upl;
7066 upl_lock(vector_upl);
7067 } else {
7068 upl_lock(upl);
7069 }
7070
7071 process_upl_to_commit:
7072
7073 if (isVectorUPL) {
7074 size = subupl_size;
7075 offset = subupl_offset;
7076 if (size == 0) {
7077 upl_unlock(vector_upl);
7078 return KERN_SUCCESS;
7079 }
7080 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7081 if (upl == NULL) {
7082 upl_unlock(vector_upl);
7083 return KERN_FAILURE;
7084 }
7085 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
7086 subupl_size -= size;
7087 subupl_offset += size;
7088 }
7089
7090 #if UPL_DEBUG
7091 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7092 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
7093
7094 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7095 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7096
7097 upl->upl_commit_index++;
7098 }
7099 #endif
7100 if (upl->flags & UPL_DEVICE_MEMORY) {
7101 xfer_size = 0;
7102 } else if ((offset + size) <= upl->size) {
7103 xfer_size = size;
7104 } else {
7105 if (!isVectorUPL) {
7106 upl_unlock(upl);
7107 } else {
7108 upl_unlock(vector_upl);
7109 }
7110 return KERN_FAILURE;
7111 }
7112 if (upl->flags & UPL_SET_DIRTY) {
7113 flags |= UPL_COMMIT_SET_DIRTY;
7114 }
7115 if (upl->flags & UPL_CLEAR_DIRTY) {
7116 flags |= UPL_COMMIT_CLEAR_DIRTY;
7117 }
7118
7119 if (upl->flags & UPL_INTERNAL) {
7120 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
7121 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
7122 } else {
7123 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
7124 }
7125
7126 object = upl->map_object;
7127
7128 if (upl->flags & UPL_SHADOWED) {
7129 vm_object_lock(object);
7130 shadow_object = object->shadow;
7131 } else {
7132 shadow_object = object;
7133 }
7134 entry = offset / PAGE_SIZE;
7135 target_offset = (vm_object_offset_t)offset;
7136
7137 assert(!(target_offset & PAGE_MASK));
7138 assert(!(xfer_size & PAGE_MASK));
7139
7140 if (upl->flags & UPL_KERNEL_OBJECT) {
7141 vm_object_lock_shared(shadow_object);
7142 } else {
7143 vm_object_lock(shadow_object);
7144 }
7145
7146 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
7147
7148 if (upl->flags & UPL_ACCESS_BLOCKED) {
7149 assert(shadow_object->blocked_access);
7150 shadow_object->blocked_access = FALSE;
7151 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7152 }
7153
7154 if (shadow_object->code_signed) {
7155 /*
7156 * CODE SIGNING:
7157 * If the object is code-signed, do not let this UPL tell
7158 * us if the pages are valid or not. Let the pages be
7159 * validated by VM the normal way (when they get mapped or
7160 * copied).
7161 */
7162 flags &= ~UPL_COMMIT_CS_VALIDATED;
7163 }
7164 if (!page_list) {
7165 /*
7166 * No page list to get the code-signing info from !?
7167 */
7168 flags &= ~UPL_COMMIT_CS_VALIDATED;
7169 }
7170 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
7171 should_be_throttled = TRUE;
7172 }
7173
7174 dwp = &dw_array[0];
7175 dw_count = 0;
7176 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
7177
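/*
 * Simple IO-wire commits of a non-vector UPL against an object that
 * isn't volatile or empty-purgeable can take a fast path: walk the
 * object's resident page list directly (a full commit when the UPL
 * covers the whole object) and batch the wire/queue updates (see
 * unwired_count and local_queue_count above) instead of handling
 * each page individually.
 */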
7178 if ((upl->flags & UPL_IO_WIRE) &&
7179 !(flags & UPL_COMMIT_FREE_ABSENT) &&
7180 !isVectorUPL &&
7181 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
7182 shadow_object->purgable != VM_PURGABLE_EMPTY) {
7183 if (!vm_page_queue_empty(&shadow_object->memq)) {
7184 if (size == shadow_object->vo_size) {
7185 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
7186 fast_path_full_commit = 1;
7187 }
7188 fast_path_possible = 1;
7189
7190 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
7191 (shadow_object->purgable == VM_PURGABLE_DENY ||
7192 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
7193 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
7194 throttle_page = 1;
7195 }
7196 }
7197 }
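	/*
	 * The fast path set up above applies only to simple IO-wire
	 * commits: a non-vectored UPL over a non-purgeable object where
	 * absent pages are not being freed.  In that case pages can be
	 * unwired and collected on a local queue (first_local/last_local)
	 * and spliced onto the appropriate global queue in one shot after
	 * the loop, instead of being queued one at a time through the
	 * delayed-work machinery.
	 */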
7198 first_local = VM_PAGE_NULL;
7199 last_local = VM_PAGE_NULL;
7200
7201 while (xfer_size) {
7202 vm_page_t t, m;
7203
7204 dwp->dw_mask = 0;
7205 clear_refmod = 0;
7206
7207 m = VM_PAGE_NULL;
7208
7209 if (upl->flags & UPL_LITE) {
7210 unsigned int pg_num;
7211
7212 if (nxt_page != VM_PAGE_NULL) {
7213 m = nxt_page;
7214 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
7215 target_offset = m->vmp_offset;
7216 }
7217 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7218 assert(pg_num == target_offset / PAGE_SIZE);
7219
7220 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
7221 lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
7222
7223 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7224 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
7225 }
7226 } else {
7227 m = NULL;
7228 }
7229 }
7230 if (upl->flags & UPL_SHADOWED) {
7231 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7232 t->vmp_free_when_done = FALSE;
7233
7234 VM_PAGE_FREE(t);
7235
7236 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
7237 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7238 }
7239 }
7240 }
7241 if (m == VM_PAGE_NULL) {
7242 goto commit_next_page;
7243 }
7244
7245 m_object = VM_PAGE_OBJECT(m);
7246
7247 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7248 assert(m->vmp_busy);
7249
7250 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7251 goto commit_next_page;
7252 }
7253
7254 if (flags & UPL_COMMIT_CS_VALIDATED) {
7255 /*
7256 * CODE SIGNING:
7257 * Set the code signing bits according to
7258 * what the UPL says they should be.
7259 */
7260 m->vmp_cs_validated = page_list[entry].cs_validated;
7261 m->vmp_cs_tainted = page_list[entry].cs_tainted;
7262 m->vmp_cs_nx = page_list[entry].cs_nx;
7263 }
7264 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
7265 m->vmp_written_by_kernel = TRUE;
7266 }
7267
7268 if (upl->flags & UPL_IO_WIRE) {
7269 if (page_list) {
7270 page_list[entry].phys_addr = 0;
7271 }
7272
7273 if (flags & UPL_COMMIT_SET_DIRTY) {
7274 SET_PAGE_DIRTY(m, FALSE);
7275 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7276 m->vmp_dirty = FALSE;
7277
7278 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7279 m->vmp_cs_validated && !m->vmp_cs_tainted) {
7280 /*
7281 * CODE SIGNING:
7282 * This page is no longer dirty
7283 * but could have been modified,
7284 * so it will need to be
7285 * re-validated.
7286 */
7287 m->vmp_cs_validated = FALSE;
7288
7289 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7290
7291 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7292 }
7293 clear_refmod |= VM_MEM_MODIFIED;
7294 }
7295 if (upl->flags & UPL_ACCESS_BLOCKED) {
7296 /*
7297 * We blocked access to the pages in this UPL.
7298 * Clear the "busy" bit and wake up any waiter
7299 * for this page.
7300 */
7301 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7302 }
7303 if (fast_path_possible) {
7304 assert(m_object->purgable != VM_PURGABLE_EMPTY);
7305 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
7306 if (m->vmp_absent) {
7307 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
7308 assert(m->vmp_wire_count == 0);
7309 assert(m->vmp_busy);
7310
7311 m->vmp_absent = FALSE;
7312 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7313 } else {
7314 if (m->vmp_wire_count == 0) {
7315 panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
7316 }
7317 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
7318
7319 /*
7320 * XXX FBDP need to update some other
7321 * counters here (purgeable_wired_count)
7322 * (ledgers), ...
7323 */
7324 assert(m->vmp_wire_count > 0);
7325 m->vmp_wire_count--;
7326
7327 if (m->vmp_wire_count == 0) {
7328 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
7329 unwired_count++;
7330 }
7331 }
7332 if (m->vmp_wire_count == 0) {
7333 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7334
7335 if (last_local == VM_PAGE_NULL) {
7336 assert(first_local == VM_PAGE_NULL);
7337
7338 last_local = m;
7339 first_local = m;
7340 } else {
7341 assert(first_local != VM_PAGE_NULL);
7342
7343 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7344 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
7345 first_local = m;
7346 }
7347 local_queue_count++;
7348
7349 if (throttle_page) {
7350 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
7351 } else {
7352 if (flags & UPL_COMMIT_INACTIVATE) {
7353 if (shadow_object->internal) {
7354 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
7355 } else {
7356 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
7357 }
7358 } else {
7359 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
7360 }
7361 }
7362 }
7363 } else {
7364 if (flags & UPL_COMMIT_INACTIVATE) {
7365 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7366 clear_refmod |= VM_MEM_REFERENCED;
7367 }
7368 if (m->vmp_absent) {
7369 if (flags & UPL_COMMIT_FREE_ABSENT) {
7370 dwp->dw_mask |= DW_vm_page_free;
7371 } else {
7372 m->vmp_absent = FALSE;
7373 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7374
7375 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
7376 dwp->dw_mask |= DW_vm_page_activate;
7377 }
7378 }
7379 } else {
7380 dwp->dw_mask |= DW_vm_page_unwire;
7381 }
7382 }
7383 goto commit_next_page;
7384 }
7385 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7386
7387 if (page_list) {
7388 page_list[entry].phys_addr = 0;
7389 }
7390
7391 /*
7392 * make sure to clear the hardware
7393 * modify or reference bits before
7394 * releasing the BUSY bit on this page
7395 * otherwise we risk losing a legitimate
7396 * change of state
7397 */
7398 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
7399 m->vmp_dirty = FALSE;
7400
7401 clear_refmod |= VM_MEM_MODIFIED;
7402 }
7403 if (m->vmp_laundry) {
7404 dwp->dw_mask |= DW_vm_pageout_throttle_up;
7405 }
7406
7407 if (VM_PAGE_WIRED(m)) {
7408 m->vmp_free_when_done = FALSE;
7409 }
7410
7411 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
7412 m->vmp_cs_validated && !m->vmp_cs_tainted) {
7413 /*
7414 * CODE SIGNING:
7415 * This page is no longer dirty
7416 * but could have been modified,
7417 * so it will need to be
7418 * re-validated.
7419 */
7420 m->vmp_cs_validated = FALSE;
7421
7422 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
7423
7424 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7425 }
7426 if (m->vmp_overwriting) {
7427 /*
7428 * the (COPY_OUT_FROM == FALSE) request_page_list case
7429 */
7430 if (m->vmp_busy) {
7431 #if CONFIG_PHANTOM_CACHE
7432 if (m->vmp_absent && !m_object->internal) {
7433 dwp->dw_mask |= DW_vm_phantom_cache_update;
7434 }
7435 #endif
7436 m->vmp_absent = FALSE;
7437
7438 dwp->dw_mask |= DW_clear_busy;
7439 } else {
7440 /*
7441 * alternate (COPY_OUT_FROM == FALSE) page_list case
7442 * Occurs when the original page was wired
7443 * at the time of the list request
7444 */
7445 assert(VM_PAGE_WIRED(m));
7446
7447 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
7448 }
7449 m->vmp_overwriting = FALSE;
7450 }
7451 m->vmp_cleaning = FALSE;
7452
7453 if (m->vmp_free_when_done) {
7454 /*
7455 * With the clean queue enabled, UPL_PAGEOUT should
7456 * no longer set the pageout bit. Its pages now go
7457 * to the clean queue.
7458 *
7459 * We don't use the cleaned Q anymore and so this
7460 * assert isn't correct. The code for the clean Q
7461 * still exists and might be used in the future. If we
7462 * go back to the cleaned Q, we will re-enable this
7463 * assert.
7464 *
7465 * assert(!(upl->flags & UPL_PAGEOUT));
7466 */
7467 assert(!m_object->internal);
7468
7469 m->vmp_free_when_done = FALSE;
7470
7471 if ((flags & UPL_COMMIT_SET_DIRTY) ||
7472 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
7473 /*
7474 * page was re-dirtied after we started
7475 * the pageout... reactivate it since
7476 * we don't know whether the on-disk
7477 * copy matches what is now in memory
7478 */
7479 SET_PAGE_DIRTY(m, FALSE);
7480
7481 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
7482
7483 if (upl->flags & UPL_PAGEOUT) {
7484 VM_STAT_INCR(reactivations);
7485 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
7486 }
7487 } else {
7488 /*
7489 * page has been successfully cleaned
7490 * go ahead and free it for other use
7491 */
7492 if (m_object->internal) {
7493 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
7494 } else {
7495 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
7496 }
7497 m->vmp_dirty = FALSE;
7498 m->vmp_busy = TRUE;
7499
7500 dwp->dw_mask |= DW_vm_page_free;
7501 }
7502 goto commit_next_page;
7503 }
7504 /*
7505 * It is part of the semantics of COPYOUT_FROM
7506 * UPLs that a commit implies cache sync
7507 * between the vm page and the backing store;
7508 * this can be used to strip the precious bit
7509 * as well as to clean
7510 */
7511 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
7512 m->vmp_precious = FALSE;
7513 }
7514
7515 if (flags & UPL_COMMIT_SET_DIRTY) {
7516 SET_PAGE_DIRTY(m, FALSE);
7517 } else {
7518 m->vmp_dirty = FALSE;
7519 }
7520
7521 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7522 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
7523 pgpgout_count++;
7524
7525 VM_STAT_INCR(pageouts);
7526 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
7527
7528 dwp->dw_mask |= DW_enqueue_cleaned;
7529 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
7530 /*
7531 * page coming back in from being 'frozen'...
7532 * it was dirty before it was frozen, so keep it dirty so
7533 * that vm_page_activate will notice that it really belongs
7534 * on the throttle queue and put it there
7535 */
7536 SET_PAGE_DIRTY(m, FALSE);
7537 dwp->dw_mask |= DW_vm_page_activate;
7538 } else {
7539 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
7540 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7541 clear_refmod |= VM_MEM_REFERENCED;
7542 } else if (!VM_PAGE_PAGEABLE(m)) {
7543 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
7544 dwp->dw_mask |= DW_vm_page_speculate;
7545 } else if (m->vmp_reference) {
7546 dwp->dw_mask |= DW_vm_page_activate;
7547 } else {
7548 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7549 clear_refmod |= VM_MEM_REFERENCED;
7550 }
7551 }
7552 }
7553 if (upl->flags & UPL_ACCESS_BLOCKED) {
7554 /*
7555 * We blocked access to the pages in this UPL.
7556 * Clear the "busy" bit on this page before we
7557 * wake up any waiter.
7558 */
7559 dwp->dw_mask |= DW_clear_busy;
7560 }
7561 /*
7562 * Wakeup any thread waiting for the page to be un-cleaning.
7563 */
7564 dwp->dw_mask |= DW_PAGE_WAKEUP;
7565
7566 commit_next_page:
7567 if (clear_refmod) {
7568 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
7569 }
7570
7571 target_offset += PAGE_SIZE_64;
7572 xfer_size -= PAGE_SIZE;
7573 entry++;
7574
7575 if (dwp->dw_mask) {
7576 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
7577 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
7578
7579 if (dw_count >= dw_limit) {
7580 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7581
7582 dwp = &dw_array[0];
7583 dw_count = 0;
7584 }
7585 } else {
7586 if (dwp->dw_mask & DW_clear_busy) {
7587 m->vmp_busy = FALSE;
7588 }
7589
7590 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7591 PAGE_WAKEUP(m);
7592 }
7593 }
7594 }
7595 }
7596 if (dw_count) {
7597 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7598 }
7599
7600 if (fast_path_possible) {
7601 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
7602 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
7603
7604 if (local_queue_count || unwired_count) {
7605 if (local_queue_count) {
7606 vm_page_t first_target;
7607 vm_page_queue_head_t *target_queue;
7608
7609 if (throttle_page) {
7610 target_queue = &vm_page_queue_throttled;
7611 } else {
7612 if (flags & UPL_COMMIT_INACTIVATE) {
7613 if (shadow_object->internal) {
7614 target_queue = &vm_page_queue_anonymous;
7615 } else {
7616 target_queue = &vm_page_queue_inactive;
7617 }
7618 } else {
7619 target_queue = &vm_page_queue_active;
7620 }
7621 }
7622 /*
7623 * Transfer the entire local queue to the appropriate regular LRU page queue.
7624 */
7625 vm_page_lockspin_queues();
7626
7627 first_target = (vm_page_t) vm_page_queue_first(target_queue);
7628
7629 if (vm_page_queue_empty(target_queue)) {
7630 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7631 } else {
7632 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7633 }
7634
7635 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7636 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
7637 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
7638
7639 /*
7640 * Adjust the global page counts.
7641 */
7642 if (throttle_page) {
7643 vm_page_throttled_count += local_queue_count;
7644 } else {
7645 if (flags & UPL_COMMIT_INACTIVATE) {
7646 if (shadow_object->internal) {
7647 vm_page_anonymous_count += local_queue_count;
7648 }
7649 vm_page_inactive_count += local_queue_count;
7650
7651 token_new_pagecount += local_queue_count;
7652 } else {
7653 vm_page_active_count += local_queue_count;
7654 }
7655
7656 if (shadow_object->internal) {
7657 vm_page_pageable_internal_count += local_queue_count;
7658 } else {
7659 vm_page_pageable_external_count += local_queue_count;
7660 }
7661 }
7662 } else {
7663 vm_page_lockspin_queues();
7664 }
7665 if (unwired_count) {
7666 vm_page_wire_count -= unwired_count;
7667 VM_CHECK_MEMORYSTATUS;
7668 }
7669 vm_page_unlock_queues();
7670
7671 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
7672 }
7673 }
7674 occupied = 1;
7675
7676 if (upl->flags & UPL_DEVICE_MEMORY) {
7677 occupied = 0;
7678 } else if (upl->flags & UPL_LITE) {
7679 int pg_num;
7680 int i;
7681
7682 occupied = 0;
7683
7684 if (!fast_path_full_commit) {
7685 pg_num = upl->size / PAGE_SIZE;
7686 pg_num = (pg_num + 31) >> 5;
7687
7688 for (i = 0; i < pg_num; i++) {
7689 if (lite_list[i] != 0) {
7690 occupied = 1;
7691 break;
7692 }
7693 }
7694 }
7695 } else {
7696 if (vm_page_queue_empty(&upl->map_object->memq)) {
7697 occupied = 0;
7698 }
7699 }
7700 if (occupied == 0) {
7701 /*
7702 * If this UPL element belongs to a Vector UPL and is
7703 * empty, then this is the right function to deallocate
7704 * it. So go ahead and set the *empty variable. The flag
7705 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7706 * should be considered relevant for the Vector UPL and not
7707 * the internal UPLs.
7708 */
7709 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
7710 *empty = TRUE;
7711 }
7712
7713 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
7714 /*
7715 * this is not a paging object
7716 * so we need to drop the paging reference
7717 * that was taken when we created the UPL
7718 * against this object
7719 */
7720 vm_object_activity_end(shadow_object);
7721 vm_object_collapse(shadow_object, 0, TRUE);
7722 } else {
7723 /*
7724 * we donated the paging reference to
7725 * the map object... vm_pageout_object_terminate
7726 * will drop this reference
7727 */
7728 }
7729 }
7730 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
7731 vm_object_unlock(shadow_object);
7732 if (object != shadow_object) {
7733 vm_object_unlock(object);
7734 }
7735
7736 if (!isVectorUPL) {
7737 upl_unlock(upl);
7738 } else {
7739 /*
7740 * If we completed our operations on an UPL that is
7741 * part of a Vectored UPL and if empty is TRUE, then
7742 * we should go ahead and deallocate this UPL element.
7743 * Then we check if this was the last of the UPL elements
7744 * within that Vectored UPL. If so, set empty to TRUE
7745 * so that in ubc_upl_commit_range or ubc_upl_commit, we
7746 * can go ahead and deallocate the Vector UPL too.
7747 */
7748 if (*empty == TRUE) {
7749 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
7750 upl_deallocate(upl);
7751 }
7752 goto process_upl_to_commit;
7753 }
7754 if (pgpgout_count) {
7755 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
7756 }
7757
7758 return KERN_SUCCESS;
7759 }
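/*
 * A minimal caller sketch (illustrative only; io_offset, io_size, pl and
 * pl_count are placeholder names, not taken from this file).  On I/O
 * completion the covered range is either committed or aborted, and *empty
 * reports when the UPL no longer holds any pages:
 *
 *	boolean_t	empty = FALSE;
 *	kern_return_t	kr;
 *
 *	kr = upl_commit_range(upl, io_offset, io_size, 0,
 *	    pl, pl_count, &empty);
 *	if (kr != KERN_SUCCESS)
 *		(void) upl_abort_range(upl, io_offset, io_size,
 *		    UPL_ABORT_ERROR, &empty);
 *
 * In-tree callers normally reach these routines through the
 * ubc_upl_commit*() and ubc_upl_abort*() wrappers mentioned in the
 * comments above.
 */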
7760
7761 kern_return_t
7762 upl_abort_range(
7763 upl_t upl,
7764 upl_offset_t offset,
7765 upl_size_t size,
7766 int error,
7767 boolean_t *empty)
7768 {
7769 upl_page_info_t *user_page_list = NULL;
7770 upl_size_t xfer_size, subupl_size = size;
7771 vm_object_t shadow_object;
7772 vm_object_t object;
7773 vm_object_offset_t target_offset;
7774 upl_offset_t subupl_offset = offset;
7775 int entry;
7776 wpl_array_t lite_list;
7777 int occupied;
7778 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
7779 struct vm_page_delayed_work *dwp;
7780 int dw_count;
7781 int dw_limit;
7782 int isVectorUPL = 0;
7783 upl_t vector_upl = NULL;
7784
7785 *empty = FALSE;
7786
7787 if (upl == UPL_NULL) {
7788 return KERN_INVALID_ARGUMENT;
7789 }
7790
7791 if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
7792 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
7793 }
7794
7795 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7796 vector_upl = upl;
7797 upl_lock(vector_upl);
7798 } else {
7799 upl_lock(upl);
7800 }
7801
7802 process_upl_to_abort:
7803 if (isVectorUPL) {
7804 size = subupl_size;
7805 offset = subupl_offset;
7806 if (size == 0) {
7807 upl_unlock(vector_upl);
7808 return KERN_SUCCESS;
7809 }
7810 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7811 if (upl == NULL) {
7812 upl_unlock(vector_upl);
7813 return KERN_FAILURE;
7814 }
7815 subupl_size -= size;
7816 subupl_offset += size;
7817 }
7818
7819 *empty = FALSE;
7820
7821 #if UPL_DEBUG
7822 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7823 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
7824
7825 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7826 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7827 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
7828
7829 upl->upl_commit_index++;
7830 }
7831 #endif
7832 if (upl->flags & UPL_DEVICE_MEMORY) {
7833 xfer_size = 0;
7834 } else if ((offset + size) <= upl->size) {
7835 xfer_size = size;
7836 } else {
7837 if (!isVectorUPL) {
7838 upl_unlock(upl);
7839 } else {
7840 upl_unlock(vector_upl);
7841 }
7842
7843 return KERN_FAILURE;
7844 }
7845 if (upl->flags & UPL_INTERNAL) {
7846 lite_list = (wpl_array_t)
7847 ((((uintptr_t)upl) + sizeof(struct upl))
7848 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
7849
7850 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
7851 } else {
7852 lite_list = (wpl_array_t)
7853 (((uintptr_t)upl) + sizeof(struct upl));
7854 }
7855 object = upl->map_object;
7856
7857 if (upl->flags & UPL_SHADOWED) {
7858 vm_object_lock(object);
7859 shadow_object = object->shadow;
7860 } else {
7861 shadow_object = object;
7862 }
7863
7864 entry = offset / PAGE_SIZE;
7865 target_offset = (vm_object_offset_t)offset;
7866
7867 assert(!(target_offset & PAGE_MASK));
7868 assert(!(xfer_size & PAGE_MASK));
7869
7870 if (upl->flags & UPL_KERNEL_OBJECT) {
7871 vm_object_lock_shared(shadow_object);
7872 } else {
7873 vm_object_lock(shadow_object);
7874 }
7875
7876 if (upl->flags & UPL_ACCESS_BLOCKED) {
7877 assert(shadow_object->blocked_access);
7878 shadow_object->blocked_access = FALSE;
7879 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7880 }
7881
7882 dwp = &dw_array[0];
7883 dw_count = 0;
7884 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
7885
7886 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
7887 panic("upl_abort_range: kernel_object being DUMPED");
7888 }
7889
7890 while (xfer_size) {
7891 vm_page_t t, m;
7892 unsigned int pg_num;
7893 boolean_t needed;
7894
7895 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7896 assert(pg_num == target_offset / PAGE_SIZE);
7897
7898 needed = FALSE;
7899
7900 if (user_page_list) {
7901 needed = user_page_list[pg_num].needed;
7902 }
7903
7904 dwp->dw_mask = 0;
7905 m = VM_PAGE_NULL;
7906
7907 if (upl->flags & UPL_LITE) {
7908 if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
7909 lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
7910
7911 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
7912 m = vm_page_lookup(shadow_object, target_offset +
7913 (upl->offset - shadow_object->paging_offset));
7914 }
7915 }
7916 }
7917 if (upl->flags & UPL_SHADOWED) {
7918 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7919 t->vmp_free_when_done = FALSE;
7920
7921 VM_PAGE_FREE(t);
7922
7923 if (m == VM_PAGE_NULL) {
7924 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7925 }
7926 }
7927 }
7928 if ((upl->flags & UPL_KERNEL_OBJECT)) {
7929 goto abort_next_page;
7930 }
7931
7932 if (m != VM_PAGE_NULL) {
7933 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7934
7935 if (m->vmp_absent) {
7936 boolean_t must_free = TRUE;
7937
7938 /*
7939 * COPYOUT = FALSE case
7940 * check for error conditions which must
7941 * be passed back to the page's customer
7942 */
7943 if (error & UPL_ABORT_RESTART) {
7944 m->vmp_restart = TRUE;
7945 m->vmp_absent = FALSE;
7946 m->vmp_unusual = TRUE;
7947 must_free = FALSE;
7948 } else if (error & UPL_ABORT_UNAVAILABLE) {
7949 m->vmp_restart = FALSE;
7950 m->vmp_unusual = TRUE;
7951 must_free = FALSE;
7952 } else if (error & UPL_ABORT_ERROR) {
7953 m->vmp_restart = FALSE;
7954 m->vmp_absent = FALSE;
7955 m->vmp_error = TRUE;
7956 m->vmp_unusual = TRUE;
7957 must_free = FALSE;
7958 }
7959 if (m->vmp_clustered && needed == FALSE) {
7960 /*
7961 * This page was a part of a speculative
7962 * read-ahead initiated by the kernel
7963 * itself. No one is expecting this
7964 * page and no one will clean up its
7965 * error state if it ever becomes valid
7966 * in the future.
7967 * We have to free it here.
7968 */
7969 must_free = TRUE;
7970 }
7971 m->vmp_cleaning = FALSE;
7972
7973 if (m->vmp_overwriting && !m->vmp_busy) {
7974 /*
7975 * this shouldn't happen since
7976 * this is an 'absent' page, but
7977 * it doesn't hurt to check for
7978 * the 'alternate' method of
7979 * stabilizing the page...
7980 * we will mark 'busy' to be cleared
7981 * in the following code which will
7982 * take care of the primary stabilization
7983 * method (i.e. setting 'busy' to TRUE)
7984 */
7985 dwp->dw_mask |= DW_vm_page_unwire;
7986 }
7987 m->vmp_overwriting = FALSE;
7988
7989 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7990
7991 if (must_free == TRUE) {
7992 dwp->dw_mask |= DW_vm_page_free;
7993 } else {
7994 dwp->dw_mask |= DW_vm_page_activate;
7995 }
7996 } else {
7997 /*
7998 * Handle the trusted pager throttle.
7999 */
8000 if (m->vmp_laundry) {
8001 dwp->dw_mask |= DW_vm_pageout_throttle_up;
8002 }
8003
8004 if (upl->flags & UPL_ACCESS_BLOCKED) {
8005 /*
8006 * We blocked access to the pages in this UPL.
8007 * Clear the "busy" bit and wake up any waiter
8008 * for this page.
8009 */
8010 dwp->dw_mask |= DW_clear_busy;
8011 }
8012 if (m->vmp_overwriting) {
8013 if (m->vmp_busy) {
8014 dwp->dw_mask |= DW_clear_busy;
8015 } else {
8016 /*
8017 * deal with the 'alternate' method
8018 * of stabilizing the page...
8019 * we will either free the page
8020 * or mark 'busy' to be cleared
8021 * in the following code which will
8022 * take care of the primary stabilization
8023 * method (i.e. setting 'busy' to TRUE)
8024 */
8025 dwp->dw_mask |= DW_vm_page_unwire;
8026 }
8027 m->vmp_overwriting = FALSE;
8028 }
8029 m->vmp_free_when_done = FALSE;
8030 m->vmp_cleaning = FALSE;
8031
8032 if (error & UPL_ABORT_DUMP_PAGES) {
8033 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8034
8035 dwp->dw_mask |= DW_vm_page_free;
8036 } else {
8037 if (!(dwp->dw_mask & DW_vm_page_unwire)) {
8038 if (error & UPL_ABORT_REFERENCE) {
8039 /*
8040 * we've been told to explicitly
8041 * reference this page... for
8042 * file I/O, this is done by
8043 * implementing an LRU on the inactive q
8044 */
8045 dwp->dw_mask |= DW_vm_page_lru;
8046 } else if (!VM_PAGE_PAGEABLE(m)) {
8047 dwp->dw_mask |= DW_vm_page_deactivate_internal;
8048 }
8049 }
8050 dwp->dw_mask |= DW_PAGE_WAKEUP;
8051 }
8052 }
8053 }
8054 abort_next_page:
8055 target_offset += PAGE_SIZE_64;
8056 xfer_size -= PAGE_SIZE;
8057 entry++;
8058
8059 if (dwp->dw_mask) {
8060 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
8061 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
8062
8063 if (dw_count >= dw_limit) {
8064 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
8065
8066 dwp = &dw_array[0];
8067 dw_count = 0;
8068 }
8069 } else {
8070 if (dwp->dw_mask & DW_clear_busy) {
8071 m->vmp_busy = FALSE;
8072 }
8073
8074 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
8075 PAGE_WAKEUP(m);
8076 }
8077 }
8078 }
8079 }
8080 if (dw_count) {
8081 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
8082 }
8083
8084 occupied = 1;
8085
8086 if (upl->flags & UPL_DEVICE_MEMORY) {
8087 occupied = 0;
8088 } else if (upl->flags & UPL_LITE) {
8089 int pg_num;
8090 int i;
8091
8092 pg_num = upl->size / PAGE_SIZE;
8093 pg_num = (pg_num + 31) >> 5;
8094 occupied = 0;
8095
8096 for (i = 0; i < pg_num; i++) {
8097 if (lite_list[i] != 0) {
8098 occupied = 1;
8099 break;
8100 }
8101 }
8102 } else {
8103 if (vm_page_queue_empty(&upl->map_object->memq)) {
8104 occupied = 0;
8105 }
8106 }
8107 if (occupied == 0) {
8108 /*
8109 * If this UPL element belongs to a Vector UPL and is
8110 * empty, then this is the right function to deallocate
8111 * it. So go ahead and set the *empty variable. The flag
8112 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
8113 * should be considered relevant for the Vector UPL and
8114 * not the internal UPLs.
8115 */
8116 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
8117 *empty = TRUE;
8118 }
8119
8120 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
8121 /*
8122 * this is not a paging object
8123 * so we need to drop the paging reference
8124 * that was taken when we created the UPL
8125 * against this object
8126 */
8127 vm_object_activity_end(shadow_object);
8128 vm_object_collapse(shadow_object, 0, TRUE);
8129 } else {
8130 /*
8131 * we donated the paging reference to
8132 * the map object... vm_pageout_object_terminate
8133 * will drop this reference
8134 */
8135 }
8136 }
8137 vm_object_unlock(shadow_object);
8138 if (object != shadow_object) {
8139 vm_object_unlock(object);
8140 }
8141
8142 if (!isVectorUPL) {
8143 upl_unlock(upl);
8144 } else {
8145 /*
8146 * If we completed our operations on an UPL that is
8147 * part of a Vectored UPL and if empty is TRUE, then
8148 * we should go ahead and deallocate this UPL element.
8149 * Then we check if this was the last of the UPL elements
8150 * within that Vectored UPL. If so, set empty to TRUE
8151 * so that in ubc_upl_abort_range or ubc_upl_abort, we
8152 * can go ahead and deallocate the Vector UPL too.
8153 */
8154 if (*empty == TRUE) {
8155 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
8156 upl_deallocate(upl);
8157 }
8158 goto process_upl_to_abort;
8159 }
8160
8161 return KERN_SUCCESS;
8162 }
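/*
 * Summary of the abort flags handled above, as implemented by the loop:
 * for absent pages, UPL_ABORT_RESTART, UPL_ABORT_UNAVAILABLE and
 * UPL_ABORT_ERROR mark the page "unusual" and keep it around instead of
 * freeing it, while speculative read-ahead pages that nobody asked for
 * are freed regardless; for resident pages, UPL_ABORT_DUMP_PAGES
 * disconnects and frees the page, UPL_ABORT_REFERENCE keeps it warm via
 * the inactive LRU, and everything else simply deactivates it if it is
 * not already pageable.
 */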
8163
8164
8165 kern_return_t
8166 upl_abort(
8167 upl_t upl,
8168 int error)
8169 {
8170 boolean_t empty;
8171
8172 if (upl == UPL_NULL) {
8173 return KERN_INVALID_ARGUMENT;
8174 }
8175
8176 return upl_abort_range(upl, 0, upl->size, error, &empty);
8177 }
8178
8179
8180 /* an option on commit should be wire */
8181 kern_return_t
8182 upl_commit(
8183 upl_t upl,
8184 upl_page_info_t *page_list,
8185 mach_msg_type_number_t count)
8186 {
8187 boolean_t empty;
8188
8189 if (upl == UPL_NULL) {
8190 return KERN_INVALID_ARGUMENT;
8191 }
8192
8193 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
8194 }
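/*
 * upl_abort() and upl_commit() are convenience wrappers that operate on
 * the whole UPL (offset 0 through upl->size) and discard the *empty
 * result; except for the vector-UPL bookkeeping handled inside
 * upl_commit_range()/upl_abort_range(), the UPL itself is not
 * deallocated here.
 */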
8195
8196
8197 void
8198 iopl_valid_data(
8199 upl_t upl,
8200 vm_tag_t tag)
8201 {
8202 vm_object_t object;
8203 vm_offset_t offset;
8204 vm_page_t m, nxt_page = VM_PAGE_NULL;
8205 upl_size_t size;
8206 int wired_count = 0;
8207
8208 if (upl == NULL) {
8209 panic("iopl_valid_data: NULL upl");
8210 }
8211 if (vector_upl_is_valid(upl)) {
8212 panic("iopl_valid_data: vector upl");
8213 }
8214 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
8215 panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
8216 }
8217
8218 object = upl->map_object;
8219
8220 if (object == kernel_object || object == compressor_object) {
8221 panic("iopl_valid_data: object == kernel or compressor");
8222 }
8223
8224 if (object->purgable == VM_PURGABLE_VOLATILE ||
8225 object->purgable == VM_PURGABLE_EMPTY) {
8226 panic("iopl_valid_data: object %p purgable %d",
8227 object, object->purgable);
8228 }
8229
8230 size = upl->size;
8231
8232 vm_object_lock(object);
8233 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
8234
8235 if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
8236 nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
8237 } else {
8238 offset = 0 + upl->offset - object->paging_offset;
8239 }
8240
8241 while (size) {
8242 if (nxt_page != VM_PAGE_NULL) {
8243 m = nxt_page;
8244 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
8245 } else {
8246 m = vm_page_lookup(object, offset);
8247 offset += PAGE_SIZE;
8248
8249 if (m == VM_PAGE_NULL) {
8250 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
8251 }
8252 }
8253 if (m->vmp_busy) {
8254 if (!m->vmp_absent) {
8255 panic("iopl_valid_data: busy page w/o absent");
8256 }
8257
8258 if (m->vmp_pageq.next || m->vmp_pageq.prev) {
8259 panic("iopl_valid_data: busy+absent page on page queue");
8260 }
8261 if (m->vmp_reusable) {
8262 panic("iopl_valid_data: %p is reusable", m);
8263 }
8264
8265 m->vmp_absent = FALSE;
8266 m->vmp_dirty = TRUE;
8267 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
8268 assert(m->vmp_wire_count == 0);
8269 m->vmp_wire_count++;
8270 assert(m->vmp_wire_count);
8271 if (m->vmp_wire_count == 1) {
8272 m->vmp_q_state = VM_PAGE_IS_WIRED;
8273 wired_count++;
8274 } else {
8275 panic("iopl_valid_data: %p already wired\n", m);
8276 }
8277
8278 PAGE_WAKEUP_DONE(m);
8279 }
8280 size -= PAGE_SIZE;
8281 }
8282 if (wired_count) {
8283 VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
8284 assert(object->resident_page_count >= object->wired_page_count);
8285
8286 /* no need to adjust purgeable accounting for this object: */
8287 assert(object->purgable != VM_PURGABLE_VOLATILE);
8288 assert(object->purgable != VM_PURGABLE_EMPTY);
8289
8290 vm_page_lockspin_queues();
8291 vm_page_wire_count += wired_count;
8292 vm_page_unlock_queues();
8293 }
8294 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
8295 vm_object_unlock(object);
8296 }
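/*
 * Summary of the loop above: every page backing this IO-wire UPL that is
 * still busy (and therefore expected to be absent) is switched to present
 * and dirty, given its first wire reference, and woken up; the global
 * vm_page_wire_count and the object's wired-page accounting are then
 * updated once, outside the per-page loop.
 */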
8297
8298
8299 void
8300 vm_object_set_pmap_cache_attr(
8301 vm_object_t object,
8302 upl_page_info_array_t user_page_list,
8303 unsigned int num_pages,
8304 boolean_t batch_pmap_op)
8305 {
8306 unsigned int cache_attr = 0;
8307
8308 cache_attr = object->wimg_bits & VM_WIMG_MASK;
8309 assert(user_page_list);
8310 if (cache_attr != VM_WIMG_USE_DEFAULT) {
8311 PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
8312 }
8313 }
8314
8315
8316 boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
8317 kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
8318
8319
8320
8321 boolean_t
8322 vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8323 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
8324 {
8325 vm_page_t dst_page;
8326 unsigned int entry;
8327 int page_count;
8328 int delayed_unlock = 0;
8329 boolean_t retval = TRUE;
8330 ppnum_t phys_page;
8331
8332 vm_object_lock_assert_exclusive(object);
8333 assert(object->purgable != VM_PURGABLE_VOLATILE);
8334 assert(object->purgable != VM_PURGABLE_EMPTY);
8335 assert(object->pager == NULL);
8336 assert(object->copy == NULL);
8337 assert(object->shadow == NULL);
8338
8339 page_count = object->resident_page_count;
8340 dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
8341
8342 vm_page_lock_queues();
8343
8344 while (page_count--) {
8345 if (dst_page->vmp_busy ||
8346 dst_page->vmp_fictitious ||
8347 dst_page->vmp_absent ||
8348 dst_page->vmp_error ||
8349 dst_page->vmp_cleaning ||
8350 dst_page->vmp_restart ||
8351 dst_page->vmp_laundry) {
8352 retval = FALSE;
8353 goto done;
8354 }
8355 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
8356 retval = FALSE;
8357 goto done;
8358 }
8359 dst_page->vmp_reference = TRUE;
8360
8361 vm_page_wire(dst_page, tag, FALSE);
8362
8363 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8364 SET_PAGE_DIRTY(dst_page, FALSE);
8365 }
8366 entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
8367 assert(entry < object->resident_page_count);
8368 lite_list[entry >> 5] |= 1U << (entry & 31);
8369
8370 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8371
8372 if (phys_page > upl->highest_page) {
8373 upl->highest_page = phys_page;
8374 }
8375
8376 if (user_page_list) {
8377 user_page_list[entry].phys_addr = phys_page;
8378 user_page_list[entry].absent = dst_page->vmp_absent;
8379 user_page_list[entry].dirty = dst_page->vmp_dirty;
8380 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
8381 user_page_list[entry].precious = dst_page->vmp_precious;
8382 user_page_list[entry].device = FALSE;
8383 user_page_list[entry].speculative = FALSE;
8384 user_page_list[entry].cs_validated = FALSE;
8385 user_page_list[entry].cs_tainted = FALSE;
8386 user_page_list[entry].cs_nx = FALSE;
8387 user_page_list[entry].needed = FALSE;
8388 user_page_list[entry].mark = FALSE;
8389 }
8390 if (delayed_unlock++ > 256) {
8391 delayed_unlock = 0;
8392 lck_mtx_yield(&vm_page_queue_lock);
8393
8394 VM_CHECK_MEMORYSTATUS;
8395 }
8396 dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
8397 }
8398 done:
8399 vm_page_unlock_queues();
8400
8401 VM_CHECK_MEMORYSTATUS;
8402
8403 return retval;
8404 }
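/*
 * The "full" fast path above only succeeds when every resident page of
 * the object is in a simple state (not busy, fictitious, absent, in
 * error, being cleaned, restarted or on a laundry queue); it wires each
 * page, records it in the lite list and the optional page list, and
 * periodically yields the page-queues lock.  Returning FALSE tells the
 * caller to fall back to the regular slow path in
 * vm_object_iopl_request().
 */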
8405
8406
8407 kern_return_t
8408 vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
8409 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset,
8410 int page_count, int* page_grab_count)
8411 {
8412 vm_page_t dst_page;
8413 boolean_t no_zero_fill = FALSE;
8414 int interruptible;
8415 int pages_wired = 0;
8416 int pages_inserted = 0;
8417 int entry = 0;
8418 uint64_t delayed_ledger_update = 0;
8419 kern_return_t ret = KERN_SUCCESS;
8420 int grab_options;
8421 ppnum_t phys_page;
8422
8423 vm_object_lock_assert_exclusive(object);
8424 assert(object->purgable != VM_PURGABLE_VOLATILE);
8425 assert(object->purgable != VM_PURGABLE_EMPTY);
8426 assert(object->pager == NULL);
8427 assert(object->copy == NULL);
8428 assert(object->shadow == NULL);
8429
8430 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
8431 interruptible = THREAD_ABORTSAFE;
8432 } else {
8433 interruptible = THREAD_UNINT;
8434 }
8435
8436 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
8437 no_zero_fill = TRUE;
8438 }
8439
8440 grab_options = 0;
8441 #if CONFIG_SECLUDED_MEMORY
8442 if (object->can_grab_secluded) {
8443 grab_options |= VM_PAGE_GRAB_SECLUDED;
8444 }
8445 #endif /* CONFIG_SECLUDED_MEMORY */
8446
8447 while (page_count--) {
8448 while ((dst_page = vm_page_grab_options(grab_options))
8449 == VM_PAGE_NULL) {
8450 OSAddAtomic(page_count, &vm_upl_wait_for_pages);
8451
8452 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
8453
8454 if (vm_page_wait(interruptible) == FALSE) {
8455 /*
8456 * interrupted case
8457 */
8458 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8459
8460 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
8461
8462 ret = MACH_SEND_INTERRUPTED;
8463 goto done;
8464 }
8465 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8466
8467 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
8468 }
8469 if (no_zero_fill == FALSE) {
8470 vm_page_zero_fill(dst_page);
8471 } else {
8472 dst_page->vmp_absent = TRUE;
8473 }
8474
8475 dst_page->vmp_reference = TRUE;
8476
8477 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8478 SET_PAGE_DIRTY(dst_page, FALSE);
8479 }
8480 if (dst_page->vmp_absent == FALSE) {
8481 assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
8482 assert(dst_page->vmp_wire_count == 0);
8483 dst_page->vmp_wire_count++;
8484 dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
8485 assert(dst_page->vmp_wire_count);
8486 pages_wired++;
8487 PAGE_WAKEUP_DONE(dst_page);
8488 }
8489 pages_inserted++;
8490
8491 vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
8492
8493 lite_list[entry >> 5] |= 1U << (entry & 31);
8494
8495 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8496
8497 if (phys_page > upl->highest_page) {
8498 upl->highest_page = phys_page;
8499 }
8500
8501 if (user_page_list) {
8502 user_page_list[entry].phys_addr = phys_page;
8503 user_page_list[entry].absent = dst_page->vmp_absent;
8504 user_page_list[entry].dirty = dst_page->vmp_dirty;
8505 user_page_list[entry].free_when_done = FALSE;
8506 user_page_list[entry].precious = FALSE;
8507 user_page_list[entry].device = FALSE;
8508 user_page_list[entry].speculative = FALSE;
8509 user_page_list[entry].cs_validated = FALSE;
8510 user_page_list[entry].cs_tainted = FALSE;
8511 user_page_list[entry].cs_nx = FALSE;
8512 user_page_list[entry].needed = FALSE;
8513 user_page_list[entry].mark = FALSE;
8514 }
8515 entry++;
8516 *dst_offset += PAGE_SIZE_64;
8517 }
8518 done:
8519 if (pages_wired) {
8520 vm_page_lockspin_queues();
8521 vm_page_wire_count += pages_wired;
8522 vm_page_unlock_queues();
8523 }
8524 if (pages_inserted) {
8525 if (object->internal) {
8526 OSAddAtomic(pages_inserted, &vm_page_internal_count);
8527 } else {
8528 OSAddAtomic(pages_inserted, &vm_page_external_count);
8529 }
8530 }
8531 if (delayed_ledger_update) {
8532 task_t owner;
8533 int ledger_idx_volatile;
8534 int ledger_idx_nonvolatile;
8535 int ledger_idx_volatile_compressed;
8536 int ledger_idx_nonvolatile_compressed;
8537 boolean_t do_footprint;
8538
8539 owner = VM_OBJECT_OWNER(object);
8540 assert(owner);
8541
8542 vm_object_ledger_tag_ledgers(object,
8543 &ledger_idx_volatile,
8544 &ledger_idx_nonvolatile,
8545 &ledger_idx_volatile_compressed,
8546 &ledger_idx_nonvolatile_compressed,
8547 &do_footprint);
8548
8549 /* more non-volatile bytes */
8550 ledger_credit(owner->ledger,
8551 ledger_idx_nonvolatile,
8552 delayed_ledger_update);
8553 if (do_footprint) {
8554 /* more footprint */
8555 ledger_credit(owner->ledger,
8556 task_ledgers.phys_footprint,
8557 delayed_ledger_update);
8558 }
8559 }
8560
8561 assert(page_grab_count);
8562 *page_grab_count = pages_inserted;
8563
8564 return ret;
8565 }
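/*
 * The "empty" fast path above grabs brand-new pages for an object with no
 * resident pages at all: each page is zero-filled unless the caller asked
 * for UPL_NOZEROFILL/UPL_NOZEROFILLIO (in which case it is left absent
 * and busy for the upcoming I/O), wired unless absent, and inserted into
 * the object; wire counts, internal/external page counts and the owner's
 * non-volatile ledger are all settled once at the end rather than per
 * page.
 */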
8566
8567
8568
8569 kern_return_t
8570 vm_object_iopl_request(
8571 vm_object_t object,
8572 vm_object_offset_t offset,
8573 upl_size_t size,
8574 upl_t *upl_ptr,
8575 upl_page_info_array_t user_page_list,
8576 unsigned int *page_list_count,
8577 upl_control_flags_t cntrl_flags,
8578 vm_tag_t tag)
8579 {
8580 vm_page_t dst_page;
8581 vm_object_offset_t dst_offset;
8582 upl_size_t xfer_size;
8583 upl_t upl = NULL;
8584 unsigned int entry;
8585 wpl_array_t lite_list = NULL;
8586 int no_zero_fill = FALSE;
8587 unsigned int size_in_pages;
8588 int page_grab_count = 0;
8589 u_int32_t psize;
8590 kern_return_t ret;
8591 vm_prot_t prot;
8592 struct vm_object_fault_info fault_info = {};
8593 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
8594 struct vm_page_delayed_work *dwp;
8595 int dw_count;
8596 int dw_limit;
8597 int dw_index;
8598 boolean_t caller_lookup;
8599 int io_tracking_flag = 0;
8600 int interruptible;
8601 ppnum_t phys_page;
8602
8603 boolean_t set_cache_attr_needed = FALSE;
8604 boolean_t free_wired_pages = FALSE;
8605 boolean_t fast_path_empty_req = FALSE;
8606 boolean_t fast_path_full_req = FALSE;
8607
8608 #if DEVELOPMENT || DEBUG
8609 task_t task = current_task();
8610 #endif /* DEVELOPMENT || DEBUG */
8611
8612 if (cntrl_flags & ~UPL_VALID_FLAGS) {
8613 /*
8614 * For forward compatibility's sake,
8615 * reject any unknown flag.
8616 */
8617 return KERN_INVALID_VALUE;
8618 }
8619 if (vm_lopage_needed == FALSE) {
8620 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
8621 }
8622
8623 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
8624 if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
8625 return KERN_INVALID_VALUE;
8626 }
8627
8628 if (object->phys_contiguous) {
8629 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
8630 return KERN_INVALID_ADDRESS;
8631 }
8632
8633 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
8634 return KERN_INVALID_ADDRESS;
8635 }
8636 }
8637 }
8638 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
8639 no_zero_fill = TRUE;
8640 }
8641
8642 if (cntrl_flags & UPL_COPYOUT_FROM) {
8643 prot = VM_PROT_READ;
8644 } else {
8645 prot = VM_PROT_READ | VM_PROT_WRITE;
8646 }
8647
8648 if ((!object->internal) && (object->paging_offset != 0)) {
8649 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
8650 }
8651
8652 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
8653
8654 #if CONFIG_IOSCHED || UPL_DEBUG
8655 if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) {
8656 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
8657 }
8658 #endif
8659
8660 #if CONFIG_IOSCHED
8661 if (object->io_tracking) {
8662 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
8663 if (object != kernel_object) {
8664 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
8665 }
8666 }
8667 #endif
8668
8669 if (object->phys_contiguous) {
8670 psize = PAGE_SIZE;
8671 } else {
8672 psize = size;
8673 }
8674
8675 if (cntrl_flags & UPL_SET_INTERNAL) {
8676 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
8677
8678 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
8679 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
8680 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
8681 if (size == 0) {
8682 user_page_list = NULL;
8683 lite_list = NULL;
8684 }
8685 } else {
8686 upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
8687
8688 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
8689 if (size == 0) {
8690 lite_list = NULL;
8691 }
8692 }
8693 if (user_page_list) {
8694 user_page_list[0].device = FALSE;
8695 }
8696 *upl_ptr = upl;
8697
8698 if (cntrl_flags & UPL_NOZEROFILLIO) {
8699 DTRACE_VM4(upl_nozerofillio,
8700 vm_object_t, object,
8701 vm_object_offset_t, offset,
8702 upl_size_t, size,
8703 upl_t, upl);
8704 }
8705
8706 upl->map_object = object;
8707 upl->size = size;
8708
8709 size_in_pages = size / PAGE_SIZE;
8710
8711 if (object == kernel_object &&
8712 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
8713 upl->flags |= UPL_KERNEL_OBJECT;
8714 #if UPL_DEBUG
8715 vm_object_lock(object);
8716 #else
8717 vm_object_lock_shared(object);
8718 #endif
8719 } else {
8720 vm_object_lock(object);
8721 vm_object_activity_begin(object);
8722 }
8723 /*
8724 * paging in progress also protects the paging_offset
8725 */
8726 upl->offset = offset + object->paging_offset;
8727
8728 if (cntrl_flags & UPL_BLOCK_ACCESS) {
8729 /*
8730 * The user requested that access to the pages in this UPL
8731 * be blocked until the UPL is committed or aborted.
8732 */
8733 upl->flags |= UPL_ACCESS_BLOCKED;
8734 }
8735
8736 #if CONFIG_IOSCHED || UPL_DEBUG
8737 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
8738 vm_object_activity_begin(object);
8739 queue_enter(&object->uplq, upl, upl_t, uplq);
8740 }
8741 #endif
8742
8743 if (object->phys_contiguous) {
8744 if (upl->flags & UPL_ACCESS_BLOCKED) {
8745 assert(!object->blocked_access);
8746 object->blocked_access = TRUE;
8747 }
8748
8749 vm_object_unlock(object);
8750
8751 /*
8752 * don't need any shadow mappings for this one
8753 * since it is already I/O memory
8754 */
8755 upl->flags |= UPL_DEVICE_MEMORY;
8756
8757 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
8758
8759 if (user_page_list) {
8760 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
8761 user_page_list[0].device = TRUE;
8762 }
8763 if (page_list_count != NULL) {
8764 if (upl->flags & UPL_INTERNAL) {
8765 *page_list_count = 0;
8766 } else {
8767 *page_list_count = 1;
8768 }
8769 }
8770
8771 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
8772 #if DEVELOPMENT || DEBUG
8773 if (task != NULL) {
8774 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
8775 }
8776 #endif /* DEVELOPMENT || DEBUG */
8777 return KERN_SUCCESS;
8778 }
8779 if (object != kernel_object && object != compressor_object) {
8780 /*
8781 * Protect user space from future COW operations
8782 */
8783 #if VM_OBJECT_TRACKING_OP_TRUESHARE
8784 if (!object->true_share &&
8785 vm_object_tracking_inited) {
8786 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
8787 int num = 0;
8788
8789 num = OSBacktrace(bt,
8790 VM_OBJECT_TRACKING_BTDEPTH);
8791 btlog_add_entry(vm_object_tracking_btlog,
8792 object,
8793 VM_OBJECT_TRACKING_OP_TRUESHARE,
8794 bt,
8795 num);
8796 }
8797 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
8798
8799 vm_object_lock_assert_exclusive(object);
8800 object->true_share = TRUE;
8801
8802 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
8803 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
8804 }
8805 }
8806
8807 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
8808 object->copy != VM_OBJECT_NULL) {
8809 /*
8810 * Honor copy-on-write obligations
8811 *
8812 * The caller is gathering these pages and
8813 * might modify their contents. We need to
8814 * make sure that the copy object has its own
8815 * private copies of these pages before we let
8816 * the caller modify them.
8817 *
8818 * NOTE: someone else could map the original object
8819 * after we've done this copy-on-write here, and they
8820 * could then see an inconsistent picture of the memory
8821 * while it's being modified via the UPL. To prevent this,
8822 * we would have to block access to these pages until the
8823 * UPL is released. We could use the UPL_BLOCK_ACCESS
8824 * code path for that...
8825 */
8826 vm_object_update(object,
8827 offset,
8828 size,
8829 NULL,
8830 NULL,
8831 FALSE, /* should_return */
8832 MEMORY_OBJECT_COPY_SYNC,
8833 VM_PROT_NO_CHANGE);
8834 VM_PAGEOUT_DEBUG(iopl_cow, 1);
8835 VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
8836 }
8837 if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
8838 object->purgable != VM_PURGABLE_VOLATILE &&
8839 object->purgable != VM_PURGABLE_EMPTY &&
8840 object->copy == NULL &&
8841 size == object->vo_size &&
8842 offset == 0 &&
8843 object->shadow == NULL &&
8844 object->pager == NULL) {
8845 if (object->resident_page_count == size_in_pages) {
8846 assert(object != compressor_object);
8847 assert(object != kernel_object);
8848 fast_path_full_req = TRUE;
8849 } else if (object->resident_page_count == 0) {
8850 assert(object != compressor_object);
8851 assert(object != kernel_object);
8852 fast_path_empty_req = TRUE;
8853 set_cache_attr_needed = TRUE;
8854 }
8855 }
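	/*
	 * Fast-path selection: the request must cover the whole object
	 * from offset 0, with no copy object, shadow or pager, no
	 * 32-bit-DMA or access-blocking constraints, and no volatile or
	 * empty purgeable state.  If every page is already resident, the
	 * "full" wiring helper is tried first; if none are resident, the
	 * "empty" helper populates the object from scratch.  Anything in
	 * between (or a fast-path bail-out) goes through the general
	 * per-page loop below.
	 */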
8856
8857 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
8858 interruptible = THREAD_ABORTSAFE;
8859 } else {
8860 interruptible = THREAD_UNINT;
8861 }
8862
8863 entry = 0;
8864
8865 xfer_size = size;
8866 dst_offset = offset;
8867 dw_count = 0;
8868
8869 if (fast_path_full_req) {
8870 if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) {
8871 goto finish;
8872 }
8873 /*
8874 * we couldn't complete the processing of this request on the fast path
8875 * so fall through to the slow path and finish up
8876 */
8877 } else if (fast_path_empty_req) {
8878 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
8879 ret = KERN_MEMORY_ERROR;
8880 goto return_err;
8881 }
8882 ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
8883
8884 if (ret) {
8885 free_wired_pages = TRUE;
8886 goto return_err;
8887 }
8888 goto finish;
8889 }
8890
8891 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
8892 fault_info.lo_offset = offset;
8893 fault_info.hi_offset = offset + xfer_size;
8894 fault_info.mark_zf_absent = TRUE;
8895 fault_info.interruptible = interruptible;
8896 fault_info.batch_pmap_op = TRUE;
8897
8898 dwp = &dw_array[0];
8899 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
8900
8901 while (xfer_size) {
8902 vm_fault_return_t result;
8903
8904 dwp->dw_mask = 0;
8905
8906 if (fast_path_full_req) {
8907 /*
8908 * if we get here, it means that we ran into a page
8909 * state we couldn't handle in the fast path and
8910 * bailed out to the slow path... since the order
8911 * we look at pages is different between the 2 paths,
8912 * the following check is needed to determine whether
8913 * this page was already processed in the fast path
8914 */
8915 if (lite_list[entry >> 5] & (1U << (entry & 31))) {
8916 goto skip_page;
8917 }
8918 }
8919 dst_page = vm_page_lookup(object, dst_offset);
8920
8921 if (dst_page == VM_PAGE_NULL ||
8922 dst_page->vmp_busy ||
8923 dst_page->vmp_error ||
8924 dst_page->vmp_restart ||
8925 dst_page->vmp_absent ||
8926 dst_page->vmp_fictitious) {
8927 if (object == kernel_object) {
8928 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
8929 }
8930 if (object == compressor_object) {
8931 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
8932 }
8933
8934 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
8935 ret = KERN_MEMORY_ERROR;
8936 goto return_err;
8937 }
8938 set_cache_attr_needed = TRUE;
8939
8940 /*
8941 * We just looked up the page and the result remains valid
8942 * until the object lock is released, so send it to
8943 * vm_fault_page() (as "dst_page"), to avoid having to
8944 * look it up again there.
8945 */
8946 caller_lookup = TRUE;
8947
8948 do {
8949 vm_page_t top_page;
8950 kern_return_t error_code;
8951
8952 fault_info.cluster_size = xfer_size;
8953
8954 vm_object_paging_begin(object);
8955
8956 result = vm_fault_page(object, dst_offset,
8957 prot | VM_PROT_WRITE, FALSE,
8958 caller_lookup,
8959 &prot, &dst_page, &top_page,
8960 (int *)0,
8961 &error_code, no_zero_fill,
8962 FALSE, &fault_info);
8963
8964 /* our lookup is no longer valid at this point */
8965 caller_lookup = FALSE;
8966
8967 switch (result) {
8968 case VM_FAULT_SUCCESS:
8969 page_grab_count++;
8970
8971 if (!dst_page->vmp_absent) {
8972 PAGE_WAKEUP_DONE(dst_page);
8973 } else {
8974 /*
8975 * we only get back an absent page if we
8976 * requested that it not be zero-filled
8977 * because we are about to fill it via I/O
8978 *
8979 * absent pages should be left BUSY
8980 * to prevent them from being faulted
8981 * into an address space before we've
8982 * had a chance to complete the I/O on
8983 * them since they may contain info that
8984 * shouldn't be seen by the faulting task
8985 */
8986 }
8987 /*
8988 * Release paging references and
8989 * top-level placeholder page, if any.
8990 */
8991 if (top_page != VM_PAGE_NULL) {
8992 vm_object_t local_object;
8993
8994 local_object = VM_PAGE_OBJECT(top_page);
8995
8996 /*
8997 * comparing 2 packed pointers
8998 */
8999 if (top_page->vmp_object != dst_page->vmp_object) {
9000 vm_object_lock(local_object);
9001 VM_PAGE_FREE(top_page);
9002 vm_object_paging_end(local_object);
9003 vm_object_unlock(local_object);
9004 } else {
9005 VM_PAGE_FREE(top_page);
9006 vm_object_paging_end(local_object);
9007 }
9008 }
9009 vm_object_paging_end(object);
9010 break;
9011
9012 case VM_FAULT_RETRY:
9013 vm_object_lock(object);
9014 break;
9015
9016 case VM_FAULT_MEMORY_SHORTAGE:
9017 OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
9018
9019 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
9020
9021 if (vm_page_wait(interruptible)) {
9022 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9023
9024 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
9025 vm_object_lock(object);
9026
9027 break;
9028 }
9029 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
9030
9031 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
9032
9033 /* fall thru */
9034
9035 case VM_FAULT_INTERRUPTED:
9036 error_code = MACH_SEND_INTERRUPTED;
/* fall thru */
9037 case VM_FAULT_MEMORY_ERROR:
9038 memory_error:
9039 ret = (error_code ? error_code : KERN_MEMORY_ERROR);
9040
9041 vm_object_lock(object);
9042 goto return_err;
9043
9044 case VM_FAULT_SUCCESS_NO_VM_PAGE:
9045 /* success but no page: fail */
9046 vm_object_paging_end(object);
9047 vm_object_unlock(object);
9048 goto memory_error;
9049
9050 default:
9051 panic("vm_object_iopl_request: unexpected error"
9052 " 0x%x from vm_fault_page()\n", result);
9053 }
9054 } while (result != VM_FAULT_SUCCESS);
9055 }
9056 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9057
9058 if (upl->flags & UPL_KERNEL_OBJECT) {
9059 goto record_phys_addr;
9060 }
9061
9062 if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9063 dst_page->vmp_busy = TRUE;
9064 goto record_phys_addr;
9065 }
9066
9067 if (dst_page->vmp_cleaning) {
9068 /*
9069 * Someone else is cleaning this page in place.
9070 * In theory, we should be able to proceed and use this
9071 * page, but they'll probably end up clearing the "busy"
9072 * bit on it in upl_commit_range() even though they didn't set
9073 * it, so they would clear our "busy" bit and open
9074 * us to race conditions.
9075 * We'd better wait for the cleaning to complete and
9076 * then try again.
9077 */
9078 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
9079 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
9080 continue;
9081 }
9082 if (dst_page->vmp_laundry) {
9083 vm_pageout_steal_laundry(dst_page, FALSE);
9084 }
9085
9086 if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
9087 phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
9088 vm_page_t low_page;
9089 int refmod;
9090
9091 /*
9092 * support devices that can't DMA above 32 bits
9093 * by substituting pages from a pool of low address
9094 * memory for any pages we find above the 4G mark
9095 * can't substitute if the page is already wired because
9096 * we don't know whether that physical address has been
9097 * handed out to some other 64 bit capable DMA device to use
9098 */
9099 if (VM_PAGE_WIRED(dst_page)) {
9100 ret = KERN_PROTECTION_FAILURE;
9101 goto return_err;
9102 }
9103 low_page = vm_page_grablo();
9104
9105 if (low_page == VM_PAGE_NULL) {
9106 ret = KERN_RESOURCE_SHORTAGE;
9107 goto return_err;
9108 }
9109 /*
9110 * from here until the vm_page_replace completes
9111 * we mustn't drop the object lock... we don't
9112 * want anyone refaulting this page in and using
9113 * it after we disconnect it... we want the fault
9114 * to find the new page being substituted.
9115 */
9116 if (dst_page->vmp_pmapped) {
9117 refmod = pmap_disconnect(phys_page);
9118 } else {
9119 refmod = 0;
9120 }
9121
9122 if (!dst_page->vmp_absent) {
9123 vm_page_copy(dst_page, low_page);
9124 }
9125
9126 low_page->vmp_reference = dst_page->vmp_reference;
9127 low_page->vmp_dirty = dst_page->vmp_dirty;
9128 low_page->vmp_absent = dst_page->vmp_absent;
9129
9130 if (refmod & VM_MEM_REFERENCED) {
9131 low_page->vmp_reference = TRUE;
9132 }
9133 if (refmod & VM_MEM_MODIFIED) {
9134 SET_PAGE_DIRTY(low_page, FALSE);
9135 }
9136
9137 vm_page_replace(low_page, object, dst_offset);
9138
9139 dst_page = low_page;
9140 /*
9141 * vm_page_grablo returned the page marked
9142 * BUSY... we don't need a PAGE_WAKEUP_DONE
9143 * here, because we've never dropped the object lock
9144 */
9145 if (!dst_page->vmp_absent) {
9146 dst_page->vmp_busy = FALSE;
9147 }
9148
9149 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
9150 }
9151 if (!dst_page->vmp_busy) {
9152 dwp->dw_mask |= DW_vm_page_wire;
9153 }
9154
9155 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9156 /*
9157 * Mark the page "busy" to block any future page fault
9158 * on this page in addition to wiring it.
9159 * We'll also remove the mapping
9160 * of all these pages before leaving this routine.
9161 */
9162 assert(!dst_page->vmp_fictitious);
9163 dst_page->vmp_busy = TRUE;
9164 }
9165 /*
9166 * expect the page to be used
9167 * page queues lock must be held to set 'reference'
9168 */
9169 dwp->dw_mask |= DW_set_reference;
9170
9171 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
9172 SET_PAGE_DIRTY(dst_page, TRUE);
9173 /*
9174 * Page belonging to a code-signed object is about to
9175 * be written. Mark it tainted and disconnect it from
9176 * all pmaps so processes have to fault it back in and
9177 * deal with the tainted bit.
9178 */
9179 if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) {
9180 dst_page->vmp_cs_tainted = TRUE;
9181 vm_page_iopl_tainted++;
9182 if (dst_page->vmp_pmapped) {
9183 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
9184 if (refmod & VM_MEM_REFERENCED) {
9185 dst_page->vmp_reference = TRUE;
9186 }
9187 }
9188 }
9189 }
9190 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
9191 pmap_sync_page_attributes_phys(phys_page);
9192 dst_page->vmp_written_by_kernel = FALSE;
9193 }
9194
9195 record_phys_addr:
9196 if (dst_page->vmp_busy) {
9197 upl->flags |= UPL_HAS_BUSY;
9198 }
9199
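/* mark this page in the UPL's "lite" bitmap: one bit per page, packed 32 bits per word */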
9200 lite_list[entry >> 5] |= 1U << (entry & 31);
9201
9202 if (phys_page > upl->highest_page) {
9203 upl->highest_page = phys_page;
9204 }
9205
9206 if (user_page_list) {
9207 user_page_list[entry].phys_addr = phys_page;
9208 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
9209 user_page_list[entry].absent = dst_page->vmp_absent;
9210 user_page_list[entry].dirty = dst_page->vmp_dirty;
9211 user_page_list[entry].precious = dst_page->vmp_precious;
9212 user_page_list[entry].device = FALSE;
9213 user_page_list[entry].needed = FALSE;
9214 if (dst_page->vmp_clustered == TRUE) {
9215 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
9216 } else {
9217 user_page_list[entry].speculative = FALSE;
9218 }
9219 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
9220 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
9221 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
9222 user_page_list[entry].mark = FALSE;
9223 }
9224 if (object != kernel_object && object != compressor_object) {
9225 /*
9226 * someone is explicitly grabbing this page...
9227 * update clustered and speculative state
9228 *
9229 */
9230 if (dst_page->vmp_clustered) {
9231 VM_PAGE_CONSUME_CLUSTERED(dst_page);
9232 }
9233 }
9234 skip_page:
9235 entry++;
9236 dst_offset += PAGE_SIZE_64;
9237 xfer_size -= PAGE_SIZE;
9238
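/*
 * queue this page's wire/reference updates as delayed work; the batch
 * is flushed via vm_page_do_delayed_work() once it reaches dw_limit
 * entries, so page-queue manipulation is batched rather than done one
 * page at a time
 */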
9239 if (dwp->dw_mask) {
9240 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
9241
9242 if (dw_count >= dw_limit) {
9243 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
9244
9245 dwp = &dw_array[0];
9246 dw_count = 0;
9247 }
9248 }
9249 }
9250 assert(entry == size_in_pages);
9251
9252 if (dw_count) {
9253 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
9254 }
9255 finish:
9256 if (user_page_list && set_cache_attr_needed == TRUE) {
9257 vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
9258 }
9259
9260 if (page_list_count != NULL) {
9261 if (upl->flags & UPL_INTERNAL) {
9262 *page_list_count = 0;
9263 } else if (*page_list_count > size_in_pages) {
9264 *page_list_count = size_in_pages;
9265 }
9266 }
9267 vm_object_unlock(object);
9268
9269 if (cntrl_flags & UPL_BLOCK_ACCESS) {
9270 /*
9271 * We've marked all the pages "busy" so that future
9272 * page faults will block.
9273 * Now remove the mapping for these pages, so that they
9274 * can't be accessed without causing a page fault.
9275 */
9276 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
9277 PMAP_NULL, 0, VM_PROT_NONE);
9278 assert(!object->blocked_access);
9279 object->blocked_access = TRUE;
9280 }
9281
9282 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
9283 #if DEVELOPMENT || DEBUG
9284 if (task != NULL) {
9285 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9286 }
9287 #endif /* DEVELOPMENT || DEBUG */
9288 return KERN_SUCCESS;
9289
9290 return_err:
9291 dw_index = 0;
9292
9293 for (; offset < dst_offset; offset += PAGE_SIZE) {
9294 boolean_t need_unwire;
9295
9296 dst_page = vm_page_lookup(object, offset);
9297
9298 if (dst_page == VM_PAGE_NULL) {
9299 panic("vm_object_iopl_request: Wired page missing.\n");
9300 }
9301
9302 /*
9303 * If we've already processed this page in an earlier
9304 * dw_do_work, we need to undo the wiring.  We leave
9305 * the dirty and reference bits alone if they were set,
9306 * since we have no good way of knowing their previous
9307 * state and we won't get here under any normal
9308 * circumstances.  We always clear BUSY and wake up
9309 * any waiters, via either vm_page_free or
9310 * PAGE_WAKEUP_DONE.
9311 */
9312 need_unwire = TRUE;
9313
9314 if (dw_count) {
9315 if (dw_array[dw_index].dw_m == dst_page) {
9316 /*
9317 * still in the deferred work list
9318 * which means we haven't yet called
9319 * vm_page_wire on this page
9320 */
9321 need_unwire = FALSE;
9322
9323 dw_index++;
9324 dw_count--;
9325 }
9326 }
9327 vm_page_lock_queues();
9328
9329 if (dst_page->vmp_absent || free_wired_pages == TRUE) {
9330 vm_page_free(dst_page);
9331
9332 need_unwire = FALSE;
9333 } else {
9334 if (need_unwire == TRUE) {
9335 vm_page_unwire(dst_page, TRUE);
9336 }
9337
9338 PAGE_WAKEUP_DONE(dst_page);
9339 }
9340 vm_page_unlock_queues();
9341
9342 if (need_unwire == TRUE) {
9343 VM_STAT_INCR(reactivations);
9344 }
9345 }
9346 #if UPL_DEBUG
9347 upl->upl_state = 2;
9348 #endif
9349 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
9350 vm_object_activity_end(object);
9351 vm_object_collapse(object, 0, TRUE);
9352 }
9353 vm_object_unlock(object);
9354 upl_destroy(upl);
9355
9356 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
9357 #if DEVELOPMENT || DEBUG
9358 if (task != NULL) {
9359 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
9360 }
9361 #endif /* DEVELOPMENT || DEBUG */
9362 return ret;
9363 }
9364
9365 kern_return_t
9366 upl_transpose(
9367 upl_t upl1,
9368 upl_t upl2)
9369 {
9370 kern_return_t retval;
9371 boolean_t upls_locked;
9372 vm_object_t object1, object2;
9373
9374 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
9375 return KERN_INVALID_ARGUMENT;
9376 }
9377
9378 upls_locked = FALSE;
9379
9380 /*
9381 * Since we need to lock both UPLs at the same time,
9382 * avoid deadlocks by always taking locks in the same order.
9383 */
9384 if (upl1 < upl2) {
9385 upl_lock(upl1);
9386 upl_lock(upl2);
9387 } else {
9388 upl_lock(upl2);
9389 upl_lock(upl1);
9390 }
9391 upls_locked = TRUE; /* the UPLs will need to be unlocked */
9392
9393 object1 = upl1->map_object;
9394 object2 = upl2->map_object;
9395
9396 if (upl1->offset != 0 || upl2->offset != 0 ||
9397 upl1->size != upl2->size) {
9398 /*
9399 * We deal only with full objects, not subsets.
9400 * That's because we exchange the entire backing store info
9401 * for the objects: pager, resident pages, etc... We can't do
9402 * only part of it.
9403 */
9404 retval = KERN_INVALID_VALUE;
9405 goto done;
9406 }
9407
9408 /*
9409 * Transpose the VM objects' backing store.
9410 */
9411 retval = vm_object_transpose(object1, object2,
9412 (vm_object_size_t) upl1->size);
9413
9414 if (retval == KERN_SUCCESS) {
9415 /*
9416 * Make each UPL point to the correct VM object, i.e. the
9417 * object holding the pages that the UPL refers to...
9418 */
9419 #if CONFIG_IOSCHED || UPL_DEBUG
9420 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9421 vm_object_lock(object1);
9422 vm_object_lock(object2);
9423 }
9424 if (upl1->flags & UPL_TRACKED_BY_OBJECT) {
9425 queue_remove(&object1->uplq, upl1, upl_t, uplq);
9426 }
9427 if (upl2->flags & UPL_TRACKED_BY_OBJECT) {
9428 queue_remove(&object2->uplq, upl2, upl_t, uplq);
9429 }
9430 #endif
9431 upl1->map_object = object2;
9432 upl2->map_object = object1;
9433
9434 #if CONFIG_IOSCHED || UPL_DEBUG
9435 if (upl1->flags & UPL_TRACKED_BY_OBJECT) {
9436 queue_enter(&object2->uplq, upl1, upl_t, uplq);
9437 }
9438 if (upl2->flags & UPL_TRACKED_BY_OBJECT) {
9439 queue_enter(&object1->uplq, upl2, upl_t, uplq);
9440 }
9441 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
9442 vm_object_unlock(object2);
9443 vm_object_unlock(object1);
9444 }
9445 #endif
9446 }
9447
9448 done:
9449 /*
9450 * Cleanup.
9451 */
9452 if (upls_locked) {
9453 upl_unlock(upl1);
9454 upl_unlock(upl2);
9455 upls_locked = FALSE;
9456 }
9457
9458 return retval;
9459 }
9460
9461 void
9462 upl_range_needed(
9463 upl_t upl,
9464 int index,
9465 int count)
9466 {
9467 upl_page_info_t *user_page_list;
9468 int size_in_pages;
9469
9470 if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
9471 return;
9472 }
9473
9474 size_in_pages = upl->size / PAGE_SIZE;
9475
9476 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9477
9478 while (count-- && index < size_in_pages) {
9479 user_page_list[index++].needed = TRUE;
9480 }
9481 }
9482
9483
9484 /*
9485 * Reserve of virtual addresses in the kernel address space.
9486 * We need to map the physical pages in the kernel, so that we
9487 * can call the code-signing or slide routines with a kernel
9488 * virtual address. We keep this pool of pre-allocated kernel
9489 * virtual addresses so that we don't have to scan the kernel's
9490 * virtual address space each time we need to work with
9491 * a physical page.
9492 */
9493 decl_simple_lock_data(, vm_paging_lock);
9494 #define VM_PAGING_NUM_PAGES 64
9495 vm_map_offset_t vm_paging_base_address = 0;
9496 boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
9497 int vm_paging_max_index = 0;
9498 int vm_paging_page_waiter = 0;
9499 int vm_paging_page_waiter_total = 0;
9500
9501 unsigned long vm_paging_no_kernel_page = 0;
9502 unsigned long vm_paging_objects_mapped = 0;
9503 unsigned long vm_paging_pages_mapped = 0;
9504 unsigned long vm_paging_objects_mapped_slow = 0;
9505 unsigned long vm_paging_pages_mapped_slow = 0;
9506
9507 void
9508 vm_paging_map_init(void)
9509 {
9510 kern_return_t kr;
9511 vm_map_offset_t page_map_offset;
9512 vm_map_entry_t map_entry;
9513
9514 assert(vm_paging_base_address == 0);
9515
9516 /*
9517 * Initialize our pool of pre-allocated kernel
9518 * virtual addresses.
9519 */
9520 page_map_offset = 0;
9521 kr = vm_map_find_space(kernel_map,
9522 &page_map_offset,
9523 VM_PAGING_NUM_PAGES * PAGE_SIZE,
9524 0,
9525 0,
9526 VM_MAP_KERNEL_FLAGS_NONE,
9527 VM_KERN_MEMORY_NONE,
9528 &map_entry);
9529 if (kr != KERN_SUCCESS) {
9530 panic("vm_paging_map_init: kernel_map full\n");
9531 }
9532 VME_OBJECT_SET(map_entry, kernel_object);
9533 VME_OFFSET_SET(map_entry, page_map_offset);
9534 map_entry->protection = VM_PROT_NONE;
9535 map_entry->max_protection = VM_PROT_NONE;
9536 map_entry->permanent = TRUE;
9537 vm_object_reference(kernel_object);
9538 vm_map_unlock(kernel_map);
9539
9540 assert(vm_paging_base_address == 0);
9541 vm_paging_base_address = page_map_offset;
9542 }
9543
9544 /*
9545 * vm_paging_map_object:
9546 * Maps part of a VM object's pages in the kernel
9547 * virtual address space, using the pre-allocated
9548 * kernel virtual addresses, if possible.
9549 * Context:
9550 * The VM object is locked. This lock will get
9551 * dropped and re-acquired though, so the caller
9552 * must make sure the VM object is kept alive
9553 * (by holding a VM map that has a reference
9554 * on it, for example, or taking an extra reference).
9555 * The page should also be kept busy to prevent
9556 * it from being reclaimed.
9557 */
9558 kern_return_t
9559 vm_paging_map_object(
9560 vm_page_t page,
9561 vm_object_t object,
9562 vm_object_offset_t offset,
9563 vm_prot_t protection,
9564 boolean_t can_unlock_object,
9565 vm_map_size_t *size, /* IN/OUT */
9566 vm_map_offset_t *address, /* OUT */
9567 boolean_t *need_unmap) /* OUT */
9568 {
9569 kern_return_t kr;
9570 vm_map_offset_t page_map_offset;
9571 vm_map_size_t map_size;
9572 vm_object_offset_t object_offset;
9573 int i;
9574
9575 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
9576 /* use the permanent 1-to-1 kernel mapping of physical memory? */
9577 *address = (vm_map_offset_t)
9578 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
9579 *need_unmap = FALSE;
9580 return KERN_SUCCESS;
9581
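/*
 * NOTE: the unconditional return above means the pre-allocated-pool
 * path below is not reached as written; it appears to be retained from
 * configurations where a permanent 1-to-1 physical mapping was not
 * always available.
 */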
9582 assert(page->vmp_busy);
9583 /*
9584 * Use one of the pre-allocated kernel virtual addresses
9585 * and just enter the VM page in the kernel address space
9586 * at that virtual address.
9587 */
9588 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9589
9590 /*
9591 * Try and find an available kernel virtual address
9592 * from our pre-allocated pool.
9593 */
9594 page_map_offset = 0;
9595 for (;;) {
9596 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
9597 if (vm_paging_page_inuse[i] == FALSE) {
9598 page_map_offset =
9599 vm_paging_base_address +
9600 (i * PAGE_SIZE);
9601 break;
9602 }
9603 }
9604 if (page_map_offset != 0) {
9605 /* found a space to map our page ! */
9606 break;
9607 }
9608
9609 if (can_unlock_object) {
9610 /*
9611 * If we can afford to unlock the VM object,
9612 * let's take the slow path now...
9613 */
9614 break;
9615 }
9616 /*
9617 * We can't afford to unlock the VM object, so
9618 * let's wait for a space to become available...
9619 */
9620 vm_paging_page_waiter_total++;
9621 vm_paging_page_waiter++;
9622 kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
9623 if (kr == THREAD_WAITING) {
9624 simple_unlock(&vm_paging_lock);
9625 kr = thread_block(THREAD_CONTINUE_NULL);
9626 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9627 }
9628 vm_paging_page_waiter--;
9629 /* ... and try again */
9630 }
9631
9632 if (page_map_offset != 0) {
9633 /*
9634 * We found a kernel virtual address;
9635 * map the physical page to that virtual address.
9636 */
9637 if (i > vm_paging_max_index) {
9638 vm_paging_max_index = i;
9639 }
9640 vm_paging_page_inuse[i] = TRUE;
9641 simple_unlock(&vm_paging_lock);
9642
9643 page->vmp_pmapped = TRUE;
9644
9645 /*
9646 * Keep the VM object locked over the PMAP_ENTER
9647 * and the actual use of the page by the kernel,
9648 * or this pmap mapping might get undone by a
9649 * vm_object_pmap_protect() call...
9650 */
9651 PMAP_ENTER(kernel_pmap,
9652 page_map_offset,
9653 page,
9654 protection,
9655 VM_PROT_NONE,
9656 0,
9657 TRUE,
9658 kr);
9659 assert(kr == KERN_SUCCESS);
9660 vm_paging_objects_mapped++;
9661 vm_paging_pages_mapped++;
9662 *address = page_map_offset;
9663 *need_unmap = TRUE;
9664
9665 #if KASAN
9666 kasan_notify_address(page_map_offset, PAGE_SIZE);
9667 #endif
9668
9669 /* all done and mapped, ready to use ! */
9670 return KERN_SUCCESS;
9671 }
9672
9673 /*
9674 * We ran out of pre-allocated kernel virtual
9675 * addresses. Just map the page in the kernel
9676 * the slow and regular way.
9677 */
9678 vm_paging_no_kernel_page++;
9679 simple_unlock(&vm_paging_lock);
9680 }
9681
9682 if (!can_unlock_object) {
9683 *address = 0;
9684 *size = 0;
9685 *need_unmap = FALSE;
9686 return KERN_NOT_SUPPORTED;
9687 }
9688
9689 object_offset = vm_object_trunc_page(offset);
9690 map_size = vm_map_round_page(*size,
9691 VM_MAP_PAGE_MASK(kernel_map));
9692
9693 /*
9694 * Try and map the required range of the object
9695 * in the kernel_map
9696 */
9697
9698 vm_object_reference_locked(object); /* for the map entry */
9699 vm_object_unlock(object);
9700
9701 kr = vm_map_enter(kernel_map,
9702 address,
9703 map_size,
9704 0,
9705 VM_FLAGS_ANYWHERE,
9706 VM_MAP_KERNEL_FLAGS_NONE,
9707 VM_KERN_MEMORY_NONE,
9708 object,
9709 object_offset,
9710 FALSE,
9711 protection,
9712 VM_PROT_ALL,
9713 VM_INHERIT_NONE);
9714 if (kr != KERN_SUCCESS) {
9715 *address = 0;
9716 *size = 0;
9717 *need_unmap = FALSE;
9718 vm_object_deallocate(object); /* for the map entry */
9719 vm_object_lock(object);
9720 return kr;
9721 }
9722
9723 *size = map_size;
9724
9725 /*
9726 * Enter the mapped pages in the page table now.
9727 */
9728 vm_object_lock(object);
9729 /*
9730 * VM object must be kept locked from before PMAP_ENTER()
9731 * until after the kernel is done accessing the page(s).
9732 * Otherwise, the pmap mappings in the kernel could be
9733 * undone by a call to vm_object_pmap_protect().
9734 */
9735
9736 for (page_map_offset = 0;
9737 map_size != 0;
9738 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
9739 page = vm_page_lookup(object, offset + page_map_offset);
9740 if (page == VM_PAGE_NULL) {
9741 printf("vm_paging_map_object: no page!?\n");
9742 vm_object_unlock(object);
9743 kr = vm_map_remove(kernel_map, *address, *size,
9744 VM_MAP_REMOVE_NO_FLAGS);
9745 assert(kr == KERN_SUCCESS);
9746 *address = 0;
9747 *size = 0;
9748 *need_unmap = FALSE;
9749 vm_object_lock(object);
9750 return KERN_MEMORY_ERROR;
9751 }
9752 page->vmp_pmapped = TRUE;
9753
9754 //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
9755 PMAP_ENTER(kernel_pmap,
9756 *address + page_map_offset,
9757 page,
9758 protection,
9759 VM_PROT_NONE,
9760 0,
9761 TRUE,
9762 kr);
9763 assert(kr == KERN_SUCCESS);
9764 #if KASAN
9765 kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
9766 #endif
9767 }
9768
9769 vm_paging_objects_mapped_slow++;
9770 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
9771
9772 *need_unmap = TRUE;
9773
9774 return KERN_SUCCESS;
9775 }
9776
9777 /*
9778 * vm_paging_unmap_object:
9779 * Unmaps part of a VM object's pages from the kernel
9780 * virtual address space.
9781 * Context:
9782 * The VM object is locked. This lock will get
9783 * dropped and re-acquired though.
9784 */
9785 void
9786 vm_paging_unmap_object(
9787 vm_object_t object,
9788 vm_map_offset_t start,
9789 vm_map_offset_t end)
9790 {
9791 kern_return_t kr;
9792 int i;
9793
9794 if ((vm_paging_base_address == 0) ||
9795 (start < vm_paging_base_address) ||
9796 (end > (vm_paging_base_address
9797 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
9798 /*
9799 * We didn't use our pre-allocated pool of
9800 * kernel virtual addresses. Deallocate the
9801 * virtual memory.
9802 */
9803 if (object != VM_OBJECT_NULL) {
9804 vm_object_unlock(object);
9805 }
9806 kr = vm_map_remove(kernel_map, start, end,
9807 VM_MAP_REMOVE_NO_FLAGS);
9808 if (object != VM_OBJECT_NULL) {
9809 vm_object_lock(object);
9810 }
9811 assert(kr == KERN_SUCCESS);
9812 } else {
9813 /*
9814 * We used a kernel virtual address from our
9815 * pre-allocated pool. Put it back in the pool
9816 * for next time.
9817 */
9818 assert(end - start == PAGE_SIZE);
9819 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
9820 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
9821
9822 /* undo the pmap mapping */
9823 pmap_remove(kernel_pmap, start, end);
9824
9825 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9826 vm_paging_page_inuse[i] = FALSE;
9827 if (vm_paging_page_waiter) {
9828 thread_wakeup(&vm_paging_page_waiter);
9829 }
9830 simple_unlock(&vm_paging_lock);
9831 }
9832 }
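/*
 * Hypothetical usage sketch (not part of xnu): how a caller holding the
 * object lock, with a busy page, might borrow a kernel virtual address
 * for that page via vm_paging_map_object() and hand it back with
 * vm_paging_unmap_object().  "example_peek_first_byte" and its
 * parameters are invented for illustration; it relies on the
 * declarations already present in this file.
 */
static kern_return_t
example_peek_first_byte(
	vm_object_t             object,         /* locked by the caller */
	vm_page_t               page,           /* busy, belongs to object */
	vm_object_offset_t      offset,
	unsigned char           *out)
{
	vm_map_size_t           map_size = PAGE_SIZE;
	vm_map_offset_t         kva = 0;
	boolean_t               need_unmap = FALSE;
	kern_return_t           kr;

	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
	    FALSE,                              /* can_unlock_object */
	    &map_size, &kva, &need_unmap);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*out = *(unsigned char *)kva;

	if (need_unmap) {
		/* return the kernel VA to the pool (or undo the slow-path mapping) */
		vm_paging_unmap_object(object, kva, kva + map_size);
	}
	return KERN_SUCCESS;
}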
9833
9834
9835 /*
9836 * page->vmp_object must be locked
9837 */
9838 void
9839 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
9840 {
9841 if (!queues_locked) {
9842 vm_page_lockspin_queues();
9843 }
9844
9845 page->vmp_free_when_done = FALSE;
9846 /*
9847 * need to drop the laundry count...
9848 * we may also need to remove it
9849 * from the I/O paging queue...
9850 * vm_pageout_throttle_up handles both cases
9851 *
9852 * the laundry and pageout_queue flags are cleared...
9853 */
9854 vm_pageout_throttle_up(page);
9855
9856 if (!queues_locked) {
9857 vm_page_unlock_queues();
9858 }
9859 }
9860
9861 upl_t
9862 vector_upl_create(vm_offset_t upl_offset)
9863 {
9864 int vector_upl_size = sizeof(struct _vector_upl);
9865 int i = 0;
9866 upl_t upl;
9867 vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
9868
9869 upl = upl_create(0, UPL_VECTOR, 0);
9870 upl->vector_upl = vector_upl;
9871 upl->offset = upl_offset;
9872 vector_upl->size = 0;
9873 vector_upl->offset = upl_offset;
9874 vector_upl->invalid_upls = 0;
9875 vector_upl->num_upls = 0;
9876 vector_upl->pagelist = NULL;
9877
9878 for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
9879 vector_upl->upl_iostates[i].size = 0;
9880 vector_upl->upl_iostates[i].offset = 0;
9881 }
9882 return upl;
9883 }
9884
9885 void
9886 vector_upl_deallocate(upl_t upl)
9887 {
9888 if (upl) {
9889 vector_upl_t vector_upl = upl->vector_upl;
9890 if (vector_upl) {
9891 if (vector_upl->invalid_upls != vector_upl->num_upls) {
9892 panic("Deallocating non-empty Vectored UPL\n");
9893 }
9894 kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
9895 vector_upl->invalid_upls = 0;
9896 vector_upl->num_upls = 0;
9897 vector_upl->pagelist = NULL;
9898 vector_upl->size = 0;
9899 vector_upl->offset = 0;
9900 kfree(vector_upl, sizeof(struct _vector_upl));
9901 vector_upl = (vector_upl_t)0xfeedfeed;
9902 } else {
9903 panic("vector_upl_deallocate was passed a non-vectored upl\n");
9904 }
9905 } else {
9906 panic("vector_upl_deallocate was passed a NULL upl\n");
9907 }
9908 }
9909
9910 boolean_t
9911 vector_upl_is_valid(upl_t upl)
9912 {
9913 if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
9914 vector_upl_t vector_upl = upl->vector_upl;
9915 if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
9916 return FALSE;
9917 } else {
9918 return TRUE;
9919 }
9920 }
9921 return FALSE;
9922 }
9923
9924 boolean_t
9925 vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
9926 {
9927 if (vector_upl_is_valid(upl)) {
9928 vector_upl_t vector_upl = upl->vector_upl;
9929
9930 if (vector_upl) {
9931 if (subupl) {
9932 if (io_size) {
9933 if (io_size < PAGE_SIZE) {
9934 io_size = PAGE_SIZE;
9935 }
9936 subupl->vector_upl = (void*)vector_upl;
9937 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
9938 vector_upl->size += io_size;
9939 upl->size += io_size;
9940 } else {
9941 uint32_t i = 0, invalid_upls = 0;
9942 for (i = 0; i < vector_upl->num_upls; i++) {
9943 if (vector_upl->upl_elems[i] == subupl) {
9944 break;
9945 }
9946 }
9947 if (i == vector_upl->num_upls) {
9948 panic("Trying to remove sub-upl when none exists");
9949 }
9950
9951 vector_upl->upl_elems[i] = NULL;
9952 invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls,
9953 relaxed);
9954 if (invalid_upls == vector_upl->num_upls) {
9955 return TRUE;
9956 } else {
9957 return FALSE;
9958 }
9959 }
9960 } else {
9961 panic("vector_upl_set_subupl was passed a NULL upl element\n");
9962 }
9963 } else {
9964 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
9965 }
9966 } else {
9967 panic("vector_upl_set_subupl was passed a NULL upl\n");
9968 }
9969
9970 return FALSE;
9971 }
9972
9973 void
9974 vector_upl_set_pagelist(upl_t upl)
9975 {
9976 if (vector_upl_is_valid(upl)) {
9977 uint32_t i = 0;
9978 vector_upl_t vector_upl = upl->vector_upl;
9979
9980 if (vector_upl) {
9981 vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
9982
9983 vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));
9984
9985 for (i = 0; i < vector_upl->num_upls; i++) {
9986 cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size / PAGE_SIZE;
9987 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
9988 pagelist_size += cur_upl_pagelist_size;
9989 if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) {
9990 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
9991 }
9992 }
9993 assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
9994 } else {
9995 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
9996 }
9997 } else {
9998 panic("vector_upl_set_pagelist was passed a NULL upl\n");
9999 }
10000 }
10001
10002 upl_t
10003 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
10004 {
10005 if (vector_upl_is_valid(upl)) {
10006 vector_upl_t vector_upl = upl->vector_upl;
10007 if (vector_upl) {
10008 if (index < vector_upl->num_upls) {
10009 return vector_upl->upl_elems[index];
10010 }
10011 } else {
10012 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
10013 }
10014 }
10015 return NULL;
10016 }
10017
10018 upl_t
10019 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
10020 {
10021 if (vector_upl_is_valid(upl)) {
10022 uint32_t i = 0;
10023 vector_upl_t vector_upl = upl->vector_upl;
10024
10025 if (vector_upl) {
10026 upl_t subupl = NULL;
10027 vector_upl_iostates_t subupl_state;
10028
10029 for (i = 0; i < vector_upl->num_upls; i++) {
10030 subupl = vector_upl->upl_elems[i];
10031 subupl_state = vector_upl->upl_iostates[i];
10032 if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
10033 /* We could have been passed an offset/size pair that belongs
10034 * to a UPL element that has already been committed/aborted.
10035 * If so, return NULL.
10036 */
10037 if (subupl == NULL) {
10038 return NULL;
10039 }
10040 if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
10041 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
10042 if (*upl_size > subupl_state.size) {
10043 *upl_size = subupl_state.size;
10044 }
10045 }
10046 if (*upl_offset >= subupl_state.offset) {
10047 *upl_offset -= subupl_state.offset;
10048 } else if (i) {
10049 panic("Vector UPL offset miscalculation\n");
10050 }
10051 return subupl;
10052 }
10053 }
10054 } else {
10055 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
10056 }
10057 }
10058 return NULL;
10059 }
10060
10061 void
10062 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
10063 {
10064 *v_upl_submap = NULL;
10065
10066 if (vector_upl_is_valid(upl)) {
10067 vector_upl_t vector_upl = upl->vector_upl;
10068 if (vector_upl) {
10069 *v_upl_submap = vector_upl->submap;
10070 *submap_dst_addr = vector_upl->submap_dst_addr;
10071 } else {
10072 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
10073 }
10074 } else {
10075 panic("vector_upl_get_submap was passed a null UPL\n");
10076 }
10077 }
10078
10079 void
10080 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
10081 {
10082 if (vector_upl_is_valid(upl)) {
10083 vector_upl_t vector_upl = upl->vector_upl;
10084 if (vector_upl) {
10085 vector_upl->submap = submap;
10086 vector_upl->submap_dst_addr = submap_dst_addr;
10087 } else {
10088 panic("vector_upl_set_submap was passed a non-vectored UPL\n");
10089 }
10090 } else {
10091 panic("vector_upl_set_submap was passed a NULL UPL\n");
10092 }
10093 }
10094
10095 void
10096 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
10097 {
10098 if (vector_upl_is_valid(upl)) {
10099 uint32_t i = 0;
10100 vector_upl_t vector_upl = upl->vector_upl;
10101
10102 if (vector_upl) {
10103 for (i = 0; i < vector_upl->num_upls; i++) {
10104 if (vector_upl->upl_elems[i] == subupl) {
10105 break;
10106 }
10107 }
10108
10109 if (i == vector_upl->num_upls) {
10110 panic("setting sub-upl iostate when none exists");
10111 }
10112
10113 vector_upl->upl_iostates[i].offset = offset;
10114 if (size < PAGE_SIZE) {
10115 size = PAGE_SIZE;
10116 }
10117 vector_upl->upl_iostates[i].size = size;
10118 } else {
10119 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
10120 }
10121 } else {
10122 panic("vector_upl_set_iostate was passed a NULL UPL\n");
10123 }
10124 }
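/*
 * Hypothetical usage sketch (not part of xnu): gathering a set of
 * already created sub-UPLs that cover one contiguous I/O range into a
 * single vector UPL using the helpers above.  "example_gather_upls"
 * and its parameters are invented; it assumes nsubupls does not exceed
 * MAX_VECTOR_UPL_ELEMENTS and that each sub-UPL carries an internal
 * page list (required by vector_upl_set_pagelist()).
 */
static upl_t
example_gather_upls(
	vm_offset_t     base_offset,
	upl_t           *subupls,
	upl_size_t      *sub_sizes,
	uint32_t        nsubupls)
{
	upl_t           vupl;
	upl_offset_t    cur_offset = 0;
	uint32_t        i;

	vupl = vector_upl_create(base_offset);

	for (i = 0; i < nsubupls; i++) {
		/* grow the vector UPL and record each element's I/O window */
		vector_upl_set_subupl(vupl, subupls[i], sub_sizes[i]);
		vector_upl_set_iostate(vupl, subupls[i], cur_offset, sub_sizes[i]);
		cur_offset += sub_sizes[i];
	}
	/* merge the sub-UPLs' page lists into the vector UPL's own list */
	vector_upl_set_pagelist(vupl);

	return vupl;
}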
10125
10126 void
10127 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
10128 {
10129 if (vector_upl_is_valid(upl)) {
10130 uint32_t i = 0;
10131 vector_upl_t vector_upl = upl->vector_upl;
10132
10133 if (vector_upl) {
10134 for (i = 0; i < vector_upl->num_upls; i++) {
10135 if (vector_upl->upl_elems[i] == subupl) {
10136 break;
10137 }
10138 }
10139
10140 if (i == vector_upl->num_upls) {
10141 panic("getting sub-upl iostate when none exists");
10142 }
10143
10144 *offset = vector_upl->upl_iostates[i].offset;
10145 *size = vector_upl->upl_iostates[i].size;
10146 } else {
10147 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
10148 }
10149 } else {
10150 panic("vector_upl_get_iostate was passed a NULL UPL\n");
10151 }
10152 }
10153
10154 void
10155 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
10156 {
10157 if (vector_upl_is_valid(upl)) {
10158 vector_upl_t vector_upl = upl->vector_upl;
10159 if (vector_upl) {
10160 if (index < vector_upl->num_upls) {
10161 *offset = vector_upl->upl_iostates[index].offset;
10162 *size = vector_upl->upl_iostates[index].size;
10163 } else {
10164 *offset = *size = 0;
10165 }
10166 } else {
10167 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
10168 }
10169 } else {
10170 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
10171 }
10172 }
10173
10174 upl_page_info_t *
10175 upl_get_internal_vectorupl_pagelist(upl_t upl)
10176 {
10177 return ((vector_upl_t)(upl->vector_upl))->pagelist;
10178 }
10179
10180 void *
10181 upl_get_internal_vectorupl(upl_t upl)
10182 {
10183 return upl->vector_upl;
10184 }
10185
10186 vm_size_t
10187 upl_get_internal_pagelist_offset(void)
10188 {
10189 return sizeof(struct upl);
10190 }
10191
10192 void
10193 upl_clear_dirty(
10194 upl_t upl,
10195 boolean_t value)
10196 {
10197 if (value) {
10198 upl->flags |= UPL_CLEAR_DIRTY;
10199 } else {
10200 upl->flags &= ~UPL_CLEAR_DIRTY;
10201 }
10202 }
10203
10204 void
10205 upl_set_referenced(
10206 upl_t upl,
10207 boolean_t value)
10208 {
10209 upl_lock(upl);
10210 if (value) {
10211 upl->ext_ref_count++;
10212 } else {
10213 if (!upl->ext_ref_count) {
10214 panic("upl_set_referenced: %p has no external reference to release\n", upl);
10215 }
10216 upl->ext_ref_count--;
10217 }
10218 upl_unlock(upl);
10219 }
10220
10221 #if CONFIG_IOSCHED
10222 void
10223 upl_set_blkno(
10224 upl_t upl,
10225 vm_offset_t upl_offset,
10226 int io_size,
10227 int64_t blkno)
10228 {
10229 int i, j;
10230 if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
10231 return;
10232 }
10233
10234 assert(upl->upl_reprio_info != 0);
10235 for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
10236 UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
10237 }
10238 }
10239 #endif
10240
10241 void inline
10242 memoryshot(unsigned int event, unsigned int control)
10243 {
10244 if (vm_debug_events) {
10245 KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
10246 vm_page_active_count, vm_page_inactive_count,
10247 vm_page_free_count, vm_page_speculative_count,
10248 vm_page_throttled_count);
10249 } else {
10250 (void) event;
10251 (void) control;
10252 }
10253 }
10254
10255 #ifdef MACH_BSD
10256
10257 boolean_t
10258 upl_device_page(upl_page_info_t *upl)
10259 {
10260 return UPL_DEVICE_PAGE(upl);
10261 }
10262 boolean_t
10263 upl_page_present(upl_page_info_t *upl, int index)
10264 {
10265 return UPL_PAGE_PRESENT(upl, index);
10266 }
10267 boolean_t
10268 upl_speculative_page(upl_page_info_t *upl, int index)
10269 {
10270 return UPL_SPECULATIVE_PAGE(upl, index);
10271 }
10272 boolean_t
10273 upl_dirty_page(upl_page_info_t *upl, int index)
10274 {
10275 return UPL_DIRTY_PAGE(upl, index);
10276 }
10277 boolean_t
10278 upl_valid_page(upl_page_info_t *upl, int index)
10279 {
10280 return UPL_VALID_PAGE(upl, index);
10281 }
10282 ppnum_t
10283 upl_phys_page(upl_page_info_t *upl, int index)
10284 {
10285 return UPL_PHYS_PAGE(upl, index);
10286 }
10287
10288 void
10289 upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
10290 {
10291 upl[index].mark = v;
10292 }
10293
10294 boolean_t
10295 upl_page_get_mark(upl_page_info_t *upl, int index)
10296 {
10297 return upl[index].mark;
10298 }
10299
10300 void
10301 vm_countdirtypages(void)
10302 {
10303 vm_page_t m;
10304 int dpages;
10305 int pgopages;
10306 int precpages;
10307
10308
10309 dpages = 0;
10310 pgopages = 0;
10311 precpages = 0;
10312
10313 vm_page_lock_queues();
10314 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
10315 do {
10316 if (m == (vm_page_t)0) {
10317 break;
10318 }
10319
10320 if (m->vmp_dirty) {
10321 dpages++;
10322 }
10323 if (m->vmp_free_when_done) {
10324 pgopages++;
10325 }
10326 if (m->vmp_precious) {
10327 precpages++;
10328 }
10329
10330 assert(VM_PAGE_OBJECT(m) != kernel_object);
10331 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10332 if (m == (vm_page_t)0) {
10333 break;
10334 }
10335 } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
10336 vm_page_unlock_queues();
10337
10338 vm_page_lock_queues();
10339 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
10340 do {
10341 if (m == (vm_page_t)0) {
10342 break;
10343 }
10344
10345 dpages++;
10346 assert(m->vmp_dirty);
10347 assert(!m->vmp_free_when_done);
10348 assert(VM_PAGE_OBJECT(m) != kernel_object);
10349 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10350 if (m == (vm_page_t)0) {
10351 break;
10352 }
10353 } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
10354 vm_page_unlock_queues();
10355
10356 vm_page_lock_queues();
10357 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
10358 do {
10359 if (m == (vm_page_t)0) {
10360 break;
10361 }
10362
10363 if (m->vmp_dirty) {
10364 dpages++;
10365 }
10366 if (m->vmp_free_when_done) {
10367 pgopages++;
10368 }
10369 if (m->vmp_precious) {
10370 precpages++;
10371 }
10372
10373 assert(VM_PAGE_OBJECT(m) != kernel_object);
10374 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10375 if (m == (vm_page_t)0) {
10376 break;
10377 }
10378 } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
10379 vm_page_unlock_queues();
10380
10381 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
10382
10383 dpages = 0;
10384 pgopages = 0;
10385 precpages = 0;
10386
10387 vm_page_lock_queues();
10388 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
10389
10390 do {
10391 if (m == (vm_page_t)0) {
10392 break;
10393 }
10394 if (m->vmp_dirty) {
10395 dpages++;
10396 }
10397 if (m->vmp_free_when_done) {
10398 pgopages++;
10399 }
10400 if (m->vmp_precious) {
10401 precpages++;
10402 }
10403
10404 assert(VM_PAGE_OBJECT(m) != kernel_object);
10405 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
10406 if (m == (vm_page_t)0) {
10407 break;
10408 }
10409 } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
10410 vm_page_unlock_queues();
10411
10412 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
10413 }
10414 #endif /* MACH_BSD */
10415
10416
10417 #if CONFIG_IOSCHED
10418 int
10419 upl_get_cached_tier(upl_t upl)
10420 {
10421 assert(upl);
10422 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
10423 return upl->upl_priority;
10424 }
10425 return -1;
10426 }
10427 #endif /* CONFIG_IOSCHED */
10428
10429
10430 void
10431 upl_callout_iodone(upl_t upl)
10432 {
10433 struct upl_io_completion *upl_ctx = upl->upl_iodone;
10434
10435 if (upl_ctx) {
10436 void (*iodone_func)(void *, int) = upl_ctx->io_done;
10437
10438 assert(upl_ctx->io_done);
10439
10440 (*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
10441 }
10442 }
10443
10444 void
10445 upl_set_iodone(upl_t upl, void *upl_iodone)
10446 {
10447 upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
10448 }
10449
10450 void
10451 upl_set_iodone_error(upl_t upl, int error)
10452 {
10453 struct upl_io_completion *upl_ctx = upl->upl_iodone;
10454
10455 if (upl_ctx) {
10456 upl_ctx->io_error = error;
10457 }
10458 }
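/*
 * Hypothetical usage sketch (not part of xnu): attaching an
 * I/O-completion callback to a UPL through the hooks above.
 * "example_io_done", "example_attach_iodone" and the cookie parameter
 * are invented names; the struct upl_io_completion fields used here
 * (io_done, io_context, io_error) are the ones consumed by
 * upl_callout_iodone() above.
 */
static void
example_io_done(void *context, int error)
{
	/* invoked (via upl_callout_iodone) once the I/O on the UPL completes */
	printf("example UPL I/O finished: context=%p error=%d\n", context, error);
}

static void
example_attach_iodone(upl_t upl, struct upl_io_completion *ctx, void *cookie)
{
	ctx->io_done = example_io_done;
	ctx->io_context = cookie;
	ctx->io_error = 0;              /* updated later via upl_set_iodone_error() */

	/* ctx must remain valid until upl_callout_iodone() has run */
	upl_set_iodone(upl, ctx);
}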
10459
10460
10461 ppnum_t
10462 upl_get_highest_page(
10463 upl_t upl)
10464 {
10465 return upl->highest_page;
10466 }
10467
10468 upl_size_t
10469 upl_get_size(
10470 upl_t upl)
10471 {
10472 return upl->size;
10473 }
10474
10475 upl_t
10476 upl_associated_upl(upl_t upl)
10477 {
10478 return upl->associated_upl;
10479 }
10480
10481 void
10482 upl_set_associated_upl(upl_t upl, upl_t associated_upl)
10483 {
10484 upl->associated_upl = associated_upl;
10485 }
10486
10487 struct vnode *
10488 upl_lookup_vnode(upl_t upl)
10489 {
10490 if (!upl->map_object->internal) {
10491 return vnode_pager_lookup_vnode(upl->map_object->pager);
10492 } else {
10493 return NULL;
10494 }
10495 }
10496
10497 #if UPL_DEBUG
10498 kern_return_t
10499 upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
10500 {
10501 upl->ubc_alias1 = alias1;
10502 upl->ubc_alias2 = alias2;
10503 return KERN_SUCCESS;
10504 }
10505 int
10506 upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
10507 {
10508 if (al) {
10509 *al = upl->ubc_alias1;
10510 }
10511 if (al2) {
10512 *al2 = upl->ubc_alias2;
10513 }
10514 return KERN_SUCCESS;
10515 }
10516 #endif /* UPL_DEBUG */
10517
10518 #if VM_PRESSURE_EVENTS
10519 /*
10520 * Upward trajectory.
10521 */
10522 extern boolean_t vm_compressor_low_on_space(void);
10523
10524 boolean_t
10525 VM_PRESSURE_NORMAL_TO_WARNING(void)
10526 {
10527 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10528 /* Available pages below our threshold */
10529 if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
10530 /* No frozen processes to kill */
10531 if (memorystatus_frozen_count == 0) {
10532 /* Not enough suspended processes available. */
10533 if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
10534 return TRUE;
10535 }
10536 }
10537 }
10538 return FALSE;
10539 } else {
10540 return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
10541 }
10542 }
10543
10544 boolean_t
10545 VM_PRESSURE_WARNING_TO_CRITICAL(void)
10546 {
10547 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10548 /* Available pages below our threshold */
10549 if (memorystatus_available_pages < memorystatus_available_pages_critical) {
10550 return TRUE;
10551 }
10552 return FALSE;
10553 } else {
10554 return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
10555 }
10556 }
10557
10558 /*
10559 * Downward trajectory.
10560 */
10561 boolean_t
10562 VM_PRESSURE_WARNING_TO_NORMAL(void)
10563 {
10564 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10565 /* Available pages above our threshold */
10566 unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
10567 if (memorystatus_available_pages > target_threshold) {
10568 return TRUE;
10569 }
10570 return FALSE;
10571 } else {
10572 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
10573 }
10574 }
10575
10576 boolean_t
10577 VM_PRESSURE_CRITICAL_TO_WARNING(void)
10578 {
10579 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10580 /* Available pages above our threshold */
10581 unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
10582 if (memorystatus_available_pages > target_threshold) {
10583 return TRUE;
10584 }
10585 return FALSE;
10586 } else {
10587 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
10588 }
10589 }
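/*
 * Worked example of the 15% hysteresis above (illustrative numbers,
 * non-compressor case): with memorystatus_available_pages_pressure ==
 * 40000 pages, the system can enter the WARNING level once available
 * pages drop below 40000 (and the frozen/suspended checks also hold),
 * but only returns to NORMAL once they climb back above
 * 40000 + (15 * 40000) / 100 = 46000 pages, so the level does not flap
 * around the threshold.  The CRITICAL -> WARNING transition applies the
 * same 15% margin to memorystatus_available_pages_critical.
 */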
10590 #endif /* VM_PRESSURE_EVENTS */
10591
10592
10593
10594 #define VM_TEST_COLLAPSE_COMPRESSOR 0
10595 #define VM_TEST_WIRE_AND_EXTRACT 0
10596 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
10597 #if __arm64__
10598 #define VM_TEST_KERNEL_OBJECT_FAULT 0
10599 #endif /* __arm64__ */
10600 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
10601
10602 #if VM_TEST_COLLAPSE_COMPRESSOR
10603 extern boolean_t vm_object_collapse_compressor_allowed;
10604 #include <IOKit/IOLib.h>
10605 static void
10606 vm_test_collapse_compressor(void)
10607 {
10608 vm_object_size_t backing_size, top_size;
10609 vm_object_t backing_object, top_object;
10610 vm_map_offset_t backing_offset, top_offset;
10611 unsigned char *backing_address, *top_address;
10612 kern_return_t kr;
10613
10614 printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
10615
10616 /* create backing object */
10617 backing_size = 15 * PAGE_SIZE;
10618 backing_object = vm_object_allocate(backing_size);
10619 assert(backing_object != VM_OBJECT_NULL);
10620 printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
10621 backing_object);
10622 /* map backing object */
10623 backing_offset = 0;
10624 kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
10625 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
10626 backing_object, 0, FALSE,
10627 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
10628 assert(kr == KERN_SUCCESS);
10629 backing_address = (unsigned char *) backing_offset;
10630 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10631 "mapped backing object %p at 0x%llx\n",
10632 backing_object, (uint64_t) backing_offset);
10633 /* populate with pages to be compressed in backing object */
10634 backing_address[0x1 * PAGE_SIZE] = 0xB1;
10635 backing_address[0x4 * PAGE_SIZE] = 0xB4;
10636 backing_address[0x7 * PAGE_SIZE] = 0xB7;
10637 backing_address[0xa * PAGE_SIZE] = 0xBA;
10638 backing_address[0xd * PAGE_SIZE] = 0xBD;
10639 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10640 "populated pages to be compressed in "
10641 "backing_object %p\n", backing_object);
10642 /* compress backing object */
10643 vm_object_pageout(backing_object);
10644 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
10645 backing_object);
10646 /* wait for all the pages to be gone */
10647 while (*(volatile int *)&backing_object->resident_page_count != 0) {
10648 IODelay(10);
10649 }
10650 printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
10651 backing_object);
10652 /* populate with pages to be resident in backing object */
10653 backing_address[0x0 * PAGE_SIZE] = 0xB0;
10654 backing_address[0x3 * PAGE_SIZE] = 0xB3;
10655 backing_address[0x6 * PAGE_SIZE] = 0xB6;
10656 backing_address[0x9 * PAGE_SIZE] = 0xB9;
10657 backing_address[0xc * PAGE_SIZE] = 0xBC;
10658 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10659 "populated pages to be resident in "
10660 "backing_object %p\n", backing_object);
10661 /* leave the other pages absent */
10662 /* mess with the paging_offset of the backing_object */
10663 assert(backing_object->paging_offset == 0);
10664 backing_object->paging_offset = 0x3000;
10665
10666 /* create top object */
10667 top_size = 9 * PAGE_SIZE;
10668 top_object = vm_object_allocate(top_size);
10669 assert(top_object != VM_OBJECT_NULL);
10670 printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
10671 top_object);
10672 /* map top object */
10673 top_offset = 0;
10674 kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
10675 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
10676 top_object, 0, FALSE,
10677 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
10678 assert(kr == KERN_SUCCESS);
10679 top_address = (unsigned char *) top_offset;
10680 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10681 "mapped top object %p at 0x%llx\n",
10682 top_object, (uint64_t) top_offset);
10683 /* populate with pages to be compressed in top object */
10684 top_address[0x3 * PAGE_SIZE] = 0xA3;
10685 top_address[0x4 * PAGE_SIZE] = 0xA4;
10686 top_address[0x5 * PAGE_SIZE] = 0xA5;
10687 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10688 "populated pages to be compressed in "
10689 "top_object %p\n", top_object);
10690 /* compress top object */
10691 vm_object_pageout(top_object);
10692 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
10693 top_object);
10694 /* wait for all the pages to be gone */
10695 while (top_object->resident_page_count != 0) {
10696 IODelay(10);
10697 }
10698 printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
10699 top_object);
10700 /* populate with pages to be resident in top object */
10701 top_address[0x0 * PAGE_SIZE] = 0xA0;
10702 top_address[0x1 * PAGE_SIZE] = 0xA1;
10703 top_address[0x2 * PAGE_SIZE] = 0xA2;
10704 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10705 "populated pages to be resident in "
10706 "top_object %p\n", top_object);
10707 /* leave the other pages absent */
10708
10709 /* link the 2 objects */
10710 vm_object_reference(backing_object);
10711 top_object->shadow = backing_object;
10712 top_object->vo_shadow_offset = 0x3000;
10713 printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
10714 top_object, backing_object);
10715
10716 /* unmap backing object */
10717 vm_map_remove(kernel_map,
10718 backing_offset,
10719 backing_offset + backing_size,
10720 VM_MAP_REMOVE_NO_FLAGS);
10721 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10722 "unmapped backing_object %p [0x%llx:0x%llx]\n",
10723 backing_object,
10724 (uint64_t) backing_offset,
10725 (uint64_t) (backing_offset + backing_size));
10726
10727 /* collapse */
10728 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
10729 vm_object_lock(top_object);
10730 vm_object_collapse(top_object, 0, FALSE);
10731 vm_object_unlock(top_object);
10732 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
10733
10734 /* did it work? */
10735 if (top_object->shadow != VM_OBJECT_NULL) {
10736 printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
10737 printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10738 if (vm_object_collapse_compressor_allowed) {
10739 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10740 }
10741 } else {
10742 /* check the contents of the mapping */
10743 unsigned char expect[9] =
10744 { 0xA0, 0xA1, 0xA2, /* resident in top */
10745 0xA3, 0xA4, 0xA5, /* compressed in top */
10746 0xB9, /* resident in backing + shadow_offset */
10747 0xBD, /* compressed in backing + shadow_offset + paging_offset */
10748 0x00 }; /* absent in both */
10749 unsigned char actual[9];
10750 unsigned int i, errors;
10751
10752 errors = 0;
10753 for (i = 0; i < sizeof(actual); i++) {
10754 actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
10755 if (actual[i] != expect[i]) {
10756 errors++;
10757 }
10758 }
10759 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10760 "actual [%x %x %x %x %x %x %x %x %x] "
10761 "expect [%x %x %x %x %x %x %x %x %x] "
10762 "%d errors\n",
10763 actual[0], actual[1], actual[2], actual[3],
10764 actual[4], actual[5], actual[6], actual[7],
10765 actual[8],
10766 expect[0], expect[1], expect[2], expect[3],
10767 expect[4], expect[5], expect[6], expect[7],
10768 expect[8],
10769 errors);
10770 if (errors) {
10771 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10772 } else {
10773 printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
10774 }
10775 }
10776 }
10777 #else /* VM_TEST_COLLAPSE_COMPRESSOR */
10778 #define vm_test_collapse_compressor()
10779 #endif /* VM_TEST_COLLAPSE_COMPRESSOR */
10780
10781 #if VM_TEST_WIRE_AND_EXTRACT
10782 extern ledger_template_t task_ledger_template;
10783 #include <mach/mach_vm.h>
10784 extern ppnum_t vm_map_get_phys_page(vm_map_t map,
10785 vm_offset_t offset);
10786 static void
10787 vm_test_wire_and_extract(void)
10788 {
10789 ledger_t ledger;
10790 vm_map_t user_map, wire_map;
10791 mach_vm_address_t user_addr, wire_addr;
10792 mach_vm_size_t user_size, wire_size;
10793 mach_vm_offset_t cur_offset;
10794 vm_prot_t cur_prot, max_prot;
10795 ppnum_t user_ppnum, wire_ppnum;
10796 kern_return_t kr;
10797
10798 ledger = ledger_instantiate(task_ledger_template,
10799 LEDGER_CREATE_ACTIVE_ENTRIES);
10800 user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT),
10801 0x100000000ULL,
10802 0x200000000ULL,
10803 TRUE);
10804 wire_map = vm_map_create(NULL,
10805 0x100000000ULL,
10806 0x200000000ULL,
10807 TRUE);
10808 user_addr = 0;
10809 user_size = 0x10000;
10810 kr = mach_vm_allocate(user_map,
10811 &user_addr,
10812 user_size,
10813 VM_FLAGS_ANYWHERE);
10814 assert(kr == KERN_SUCCESS);
10815 wire_addr = 0;
10816 wire_size = user_size;
10817 kr = mach_vm_remap(wire_map,
10818 &wire_addr,
10819 wire_size,
10820 0,
10821 VM_FLAGS_ANYWHERE,
10822 user_map,
10823 user_addr,
10824 FALSE,
10825 &cur_prot,
10826 &max_prot,
10827 VM_INHERIT_NONE);
10828 assert(kr == KERN_SUCCESS);
10829 for (cur_offset = 0;
10830 cur_offset < wire_size;
10831 cur_offset += PAGE_SIZE) {
10832 kr = vm_map_wire_and_extract(wire_map,
10833 wire_addr + cur_offset,
10834 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
10835 TRUE,
10836 &wire_ppnum);
10837 assert(kr == KERN_SUCCESS);
10838 user_ppnum = vm_map_get_phys_page(user_map,
10839 user_addr + cur_offset);
10840 printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
10841 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10842 kr,
10843 user_map, user_addr + cur_offset, user_ppnum,
10844 wire_map, wire_addr + cur_offset, wire_ppnum);
10845 if (kr != KERN_SUCCESS ||
10846 wire_ppnum == 0 ||
10847 wire_ppnum != user_ppnum) {
10848 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10849 }
10850 }
10851 cur_offset -= PAGE_SIZE;
10852 kr = vm_map_wire_and_extract(wire_map,
10853 wire_addr + cur_offset,
10854 VM_PROT_DEFAULT,
10855 TRUE,
10856 &wire_ppnum);
10857 assert(kr == KERN_SUCCESS);
10858 printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
10859 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10860 kr,
10861 user_map, user_addr + cur_offset, user_ppnum,
10862 wire_map, wire_addr + cur_offset, wire_ppnum);
10863 if (kr != KERN_SUCCESS ||
10864 wire_ppnum == 0 ||
10865 wire_ppnum != user_ppnum) {
10866 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10867 }
10868
10869 printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
10870 }
10871 #else /* VM_TEST_WIRE_AND_EXTRACT */
10872 #define vm_test_wire_and_extract()
10873 #endif /* VM_TEST_WIRE_AND_EXTRACT */
10874
10875 #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
10876 static void
10877 vm_test_page_wire_overflow_panic(void)
10878 {
10879 vm_object_t object;
10880 vm_page_t page;
10881
10882 printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
10883
10884 object = vm_object_allocate(PAGE_SIZE);
10885 vm_object_lock(object);
10886 page = vm_page_alloc(object, 0x0);
10887 vm_page_lock_queues();
10888 do {
10889 vm_page_wire(page, 1, FALSE);
10890 } while (page->vmp_wire_count != 0);
10891 vm_page_unlock_queues();
10892 vm_object_unlock(object);
10893 panic("FBDP(%p,%p): wire_count overflow not detected\n",
10894 object, page);
10895 }
10896 #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10897 #define vm_test_page_wire_overflow_panic()
10898 #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10899
10900 #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
10901 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
10902 static void
10903 vm_test_kernel_object_fault(void)
10904 {
10905 kern_return_t kr;
10906 vm_offset_t stack;
10907 uintptr_t frameb[2];
10908 int ret;
10909
10910 kr = kernel_memory_allocate(kernel_map, &stack,
10911 kernel_stack_size + (2 * PAGE_SIZE),
10912 0,
10913 (KMA_KSTACK | KMA_KOBJECT |
10914 KMA_GUARD_FIRST | KMA_GUARD_LAST),
10915 VM_KERN_MEMORY_STACK);
10916 if (kr != KERN_SUCCESS) {
10917 panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
10918 }
10919 ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
10920 if (ret != 0) {
10921 printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
10922 } else {
10923 printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
10924 }
10925 vm_map_remove(kernel_map,
10926 stack,
10927 stack + kernel_stack_size + (2 * PAGE_SIZE),
10928 VM_MAP_REMOVE_KUNWIRE);
10929 stack = 0;
10930 }
10931 #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10932 #define vm_test_kernel_object_fault()
10933 #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10934
10935 #if VM_TEST_DEVICE_PAGER_TRANSPOSE
10936 static void
10937 vm_test_device_pager_transpose(void)
10938 {
10939 memory_object_t device_pager;
10940 vm_object_t anon_object, device_object;
10941 vm_size_t size;
10942 vm_map_offset_t device_mapping;
10943 kern_return_t kr;
10944
10945 size = 3 * PAGE_SIZE;
10946 anon_object = vm_object_allocate(size);
10947 assert(anon_object != VM_OBJECT_NULL);
10948 device_pager = device_pager_setup(NULL, 0, size, 0);
10949 assert(device_pager != NULL);
10950 device_object = memory_object_to_vm_object(device_pager);
10951 assert(device_object != VM_OBJECT_NULL);
10952 #if 0
10953 /*
10954 * Can't actually map this, since another thread might do a
10955 * vm_map_enter() that gets coalesced into this object, which
10956 * would cause the test to fail.
10957 */
10958 vm_map_offset_t anon_mapping = 0;
10959 kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
10960 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
10961 anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
10962 VM_INHERIT_DEFAULT);
10963 assert(kr == KERN_SUCCESS);
10964 #endif
10965 device_mapping = 0;
10966 kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
10967 VM_FLAGS_ANYWHERE,
10968 VM_MAP_KERNEL_FLAGS_NONE,
10969 VM_KERN_MEMORY_NONE,
10970 (void *)device_pager, 0, FALSE,
10971 VM_PROT_DEFAULT, VM_PROT_ALL,
10972 VM_INHERIT_DEFAULT);
10973 assert(kr == KERN_SUCCESS);
10974 memory_object_deallocate(device_pager);
10975
10976 vm_object_lock(anon_object);
10977 vm_object_activity_begin(anon_object);
10978 anon_object->blocked_access = TRUE;
10979 vm_object_unlock(anon_object);
10980 vm_object_lock(device_object);
10981 vm_object_activity_begin(device_object);
10982 device_object->blocked_access = TRUE;
10983 vm_object_unlock(device_object);
10984
10985 assert(anon_object->ref_count == 1);
10986 assert(!anon_object->named);
10987 assert(device_object->ref_count == 2);
10988 assert(device_object->named);
10989
10990 kr = vm_object_transpose(device_object, anon_object, size);
10991 assert(kr == KERN_SUCCESS);
10992
10993 vm_object_lock(anon_object);
10994 vm_object_activity_end(anon_object);
10995 anon_object->blocked_access = FALSE;
10996 vm_object_unlock(anon_object);
10997 vm_object_lock(device_object);
10998 vm_object_activity_end(device_object);
10999 device_object->blocked_access = FALSE;
11000 vm_object_unlock(device_object);
11001
11002 assert(anon_object->ref_count == 2);
11003 assert(anon_object->named);
11004 #if 0
11005 kr = vm_deallocate(kernel_map, anon_mapping, size);
11006 assert(kr == KERN_SUCCESS);
11007 #endif
11008 assert(device_object->ref_count == 1);
11009 assert(!device_object->named);
11010 kr = vm_deallocate(kernel_map, device_mapping, size);
11011 assert(kr == KERN_SUCCESS);
11012
11013 printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
11014 }
11015 #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
11016 #define vm_test_device_pager_transpose()
11017 #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
11018
11019 void
11020 vm_tests(void)
11021 {
11022 vm_test_collapse_compressor();
11023 vm_test_wire_and_extract();
11024 vm_test_page_wire_overflow_panic();
11025 vm_test_kernel_object_fault();
11026 vm_test_device_pager_transpose();
11027 }