1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_pageout.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * The proverbial page-out daemon.
64 */
65
66 #include <stdint.h>
67
68 #include <debug.h>
69 #include <mach_pagemap.h>
70 #include <mach_cluster_stats.h>
71
72 #include <mach/mach_types.h>
73 #include <mach/memory_object.h>
74 #include <mach/memory_object_default.h>
75 #include <mach/memory_object_control_server.h>
76 #include <mach/mach_host_server.h>
77 #include <mach/upl.h>
78 #include <mach/vm_map.h>
79 #include <mach/vm_param.h>
80 #include <mach/vm_statistics.h>
81 #include <mach/sdt.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/counters.h>
85 #include <kern/host_statistics.h>
86 #include <kern/machine.h>
87 #include <kern/misc_protos.h>
88 #include <kern/sched.h>
89 #include <kern/thread.h>
90 #include <kern/xpr.h>
91 #include <kern/kalloc.h>
92 #include <kern/policy_internal.h>
93 #include <kern/thread_group.h>
94
95 #include <machine/vm_tuning.h>
96 #include <machine/commpage.h>
97
98 #include <vm/pmap.h>
99 #include <vm/vm_compressor_pager.h>
100 #include <vm/vm_fault.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_object.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_protos.h> /* must be last */
106 #include <vm/memory_object.h>
107 #include <vm/vm_purgeable_internal.h>
108 #include <vm/vm_shared_region.h>
109 #include <vm/vm_compressor.h>
110
111 #include <san/kasan.h>
112
113 #if CONFIG_PHANTOM_CACHE
114 #include <vm/vm_phantom_cache.h>
115 #endif
116
117 #if UPL_DEBUG
118 #include <libkern/OSDebug.h>
119 #endif
120
121 extern int cs_debug;
122
123 extern void mbuf_drain(boolean_t);
124
125 #if VM_PRESSURE_EVENTS
126 #if CONFIG_JETSAM
127 extern unsigned int memorystatus_available_pages;
128 extern unsigned int memorystatus_available_pages_pressure;
129 extern unsigned int memorystatus_available_pages_critical;
130 #else /* CONFIG_JETSAM */
131 extern uint64_t memorystatus_available_pages;
132 extern uint64_t memorystatus_available_pages_pressure;
133 extern uint64_t memorystatus_available_pages_critical;
134 #endif /* CONFIG_JETSAM */
135
136 extern unsigned int memorystatus_frozen_count;
137 extern unsigned int memorystatus_suspended_count;
138 extern vm_pressure_level_t memorystatus_vm_pressure_level;
139
140 void vm_pressure_response(void);
141 extern void consider_vm_pressure_events(void);
142
143 #define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
144 #endif /* VM_PRESSURE_EVENTS */
145
146
147 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
148 #ifdef CONFIG_EMBEDDED
149 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
150 #else
151 #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
152 #endif
153 #endif
154
155 #ifndef VM_PAGEOUT_DEADLOCK_RELIEF
156 #define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */
157 #endif
158
159 #ifndef VM_PAGE_LAUNDRY_MAX
160 #define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */
161 #endif /* VM_PAGE_LAUNDRY_MAX */
162
163 #ifndef VM_PAGEOUT_BURST_WAIT
164 #define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */
165 #endif /* VM_PAGEOUT_BURST_WAIT */
166
167 #ifndef VM_PAGEOUT_EMPTY_WAIT
168 #define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */
169 #endif /* VM_PAGEOUT_EMPTY_WAIT */
170
171 #ifndef VM_PAGEOUT_DEADLOCK_WAIT
172 #define VM_PAGEOUT_DEADLOCK_WAIT 100 /* milliseconds */
173 #endif /* VM_PAGEOUT_DEADLOCK_WAIT */
174
175 #ifndef VM_PAGEOUT_IDLE_WAIT
176 #define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
177 #endif /* VM_PAGEOUT_IDLE_WAIT */
178
179 #ifndef VM_PAGEOUT_SWAP_WAIT
180 #define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */
181 #endif /* VM_PAGEOUT_SWAP_WAIT */
182
183
184 #ifndef VM_PAGE_SPECULATIVE_TARGET
185 #define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
186 #endif /* VM_PAGE_SPECULATIVE_TARGET */
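/*
 * For illustration (assuming vm_page_speculative_percentage is 5): the macro
 * above evaluates to (total) * 1 / (100 / 5) = (total) / 20, i.e. roughly 5%
 * of "total" is allowed to sit on the speculative queues.
 */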
187
188
189 /*
190 * To obtain a reasonable LRU approximation, the inactive queue
191 * needs to be large enough to give pages on it a chance to be
192 * referenced a second time. This macro defines the fraction
193 * of active+inactive pages that should be inactive.
194 * The pageout daemon uses it to update vm_page_inactive_target.
195 *
196 * If vm_page_free_count falls below vm_page_free_target and
197 * vm_page_inactive_count is below vm_page_inactive_target,
198 * then the pageout daemon starts running.
199 */
200
201 #ifndef VM_PAGE_INACTIVE_TARGET
202 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
203 #endif /* VM_PAGE_INACTIVE_TARGET */
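/*
 * For illustration (assuming avail = 1,000,000 active+inactive pages):
 * VM_PAGE_INACTIVE_TARGET(1000000) = 1000000 * 1 / 2 = 500,000 pages, i.e.
 * about half of that pool is kept inactive so a page gets a chance to be
 * referenced a second time before it is reclaimed.
 */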
204
205 /*
206 * Once the pageout daemon starts running, it keeps going
207 * until vm_page_free_count meets or exceeds vm_page_free_target.
208 */
209
210 #ifndef VM_PAGE_FREE_TARGET
211 #ifdef CONFIG_EMBEDDED
212 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
213 #else
214 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
215 #endif
216 #endif /* VM_PAGE_FREE_TARGET */
217
218
219 /*
220 * The pageout daemon always starts running once vm_page_free_count
221 * falls below vm_page_free_min.
222 */
223
224 #ifndef VM_PAGE_FREE_MIN
225 #ifdef CONFIG_EMBEDDED
226 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
227 #else
228 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
229 #endif
230 #endif /* VM_PAGE_FREE_MIN */
231
232 #ifdef CONFIG_EMBEDDED
233 #define VM_PAGE_FREE_RESERVED_LIMIT 100
234 #define VM_PAGE_FREE_MIN_LIMIT 1500
235 #define VM_PAGE_FREE_TARGET_LIMIT 2000
236 #else
237 #define VM_PAGE_FREE_RESERVED_LIMIT 1700
238 #define VM_PAGE_FREE_MIN_LIMIT 3500
239 #define VM_PAGE_FREE_TARGET_LIMIT 4000
240 #endif
241
242 /*
243 * When vm_page_free_count falls below vm_page_free_reserved,
244 * only vm-privileged threads can allocate pages. vm-privilege
245 * allows the pageout daemon and default pager (and any other
246 * associated threads needed for default pageout) to continue
247 * operation by dipping into the reserved pool of pages.
248 */
249
250 #ifndef VM_PAGE_FREE_RESERVED
251 #define VM_PAGE_FREE_RESERVED(n) \
252 ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
253 #endif /* VM_PAGE_FREE_RESERVED */
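/*
 * For illustration (assuming the macros are applied to a pool of 100,000
 * pages): VM_PAGE_FREE_TARGET(100000) = 15 + 100000/80 = 1265 pages with the
 * non-embedded definition (15 + 100000/100 = 1015 with CONFIG_EMBEDDED),
 * VM_PAGE_FREE_MIN(100000) = 10 + 100000/100 = 1010 pages, and
 * VM_PAGE_FREE_RESERVED(n) = 6 * 128 + n = 768 + n pages.  The *_LIMIT
 * constants above serve as bounds on these computed values in the pageout
 * setup code.
 */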
254
255 /*
256 * When we dequeue pages from the inactive list, they are
257  * reactivated (i.e., put back on the active queue) if referenced.
258 * However, it is possible to starve the free list if other
259 * processors are referencing pages faster than we can turn off
260 * the referenced bit. So we limit the number of reactivations
261 * we will make per call of vm_pageout_scan().
262 */
263 #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
264
265 #ifndef VM_PAGE_REACTIVATE_LIMIT
266 #ifdef CONFIG_EMBEDDED
267 #define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
268 #else
269 #define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
270 #endif
271 #endif /* VM_PAGE_REACTIVATE_LIMIT */
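/*
 * For illustration (assuming avail = 1,000,000 pages and the non-embedded
 * definition above): VM_PAGE_REACTIVATE_LIMIT(1000000) =
 * MAX(1000000 / 20, 20000) = 50,000 reactivations per call of
 * vm_pageout_scan(); as written, the MAX() makes 20,000 a lower bound.
 */
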
272 #define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
273
274 extern boolean_t hibernate_cleaning_in_progress;
275
276 /*
277 * Forward declarations for internal routines.
278 */
279 struct cq {
280 struct vm_pageout_queue *q;
281 void *current_chead;
282 char *scratch_buf;
283 int id;
284 };
285
286 struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
287
288
289 #if VM_PRESSURE_EVENTS
290 void vm_pressure_thread(void);
291
292 boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
293 boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
294
295 boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
296 boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
297 #endif
298
299 void vm_pageout_garbage_collect(int);
300 static void vm_pageout_iothread_external(void);
301 static void vm_pageout_iothread_internal(struct cq *cq);
302 static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);
303
304 extern void vm_pageout_continue(void);
305 extern void vm_pageout_scan(void);
306
307 void vm_tests(void); /* forward */
308
309 #if !CONFIG_EMBEDDED
310 static boolean_t vm_pageout_waiter = FALSE;
311 static boolean_t vm_pageout_running = FALSE;
312 #endif /* !CONFIG_EMBEDDED */
313
314
315 #if DEVELOPMENT || DEBUG
316 struct vm_pageout_debug vm_pageout_debug;
317 #endif
318 struct vm_pageout_vminfo vm_pageout_vminfo;
319 struct vm_pageout_state vm_pageout_state;
320 struct vm_config vm_config;
321
322 struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
323 struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
324
325 int vm_upl_wait_for_pages = 0;
326 vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
327
328 boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;
329
330 int vm_debug_events = 0;
331
332 lck_grp_t vm_pageout_lck_grp;
333
334 #if CONFIG_MEMORYSTATUS
335 extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
336
337 uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
338 uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
339
340 #endif
341
342
343
344 /*
345 * Routine: vm_pageout_object_terminate
346 * Purpose:
347 * Destroy the pageout_object, and perform all of the
348 * required cleanup actions.
349 *
350 * In/Out conditions:
351 * The object must be locked, and will be returned locked.
352 */
353 void
354 vm_pageout_object_terminate(
355 vm_object_t object)
356 {
357 vm_object_t shadow_object;
358
359 /*
360 * Deal with the deallocation (last reference) of a pageout object
361 * (used for cleaning-in-place) by dropping the paging references/
362 * freeing pages in the original object.
363 */
364
365 assert(object->pageout);
366 shadow_object = object->shadow;
367 vm_object_lock(shadow_object);
368
369 while (!vm_page_queue_empty(&object->memq)) {
370 vm_page_t p, m;
371 vm_object_offset_t offset;
372
373 p = (vm_page_t) vm_page_queue_first(&object->memq);
374
375 assert(p->vmp_private);
376 assert(p->vmp_free_when_done);
377 p->vmp_free_when_done = FALSE;
378 assert(!p->vmp_cleaning);
379 assert(!p->vmp_laundry);
380
381 offset = p->vmp_offset;
382 VM_PAGE_FREE(p);
383 p = VM_PAGE_NULL;
384
385 m = vm_page_lookup(shadow_object,
386 offset + object->vo_shadow_offset);
387
388 if (m == VM_PAGE_NULL) {
389 continue;
390 }
391
392 assert((m->vmp_dirty) || (m->vmp_precious) ||
393 (m->vmp_busy && m->vmp_cleaning));
394
395 /*
396 * Handle the trusted pager throttle.
397 * Also decrement the burst throttle (if external).
398 */
399 vm_page_lock_queues();
400 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
401 vm_pageout_throttle_up(m);
402 }
403
404 /*
405 * Handle the "target" page(s). These pages are to be freed if
406 * successfully cleaned. Target pages are always busy, and are
407 * wired exactly once. The initial target pages are not mapped,
408 * (so cannot be referenced or modified) but converted target
409 * pages may have been modified between the selection as an
410 * adjacent page and conversion to a target.
411 */
412 if (m->vmp_free_when_done) {
413 assert(m->vmp_busy);
414 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
415 assert(m->vmp_wire_count == 1);
416 m->vmp_cleaning = FALSE;
417 m->vmp_free_when_done = FALSE;
418 /*
419 * Revoke all access to the page. Since the object is
420 * locked, and the page is busy, this prevents the page
421 * from being dirtied after the pmap_disconnect() call
422 * returns.
423 *
424  * Since the page is left "dirty" but "not modified", we
425 * can detect whether the page was redirtied during
426 * pageout by checking the modify state.
427 */
428 if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
429 SET_PAGE_DIRTY(m, FALSE);
430 } else {
431 m->vmp_dirty = FALSE;
432 }
433
434 if (m->vmp_dirty) {
435 vm_page_unwire(m, TRUE); /* reactivates */
436 VM_STAT_INCR(reactivations);
437 PAGE_WAKEUP_DONE(m);
438 } else {
439 vm_page_free(m); /* clears busy, etc. */
440 }
441 vm_page_unlock_queues();
442 continue;
443 }
444 /*
445 * Handle the "adjacent" pages. These pages were cleaned in
446 * place, and should be left alone.
447 * If prep_pin_count is nonzero, then someone is using the
448 * page, so make it active.
449 */
450 if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
451 if (m->vmp_reference) {
452 vm_page_activate(m);
453 } else {
454 vm_page_deactivate(m);
455 }
456 }
457 if (m->vmp_overwriting) {
458 /*
459 * the (COPY_OUT_FROM == FALSE) request_page_list case
460 */
461 if (m->vmp_busy) {
462 /*
463 * We do not re-set m->vmp_dirty !
464 * The page was busy so no extraneous activity
465 * could have occurred. COPY_INTO is a read into the
466 * new pages. CLEAN_IN_PLACE does actually write
467 * out the pages but handling outside of this code
468 * will take care of resetting dirty. We clear the
469 * modify however for the Programmed I/O case.
470 */
471 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
472
473 m->vmp_busy = FALSE;
474 m->vmp_absent = FALSE;
475 } else {
476 /*
477 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
478 * Occurs when the original page was wired
479 * at the time of the list request
480 */
481 assert(VM_PAGE_WIRED(m));
482 vm_page_unwire(m, TRUE); /* reactivates */
483 }
484 m->vmp_overwriting = FALSE;
485 } else {
486 m->vmp_dirty = FALSE;
487 }
488 m->vmp_cleaning = FALSE;
489
490 /*
491 * Wakeup any thread waiting for the page to be un-cleaning.
492 */
493 PAGE_WAKEUP(m);
494 vm_page_unlock_queues();
495 }
496 /*
497 * Account for the paging reference taken in vm_paging_object_allocate.
498 */
499 vm_object_activity_end(shadow_object);
500 vm_object_unlock(shadow_object);
501
502 assert(object->ref_count == 0);
503 assert(object->paging_in_progress == 0);
504 assert(object->activity_in_progress == 0);
505 assert(object->resident_page_count == 0);
506 return;
507 }
508
509 /*
510 * Routine: vm_pageclean_setup
511 *
512  * Purpose: set up a page to be cleaned (made non-dirty), but not
513 * necessarily flushed from the VM page cache.
514 * This is accomplished by cleaning in place.
515 *
516 * The page must not be busy, and new_object
517 * must be locked.
518 *
519 */
520 static void
521 vm_pageclean_setup(
522 vm_page_t m,
523 vm_page_t new_m,
524 vm_object_t new_object,
525 vm_object_offset_t new_offset)
526 {
527 assert(!m->vmp_busy);
528 #if 0
529 assert(!m->vmp_cleaning);
530 #endif
531
532 XPR(XPR_VM_PAGEOUT,
533 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
534 VM_PAGE_OBJECT(m), m->vmp_offset, m,
535 new_m, new_offset);
536
537 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
538
539 /*
540 * Mark original page as cleaning in place.
541 */
542 m->vmp_cleaning = TRUE;
543 SET_PAGE_DIRTY(m, FALSE);
544 m->vmp_precious = FALSE;
545
546 /*
547 * Convert the fictitious page to a private shadow of
548 * the real page.
549 */
550 assert(new_m->vmp_fictitious);
551 assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
552 new_m->vmp_fictitious = FALSE;
553 new_m->vmp_private = TRUE;
554 new_m->vmp_free_when_done = TRUE;
555 VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
556
557 vm_page_lockspin_queues();
558 vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
559 vm_page_unlock_queues();
560
561 vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
562 assert(!new_m->vmp_wanted);
563 new_m->vmp_busy = FALSE;
564 }
565
566 /*
567 * Routine: vm_pageout_initialize_page
568 * Purpose:
569 * Causes the specified page to be initialized in
570 * the appropriate memory object. This routine is used to push
571 * pages into a copy-object when they are modified in the
572 * permanent object.
573 *
574 * The page is moved to a temporary object and paged out.
575 *
576 * In/out conditions:
577 * The page in question must not be on any pageout queues.
578 * The object to which it belongs must be locked.
579 * The page must be busy, but not hold a paging reference.
580 *
581 * Implementation:
582 * Move this page to a completely new object.
583 */
584 void
585 vm_pageout_initialize_page(
586 vm_page_t m)
587 {
588 vm_object_t object;
589 vm_object_offset_t paging_offset;
590 memory_object_t pager;
591
592 XPR(XPR_VM_PAGEOUT,
593 "vm_pageout_initialize_page, page 0x%X\n",
594 m, 0, 0, 0, 0);
595
596 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
597
598 object = VM_PAGE_OBJECT(m);
599
600 assert(m->vmp_busy);
601 assert(object->internal);
602
603 /*
604 * Verify that we really want to clean this page
605 */
606 assert(!m->vmp_absent);
607 assert(!m->vmp_error);
608 assert(m->vmp_dirty);
609
610 /*
611 * Create a paging reference to let us play with the object.
612 */
613 paging_offset = m->vmp_offset + object->paging_offset;
614
615 if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
616 panic("reservation without pageout?"); /* alan */
617
618 VM_PAGE_FREE(m);
619 vm_object_unlock(object);
620
621 return;
622 }
623
624 /*
625 * If there's no pager, then we can't clean the page. This should
626 * never happen since this should be a copy object and therefore not
627 * an external object, so the pager should always be there.
628 */
629
630 pager = object->pager;
631
632 if (pager == MEMORY_OBJECT_NULL) {
633 panic("missing pager for copy object");
634
635 VM_PAGE_FREE(m);
636 return;
637 }
638
639 /*
640 * set the page for future call to vm_fault_list_request
641 */
642 pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
643 SET_PAGE_DIRTY(m, FALSE);
644
645 /*
646 * keep the object from collapsing or terminating
647 */
648 vm_object_paging_begin(object);
649 vm_object_unlock(object);
650
651 /*
652 * Write the data to its pager.
653 * Note that the data is passed by naming the new object,
654 * not a virtual address; the pager interface has been
655 * manipulated to use the "internal memory" data type.
656 * [The object reference from its allocation is donated
657 * to the eventual recipient.]
658 */
659 memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
660
661 vm_object_lock(object);
662 vm_object_paging_end(object);
663 }
664
665
666 /*
667 * vm_pageout_cluster:
668 *
669 * Given a page, queue it to the appropriate I/O thread,
670 * which will page it out and attempt to clean adjacent pages
671 * in the same operation.
672 *
673 * The object and queues must be locked. We will take a
674 * paging reference to prevent deallocation or collapse when we
675 * release the object lock back at the call site. The I/O thread
676 * is responsible for consuming this reference
677 *
678 * The page must not be on any pageout queue.
679 */
680 #if DEVELOPMENT || DEBUG
681 vmct_stats_t vmct_stats;
682
683 int32_t vmct_active = 0;
684 uint64_t vm_compressor_epoch_start = 0;
685 uint64_t vm_compressor_epoch_stop = 0;
686
687 typedef enum vmct_state_t {
688 VMCT_IDLE,
689 VMCT_AWAKENED,
690 VMCT_ACTIVE,
691 } vmct_state_t;
692 vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
693 #endif
694
695
696 void
697 vm_pageout_cluster(vm_page_t m)
698 {
699 vm_object_t object = VM_PAGE_OBJECT(m);
700 struct vm_pageout_queue *q;
701
702
703 XPR(XPR_VM_PAGEOUT,
704 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
705 object, m->vmp_offset, m, 0, 0);
706
707 VM_PAGE_CHECK(m);
708 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
709 vm_object_lock_assert_exclusive(object);
710
711 /*
712 * Only a certain kind of page is appreciated here.
713 */
714 assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
715 assert(!m->vmp_cleaning && !m->vmp_laundry);
716 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
717
718 /*
719 * protect the object from collapse or termination
720 */
721 vm_object_activity_begin(object);
722
723 if (object->internal == TRUE) {
724 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
725
726 m->vmp_busy = TRUE;
727
728 q = &vm_pageout_queue_internal;
729 } else {
730 q = &vm_pageout_queue_external;
731 }
732
733 /*
734 * pgo_laundry count is tied to the laundry bit
735 */
736 m->vmp_laundry = TRUE;
737 q->pgo_laundry++;
738
739 m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
740 vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);
741
742 if (q->pgo_idle == TRUE) {
743 q->pgo_idle = FALSE;
744 thread_wakeup((event_t) &q->pgo_pending);
745 }
746 VM_PAGE_CHECK(m);
747 }
748
749
750 /*
751 * A page is back from laundry or we are stealing it back from
752 * the laundering state. See if there are some pages waiting to
753 * go to laundry and if we can let some of them go now.
754 *
755 * Object and page queues must be locked.
756 */
757 void
758 vm_pageout_throttle_up(
759 vm_page_t m)
760 {
761 struct vm_pageout_queue *q;
762 vm_object_t m_object;
763
764 m_object = VM_PAGE_OBJECT(m);
765
766 assert(m_object != VM_OBJECT_NULL);
767 assert(m_object != kernel_object);
768
769 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
770 vm_object_lock_assert_exclusive(m_object);
771
772 if (m_object->internal == TRUE) {
773 q = &vm_pageout_queue_internal;
774 } else {
775 q = &vm_pageout_queue_external;
776 }
777
778 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
779 vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
780 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
781
782 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
783
784 vm_object_activity_end(m_object);
785
786 VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
787 }
788 if (m->vmp_laundry == TRUE) {
789 m->vmp_laundry = FALSE;
790 q->pgo_laundry--;
791
792 if (q->pgo_throttled == TRUE) {
793 q->pgo_throttled = FALSE;
794 thread_wakeup((event_t) &q->pgo_laundry);
795 }
796 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
797 q->pgo_draining = FALSE;
798 thread_wakeup((event_t) (&q->pgo_laundry + 1));
799 }
800 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
801 }
802 }
803
804
805 static void
806 vm_pageout_throttle_up_batch(
807 struct vm_pageout_queue *q,
808 int batch_cnt)
809 {
810 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
811
812 VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);
813
814 q->pgo_laundry -= batch_cnt;
815
816 if (q->pgo_throttled == TRUE) {
817 q->pgo_throttled = FALSE;
818 thread_wakeup((event_t) &q->pgo_laundry);
819 }
820 if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
821 q->pgo_draining = FALSE;
822 thread_wakeup((event_t) (&q->pgo_laundry + 1));
823 }
824 }
825
826
827
828 /*
829 * VM memory pressure monitoring.
830 *
831 * vm_pageout_scan() keeps track of the number of pages it considers and
832 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
833 *
834 * compute_memory_pressure() is called every second from compute_averages()
835 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
836  * of reclaimed pages in a new vm_pageout_stat[] bucket.
837 *
838 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
839 * The caller provides the number of seconds ("nsecs") worth of statistics
840 * it wants, up to 30 seconds.
841 * It computes the number of pages reclaimed in the past "nsecs" seconds and
842 * also returns the number of pages the system still needs to reclaim at this
843 * moment in time.
844 */
845 #if DEVELOPMENT || DEBUG
846 #define VM_PAGEOUT_STAT_SIZE ((30 * 8) + 1)
847 #else
848 #define VM_PAGEOUT_STAT_SIZE ((1 * 8) + 1)
849 #endif
850 struct vm_pageout_stat {
851 unsigned long vm_page_active_count;
852 unsigned long vm_page_speculative_count;
853 unsigned long vm_page_inactive_count;
854 unsigned long vm_page_anonymous_count;
855
856 unsigned long vm_page_free_count;
857 unsigned long vm_page_wire_count;
858 unsigned long vm_page_compressor_count;
859
860 unsigned long vm_page_pages_compressed;
861 unsigned long vm_page_pageable_internal_count;
862 unsigned long vm_page_pageable_external_count;
863 unsigned long vm_page_xpmapped_external_count;
864
865 unsigned int pages_grabbed;
866 unsigned int pages_freed;
867
868 unsigned int pages_compressed;
869 unsigned int pages_grabbed_by_compressor;
870 unsigned int failed_compressions;
871
872 unsigned int pages_evicted;
873 unsigned int pages_purged;
874
875 unsigned int considered;
876 unsigned int considered_bq_internal;
877 unsigned int considered_bq_external;
878
879 unsigned int skipped_external;
880 unsigned int filecache_min_reactivations;
881
882 unsigned int freed_speculative;
883 unsigned int freed_cleaned;
884 unsigned int freed_internal;
885 unsigned int freed_external;
886
887 unsigned int cleaned_dirty_external;
888 unsigned int cleaned_dirty_internal;
889
890 unsigned int inactive_referenced;
891 unsigned int inactive_nolock;
892 unsigned int reactivation_limit_exceeded;
893 unsigned int forced_inactive_reclaim;
894
895 unsigned int throttled_internal_q;
896 unsigned int throttled_external_q;
897
898 unsigned int phantom_ghosts_found;
899 unsigned int phantom_ghosts_added;
900 } vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
901
902 unsigned int vm_pageout_stat_now = 0;
903
904 #define VM_PAGEOUT_STAT_BEFORE(i) \
905 (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
906 #define VM_PAGEOUT_STAT_AFTER(i) \
907 (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
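#if 0
/*
 * A minimal sketch, not part of the original source: fold the per-bucket
 * counters above into a "pages reclaimed over the last n buckets" figure,
 * the same backwards walk that mach_vm_pressure_monitor() performs below.
 * "vm_pageout_recent_reclaims" and "buckets_to_sum" are hypothetical names.
 */
static unsigned int
vm_pageout_recent_reclaims(unsigned int buckets_to_sum)
{
	unsigned int idx = vm_pageout_stat_now;
	unsigned int total = 0;

	while (buckets_to_sum-- != 0) {
		idx = VM_PAGEOUT_STAT_BEFORE(idx);
		total += vm_pageout_stats[idx].freed_speculative +
		    vm_pageout_stats[idx].freed_cleaned +
		    vm_pageout_stats[idx].freed_internal +
		    vm_pageout_stats[idx].freed_external;
	}
	return total;
}
#endif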
908
909 #if VM_PAGE_BUCKETS_CHECK
910 int vm_page_buckets_check_interval = 80; /* in eighths of a second */
911 #endif /* VM_PAGE_BUCKETS_CHECK */
912
913
914 void
915 record_memory_pressure(void);
916 void
917 record_memory_pressure(void)
918 {
919 unsigned int vm_pageout_next;
920
921 #if VM_PAGE_BUCKETS_CHECK
922 /* check the consistency of VM page buckets at regular interval */
923 static int counter = 0;
924 if ((++counter % vm_page_buckets_check_interval) == 0) {
925 vm_page_buckets_check();
926 }
927 #endif /* VM_PAGE_BUCKETS_CHECK */
928
929 vm_pageout_state.vm_memory_pressure =
930 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
931 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
932 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
933 vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;
934
935 commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure );
936
937 /* move "now" forward */
938 vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
939
940 bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));
941
942 vm_pageout_stat_now = vm_pageout_next;
943 }
944
945
946 /*
947 * IMPORTANT
948 * mach_vm_ctl_page_free_wanted() is called indirectly, via
949 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
950 * it must be safe in the restricted stackshot context. Locks and/or
951 * blocking are not allowable.
952 */
953 unsigned int
954 mach_vm_ctl_page_free_wanted(void)
955 {
956 unsigned int page_free_target, page_free_count, page_free_wanted;
957
958 page_free_target = vm_page_free_target;
959 page_free_count = vm_page_free_count;
960 if (page_free_target > page_free_count) {
961 page_free_wanted = page_free_target - page_free_count;
962 } else {
963 page_free_wanted = 0;
964 }
965
966 return page_free_wanted;
967 }
968
969
970 /*
971 * IMPORTANT:
972 * mach_vm_pressure_monitor() is called when taking a stackshot, with
973 * wait_for_pressure FALSE, so that code path must remain safe in the
974  * restricted stackshot context. No blocking or locks are allowable
975  * on that code path.
976 */
977
978 kern_return_t
979 mach_vm_pressure_monitor(
980 boolean_t wait_for_pressure,
981 unsigned int nsecs_monitored,
982 unsigned int *pages_reclaimed_p,
983 unsigned int *pages_wanted_p)
984 {
985 wait_result_t wr;
986 unsigned int vm_pageout_then, vm_pageout_now;
987 unsigned int pages_reclaimed;
988 unsigned int units_of_monitor;
989
990 units_of_monitor = 8 * nsecs_monitored;
991 /*
992 * We don't take the vm_page_queue_lock here because we don't want
993 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
994 * thread when it's trying to reclaim memory. We don't need fully
995 * accurate monitoring anyway...
996 */
997
998 if (wait_for_pressure) {
999 /* wait until there's memory pressure */
1000 while (vm_page_free_count >= vm_page_free_target) {
1001 wr = assert_wait((event_t) &vm_page_free_wanted,
1002 THREAD_INTERRUPTIBLE);
1003 if (wr == THREAD_WAITING) {
1004 wr = thread_block(THREAD_CONTINUE_NULL);
1005 }
1006 if (wr == THREAD_INTERRUPTED) {
1007 return KERN_ABORTED;
1008 }
1009 if (wr == THREAD_AWAKENED) {
1010 /*
1011 * The memory pressure might have already
1012 * been relieved but let's not block again
1013 * and let's report that there was memory
1014 * pressure at some point.
1015 */
1016 break;
1017 }
1018 }
1019 }
1020
1021 /* provide the number of pages the system wants to reclaim */
1022 if (pages_wanted_p != NULL) {
1023 *pages_wanted_p = mach_vm_ctl_page_free_wanted();
1024 }
1025
1026 if (pages_reclaimed_p == NULL) {
1027 return KERN_SUCCESS;
1028 }
1029
1030 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1031 vm_pageout_now = vm_pageout_stat_now;
1032 pages_reclaimed = 0;
1033 for (vm_pageout_then =
1034 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
1035 vm_pageout_then != vm_pageout_now &&
1036 units_of_monitor-- != 0;
1037 vm_pageout_then =
1038 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
1039 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
1040 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
1041 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
1042 pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
1043 }
1044 *pages_reclaimed_p = pages_reclaimed;
1045
1046 return KERN_SUCCESS;
1047 }
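
#if 0
/*
 * A minimal usage sketch, not part of the original source: poll the reclaim
 * statistics without blocking (wait_for_pressure == FALSE), the same
 * non-blocking mode the stackshot path relies on.  "log_recent_pressure"
 * is a hypothetical caller.
 */
static void
log_recent_pressure(void)
{
	unsigned int reclaimed = 0;
	unsigned int wanted = 0;

	if (mach_vm_pressure_monitor(FALSE,	/* don't wait for pressure */
	    10,					/* last ~10 seconds of stats */
	    &reclaimed, &wanted) == KERN_SUCCESS) {
		printf("pageout: reclaimed %u pages recently, %u still wanted\n",
		    reclaimed, wanted);
	}
}
#endif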
1048
1049
1050
1051 #if DEVELOPMENT || DEBUG
1052
1053 static void
1054 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
1055
1056 /*
1057 * condition variable used to make sure there is
1058 * only a single sweep going on at a time
1059 */
1060 boolean_t vm_pageout_disconnect_all_pages_active = FALSE;
1061
1062
1063 void
1064 vm_pageout_disconnect_all_pages()
1065 {
1066 vm_page_lock_queues();
1067
1068 if (vm_pageout_disconnect_all_pages_active == TRUE) {
1069 vm_page_unlock_queues();
1070 return;
1071 }
1072 vm_pageout_disconnect_all_pages_active = TRUE;
1073 vm_page_unlock_queues();
1074
1075 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1076 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1077 vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
1078
1079 vm_pageout_disconnect_all_pages_active = FALSE;
1080 }
1081
1082
1083 void
1084 vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
1085 {
1086 vm_page_t m;
1087 vm_object_t t_object = NULL;
1088 vm_object_t l_object = NULL;
1089 vm_object_t m_object = NULL;
1090 int delayed_unlock = 0;
1091 int try_failed_count = 0;
1092 int disconnected_count = 0;
1093 int paused_count = 0;
1094 int object_locked_count = 0;
1095
1096 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
1097 q, qcount, 0, 0, 0);
1098
1099 vm_page_lock_queues();
1100
1101 while (qcount && !vm_page_queue_empty(q)) {
1102 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1103
1104 m = (vm_page_t) vm_page_queue_first(q);
1105 m_object = VM_PAGE_OBJECT(m);
1106
1107 /*
1108 * check to see if we currently are working
1109 * with the same object... if so, we've
1110 * already got the lock
1111 */
1112 if (m_object != l_object) {
1113 /*
1114 * the object associated with candidate page is
1115 * different from the one we were just working
1116 * with... dump the lock if we still own it
1117 */
1118 if (l_object != NULL) {
1119 vm_object_unlock(l_object);
1120 l_object = NULL;
1121 }
1122 if (m_object != t_object) {
1123 try_failed_count = 0;
1124 }
1125
1126 /*
1127  * Try to lock the object; since we've already got the
1128 * page queues lock, we can only 'try' for this one.
1129 * if the 'try' fails, we need to do a mutex_pause
1130 * to allow the owner of the object lock a chance to
1131 * run...
1132 */
1133 if (!vm_object_lock_try_scan(m_object)) {
1134 if (try_failed_count > 20) {
1135 goto reenter_pg_on_q;
1136 }
1137 vm_page_unlock_queues();
1138 mutex_pause(try_failed_count++);
1139 vm_page_lock_queues();
1140 delayed_unlock = 0;
1141
1142 paused_count++;
1143
1144 t_object = m_object;
1145 continue;
1146 }
1147 object_locked_count++;
1148
1149 l_object = m_object;
1150 }
1151 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1152 /*
1153 * put it back on the head of its queue
1154 */
1155 goto reenter_pg_on_q;
1156 }
1157 if (m->vmp_pmapped == TRUE) {
1158 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
1159
1160 disconnected_count++;
1161 }
1162 reenter_pg_on_q:
1163 vm_page_queue_remove(q, m, vmp_pageq);
1164 vm_page_queue_enter(q, m, vmp_pageq);
1165
1166 qcount--;
1167 try_failed_count = 0;
1168
1169 if (delayed_unlock++ > 128) {
1170 if (l_object != NULL) {
1171 vm_object_unlock(l_object);
1172 l_object = NULL;
1173 }
1174 lck_mtx_yield(&vm_page_queue_lock);
1175 delayed_unlock = 0;
1176 }
1177 }
1178 if (l_object != NULL) {
1179 vm_object_unlock(l_object);
1180 l_object = NULL;
1181 }
1182 vm_page_unlock_queues();
1183
1184 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
1185 q, disconnected_count, object_locked_count, paused_count, 0);
1186 }
1187
1188 #endif
1189
1190
1191 static void
1192 vm_pageout_page_queue(vm_page_queue_head_t *, int);
1193
1194 /*
1195 * condition variable used to make sure there is
1196 * only a single sweep going on at a time
1197 */
1198 boolean_t vm_pageout_anonymous_pages_active = FALSE;
1199
1200
1201 void
1202 vm_pageout_anonymous_pages()
1203 {
1204 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
1205 vm_page_lock_queues();
1206
1207 if (vm_pageout_anonymous_pages_active == TRUE) {
1208 vm_page_unlock_queues();
1209 return;
1210 }
1211 vm_pageout_anonymous_pages_active = TRUE;
1212 vm_page_unlock_queues();
1213
1214 vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
1215 vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
1216 vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
1217
1218 if (VM_CONFIG_SWAP_IS_PRESENT) {
1219 vm_consider_swapping();
1220 }
1221
1222 vm_page_lock_queues();
1223 vm_pageout_anonymous_pages_active = FALSE;
1224 vm_page_unlock_queues();
1225 }
1226 }
1227
1228
1229 void
1230 vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
1231 {
1232 vm_page_t m;
1233 vm_object_t t_object = NULL;
1234 vm_object_t l_object = NULL;
1235 vm_object_t m_object = NULL;
1236 int delayed_unlock = 0;
1237 int try_failed_count = 0;
1238 int refmod_state;
1239 int pmap_options;
1240 struct vm_pageout_queue *iq;
1241 ppnum_t phys_page;
1242
1243
1244 iq = &vm_pageout_queue_internal;
1245
1246 vm_page_lock_queues();
1247
1248 while (qcount && !vm_page_queue_empty(q)) {
1249 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1250
1251 if (VM_PAGE_Q_THROTTLED(iq)) {
1252 if (l_object != NULL) {
1253 vm_object_unlock(l_object);
1254 l_object = NULL;
1255 }
1256 iq->pgo_draining = TRUE;
1257
1258 assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
1259 vm_page_unlock_queues();
1260
1261 thread_block(THREAD_CONTINUE_NULL);
1262
1263 vm_page_lock_queues();
1264 delayed_unlock = 0;
1265 continue;
1266 }
1267 m = (vm_page_t) vm_page_queue_first(q);
1268 m_object = VM_PAGE_OBJECT(m);
1269
1270 /*
1271 * check to see if we currently are working
1272 * with the same object... if so, we've
1273 * already got the lock
1274 */
1275 if (m_object != l_object) {
1276 if (!m_object->internal) {
1277 goto reenter_pg_on_q;
1278 }
1279
1280 /*
1281 * the object associated with candidate page is
1282 * different from the one we were just working
1283 * with... dump the lock if we still own it
1284 */
1285 if (l_object != NULL) {
1286 vm_object_unlock(l_object);
1287 l_object = NULL;
1288 }
1289 if (m_object != t_object) {
1290 try_failed_count = 0;
1291 }
1292
1293 /*
1294  * Try to lock the object; since we've already got the
1295 * page queues lock, we can only 'try' for this one.
1296 * if the 'try' fails, we need to do a mutex_pause
1297 * to allow the owner of the object lock a chance to
1298 * run...
1299 */
1300 if (!vm_object_lock_try_scan(m_object)) {
1301 if (try_failed_count > 20) {
1302 goto reenter_pg_on_q;
1303 }
1304 vm_page_unlock_queues();
1305 mutex_pause(try_failed_count++);
1306 vm_page_lock_queues();
1307 delayed_unlock = 0;
1308
1309 t_object = m_object;
1310 continue;
1311 }
1312 l_object = m_object;
1313 }
1314 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
1315 /*
1316 * page is not to be cleaned
1317 * put it back on the head of its queue
1318 */
1319 goto reenter_pg_on_q;
1320 }
1321 phys_page = VM_PAGE_GET_PHYS_PAGE(m);
1322
1323 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
1324 refmod_state = pmap_get_refmod(phys_page);
1325
1326 if (refmod_state & VM_MEM_REFERENCED) {
1327 m->vmp_reference = TRUE;
1328 }
1329 if (refmod_state & VM_MEM_MODIFIED) {
1330 SET_PAGE_DIRTY(m, FALSE);
1331 }
1332 }
1333 if (m->vmp_reference == TRUE) {
1334 m->vmp_reference = FALSE;
1335 pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1336 goto reenter_pg_on_q;
1337 }
1338 if (m->vmp_pmapped == TRUE) {
1339 if (m->vmp_dirty || m->vmp_precious) {
1340 pmap_options = PMAP_OPTIONS_COMPRESSOR;
1341 } else {
1342 pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1343 }
1344 refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
1345 if (refmod_state & VM_MEM_MODIFIED) {
1346 SET_PAGE_DIRTY(m, FALSE);
1347 }
1348 }
1349
1350 if (!m->vmp_dirty && !m->vmp_precious) {
1351 vm_page_unlock_queues();
1352 VM_PAGE_FREE(m);
1353 vm_page_lock_queues();
1354 delayed_unlock = 0;
1355
1356 goto next_pg;
1357 }
1358 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1359 if (!m_object->pager_initialized) {
1360 vm_page_unlock_queues();
1361
1362 vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
1363
1364 if (!m_object->pager_initialized) {
1365 vm_object_compressor_pager_create(m_object);
1366 }
1367
1368 vm_page_lock_queues();
1369 delayed_unlock = 0;
1370 }
1371 if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
1372 goto reenter_pg_on_q;
1373 }
1374 /*
1375 * vm_object_compressor_pager_create will drop the object lock
1376 * which means 'm' may no longer be valid to use
1377 */
1378 continue;
1379 }
1380 /*
1381 * we've already factored out pages in the laundry which
1382 * means this page can't be on the pageout queue so it's
1383 * safe to do the vm_page_queues_remove
1384 */
1385 vm_page_queues_remove(m, TRUE);
1386
1387 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1388
1389 vm_pageout_cluster(m);
1390
1391 goto next_pg;
1392
1393 reenter_pg_on_q:
1394 vm_page_queue_remove(q, m, vmp_pageq);
1395 vm_page_queue_enter(q, m, vmp_pageq);
1396 next_pg:
1397 qcount--;
1398 try_failed_count = 0;
1399
1400 if (delayed_unlock++ > 128) {
1401 if (l_object != NULL) {
1402 vm_object_unlock(l_object);
1403 l_object = NULL;
1404 }
1405 lck_mtx_yield(&vm_page_queue_lock);
1406 delayed_unlock = 0;
1407 }
1408 }
1409 if (l_object != NULL) {
1410 vm_object_unlock(l_object);
1411 l_object = NULL;
1412 }
1413 vm_page_unlock_queues();
1414 }
1415
1416
1417
1418 /*
1419 * function in BSD to apply I/O throttle to the pageout thread
1420 */
1421 extern void vm_pageout_io_throttle(void);
1422
1423 #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \
1424 MACRO_BEGIN \
1425 /* \
1426 * If a "reusable" page somehow made it back into \
1427 * the active queue, it's been re-used and is not \
1428 * quite re-usable. \
1429 * If the VM object was "all_reusable", consider it \
1430 * as "all re-used" instead of converting it to \
1431 * "partially re-used", which could be expensive. \
1432 */ \
1433 assert(VM_PAGE_OBJECT((m)) == (obj)); \
1434 if ((m)->vmp_reusable || \
1435 (obj)->all_reusable) { \
1436 vm_object_reuse_pages((obj), \
1437 (m)->vmp_offset, \
1438 (m)->vmp_offset + PAGE_SIZE_64, \
1439 FALSE); \
1440 } \
1441 MACRO_END
1442
1443
1444 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64
1445 #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024
1446
1447 #define FCS_IDLE 0
1448 #define FCS_DELAYED 1
1449 #define FCS_DEADLOCK_DETECTED 2
1450
1451 struct flow_control {
1452 int state;
1453 mach_timespec_t ts;
1454 };
1455
1456
1457 #if CONFIG_BACKGROUND_QUEUE
1458 uint64_t vm_pageout_rejected_bq_internal = 0;
1459 uint64_t vm_pageout_rejected_bq_external = 0;
1460 uint64_t vm_pageout_skipped_bq_internal = 0;
1461 #endif
1462
1463 #define ANONS_GRABBED_LIMIT 2
1464
1465
1466 #if 0
1467 static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
1468 #endif
1469 static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);
1470
1471 #define VM_PAGEOUT_PB_NO_ACTION 0
1472 #define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
1473 #define VM_PAGEOUT_PB_THREAD_YIELD 2
1474
1475
1476 #if 0
1477 static void
1478 vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
1479 {
1480 if (*local_freeq) {
1481 vm_page_unlock_queues();
1482
1483 VM_DEBUG_CONSTANT_EVENT(
1484 vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
1485 vm_page_free_count, 0, 0, 1);
1486
1487 vm_page_free_list(*local_freeq, TRUE);
1488
1489 VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
1490 vm_page_free_count, *local_freed, 0, 1);
1491
1492 *local_freeq = NULL;
1493 *local_freed = 0;
1494
1495 vm_page_lock_queues();
1496 } else {
1497 lck_mtx_yield(&vm_page_queue_lock);
1498 }
1499 *delayed_unlock = 1;
1500 }
1501 #endif
1502
1503
1504 static void
1505 vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
1506 vm_page_t *local_freeq, int *local_freed, int action)
1507 {
1508 vm_page_unlock_queues();
1509
1510 if (*object != NULL) {
1511 vm_object_unlock(*object);
1512 *object = NULL;
1513 }
1514 if (*local_freeq) {
1515 vm_page_free_list(*local_freeq, TRUE);
1516
1517 *local_freeq = NULL;
1518 *local_freed = 0;
1519 }
1520 *delayed_unlock = 1;
1521
1522 switch (action) {
1523 case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
1524 vm_consider_waking_compactor_swapper();
1525 break;
1526 case VM_PAGEOUT_PB_THREAD_YIELD:
1527 thread_yield_internal(1);
1528 break;
1529 case VM_PAGEOUT_PB_NO_ACTION:
1530 default:
1531 break;
1532 }
1533 vm_page_lock_queues();
1534 }
1535
1536
1537 static struct vm_pageout_vminfo last;
1538
1539 uint64_t last_vm_page_pages_grabbed = 0;
1540
1541 extern uint32_t c_segment_pages_compressed;
1542
1543 extern uint64_t shared_region_pager_reclaimed;
1544 extern struct memory_object_pager_ops shared_region_pager_ops;
1545
1546 void
1547 update_vm_info(void)
1548 {
1549 uint64_t tmp;
1550
1551 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
1552 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
1553 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
1554 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;
1555
1556 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
1557 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
1558 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;
1559
1560 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
1561 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
1562 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
1563 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
1564
1565
1566 tmp = vm_pageout_vminfo.vm_pageout_considered_page;
1567 vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
1568 last.vm_pageout_considered_page = tmp;
1569
1570 tmp = vm_pageout_vminfo.vm_pageout_compressions;
1571 vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions);
1572 last.vm_pageout_compressions = tmp;
1573
1574 tmp = vm_pageout_vminfo.vm_compressor_failed;
1575 vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
1576 last.vm_compressor_failed = tmp;
1577
1578 tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed;
1579 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed);
1580 last.vm_compressor_pages_grabbed = tmp;
1581
1582 tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
1583 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
1584 last.vm_phantom_cache_found_ghost = tmp;
1585
1586 tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
1587 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
1588 last.vm_phantom_cache_added_ghost = tmp;
1589
1590 tmp = get_pages_grabbed_count();
1591 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed);
1592 last_vm_page_pages_grabbed = tmp;
1593
1594 tmp = vm_pageout_vminfo.vm_page_pages_freed;
1595 vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
1596 last.vm_page_pages_freed = tmp;
1597
1598
1599 if (vm_pageout_stats[vm_pageout_stat_now].considered) {
1600 tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
1601 vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
1602 last.vm_pageout_pages_evicted = tmp;
1603
1604 tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
1605 vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
1606 last.vm_pageout_pages_purged = tmp;
1607
1608 tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
1609 vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
1610 last.vm_pageout_freed_speculative = tmp;
1611
1612 tmp = vm_pageout_vminfo.vm_pageout_freed_external;
1613 vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
1614 last.vm_pageout_freed_external = tmp;
1615
1616 tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
1617 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
1618 last.vm_pageout_inactive_referenced = tmp;
1619
1620 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
1621 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
1622 last.vm_pageout_scan_inactive_throttled_external = tmp;
1623
1624 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
1625 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
1626 last.vm_pageout_inactive_dirty_external = tmp;
1627
1628 tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
1629 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
1630 last.vm_pageout_freed_cleaned = tmp;
1631
1632 tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
1633 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
1634 last.vm_pageout_inactive_nolock = tmp;
1635
1636 tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
1637 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
1638 last.vm_pageout_scan_inactive_throttled_internal = tmp;
1639
1640 tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
1641 vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
1642 last.vm_pageout_skipped_external = tmp;
1643
1644 tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
1645 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
1646 last.vm_pageout_reactivation_limit_exceeded = tmp;
1647
1648 tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
1649 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
1650 last.vm_pageout_inactive_force_reclaim = tmp;
1651
1652 tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
1653 vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
1654 last.vm_pageout_freed_internal = tmp;
1655
1656 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
1657 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
1658 last.vm_pageout_considered_bq_internal = tmp;
1659
1660 tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
1661 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
1662 last.vm_pageout_considered_bq_external = tmp;
1663
1664 tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
1665 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
1666 last.vm_pageout_filecache_min_reactivated = tmp;
1667
1668 tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
1669 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
1670 last.vm_pageout_inactive_dirty_internal = tmp;
1671 }
1672
1673 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
1674 vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
1675 vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
1676 vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
1677 vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
1678 0);
1679
1680 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
1681 vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
1682 vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
1683 vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
1684 0,
1685 0);
1686
1687 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
1688 vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
1689 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
1690 vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
1691 vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
1692 0);
1693
1694 if (vm_pageout_stats[vm_pageout_stat_now].considered ||
1695 vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
1696 vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
1697 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
1698 vm_pageout_stats[vm_pageout_stat_now].considered,
1699 vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
1700 vm_pageout_stats[vm_pageout_stat_now].freed_external,
1701 vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
1702 0);
1703
1704 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
1705 vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
1706 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
1707 vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
1708 vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
1709 0);
1710
1711 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
1712 vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
1713 vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
1714 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
1715 vm_pageout_stats[vm_pageout_stat_now].skipped_external,
1716 0);
1717
1718 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
1719 vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
1720 vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
1721 vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
1722 vm_pageout_stats[vm_pageout_stat_now].freed_internal,
1723 0);
1724
1725 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
1726 vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
1727 vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
1728 vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
1729 vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
1730 0);
1731 }
1732 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
1733 vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
1734 vm_pageout_stats[vm_pageout_stat_now].pages_freed,
1735 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
1736 vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
1737 0);
1738
1739 record_memory_pressure();
1740 }
1741
1742 extern boolean_t hibernation_vmqueues_inspection;
1743
1744 void
1745 vm_page_balance_inactive(int max_to_move)
1746 {
1747 vm_page_t m;
1748
1749 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1750
1751 if (hibernation_vmqueues_inspection == TRUE) {
1752 /*
1753 * It is likely that the hibernation code path is
1754 * dealing with these very queues as we are about
1755 * to move pages around in/from them and completely
1756 * change the linkage of the pages.
1757 *
1758 * And so we skip the rebalancing of these queues.
1759 */
1760 return;
1761 }
1762 vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
1763 vm_page_inactive_count +
1764 vm_page_speculative_count);
1765
1766 while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
1767 VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
1768
1769 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
1770
1771 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
1772 assert(!m->vmp_laundry);
1773 assert(VM_PAGE_OBJECT(m) != kernel_object);
1774 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
1775
1776 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
1777
1778 /*
1779 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
1780 *
1781 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
1782 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
1783 * new reference happens. If no further references happen on the page after that remote TLB entry is flushed
1784 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
1785 * by pageout_scan, which is just fine since the last reference would have happened quite far
1786 * in the past (TLB caches don't hang around for very long), and of course could just as easily
1787 * have happened before we moved the page
1788 */
1789 if (m->vmp_pmapped == TRUE) {
1790 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
1791 }
1792
1793 /*
1794 * The page might be absent or busy,
1795 * but vm_page_deactivate can handle that.
1796 * FALSE indicates that we don't want the H/W reference bit cleared
1797 */
1798 vm_page_deactivate_internal(m, FALSE);
1799 }
1800 }
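/*
 * usage sketch for vm_page_balance_inactive() -- a hypothetical caller shown
 * for illustration only (the batch size of 64 is an assumption, not a value
 * used anywhere in this file):
 *
 *	vm_page_lock_queues();
 *	vm_page_balance_inactive(64);	// trickle up to 64 active pages
 *					// toward the inactive queue
 *	vm_page_unlock_queues();
 *
 * vm_pageout_scan() below calls it with a batch of 1 so that balancing stays
 * interleaved with the reclaim work done on each loop iteration.
 */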
1801
1802
1803 /*
1804 * vm_pageout_scan does the dirty work for the pageout daemon.
1805 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
1806 * held and vm_page_free_wanted == 0.
1807 */
1808 void
1809 vm_pageout_scan(void)
1810 {
1811 unsigned int loop_count = 0;
1812 unsigned int inactive_burst_count = 0;
1813 unsigned int reactivated_this_call;
1814 unsigned int reactivate_limit;
1815 vm_page_t local_freeq = NULL;
1816 int local_freed = 0;
1817 int delayed_unlock;
1818 int delayed_unlock_limit = 0;
1819 int refmod_state = 0;
1820 int vm_pageout_deadlock_target = 0;
1821 struct vm_pageout_queue *iq;
1822 struct vm_pageout_queue *eq;
1823 struct vm_speculative_age_q *sq;
1824 struct flow_control flow_control = { 0, { 0, 0 } };
1825 boolean_t inactive_throttled = FALSE;
1826 mach_timespec_t ts;
1827 unsigned int msecs = 0;
1828 vm_object_t object = NULL;
1829 uint32_t inactive_reclaim_run;
1830 boolean_t exceeded_burst_throttle;
1831 boolean_t grab_anonymous = FALSE;
1832 boolean_t force_anonymous = FALSE;
1833 boolean_t force_speculative_aging = FALSE;
1834 int anons_grabbed = 0;
1835 int page_prev_q_state = 0;
1836 #if CONFIG_BACKGROUND_QUEUE
1837 boolean_t page_from_bg_q = FALSE;
1838 #endif
1839 int cache_evict_throttle = 0;
1840 uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
1841 uint32_t inactive_external_count;
1842 int force_purge = 0;
1843 int divisor;
1844 #define DELAY_SPECULATIVE_AGE 1000
1845 int delay_speculative_age = 0;
1846 vm_object_t m_object = VM_OBJECT_NULL;
1847
1848 #if VM_PRESSURE_EVENTS
1849 vm_pressure_level_t pressure_level;
1850 #endif /* VM_PRESSURE_EVENTS */
1851
1852 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
1853 vm_pageout_vminfo.vm_pageout_freed_speculative,
1854 vm_pageout_state.vm_pageout_inactive_clean,
1855 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
1856 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
1857
1858 flow_control.state = FCS_IDLE;
1859 iq = &vm_pageout_queue_internal;
1860 eq = &vm_pageout_queue_external;
1861 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
1862
1863
1864 XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1865
1866 /* Ask the pmap layer to return any pages it no longer needs. */
1867 uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
1868
1869 vm_page_lock_queues();
1870
1871 vm_page_wire_count -= pmap_wired_pages_freed;
1872
1873 delayed_unlock = 1;
1874
1875 /*
1876 * Calculate the max number of referenced pages on the inactive
1877 * queue that we will reactivate.
1878 */
1879 reactivated_this_call = 0;
1880 reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
1881 vm_page_inactive_count);
1882 inactive_reclaim_run = 0;
1883
1884 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
1885
1886 /*
1887 * We must limit the rate at which we send pages to the pagers
1888 * so that we don't tie up too many pages in the I/O queues.
1889 * We implement a throttling mechanism using the laundry count
1890 * to limit the number of pages outstanding to the default
1891 * and external pagers. We can bypass the throttles and look
1892 * for clean pages if the pageout queues don't drain in a timely
1893 * fashion since this may indicate that the pageout paths are
1894 * stalled waiting for memory, which only we can provide.
1895 */
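/*
 * a rough sketch of the throttle test that gates the laundry below; the real
 * check is VM_PAGE_Q_THROTTLED() and the comparison shown here is an
 * assumption based on the pgo_laundry / pgo_maxlaundry fields this file
 * already reads:
 *
 *	static inline boolean_t
 *	pageout_queue_looks_throttled(struct vm_pageout_queue *q)
 *	{
 *		// too many pages already outstanding to this pager?
 *		return (q->pgo_laundry >= q->pgo_maxlaundry);
 *	}
 */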
1896
1897 Restart:
1898
1899 assert(object == NULL);
1900 assert(delayed_unlock != 0);
1901
1902 vm_page_anonymous_min = vm_page_inactive_target / 20;
1903
1904 if (vm_pageout_state.vm_page_speculative_percentage > 50) {
1905 vm_pageout_state.vm_page_speculative_percentage = 50;
1906 } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
1907 vm_pageout_state.vm_page_speculative_percentage = 1;
1908 }
1909
1910 vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
1911 vm_page_inactive_count);
1912
1913 for (;;) {
1914 vm_page_t m;
1915
1916 DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
1917
1918 if (vm_upl_wait_for_pages < 0) {
1919 vm_upl_wait_for_pages = 0;
1920 }
1921
1922 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
1923
1924 if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
1925 delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
1926 }
1927
1928 #if CONFIG_SECLUDED_MEMORY
1929 /*
1930 * Deal with secluded_q overflow.
1931 */
1932 if (vm_page_secluded_count > vm_page_secluded_target) {
1933 vm_page_t secluded_page;
1934
1935 /*
1936 * SECLUDED_AGING_BEFORE_ACTIVE:
1937 * Excess secluded pages go to the active queue and
1938 * will later go to the inactive queue.
1939 */
1940 assert((vm_page_secluded_count_free +
1941 vm_page_secluded_count_inuse) ==
1942 vm_page_secluded_count);
1943 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
1944 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
1945
1946 vm_page_queues_remove(secluded_page, FALSE);
1947 assert(!secluded_page->vmp_fictitious);
1948 assert(!VM_PAGE_WIRED(secluded_page));
1949
1950 if (secluded_page->vmp_object == 0) {
1951 /* transfer to free queue */
1952 assert(secluded_page->vmp_busy);
1953 secluded_page->vmp_snext = local_freeq;
1954 local_freeq = secluded_page;
1955 local_freed++;
1956 } else {
1957 /* transfer to head of active queue */
1958 vm_page_enqueue_active(secluded_page, FALSE);
1959 secluded_page = VM_PAGE_NULL;
1960 }
1961 }
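			/*
			 * note: only one excess secluded page is handled per
			 * pass through this loop, so a large overflow drains
			 * gradually as the scan iterates and local_freeq is
			 * flushed.
			 */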
1962 #endif /* CONFIG_SECLUDED_MEMORY */
1963
1964 assert(delayed_unlock);
1965
1966 /*
1967 * maintain our balance
1968 */
1969 vm_page_balance_inactive(1);
1970
1971
1972 /**********************************************************************
1973 * above this point we're playing with the active and secluded queues
1974 * below this point we're playing with the throttling mechanisms
1975 * and the inactive queue
1976 **********************************************************************/
1977
1978 if (vm_page_free_count + local_freed >= vm_page_free_target) {
1979 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
1980
1981 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
1982 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
1983 /*
1984 * make sure the pageout I/O threads are running
1985 * throttled in case there are still requests
1986 * in the laundry... since we have met our targets
1987 * we don't need the laundry to be cleaned in a timely
1988 * fashion... so let's avoid interfering with foreground
1989 * activity
1990 */
1991 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
1992
1993 lck_mtx_lock(&vm_page_queue_free_lock);
1994
1995 if ((vm_page_free_count >= vm_page_free_target) &&
1996 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
1997 /*
1998 * done - we have met our target *and*
1999 * there is no one waiting for a page.
2000 */
2001 return_from_scan:
2002 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2003
2004 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
2005 vm_pageout_state.vm_pageout_inactive,
2006 vm_pageout_state.vm_pageout_inactive_used, 0, 0);
2007 VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
2008 vm_pageout_vminfo.vm_pageout_freed_speculative,
2009 vm_pageout_state.vm_pageout_inactive_clean,
2010 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
2011 vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
2012
2013 return;
2014 }
2015 lck_mtx_unlock(&vm_page_queue_free_lock);
2016 }
2017
2018 /*
2019 * Before anything, we check if we have any ripe volatile
2020 * objects around. If so, try to purge the first object.
2021 * If the purge fails, fall through to reclaim a page instead.
2022 * If the purge succeeds, go back to the top and reevaluate
2023 * the new memory situation.
2024 */
2025
2026 assert(available_for_purge >= 0);
2027 force_purge = 0; /* no force-purging */
2028
2029 #if VM_PRESSURE_EVENTS
2030 pressure_level = memorystatus_vm_pressure_level;
2031
2032 if (pressure_level > kVMPressureNormal) {
2033 if (pressure_level >= kVMPressureCritical) {
2034 force_purge = vm_pageout_state.memorystatus_purge_on_critical;
2035 } else if (pressure_level >= kVMPressureUrgent) {
2036 force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
2037 } else if (pressure_level >= kVMPressureWarning) {
2038 force_purge = vm_pageout_state.memorystatus_purge_on_warning;
2039 }
2040 }
2041 #endif /* VM_PRESSURE_EVENTS */
2042
2043 if (available_for_purge || force_purge) {
2044 if (object != NULL) {
2045 vm_object_unlock(object);
2046 object = NULL;
2047 }
2048
2049 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
2050
2051 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
2052 if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
2053 VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
2054 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
2055 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
2056 continue;
2057 }
2058 VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
2059 memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
2060 }
2061
2062 if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
2063 /*
2064 * try to pull pages from the aging bins...
2065 * see vm_page.h for an explanation of how
2066 * this mechanism works
2067 */
2068 struct vm_speculative_age_q *aq;
2069 boolean_t can_steal = FALSE;
2070 int num_scanned_queues;
2071
2072 aq = &vm_page_queue_speculative[speculative_steal_index];
2073
2074 num_scanned_queues = 0;
2075 while (vm_page_queue_empty(&aq->age_q) &&
2076 num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2077 speculative_steal_index++;
2078
2079 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
2080 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
2081 }
2082
2083 aq = &vm_page_queue_speculative[speculative_steal_index];
2084 }
2085
2086 if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
2087 /*
2088 * XXX We've scanned all the speculative
2089 * queues but still haven't found one
2090 * that is not empty, even though
2091 * vm_page_speculative_count is not 0.
2092 */
2093 if (!vm_page_queue_empty(&sq->age_q)) {
2094 continue;
2095 }
2096 #if DEVELOPMENT || DEBUG
2097 panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
2098 #endif
2099 /* readjust... */
2100 vm_page_speculative_count = 0;
2101 /* ... and continue */
2102 continue;
2103 }
2104
2105 if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
2106 can_steal = TRUE;
2107 } else {
2108 if (!delay_speculative_age) {
2109 mach_timespec_t ts_fully_aged;
2110
2111 ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
2112 ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
2113 * 1000 * NSEC_PER_USEC;
2114
2115 ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
2116
2117 clock_sec_t sec;
2118 clock_nsec_t nsec;
2119 clock_get_system_nanotime(&sec, &nsec);
2120 ts.tv_sec = (unsigned int) sec;
2121 ts.tv_nsec = nsec;
2122
2123 if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
2124 can_steal = TRUE;
2125 } else {
2126 delay_speculative_age++;
2127 }
2128 } else {
2129 delay_speculative_age++;
2130 if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
2131 delay_speculative_age = 0;
2132 }
2133 }
2134 }
2135 if (can_steal == TRUE) {
2136 vm_page_speculate_ageit(aq);
2137 }
2138 }
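		/*
		 * worked example of the ts_fully_aged computation above (both
		 * input values are assumed for illustration): with
		 * VM_PAGE_MAX_SPECULATIVE_AGE_Q == 10 and
		 * vm_page_speculative_q_age_ms == 1000, the product is 10000 ms,
		 * so ts_fully_aged becomes aq->age_ts plus 10000 / 1000 = 10
		 * seconds and (10000 % 1000) * 1000 * NSEC_PER_USEC = 0
		 * nanoseconds; the queue becomes stealable once the current
		 * time passes that point.
		 */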
2139 force_speculative_aging = FALSE;
2140
2141 if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
2142 int pages_evicted;
2143
2144 if (object != NULL) {
2145 vm_object_unlock(object);
2146 object = NULL;
2147 }
2148 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
2149
2150 pages_evicted = vm_object_cache_evict(100, 10);
2151
2152 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
2153
2154 if (pages_evicted) {
2155 vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
2156
2157 VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
2158 vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
2159 memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
2160
2161 /*
2162 * we just freed up to 100 pages,
2163 * so go back to the top of the main loop
2164 * and re-evaluate the memory situation
2165 */
2166 continue;
2167 } else {
2168 cache_evict_throttle = 1000;
2169 }
2170 }
2171 if (cache_evict_throttle) {
2172 cache_evict_throttle--;
2173 }
2174
2175 divisor = vm_pageout_state.vm_page_filecache_min_divisor;
2176
2177 #if CONFIG_JETSAM
2178 /*
2179 * don't let the filecache_min fall below 15% of available memory
2180 * on systems with an active compressor that isn't nearing its
2181 * limits w/r to accepting new data
2182 *
2183 * on systems w/o the compressor/swapper, the filecache is always
2184 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2185 * since most (if not all) of the anonymous pages are in the
2186 * throttled queue (which isn't counted as available) which
2187 * effectively disables this filter
2188 */
2189 if (vm_compressor_low_on_space() || divisor == 0) {
2190 vm_pageout_state.vm_page_filecache_min = 0;
2191 } else {
2192 vm_pageout_state.vm_page_filecache_min =
2193 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2194 }
2195 #else
2196 if (vm_compressor_out_of_space() || divisor == 0) {
2197 vm_pageout_state.vm_page_filecache_min = 0;
2198 } else {
2199 /*
2200 * don't let the filecache_min fall below the specified critical level
2201 */
2202 vm_pageout_state.vm_page_filecache_min =
2203 ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
2204 }
2205 #endif
2206 if (vm_page_free_count < (vm_page_free_reserved / 4)) {
2207 vm_pageout_state.vm_page_filecache_min = 0;
2208 }
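		/*
		 * worked example (input values assumed for illustration): with
		 * AVAILABLE_NON_COMPRESSED_MEMORY at 10000 pages and a divisor
		 * of 66, the expression above gives (10000 * 10) / 66 = 1515
		 * pages, i.e. roughly the 15% floor described in the
		 * CONFIG_JETSAM comment; a divisor of 20 would raise the floor
		 * to 5000 pages (50%), and a divisor of 0 disables it entirely.
		 */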
2209
2210 exceeded_burst_throttle = FALSE;
2211 /*
2212 * Sometimes we have to pause:
2213 * 1) No inactive pages - nothing to do.
2214 * 2) Loop control - no acceptable pages found on the inactive queue
2215 * within the last vm_pageout_burst_inactive_throttle iterations
2216 * 3) Flow control - default pageout queue is full
2217 */
2218 if (vm_page_queue_empty(&vm_page_queue_inactive) &&
2219 vm_page_queue_empty(&vm_page_queue_anonymous) &&
2220 vm_page_queue_empty(&vm_page_queue_cleaned) &&
2221 vm_page_queue_empty(&sq->age_q)) {
2222 VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
2223 msecs = vm_pageout_state.vm_pageout_empty_wait;
2224 goto vm_pageout_scan_delay;
2225 } else if (inactive_burst_count >=
2226 MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
2227 (vm_page_inactive_count +
2228 vm_page_speculative_count))) {
2229 VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
2230 msecs = vm_pageout_state.vm_pageout_burst_wait;
2231
2232 exceeded_burst_throttle = TRUE;
2233 goto vm_pageout_scan_delay;
2234 } else if (VM_PAGE_Q_THROTTLED(iq) &&
2235 VM_DYNAMIC_PAGING_ENABLED()) {
2236 clock_sec_t sec;
2237 clock_nsec_t nsec;
2238
2239 switch (flow_control.state) {
2240 case FCS_IDLE:
2241 if ((vm_page_free_count + local_freed) < vm_page_free_target &&
2242 vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2243 /*
2244 * since the compressor is running independently of vm_pageout_scan
2245 * let's not wait for it just yet... as long as we have a healthy supply
2246 * of filecache pages to work with, let's keep stealing those.
2247 */
2248 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2249
2250 if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
2251 (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2252 anons_grabbed = ANONS_GRABBED_LIMIT;
2253 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
2254 goto consider_inactive;
2255 }
2256 }
2257 reset_deadlock_timer:
2258 ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
2259 ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
2260 clock_get_system_nanotime(&sec, &nsec);
2261 flow_control.ts.tv_sec = (unsigned int) sec;
2262 flow_control.ts.tv_nsec = nsec;
2263 ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
2264
2265 flow_control.state = FCS_DELAYED;
2266 msecs = vm_pageout_state.vm_pageout_deadlock_wait;
2267
2268 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
2269 break;
2270
2271 case FCS_DELAYED:
2272 clock_get_system_nanotime(&sec, &nsec);
2273 ts.tv_sec = (unsigned int) sec;
2274 ts.tv_nsec = nsec;
2275
2276 if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
2277 /*
2278 * the pageout thread for the default pager is potentially
2279 * deadlocked since the
2280 * default pager queue has been throttled for more than the
2281 * allowable time... we need to move some clean pages or dirty
2282 * pages belonging to the external pagers if they aren't throttled
2283 * vm_page_free_wanted represents the number of threads currently
2284 * blocked waiting for pages... we'll move one page for each of
2285 * these plus a fixed amount to break the logjam... once we're done
2286 * moving this number of pages, we'll re-enter the FCS_DELAYED state
2287 * with a new timeout target since we have no way of knowing
2288 * whether we've broken the deadlock except through observation
2289 * of the queue associated with the default pager... we need to
2290 * stop moving pages and allow the system to run to see what
2291 * state it settles into.
2292 */
2293 vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
2294 vm_page_free_wanted + vm_page_free_wanted_privileged;
2295 VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
2296 flow_control.state = FCS_DEADLOCK_DETECTED;
2297 thread_wakeup((event_t) &vm_pageout_garbage_collect);
2298 goto consider_inactive;
2299 }
2300 /*
2301 * just resniff instead of trying
2302 * to compute a new delay time... we're going to be
2303 * awakened immediately upon a laundry completion,
2304 * so we won't wait any longer than necessary
2305 */
2306 msecs = vm_pageout_state.vm_pageout_idle_wait;
2307 break;
2308
2309 case FCS_DEADLOCK_DETECTED:
2310 if (vm_pageout_deadlock_target) {
2311 goto consider_inactive;
2312 }
2313 goto reset_deadlock_timer;
2314 }
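			/*
			 * flow_control state machine recap (a summary of the
			 * cases above, not new behavior):
			 *
			 *	FCS_IDLE ---------------------> FCS_DELAYED
			 *		(laundry throttled, deadlock timer armed)
			 *	FCS_DELAYED --(timer expired)-> FCS_DEADLOCK_DETECTED
			 *		(wake the garbage collector, set a page-move target)
			 *	FCS_DEADLOCK_DETECTED --(target drained)-> re-arm
			 *		via reset_deadlock_timer
			 *
			 * the code at vm_pageout_scan_delay drops the state back
			 * to FCS_IDLE once the laundry is no longer throttled or
			 * clean pages become available.
			 */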
2315 vm_pageout_scan_delay:
2316 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2317
2318 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
2319 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
2320
2321 if (vm_page_free_count >= vm_page_free_target) {
2322 /*
2323 * we're here because
2324 * 1) someone else freed up some pages while we had
2325 * the queues unlocked above
2326 * and we've hit one of the 3 conditions that
2327 * cause us to pause the pageout scan thread
2328 *
2329 * since we already have enough free pages,
2330 * let's avoid stalling and return normally
2331 *
2332 * before we return, make sure the pageout I/O threads
2333 * are running throttled in case there are still requests
2334 * in the laundry... since we have enough free pages
2335 * we don't need the laundry to be cleaned in a timely
2336 * fashion... so let's avoid interfering with foreground
2337 * activity
2338 *
2339 * we don't want to hold vm_page_queue_free_lock when
2340 * calling vm_pageout_adjust_eq_iothrottle (since it
2341 * may cause other locks to be taken), we do the initial
2342 * check outside of the lock. Once we take the lock,
2343 * we recheck the condition since it may have changed.
2344 * if it has, no problem, we will make the threads
2345 * non-throttled before actually blocking
2346 */
2347 vm_pageout_adjust_eq_iothrottle(eq, TRUE);
2348 }
2349 lck_mtx_lock(&vm_page_queue_free_lock);
2350
2351 if (vm_page_free_count >= vm_page_free_target &&
2352 (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
2353 goto return_from_scan;
2354 }
2355 lck_mtx_unlock(&vm_page_queue_free_lock);
2356
2357 if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
2358 /*
2359 * we're most likely about to block due to one of
2360 * the 3 conditions that cause vm_pageout_scan to
2361 * not be able to make forward progress w/r
2362 * to providing new pages to the free queue,
2363 * so unthrottle the I/O threads in case we
2364 * have laundry to be cleaned... it needs
2365 * to be completed ASAP.
2366 *
2367 * even if we don't block, we want the io threads
2368 * running unthrottled since the sum of free +
2369 * clean pages is still under our free target
2370 */
2371 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
2372 }
2373 if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
2374 /*
2375 * if we get here we're below our free target and
2376 * we're stalling due to a full laundry queue or
2377 * we don't have any inactive pages other than
2378 * those in the clean queue...
2379 * however, we have pages on the clean queue that
2380 * can be moved to the free queue, so let's not
2381 * stall the pageout scan
2382 */
2383 flow_control.state = FCS_IDLE;
2384 goto consider_inactive;
2385 }
2386 if (flow_control.state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
2387 flow_control.state = FCS_IDLE;
2388 goto consider_inactive;
2389 }
2390
2391 VM_CHECK_MEMORYSTATUS;
2392
2393 if (flow_control.state != FCS_IDLE) {
2394 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
2395 }
2396
2397 iq->pgo_throttled = TRUE;
2398 assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
2399
2400 counter(c_vm_pageout_scan_block++);
2401
2402 vm_page_unlock_queues();
2403
2404 assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
2405
2406 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
2407 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2408 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
2409
2410 thread_block(THREAD_CONTINUE_NULL);
2411
2412 VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
2413 iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
2414 memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
2415
2416 vm_page_lock_queues();
2417
2418 iq->pgo_throttled = FALSE;
2419
2420 if (loop_count >= vm_page_inactive_count) {
2421 loop_count = 0;
2422 }
2423 inactive_burst_count = 0;
2424
2425 goto Restart;
2426 /*NOTREACHED*/
2427 }
2428
2429
2430 flow_control.state = FCS_IDLE;
2431 consider_inactive:
2432 vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
2433 vm_pageout_inactive_external_forced_reactivate_limit);
2434 loop_count++;
2435 inactive_burst_count++;
2436 vm_pageout_state.vm_pageout_inactive++;
2437
2438 /*
2439 * Choose a victim.
2440 */
2441 while (1) {
2442 #if CONFIG_BACKGROUND_QUEUE
2443 page_from_bg_q = FALSE;
2444 #endif /* CONFIG_BACKGROUND_QUEUE */
2445
2446 m = NULL;
2447 m_object = VM_OBJECT_NULL;
2448
2449 if (VM_DYNAMIC_PAGING_ENABLED()) {
2450 assert(vm_page_throttled_count == 0);
2451 assert(vm_page_queue_empty(&vm_page_queue_throttled));
2452 }
2453
2454 /*
2455 * Try for a clean-queue inactive page.
2456 * These are pages that vm_pageout_scan tried to steal earlier, but
2457 * were dirty and had to be cleaned. Pick them up now that they are clean.
2458 */
2459 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2460 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2461
2462 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
2463
2464 break;
2465 }
2466
2467 /*
2468 * The next most eligible pages are ones we paged in speculatively,
2469 * but which have not yet been touched and have been aged out.
2470 */
2471 if (!vm_page_queue_empty(&sq->age_q)) {
2472 m = (vm_page_t) vm_page_queue_first(&sq->age_q);
2473
2474 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
2475
2476 if (!m->vmp_dirty || force_anonymous == FALSE) {
2477 break;
2478 } else {
2479 m = NULL;
2480 }
2481 }
2482
2483 #if CONFIG_BACKGROUND_QUEUE
2484 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
2485 vm_object_t bg_m_object = NULL;
2486
2487 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
2488
2489 bg_m_object = VM_PAGE_OBJECT(m);
2490
2491 if (!VM_PAGE_PAGEABLE(m)) {
2492 /*
2493 * This page is on the background queue
2494 * but not on a pageable queue. This is
2495 * likely a transient state and whoever
2496 * took it out of its pageable queue
2497 * will likely put it back on a pageable
2498 * queue soon but we can't deal with it
2499 * at this point, so let's ignore this
2500 * page.
2501 */
2502 } else if (force_anonymous == FALSE || bg_m_object->internal) {
2503 if (bg_m_object->internal &&
2504 (VM_PAGE_Q_THROTTLED(iq) ||
2505 vm_compressor_out_of_space() == TRUE ||
2506 vm_page_free_count < (vm_page_free_reserved / 4))) {
2507 vm_pageout_skipped_bq_internal++;
2508 } else {
2509 page_from_bg_q = TRUE;
2510
2511 if (bg_m_object->internal) {
2512 vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
2513 } else {
2514 vm_pageout_vminfo.vm_pageout_considered_bq_external++;
2515 }
2516 break;
2517 }
2518 }
2519 }
2520 #endif
2521 inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
2522
2523 if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
2524 (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
2525 grab_anonymous = TRUE;
2526 anons_grabbed = 0;
2527
2528 vm_pageout_vminfo.vm_pageout_skipped_external++;
2529 goto want_anonymous;
2530 }
2531 grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
2532
2533 #if CONFIG_JETSAM
2534 /* If the file-backed pool has accumulated
2535 * significantly more pages than the jetsam
2536 * threshold, prefer to reclaim those
2537 * inline to minimise compute overhead of reclaiming
2538 * anonymous pages.
2539 * This calculation does not account for the CPU local
2540 * external page queues, as those are expected to be
2541 * much smaller relative to the global pools.
2542 */
2543 if (grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
2544 if (vm_page_pageable_external_count >
2545 vm_pageout_state.vm_page_filecache_min) {
2546 if ((vm_page_pageable_external_count *
2547 vm_pageout_memorystatus_fb_factor_dr) >
2548 (memorystatus_available_pages_critical *
2549 vm_pageout_memorystatus_fb_factor_nr)) {
2550 grab_anonymous = FALSE;
2551
2552 VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
2553 }
2554 }
2555 if (grab_anonymous) {
2556 VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
2557 }
2558 }
2559 #endif /* CONFIG_JETSAM */
2560
2561 want_anonymous:
2562 if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
2563 if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
2564 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2565
2566 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
2567 anons_grabbed = 0;
2568
2569 if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
2570 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2571 if ((++reactivated_this_call % 100)) {
2572 vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
2573 goto must_activate_page;
2574 }
2575 /*
2576 * steal 1% of the file backed pages even if
2577 * we are under the limit that has been set
2578 * for a healthy filecache
2579 */
2580 }
2581 }
2582 break;
2583 }
2584 }
2585 if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2586 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2587
2588 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
2589 anons_grabbed++;
2590
2591 break;
2592 }
2593
2594 /*
2595 * if we've gotten here, we have no victim page.
2596 * check to see if we've not finished balancing the queues
2597 * or we have a page on the aged speculative queue that we
2598 * skipped due to force_anonymous == TRUE... or we have
2599 * speculative pages that we can prematurely age... in
2600 * one of these cases we'll keep going, else panic
2601 */
2602 force_anonymous = FALSE;
2603 VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
2604
2605 if (!vm_page_queue_empty(&sq->age_q)) {
2606 goto done_with_inactivepage;
2607 }
2608
2609 if (vm_page_speculative_count) {
2610 force_speculative_aging = TRUE;
2611 goto done_with_inactivepage;
2612 }
2613 panic("vm_pageout: no victim");
2614
2615 /* NOTREACHED */
2616 }
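			/*
			 * victim selection order recap (a summary of the loop
			 * above, not new policy): the cleaned queue is tried
			 * first, then aged speculative pages, then the background
			 * queue when it is over target, and finally either the
			 * external inactive queue or the anonymous queue depending
			 * on grab_anonymous, anons_grabbed and the filecache floor.
			 */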
2617 assert(VM_PAGE_PAGEABLE(m));
2618 m_object = VM_PAGE_OBJECT(m);
2619 force_anonymous = FALSE;
2620
2621 page_prev_q_state = m->vmp_q_state;
2622 /*
2623 * we just found this page on one of our queues...
2624 * it can't also be on the pageout queue, so safe
2625 * to call vm_page_queues_remove
2626 */
2627 vm_page_queues_remove(m, TRUE);
2628
2629 assert(!m->vmp_laundry);
2630 assert(!m->vmp_private);
2631 assert(!m->vmp_fictitious);
2632 assert(m_object != kernel_object);
2633 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
2634
2635 vm_pageout_vminfo.vm_pageout_considered_page++;
2636
2637 DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2638
2639 /*
2640 * check to see if we currently are working
2641 * with the same object... if so, we've
2642 * already got the lock
2643 */
2644 if (m_object != object) {
2645 /*
2646 * the object associated with the candidate page is
2647 * different from the one we were just working
2648 * with... dump the lock if we still own it
2649 */
2650 if (object != NULL) {
2651 vm_object_unlock(object);
2652 object = NULL;
2653 }
2654 /*
2655 * Try to lock object; since we've already got the
2656 * page queues lock, we can only 'try' for this one.
2657 * if the 'try' fails, we need to do a mutex_pause
2658 * to allow the owner of the object lock a chance to
2659 * run... otherwise, we're likely to trip over this
2660 * object in the same state as we work our way through
2661 * the queue... clumps of pages associated with the same
2662 * object are fairly typical on the inactive and active queues
2663 */
2664 if (!vm_object_lock_try_scan(m_object)) {
2665 vm_page_t m_want = NULL;
2666
2667 vm_pageout_vminfo.vm_pageout_inactive_nolock++;
2668
2669 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2670 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
2671 }
2672
2673 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
2674
2675 m->vmp_reference = FALSE;
2676
2677 if (!m_object->object_is_shared_cache) {
2678 /*
2679 * don't apply this optimization if this is the shared cache
2680 * object, it's too easy to get rid of very hot and important
2681 * pages...
2682 * m->vmp_object must be stable since we hold the page queues lock...
2683 * we can update the scan_collisions field sans the object lock
2684 * since it is a separate field and this is the only spot that does
2685 * a read-modify-write operation and it is never executed concurrently...
2686 * we can asynchronously set this field to 0 when creating a UPL, so it
2687 * is possible for the value to be a bit non-deterministic, but that's ok
2688 * since it's only used as a hint
2689 */
2690 m_object->scan_collisions = 1;
2691 }
2692 if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
2693 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
2694 } else if (!vm_page_queue_empty(&sq->age_q)) {
2695 m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
2696 } else if ((grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT ||
2697 vm_page_queue_empty(&vm_page_queue_anonymous)) &&
2698 !vm_page_queue_empty(&vm_page_queue_inactive)) {
2699 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
2700 } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
2701 m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
2702 }
2703
2704 /*
2705 * this is the next object we're going to be interested in
2706 * try to make sure it's available after the mutex_pause
2707 * returns control
2708 */
2709 if (m_want) {
2710 vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
2711 }
2712
2713 goto requeue_page;
2714 }
2715 object = m_object;
2716 vm_pageout_scan_wants_object = VM_OBJECT_NULL;
2717 }
2718 assert(m_object == object);
2719 assert(VM_PAGE_OBJECT(m) == m_object);
2720
2721 if (m->vmp_busy) {
2722 /*
2723 * Somebody is already playing with this page.
2724 * Put it back on the appropriate queue
2725 *
2726 */
2727 VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
2728
2729 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2730 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
2731 }
2732 requeue_page:
2733 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2734 vm_page_enqueue_inactive(m, FALSE);
2735 } else {
2736 vm_page_activate(m);
2737 }
2738 #if CONFIG_BACKGROUND_QUEUE
2739 #if DEVELOPMENT || DEBUG
2740 if (page_from_bg_q == TRUE) {
2741 if (m_object->internal) {
2742 vm_pageout_rejected_bq_internal++;
2743 } else {
2744 vm_pageout_rejected_bq_external++;
2745 }
2746 }
2747 #endif
2748 #endif
2749 goto done_with_inactivepage;
2750 }
2751
2752 /*
2753 * if (m->vmp_cleaning && !m->vmp_free_when_done)
2754 * If already cleaning this page in place
2755 * just leave it off the paging queues.
2756 * We can leave the page mapped, and upl_commit_range
2757 * will put it on the clean queue.
2758 *
2759 * if (m->vmp_free_when_done && !m->vmp_cleaning)
2760 * an msync INVALIDATE is in progress...
2761 * this page has been marked for destruction
2762 * after it has been cleaned,
2763 * but not yet gathered into a UPL
2764 * where 'cleaning' will be set...
2765 * just leave it off the paging queues
2766 *
2767 * if (m->vmp_free_when_done && m->vmp_cleaning)
2768 * an msync INVALIDATE is in progress
2769 * and the UPL has already gathered this page...
2770 * just leave it off the paging queues
2771 */
2772 if (m->vmp_free_when_done || m->vmp_cleaning) {
2773 goto done_with_inactivepage;
2774 }
2775
2776
2777 /*
2778 * If it's absent, in error or the object is no longer alive,
2779 * we can reclaim the page... in the no longer alive case,
2780 * there are 2 states the page can be in that preclude us
2781 * from reclaiming it - busy or cleaning - that we've already
2782 * dealt with
2783 */
2784 if (m->vmp_absent || m->vmp_error || !object->alive) {
2785 if (m->vmp_absent) {
2786 VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
2787 } else if (!object->alive) {
2788 VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
2789 } else {
2790 VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
2791 }
2792 reclaim_page:
2793 if (vm_pageout_deadlock_target) {
2794 VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
2795 vm_pageout_deadlock_target--;
2796 }
2797
2798 DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
2799
2800 if (object->internal) {
2801 DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
2802 } else {
2803 DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
2804 }
2805 assert(!m->vmp_cleaning);
2806 assert(!m->vmp_laundry);
2807
2808 if (!object->internal &&
2809 object->pager != NULL &&
2810 object->pager->mo_pager_ops == &shared_region_pager_ops) {
2811 shared_region_pager_reclaimed++;
2812 }
2813
2814 m->vmp_busy = TRUE;
2815
2816 /*
2817 * remove page from object here since we're already
2818 * behind the object lock... defer the rest of the work
2819 * we'd normally do in vm_page_free_prepare_object
2820 * until 'vm_page_free_list' is called
2821 */
2822 if (m->vmp_tabled) {
2823 vm_page_remove(m, TRUE);
2824 }
2825
2826 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
2827 m->vmp_snext = local_freeq;
2828 local_freeq = m;
2829 local_freed++;
2830
2831 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
2832 vm_pageout_vminfo.vm_pageout_freed_speculative++;
2833 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2834 vm_pageout_vminfo.vm_pageout_freed_cleaned++;
2835 } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
2836 vm_pageout_vminfo.vm_pageout_freed_internal++;
2837 } else {
2838 vm_pageout_vminfo.vm_pageout_freed_external++;
2839 }
2840
2841 inactive_burst_count = 0;
2842 goto done_with_inactivepage;
2843 }
2844 if (object->copy == VM_OBJECT_NULL) {
2845 /*
2846 * No one else can have any interest in this page.
2847 * If this is an empty purgable object, the page can be
2848 * reclaimed even if dirty.
2849 * If the page belongs to a volatile purgable object, we
2850 * reactivate it if the compressor isn't active.
2851 */
2852 if (object->purgable == VM_PURGABLE_EMPTY) {
2853 if (m->vmp_pmapped == TRUE) {
2854 /* unmap the page */
2855 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2856 if (refmod_state & VM_MEM_MODIFIED) {
2857 SET_PAGE_DIRTY(m, FALSE);
2858 }
2859 }
2860 if (m->vmp_dirty || m->vmp_precious) {
2861 /* we saved the cost of cleaning this page ! */
2862 vm_page_purged_count++;
2863 }
2864 goto reclaim_page;
2865 }
2866
2867 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
2868 /*
2869 * With the VM compressor, the cost of
2870 * reclaiming a page is much lower (no I/O),
2871 * so if we find a "volatile" page, it's better
2872 * to let it get compressed rather than letting
2873 * it occupy a full page until it gets purged.
2874 * So no need to check for "volatile" here.
2875 */
2876 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
2877 /*
2878 * Avoid cleaning a "volatile" page which might
2879 * be purged soon.
2880 */
2881
2882 /* if it's wired, we can't put it on our queue */
2883 assert(!VM_PAGE_WIRED(m));
2884
2885 /* just stick it back on! */
2886 reactivated_this_call++;
2887
2888 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2889 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
2890 }
2891
2892 goto reactivate_page;
2893 }
2894 }
2895 /*
2896 * If it's being used, reactivate.
2897 * (Fictitious pages are either busy or absent.)
2898 * First, update the reference and dirty bits
2899 * to make sure the page is unreferenced.
2900 */
2901 refmod_state = -1;
2902
2903 if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
2904 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
2905
2906 if (refmod_state & VM_MEM_REFERENCED) {
2907 m->vmp_reference = TRUE;
2908 }
2909 if (refmod_state & VM_MEM_MODIFIED) {
2910 SET_PAGE_DIRTY(m, FALSE);
2911 }
2912 }
2913
2914 if (m->vmp_reference || m->vmp_dirty) {
2915 /* deal with a rogue "reusable" page */
2916 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
2917 }
2918 divisor = vm_pageout_state.vm_page_xpmapped_min_divisor;
2919
2920 if (divisor == 0) {
2921 vm_pageout_state.vm_page_xpmapped_min = 0;
2922 } else {
2923 vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / divisor;
2924 }
2925
2926 if (!m->vmp_no_cache &&
2927 #if CONFIG_BACKGROUND_QUEUE
2928 page_from_bg_q == FALSE &&
2929 #endif
2930 (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
2931 (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
2932 /*
2933 * The page we pulled off the inactive list has
2934 * been referenced. It is possible for other
2935 * processors to be touching pages faster than we
2936 * can clear the referenced bit and traverse the
2937 * inactive queue, so we limit the number of
2938 * reactivations.
2939 */
2940 if (++reactivated_this_call >= reactivate_limit) {
2941 vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
2942 } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
2943 vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
2944 } else {
2945 uint32_t isinuse;
2946
2947 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2948 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
2949 }
2950
2951 vm_pageout_vminfo.vm_pageout_inactive_referenced++;
2952 reactivate_page:
2953 if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
2954 vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
2955 /*
2956 * no explicit mappings of this object exist
2957 * and it's not open via the filesystem
2958 */
2959 vm_page_deactivate(m);
2960 VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
2961 } else {
2962 must_activate_page:
2963 /*
2964 * The page was/is being used, so put back on active list.
2965 */
2966 vm_page_activate(m);
2967 VM_STAT_INCR(reactivations);
2968 inactive_burst_count = 0;
2969 }
2970 #if CONFIG_BACKGROUND_QUEUE
2971 #if DEVELOPMENT || DEBUG
2972 if (page_from_bg_q == TRUE) {
2973 if (m_object->internal) {
2974 vm_pageout_rejected_bq_internal++;
2975 } else {
2976 vm_pageout_rejected_bq_external++;
2977 }
2978 }
2979 #endif
2980 #endif
2981 if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
2982 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
2983 }
2984 vm_pageout_state.vm_pageout_inactive_used++;
2985
2986 goto done_with_inactivepage;
2987 }
2988 /*
2989 * Make sure we call pmap_get_refmod() if it
2990 * wasn't already called just above, to update
2991 * the dirty bit.
2992 */
2993 if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
2994 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
2995 if (refmod_state & VM_MEM_MODIFIED) {
2996 SET_PAGE_DIRTY(m, FALSE);
2997 }
2998 }
2999 }
3000
3001 XPR(XPR_VM_PAGEOUT,
3002 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
3003 object, m->vmp_offset, m, 0, 0);
3004
3005 /*
3006 * we've got a candidate page to steal...
3007 *
3008 * m->vmp_dirty is up to date courtesy of the
3009 * preceding check for m->vmp_reference... if
3010 * we get here, then m->vmp_reference had to be
3011 * FALSE (or possibly "reactivate_limit" was
3012 * exceeded), but in either case we called
3013 * pmap_get_refmod() and updated both
3014 * m->vmp_reference and m->vmp_dirty
3015 *
3016 * if it's dirty or precious we need to
3017 * see if the target queue is throttled
3018 * if it is, we need to skip over it by moving it back
3019 * to the end of the inactive queue
3020 */
3021
3022 inactive_throttled = FALSE;
3023
3024 if (m->vmp_dirty || m->vmp_precious) {
3025 if (object->internal) {
3026 if (VM_PAGE_Q_THROTTLED(iq)) {
3027 inactive_throttled = TRUE;
3028 }
3029 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3030 inactive_throttled = TRUE;
3031 }
3032 }
3033 throttle_inactive:
3034 if (!VM_DYNAMIC_PAGING_ENABLED() &&
3035 object->internal && m->vmp_dirty &&
3036 (object->purgable == VM_PURGABLE_DENY ||
3037 object->purgable == VM_PURGABLE_NONVOLATILE ||
3038 object->purgable == VM_PURGABLE_VOLATILE)) {
3039 vm_page_check_pageable_safe(m);
3040 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3041 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
3042 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
3043 vm_page_throttled_count++;
3044
3045 VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
3046
3047 inactive_burst_count = 0;
3048 goto done_with_inactivepage;
3049 }
3050 if (inactive_throttled == TRUE) {
3051 if (object->internal == FALSE) {
3052 /*
3053 * we need to break up the following potential deadlock case...
3054 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
3055 * b) The thread doing the writing is waiting for pages while holding the truncate lock
3056 * c) Most of the pages in the inactive queue belong to this file.
3057 *
3058 * we are potentially in this deadlock because...
3059 * a) the external pageout queue is throttled
3060 * b) we're done with the active queue and moved on to the inactive queue
3061 * c) we've got a dirty external page
3062 *
3063 * since we don't know the reason for the external pageout queue being throttled we
3064 * must suspect that we are deadlocked, so move the current page onto the active queue
3065 * in an effort to cause a page from the active queue to 'age' to the inactive queue
3066 *
3067 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
3068 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
3069 * pool the next time we select a victim page... if we can make enough new free pages,
3070 * the deadlock will break, the external pageout queue will empty and it will no longer
3071 * be throttled
3072 *
3073 * if we have jetsam configured, keep a count of the pages reactivated this way so
3074 * that we can try to find clean pages in the active/inactive queues before
3075 * deciding to jetsam a process
3076 */
3077 vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
3078
3079 vm_page_check_pageable_safe(m);
3080 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3081 vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
3082 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
3083 vm_page_active_count++;
3084 vm_page_pageable_external_count++;
3085
3086 vm_pageout_adjust_eq_iothrottle(eq, FALSE);
3087
3088 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
3089 vm_pageout_inactive_external_forced_reactivate_limit--;
3090
3091 if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
3092 vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
3093 /*
3094 * Possible deadlock scenario so request jetsam action
3095 */
3096 assert(object);
3097 vm_object_unlock(object);
3098 object = VM_OBJECT_NULL;
3099 vm_page_unlock_queues();
3100
3101 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
3102 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
3103
3104 /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
3105 if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
3106 VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
3107 }
3108
3109 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
3110 vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
3111
3112 vm_page_lock_queues();
3113 delayed_unlock = 1;
3114 }
3115 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
3116 force_anonymous = TRUE;
3117 #endif
3118 inactive_burst_count = 0;
3119 goto done_with_inactivepage;
3120 } else {
3121 goto must_activate_page;
3122 }
3123 }
3124
3125 /*
3126 * we've got a page that we can steal...
3127 * eliminate all mappings and make sure
3128 * we have the up-to-date modified state
3129 *
3130 * if we need to do a pmap_disconnect then we
3131 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3132 * provides the true state atomically... the
3133 * page was still mapped up to the pmap_disconnect
3134 * and may have been dirtied at the last microsecond
3135 *
3136 * Note that if 'pmapped' is FALSE then the page is not
3137 * and has not been in any map, so there is no point calling
3138 * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
3139 * of likely usage of the page.
3140 */
3141 if (m->vmp_pmapped == TRUE) {
3142 int pmap_options;
3143
3144 /*
3145 * Don't count this page as going into the compressor
3146 * if any of these are true:
3147 * 1) compressed pager isn't enabled
3148 * 2) Freezer enabled device with compressed pager
3149 * backend (exclusive use) i.e. most of the VM system
3150 * (including vm_pageout_scan) has no knowledge of
3151 * the compressor
3152 * 3) This page belongs to a file and hence will not be
3153 * sent into the compressor
3154 */
3155 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
3156 object->internal == FALSE) {
3157 pmap_options = 0;
3158 } else if (m->vmp_dirty || m->vmp_precious) {
3159 /*
3160 * VM knows that this page is dirty (or
3161 * precious) and needs to be compressed
3162 * rather than freed.
3163 * Tell the pmap layer to count this page
3164 * as "compressed".
3165 */
3166 pmap_options = PMAP_OPTIONS_COMPRESSOR;
3167 } else {
3168 /*
3169 * VM does not know if the page needs to
3170 * be preserved but the pmap layer might tell
3171 * us if any mapping has "modified" it.
3172 * Let the pmap layer count this page
3173 * as compressed if and only if it has been
3174 * modified.
3175 */
3176 pmap_options =
3177 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
3178 }
3179 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
3180 pmap_options,
3181 NULL);
3182 if (refmod_state & VM_MEM_MODIFIED) {
3183 SET_PAGE_DIRTY(m, FALSE);
3184 }
3185 }
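		/*
		 * pmap_options selection above, in table form (same logic,
		 * no new cases):
		 *
		 *	compressor inactive, or external object
		 *		-> 0
		 *	internal object, page dirty or precious
		 *		-> PMAP_OPTIONS_COMPRESSOR
		 *	internal object, page clean so far
		 *		-> PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
		 */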
3186
3187 /*
3188 * reset our count of pages that have been reclaimed
3189 * since the last page was 'stolen'
3190 */
3191 inactive_reclaim_run = 0;
3192
3193 /*
3194 * If it's clean and not precious, we can free the page.
3195 */
3196 if (!m->vmp_dirty && !m->vmp_precious) {
3197 vm_pageout_state.vm_pageout_inactive_clean++;
3198
3199 /*
3200 * OK, at this point we have found a page we are going to free.
3201 */
3202 #if CONFIG_PHANTOM_CACHE
3203 if (!object->internal) {
3204 vm_phantom_cache_add_ghost(m);
3205 }
3206 #endif
3207 goto reclaim_page;
3208 }
3209
3210 /*
3211 * The page may have been dirtied since the last check
3212 * for a throttled target queue (which may have been skipped
3213 * if the page was clean then). With the dirty page
3214 * disconnected here, we can make one final check.
3215 */
3216 if (object->internal) {
3217 if (VM_PAGE_Q_THROTTLED(iq)) {
3218 inactive_throttled = TRUE;
3219 }
3220 } else if (VM_PAGE_Q_THROTTLED(eq)) {
3221 inactive_throttled = TRUE;
3222 }
3223
3224 if (inactive_throttled == TRUE) {
3225 goto throttle_inactive;
3226 }
3227
3228 #if VM_PRESSURE_EVENTS
3229 #if CONFIG_JETSAM
3230
3231 /*
3232 * If Jetsam is enabled, then the sending
3233 * of memory pressure notifications is handled
3234 * from the same thread that takes care of high-water
3235 * and other jetsams i.e. the memorystatus_thread.
3236 */
3237
3238 #else /* CONFIG_JETSAM */
3239
3240 vm_pressure_response();
3241
3242 #endif /* CONFIG_JETSAM */
3243 #endif /* VM_PRESSURE_EVENTS */
3244
3245 if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
3246 VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
3247 }
3248
3249 if (object->internal) {
3250 vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
3251 } else {
3252 vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
3253 }
3254
3255 /*
3256 * internal pages will go to the compressor...
3257 * external pages will go to the appropriate pager to be cleaned
3258 * and upon completion will end up on 'vm_page_queue_cleaned' which
3259 * is a preferred queue to steal from
3260 */
3261 vm_pageout_cluster(m);
3262 inactive_burst_count = 0;
3263
3264 done_with_inactivepage:
3265
3266 if (delayed_unlock++ > delayed_unlock_limit) {
3267 int freed = local_freed;
3268
3269 vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
3270 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
3271 if (freed == 0) {
3272 lck_mtx_yield(&vm_page_queue_lock);
3273 }
3274 } else if (vm_pageout_scan_wants_object) {
3275 vm_page_unlock_queues();
3276 mutex_pause(0);
3277 vm_page_lock_queues();
3278 }
3279 /*
3280 * back to top of pageout scan loop
3281 */
3282 }
3283 }
3284
3285
3286 void
3287 vm_page_free_reserve(
3288 int pages)
3289 {
3290 int free_after_reserve;
3291
3292 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3293 if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
3294 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
3295 } else {
3296 vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
3297 }
3298 } else {
3299 if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
3300 vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
3301 } else {
3302 vm_page_free_reserved += pages;
3303 }
3304 }
3305 free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
3306
3307 vm_page_free_min = vm_page_free_reserved +
3308 VM_PAGE_FREE_MIN(free_after_reserve);
3309
3310 if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
3311 vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
3312 }
3313
3314 vm_page_free_target = vm_page_free_reserved +
3315 VM_PAGE_FREE_TARGET(free_after_reserve);
3316
3317 if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
3318 vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
3319 }
3320
3321 if (vm_page_free_target < vm_page_free_min + 5) {
3322 vm_page_free_target = vm_page_free_min + 5;
3323 }
3324
3325 vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
3326 }
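/*
 * small worked example for the tail of vm_page_free_reserve() (the input
 * values are assumed; the macros are not expanded here): if the clamps above
 * leave vm_page_free_min at 3500 and vm_page_free_target at 4000, then
 *
 *	vm_page_throttle_limit = 4000 - (4000 / 2) = 2000
 *
 * so the throttle limit always sits at roughly half of the free target, and
 * the target itself is kept at least 5 pages above the minimum.
 */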
3327
3328 /*
3329 * vm_pageout is the high level pageout daemon.
3330 */
3331
3332 void
3333 vm_pageout_continue(void)
3334 {
3335 DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
3336 VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
3337
3338 #if !CONFIG_EMBEDDED
3339 lck_mtx_lock(&vm_page_queue_free_lock);
3340 vm_pageout_running = TRUE;
3341 lck_mtx_unlock(&vm_page_queue_free_lock);
3342 #endif /* CONFIG_EMBEDDED */
3343
3344 vm_pageout_scan();
3345 /*
3346 * we hold both the vm_page_queue_free_lock
3347 * and the vm_page_queue_lock at this point
3348 */
3349 assert(vm_page_free_wanted == 0);
3350 assert(vm_page_free_wanted_privileged == 0);
3351 assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
3352
3353 #if !CONFIG_EMBEDDED
3354 vm_pageout_running = FALSE;
3355 if (vm_pageout_waiter) {
3356 vm_pageout_waiter = FALSE;
3357 thread_wakeup((event_t)&vm_pageout_waiter);
3358 }
3359 #endif /* !CONFIG_EMBEDDED */
3360
3361 lck_mtx_unlock(&vm_page_queue_free_lock);
3362 vm_page_unlock_queues();
3363
3364 counter(c_vm_pageout_block++);
3365 thread_block((thread_continue_t)vm_pageout_continue);
3366 /*NOTREACHED*/
3367 }
3368
3369 #if !CONFIG_EMBEDDED
3370 kern_return_t
3371 vm_pageout_wait(uint64_t deadline)
3372 {
3373 kern_return_t kr;
3374
3375 lck_mtx_lock(&vm_page_queue_free_lock);
3376 for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
3377 vm_pageout_waiter = TRUE;
3378 if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
3379 &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
3380 (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
3381 kr = KERN_OPERATION_TIMED_OUT;
3382 }
3383 }
3384 lck_mtx_unlock(&vm_page_queue_free_lock);
3385
3386 return kr;
3387 }
3388 #endif /* !CONFIG_EMBEDDED */
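/*
 * hypothetical caller sketch for vm_pageout_wait(); the 100ms timeout and the
 * use of clock_interval_to_deadline() to build the absolute deadline are
 * assumptions made for illustration:
 *
 *	uint64_t	deadline;
 *	kern_return_t	kr;
 *
 *	clock_interval_to_deadline(100, 1000 * NSEC_PER_USEC, &deadline);
 *	kr = vm_pageout_wait(deadline);
 *	if (kr == KERN_OPERATION_TIMED_OUT) {
 *		// pageout was still running when the deadline passed
 *	}
 */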
3389
3390
3391 static void
3392 vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
3393 {
3394 vm_page_t m = NULL;
3395 vm_object_t object;
3396 vm_object_offset_t offset;
3397 memory_object_t pager;
3398
3399 /* On systems with a compressor, the external IO thread clears its
3400 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3401 * creation)
3402 */
3403 if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
3404 current_thread()->options &= ~TH_OPT_VMPRIV;
3405 }
3406
3407 vm_page_lockspin_queues();
3408
3409 while (!vm_page_queue_empty(&q->pgo_pending)) {
3410 q->pgo_busy = TRUE;
3411 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3412
3413 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3414 VM_PAGE_CHECK(m);
3415 /*
3416 * grab a snapshot of the object and offset this
3417 * page is tabled in so that we can relookup this
3418 * page after we've taken the object lock - these
3419 * fields are stable while we hold the page queues lock
3420 * but as soon as we drop it, there is nothing to keep
3421 * this page in this object... we hold an activity_in_progress
3422 * on this object which will keep it from terminating
3423 */
3424 object = VM_PAGE_OBJECT(m);
3425 offset = m->vmp_offset;
3426
3427 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3428 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3429
3430 vm_page_unlock_queues();
3431
3432 vm_object_lock(object);
3433
3434 m = vm_page_lookup(object, offset);
3435
3436 if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
3437 !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
3438 /*
3439 * it's either the same page that someone else has
3440 * started cleaning (or it's finished cleaning or
3441 * been put back on the pageout queue), or
3442 * the page has been freed or we have found a
3443 * new page at this offset... in all of these cases
3444 * we merely need to release the activity_in_progress
3445 * we took when we put the page on the pageout queue
3446 */
3447 vm_object_activity_end(object);
3448 vm_object_unlock(object);
3449
3450 vm_page_lockspin_queues();
3451 continue;
3452 }
3453 pager = object->pager;
3454
3455 if (pager == MEMORY_OBJECT_NULL) {
3456 /*
3457 * This pager has been destroyed by either
3458 * memory_object_destroy or vm_object_destroy, and
3459 * so there is nowhere for the page to go.
3460 */
3461 if (m->vmp_free_when_done) {
3462 /*
3463 * Just free the page... VM_PAGE_FREE takes
3464 * care of cleaning up all the state...
3465 * including doing the vm_pageout_throttle_up
3466 */
3467 VM_PAGE_FREE(m);
3468 } else {
3469 vm_page_lockspin_queues();
3470
3471 vm_pageout_throttle_up(m);
3472 vm_page_activate(m);
3473
3474 vm_page_unlock_queues();
3475
3476 /*
3477 * And we are done with it.
3478 */
3479 }
3480 vm_object_activity_end(object);
3481 vm_object_unlock(object);
3482
3483 vm_page_lockspin_queues();
3484 continue;
3485 }
3486 #if 0
3487 /*
3488 * we don't hold the page queue lock
3489 * so this check isn't safe to make
3490 */
3491 VM_PAGE_CHECK(m);
3492 #endif
3493 /*
3494 * give back the activity_in_progress reference we
3495 * took when we queued up this page and replace it
3496 * with a paging_in_progress reference that will
3497 * also keep the paging offset from changing and
3498 * prevent the object from terminating
3499 */
3500 vm_object_activity_end(object);
3501 vm_object_paging_begin(object);
3502 vm_object_unlock(object);
3503
3504 /*
3505 * Send the data to the pager.
3506 * any pageout clustering happens there
3507 */
3508 memory_object_data_return(pager,
3509 m->vmp_offset + object->paging_offset,
3510 PAGE_SIZE,
3511 NULL,
3512 NULL,
3513 FALSE,
3514 FALSE,
3515 0);
3516
3517 vm_object_lock(object);
3518 vm_object_paging_end(object);
3519 vm_object_unlock(object);
3520
3521 vm_pageout_io_throttle();
3522
3523 vm_page_lockspin_queues();
3524 }
3525 q->pgo_busy = FALSE;
3526 q->pgo_idle = TRUE;
3527
3528 assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
3529 vm_page_unlock_queues();
3530
3531 thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
3532 /*NOTREACHED*/
3533 }
3534
3535
3536 #define MAX_FREE_BATCH 32
3537 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
3538 * this thread.
3539 */
3540
3541
3542 void
3543 vm_pageout_iothread_internal_continue(struct cq *);
3544 void
3545 vm_pageout_iothread_internal_continue(struct cq *cq)
3546 {
3547 struct vm_pageout_queue *q;
3548 vm_page_t m = NULL;
3549 boolean_t pgo_draining;
3550 vm_page_t local_q;
3551 int local_cnt;
3552 vm_page_t local_freeq = NULL;
3553 int local_freed = 0;
3554 int local_batch_size;
3555 #if DEVELOPMENT || DEBUG
3556 int ncomps = 0;
3557 boolean_t marked_active = FALSE;
3558 #endif
3559 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
3560
3561 q = cq->q;
3562 local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
3563
3564 #if RECORD_THE_COMPRESSED_DATA
3565 if (q->pgo_laundry) {
3566 c_compressed_record_init();
3567 }
3568 #endif
3569 while (TRUE) {
3570 int pages_left_on_q = 0;
3571
3572 local_cnt = 0;
3573 local_q = NULL;
3574
3575 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3576
3577 vm_page_lock_queues();
3578 #if DEVELOPMENT || DEBUG
3579 if (marked_active == FALSE) {
3580 vmct_active++;
3581 vmct_state[cq->id] = VMCT_ACTIVE;
3582 marked_active = TRUE;
3583 if (vmct_active == 1) {
3584 vm_compressor_epoch_start = mach_absolute_time();
3585 }
3586 }
3587 #endif
3588 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3589
3590 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
3591
3592 while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
3593 vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
3594 assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
3595 VM_PAGE_CHECK(m);
3596
3597 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
3598 VM_PAGE_ZERO_PAGEQ_ENTRY(m);
3599 m->vmp_laundry = FALSE;
3600
3601 m->vmp_snext = local_q;
3602 local_q = m;
3603 local_cnt++;
3604 }
3605 if (local_q == NULL) {
3606 break;
3607 }
3608
3609 q->pgo_busy = TRUE;
3610
3611 if ((pgo_draining = q->pgo_draining) == FALSE) {
3612 vm_pageout_throttle_up_batch(q, local_cnt);
3613 pages_left_on_q = q->pgo_laundry;
3614 } else {
3615 pages_left_on_q = q->pgo_laundry - local_cnt;
3616 }
3617
3618 vm_page_unlock_queues();
3619
3620 #if !RECORD_THE_COMPRESSED_DATA
3621 if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
3622 thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
3623 }
3624 #endif
3625 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
3626
3627 while (local_q) {
3628 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
3629
3630 m = local_q;
3631 local_q = m->vmp_snext;
3632 m->vmp_snext = NULL;
3633
3634 if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
3635 #if DEVELOPMENT || DEBUG
3636 ncomps++;
3637 #endif
3638 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
3639
3640 m->vmp_snext = local_freeq;
3641 local_freeq = m;
3642 local_freed++;
3643
3644 if (local_freed >= MAX_FREE_BATCH) {
3645 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
3646
3647 vm_page_free_list(local_freeq, TRUE);
3648
3649 local_freeq = NULL;
3650 local_freed = 0;
3651 }
3652 }
3653 #if !CONFIG_JETSAM
3654 while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
3655 kern_return_t wait_result;
3656 int need_wakeup = 0;
3657
3658 if (local_freeq) {
3659 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
3660
3661 vm_page_free_list(local_freeq, TRUE);
3662 local_freeq = NULL;
3663 local_freed = 0;
3664
3665 continue;
3666 }
3667 lck_mtx_lock_spin(&vm_page_queue_free_lock);
3668
3669 if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
3670 if (vm_page_free_wanted_privileged++ == 0) {
3671 need_wakeup = 1;
3672 }
3673 wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
3674
3675 lck_mtx_unlock(&vm_page_queue_free_lock);
3676
3677 if (need_wakeup) {
3678 thread_wakeup((event_t)&vm_page_free_wanted);
3679 }
3680
3681 if (wait_result == THREAD_WAITING) {
3682 thread_block(THREAD_CONTINUE_NULL);
3683 }
3684 } else {
3685 lck_mtx_unlock(&vm_page_queue_free_lock);
3686 }
3687 }
3688 #endif
3689 }
3690 if (local_freeq) {
3691 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
3692
3693 vm_page_free_list(local_freeq, TRUE);
3694 local_freeq = NULL;
3695 local_freed = 0;
3696 }
3697 if (pgo_draining == TRUE) {
3698 vm_page_lockspin_queues();
3699 vm_pageout_throttle_up_batch(q, local_cnt);
3700 vm_page_unlock_queues();
3701 }
3702 }
3703 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
3704
3705 /*
3706 * queue lock is held and our q is empty
3707 */
3708 q->pgo_busy = FALSE;
3709 q->pgo_idle = TRUE;
3710
3711 assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
3712 #if DEVELOPMENT || DEBUG
3713 if (marked_active == TRUE) {
3714 vmct_active--;
3715 vmct_state[cq->id] = VMCT_IDLE;
3716
3717 if (vmct_active == 0) {
3718 vm_compressor_epoch_stop = mach_absolute_time();
3719 assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
3720 "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
3721 vm_compressor_epoch_start, vm_compressor_epoch_stop);
3722 /* This interval includes intervals where one or more
3723 * compressor threads were pre-empted
3724 */
3725 vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
3726 }
3727 }
3728 #endif
3729 vm_page_unlock_queues();
3730 #if DEVELOPMENT || DEBUG
3731 if (__improbable(vm_compressor_time_thread)) {
3732 vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
3733 vmct_stats.vmct_pages[cq->id] += ncomps;
3734 vmct_stats.vmct_iterations[cq->id]++;
3735 if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
3736 vmct_stats.vmct_maxpages[cq->id] = ncomps;
3737 }
3738 if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
3739 vmct_stats.vmct_minpages[cq->id] = ncomps;
3740 }
3741 }
3742 #endif
3743
3744 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3745
3746 thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
3747 /*NOTREACHED*/
3748 }
3749
3750
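/*
 * vm_pageout_compress_page:
 *
 * Hand one busy page to the compressor.  The caller must already hold
 * an activity_in_progress reference on the page's object; a compressor
 * pager is created on demand if the object does not have one yet.  On
 * KERN_SUCCESS the page has been compressed (counted in "compressions")
 * and removed from its object if it was tabled, and the caller is
 * expected to free it.  On any failure the page is woken up and
 * reactivated and vm_compressor_failed is bumped.  In every path the
 * activity_in_progress reference is released and the object is left
 * unlocked.
 */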
3751 kern_return_t
3752 vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
3753 {
3754 vm_object_t object;
3755 memory_object_t pager;
3756 int compressed_count_delta;
3757 kern_return_t retval;
3758
3759 object = VM_PAGE_OBJECT(m);
3760
3761 assert(!m->vmp_free_when_done);
3762 assert(!m->vmp_laundry);
3763
3764 pager = object->pager;
3765
3766 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
3767 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
3768
3769 vm_object_lock(object);
3770
3771 /*
3772 * If there is no memory object for the page, create
3773 * one and hand it to the compression pager.
3774 */
3775
3776 if (!object->pager_initialized) {
3777 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
3778 }
3779 if (!object->pager_initialized) {
3780 vm_object_compressor_pager_create(object);
3781 }
3782
3783 pager = object->pager;
3784
3785 if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
3786 /*
3787 * Still no pager for the object,
3788 * or the pager has been destroyed.
3789 * Reactivate the page.
3790 *
3791 * Should only happen if there is no
3792 * compression pager
3793 */
3794 PAGE_WAKEUP_DONE(m);
3795
3796 vm_page_lockspin_queues();
3797 vm_page_activate(m);
3798 VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
3799 vm_page_unlock_queues();
3800
3801 /*
3802 * And we are done with it.
3803 */
3804 vm_object_activity_end(object);
3805 vm_object_unlock(object);
3806
3807 return KERN_FAILURE;
3808 }
3809 vm_object_unlock(object);
3810
3811 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
3812 }
3813 assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
3814 assert(object->activity_in_progress > 0);
3815
3816 retval = vm_compressor_pager_put(
3817 pager,
3818 m->vmp_offset + object->paging_offset,
3819 VM_PAGE_GET_PHYS_PAGE(m),
3820 current_chead,
3821 scratch_buf,
3822 &compressed_count_delta);
3823
3824 vm_object_lock(object);
3825
3826 assert(object->activity_in_progress > 0);
3827 assert(VM_PAGE_OBJECT(m) == object);
3828 assert( !VM_PAGE_WIRED(m));
3829
3830 vm_compressor_pager_count(pager,
3831 compressed_count_delta,
3832 FALSE, /* shared_lock */
3833 object);
3834
3835 if (retval == KERN_SUCCESS) {
3836 /*
3837 * If the object is purgeable, its owner's
3838 * purgeable ledgers will be updated in
3839 * vm_page_remove() but the page still
3840 * contributes to the owner's memory footprint,
3841 * so account for it as such.
3842 */
3843 if ((object->purgable != VM_PURGABLE_DENY ||
3844 object->vo_ledger_tag) &&
3845 object->vo_owner != NULL) {
3846 /* one more compressed purgeable/tagged page */
3847 vm_object_owner_compressed_update(object,
3848 +1);
3849 }
3850 VM_STAT_INCR(compressions);
3851
3852 if (m->vmp_tabled) {
3853 vm_page_remove(m, TRUE);
3854 }
3855 } else {
3856 PAGE_WAKEUP_DONE(m);
3857
3858 vm_page_lockspin_queues();
3859
3860 vm_page_activate(m);
3861 vm_pageout_vminfo.vm_compressor_failed++;
3862
3863 vm_page_unlock_queues();
3864 }
3865 vm_object_activity_end(object);
3866 vm_object_unlock(object);
3867
3868 return retval;
3869 }
3870
3871
3872 static void
3873 vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
3874 {
3875 uint32_t policy;
3876
3877 if (hibernate_cleaning_in_progress == TRUE) {
3878 req_lowpriority = FALSE;
3879 }
3880
3881 if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
3882 vm_page_unlock_queues();
3883
3884 if (req_lowpriority == TRUE) {
3885 policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
3886 DTRACE_VM(laundrythrottle);
3887 } else {
3888 policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
3889 DTRACE_VM(laundryunthrottle);
3890 }
3891 proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
3892 TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
3893
3894 eq->pgo_lowpriority = req_lowpriority;
3895
3896 vm_page_lock_queues();
3897 }
3898 }
3899
3900
3901 static void
3902 vm_pageout_iothread_external(void)
3903 {
3904 thread_t self = current_thread();
3905
3906 self->options |= TH_OPT_VMPRIV;
3907
3908 DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
3909
3910 proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
3911 TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
3912
3913 vm_page_lock_queues();
3914
3915 vm_pageout_queue_external.pgo_tid = self->thread_id;
3916 vm_pageout_queue_external.pgo_lowpriority = TRUE;
3917 vm_pageout_queue_external.pgo_inited = TRUE;
3918
3919 vm_page_unlock_queues();
3920
3921 vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
3922
3923 /*NOTREACHED*/
3924 }
3925
3926
3927 static void
3928 vm_pageout_iothread_internal(struct cq *cq)
3929 {
3930 thread_t self = current_thread();
3931
3932 self->options |= TH_OPT_VMPRIV;
3933
3934 vm_page_lock_queues();
3935
3936 vm_pageout_queue_internal.pgo_tid = self->thread_id;
3937 vm_pageout_queue_internal.pgo_lowpriority = TRUE;
3938 vm_pageout_queue_internal.pgo_inited = TRUE;
3939
3940 vm_page_unlock_queues();
3941
3942 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
3943 thread_vm_bind_group_add();
3944 }
3945
3946
3947 thread_set_thread_name(current_thread(), "VM_compressor");
3948 #if DEVELOPMENT || DEBUG
3949 vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
3950 #endif
3951 vm_pageout_iothread_internal_continue(cq);
3952
3953 /*NOTREACHED*/
3954 }
3955
3956 kern_return_t
3957 vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
3958 {
3959 if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
3960 return KERN_SUCCESS;
3961 } else {
3962 return KERN_FAILURE; /* Already set */
3963 }
3964 }
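/*
 * Usage sketch for vm_set_buffer_cleanup_callout() above (the callback
 * name here is hypothetical): the buffer-cache layer registers its
 * reclaim hook once at startup; a second registration fails because the
 * pointer is published with a one-shot compare-and-swap.
 *
 *	extern boolean_t my_buffer_cache_gc(int);
 *
 *	if (vm_set_buffer_cleanup_callout(my_buffer_cache_gc) != KERN_SUCCESS) {
 *		a callout was already registered
 *	}
 */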
3965
3966 extern boolean_t memorystatus_manual_testing_on;
3967 extern unsigned int memorystatus_level;
3968
3969
3970 #if VM_PRESSURE_EVENTS
3971
3972 boolean_t vm_pressure_events_enabled = FALSE;
3973
3974 void
3975 vm_pressure_response(void)
3976 {
3977 vm_pressure_level_t old_level = kVMPressureNormal;
3978 int new_level = -1;
3979 unsigned int total_pages;
3980 uint64_t available_memory = 0;
3981
3982 if (vm_pressure_events_enabled == FALSE) {
3983 return;
3984 }
3985
3986 #if CONFIG_EMBEDDED
3987
3988 available_memory = (uint64_t) memorystatus_available_pages;
3989
3990 #else /* CONFIG_EMBEDDED */
3991
3992 available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
3993 memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
3994
3995 #endif /* CONFIG_EMBEDDED */
3996
3997 total_pages = (unsigned int) atop_64(max_mem);
3998 #if CONFIG_SECLUDED_MEMORY
3999 total_pages -= vm_page_secluded_count;
4000 #endif /* CONFIG_SECLUDED_MEMORY */
4001 memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
4002
4003 if (memorystatus_manual_testing_on) {
4004 return;
4005 }
4006
4007 old_level = memorystatus_vm_pressure_level;
4008
4009 switch (memorystatus_vm_pressure_level) {
4010 case kVMPressureNormal:
4011 {
4012 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4013 new_level = kVMPressureCritical;
4014 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
4015 new_level = kVMPressureWarning;
4016 }
4017 break;
4018 }
4019
4020 case kVMPressureWarning:
4021 case kVMPressureUrgent:
4022 {
4023 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4024 new_level = kVMPressureNormal;
4025 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
4026 new_level = kVMPressureCritical;
4027 }
4028 break;
4029 }
4030
4031 case kVMPressureCritical:
4032 {
4033 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
4034 new_level = kVMPressureNormal;
4035 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
4036 new_level = kVMPressureWarning;
4037 }
4038 break;
4039 }
4040
4041 default:
4042 return;
4043 }
4044
4045 if (new_level != -1) {
4046 memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
4047
4048 if (new_level != (int) old_level) {
4049 VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
4050 new_level, old_level, 0, 0);
4051 }
4052
4053 if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
4054 if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
4055 thread_wakeup(&vm_pressure_thread);
4056 }
4057
4058 if (old_level != memorystatus_vm_pressure_level) {
4059 thread_wakeup(&vm_pageout_state.vm_pressure_changed);
4060 }
4061 }
4062 }
4063 }
4064 #endif /* VM_PRESSURE_EVENTS */
4065
4066 kern_return_t
4067 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
4068 {
4069 #if CONFIG_EMBEDDED
4070
4071 return KERN_FAILURE;
4072
4073 #elif !VM_PRESSURE_EVENTS
4074
4075 return KERN_FAILURE;
4076
4077 #else /* VM_PRESSURE_EVENTS */
4078
4079 kern_return_t kr = KERN_SUCCESS;
4080
4081 if (pressure_level != NULL) {
4082 vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
4083
4084 if (wait_for_pressure == TRUE) {
4085 wait_result_t wr = 0;
4086
4087 while (old_level == *pressure_level) {
4088 wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
4089 THREAD_INTERRUPTIBLE);
4090 if (wr == THREAD_WAITING) {
4091 wr = thread_block(THREAD_CONTINUE_NULL);
4092 }
4093 if (wr == THREAD_INTERRUPTED) {
4094 return KERN_ABORTED;
4095 }
4096 if (wr == THREAD_AWAKENED) {
4097 old_level = memorystatus_vm_pressure_level;
4098
4099 if (old_level != *pressure_level) {
4100 break;
4101 }
4102 }
4103 }
4104 }
4105
4106 *pressure_level = old_level;
4107 kr = KERN_SUCCESS;
4108 } else {
4109 kr = KERN_INVALID_ARGUMENT;
4110 }
4111
4112 return kr;
4113 #endif /* VM_PRESSURE_EVENTS */
4114 }
4115
4116 #if VM_PRESSURE_EVENTS
4117 void
4118 vm_pressure_thread(void)
4119 {
4120 static boolean_t thread_initialized = FALSE;
4121
4122 if (thread_initialized == TRUE) {
4123 vm_pageout_state.vm_pressure_thread_running = TRUE;
4124 consider_vm_pressure_events();
4125 vm_pageout_state.vm_pressure_thread_running = FALSE;
4126 }
4127
4128 thread_set_thread_name(current_thread(), "VM_pressure");
4129 thread_initialized = TRUE;
4130 assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
4131 thread_block((thread_continue_t)vm_pressure_thread);
4132 }
4133 #endif /* VM_PRESSURE_EVENTS */
4134
4135
4136 /*
4137 * called once per-second via "compute_averages"
4138 */
4139 void
4140 compute_pageout_gc_throttle(__unused void *arg)
4141 {
4142 if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
4143 vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
4144
4145 thread_wakeup((event_t) &vm_pageout_garbage_collect);
4146 }
4147 }
4148
4149 /*
4150 * vm_pageout_garbage_collect can also be called when the zone allocator needs
4151 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
4152 * jetsams. We need to check if the zone map size is above its jetsam limit to
4153 * decide if this was indeed the case.
4154 *
4155 * We need to do this on a different thread because of the following reasons:
4156 *
4157 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
4158 * itself causing the system to hang. We perform synchronous jetsams if we're
4159 * leaking in the VM map entries zone, so the leaking process could be doing a
4160 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
4161 * jetsam itself. We also need the vm_map lock on the process termination path,
4162 * which would now lead the dying process to deadlock against itself.
4163 *
4164 * 2. The jetsam path might need to allocate zone memory itself. We could try
4165 * using the non-blocking variant of zalloc for this path, but we can still
4166 * end up trying to do a kernel_memory_allocate when the zone_map is almost
4167 * full.
4168 */
4169
4170 extern boolean_t is_zone_map_nearing_exhaustion(void);
4171
4172 void
4173 vm_pageout_garbage_collect(int collect)
4174 {
4175 if (collect) {
4176 if (is_zone_map_nearing_exhaustion()) {
4177 /*
4178 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
4179 *
4180 * Bail out after calling zone_gc (which triggers the
4181 * zone-map-exhaustion jetsams). If we fall through, the subsequent
4182 * operations that clear out a bunch of caches might allocate zone
4183 * memory themselves (e.g. vm_map operations would need VM map
4184 * entries). Since the zone map is almost full at this point, we
4185 * could end up with a panic. We just need to quickly jetsam a
4186 * process and exit here.
4187 *
4188 * It could so happen that we were woken up to relieve memory
4189 * pressure and the zone map also happened to be near its limit at
4190 * the time, in which case we'll skip out early. But that should be
4191 * ok; if memory pressure persists, the thread will simply be woken
4192 * up again.
4193 */
4194 consider_zone_gc(TRUE);
4195 } else {
4196 /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
4197 boolean_t buf_large_zfree = FALSE;
4198 boolean_t first_try = TRUE;
4199
4200 stack_collect();
4201
4202 consider_machine_collect();
4203 mbuf_drain(FALSE);
4204
4205 do {
4206 if (consider_buffer_cache_collect != NULL) {
4207 buf_large_zfree = (*consider_buffer_cache_collect)(0);
4208 }
4209 if (first_try == TRUE || buf_large_zfree == TRUE) {
4210 /*
4211 * consider_zone_gc should be last, because the other operations
4212 * might return memory to zones.
4213 */
4214 consider_zone_gc(FALSE);
4215 }
4216 first_try = FALSE;
4217 } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
4218
4219 consider_machine_adjust();
4220 }
4221 }
4222
4223 assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
4224
4225 thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
4226 /*NOTREACHED*/
4227 }
4228
4229
4230 #if VM_PAGE_BUCKETS_CHECK
4231 #if VM_PAGE_FAKE_BUCKETS
4232 extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
4233 #endif /* VM_PAGE_FAKE_BUCKETS */
4234 #endif /* VM_PAGE_BUCKETS_CHECK */
4235
4236
4237
4238 void
4239 vm_set_restrictions()
4240 {
4241 host_basic_info_data_t hinfo;
4242 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
4243
4244 #define BSD_HOST 1
4245 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
4246
4247 assert(hinfo.max_cpus > 0);
4248
4249 if (hinfo.max_cpus <= 3) {
4250 /*
4251 * on systems with a limited number of CPUs, bind the
4252 * 4 major threads that can free memory and that tend to use
4253 * a fair bit of CPU under pressured conditions to a single processor.
4254 * This ensures that these threads don't hog all of the available CPUs
4255 * (important for camera launch), while allowing them to run independently
4256 * w/r to locks... the 4 threads are
4257 * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
4258 * vm_compressor_swap_trigger_thread (minor and major compactions),
4259 * memorystatus_thread (jetsams).
4260 *
4261 * the first time the thread is run, it is responsible for checking the
4262 * state of vm_restricted_to_single_processor, and if TRUE it calls
4263 * thread_bind_master... someday this should be replaced with a group
4264 * scheduling mechanism and KPI.
4265 */
4266 vm_pageout_state.vm_restricted_to_single_processor = TRUE;
4267 } else {
4268 vm_pageout_state.vm_restricted_to_single_processor = FALSE;
4269 }
4270 }
4271
4272 void
4273 vm_pageout(void)
4274 {
4275 thread_t self = current_thread();
4276 thread_t thread;
4277 kern_return_t result;
4278 spl_t s;
4279
4280 /*
4281 * Set thread privileges.
4282 */
4283 s = splsched();
4284
4285 thread_lock(self);
4286 self->options |= TH_OPT_VMPRIV;
4287 sched_set_thread_base_priority(self, BASEPRI_VM);
4288 thread_unlock(self);
4289
4290 if (!self->reserved_stack) {
4291 self->reserved_stack = self->kernel_stack;
4292 }
4293
4294 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
4295 thread_vm_bind_group_add();
4296 }
4297
4298 splx(s);
4299
4300 thread_set_thread_name(current_thread(), "VM_pageout_scan");
4301
4302 /*
4303 * Initialize some paging parameters.
4304 */
4305
4306 vm_pageout_state.vm_pressure_thread_running = FALSE;
4307 vm_pageout_state.vm_pressure_changed = FALSE;
4308 vm_pageout_state.memorystatus_purge_on_warning = 2;
4309 vm_pageout_state.memorystatus_purge_on_urgent = 5;
4310 vm_pageout_state.memorystatus_purge_on_critical = 8;
4311 vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
4312 vm_pageout_state.vm_page_speculative_percentage = 5;
4313 vm_pageout_state.vm_page_speculative_target = 0;
4314
4315 vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
4316 vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
4317
4318 vm_pageout_state.vm_pageout_swap_wait = 0;
4319 vm_pageout_state.vm_pageout_idle_wait = 0;
4320 vm_pageout_state.vm_pageout_empty_wait = 0;
4321 vm_pageout_state.vm_pageout_burst_wait = 0;
4322 vm_pageout_state.vm_pageout_deadlock_wait = 0;
4323 vm_pageout_state.vm_pageout_deadlock_relief = 0;
4324 vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
4325
4326 vm_pageout_state.vm_pageout_inactive = 0;
4327 vm_pageout_state.vm_pageout_inactive_used = 0;
4328 vm_pageout_state.vm_pageout_inactive_clean = 0;
4329
4330 vm_pageout_state.vm_memory_pressure = 0;
4331 vm_pageout_state.vm_page_filecache_min = 0;
4332 #if CONFIG_JETSAM
4333 vm_pageout_state.vm_page_filecache_min_divisor = 70;
4334 vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
4335 #else
4336 vm_pageout_state.vm_page_filecache_min_divisor = 27;
4337 vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
4338 #endif
4339 vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
4340
4341 vm_pageout_state.vm_pageout_considered_page_last = 0;
4342
4343 if (vm_pageout_state.vm_pageout_swap_wait == 0) {
4344 vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
4345 }
4346
4347 if (vm_pageout_state.vm_pageout_idle_wait == 0) {
4348 vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
4349 }
4350
4351 if (vm_pageout_state.vm_pageout_burst_wait == 0) {
4352 vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
4353 }
4354
4355 if (vm_pageout_state.vm_pageout_empty_wait == 0) {
4356 vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
4357 }
4358
4359 if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
4360 vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
4361 }
4362
4363 if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
4364 vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
4365 }
4366
4367 if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
4368 vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
4369 }
4370 /*
4371 * even if we've already called vm_page_free_reserve,
4372 * call it again here to ensure that the targets are
4373 * accurately calculated (it uses vm_page_free_count_init);
4374 * calling it with an arg of 0 will not change the reserve
4375 * but will re-calculate free_min and free_target
4376 */
4377 if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
4378 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
4379 } else {
4380 vm_page_free_reserve(0);
4381 }
4382
4383
4384 vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
4385 vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
4386 vm_pageout_queue_external.pgo_laundry = 0;
4387 vm_pageout_queue_external.pgo_idle = FALSE;
4388 vm_pageout_queue_external.pgo_busy = FALSE;
4389 vm_pageout_queue_external.pgo_throttled = FALSE;
4390 vm_pageout_queue_external.pgo_draining = FALSE;
4391 vm_pageout_queue_external.pgo_lowpriority = FALSE;
4392 vm_pageout_queue_external.pgo_tid = -1;
4393 vm_pageout_queue_external.pgo_inited = FALSE;
4394
4395 vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
4396 vm_pageout_queue_internal.pgo_maxlaundry = 0;
4397 vm_pageout_queue_internal.pgo_laundry = 0;
4398 vm_pageout_queue_internal.pgo_idle = FALSE;
4399 vm_pageout_queue_internal.pgo_busy = FALSE;
4400 vm_pageout_queue_internal.pgo_throttled = FALSE;
4401 vm_pageout_queue_internal.pgo_draining = FALSE;
4402 vm_pageout_queue_internal.pgo_lowpriority = FALSE;
4403 vm_pageout_queue_internal.pgo_tid = -1;
4404 vm_pageout_queue_internal.pgo_inited = FALSE;
4405
4406 /* internal pageout thread started when default pager registered first time */
4407 /* external pageout and garbage collection threads started here */
4408
4409 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
4410 BASEPRI_VM,
4411 &vm_pageout_state.vm_pageout_external_iothread);
4412 if (result != KERN_SUCCESS) {
4413 panic("vm_pageout_iothread_external: create failed");
4414 }
4415
4416 thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
4417
4418 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
4419 BASEPRI_DEFAULT,
4420 &thread);
4421 if (result != KERN_SUCCESS) {
4422 panic("vm_pageout_garbage_collect: create failed");
4423 }
4424
4425 thread_deallocate(thread);
4426
4427 #if VM_PRESSURE_EVENTS
4428 result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
4429 BASEPRI_DEFAULT,
4430 &thread);
4431
4432 if (result != KERN_SUCCESS) {
4433 panic("vm_pressure_thread: create failed");
4434 }
4435
4436 thread_deallocate(thread);
4437 #endif
4438
4439 vm_object_reaper_init();
4440
4441
4442 bzero(&vm_config, sizeof(vm_config));
4443
4444 switch (vm_compressor_mode) {
4445 case VM_PAGER_DEFAULT:
4446 printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
4447
4448 case VM_PAGER_COMPRESSOR_WITH_SWAP:
4449 vm_config.compressor_is_present = TRUE;
4450 vm_config.swap_is_present = TRUE;
4451 vm_config.compressor_is_active = TRUE;
4452 vm_config.swap_is_active = TRUE;
4453 break;
4454
4455 case VM_PAGER_COMPRESSOR_NO_SWAP:
4456 vm_config.compressor_is_present = TRUE;
4457 vm_config.swap_is_present = TRUE;
4458 vm_config.compressor_is_active = TRUE;
4459 break;
4460
4461 case VM_PAGER_FREEZER_DEFAULT:
4462 printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
4463
4464 case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
4465 vm_config.compressor_is_present = TRUE;
4466 vm_config.swap_is_present = TRUE;
4467 break;
4468
4469 case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
4470 vm_config.compressor_is_present = TRUE;
4471 vm_config.swap_is_present = TRUE;
4472 vm_config.compressor_is_active = TRUE;
4473 vm_config.freezer_swap_is_active = TRUE;
4474 break;
4475
4476 case VM_PAGER_NOT_CONFIGURED:
4477 break;
4478
4479 default:
4480 printf("unknown compressor mode - %x\n", vm_compressor_mode);
4481 break;
4482 }
4483 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
4484 vm_compressor_pager_init();
4485 }
4486
4487 #if VM_PRESSURE_EVENTS
4488 vm_pressure_events_enabled = TRUE;
4489 #endif /* VM_PRESSURE_EVENTS */
4490
4491 #if CONFIG_PHANTOM_CACHE
4492 vm_phantom_cache_init();
4493 #endif
4494 #if VM_PAGE_BUCKETS_CHECK
4495 #if VM_PAGE_FAKE_BUCKETS
4496 printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
4497 (uint64_t) vm_page_fake_buckets_start,
4498 (uint64_t) vm_page_fake_buckets_end);
4499 pmap_protect(kernel_pmap,
4500 vm_page_fake_buckets_start,
4501 vm_page_fake_buckets_end,
4502 VM_PROT_READ);
4503 // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
4504 #endif /* VM_PAGE_FAKE_BUCKETS */
4505 #endif /* VM_PAGE_BUCKETS_CHECK */
4506
4507 #if VM_OBJECT_TRACKING
4508 vm_object_tracking_init();
4509 #endif /* VM_OBJECT_TRACKING */
4510
4511 vm_tests();
4512
4513 vm_pageout_continue();
4514
4515 /*
4516 * Unreached code!
4517 *
4518 * The vm_pageout_continue() call above never returns, so the code below is never
4519 * executed. We take advantage of this to declare several DTrace VM related probe
4520 * points that our kernel doesn't have an analog for. These are probe points that
4521 * exist in Solaris and are in the DTrace documentation, so people may have written
4522 * scripts that use them. Declaring the probe points here means their scripts will
4523 * compile and execute which we want for portability of the scripts, but since this
4524 * section of code is never reached, the probe points will simply never fire. Yes,
4525 * this is basically a hack. The problem is the DTrace probe points were chosen with
4526 * Solaris specific VM events in mind, not portability to different VM implementations.
4527 */
4528
4529 DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
4530 DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
4531 DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
4532 DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
4533 DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
4534 DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
4535 DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
4536 /*NOTREACHED*/
4537 }
4538
4539
4540
4541 kern_return_t
4542 vm_pageout_internal_start(void)
4543 {
4544 kern_return_t result;
4545 int i;
4546 host_basic_info_data_t hinfo;
4547
4548 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4549
4550 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
4551 #define BSD_HOST 1
4552 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
4553
4554 assert(hinfo.max_cpus > 0);
4555
4556 lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL);
4557
4558 #if CONFIG_EMBEDDED
4559 vm_pageout_state.vm_compressor_thread_count = 1;
4560 #else
4561 if (hinfo.max_cpus > 4) {
4562 vm_pageout_state.vm_compressor_thread_count = 2;
4563 } else {
4564 vm_pageout_state.vm_compressor_thread_count = 1;
4565 }
4566 #endif
4567 PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
4568 sizeof(vm_pageout_state.vm_compressor_thread_count));
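	/*
	 * The thread count chosen above can be overridden with the
	 * "vmcomp_threads" boot-arg, e.g. (illustrative value):
	 *
	 *	nvram boot-args="vmcomp_threads=2"
	 *
	 * Whatever value is supplied is still clamped below to at least 1,
	 * to fewer threads than hinfo.max_cpus, and to at most
	 * MAX_COMPRESSOR_THREAD_COUNT.
	 */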
4569
4570 if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
4571 vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
4572 }
4573 if (vm_pageout_state.vm_compressor_thread_count <= 0) {
4574 vm_pageout_state.vm_compressor_thread_count = 1;
4575 } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
4576 vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
4577 }
4578
4579 vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
4580
4581 PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
4582
4583 for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
4584 ciq[i].id = i;
4585 ciq[i].q = &vm_pageout_queue_internal;
4586 ciq[i].current_chead = NULL;
4587 ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
4588
4589 result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i],
4590 BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread);
4591
4592 if (result == KERN_SUCCESS) {
4593 thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
4594 } else {
4595 break;
4596 }
4597 }
4598 return result;
4599 }
4600
4601 #if CONFIG_IOSCHED
4602 /*
4603 * To support I/O Expedite for compressed files we mark the upls with special flags.
4604 * The way decmpfs works is that we create a big upl which marks all the pages needed to
4605 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
4606 * then issues smaller I/Os for the compressed data, decompresses it and puts the result into the pages
4607 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
4608 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
4609 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
4610 * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
4611 * unless the real I/O upl is being destroyed).
4612 */
4613
4614
4615 static void
4616 upl_set_decmp_info(upl_t upl, upl_t src_upl)
4617 {
4618 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
4619
4620 upl_lock(src_upl);
4621 if (src_upl->decmp_io_upl) {
4622 /*
4623 * If there is already an alive real I/O UPL, ignore this new UPL.
4624 * This case should rarely happen and even if it does, it just means
4625 * that we might issue a spurious expedite which the driver is expected
4626 * to handle.
4627 */
4628 upl_unlock(src_upl);
4629 return;
4630 }
4631 src_upl->decmp_io_upl = (void *)upl;
4632 src_upl->ref_count++;
4633
4634 upl->flags |= UPL_DECMP_REAL_IO;
4635 upl->decmp_io_upl = (void *)src_upl;
4636 upl_unlock(src_upl);
4637 }
4638 #endif /* CONFIG_IOSCHED */
4639
4640 #if UPL_DEBUG
4641 int upl_debug_enabled = 1;
4642 #else
4643 int upl_debug_enabled = 0;
4644 #endif
4645
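/*
 * Layout of the single allocation made by upl_create() below for an
 * INTERNAL + LITE request (the same pointers are recomputed later by
 * vm_object_upl_request()):
 *
 *	[ struct upl                    ]
 *	[ upl_page_info_t[atop(size)]   ]  if UPL_CREATE_INTERNAL
 *	[ lite bitmap, one bit per page ]  if UPL_CREATE_LITE
 *
 * The bitmap is (atop(size) + 7) >> 3 bytes, rounded up to a 4-byte
 * multiple, and is zeroed immediately after the kalloc.
 */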
4646 static upl_t
4647 upl_create(int type, int flags, upl_size_t size)
4648 {
4649 upl_t upl;
4650 vm_size_t page_field_size = 0;
4651 int upl_flags = 0;
4652 vm_size_t upl_size = sizeof(struct upl);
4653
4654 size = round_page_32(size);
4655
4656 if (type & UPL_CREATE_LITE) {
4657 page_field_size = (atop(size) + 7) >> 3;
4658 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
4659
4660 upl_flags |= UPL_LITE;
4661 }
4662 if (type & UPL_CREATE_INTERNAL) {
4663 upl_size += sizeof(struct upl_page_info) * atop(size);
4664
4665 upl_flags |= UPL_INTERNAL;
4666 }
4667 upl = (upl_t)kalloc(upl_size + page_field_size);
4668
4669 if (page_field_size) {
4670 bzero((char *)upl + upl_size, page_field_size);
4671 }
4672
4673 upl->flags = upl_flags | flags;
4674 upl->kaddr = (vm_offset_t)0;
4675 upl->size = 0;
4676 upl->map_object = NULL;
4677 upl->ref_count = 1;
4678 upl->ext_ref_count = 0;
4679 upl->highest_page = 0;
4680 upl_lock_init(upl);
4681 upl->vector_upl = NULL;
4682 upl->associated_upl = NULL;
4683 upl->upl_iodone = NULL;
4684 #if CONFIG_IOSCHED
4685 if (type & UPL_CREATE_IO_TRACKING) {
4686 upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
4687 }
4688
4689 upl->upl_reprio_info = 0;
4690 upl->decmp_io_upl = 0;
4691 if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
4692 /* Only support expedite on internal UPLs */
4693 thread_t curthread = current_thread();
4694 upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
4695 bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
4696 upl->flags |= UPL_EXPEDITE_SUPPORTED;
4697 if (curthread->decmp_upl != NULL) {
4698 upl_set_decmp_info(upl, curthread->decmp_upl);
4699 }
4700 }
4701 #endif
4702 #if CONFIG_IOSCHED || UPL_DEBUG
4703 if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
4704 upl->upl_creator = current_thread();
4705 upl->uplq.next = 0;
4706 upl->uplq.prev = 0;
4707 upl->flags |= UPL_TRACKED_BY_OBJECT;
4708 }
4709 #endif
4710
4711 #if UPL_DEBUG
4712 upl->ubc_alias1 = 0;
4713 upl->ubc_alias2 = 0;
4714
4715 upl->upl_state = 0;
4716 upl->upl_commit_index = 0;
4717 bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
4718
4719 (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
4720 #endif /* UPL_DEBUG */
4721
4722 return upl;
4723 }
4724
4725 static void
4726 upl_destroy(upl_t upl)
4727 {
4728 int page_field_size; /* bit field in word size buf */
4729 int size;
4730
4731 if (upl->ext_ref_count) {
4732 panic("upl(%p) ext_ref_count", upl);
4733 }
4734
4735 #if CONFIG_IOSCHED
4736 if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
4737 upl_t src_upl;
4738 src_upl = upl->decmp_io_upl;
4739 assert((src_upl->flags & UPL_DECMP_REQ) != 0);
4740 upl_lock(src_upl);
4741 src_upl->decmp_io_upl = NULL;
4742 upl_unlock(src_upl);
4743 upl_deallocate(src_upl);
4744 }
4745 #endif /* CONFIG_IOSCHED */
4746
4747 #if CONFIG_IOSCHED || UPL_DEBUG
4748 if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
4749 vm_object_t object;
4750
4751 if (upl->flags & UPL_SHADOWED) {
4752 object = upl->map_object->shadow;
4753 } else {
4754 object = upl->map_object;
4755 }
4756
4757 vm_object_lock(object);
4758 queue_remove(&object->uplq, upl, upl_t, uplq);
4759 vm_object_activity_end(object);
4760 vm_object_collapse(object, 0, TRUE);
4761 vm_object_unlock(object);
4762 }
4763 #endif
4764 /*
4765 * drop a reference on the map_object whether or
4766 * not a pageout object is inserted
4767 */
4768 if (upl->flags & UPL_SHADOWED) {
4769 vm_object_deallocate(upl->map_object);
4770 }
4771
4772 if (upl->flags & UPL_DEVICE_MEMORY) {
4773 size = PAGE_SIZE;
4774 } else {
4775 size = upl->size;
4776 }
4777 page_field_size = 0;
4778
4779 if (upl->flags & UPL_LITE) {
4780 page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
4781 page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
4782 }
4783 upl_lock_destroy(upl);
4784 upl->vector_upl = (vector_upl_t) 0xfeedbeef;
4785
4786 #if CONFIG_IOSCHED
4787 if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
4788 kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
4789 }
4790 #endif
4791
4792 if (upl->flags & UPL_INTERNAL) {
4793 kfree(upl,
4794 sizeof(struct upl) +
4795 (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
4796 + page_field_size);
4797 } else {
4798 kfree(upl, sizeof(struct upl) + page_field_size);
4799 }
4800 }
4801
4802 void
4803 upl_deallocate(upl_t upl)
4804 {
4805 upl_lock(upl);
4806
4807 if (--upl->ref_count == 0) {
4808 if (vector_upl_is_valid(upl)) {
4809 vector_upl_deallocate(upl);
4810 }
4811 upl_unlock(upl);
4812
4813 if (upl->upl_iodone) {
4814 upl_callout_iodone(upl);
4815 }
4816
4817 upl_destroy(upl);
4818 } else {
4819 upl_unlock(upl);
4820 }
4821 }
4822
4823 #if CONFIG_IOSCHED
4824 void
4825 upl_mark_decmp(upl_t upl)
4826 {
4827 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
4828 upl->flags |= UPL_DECMP_REQ;
4829 upl->upl_creator->decmp_upl = (void *)upl;
4830 }
4831 }
4832
4833 void
4834 upl_unmark_decmp(upl_t upl)
4835 {
4836 if (upl && (upl->flags & UPL_DECMP_REQ)) {
4837 upl->upl_creator->decmp_upl = NULL;
4838 }
4839 }
4840
4841 #endif /* CONFIG_IOSCHED */
4842
4843 #define VM_PAGE_Q_BACKING_UP(q) \
4844 ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
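/*
 * Worked example (numbers are hypothetical): with pgo_maxlaundry set to
 * 1000, the queue is considered to be "backing up" once pgo_laundry
 * reaches 800 (80%).  must_throttle_writes() below additionally requires
 * that pageable external pages exceed 60% of
 * AVAILABLE_NON_COMPRESSED_MEMORY before write throttling kicks in.
 */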
4845
4846 boolean_t must_throttle_writes(void);
4847
4848 boolean_t
4849 must_throttle_writes()
4850 {
4851 if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
4852 vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
4853 return TRUE;
4854 }
4855
4856 return FALSE;
4857 }
4858
4859
4860 /*
4861 * Routine: vm_object_upl_request
4862 * Purpose:
4863 * Cause the population of a portion of a vm_object.
4864 * Depending on the nature of the request, the pages
4865 * returned may contain valid data or be uninitialized.
4866 * A page list structure, listing the physical pages,
4867 * will be returned upon request.
4868 * This function is called by the file system or any other
4869 * supplier of backing store to a pager.
4870 * IMPORTANT NOTE: The caller must still respect the relationship
4871 * between the vm_object and its backing memory object. The
4872 * caller MUST NOT substitute changes in the backing file
4873 * without first doing a memory_object_lock_request on the
4874 * target range unless it is known that the pages are not
4875 * shared with another entity at the pager level.
4876 * Copy_in_to:
4877 * if a page list structure is present
4878 * return the mapped physical pages, where a
4879 * page is not present, return a non-initialized
4880 * one. If the no_sync bit is turned on, don't
4881 * call the pager unlock to synchronize with other
4882 * possible copies of the page. Leave pages busy
4883 * in the original object, if a page list structure
4884 * was specified. When a commit of the page list
4885 * pages is done, the dirty bit will be set for each one.
4886 * Copy_out_from:
4887 * If a page list structure is present, return
4888 * all mapped pages. Where a page does not exist
4889 * map a zero filled one. Leave pages busy in
4890 * the original object. If a page list structure
4891 * is not specified, this call is a no-op.
4892 *
4893 * Note: access of default pager objects has a rather interesting
4894 * twist. The caller of this routine, presumably the file system
4895 * page cache handling code, will never actually make a request
4896 * against a default pager backed object. Only the default
4897 * pager will make requests on backing store related vm_objects.
4898 * In this way the default pager can maintain the relationship
4899 * between backing store files (abstract memory objects) and
4900 * the vm_objects (cache objects) they support.
4901 *
4902 */
4903
4904 __private_extern__ kern_return_t
4905 vm_object_upl_request(
4906 vm_object_t object,
4907 vm_object_offset_t offset,
4908 upl_size_t size,
4909 upl_t *upl_ptr,
4910 upl_page_info_array_t user_page_list,
4911 unsigned int *page_list_count,
4912 upl_control_flags_t cntrl_flags,
4913 vm_tag_t tag)
4914 {
4915 vm_page_t dst_page = VM_PAGE_NULL;
4916 vm_object_offset_t dst_offset;
4917 upl_size_t xfer_size;
4918 unsigned int size_in_pages;
4919 boolean_t dirty;
4920 boolean_t hw_dirty;
4921 upl_t upl = NULL;
4922 unsigned int entry;
4923 vm_page_t alias_page = NULL;
4924 int refmod_state = 0;
4925 wpl_array_t lite_list = NULL;
4926 vm_object_t last_copy_object;
4927 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
4928 struct vm_page_delayed_work *dwp;
4929 int dw_count;
4930 int dw_limit;
4931 int io_tracking_flag = 0;
4932 int grab_options;
4933 int page_grab_count = 0;
4934 ppnum_t phys_page;
4935 pmap_flush_context pmap_flush_context_storage;
4936 boolean_t pmap_flushes_delayed = FALSE;
4937 #if DEVELOPMENT || DEBUG
4938 task_t task = current_task();
4939 #endif /* DEVELOPMENT || DEBUG */
4940
4941 if (cntrl_flags & ~UPL_VALID_FLAGS) {
4942 /*
4943 * For forward compatibility's sake,
4944 * reject any unknown flag.
4945 */
4946 return KERN_INVALID_VALUE;
4947 }
4948 if ((!object->internal) && (object->paging_offset != 0)) {
4949 panic("vm_object_upl_request: external object with non-zero paging offset\n");
4950 }
4951 if (object->phys_contiguous) {
4952 panic("vm_object_upl_request: contiguous object specified\n");
4953 }
4954
4955 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);
4956
4957 if (size > MAX_UPL_SIZE_BYTES) {
4958 size = MAX_UPL_SIZE_BYTES;
4959 }
4960
4961 if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
4962 *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
4963 }
4964
4965 #if CONFIG_IOSCHED || UPL_DEBUG
4966 if (object->io_tracking || upl_debug_enabled) {
4967 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
4968 }
4969 #endif
4970 #if CONFIG_IOSCHED
4971 if (object->io_tracking) {
4972 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
4973 }
4974 #endif
4975
4976 if (cntrl_flags & UPL_SET_INTERNAL) {
4977 if (cntrl_flags & UPL_SET_LITE) {
4978 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
4979
4980 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
4981 lite_list = (wpl_array_t)
4982 (((uintptr_t)user_page_list) +
4983 ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
4984 if (size == 0) {
4985 user_page_list = NULL;
4986 lite_list = NULL;
4987 }
4988 } else {
4989 upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
4990
4991 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
4992 if (size == 0) {
4993 user_page_list = NULL;
4994 }
4995 }
4996 } else {
4997 if (cntrl_flags & UPL_SET_LITE) {
4998 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
4999
5000 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
5001 if (size == 0) {
5002 lite_list = NULL;
5003 }
5004 } else {
5005 upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
5006 }
5007 }
5008 *upl_ptr = upl;
5009
5010 if (user_page_list) {
5011 user_page_list[0].device = FALSE;
5012 }
5013
5014 if (cntrl_flags & UPL_SET_LITE) {
5015 upl->map_object = object;
5016 } else {
5017 upl->map_object = vm_object_allocate(size);
5018 /*
5019 * No need to lock the new object: nobody else knows
5020 * about it yet, so it's all ours so far.
5021 */
5022 upl->map_object->shadow = object;
5023 upl->map_object->pageout = TRUE;
5024 upl->map_object->can_persist = FALSE;
5025 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
5026 upl->map_object->vo_shadow_offset = offset;
5027 upl->map_object->wimg_bits = object->wimg_bits;
5028
5029 VM_PAGE_GRAB_FICTITIOUS(alias_page);
5030
5031 upl->flags |= UPL_SHADOWED;
5032 }
5033 if (cntrl_flags & UPL_FOR_PAGEOUT) {
5034 upl->flags |= UPL_PAGEOUT;
5035 }
5036
5037 vm_object_lock(object);
5038 vm_object_activity_begin(object);
5039
5040 grab_options = 0;
5041 #if CONFIG_SECLUDED_MEMORY
5042 if (object->can_grab_secluded) {
5043 grab_options |= VM_PAGE_GRAB_SECLUDED;
5044 }
5045 #endif /* CONFIG_SECLUDED_MEMORY */
5046
5047 /*
5048 * we can lock in the paging_offset once paging_in_progress is set
5049 */
5050 upl->size = size;
5051 upl->offset = offset + object->paging_offset;
5052
5053 #if CONFIG_IOSCHED || UPL_DEBUG
5054 if (object->io_tracking || upl_debug_enabled) {
5055 vm_object_activity_begin(object);
5056 queue_enter(&object->uplq, upl, upl_t, uplq);
5057 }
5058 #endif
5059 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
5060 /*
5061 * Honor copy-on-write obligations
5062 *
5063 * The caller is gathering these pages and
5064 * might modify their contents. We need to
5065 * make sure that the copy object has its own
5066 * private copies of these pages before we let
5067 * the caller modify them.
5068 */
5069 vm_object_update(object,
5070 offset,
5071 size,
5072 NULL,
5073 NULL,
5074 FALSE, /* should_return */
5075 MEMORY_OBJECT_COPY_SYNC,
5076 VM_PROT_NO_CHANGE);
5077
5078 VM_PAGEOUT_DEBUG(upl_cow, 1);
5079 VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
5080 }
5081 /*
5082 * remember which copy object we synchronized with
5083 */
5084 last_copy_object = object->copy;
5085 entry = 0;
5086
5087 xfer_size = size;
5088 dst_offset = offset;
5089 size_in_pages = size / PAGE_SIZE;
5090
5091 dwp = &dw_array[0];
5092 dw_count = 0;
5093 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
5094
5095 if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
5096 object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
5097 object->scan_collisions = 0;
5098 }
5099
5100 if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
5101 boolean_t isSSD = FALSE;
5102
5103 #if CONFIG_EMBEDDED
5104 isSSD = TRUE;
5105 #else
5106 vnode_pager_get_isSSD(object->pager, &isSSD);
5107 #endif
5108 vm_object_unlock(object);
5109
5110 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5111
5112 if (isSSD == TRUE) {
5113 delay(1000 * size_in_pages);
5114 } else {
5115 delay(5000 * size_in_pages);
5116 }
5117 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5118
5119 vm_object_lock(object);
5120 }
5121
5122 while (xfer_size) {
5123 dwp->dw_mask = 0;
5124
5125 if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
5126 vm_object_unlock(object);
5127 VM_PAGE_GRAB_FICTITIOUS(alias_page);
5128 vm_object_lock(object);
5129 }
5130 if (cntrl_flags & UPL_COPYOUT_FROM) {
5131 upl->flags |= UPL_PAGE_SYNC_DONE;
5132
5133 if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
5134 dst_page->vmp_fictitious ||
5135 dst_page->vmp_absent ||
5136 dst_page->vmp_error ||
5137 dst_page->vmp_cleaning ||
5138 (VM_PAGE_WIRED(dst_page))) {
5139 if (user_page_list) {
5140 user_page_list[entry].phys_addr = 0;
5141 }
5142
5143 goto try_next_page;
5144 }
5145 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5146
5147 /*
5148 * grab this up front...
5149 * a high percentage of the time we're going to
5150 * need the hardware modification state a bit later
5151 * anyway... so we can eliminate an extra call into
5152 * the pmap layer by grabbing it here and recording it
5153 */
5154 if (dst_page->vmp_pmapped) {
5155 refmod_state = pmap_get_refmod(phys_page);
5156 } else {
5157 refmod_state = 0;
5158 }
5159
5160 if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
5161 /*
5162 * page is on inactive list and referenced...
5163 * reactivate it now... this gets it out of the
5164 * way of vm_pageout_scan which would have to
5165 * reactivate it upon tripping over it
5166 */
5167 dwp->dw_mask |= DW_vm_page_activate;
5168 }
5169 if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
5170 /*
5171 * we're only asking for DIRTY pages to be returned
5172 */
5173 if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
5174 /*
5175 * if we were the page stolen by vm_pageout_scan to be
5176 * cleaned (as opposed to a buddy being clustered in
5177 * or this request is not being driven by a PAGEOUT cluster
5178 * then we only need to check for the page being dirty or
5179 * precious to decide whether to return it
5180 */
5181 if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
5182 goto check_busy;
5183 }
5184 goto dont_return;
5185 }
5186 /*
5187 * this is a request for a PAGEOUT cluster and this page
5188 * is merely along for the ride as a 'buddy'... not only
5189 * does it have to be dirty to be returned, but it also
5190 * can't have been referenced recently...
5191 */
5192 if ((hibernate_cleaning_in_progress == TRUE ||
5193 (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
5194 (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
5195 ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
5196 goto check_busy;
5197 }
5198 dont_return:
5199 /*
5200 * if we reach here, we're not to return
5201 * the page... go on to the next one
5202 */
5203 if (dst_page->vmp_laundry == TRUE) {
5204 /*
5205 * if we get here, the page is not 'cleaning' (filtered out above).
5206 * since it has been referenced, remove it from the laundry
5207 * so we don't pay the cost of an I/O to clean a page
5208 * we're just going to take back
5209 */
5210 vm_page_lockspin_queues();
5211
5212 vm_pageout_steal_laundry(dst_page, TRUE);
5213 vm_page_activate(dst_page);
5214
5215 vm_page_unlock_queues();
5216 }
5217 if (user_page_list) {
5218 user_page_list[entry].phys_addr = 0;
5219 }
5220
5221 goto try_next_page;
5222 }
5223 check_busy:
5224 if (dst_page->vmp_busy) {
5225 if (cntrl_flags & UPL_NOBLOCK) {
5226 if (user_page_list) {
5227 user_page_list[entry].phys_addr = 0;
5228 }
5229 dwp->dw_mask = 0;
5230
5231 goto try_next_page;
5232 }
5233 /*
5234 * someone else is playing with the
5235 * page. We will have to wait.
5236 */
5237 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5238
5239 continue;
5240 }
5241 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5242 vm_page_lockspin_queues();
5243
5244 if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
5245 /*
5246 * we've buddied up a page for a clustered pageout
5247 * that has already been moved to the pageout
5248 * queue by pageout_scan... we need to remove
5249 * it from the queue and drop the laundry count
5250 * on that queue
5251 */
5252 vm_pageout_throttle_up(dst_page);
5253 }
5254 vm_page_unlock_queues();
5255 }
5256 hw_dirty = refmod_state & VM_MEM_MODIFIED;
5257 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5258
5259 if (phys_page > upl->highest_page) {
5260 upl->highest_page = phys_page;
5261 }
5262
5263 assert(!pmap_is_noencrypt(phys_page));
5264
5265 if (cntrl_flags & UPL_SET_LITE) {
5266 unsigned int pg_num;
5267
5268 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5269 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
5270 lite_list[pg_num >> 5] |= 1 << (pg_num & 31);
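/*
 * e.g. a page at index 37 of the request lands in lite_list[37 >> 5],
 * i.e. lite_list[1], bit (37 & 31) == 5: each 32-bit word of the
 * lite list tracks 32 pages of the UPL.
 */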
5271
5272 if (hw_dirty) {
5273 if (pmap_flushes_delayed == FALSE) {
5274 pmap_flush_context_init(&pmap_flush_context_storage);
5275 pmap_flushes_delayed = TRUE;
5276 }
5277 pmap_clear_refmod_options(phys_page,
5278 VM_MEM_MODIFIED,
5279 PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
5280 &pmap_flush_context_storage);
5281 }
5282
5283 /*
5284 * Mark original page as cleaning
5285 * in place.
5286 */
5287 dst_page->vmp_cleaning = TRUE;
5288 dst_page->vmp_precious = FALSE;
5289 } else {
5290 /*
5291 * use pageclean setup; it is more
5292 * convenient even for the pageout
5293 * cases here
5294 */
5295 vm_object_lock(upl->map_object);
5296 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5297 vm_object_unlock(upl->map_object);
5298
5299 alias_page->vmp_absent = FALSE;
5300 alias_page = NULL;
5301 }
5302 if (dirty) {
5303 SET_PAGE_DIRTY(dst_page, FALSE);
5304 } else {
5305 dst_page->vmp_dirty = FALSE;
5306 }
5307
5308 if (!dirty) {
5309 dst_page->vmp_precious = TRUE;
5310 }
5311
5312 if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
5313 if (!VM_PAGE_WIRED(dst_page)) {
5314 dst_page->vmp_free_when_done = TRUE;
5315 }
5316 }
5317 } else {
5318 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
5319 /*
5320 * Honor copy-on-write obligations
5321 *
5322 * The copy object has changed since we
5323 * last synchronized for copy-on-write.
5324 * Another copy object might have been
5325 * inserted while we released the object's
5326 * lock. Since someone could have seen the
5327 * original contents of the remaining pages
5328 * through that new object, we have to
5329 * synchronize with it again for the remaining
5330 * pages only. The previous pages are "busy"
5331 * so they can not be seen through the new
5332 * mapping. The new mapping will see our
5333 * upcoming changes for those previous pages,
5334 * but that's OK since they couldn't see what
5335 * was there before. It's just a race anyway
5336 * and there's no guarantee of consistency or
5337 * atomicity. We just don't want new mappings
5338 * to see both the *before* and *after* pages.
5339 */
5340 if (object->copy != VM_OBJECT_NULL) {
5341 vm_object_update(
5342 object,
5343 dst_offset,/* current offset */
5344 xfer_size, /* remaining size */
5345 NULL,
5346 NULL,
5347 FALSE, /* should_return */
5348 MEMORY_OBJECT_COPY_SYNC,
5349 VM_PROT_NO_CHANGE);
5350
5351 VM_PAGEOUT_DEBUG(upl_cow_again, 1);
5352 VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
5353 }
5354 /*
5355 * remember the copy object we synced with
5356 */
5357 last_copy_object = object->copy;
5358 }
5359 dst_page = vm_page_lookup(object, dst_offset);
5360
5361 if (dst_page != VM_PAGE_NULL) {
5362 if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
5363 /*
5364 * skip over pages already present in the cache
5365 */
5366 if (user_page_list) {
5367 user_page_list[entry].phys_addr = 0;
5368 }
5369
5370 goto try_next_page;
5371 }
5372 if (dst_page->vmp_fictitious) {
5373 panic("need corner case for fictitious page");
5374 }
5375
5376 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
5377 /*
5378 * someone else is playing with the
5379 * page. We will have to wait.
5380 */
5381 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
5382
5383 continue;
5384 }
5385 if (dst_page->vmp_laundry) {
5386 vm_pageout_steal_laundry(dst_page, FALSE);
5387 }
5388 } else {
5389 if (object->private) {
5390 /*
5391 * This is a nasty wrinkle for users
5392 * of UPLs who encounter device or
5393 * private memory; however, it is
5394 * unavoidable: only a fault can
5395 * resolve the actual backing
5396 * physical page by asking the
5397 * backing device.
5398 */
5399 if (user_page_list) {
5400 user_page_list[entry].phys_addr = 0;
5401 }
5402
5403 goto try_next_page;
5404 }
5405 if (object->scan_collisions) {
5406 /*
5407 * the pageout_scan thread is trying to steal
5408 * pages from this object, but has run into our
5409 * lock... grab 2 pages from the head of the object...
5410 * the first is freed on behalf of pageout_scan, the
5411 * 2nd is for our own use... we use vm_object_page_grab
5412 * in both cases to avoid taking pages from the free
5413 * list since we are under memory pressure and our
5414 * lock on this object is getting in the way of
5415 * relieving it
5416 */
5417 dst_page = vm_object_page_grab(object);
5418
5419 if (dst_page != VM_PAGE_NULL) {
5420 vm_page_release(dst_page,
5421 FALSE);
5422 }
5423
5424 dst_page = vm_object_page_grab(object);
5425 }
5426 if (dst_page == VM_PAGE_NULL) {
5427 /*
5428 * need to allocate a page
5429 */
5430 dst_page = vm_page_grab_options(grab_options);
5431 if (dst_page != VM_PAGE_NULL) {
5432 page_grab_count++;
5433 }
5434 }
5435 if (dst_page == VM_PAGE_NULL) {
5436 if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
5437 /*
5438 * we don't want to stall waiting for pages to come onto the free list
5439 * while we're already holding absent pages in this UPL...
5440 * the caller will deal with the empty slots
5441 */
5442 if (user_page_list) {
5443 user_page_list[entry].phys_addr = 0;
5444 }
5445
5446 goto try_next_page;
5447 }
5448 /*
5449 * no pages available... wait
5450 * then try again for the same
5451 * offset...
5452 */
5453 vm_object_unlock(object);
5454
5455 OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
5456
5457 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
5458
5459 VM_PAGE_WAIT();
5460 OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
5461
5462 VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
5463
5464 vm_object_lock(object);
5465
5466 continue;
5467 }
5468 vm_page_insert(dst_page, object, dst_offset);
5469
5470 dst_page->vmp_absent = TRUE;
5471 dst_page->vmp_busy = FALSE;
5472
5473 if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
5474 /*
5475 * if UPL_RET_ONLY_ABSENT was specified,
5476 * then we're definitely setting up a
5477 * UPL for a clustered read/pagein
5478 * operation... mark the pages as clustered
5479 * so upl_commit_range can put them on the
5480 * speculative list
5481 */
5482 dst_page->vmp_clustered = TRUE;
5483
5484 if (!(cntrl_flags & UPL_FILE_IO)) {
5485 VM_STAT_INCR(pageins);
5486 }
5487 }
5488 }
5489 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
5490
5491 dst_page->vmp_overwriting = TRUE;
5492
5493 if (dst_page->vmp_pmapped) {
5494 if (!(cntrl_flags & UPL_FILE_IO)) {
5495 /*
5496 * eliminate all mappings from the
5497 * original object and its progeny
5498 */
5499 refmod_state = pmap_disconnect(phys_page);
5500 } else {
5501 refmod_state = pmap_get_refmod(phys_page);
5502 }
5503 } else {
5504 refmod_state = 0;
5505 }
5506
5507 hw_dirty = refmod_state & VM_MEM_MODIFIED;
5508 dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;
5509
5510 if (cntrl_flags & UPL_SET_LITE) {
5511 unsigned int pg_num;
5512
5513 pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
5514 assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
5515 lite_list[pg_num >> 5] |= 1 << (pg_num & 31);
5516
5517 if (hw_dirty) {
5518 pmap_clear_modify(phys_page);
5519 }
5520
5521 /*
5522 * Mark original page as cleaning
5523 * in place.
5524 */
5525 dst_page->vmp_cleaning = TRUE;
5526 dst_page->vmp_precious = FALSE;
5527 } else {
5528 /*
5529 * use pageclean setup; it is more
5530 * convenient even for the pageout
5531 * cases here
5532 */
5533 vm_object_lock(upl->map_object);
5534 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
5535 vm_object_unlock(upl->map_object);
5536
5537 alias_page->vmp_absent = FALSE;
5538 alias_page = NULL;
5539 }
5540
5541 if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
5542 upl->flags &= ~UPL_CLEAR_DIRTY;
5543 upl->flags |= UPL_SET_DIRTY;
5544 dirty = TRUE;
5545 upl->flags |= UPL_SET_DIRTY;
5546 } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
5547 /*
5548 * clean in place for read implies
5549 * that a write will be done on all
5550 * the pages that are dirty before
5551 * a upl commit is done. The caller
5552 * is obligated to preserve the
5553 * contents of all pages marked dirty
5554 */
5555 upl->flags |= UPL_CLEAR_DIRTY;
5556 }
5557 dst_page->vmp_dirty = dirty;
5558
5559 if (!dirty) {
5560 dst_page->vmp_precious = TRUE;
5561 }
5562
5563 if (!VM_PAGE_WIRED(dst_page)) {
5564 /*
5565 * deny access to the target page while
5566 * it is being worked on
5567 */
5568 dst_page->vmp_busy = TRUE;
5569 } else {
5570 dwp->dw_mask |= DW_vm_page_wire;
5571 }
5572
5573 /*
5574 * We might be about to satisfy a fault which has been
5575 * requested. So no need for the "restart" bit.
5576 */
5577 dst_page->vmp_restart = FALSE;
5578 if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
5579 /*
5580 * expect the page to be used
5581 */
5582 dwp->dw_mask |= DW_set_reference;
5583 }
5584 if (cntrl_flags & UPL_PRECIOUS) {
5585 if (object->internal) {
5586 SET_PAGE_DIRTY(dst_page, FALSE);
5587 dst_page->vmp_precious = FALSE;
5588 } else {
5589 dst_page->vmp_precious = TRUE;
5590 }
5591 } else {
5592 dst_page->vmp_precious = FALSE;
5593 }
5594 }
5595 if (dst_page->vmp_busy) {
5596 upl->flags |= UPL_HAS_BUSY;
5597 }
5598
5599 if (phys_page > upl->highest_page) {
5600 upl->highest_page = phys_page;
5601 }
5602 assert(!pmap_is_noencrypt(phys_page));
5603 if (user_page_list) {
5604 user_page_list[entry].phys_addr = phys_page;
5605 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
5606 user_page_list[entry].absent = dst_page->vmp_absent;
5607 user_page_list[entry].dirty = dst_page->vmp_dirty;
5608 user_page_list[entry].precious = dst_page->vmp_precious;
5609 user_page_list[entry].device = FALSE;
5610 user_page_list[entry].needed = FALSE;
5611 if (dst_page->vmp_clustered == TRUE) {
5612 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
5613 } else {
5614 user_page_list[entry].speculative = FALSE;
5615 }
5616 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
5617 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
5618 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
5619 user_page_list[entry].mark = FALSE;
5620 }
5621 /*
5622 * if UPL_RET_ONLY_ABSENT is set, then
5623 * we are working with a fresh page and we've
5624 * just set the clustered flag on it to
5625 * indicate that it was dragged in as part of a
5626 * speculative cluster... so leave it alone
5627 */
5628 if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
5629 /*
5630 * someone is explicitly grabbing this page...
5631 * update clustered and speculative state
5632 *
5633 */
5634 if (dst_page->vmp_clustered) {
5635 VM_PAGE_CONSUME_CLUSTERED(dst_page);
5636 }
5637 }
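/*
 * The delayed-work entries accumulated below are handed to
 * vm_page_do_delayed_work() in batches of up to dw_limit pages (with a
 * final flush after the loop), so the page-queue updates are applied a
 * batch at a time rather than page by page.
 */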
5638 try_next_page:
5639 if (dwp->dw_mask) {
5640 if (dwp->dw_mask & DW_vm_page_activate) {
5641 VM_STAT_INCR(reactivations);
5642 }
5643
5644 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
5645
5646 if (dw_count >= dw_limit) {
5647 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
5648
5649 dwp = &dw_array[0];
5650 dw_count = 0;
5651 }
5652 }
5653 entry++;
5654 dst_offset += PAGE_SIZE_64;
5655 xfer_size -= PAGE_SIZE;
5656 }
5657 if (dw_count) {
5658 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
5659 }
5660
5661 if (alias_page != NULL) {
5662 VM_PAGE_FREE(alias_page);
5663 }
5664 if (pmap_flushes_delayed == TRUE) {
5665 pmap_flush(&pmap_flush_context_storage);
5666 }
5667
5668 if (page_list_count != NULL) {
5669 if (upl->flags & UPL_INTERNAL) {
5670 *page_list_count = 0;
5671 } else if (*page_list_count > entry) {
5672 *page_list_count = entry;
5673 }
5674 }
5675 #if UPL_DEBUG
5676 upl->upl_state = 1;
5677 #endif
5678 vm_object_unlock(object);
5679
5680 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
5681 #if DEVELOPMENT || DEBUG
5682 if (task != NULL) {
5683 ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
5684 }
5685 #endif /* DEVELOPMENT || DEBUG */
5686
5687 return KERN_SUCCESS;
5688 }
5689
5690 /*
5691 * Routine: vm_object_super_upl_request
5692 * Purpose:
5693 * Cause the population of a portion of a vm_object
5694 * in much the same way as memory_object_upl_request.
5695 * Depending on the nature of the request, the pages
5696 * returned may contain valid data or be uninitialized.
5697 * However, the region may be expanded up to the super
5698 * cluster size provided.
5699 */
5700
5701 __private_extern__ kern_return_t
5702 vm_object_super_upl_request(
5703 vm_object_t object,
5704 vm_object_offset_t offset,
5705 upl_size_t size,
5706 upl_size_t super_cluster,
5707 upl_t *upl,
5708 upl_page_info_t *user_page_list,
5709 unsigned int *page_list_count,
5710 upl_control_flags_t cntrl_flags,
5711 vm_tag_t tag)
5712 {
5713 if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
5714 return KERN_FAILURE;
5715 }
5716
5717 assert(object->paging_in_progress);
5718 offset = offset - object->paging_offset;
5719
5720 if (super_cluster > size) {
5721 vm_object_offset_t base_offset;
5722 upl_size_t super_size;
5723 vm_object_size_t super_size_64;
5724
5725 base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
5726 super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
5727 super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
5728 super_size = (upl_size_t) super_size_64;
5729 assert(super_size == super_size_64);
5730
5731 if (offset > (base_offset + super_size)) {
5732 panic("vm_object_super_upl_request: Missed target pageout"
5733 " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
5734 offset, base_offset, super_size, super_cluster,
5735 size, object->paging_offset);
5736 }
5737 /*
5738 * apparently there is a case where the vm requests a
5739 * page to be written out whose offset is beyond the
5740 * object size
5741 */
5742 if ((offset + size) > (base_offset + super_size)) {
5743 super_size_64 = (offset + size) - base_offset;
5744 super_size = (upl_size_t) super_size_64;
5745 assert(super_size == super_size_64);
5746 }
5747
5748 offset = base_offset;
5749 size = super_size;
5750 }
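/*
 * e.g. with super_cluster == 0x10000, a request for offset 0xf000 and
 * size 0x2000 yields base_offset == 0 and, because the request
 * straddles the cluster boundary, super_size is doubled to 0x20000
 * (then clipped to the object size), so the UPL below is built for
 * [0, 0x20000) rather than just the two pages asked for.
 */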
5751 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
5752 }
5753
5754 #if CONFIG_EMBEDDED
5755 int cs_executable_create_upl = 0;
5756 extern int proc_selfpid(void);
5757 extern char *proc_name_address(void *p);
5758 #endif /* CONFIG_EMBEDDED */
5759
5760 kern_return_t
5761 vm_map_create_upl(
5762 vm_map_t map,
5763 vm_map_address_t offset,
5764 upl_size_t *upl_size,
5765 upl_t *upl,
5766 upl_page_info_array_t page_list,
5767 unsigned int *count,
5768 upl_control_flags_t *flags,
5769 vm_tag_t tag)
5770 {
5771 vm_map_entry_t entry;
5772 upl_control_flags_t caller_flags;
5773 int force_data_sync;
5774 int sync_cow_data;
5775 vm_object_t local_object;
5776 vm_map_offset_t local_offset;
5777 vm_map_offset_t local_start;
5778 kern_return_t ret;
5779
5780 assert(page_aligned(offset));
5781
5782 caller_flags = *flags;
5783
5784 if (caller_flags & ~UPL_VALID_FLAGS) {
5785 /*
5786 * For forward compatibility's sake,
5787 * reject any unknown flag.
5788 */
5789 return KERN_INVALID_VALUE;
5790 }
5791 force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
5792 sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
5793
5794 if (upl == NULL) {
5795 return KERN_INVALID_ARGUMENT;
5796 }
5797
5798 REDISCOVER_ENTRY:
5799 vm_map_lock_read(map);
5800
5801 if (!vm_map_lookup_entry(map, offset, &entry)) {
5802 vm_map_unlock_read(map);
5803 return KERN_FAILURE;
5804 }
5805
5806 if ((entry->vme_end - offset) < *upl_size) {
5807 *upl_size = (upl_size_t) (entry->vme_end - offset);
5808 assert(*upl_size == entry->vme_end - offset);
5809 }
5810
5811 if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
5812 *flags = 0;
5813
5814 if (!entry->is_sub_map &&
5815 VME_OBJECT(entry) != VM_OBJECT_NULL) {
5816 if (VME_OBJECT(entry)->private) {
5817 *flags = UPL_DEV_MEMORY;
5818 }
5819
5820 if (VME_OBJECT(entry)->phys_contiguous) {
5821 *flags |= UPL_PHYS_CONTIG;
5822 }
5823 }
5824 vm_map_unlock_read(map);
5825 return KERN_SUCCESS;
5826 }
5827
5828 if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
5829 !VME_OBJECT(entry)->phys_contiguous) {
5830 if (*upl_size > MAX_UPL_SIZE_BYTES) {
5831 *upl_size = MAX_UPL_SIZE_BYTES;
5832 }
5833 }
5834
5835 /*
5836 * Create an object if necessary.
5837 */
5838 if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
5839 if (vm_map_lock_read_to_write(map)) {
5840 goto REDISCOVER_ENTRY;
5841 }
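/*
 * Note: when the read-to-write upgrade above fails, the map's read
 * lock has been dropped in the process, so the entry must be looked
 * up again from scratch; the other upgrade sites in this function
 * rely on the same behavior.
 */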
5842
5843 VME_OBJECT_SET(entry,
5844 vm_object_allocate((vm_size_t)
5845 (entry->vme_end -
5846 entry->vme_start)));
5847 VME_OFFSET_SET(entry, 0);
5848 assert(entry->use_pmap);
5849
5850 vm_map_lock_write_to_read(map);
5851 }
5852
5853 if (!(caller_flags & UPL_COPYOUT_FROM) &&
5854 !entry->is_sub_map &&
5855 !(entry->protection & VM_PROT_WRITE)) {
5856 vm_map_unlock_read(map);
5857 return KERN_PROTECTION_FAILURE;
5858 }
5859
5860 #if CONFIG_EMBEDDED
5861 if (map->pmap != kernel_pmap &&
5862 (caller_flags & UPL_COPYOUT_FROM) &&
5863 (entry->protection & VM_PROT_EXECUTE) &&
5864 !(entry->protection & VM_PROT_WRITE)) {
5865 vm_offset_t kaddr;
5866 vm_size_t ksize;
5867
5868 /*
5869 * We're about to create a read-only UPL backed by
5870 * memory from an executable mapping.
5871 * Wiring the pages would result in the pages being copied
5872 * (due to the "MAP_PRIVATE" mapping) and no longer
5873 * code-signed, so no longer eligible for execution.
5874 * Instead, let's copy the data into a kernel buffer and
5875 * create the UPL from this kernel buffer.
5876 * The kernel buffer is then freed, leaving the UPL holding
5877 * the last reference on the VM object, so the memory will
5878 * be released when the UPL is committed.
5879 */
5880
5881 vm_map_unlock_read(map);
5882 /* allocate kernel buffer */
5883 ksize = round_page(*upl_size);
5884 kaddr = 0;
5885 ret = kmem_alloc_pageable(kernel_map,
5886 &kaddr,
5887 ksize,
5888 tag);
5889 if (ret == KERN_SUCCESS) {
5890 /* copyin the user data */
5891 assert(page_aligned(offset));
5892 ret = copyinmap(map, offset, (void *)kaddr, *upl_size);
5893 }
5894 if (ret == KERN_SUCCESS) {
5895 if (ksize > *upl_size) {
5896 /* zero out the extra space in kernel buffer */
5897 memset((void *)(kaddr + *upl_size),
5898 0,
5899 ksize - *upl_size);
5900 }
5901 /* create the UPL from the kernel buffer */
5902 ret = vm_map_create_upl(kernel_map, kaddr, upl_size,
5903 upl, page_list, count, flags, tag);
5904 }
5905 if (kaddr != 0) {
5906 /* free the kernel buffer */
5907 kmem_free(kernel_map, kaddr, ksize);
5908 kaddr = 0;
5909 ksize = 0;
5910 }
5911 #if DEVELOPMENT || DEBUG
5912 DTRACE_VM4(create_upl_from_executable,
5913 vm_map_t, map,
5914 vm_map_address_t, offset,
5915 upl_size_t, *upl_size,
5916 kern_return_t, ret);
5917 #endif /* DEVELOPMENT || DEBUG */
5918 return ret;
5919 }
5920 #endif /* CONFIG_EMBEDDED */
5921
5922 local_object = VME_OBJECT(entry);
5923 assert(local_object != VM_OBJECT_NULL);
5924
5925 if (!entry->is_sub_map &&
5926 !entry->needs_copy &&
5927 *upl_size != 0 &&
5928 local_object->vo_size > *upl_size && /* partial UPL */
5929 entry->wired_count == 0 && /* No COW for entries that are wired */
5930 (map->pmap != kernel_pmap) && /* alias checks */
5931 (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
5932 ||
5933 ( /* case 2 */
5934 local_object->internal &&
5935 (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
5936 local_object->ref_count > 1))) {
5937 vm_prot_t prot;
5938
5939 /*
5940 * Case 1:
5941 * Set up the targeted range for copy-on-write to avoid
5942 * applying true_share/copy_delay to the entire object.
5943 *
5944 * Case 2:
5945 * This map entry covers only part of an internal
5946 * object. There could be other map entries covering
5947 * other areas of this object and some of these map
5948 * entries could be marked as "needs_copy", which
5949 * assumes that the object is COPY_SYMMETRIC.
5950 * To avoid marking this object as COPY_DELAY and
5951 * "true_share", let's shadow it and mark the new
5952 * (smaller) object as "true_share" and COPY_DELAY.
5953 */
5954
5955 if (vm_map_lock_read_to_write(map)) {
5956 goto REDISCOVER_ENTRY;
5957 }
5958 vm_map_lock_assert_exclusive(map);
5959 assert(VME_OBJECT(entry) == local_object);
5960
5961 vm_map_clip_start(map,
5962 entry,
5963 vm_map_trunc_page(offset,
5964 VM_MAP_PAGE_MASK(map)));
5965 vm_map_clip_end(map,
5966 entry,
5967 vm_map_round_page(offset + *upl_size,
5968 VM_MAP_PAGE_MASK(map)));
5969 if ((entry->vme_end - offset) < *upl_size) {
5970 *upl_size = (upl_size_t) (entry->vme_end - offset);
5971 assert(*upl_size == entry->vme_end - offset);
5972 }
5973
5974 prot = entry->protection & ~VM_PROT_WRITE;
5975 if (override_nx(map, VME_ALIAS(entry)) && prot) {
5976 prot |= VM_PROT_EXECUTE;
5977 }
5978 vm_object_pmap_protect(local_object,
5979 VME_OFFSET(entry),
5980 entry->vme_end - entry->vme_start,
5981 ((entry->is_shared ||
5982 map->mapped_in_other_pmaps)
5983 ? PMAP_NULL
5984 : map->pmap),
5985 entry->vme_start,
5986 prot);
5987
5988 assert(entry->wired_count == 0);
5989
5990 /*
5991 * Lock the VM object and re-check its status: if it's mapped
5992 * in another address space, we could still be racing with
5993 * another thread holding that other VM map exclusively.
5994 */
5995 vm_object_lock(local_object);
5996 if (local_object->true_share) {
5997 /* object is already in proper state: no COW needed */
5998 assert(local_object->copy_strategy !=
5999 MEMORY_OBJECT_COPY_SYMMETRIC);
6000 } else {
6001 /* not true_share: ask for copy-on-write below */
6002 assert(local_object->copy_strategy ==
6003 MEMORY_OBJECT_COPY_SYMMETRIC);
6004 entry->needs_copy = TRUE;
6005 }
6006 vm_object_unlock(local_object);
6007
6008 vm_map_lock_write_to_read(map);
6009 }
6010
6011 if (entry->needs_copy) {
6012 /*
6013 * Honor copy-on-write for the COPY_SYMMETRIC strategy:
6014 * fault in the copy (via vm_map_lookup_locked below), then retry the lookup from the top.
6015 */
6016 vm_map_t local_map;
6017 vm_object_t object;
6018 vm_object_offset_t new_offset;
6019 vm_prot_t prot;
6020 boolean_t wired;
6021 vm_map_version_t version;
6022 vm_map_t real_map;
6023 vm_prot_t fault_type;
6024
6025 local_map = map;
6026
6027 if (caller_flags & UPL_COPYOUT_FROM) {
6028 fault_type = VM_PROT_READ | VM_PROT_COPY;
6029 vm_counters.create_upl_extra_cow++;
6030 vm_counters.create_upl_extra_cow_pages +=
6031 (entry->vme_end - entry->vme_start) / PAGE_SIZE;
6032 } else {
6033 fault_type = VM_PROT_WRITE;
6034 }
6035 if (vm_map_lookup_locked(&local_map,
6036 offset, fault_type,
6037 OBJECT_LOCK_EXCLUSIVE,
6038 &version, &object,
6039 &new_offset, &prot, &wired,
6040 NULL,
6041 &real_map) != KERN_SUCCESS) {
6042 if (fault_type == VM_PROT_WRITE) {
6043 vm_counters.create_upl_lookup_failure_write++;
6044 } else {
6045 vm_counters.create_upl_lookup_failure_copy++;
6046 }
6047 vm_map_unlock_read(local_map);
6048 return KERN_FAILURE;
6049 }
6050 if (real_map != map) {
6051 vm_map_unlock(real_map);
6052 }
6053 vm_map_unlock_read(local_map);
6054
6055 vm_object_unlock(object);
6056
6057 goto REDISCOVER_ENTRY;
6058 }
6059
6060 if (entry->is_sub_map) {
6061 vm_map_t submap;
6062
6063 submap = VME_SUBMAP(entry);
6064 local_start = entry->vme_start;
6065 local_offset = VME_OFFSET(entry);
6066
6067 vm_map_reference(submap);
6068 vm_map_unlock_read(map);
6069
6070 ret = vm_map_create_upl(submap,
6071 local_offset + (offset - local_start),
6072 upl_size, upl, page_list, count, flags, tag);
6073 vm_map_deallocate(submap);
6074
6075 return ret;
6076 }
6077
6078 if (sync_cow_data &&
6079 (VME_OBJECT(entry)->shadow ||
6080 VME_OBJECT(entry)->copy)) {
6081 local_object = VME_OBJECT(entry);
6082 local_start = entry->vme_start;
6083 local_offset = VME_OFFSET(entry);
6084
6085 vm_object_reference(local_object);
6086 vm_map_unlock_read(map);
6087
6088 if (local_object->shadow && local_object->copy) {
6089 vm_object_lock_request(local_object->shadow,
6090 ((vm_object_offset_t)
6091 ((offset - local_start) +
6092 local_offset) +
6093 local_object->vo_shadow_offset),
6094 *upl_size, FALSE,
6095 MEMORY_OBJECT_DATA_SYNC,
6096 VM_PROT_NO_CHANGE);
6097 }
6098 sync_cow_data = FALSE;
6099 vm_object_deallocate(local_object);
6100
6101 goto REDISCOVER_ENTRY;
6102 }
6103 if (force_data_sync) {
6104 local_object = VME_OBJECT(entry);
6105 local_start = entry->vme_start;
6106 local_offset = VME_OFFSET(entry);
6107
6108 vm_object_reference(local_object);
6109 vm_map_unlock_read(map);
6110
6111 vm_object_lock_request(local_object,
6112 ((vm_object_offset_t)
6113 ((offset - local_start) +
6114 local_offset)),
6115 (vm_object_size_t)*upl_size,
6116 FALSE,
6117 MEMORY_OBJECT_DATA_SYNC,
6118 VM_PROT_NO_CHANGE);
6119
6120 force_data_sync = FALSE;
6121 vm_object_deallocate(local_object);
6122
6123 goto REDISCOVER_ENTRY;
6124 }
6125 if (VME_OBJECT(entry)->private) {
6126 *flags = UPL_DEV_MEMORY;
6127 } else {
6128 *flags = 0;
6129 }
6130
6131 if (VME_OBJECT(entry)->phys_contiguous) {
6132 *flags |= UPL_PHYS_CONTIG;
6133 }
6134
6135 local_object = VME_OBJECT(entry);
6136 local_offset = VME_OFFSET(entry);
6137 local_start = entry->vme_start;
6138
6139 #if CONFIG_EMBEDDED
6140 /*
6141 * Wiring will copy the pages to the shadow object.
6142 * The shadow object will not be code-signed so
6143 * attempting to execute code from these copied pages
6144 * would trigger a code-signing violation.
6145 */
6146 if (entry->protection & VM_PROT_EXECUTE) {
6147 #if MACH_ASSERT
6148 printf("pid %d[%s] create_upl out of executable range from "
6149 "0x%llx to 0x%llx: side effects may include "
6150 "code-signing violations later on\n",
6151 proc_selfpid(),
6152 (current_task()->bsd_info
6153 ? proc_name_address(current_task()->bsd_info)
6154 : "?"),
6155 (uint64_t) entry->vme_start,
6156 (uint64_t) entry->vme_end);
6157 #endif /* MACH_ASSERT */
6158 DTRACE_VM2(cs_executable_create_upl,
6159 uint64_t, (uint64_t)entry->vme_start,
6160 uint64_t, (uint64_t)entry->vme_end);
6161 cs_executable_create_upl++;
6162 }
6163 #endif /* CONFIG_EMBEDDED */
6164
6165 vm_object_lock(local_object);
6166
6167 /*
6168 * Ensure that this object is "true_share" and "copy_delay" now,
6169 * while we're still holding the VM map lock. After we unlock the map,
6170 * anything could happen to that mapping, including some copy-on-write
6171 * activity. We need to make sure that the IOPL will point at the
6172 * same memory as the mapping.
6173 */
6174 if (local_object->true_share) {
6175 assert(local_object->copy_strategy !=
6176 MEMORY_OBJECT_COPY_SYMMETRIC);
6177 } else if (local_object != kernel_object &&
6178 local_object != compressor_object &&
6179 !local_object->phys_contiguous) {
6180 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6181 if (!local_object->true_share &&
6182 vm_object_tracking_inited) {
6183 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
6184 int num = 0;
6185 num = OSBacktrace(bt,
6186 VM_OBJECT_TRACKING_BTDEPTH);
6187 btlog_add_entry(vm_object_tracking_btlog,
6188 local_object,
6189 VM_OBJECT_TRACKING_OP_TRUESHARE,
6190 bt,
6191 num);
6192 }
6193 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6194 local_object->true_share = TRUE;
6195 if (local_object->copy_strategy ==
6196 MEMORY_OBJECT_COPY_SYMMETRIC) {
6197 local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6198 }
6199 }
6200
6201 vm_object_reference_locked(local_object);
6202 vm_object_unlock(local_object);
6203
6204 vm_map_unlock_read(map);
6205
6206 ret = vm_object_iopl_request(local_object,
6207 ((vm_object_offset_t)
6208 ((offset - local_start) + local_offset)),
6209 *upl_size,
6210 upl,
6211 page_list,
6212 count,
6213 caller_flags,
6214 tag);
6215 vm_object_deallocate(local_object);
6216
6217 return ret;
6218 }
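/*
 * Rough sketch of how a caller might drive this path end to end
 * (illustrative only; 'map', 'addr' and 'len' are placeholders, the
 * flags shown are just a typical choice, and error handling is
 * omitted):
 *
 *	upl_t			upl = NULL;
 *	upl_size_t		upl_size = len;
 *	unsigned int		count = 0;
 *	upl_control_flags_t	flags = UPL_SET_INTERNAL | UPL_SET_LITE;
 *	upl_page_info_t		*pl;
 *	vm_map_offset_t		kaddr;
 *	boolean_t		empty;
 *
 *	vm_map_create_upl(map, addr, &upl_size, &upl, NULL, &count,
 *	    &flags, VM_KERN_MEMORY_OSFMK);
 *	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 *
 *	vm_map_enter_upl(kernel_map, upl, &kaddr);
 *	... operate on the pages through 'kaddr', consult pl[i] ...
 *	vm_map_remove_upl(kernel_map, upl);
 *
 *	upl_commit_range(upl, 0, upl_size, 0, pl, count, &empty);
 *	if (empty)
 *		upl_deallocate(upl);
 */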
6219
6220 /*
6221 * Internal routine to enter a UPL into a VM map.
6222 *
6223 * JMM - This should just be doable through the standard
6224 * vm_map_enter() API.
6225 */
6226 kern_return_t
6227 vm_map_enter_upl(
6228 vm_map_t map,
6229 upl_t upl,
6230 vm_map_offset_t *dst_addr)
6231 {
6232 vm_map_size_t size;
6233 vm_object_offset_t offset;
6234 vm_map_offset_t addr;
6235 vm_page_t m;
6236 kern_return_t kr;
6237 int isVectorUPL = 0, curr_upl = 0;
6238 upl_t vector_upl = NULL;
6239 vm_offset_t vector_upl_dst_addr = 0;
6240 vm_map_t vector_upl_submap = NULL;
6241 upl_offset_t subupl_offset = 0;
6242 upl_size_t subupl_size = 0;
6243
6244 if (upl == UPL_NULL) {
6245 return KERN_INVALID_ARGUMENT;
6246 }
6247
6248 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6249 int mapped = 0, valid_upls = 0;
6250 vector_upl = upl;
6251
6252 upl_lock(vector_upl);
6253 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6254 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
6255 if (upl == NULL) {
6256 continue;
6257 }
6258 valid_upls++;
6259 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6260 mapped++;
6261 }
6262 }
6263
6264 if (mapped) {
6265 if (mapped != valid_upls) {
6266 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
6267 } else {
6268 upl_unlock(vector_upl);
6269 return KERN_FAILURE;
6270 }
6271 }
6272
6273 kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE,
6274 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
6275 &vector_upl_submap);
6276 if (kr != KERN_SUCCESS) {
6277 panic("Vector UPL submap allocation failed\n");
6278 }
6279 map = vector_upl_submap;
6280 vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
6281 curr_upl = 0;
6282 } else {
6283 upl_lock(upl);
6284 }
6285
6286 process_upl_to_enter:
6287 if (isVectorUPL) {
6288 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
6289 *dst_addr = vector_upl_dst_addr;
6290 upl_unlock(vector_upl);
6291 return KERN_SUCCESS;
6292 }
6293 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
6294 if (upl == NULL) {
6295 goto process_upl_to_enter;
6296 }
6297
6298 vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
6299 *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
6300 } else {
6301 /*
6302 * check to see if already mapped
6303 */
6304 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
6305 upl_unlock(upl);
6306 return KERN_FAILURE;
6307 }
6308 }
6309 if ((!(upl->flags & UPL_SHADOWED)) &&
6310 ((upl->flags & UPL_HAS_BUSY) ||
6311 !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
6312 vm_object_t object;
6313 vm_page_t alias_page;
6314 vm_object_offset_t new_offset;
6315 unsigned int pg_num;
6316 wpl_array_t lite_list;
6317
6318 if (upl->flags & UPL_INTERNAL) {
6319 lite_list = (wpl_array_t)
6320 ((((uintptr_t)upl) + sizeof(struct upl))
6321 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
6322 } else {
6323 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
6324 }
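/*
 * For an internal UPL the allocation is laid out as
 *	[ struct upl ][ upl_page_info_t[size / PAGE_SIZE] ][ lite bitmap ]
 * whereas an external UPL carries only the lite bitmap after the upl
 * structure, which is what the two address computations above reflect.
 */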
6325 object = upl->map_object;
6326 upl->map_object = vm_object_allocate(upl->size);
6327
6328 vm_object_lock(upl->map_object);
6329
6330 upl->map_object->shadow = object;
6331 upl->map_object->pageout = TRUE;
6332 upl->map_object->can_persist = FALSE;
6333 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
6334 upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
6335 upl->map_object->wimg_bits = object->wimg_bits;
6336 offset = upl->map_object->vo_shadow_offset;
6337 new_offset = 0;
6338 size = upl->size;
6339
6340 upl->flags |= UPL_SHADOWED;
6341
6342 while (size) {
6343 pg_num = (unsigned int) (new_offset / PAGE_SIZE);
6344 assert(pg_num == new_offset / PAGE_SIZE);
6345
6346 if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) {
6347 VM_PAGE_GRAB_FICTITIOUS(alias_page);
6348
6349 vm_object_lock(object);
6350
6351 m = vm_page_lookup(object, offset);
6352 if (m == VM_PAGE_NULL) {
6353 panic("vm_upl_map: page missing\n");
6354 }
6355
6356 /*
6357 * Convert the fictitious page to a private
6358 * shadow of the real page.
6359 */
6360 assert(alias_page->vmp_fictitious);
6361 alias_page->vmp_fictitious = FALSE;
6362 alias_page->vmp_private = TRUE;
6363 alias_page->vmp_free_when_done = TRUE;
6364 /*
6365 * since m is a page in the upl it must
6366 * already be wired or BUSY, so it's
6367 * safe to assign the underlying physical
6368 * page to the alias
6369 */
6370 VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
6371
6372 vm_object_unlock(object);
6373
6374 vm_page_lockspin_queues();
6375 vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
6376 vm_page_unlock_queues();
6377
6378 vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
6379
6380 assert(!alias_page->vmp_wanted);
6381 alias_page->vmp_busy = FALSE;
6382 alias_page->vmp_absent = FALSE;
6383 }
6384 size -= PAGE_SIZE;
6385 offset += PAGE_SIZE_64;
6386 new_offset += PAGE_SIZE_64;
6387 }
6388 vm_object_unlock(upl->map_object);
6389 }
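/*
 * When the branch above was taken, upl->map_object is now a freshly
 * allocated shadow populated with wired, private "alias" pages that
 * reference the same physical pages as the original object, so the
 * kernel mapping set up below goes through these aliases rather than
 * through the original object's pages.
 */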
6390 if (upl->flags & UPL_SHADOWED) {
6391 offset = 0;
6392 } else {
6393 offset = upl->offset - upl->map_object->paging_offset;
6394 }
6395
6396 size = upl->size;
6397
6398 vm_object_reference(upl->map_object);
6399
6400 if (!isVectorUPL) {
6401 *dst_addr = 0;
6402 /*
6403 * NEED A UPL_MAP ALIAS
6404 */
6405 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
6406 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
6407 upl->map_object, offset, FALSE,
6408 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
6409
6410 if (kr != KERN_SUCCESS) {
6411 vm_object_deallocate(upl->map_object);
6412 upl_unlock(upl);
6413 return kr;
6414 }
6415 } else {
6416 kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
6417 VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK,
6418 upl->map_object, offset, FALSE,
6419 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
6420 if (kr) {
6421 panic("vm_map_enter failed for a Vector UPL\n");
6422 }
6423 }
6424 vm_object_lock(upl->map_object);
6425
6426 for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
6427 m = vm_page_lookup(upl->map_object, offset);
6428
6429 if (m) {
6430 m->vmp_pmapped = TRUE;
6431
6432 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
6433 * but only in kernel space. If this was on a user map,
6434 * we'd have to set the wpmapped bit. */
6435 /* m->vmp_wpmapped = TRUE; */
6436 assert(map->pmap == kernel_pmap);
6437
6438 PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE, kr);
6439
6440 assert(kr == KERN_SUCCESS);
6441 #if KASAN
6442 kasan_notify_address(addr, PAGE_SIZE_64);
6443 #endif
6444 }
6445 offset += PAGE_SIZE_64;
6446 }
6447 vm_object_unlock(upl->map_object);
6448
6449 /*
6450 * hold a reference for the mapping
6451 */
6452 upl->ref_count++;
6453 upl->flags |= UPL_PAGE_LIST_MAPPED;
6454 upl->kaddr = (vm_offset_t) *dst_addr;
6455 assert(upl->kaddr == *dst_addr);
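/*
 * The reference taken above (upl->ref_count++) belongs to the mapping
 * itself; vm_map_remove_upl() drops it when the mapping is torn down,
 * so a mapped UPL always holds at least two references.
 */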
6456
6457 if (isVectorUPL) {
6458 goto process_upl_to_enter;
6459 }
6460
6461 upl_unlock(upl);
6462
6463 return KERN_SUCCESS;
6464 }
6465
6466 /*
6467 * Internal routine to remove a UPL mapping from a VM map.
6468 *
6469 * XXX - This should just be doable through a standard
6470 * vm_map_remove() operation. Otherwise, implicit clean-up
6471 * of the target map won't be able to correctly remove
6472 * these (and release the reference on the UPL). Having
6473 * to do this means we can't map these into user-space
6474 * maps yet.
6475 */
6476 kern_return_t
6477 vm_map_remove_upl(
6478 vm_map_t map,
6479 upl_t upl)
6480 {
6481 vm_address_t addr;
6482 upl_size_t size;
6483 int isVectorUPL = 0, curr_upl = 0;
6484 upl_t vector_upl = NULL;
6485
6486 if (upl == UPL_NULL) {
6487 return KERN_INVALID_ARGUMENT;
6488 }
6489
6490 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6491 int unmapped = 0, valid_upls = 0;
6492 vector_upl = upl;
6493 upl_lock(vector_upl);
6494 for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
6495 upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
6496 if (upl == NULL) {
6497 continue;
6498 }
6499 valid_upls++;
6500 if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) {
6501 unmapped++;
6502 }
6503 }
6504
6505 if (unmapped) {
6506 if (unmapped != valid_upls) {
6507 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
6508 } else {
6509 upl_unlock(vector_upl);
6510 return KERN_FAILURE;
6511 }
6512 }
6513 curr_upl = 0;
6514 } else {
6515 upl_lock(upl);
6516 }
6517
6518 process_upl_to_remove:
6519 if (isVectorUPL) {
6520 if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
6521 vm_map_t v_upl_submap;
6522 vm_offset_t v_upl_submap_dst_addr;
6523 vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
6524
6525 vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_REMOVE_NO_FLAGS);
6526 vm_map_deallocate(v_upl_submap);
6527 upl_unlock(vector_upl);
6528 return KERN_SUCCESS;
6529 }
6530
6531 upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
6532 if (upl == NULL) {
6533 goto process_upl_to_remove;
6534 }
6535 }
6536
6537 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
6538 addr = upl->kaddr;
6539 size = upl->size;
6540
6541 assert(upl->ref_count > 1);
6542 upl->ref_count--; /* removing mapping ref */
6543
6544 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
6545 upl->kaddr = (vm_offset_t) 0;
6546
6547 if (!isVectorUPL) {
6548 upl_unlock(upl);
6549
6550 vm_map_remove(
6551 map,
6552 vm_map_trunc_page(addr,
6553 VM_MAP_PAGE_MASK(map)),
6554 vm_map_round_page(addr + size,
6555 VM_MAP_PAGE_MASK(map)),
6556 VM_MAP_REMOVE_NO_FLAGS);
6557 return KERN_SUCCESS;
6558 } else {
6559 /*
6560 * If it's a Vectored UPL, we'll be removing the entire
6561 * submap anyway, so there is no need to remove individual UPL
6562 * element mappings from within the submap
6563 */
6564 goto process_upl_to_remove;
6565 }
6566 }
6567 upl_unlock(upl);
6568
6569 return KERN_FAILURE;
6570 }
6571
6572
6573 kern_return_t
6574 upl_commit_range(
6575 upl_t upl,
6576 upl_offset_t offset,
6577 upl_size_t size,
6578 int flags,
6579 upl_page_info_t *page_list,
6580 mach_msg_type_number_t count,
6581 boolean_t *empty)
6582 {
6583 upl_size_t xfer_size, subupl_size = size;
6584 vm_object_t shadow_object;
6585 vm_object_t object;
6586 vm_object_t m_object;
6587 vm_object_offset_t target_offset;
6588 upl_offset_t subupl_offset = offset;
6589 int entry;
6590 wpl_array_t lite_list;
6591 int occupied;
6592 int clear_refmod = 0;
6593 int pgpgout_count = 0;
6594 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
6595 struct vm_page_delayed_work *dwp;
6596 int dw_count;
6597 int dw_limit;
6598 int isVectorUPL = 0;
6599 upl_t vector_upl = NULL;
6600 boolean_t should_be_throttled = FALSE;
6601
6602 vm_page_t nxt_page = VM_PAGE_NULL;
6603 int fast_path_possible = 0;
6604 int fast_path_full_commit = 0;
6605 int throttle_page = 0;
6606 int unwired_count = 0;
6607 int local_queue_count = 0;
6608 vm_page_t first_local, last_local;
6609
6610 *empty = FALSE;
6611
6612 if (upl == UPL_NULL) {
6613 return KERN_INVALID_ARGUMENT;
6614 }
6615
6616 if (count == 0) {
6617 page_list = NULL;
6618 }
6619
6620 if ((isVectorUPL = vector_upl_is_valid(upl))) {
6621 vector_upl = upl;
6622 upl_lock(vector_upl);
6623 } else {
6624 upl_lock(upl);
6625 }
6626
6627 process_upl_to_commit:
6628
6629 if (isVectorUPL) {
6630 size = subupl_size;
6631 offset = subupl_offset;
6632 if (size == 0) {
6633 upl_unlock(vector_upl);
6634 return KERN_SUCCESS;
6635 }
6636 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
6637 if (upl == NULL) {
6638 upl_unlock(vector_upl);
6639 return KERN_FAILURE;
6640 }
6641 page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
6642 subupl_size -= size;
6643 subupl_offset += size;
6644 }
6645
6646 #if UPL_DEBUG
6647 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
6648 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
6649
6650 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
6651 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
6652
6653 upl->upl_commit_index++;
6654 }
6655 #endif
6656 if (upl->flags & UPL_DEVICE_MEMORY) {
6657 xfer_size = 0;
6658 } else if ((offset + size) <= upl->size) {
6659 xfer_size = size;
6660 } else {
6661 if (!isVectorUPL) {
6662 upl_unlock(upl);
6663 } else {
6664 upl_unlock(vector_upl);
6665 }
6666 return KERN_FAILURE;
6667 }
6668 if (upl->flags & UPL_SET_DIRTY) {
6669 flags |= UPL_COMMIT_SET_DIRTY;
6670 }
6671 if (upl->flags & UPL_CLEAR_DIRTY) {
6672 flags |= UPL_COMMIT_CLEAR_DIRTY;
6673 }
6674
6675 if (upl->flags & UPL_INTERNAL) {
6676 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
6677 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
6678 } else {
6679 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
6680 }
6681
6682 object = upl->map_object;
6683
6684 if (upl->flags & UPL_SHADOWED) {
6685 vm_object_lock(object);
6686 shadow_object = object->shadow;
6687 } else {
6688 shadow_object = object;
6689 }
6690 entry = offset / PAGE_SIZE;
6691 target_offset = (vm_object_offset_t)offset;
6692
6693 assert(!(target_offset & PAGE_MASK));
6694 assert(!(xfer_size & PAGE_MASK));
6695
6696 if (upl->flags & UPL_KERNEL_OBJECT) {
6697 vm_object_lock_shared(shadow_object);
6698 } else {
6699 vm_object_lock(shadow_object);
6700 }
6701
6702 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object);
6703
6704 if (upl->flags & UPL_ACCESS_BLOCKED) {
6705 assert(shadow_object->blocked_access);
6706 shadow_object->blocked_access = FALSE;
6707 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
6708 }
6709
6710 if (shadow_object->code_signed) {
6711 /*
6712 * CODE SIGNING:
6713 * If the object is code-signed, do not let this UPL tell
6714 * us if the pages are valid or not. Let the pages be
6715 * validated by VM the normal way (when they get mapped or
6716 * copied).
6717 */
6718 flags &= ~UPL_COMMIT_CS_VALIDATED;
6719 }
6720 if (!page_list) {
6721 /*
6722 * No page list to get the code-signing info from !?
6723 */
6724 flags &= ~UPL_COMMIT_CS_VALIDATED;
6725 }
6726 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) {
6727 should_be_throttled = TRUE;
6728 }
6729
6730 dwp = &dw_array[0];
6731 dw_count = 0;
6732 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
6733
6734 if ((upl->flags & UPL_IO_WIRE) &&
6735 !(flags & UPL_COMMIT_FREE_ABSENT) &&
6736 !isVectorUPL &&
6737 shadow_object->purgable != VM_PURGABLE_VOLATILE &&
6738 shadow_object->purgable != VM_PURGABLE_EMPTY) {
6739 if (!vm_page_queue_empty(&shadow_object->memq)) {
6740 if (size == shadow_object->vo_size) {
6741 nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
6742 fast_path_full_commit = 1;
6743 }
6744 fast_path_possible = 1;
6745
6746 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
6747 (shadow_object->purgable == VM_PURGABLE_DENY ||
6748 shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
6749 shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
6750 throttle_page = 1;
6751 }
6752 }
6753 }
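/*
 * When the fast path is possible, pages whose wire count drops to zero
 * in the commit loop are gathered on a private list (first_local /
 * last_local) and the global page queues are touched only once, after
 * the loop, to splice that list in and adjust the counters.
 */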
6754 first_local = VM_PAGE_NULL;
6755 last_local = VM_PAGE_NULL;
6756
6757 while (xfer_size) {
6758 vm_page_t t, m;
6759
6760 dwp->dw_mask = 0;
6761 clear_refmod = 0;
6762
6763 m = VM_PAGE_NULL;
6764
6765 if (upl->flags & UPL_LITE) {
6766 unsigned int pg_num;
6767
6768 if (nxt_page != VM_PAGE_NULL) {
6769 m = nxt_page;
6770 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
6771 target_offset = m->vmp_offset;
6772 }
6773 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
6774 assert(pg_num == target_offset / PAGE_SIZE);
6775
6776 if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) {
6777 lite_list[pg_num >> 5] &= ~(1 << (pg_num & 31));
6778
6779 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
6780 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
6781 }
6782 } else {
6783 m = NULL;
6784 }
6785 }
6786 if (upl->flags & UPL_SHADOWED) {
6787 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
6788 t->vmp_free_when_done = FALSE;
6789
6790 VM_PAGE_FREE(t);
6791
6792 if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) {
6793 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
6794 }
6795 }
6796 }
6797 if (m == VM_PAGE_NULL) {
6798 goto commit_next_page;
6799 }
6800
6801 m_object = VM_PAGE_OBJECT(m);
6802
6803 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6804 assert(m->vmp_busy);
6805
6806 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
6807 goto commit_next_page;
6808 }
6809
6810 if (flags & UPL_COMMIT_CS_VALIDATED) {
6811 /*
6812 * CODE SIGNING:
6813 * Set the code signing bits according to
6814 * what the UPL says they should be.
6815 */
6816 m->vmp_cs_validated = page_list[entry].cs_validated;
6817 m->vmp_cs_tainted = page_list[entry].cs_tainted;
6818 m->vmp_cs_nx = page_list[entry].cs_nx;
6819 }
6820 if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) {
6821 m->vmp_written_by_kernel = TRUE;
6822 }
6823
6824 if (upl->flags & UPL_IO_WIRE) {
6825 if (page_list) {
6826 page_list[entry].phys_addr = 0;
6827 }
6828
6829 if (flags & UPL_COMMIT_SET_DIRTY) {
6830 SET_PAGE_DIRTY(m, FALSE);
6831 } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
6832 m->vmp_dirty = FALSE;
6833
6834 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
6835 m->vmp_cs_validated && !m->vmp_cs_tainted) {
6836 /*
6837 * CODE SIGNING:
6838 * This page is no longer dirty
6839 * but could have been modified,
6840 * so it will need to be
6841 * re-validated.
6842 */
6843 m->vmp_cs_validated = FALSE;
6844
6845 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
6846
6847 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6848 }
6849 clear_refmod |= VM_MEM_MODIFIED;
6850 }
6851 if (upl->flags & UPL_ACCESS_BLOCKED) {
6852 /*
6853 * We blocked access to the pages in this UPL.
6854 * Clear the "busy" bit and wake up any waiter
6855 * for this page.
6856 */
6857 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
6858 }
6859 if (fast_path_possible) {
6860 assert(m_object->purgable != VM_PURGABLE_EMPTY);
6861 assert(m_object->purgable != VM_PURGABLE_VOLATILE);
6862 if (m->vmp_absent) {
6863 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
6864 assert(m->vmp_wire_count == 0);
6865 assert(m->vmp_busy);
6866
6867 m->vmp_absent = FALSE;
6868 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
6869 } else {
6870 if (m->vmp_wire_count == 0) {
6871 panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
6872 }
6873 assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
6874
6875 /*
6876 * XXX FBDP need to update some other
6877 * counters here (purgeable_wired_count)
6878 * (ledgers), ...
6879 */
6880 assert(m->vmp_wire_count > 0);
6881 m->vmp_wire_count--;
6882
6883 if (m->vmp_wire_count == 0) {
6884 m->vmp_q_state = VM_PAGE_NOT_ON_Q;
6885 unwired_count++;
6886 }
6887 }
6888 if (m->vmp_wire_count == 0) {
6889 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6890
6891 if (last_local == VM_PAGE_NULL) {
6892 assert(first_local == VM_PAGE_NULL);
6893
6894 last_local = m;
6895 first_local = m;
6896 } else {
6897 assert(first_local != VM_PAGE_NULL);
6898
6899 m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
6900 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
6901 first_local = m;
6902 }
6903 local_queue_count++;
6904
6905 if (throttle_page) {
6906 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
6907 } else {
6908 if (flags & UPL_COMMIT_INACTIVATE) {
6909 if (shadow_object->internal) {
6910 m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
6911 } else {
6912 m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
6913 }
6914 } else {
6915 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
6916 }
6917 }
6918 }
6919 } else {
6920 if (flags & UPL_COMMIT_INACTIVATE) {
6921 dwp->dw_mask |= DW_vm_page_deactivate_internal;
6922 clear_refmod |= VM_MEM_REFERENCED;
6923 }
6924 if (m->vmp_absent) {
6925 if (flags & UPL_COMMIT_FREE_ABSENT) {
6926 dwp->dw_mask |= DW_vm_page_free;
6927 } else {
6928 m->vmp_absent = FALSE;
6929 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
6930
6931 if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) {
6932 dwp->dw_mask |= DW_vm_page_activate;
6933 }
6934 }
6935 } else {
6936 dwp->dw_mask |= DW_vm_page_unwire;
6937 }
6938 }
6939 goto commit_next_page;
6940 }
6941 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6942
6943 if (page_list) {
6944 page_list[entry].phys_addr = 0;
6945 }
6946
6947 /*
6948 * make sure to clear the hardware
6949 * modify or reference bits before
6950 * releasing the BUSY bit on this page
6951 * otherwise we risk losing a legitimate
6952 * change of state
6953 */
6954 if (flags & UPL_COMMIT_CLEAR_DIRTY) {
6955 m->vmp_dirty = FALSE;
6956
6957 clear_refmod |= VM_MEM_MODIFIED;
6958 }
6959 if (m->vmp_laundry) {
6960 dwp->dw_mask |= DW_vm_pageout_throttle_up;
6961 }
6962
6963 if (VM_PAGE_WIRED(m)) {
6964 m->vmp_free_when_done = FALSE;
6965 }
6966
6967 if (!(flags & UPL_COMMIT_CS_VALIDATED) &&
6968 m->vmp_cs_validated && !m->vmp_cs_tainted) {
6969 /*
6970 * CODE SIGNING:
6971 * This page is no longer dirty
6972 * but could have been modified,
6973 * so it will need to be
6974 * re-validated.
6975 */
6976 m->vmp_cs_validated = FALSE;
6977
6978 VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);
6979
6980 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6981 }
6982 if (m->vmp_overwriting) {
6983 /*
6984 * the (COPY_OUT_FROM == FALSE) request_page_list case
6985 */
6986 if (m->vmp_busy) {
6987 #if CONFIG_PHANTOM_CACHE
6988 if (m->vmp_absent && !m_object->internal) {
6989 dwp->dw_mask |= DW_vm_phantom_cache_update;
6990 }
6991 #endif
6992 m->vmp_absent = FALSE;
6993
6994 dwp->dw_mask |= DW_clear_busy;
6995 } else {
6996 /*
6997 * alternate (COPY_OUT_FROM == FALSE) page_list case
6998 * Occurs when the original page was wired
6999 * at the time of the list request
7000 */
7001 assert(VM_PAGE_WIRED(m));
7002
7003 dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
7004 }
7005 m->vmp_overwriting = FALSE;
7006 }
7007 m->vmp_cleaning = FALSE;
7008
7009 if (m->vmp_free_when_done) {
7010 /*
7011 * With the clean queue enabled, UPL_PAGEOUT should
7012 * no longer set the pageout bit. Its pages now go
7013 * to the clean queue.
7014 */
7015 assert(!(flags & UPL_PAGEOUT));
7016 assert(!m_object->internal);
7017
7018 m->vmp_free_when_done = FALSE;
7019
7020 if ((flags & UPL_COMMIT_SET_DIRTY) ||
7021 (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
7022 /*
7023 * page was re-dirtied after we started
7024 * the pageout... reactivate it since
7025 * we don't know whether the on-disk
7026 * copy matches what is now in memory
7027 */
7028 SET_PAGE_DIRTY(m, FALSE);
7029
7030 dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
7031
7032 if (upl->flags & UPL_PAGEOUT) {
7033 VM_STAT_INCR(reactivations);
7034 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
7035 }
7036 } else {
7037 /*
7038 * page has been successfully cleaned
7039 * go ahead and free it for other use
7040 */
7041 if (m_object->internal) {
7042 DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
7043 } else {
7044 DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
7045 }
7046 m->vmp_dirty = FALSE;
7047 m->vmp_busy = TRUE;
7048
7049 dwp->dw_mask |= DW_vm_page_free;
7050 }
7051 goto commit_next_page;
7052 }
7053 /*
7054 * It is part of the semantics of COPYOUT_FROM
7055 * UPLs that a commit implies a cache sync
7056 * between the vm page and the backing store;
7057 * this can be used to strip the precious bit
7058 * as well as to clean
7059 */
7060 if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) {
7061 m->vmp_precious = FALSE;
7062 }
7063
7064 if (flags & UPL_COMMIT_SET_DIRTY) {
7065 SET_PAGE_DIRTY(m, FALSE);
7066 } else {
7067 m->vmp_dirty = FALSE;
7068 }
7069
7070 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7071 if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) {
7072 pgpgout_count++;
7073
7074 VM_STAT_INCR(pageouts);
7075 DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
7076
7077 dwp->dw_mask |= DW_enqueue_cleaned;
7078 } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
7079 /*
7080 * page coming back in from being 'frozen'...
7081 * it was dirty before it was frozen, so keep it dirty so that
7082 * vm_page_activate will notice that it really belongs
7083 * on the throttle queue and put it there
7084 */
7085 SET_PAGE_DIRTY(m, FALSE);
7086 dwp->dw_mask |= DW_vm_page_activate;
7087 } else {
7088 if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
7089 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7090 clear_refmod |= VM_MEM_REFERENCED;
7091 } else if (!VM_PAGE_PAGEABLE(m)) {
7092 if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) {
7093 dwp->dw_mask |= DW_vm_page_speculate;
7094 } else if (m->vmp_reference) {
7095 dwp->dw_mask |= DW_vm_page_activate;
7096 } else {
7097 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7098 clear_refmod |= VM_MEM_REFERENCED;
7099 }
7100 }
7101 }
7102 if (upl->flags & UPL_ACCESS_BLOCKED) {
7103 /*
7104 * We blocked access to the pages in this UPL.
7105 * Clear the "busy" bit on this page before we
7106 * wake up any waiter.
7107 */
7108 dwp->dw_mask |= DW_clear_busy;
7109 }
7110 /*
7111 * Wakeup any thread waiting for the page to be un-cleaning.
7112 */
7113 dwp->dw_mask |= DW_PAGE_WAKEUP;
7114
7115 commit_next_page:
7116 if (clear_refmod) {
7117 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
7118 }
7119
7120 target_offset += PAGE_SIZE_64;
7121 xfer_size -= PAGE_SIZE;
7122 entry++;
7123
7124 if (dwp->dw_mask) {
7125 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
7126 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
7127
7128 if (dw_count >= dw_limit) {
7129 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7130
7131 dwp = &dw_array[0];
7132 dw_count = 0;
7133 }
7134 } else {
7135 if (dwp->dw_mask & DW_clear_busy) {
7136 m->vmp_busy = FALSE;
7137 }
7138
7139 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7140 PAGE_WAKEUP(m);
7141 }
7142 }
7143 }
7144 }
7145 if (dw_count) {
7146 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7147 }
7148
7149 if (fast_path_possible) {
7150 assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
7151 assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
7152
7153 if (local_queue_count || unwired_count) {
7154 if (local_queue_count) {
7155 vm_page_t first_target;
7156 vm_page_queue_head_t *target_queue;
7157
7158 if (throttle_page) {
7159 target_queue = &vm_page_queue_throttled;
7160 } else {
7161 if (flags & UPL_COMMIT_INACTIVATE) {
7162 if (shadow_object->internal) {
7163 target_queue = &vm_page_queue_anonymous;
7164 } else {
7165 target_queue = &vm_page_queue_inactive;
7166 }
7167 } else {
7168 target_queue = &vm_page_queue_active;
7169 }
7170 }
7171 /*
7172 * Splice the entire local queue (first_local .. last_local) onto the head of the chosen LRU page queue.
7173 */
7174 vm_page_lockspin_queues();
7175
7176 first_target = (vm_page_t) vm_page_queue_first(target_queue);
7177
7178 if (vm_page_queue_empty(target_queue)) {
7179 target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7180 } else {
7181 first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
7182 }
7183
7184 target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
7185 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
7186 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
7187
7188 /*
7189 * Adjust the global page counts.
7190 */
7191 if (throttle_page) {
7192 vm_page_throttled_count += local_queue_count;
7193 } else {
7194 if (flags & UPL_COMMIT_INACTIVATE) {
7195 if (shadow_object->internal) {
7196 vm_page_anonymous_count += local_queue_count;
7197 }
7198 vm_page_inactive_count += local_queue_count;
7199
7200 token_new_pagecount += local_queue_count;
7201 } else {
7202 vm_page_active_count += local_queue_count;
7203 }
7204
7205 if (shadow_object->internal) {
7206 vm_page_pageable_internal_count += local_queue_count;
7207 } else {
7208 vm_page_pageable_external_count += local_queue_count;
7209 }
7210 }
7211 } else {
7212 vm_page_lockspin_queues();
7213 }
7214 if (unwired_count) {
7215 vm_page_wire_count -= unwired_count;
7216 VM_CHECK_MEMORYSTATUS;
7217 }
7218 vm_page_unlock_queues();
7219
7220 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count);
7221 }
7222 }
7223 occupied = 1;
7224
7225 if (upl->flags & UPL_DEVICE_MEMORY) {
7226 occupied = 0;
7227 } else if (upl->flags & UPL_LITE) {
7228 int pg_num;
7229 int i;
7230
7231 occupied = 0;
7232
7233 if (!fast_path_full_commit) {
7234 pg_num = upl->size / PAGE_SIZE;
7235 pg_num = (pg_num + 31) >> 5;
7236
7237 for (i = 0; i < pg_num; i++) {
7238 if (lite_list[i] != 0) {
7239 occupied = 1;
7240 break;
7241 }
7242 }
7243 }
7244 } else {
7245 if (vm_page_queue_empty(&upl->map_object->memq)) {
7246 occupied = 0;
7247 }
7248 }
7249 if (occupied == 0) {
7250 /*
7251 * If this UPL element belongs to a Vector UPL and is
7252 * empty, then this is the right function to deallocate
7253 * it, so go ahead and set the *empty variable. From the
7254 * caller's point of view, the UPL_COMMIT_NOTIFY_EMPTY flag
7255 * should be considered relevant for the Vector UPL and not
7256 * the internal UPLs.
7257 */
7258 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
7259 *empty = TRUE;
7260 }
7261
7262 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
7263 /*
7264 * this is not a paging object
7265 * so we need to drop the paging reference
7266 * that was taken when we created the UPL
7267 * against this object
7268 */
7269 vm_object_activity_end(shadow_object);
7270 vm_object_collapse(shadow_object, 0, TRUE);
7271 } else {
7272 /*
7273 * we donated the paging reference to
7274 * the map object... vm_pageout_object_terminate
7275 * will drop this reference
7276 */
7277 }
7278 }
7279 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag);
7280 vm_object_unlock(shadow_object);
7281 if (object != shadow_object) {
7282 vm_object_unlock(object);
7283 }
7284
7285 if (!isVectorUPL) {
7286 upl_unlock(upl);
7287 } else {
7288 /*
7289 * If we completed our operations on an UPL that is
7290 * part of a Vectored UPL and if empty is TRUE, then
7291 * we should go ahead and deallocate this UPL element.
7292 * Then we check if this was the last of the UPL elements
7293 * within that Vectored UPL. If so, set empty to TRUE
7294 * so that in ubc_upl_commit_range or ubc_upl_commit, we
7295 * can go ahead and deallocate the Vector UPL too.
7296 */
7297 if (*empty == TRUE) {
7298 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
7299 upl_deallocate(upl);
7300 }
7301 goto process_upl_to_commit;
7302 }
7303 if (pgpgout_count) {
7304 DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
7305 }
7306
7307 return KERN_SUCCESS;
7308 }
7309
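/*
 * upl_abort_range:  back out the pages covered by [offset, offset+size)
 * of the given UPL.  Pages are un-busied and woken, absent pages are
 * freed or reactivated, and any error condition requested by the caller
 * (UPL_ABORT_ERROR, UPL_ABORT_RESTART, UPL_ABORT_UNAVAILABLE,
 * UPL_ABORT_DUMP_PAGES, UPL_ABORT_REFERENCE) is recorded on the pages.
 * For IO-wired UPLs without UPL_ABORT_DUMP_PAGES this degenerates into
 * a commit.  *empty reports whether the UPL has been fully drained
 * (relevant mainly for Vector UPLs).
 */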
7310 kern_return_t
7311 upl_abort_range(
7312 upl_t upl,
7313 upl_offset_t offset,
7314 upl_size_t size,
7315 int error,
7316 boolean_t *empty)
7317 {
7318 upl_page_info_t *user_page_list = NULL;
7319 upl_size_t xfer_size, subupl_size = size;
7320 vm_object_t shadow_object;
7321 vm_object_t object;
7322 vm_object_offset_t target_offset;
7323 upl_offset_t subupl_offset = offset;
7324 int entry;
7325 wpl_array_t lite_list;
7326 int occupied;
7327 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
7328 struct vm_page_delayed_work *dwp;
7329 int dw_count;
7330 int dw_limit;
7331 int isVectorUPL = 0;
7332 upl_t vector_upl = NULL;
7333
7334 *empty = FALSE;
7335
7336 if (upl == UPL_NULL) {
7337 return KERN_INVALID_ARGUMENT;
7338 }
7339
7340 if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) {
7341 return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
7342 }
7343
7344 if ((isVectorUPL = vector_upl_is_valid(upl))) {
7345 vector_upl = upl;
7346 upl_lock(vector_upl);
7347 } else {
7348 upl_lock(upl);
7349 }
7350
7351 process_upl_to_abort:
7352 if (isVectorUPL) {
7353 size = subupl_size;
7354 offset = subupl_offset;
7355 if (size == 0) {
7356 upl_unlock(vector_upl);
7357 return KERN_SUCCESS;
7358 }
7359 upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
7360 if (upl == NULL) {
7361 upl_unlock(vector_upl);
7362 return KERN_FAILURE;
7363 }
7364 subupl_size -= size;
7365 subupl_offset += size;
7366 }
7367
7368 *empty = FALSE;
7369
7370 #if UPL_DEBUG
7371 if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
7372 (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
7373
7374 upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
7375 upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
7376 upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
7377
7378 upl->upl_commit_index++;
7379 }
7380 #endif
7381 if (upl->flags & UPL_DEVICE_MEMORY) {
7382 xfer_size = 0;
7383 } else if ((offset + size) <= upl->size) {
7384 xfer_size = size;
7385 } else {
7386 if (!isVectorUPL) {
7387 upl_unlock(upl);
7388 } else {
7389 upl_unlock(vector_upl);
7390 }
7391
7392 return KERN_FAILURE;
7393 }
7394 if (upl->flags & UPL_INTERNAL) {
7395 lite_list = (wpl_array_t)
7396 ((((uintptr_t)upl) + sizeof(struct upl))
7397 + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
7398
7399 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
7400 } else {
7401 lite_list = (wpl_array_t)
7402 (((uintptr_t)upl) + sizeof(struct upl));
7403 }
7404 object = upl->map_object;
7405
7406 if (upl->flags & UPL_SHADOWED) {
7407 vm_object_lock(object);
7408 shadow_object = object->shadow;
7409 } else {
7410 shadow_object = object;
7411 }
7412
7413 entry = offset / PAGE_SIZE;
7414 target_offset = (vm_object_offset_t)offset;
7415
7416 assert(!(target_offset & PAGE_MASK));
7417 assert(!(xfer_size & PAGE_MASK));
7418
7419 if (upl->flags & UPL_KERNEL_OBJECT) {
7420 vm_object_lock_shared(shadow_object);
7421 } else {
7422 vm_object_lock(shadow_object);
7423 }
7424
7425 if (upl->flags & UPL_ACCESS_BLOCKED) {
7426 assert(shadow_object->blocked_access);
7427 shadow_object->blocked_access = FALSE;
7428 vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
7429 }
7430
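	/*
	 * Page state changes below are not applied immediately: they are
	 * accumulated in dw_array and flushed in batches of up to dw_limit
	 * entries by vm_page_do_delayed_work(), which takes the page-queues
	 * lock once per batch instead of once per page.
	 */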
7431 dwp = &dw_array[0];
7432 dw_count = 0;
7433 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
7434
7435 if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) {
7436 panic("upl_abort_range: kernel_object being DUMPED");
7437 }
7438
7439 while (xfer_size) {
7440 vm_page_t t, m;
7441 unsigned int pg_num;
7442 boolean_t needed;
7443
7444 pg_num = (unsigned int) (target_offset / PAGE_SIZE);
7445 assert(pg_num == target_offset / PAGE_SIZE);
7446
7447 needed = FALSE;
7448
7449 if (user_page_list) {
7450 needed = user_page_list[pg_num].needed;
7451 }
7452
7453 dwp->dw_mask = 0;
7454 m = VM_PAGE_NULL;
7455
7456 if (upl->flags & UPL_LITE) {
7457 if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) {
7458 lite_list[pg_num >> 5] &= ~(1 << (pg_num & 31));
7459
7460 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
7461 m = vm_page_lookup(shadow_object, target_offset +
7462 (upl->offset - shadow_object->paging_offset));
7463 }
7464 }
7465 }
7466 if (upl->flags & UPL_SHADOWED) {
7467 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
7468 t->vmp_free_when_done = FALSE;
7469
7470 VM_PAGE_FREE(t);
7471
7472 if (m == VM_PAGE_NULL) {
7473 m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
7474 }
7475 }
7476 }
7477 if ((upl->flags & UPL_KERNEL_OBJECT)) {
7478 goto abort_next_page;
7479 }
7480
7481 if (m != VM_PAGE_NULL) {
7482 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7483
7484 if (m->vmp_absent) {
7485 boolean_t must_free = TRUE;
7486
7487 /*
7488 * COPYOUT = FALSE case
7489 * check for error conditions which must
7490 * be passed back to the page's customer
7491 */
7492 if (error & UPL_ABORT_RESTART) {
7493 m->vmp_restart = TRUE;
7494 m->vmp_absent = FALSE;
7495 m->vmp_unusual = TRUE;
7496 must_free = FALSE;
7497 } else if (error & UPL_ABORT_UNAVAILABLE) {
7498 m->vmp_restart = FALSE;
7499 m->vmp_unusual = TRUE;
7500 must_free = FALSE;
7501 } else if (error & UPL_ABORT_ERROR) {
7502 m->vmp_restart = FALSE;
7503 m->vmp_absent = FALSE;
7504 m->vmp_error = TRUE;
7505 m->vmp_unusual = TRUE;
7506 must_free = FALSE;
7507 }
7508 if (m->vmp_clustered && needed == FALSE) {
7509 /*
7510 * This page was a part of a speculative
7511 * read-ahead initiated by the kernel
7512 * itself. No one is expecting this
7513 * page and no one will clean up its
7514 * error state if it ever becomes valid
7515 * in the future.
7516 * We have to free it here.
7517 */
7518 must_free = TRUE;
7519 }
7520 m->vmp_cleaning = FALSE;
7521
7522 if (m->vmp_overwriting && !m->vmp_busy) {
7523 /*
7524 * this shouldn't happen since
7525 * this is an 'absent' page, but
7526 * it doesn't hurt to check for
7527 * the 'alternate' method of
7528 * stabilizing the page...
7529 * we will mark 'busy' to be cleared
7530 * in the following code which will
7531 * take care of the primary stabilization
7532 * method (i.e. setting 'busy' to TRUE)
7533 */
7534 dwp->dw_mask |= DW_vm_page_unwire;
7535 }
7536 m->vmp_overwriting = FALSE;
7537
7538 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
7539
7540 if (must_free == TRUE) {
7541 dwp->dw_mask |= DW_vm_page_free;
7542 } else {
7543 dwp->dw_mask |= DW_vm_page_activate;
7544 }
7545 } else {
7546 /*
7547 * Handle the trusted pager throttle.
7548 */
7549 if (m->vmp_laundry) {
7550 dwp->dw_mask |= DW_vm_pageout_throttle_up;
7551 }
7552
7553 if (upl->flags & UPL_ACCESS_BLOCKED) {
7554 /*
7555 * We blocked access to the pages in this UPL.
7556 * Clear the "busy" bit and wake up any waiter
7557 * for this page.
7558 */
7559 dwp->dw_mask |= DW_clear_busy;
7560 }
7561 if (m->vmp_overwriting) {
7562 if (m->vmp_busy) {
7563 dwp->dw_mask |= DW_clear_busy;
7564 } else {
7565 /*
7566 * deal with the 'alternate' method
7567 * of stabilizing the page...
7568 * we will either free the page
7569 * or mark 'busy' to be cleared
7570 * in the following code which will
7571 * take care of the primary stabilization
7572 * method (i.e. setting 'busy' to TRUE)
7573 */
7574 dwp->dw_mask |= DW_vm_page_unwire;
7575 }
7576 m->vmp_overwriting = FALSE;
7577 }
7578 m->vmp_free_when_done = FALSE;
7579 m->vmp_cleaning = FALSE;
7580
7581 if (error & UPL_ABORT_DUMP_PAGES) {
7582 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7583
7584 dwp->dw_mask |= DW_vm_page_free;
7585 } else {
7586 if (!(dwp->dw_mask & DW_vm_page_unwire)) {
7587 if (error & UPL_ABORT_REFERENCE) {
7588 /*
7589 * we've been told to explicitly
7590 * reference this page... for
7591 * file I/O, this is done by
7592 * implementing an LRU on the inactive q
7593 */
7594 dwp->dw_mask |= DW_vm_page_lru;
7595 } else if (!VM_PAGE_PAGEABLE(m)) {
7596 dwp->dw_mask |= DW_vm_page_deactivate_internal;
7597 }
7598 }
7599 dwp->dw_mask |= DW_PAGE_WAKEUP;
7600 }
7601 }
7602 }
7603 abort_next_page:
7604 target_offset += PAGE_SIZE_64;
7605 xfer_size -= PAGE_SIZE;
7606 entry++;
7607
7608 if (dwp->dw_mask) {
7609 if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
7610 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
7611
7612 if (dw_count >= dw_limit) {
7613 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7614
7615 dwp = &dw_array[0];
7616 dw_count = 0;
7617 }
7618 } else {
7619 if (dwp->dw_mask & DW_clear_busy) {
7620 m->vmp_busy = FALSE;
7621 }
7622
7623 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7624 PAGE_WAKEUP(m);
7625 }
7626 }
7627 }
7628 }
7629 if (dw_count) {
7630 vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
7631 }
7632
7633 occupied = 1;
7634
7635 if (upl->flags & UPL_DEVICE_MEMORY) {
7636 occupied = 0;
7637 } else if (upl->flags & UPL_LITE) {
7638 int pg_num;
7639 int i;
7640
7641 pg_num = upl->size / PAGE_SIZE;
7642 pg_num = (pg_num + 31) >> 5;
7643 occupied = 0;
7644
7645 for (i = 0; i < pg_num; i++) {
7646 if (lite_list[i] != 0) {
7647 occupied = 1;
7648 break;
7649 }
7650 }
7651 } else {
7652 if (vm_page_queue_empty(&upl->map_object->memq)) {
7653 occupied = 0;
7654 }
7655 }
7656 if (occupied == 0) {
7657 /*
7658 * If this UPL element belongs to a Vector UPL and is
7659 * empty, then this is the right function to deallocate
7660 * it, so go ahead and set the *empty variable. From the
7661 * caller's point of view, the UPL_COMMIT_NOTIFY_EMPTY flag
7662 * should be considered relevant for the Vector UPL and
7663 * not the internal UPLs.
7664 */
7665 if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
7666 *empty = TRUE;
7667 }
7668
7669 if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
7670 /*
7671 * this is not a paging object
7672 * so we need to drop the paging reference
7673 * that was taken when we created the UPL
7674 * against this object
7675 */
7676 vm_object_activity_end(shadow_object);
7677 vm_object_collapse(shadow_object, 0, TRUE);
7678 } else {
7679 /*
7680 * we donated the paging reference to
7681 * the map object... vm_pageout_object_terminate
7682 * will drop this reference
7683 */
7684 }
7685 }
7686 vm_object_unlock(shadow_object);
7687 if (object != shadow_object) {
7688 vm_object_unlock(object);
7689 }
7690
7691 if (!isVectorUPL) {
7692 upl_unlock(upl);
7693 } else {
7694 /*
7695 * If we completed our operations on an UPL that is
7696 * part of a Vectored UPL and if empty is TRUE, then
7697 * we should go ahead and deallocate this UPL element.
7698 * Then we check if this was the last of the UPL elements
7699 * within that Vectored UPL. If so, set empty to TRUE
7700 * so that in ubc_upl_abort_range or ubc_upl_abort, we
7701 * can go ahead and deallocate the Vector UPL too.
7702 */
7703 if (*empty == TRUE) {
7704 *empty = vector_upl_set_subupl(vector_upl, upl, 0);
7705 upl_deallocate(upl);
7706 }
7707 goto process_upl_to_abort;
7708 }
7709
7710 return KERN_SUCCESS;
7711 }
7712
7713
7714 kern_return_t
7715 upl_abort(
7716 upl_t upl,
7717 int error)
7718 {
7719 boolean_t empty;
7720
7721 if (upl == UPL_NULL) {
7722 return KERN_INVALID_ARGUMENT;
7723 }
7724
7725 return upl_abort_range(upl, 0, upl->size, error, &empty);
7726 }
7727
7728
7729 /* an option on commit should be wire */
7730 kern_return_t
7731 upl_commit(
7732 upl_t upl,
7733 upl_page_info_t *page_list,
7734 mach_msg_type_number_t count)
7735 {
7736 boolean_t empty;
7737
7738 if (upl == UPL_NULL) {
7739 return KERN_INVALID_ARGUMENT;
7740 }
7741
7742 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
7743 }
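/*
 * A minimal caller-side sketch (the helper below is hypothetical): an
 * I/O completion path typically commits the transferred range on
 * success and aborts it with an error indication on failure, then
 * deallocates the UPL once commit/abort reports it empty.  This is
 * essentially the pattern the ubc_upl_commit_range()/ubc_upl_abort_range()
 * wrappers mentioned in the comments above implement on behalf of
 * file-system callers.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_upl_io_done(upl_t upl, upl_offset_t offset, upl_size_t size, int io_error)
{
	boolean_t empty = FALSE;

	if (io_error == 0) {
		/* pages transferred successfully: un-busy them and requeue */
		(void) upl_commit_range(upl, offset, size, 0, NULL, 0, &empty);
	} else {
		/* record the error on the pages and release them */
		(void) upl_abort_range(upl, offset, size, UPL_ABORT_ERROR, &empty);
	}
	if (empty) {
		/* reported for Vector UPLs (or when NOTIFY_EMPTY was requested) */
		upl_deallocate(upl);
	}
}
#endif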
7744
7745
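/*
 * iopl_valid_data:  called once the I/O backing an IO-wire UPL has
 * filled its absent pages.  Each busy+absent page of the underlying
 * object is marked valid (and dirty), wired, and woken up, and the
 * global and per-object wired-page counts are updated under "tag".
 */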
7746 void
7747 iopl_valid_data(
7748 upl_t upl,
7749 vm_tag_t tag)
7750 {
7751 vm_object_t object;
7752 vm_offset_t offset;
7753 vm_page_t m, nxt_page = VM_PAGE_NULL;
7754 upl_size_t size;
7755 int wired_count = 0;
7756
7757 if (upl == NULL) {
7758 panic("iopl_valid_data: NULL upl");
7759 }
7760 if (vector_upl_is_valid(upl)) {
7761 panic("iopl_valid_data: vector upl");
7762 }
7763 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
7764 panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
7765 }
7766
7767 object = upl->map_object;
7768
7769 if (object == kernel_object || object == compressor_object) {
7770 panic("iopl_valid_data: object == kernel or compressor");
7771 }
7772
7773 if (object->purgable == VM_PURGABLE_VOLATILE ||
7774 object->purgable == VM_PURGABLE_EMPTY) {
7775 panic("iopl_valid_data: object %p purgable %d",
7776 object, object->purgable);
7777 }
7778
7779 size = upl->size;
7780
7781 vm_object_lock(object);
7782 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
7783
7784 if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
7785 nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
7786 } else {
7787 offset = 0 + upl->offset - object->paging_offset;
7788 }
7789
7790 while (size) {
7791 if (nxt_page != VM_PAGE_NULL) {
7792 m = nxt_page;
7793 nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
7794 } else {
7795 m = vm_page_lookup(object, offset);
7796 offset += PAGE_SIZE;
7797
7798 if (m == VM_PAGE_NULL) {
7799 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
7800 }
7801 }
7802 if (m->vmp_busy) {
7803 if (!m->vmp_absent) {
7804 panic("iopl_valid_data: busy page w/o absent");
7805 }
7806
7807 if (m->vmp_pageq.next || m->vmp_pageq.prev) {
7808 panic("iopl_valid_data: busy+absent page on page queue");
7809 }
7810 if (m->vmp_reusable) {
7811 panic("iopl_valid_data: %p is reusable", m);
7812 }
7813
7814 m->vmp_absent = FALSE;
7815 m->vmp_dirty = TRUE;
7816 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
7817 assert(m->vmp_wire_count == 0);
7818 m->vmp_wire_count++;
7819 assert(m->vmp_wire_count);
7820 if (m->vmp_wire_count == 1) {
7821 m->vmp_q_state = VM_PAGE_IS_WIRED;
7822 wired_count++;
7823 } else {
7824 panic("iopl_valid_data: %p already wired\n", m);
7825 }
7826
7827 PAGE_WAKEUP_DONE(m);
7828 }
7829 size -= PAGE_SIZE;
7830 }
7831 if (wired_count) {
7832 VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
7833 assert(object->resident_page_count >= object->wired_page_count);
7834
7835 /* no need to adjust purgeable accounting for this object: */
7836 assert(object->purgable != VM_PURGABLE_VOLATILE);
7837 assert(object->purgable != VM_PURGABLE_EMPTY);
7838
7839 vm_page_lockspin_queues();
7840 vm_page_wire_count += wired_count;
7841 vm_page_unlock_queues();
7842 }
7843 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
7844 vm_object_unlock(object);
7845 }
7846
7847
7848 void
7849 vm_object_set_pmap_cache_attr(
7850 vm_object_t object,
7851 upl_page_info_array_t user_page_list,
7852 unsigned int num_pages,
7853 boolean_t batch_pmap_op)
7854 {
7855 unsigned int cache_attr = 0;
7856
7857 cache_attr = object->wimg_bits & VM_WIMG_MASK;
7858 assert(user_page_list);
7859 if (cache_attr != VM_WIMG_USE_DEFAULT) {
7860 PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
7861 }
7862 }
7863
7864
7865 boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
7866 kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
7867
7868
7869
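/*
 * vm_object_iopl_wire_full:  fast path used by vm_object_iopl_request()
 * when the request covers an object whose pages are all resident.  Every
 * resident page is referenced, wired and recorded in the lite list /
 * page list; returns FALSE (so the caller falls back to the slow path)
 * as soon as a page is found busy, absent, fictitious, in error,
 * being cleaned, marked for restart or in the laundry.
 */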
7870 boolean_t
7871 vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
7872 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
7873 {
7874 vm_page_t dst_page;
7875 unsigned int entry;
7876 int page_count;
7877 int delayed_unlock = 0;
7878 boolean_t retval = TRUE;
7879 ppnum_t phys_page;
7880
7881 vm_object_lock_assert_exclusive(object);
7882 assert(object->purgable != VM_PURGABLE_VOLATILE);
7883 assert(object->purgable != VM_PURGABLE_EMPTY);
7884 assert(object->pager == NULL);
7885 assert(object->copy == NULL);
7886 assert(object->shadow == NULL);
7887
7888 page_count = object->resident_page_count;
7889 dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
7890
7891 vm_page_lock_queues();
7892
7893 while (page_count--) {
7894 if (dst_page->vmp_busy ||
7895 dst_page->vmp_fictitious ||
7896 dst_page->vmp_absent ||
7897 dst_page->vmp_error ||
7898 dst_page->vmp_cleaning ||
7899 dst_page->vmp_restart ||
7900 dst_page->vmp_laundry) {
7901 retval = FALSE;
7902 goto done;
7903 }
7904 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
7905 retval = FALSE;
7906 goto done;
7907 }
7908 dst_page->vmp_reference = TRUE;
7909
7910 vm_page_wire(dst_page, tag, FALSE);
7911
7912 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
7913 SET_PAGE_DIRTY(dst_page, FALSE);
7914 }
7915 entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
7916 assert(entry >= 0 && entry < object->resident_page_count);
7917 lite_list[entry >> 5] |= 1 << (entry & 31);
7918
7919 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
7920
7921 if (phys_page > upl->highest_page) {
7922 upl->highest_page = phys_page;
7923 }
7924
7925 if (user_page_list) {
7926 user_page_list[entry].phys_addr = phys_page;
7927 user_page_list[entry].absent = dst_page->vmp_absent;
7928 user_page_list[entry].dirty = dst_page->vmp_dirty;
7929 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
7930 user_page_list[entry].precious = dst_page->vmp_precious;
7931 user_page_list[entry].device = FALSE;
7932 user_page_list[entry].speculative = FALSE;
7933 user_page_list[entry].cs_validated = FALSE;
7934 user_page_list[entry].cs_tainted = FALSE;
7935 user_page_list[entry].cs_nx = FALSE;
7936 user_page_list[entry].needed = FALSE;
7937 user_page_list[entry].mark = FALSE;
7938 }
7939 if (delayed_unlock++ > 256) {
7940 delayed_unlock = 0;
7941 lck_mtx_yield(&vm_page_queue_lock);
7942
7943 VM_CHECK_MEMORYSTATUS;
7944 }
7945 dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
7946 }
7947 done:
7948 vm_page_unlock_queues();
7949
7950 VM_CHECK_MEMORYSTATUS;
7951
7952 return retval;
7953 }
7954
7955
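/*
 * vm_object_iopl_wire_empty:  fast path used by vm_object_iopl_request()
 * when the target object has no resident pages.  Fresh pages are grabbed
 * (zero-filled unless UPL_NOZEROFILL/UPL_NOZEROFILLIO was passed, in
 * which case they are left absent and busy for the upcoming I/O),
 * inserted at *dst_offset, wired, and recorded in the lite list / page
 * list; any deferred ledger update for the owning task is applied
 * before returning.
 */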
7956 kern_return_t
7957 vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
7958 wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset,
7959 int page_count, int* page_grab_count)
7960 {
7961 vm_page_t dst_page;
7962 boolean_t no_zero_fill = FALSE;
7963 int interruptible;
7964 int pages_wired = 0;
7965 int pages_inserted = 0;
7966 int entry = 0;
7967 uint64_t delayed_ledger_update = 0;
7968 kern_return_t ret = KERN_SUCCESS;
7969 int grab_options;
7970 ppnum_t phys_page;
7971
7972 vm_object_lock_assert_exclusive(object);
7973 assert(object->purgable != VM_PURGABLE_VOLATILE);
7974 assert(object->purgable != VM_PURGABLE_EMPTY);
7975 assert(object->pager == NULL);
7976 assert(object->copy == NULL);
7977 assert(object->shadow == NULL);
7978
7979 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
7980 interruptible = THREAD_ABORTSAFE;
7981 } else {
7982 interruptible = THREAD_UNINT;
7983 }
7984
7985 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
7986 no_zero_fill = TRUE;
7987 }
7988
7989 grab_options = 0;
7990 #if CONFIG_SECLUDED_MEMORY
7991 if (object->can_grab_secluded) {
7992 grab_options |= VM_PAGE_GRAB_SECLUDED;
7993 }
7994 #endif /* CONFIG_SECLUDED_MEMORY */
7995
7996 while (page_count--) {
7997 while ((dst_page = vm_page_grab_options(grab_options))
7998 == VM_PAGE_NULL) {
7999 OSAddAtomic(page_count, &vm_upl_wait_for_pages);
8000
8001 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
8002
8003 if (vm_page_wait(interruptible) == FALSE) {
8004 /*
8005 * interrupted case
8006 */
8007 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8008
8009 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
8010
8011 ret = MACH_SEND_INTERRUPTED;
8012 goto done;
8013 }
8014 OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
8015
8016 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
8017 }
8018 if (no_zero_fill == FALSE) {
8019 vm_page_zero_fill(dst_page);
8020 } else {
8021 dst_page->vmp_absent = TRUE;
8022 }
8023
8024 dst_page->vmp_reference = TRUE;
8025
8026 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8027 SET_PAGE_DIRTY(dst_page, FALSE);
8028 }
8029 if (dst_page->vmp_absent == FALSE) {
8030 assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q);
8031 assert(dst_page->vmp_wire_count == 0);
8032 dst_page->vmp_wire_count++;
8033 dst_page->vmp_q_state = VM_PAGE_IS_WIRED;
8034 assert(dst_page->vmp_wire_count);
8035 pages_wired++;
8036 PAGE_WAKEUP_DONE(dst_page);
8037 }
8038 pages_inserted++;
8039
8040 vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
8041
8042 lite_list[entry >> 5] |= 1 << (entry & 31);
8043
8044 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8045
8046 if (phys_page > upl->highest_page) {
8047 upl->highest_page = phys_page;
8048 }
8049
8050 if (user_page_list) {
8051 user_page_list[entry].phys_addr = phys_page;
8052 user_page_list[entry].absent = dst_page->vmp_absent;
8053 user_page_list[entry].dirty = dst_page->vmp_dirty;
8054 user_page_list[entry].free_when_done = FALSE;
8055 user_page_list[entry].precious = FALSE;
8056 user_page_list[entry].device = FALSE;
8057 user_page_list[entry].speculative = FALSE;
8058 user_page_list[entry].cs_validated = FALSE;
8059 user_page_list[entry].cs_tainted = FALSE;
8060 user_page_list[entry].cs_nx = FALSE;
8061 user_page_list[entry].needed = FALSE;
8062 user_page_list[entry].mark = FALSE;
8063 }
8064 entry++;
8065 *dst_offset += PAGE_SIZE_64;
8066 }
8067 done:
8068 if (pages_wired) {
8069 vm_page_lockspin_queues();
8070 vm_page_wire_count += pages_wired;
8071 vm_page_unlock_queues();
8072 }
8073 if (pages_inserted) {
8074 if (object->internal) {
8075 OSAddAtomic(pages_inserted, &vm_page_internal_count);
8076 } else {
8077 OSAddAtomic(pages_inserted, &vm_page_external_count);
8078 }
8079 }
8080 if (delayed_ledger_update) {
8081 task_t owner;
8082 int ledger_idx_volatile;
8083 int ledger_idx_nonvolatile;
8084 int ledger_idx_volatile_compressed;
8085 int ledger_idx_nonvolatile_compressed;
8086 boolean_t do_footprint;
8087
8088 owner = VM_OBJECT_OWNER(object);
8089 assert(owner);
8090
8091 vm_object_ledger_tag_ledgers(object,
8092 &ledger_idx_volatile,
8093 &ledger_idx_nonvolatile,
8094 &ledger_idx_volatile_compressed,
8095 &ledger_idx_nonvolatile_compressed,
8096 &do_footprint);
8097
8098 /* more non-volatile bytes */
8099 ledger_credit(owner->ledger,
8100 ledger_idx_nonvolatile,
8101 delayed_ledger_update);
8102 if (do_footprint) {
8103 /* more footprint */
8104 ledger_credit(owner->ledger,
8105 task_ledgers.phys_footprint,
8106 delayed_ledger_update);
8107 }
8108 }
8109
8110 assert(page_grab_count);
8111 *page_grab_count = pages_inserted;
8112
8113 return ret;
8114 }
8115
8116
8117
8118 kern_return_t
8119 vm_object_iopl_request(
8120 vm_object_t object,
8121 vm_object_offset_t offset,
8122 upl_size_t size,
8123 upl_t *upl_ptr,
8124 upl_page_info_array_t user_page_list,
8125 unsigned int *page_list_count,
8126 upl_control_flags_t cntrl_flags,
8127 vm_tag_t tag)
8128 {
8129 vm_page_t dst_page;
8130 vm_object_offset_t dst_offset;
8131 upl_size_t xfer_size;
8132 upl_t upl = NULL;
8133 unsigned int entry;
8134 wpl_array_t lite_list = NULL;
8135 int no_zero_fill = FALSE;
8136 unsigned int size_in_pages;
8137 int page_grab_count = 0;
8138 u_int32_t psize;
8139 kern_return_t ret;
8140 vm_prot_t prot;
8141 struct vm_object_fault_info fault_info = {};
8142 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
8143 struct vm_page_delayed_work *dwp;
8144 int dw_count;
8145 int dw_limit;
8146 int dw_index;
8147 boolean_t caller_lookup;
8148 int io_tracking_flag = 0;
8149 int interruptible;
8150 ppnum_t phys_page;
8151
8152 boolean_t set_cache_attr_needed = FALSE;
8153 boolean_t free_wired_pages = FALSE;
8154 boolean_t fast_path_empty_req = FALSE;
8155 boolean_t fast_path_full_req = FALSE;
8156
8157 #if DEVELOPMENT || DEBUG
8158 task_t task = current_task();
8159 #endif /* DEVELOPMENT || DEBUG */
8160
8161 if (cntrl_flags & ~UPL_VALID_FLAGS) {
8162 /*
8163 * For forward compatibility's sake,
8164 * reject any unknown flag.
8165 */
8166 return KERN_INVALID_VALUE;
8167 }
8168 if (vm_lopage_needed == FALSE) {
8169 cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
8170 }
8171
8172 if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
8173 if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) {
8174 return KERN_INVALID_VALUE;
8175 }
8176
8177 if (object->phys_contiguous) {
8178 if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) {
8179 return KERN_INVALID_ADDRESS;
8180 }
8181
8182 if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) {
8183 return KERN_INVALID_ADDRESS;
8184 }
8185 }
8186 }
8187 if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) {
8188 no_zero_fill = TRUE;
8189 }
8190
8191 if (cntrl_flags & UPL_COPYOUT_FROM) {
8192 prot = VM_PROT_READ;
8193 } else {
8194 prot = VM_PROT_READ | VM_PROT_WRITE;
8195 }
8196
8197 if ((!object->internal) && (object->paging_offset != 0)) {
8198 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
8199 }
8200
8201 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0);
8202
8203 #if CONFIG_IOSCHED || UPL_DEBUG
8204 if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) {
8205 io_tracking_flag |= UPL_CREATE_IO_TRACKING;
8206 }
8207 #endif
8208
8209 #if CONFIG_IOSCHED
8210 if (object->io_tracking) {
8211 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
8212 if (object != kernel_object) {
8213 io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
8214 }
8215 }
8216 #endif
8217
8218 if (object->phys_contiguous) {
8219 psize = PAGE_SIZE;
8220 } else {
8221 psize = size;
8222 }
8223
8224 if (cntrl_flags & UPL_SET_INTERNAL) {
8225 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
8226
8227 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
8228 lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
8229 ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
8230 if (size == 0) {
8231 user_page_list = NULL;
8232 lite_list = NULL;
8233 }
8234 } else {
8235 upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
8236
8237 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
8238 if (size == 0) {
8239 lite_list = NULL;
8240 }
8241 }
8242 if (user_page_list) {
8243 user_page_list[0].device = FALSE;
8244 }
8245 *upl_ptr = upl;
8246
8247 if (cntrl_flags & UPL_NOZEROFILLIO) {
8248 DTRACE_VM4(upl_nozerofillio,
8249 vm_object_t, object,
8250 vm_object_offset_t, offset,
8251 upl_size_t, size,
8252 upl_t, upl);
8253 }
8254
8255 upl->map_object = object;
8256 upl->size = size;
8257
8258 size_in_pages = size / PAGE_SIZE;
8259
8260 if (object == kernel_object &&
8261 !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
8262 upl->flags |= UPL_KERNEL_OBJECT;
8263 #if UPL_DEBUG
8264 vm_object_lock(object);
8265 #else
8266 vm_object_lock_shared(object);
8267 #endif
8268 } else {
8269 vm_object_lock(object);
8270 vm_object_activity_begin(object);
8271 }
8272 /*
8273 * paging in progress also protects the paging_offset
8274 */
8275 upl->offset = offset + object->paging_offset;
8276
8277 if (cntrl_flags & UPL_BLOCK_ACCESS) {
8278 /*
8279 * The user requested that access to the pages in this UPL
8280 * be blocked until the UPL is committed or aborted.
8281 */
8282 upl->flags |= UPL_ACCESS_BLOCKED;
8283 }
8284
8285 #if CONFIG_IOSCHED || UPL_DEBUG
8286 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
8287 vm_object_activity_begin(object);
8288 queue_enter(&object->uplq, upl, upl_t, uplq);
8289 }
8290 #endif
8291
8292 if (object->phys_contiguous) {
8293 if (upl->flags & UPL_ACCESS_BLOCKED) {
8294 assert(!object->blocked_access);
8295 object->blocked_access = TRUE;
8296 }
8297
8298 vm_object_unlock(object);
8299
8300 /*
8301 * don't need any shadow mappings for this one
8302 * since it is already I/O memory
8303 */
8304 upl->flags |= UPL_DEVICE_MEMORY;
8305
8306 upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);
8307
8308 if (user_page_list) {
8309 user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
8310 user_page_list[0].device = TRUE;
8311 }
8312 if (page_list_count != NULL) {
8313 if (upl->flags & UPL_INTERNAL) {
8314 *page_list_count = 0;
8315 } else {
8316 *page_list_count = 1;
8317 }
8318 }
8319
8320 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
8321 #if DEVELOPMENT || DEBUG
8322 if (task != NULL) {
8323 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
8324 }
8325 #endif /* DEVELOPMENT || DEBUG */
8326 return KERN_SUCCESS;
8327 }
8328 if (object != kernel_object && object != compressor_object) {
8329 /*
8330 * Protect user space from future COW operations
8331 */
8332 #if VM_OBJECT_TRACKING_OP_TRUESHARE
8333 if (!object->true_share &&
8334 vm_object_tracking_inited) {
8335 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
8336 int num = 0;
8337
8338 num = OSBacktrace(bt,
8339 VM_OBJECT_TRACKING_BTDEPTH);
8340 btlog_add_entry(vm_object_tracking_btlog,
8341 object,
8342 VM_OBJECT_TRACKING_OP_TRUESHARE,
8343 bt,
8344 num);
8345 }
8346 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
8347
8348 vm_object_lock_assert_exclusive(object);
8349 object->true_share = TRUE;
8350
8351 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
8352 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
8353 }
8354 }
8355
8356 if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
8357 object->copy != VM_OBJECT_NULL) {
8358 /*
8359 * Honor copy-on-write obligations
8360 *
8361 * The caller is gathering these pages and
8362 * might modify their contents. We need to
8363 * make sure that the copy object has its own
8364 * private copies of these pages before we let
8365 * the caller modify them.
8366 *
8367 * NOTE: someone else could map the original object
8368 * after we've done this copy-on-write here, and they
8369 * could then see an inconsistent picture of the memory
8370 * while it's being modified via the UPL. To prevent this,
8371 * we would have to block access to these pages until the
8372 * UPL is released. We could use the UPL_BLOCK_ACCESS
8373 * code path for that...
8374 */
8375 vm_object_update(object,
8376 offset,
8377 size,
8378 NULL,
8379 NULL,
8380 FALSE, /* should_return */
8381 MEMORY_OBJECT_COPY_SYNC,
8382 VM_PROT_NO_CHANGE);
8383 VM_PAGEOUT_DEBUG(iopl_cow, 1);
8384 VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT));
8385 }
8386 if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
8387 object->purgable != VM_PURGABLE_VOLATILE &&
8388 object->purgable != VM_PURGABLE_EMPTY &&
8389 object->copy == NULL &&
8390 size == object->vo_size &&
8391 offset == 0 &&
8392 object->shadow == NULL &&
8393 object->pager == NULL) {
8394 if (object->resident_page_count == size_in_pages) {
8395 assert(object != compressor_object);
8396 assert(object != kernel_object);
8397 fast_path_full_req = TRUE;
8398 } else if (object->resident_page_count == 0) {
8399 assert(object != compressor_object);
8400 assert(object != kernel_object);
8401 fast_path_empty_req = TRUE;
8402 set_cache_attr_needed = TRUE;
8403 }
8404 }
8405
8406 if (cntrl_flags & UPL_SET_INTERRUPTIBLE) {
8407 interruptible = THREAD_ABORTSAFE;
8408 } else {
8409 interruptible = THREAD_UNINT;
8410 }
8411
8412 entry = 0;
8413
8414 xfer_size = size;
8415 dst_offset = offset;
8416 dw_count = 0;
8417
8418 if (fast_path_full_req) {
8419 if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) {
8420 goto finish;
8421 }
8422 /*
8423 * we couldn't complete the processing of this request on the fast path
8424 * so fall through to the slow path and finish up
8425 */
8426 } else if (fast_path_empty_req) {
8427 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
8428 ret = KERN_MEMORY_ERROR;
8429 goto return_err;
8430 }
8431 ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count);
8432
8433 if (ret) {
8434 free_wired_pages = TRUE;
8435 goto return_err;
8436 }
8437 goto finish;
8438 }
8439
8440 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
8441 fault_info.lo_offset = offset;
8442 fault_info.hi_offset = offset + xfer_size;
8443 fault_info.mark_zf_absent = TRUE;
8444 fault_info.interruptible = interruptible;
8445 fault_info.batch_pmap_op = TRUE;
8446
8447 dwp = &dw_array[0];
8448 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
8449
8450 while (xfer_size) {
8451 vm_fault_return_t result;
8452
8453 dwp->dw_mask = 0;
8454
8455 if (fast_path_full_req) {
8456 /*
8457 * if we get here, it means that we ran into a page
8458 * state we couldn't handle in the fast path and
8459 * bailed out to the slow path... since the order
8460 * we look at pages is different between the 2 paths,
8461 * the following check is needed to determine whether
8462 * this page was already processed in the fast path
8463 */
8464 if (lite_list[entry >> 5] & (1 << (entry & 31))) {
8465 goto skip_page;
8466 }
8467 }
8468 dst_page = vm_page_lookup(object, dst_offset);
8469
8470 if (dst_page == VM_PAGE_NULL ||
8471 dst_page->vmp_busy ||
8472 dst_page->vmp_error ||
8473 dst_page->vmp_restart ||
8474 dst_page->vmp_absent ||
8475 dst_page->vmp_fictitious) {
8476 if (object == kernel_object) {
8477 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
8478 }
8479 if (object == compressor_object) {
8480 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
8481 }
8482
8483 if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
8484 ret = KERN_MEMORY_ERROR;
8485 goto return_err;
8486 }
8487 set_cache_attr_needed = TRUE;
8488
8489 /*
8490 * We just looked up the page and the result remains valid
8491 * until the object lock is released, so send it to
8492 * vm_fault_page() (as "dst_page"), to avoid having to
8493 * look it up again there.
8494 */
8495 caller_lookup = TRUE;
8496
8497 do {
8498 vm_page_t top_page;
8499 kern_return_t error_code;
8500
8501 fault_info.cluster_size = xfer_size;
8502
8503 vm_object_paging_begin(object);
8504
8505 result = vm_fault_page(object, dst_offset,
8506 prot | VM_PROT_WRITE, FALSE,
8507 caller_lookup,
8508 &prot, &dst_page, &top_page,
8509 (int *)0,
8510 &error_code, no_zero_fill,
8511 FALSE, &fault_info);
8512
8513 /* our lookup is no longer valid at this point */
8514 caller_lookup = FALSE;
8515
8516 switch (result) {
8517 case VM_FAULT_SUCCESS:
8518 page_grab_count++;
8519
8520 if (!dst_page->vmp_absent) {
8521 PAGE_WAKEUP_DONE(dst_page);
8522 } else {
8523 /*
8524 * we only get back an absent page if we
8525 * requested that it not be zero-filled
8526 * because we are about to fill it via I/O
8527 *
8528 * absent pages should be left BUSY
8529 * to prevent them from being faulted
8530 * into an address space before we've
8531 * had a chance to complete the I/O on
8532 * them since they may contain info that
8533 * shouldn't be seen by the faulting task
8534 */
8535 }
8536 /*
8537 * Release paging references and
8538 * top-level placeholder page, if any.
8539 */
8540 if (top_page != VM_PAGE_NULL) {
8541 vm_object_t local_object;
8542
8543 local_object = VM_PAGE_OBJECT(top_page);
8544
8545 /*
8546 * comparing 2 packed pointers
8547 */
8548 if (top_page->vmp_object != dst_page->vmp_object) {
8549 vm_object_lock(local_object);
8550 VM_PAGE_FREE(top_page);
8551 vm_object_paging_end(local_object);
8552 vm_object_unlock(local_object);
8553 } else {
8554 VM_PAGE_FREE(top_page);
8555 vm_object_paging_end(local_object);
8556 }
8557 }
8558 vm_object_paging_end(object);
8559 break;
8560
8561 case VM_FAULT_RETRY:
8562 vm_object_lock(object);
8563 break;
8564
8565 case VM_FAULT_MEMORY_SHORTAGE:
8566 OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
8567
8568 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
8569
8570 if (vm_page_wait(interruptible)) {
8571 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
8572
8573 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
8574 vm_object_lock(object);
8575
8576 break;
8577 }
8578 OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
8579
8580 VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
8581
8582 /* fall thru */
8583
8584 case VM_FAULT_INTERRUPTED:
8585 error_code = MACH_SEND_INTERRUPTED;
8586 case VM_FAULT_MEMORY_ERROR:
8587 memory_error:
8588 ret = (error_code ? error_code: KERN_MEMORY_ERROR);
8589
8590 vm_object_lock(object);
8591 goto return_err;
8592
8593 case VM_FAULT_SUCCESS_NO_VM_PAGE:
8594 /* success but no page: fail */
8595 vm_object_paging_end(object);
8596 vm_object_unlock(object);
8597 goto memory_error;
8598
8599 default:
8600 panic("vm_object_iopl_request: unexpected error"
8601 " 0x%x from vm_fault_page()\n", result);
8602 }
8603 } while (result != VM_FAULT_SUCCESS);
8604 }
8605 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8606
8607 if (upl->flags & UPL_KERNEL_OBJECT) {
8608 goto record_phys_addr;
8609 }
8610
8611 if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8612 dst_page->vmp_busy = TRUE;
8613 goto record_phys_addr;
8614 }
8615
8616 if (dst_page->vmp_cleaning) {
8617 /*
8618 * Someone else is cleaning this page in place.
8619 * In theory, we should be able to proceed and use this
8620 * page but they'll probably end up clearing the "busy"
8621 * bit on it in upl_commit_range() but they didn't set
8622 * it, so they would clear our "busy" bit and open
8623 * us to race conditions.
8624 * We'd better wait for the cleaning to complete and
8625 * then try again.
8626 */
8627 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
8628 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
8629 continue;
8630 }
8631 if (dst_page->vmp_laundry) {
8632 vm_pageout_steal_laundry(dst_page, FALSE);
8633 }
8634
8635 if ((cntrl_flags & UPL_NEED_32BIT_ADDR) &&
8636 phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) {
8637 vm_page_t low_page;
8638 int refmod;
8639
8640 /*
8641 * support devices that can't DMA above 32 bits
8642 * by substituting pages from a pool of low address
8643 * memory for any pages we find above the 4G mark
8644 * can't substitute if the page is already wired because
8645 * we don't know whether that physical address has been
8646 * handed out to some other 64 bit capable DMA device to use
8647 */
8648 if (VM_PAGE_WIRED(dst_page)) {
8649 ret = KERN_PROTECTION_FAILURE;
8650 goto return_err;
8651 }
8652 low_page = vm_page_grablo();
8653
8654 if (low_page == VM_PAGE_NULL) {
8655 ret = KERN_RESOURCE_SHORTAGE;
8656 goto return_err;
8657 }
8658 /*
8659 * from here until the vm_page_replace completes
8660 * we mustn't drop the object lock... we don't
8661 * want anyone refaulting this page in and using
8662 * it after we disconnect it... we want the fault
8663 * to find the new page being substituted.
8664 */
8665 if (dst_page->vmp_pmapped) {
8666 refmod = pmap_disconnect(phys_page);
8667 } else {
8668 refmod = 0;
8669 }
8670
8671 if (!dst_page->vmp_absent) {
8672 vm_page_copy(dst_page, low_page);
8673 }
8674
8675 low_page->vmp_reference = dst_page->vmp_reference;
8676 low_page->vmp_dirty = dst_page->vmp_dirty;
8677 low_page->vmp_absent = dst_page->vmp_absent;
8678
8679 if (refmod & VM_MEM_REFERENCED) {
8680 low_page->vmp_reference = TRUE;
8681 }
8682 if (refmod & VM_MEM_MODIFIED) {
8683 SET_PAGE_DIRTY(low_page, FALSE);
8684 }
8685
8686 vm_page_replace(low_page, object, dst_offset);
8687
8688 dst_page = low_page;
8689 /*
8690 * vm_page_grablo returned the page marked
8691 * BUSY... we don't need a PAGE_WAKEUP_DONE
8692 * here, because we've never dropped the object lock
8693 */
8694 if (!dst_page->vmp_absent) {
8695 dst_page->vmp_busy = FALSE;
8696 }
8697
8698 phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
8699 }
8700 if (!dst_page->vmp_busy) {
8701 dwp->dw_mask |= DW_vm_page_wire;
8702 }
8703
8704 if (cntrl_flags & UPL_BLOCK_ACCESS) {
8705 /*
8706 * Mark the page "busy" to block any future page fault
8707 * on this page in addition to wiring it.
8708 * We'll also remove the mapping
8709 * of all these pages before leaving this routine.
8710 */
8711 assert(!dst_page->vmp_fictitious);
8712 dst_page->vmp_busy = TRUE;
8713 }
8714 /*
8715 * expect the page to be used
8716 * page queues lock must be held to set 'reference'
8717 */
8718 dwp->dw_mask |= DW_set_reference;
8719
8720 if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
8721 SET_PAGE_DIRTY(dst_page, TRUE);
8722 }
8723 if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
8724 pmap_sync_page_attributes_phys(phys_page);
8725 dst_page->vmp_written_by_kernel = FALSE;
8726 }
8727
8728 record_phys_addr:
8729 if (dst_page->vmp_busy) {
8730 upl->flags |= UPL_HAS_BUSY;
8731 }
8732
8733 lite_list[entry >> 5] |= 1 << (entry & 31);
8734
8735 if (phys_page > upl->highest_page) {
8736 upl->highest_page = phys_page;
8737 }
8738
8739 if (user_page_list) {
8740 user_page_list[entry].phys_addr = phys_page;
8741 user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
8742 user_page_list[entry].absent = dst_page->vmp_absent;
8743 user_page_list[entry].dirty = dst_page->vmp_dirty;
8744 user_page_list[entry].precious = dst_page->vmp_precious;
8745 user_page_list[entry].device = FALSE;
8746 user_page_list[entry].needed = FALSE;
8747 if (dst_page->vmp_clustered == TRUE) {
8748 user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
8749 } else {
8750 user_page_list[entry].speculative = FALSE;
8751 }
8752 user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
8753 user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
8754 user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
8755 user_page_list[entry].mark = FALSE;
8756 }
8757 if (object != kernel_object && object != compressor_object) {
8758 /*
8759 * someone is explicitly grabbing this page...
8760 * update clustered and speculative state
8761 *
8762 */
8763 if (dst_page->vmp_clustered) {
8764 VM_PAGE_CONSUME_CLUSTERED(dst_page);
8765 }
8766 }
8767 skip_page:
8768 entry++;
8769 dst_offset += PAGE_SIZE_64;
8770 xfer_size -= PAGE_SIZE;
8771
8772 if (dwp->dw_mask) {
8773 VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
8774
8775 if (dw_count >= dw_limit) {
8776 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
8777
8778 dwp = &dw_array[0];
8779 dw_count = 0;
8780 }
8781 }
8782 }
8783 assert(entry == size_in_pages);
8784
8785 if (dw_count) {
8786 vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
8787 }
8788 finish:
8789 if (user_page_list && set_cache_attr_needed == TRUE) {
8790 vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
8791 }
8792
8793 if (page_list_count != NULL) {
8794 if (upl->flags & UPL_INTERNAL) {
8795 *page_list_count = 0;
8796 } else if (*page_list_count > size_in_pages) {
8797 *page_list_count = size_in_pages;
8798 }
8799 }
8800 vm_object_unlock(object);
8801
8802 if (cntrl_flags & UPL_BLOCK_ACCESS) {
8803 /*
8804 * We've marked all the pages "busy" so that future
8805 * page faults will block.
8806 * Now remove the mapping for these pages, so that they
8807 * can't be accessed without causing a page fault.
8808 */
8809 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
8810 PMAP_NULL, 0, VM_PROT_NONE);
8811 assert(!object->blocked_access);
8812 object->blocked_access = TRUE;
8813 }
8814
8815 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
8816 #if DEVELOPMENT || DEBUG
8817 if (task != NULL) {
8818 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
8819 }
8820 #endif /* DEVELOPMENT || DEBUG */
8821 return KERN_SUCCESS;
8822
8823 return_err:
8824 dw_index = 0;
8825
8826 for (; offset < dst_offset; offset += PAGE_SIZE) {
8827 boolean_t need_unwire;
8828
8829 dst_page = vm_page_lookup(object, offset);
8830
8831 if (dst_page == VM_PAGE_NULL) {
8832 panic("vm_object_iopl_request: Wired page missing. \n");
8833 }
8834
8835 /*
8836 * if we've already processed this page in an earlier
8837 * dw_do_work, we need to undo the wiring... we will
8838 * leave the dirty and reference bits on if they
8839 * were set, since we don't have a good way of knowing
8840 * what the previous state was and we won't get here
8841 * under any normal circumstances... we will always
8842 * clear BUSY and wakeup any waiters via vm_page_free
8843 * or PAGE_WAKEUP_DONE
8844 */
8845 need_unwire = TRUE;
8846
8847 if (dw_count) {
8848 if (dw_array[dw_index].dw_m == dst_page) {
8849 /*
8850 * still in the deferred work list
8851 * which means we haven't yet called
8852 * vm_page_wire on this page
8853 */
8854 need_unwire = FALSE;
8855
8856 dw_index++;
8857 dw_count--;
8858 }
8859 }
8860 vm_page_lock_queues();
8861
8862 if (dst_page->vmp_absent || free_wired_pages == TRUE) {
8863 vm_page_free(dst_page);
8864
8865 need_unwire = FALSE;
8866 } else {
8867 if (need_unwire == TRUE) {
8868 vm_page_unwire(dst_page, TRUE);
8869 }
8870
8871 PAGE_WAKEUP_DONE(dst_page);
8872 }
8873 vm_page_unlock_queues();
8874
8875 if (need_unwire == TRUE) {
8876 VM_STAT_INCR(reactivations);
8877 }
8878 }
8879 #if UPL_DEBUG
8880 upl->upl_state = 2;
8881 #endif
8882 if (!(upl->flags & UPL_KERNEL_OBJECT)) {
8883 vm_object_activity_end(object);
8884 vm_object_collapse(object, 0, TRUE);
8885 }
8886 vm_object_unlock(object);
8887 upl_destroy(upl);
8888
8889 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0);
8890 #if DEVELOPMENT || DEBUG
8891 if (task != NULL) {
8892 ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
8893 }
8894 #endif /* DEVELOPMENT || DEBUG */
8895 return ret;
8896 }
8897
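/*
 * upl_transpose:  exchange the VM objects backing two UPLs of identical
 * size and zero offset (vm_object_transpose() swaps their pagers,
 * resident pages, etc.), then repoint each UPL at the object that now
 * holds its pages.  Vector UPLs are not supported.
 */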
8898 kern_return_t
8899 upl_transpose(
8900 upl_t upl1,
8901 upl_t upl2)
8902 {
8903 kern_return_t retval;
8904 boolean_t upls_locked;
8905 vm_object_t object1, object2;
8906
8907 if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) {
8908 return KERN_INVALID_ARGUMENT;
8909 }
8910
8911 upls_locked = FALSE;
8912
8913 /*
8914 * Since we need to lock both UPLs at the same time,
8915 * avoid deadlocks by always taking locks in the same order.
8916 */
8917 if (upl1 < upl2) {
8918 upl_lock(upl1);
8919 upl_lock(upl2);
8920 } else {
8921 upl_lock(upl2);
8922 upl_lock(upl1);
8923 }
8924 upls_locked = TRUE; /* the UPLs will need to be unlocked */
8925
8926 object1 = upl1->map_object;
8927 object2 = upl2->map_object;
8928
8929 if (upl1->offset != 0 || upl2->offset != 0 ||
8930 upl1->size != upl2->size) {
8931 /*
8932 * We deal only with full objects, not subsets.
8933 * That's because we exchange the entire backing store info
8934 * for the objects: pager, resident pages, etc... We can't do
8935 * only part of it.
8936 */
8937 retval = KERN_INVALID_VALUE;
8938 goto done;
8939 }
8940
8941 /*
8942 * Transpose the VM objects' backing store.
8943 */
8944 retval = vm_object_transpose(object1, object2,
8945 (vm_object_size_t) upl1->size);
8946
8947 if (retval == KERN_SUCCESS) {
8948 /*
8949 * Make each UPL point to the correct VM object, i.e. the
8950 * object holding the pages that the UPL refers to...
8951 */
8952 #if CONFIG_IOSCHED || UPL_DEBUG
8953 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
8954 vm_object_lock(object1);
8955 vm_object_lock(object2);
8956 }
8957 if (upl1->flags & UPL_TRACKED_BY_OBJECT) {
8958 queue_remove(&object1->uplq, upl1, upl_t, uplq);
8959 }
8960 if (upl2->flags & UPL_TRACKED_BY_OBJECT) {
8961 queue_remove(&object2->uplq, upl2, upl_t, uplq);
8962 }
8963 #endif
8964 upl1->map_object = object2;
8965 upl2->map_object = object1;
8966
8967 #if CONFIG_IOSCHED || UPL_DEBUG
8968 if (upl1->flags & UPL_TRACKED_BY_OBJECT) {
8969 queue_enter(&object2->uplq, upl1, upl_t, uplq);
8970 }
8971 if (upl2->flags & UPL_TRACKED_BY_OBJECT) {
8972 queue_enter(&object1->uplq, upl2, upl_t, uplq);
8973 }
8974 if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
8975 vm_object_unlock(object2);
8976 vm_object_unlock(object1);
8977 }
8978 #endif
8979 }
8980
8981 done:
8982 /*
8983 * Cleanup.
8984 */
8985 if (upls_locked) {
8986 upl_unlock(upl1);
8987 upl_unlock(upl2);
8988 upls_locked = FALSE;
8989 }
8990
8991 return retval;
8992 }
8993
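/*
 * upl_range_needed:  mark "count" entries of an internal UPL's page
 * list, starting at "index", as needed by the caller.  upl_abort_range()
 * uses this hint to keep (rather than free) speculatively read-ahead
 * pages that someone is still waiting for.
 */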
8994 void
8995 upl_range_needed(
8996 upl_t upl,
8997 int index,
8998 int count)
8999 {
9000 upl_page_info_t *user_page_list;
9001 int size_in_pages;
9002
9003 if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
9004 return;
9005 }
9006
9007 size_in_pages = upl->size / PAGE_SIZE;
9008
9009 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
9010
9011 while (count-- && index < size_in_pages) {
9012 user_page_list[index++].needed = TRUE;
9013 }
9014 }
9015
9016
9017 /*
9018 * Reserve of virtual addresses in the kernel address space.
9019 * We need to map the physical pages in the kernel, so that we
9020 * can call the code-signing or slide routines with a kernel
9021 * virtual address. We keep this pool of pre-allocated kernel
9022 * virtual addresses so that we don't have to scan the kernel's
9023 * virtual address space each time we need to work with
9024 * a physical page.
9025 */
9026 decl_simple_lock_data(, vm_paging_lock)
9027 #define VM_PAGING_NUM_PAGES 64
9028 vm_map_offset_t vm_paging_base_address = 0;
9029 boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
9030 int vm_paging_max_index = 0;
9031 int vm_paging_page_waiter = 0;
9032 int vm_paging_page_waiter_total = 0;
9033
9034 unsigned long vm_paging_no_kernel_page = 0;
9035 unsigned long vm_paging_objects_mapped = 0;
9036 unsigned long vm_paging_pages_mapped = 0;
9037 unsigned long vm_paging_objects_mapped_slow = 0;
9038 unsigned long vm_paging_pages_mapped_slow = 0;
9039
9040 void
9041 vm_paging_map_init(void)
9042 {
9043 kern_return_t kr;
9044 vm_map_offset_t page_map_offset;
9045 vm_map_entry_t map_entry;
9046
9047 assert(vm_paging_base_address == 0);
9048
9049 /*
9050 * Initialize our pool of pre-allocated kernel
9051 * virtual addresses.
9052 */
9053 page_map_offset = 0;
9054 kr = vm_map_find_space(kernel_map,
9055 &page_map_offset,
9056 VM_PAGING_NUM_PAGES * PAGE_SIZE,
9057 0,
9058 0,
9059 VM_MAP_KERNEL_FLAGS_NONE,
9060 VM_KERN_MEMORY_NONE,
9061 &map_entry);
9062 if (kr != KERN_SUCCESS) {
9063 panic("vm_paging_map_init: kernel_map full\n");
9064 }
9065 VME_OBJECT_SET(map_entry, kernel_object);
9066 VME_OFFSET_SET(map_entry, page_map_offset);
9067 map_entry->protection = VM_PROT_NONE;
9068 map_entry->max_protection = VM_PROT_NONE;
9069 map_entry->permanent = TRUE;
9070 vm_object_reference(kernel_object);
9071 vm_map_unlock(kernel_map);
9072
9073 assert(vm_paging_base_address == 0);
9074 vm_paging_base_address = page_map_offset;
9075 }
9076
9077 /*
9078 * vm_paging_map_object:
9079 * Maps part of a VM object's pages in the kernel
9080 * virtual address space, using the pre-allocated
9081 * kernel virtual addresses, if possible.
9082 * Context:
9083 * The VM object is locked. This lock will get
9084 * dropped and re-acquired though, so the caller
9085 * must make sure the VM object is kept alive
9086 * (by holding a VM map that has a reference
9087 * on it, for example, or taking an extra reference).
9088 * The page should also be kept busy to prevent
9089 * it from being reclaimed.
9090 */
9091 kern_return_t
9092 vm_paging_map_object(
9093 vm_page_t page,
9094 vm_object_t object,
9095 vm_object_offset_t offset,
9096 vm_prot_t protection,
9097 boolean_t can_unlock_object,
9098 vm_map_size_t *size, /* IN/OUT */
9099 vm_map_offset_t *address, /* OUT */
9100 boolean_t *need_unmap) /* OUT */
9101 {
9102 kern_return_t kr;
9103 vm_map_offset_t page_map_offset;
9104 vm_map_size_t map_size;
9105 vm_object_offset_t object_offset;
9106 int i;
9107
9108 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
9109 /* use permanent 1-to-1 kernel mapping of physical memory ? */
9110 #if __x86_64__
9111 *address = (vm_map_offset_t)
9112 PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) <<
9113 PAGE_SHIFT);
9114 *need_unmap = FALSE;
9115 return KERN_SUCCESS;
9116 #elif __arm__ || __arm64__
9117 *address = (vm_map_offset_t)
9118 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT);
9119 *need_unmap = FALSE;
9120 return KERN_SUCCESS;
9121 #else
9122 #warning "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
9123 #endif
9124
9125 assert(page->vmp_busy);
9126 /*
9127 * Use one of the pre-allocated kernel virtual addresses
9128 * and just enter the VM page in the kernel address space
9129 * at that virtual address.
9130 */
9131 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9132
9133 /*
9134 * Try and find an available kernel virtual address
9135 * from our pre-allocated pool.
9136 */
9137 page_map_offset = 0;
9138 for (;;) {
9139 for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
9140 if (vm_paging_page_inuse[i] == FALSE) {
9141 page_map_offset =
9142 vm_paging_base_address +
9143 (i * PAGE_SIZE);
9144 break;
9145 }
9146 }
9147 if (page_map_offset != 0) {
9148 /* found a space to map our page ! */
9149 break;
9150 }
9151
9152 if (can_unlock_object) {
9153 /*
9154 * If we can afford to unlock the VM object,
9155 * let's take the slow path now...
9156 */
9157 break;
9158 }
9159 /*
9160 * We can't afford to unlock the VM object, so
9161 * let's wait for a space to become available...
9162 */
9163 vm_paging_page_waiter_total++;
9164 vm_paging_page_waiter++;
9165 kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
9166 if (kr == THREAD_WAITING) {
9167 simple_unlock(&vm_paging_lock);
9168 kr = thread_block(THREAD_CONTINUE_NULL);
9169 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9170 }
9171 vm_paging_page_waiter--;
9172 /* ... and try again */
9173 }
9174
9175 if (page_map_offset != 0) {
9176 /*
9177 * We found a kernel virtual address;
9178 * map the physical page to that virtual address.
9179 */
9180 if (i > vm_paging_max_index) {
9181 vm_paging_max_index = i;
9182 }
9183 vm_paging_page_inuse[i] = TRUE;
9184 simple_unlock(&vm_paging_lock);
9185
9186 page->vmp_pmapped = TRUE;
9187
9188 /*
9189 * Keep the VM object locked over the PMAP_ENTER
9190 * and the actual use of the page by the kernel,
9191 * or this pmap mapping might get undone by a
9192 * vm_object_pmap_protect() call...
9193 */
9194 PMAP_ENTER(kernel_pmap,
9195 page_map_offset,
9196 page,
9197 protection,
9198 VM_PROT_NONE,
9199 0,
9200 TRUE,
9201 kr);
9202 assert(kr == KERN_SUCCESS);
9203 vm_paging_objects_mapped++;
9204 vm_paging_pages_mapped++;
9205 *address = page_map_offset;
9206 *need_unmap = TRUE;
9207
9208 #if KASAN
9209 kasan_notify_address(page_map_offset, PAGE_SIZE);
9210 #endif
9211
9212 /* all done and mapped, ready to use! */
9213 return KERN_SUCCESS;
9214 }
9215
9216 /*
9217 * We ran out of pre-allocated kernel virtual
9218 * addresses. Just map the page in the kernel
9219 * the slow and regular way.
9220 */
9221 vm_paging_no_kernel_page++;
9222 simple_unlock(&vm_paging_lock);
9223 }
9224
9225 if (!can_unlock_object) {
9226 *address = 0;
9227 *size = 0;
9228 *need_unmap = FALSE;
9229 return KERN_NOT_SUPPORTED;
9230 }
9231
9232 object_offset = vm_object_trunc_page(offset);
9233 map_size = vm_map_round_page(*size,
9234 VM_MAP_PAGE_MASK(kernel_map));
9235
9236 /*
9237 * Try and map the required range of the object
9238 * in the kernel_map
9239 */
9240
9241 vm_object_reference_locked(object); /* for the map entry */
9242 vm_object_unlock(object);
9243
9244 kr = vm_map_enter(kernel_map,
9245 address,
9246 map_size,
9247 0,
9248 VM_FLAGS_ANYWHERE,
9249 VM_MAP_KERNEL_FLAGS_NONE,
9250 VM_KERN_MEMORY_NONE,
9251 object,
9252 object_offset,
9253 FALSE,
9254 protection,
9255 VM_PROT_ALL,
9256 VM_INHERIT_NONE);
9257 if (kr != KERN_SUCCESS) {
9258 *address = 0;
9259 *size = 0;
9260 *need_unmap = FALSE;
9261 vm_object_deallocate(object); /* for the map entry */
9262 vm_object_lock(object);
9263 return kr;
9264 }
9265
9266 *size = map_size;
9267
9268 /*
9269 * Enter the mapped pages in the page table now.
9270 */
9271 vm_object_lock(object);
9272 /*
9273 * VM object must be kept locked from before PMAP_ENTER()
9274 * until after the kernel is done accessing the page(s).
9275 * Otherwise, the pmap mappings in the kernel could be
9276 * undone by a call to vm_object_pmap_protect().
9277 */
9278
9279 for (page_map_offset = 0;
9280 map_size != 0;
9281 map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
9282 page = vm_page_lookup(object, offset + page_map_offset);
9283 if (page == VM_PAGE_NULL) {
9284 printf("vm_paging_map_object: no page!?\n");
9285 vm_object_unlock(object);
9286 kr = vm_map_remove(kernel_map, *address, *size,
9287 VM_MAP_REMOVE_NO_FLAGS);
9288 assert(kr == KERN_SUCCESS);
9289 *address = 0;
9290 *size = 0;
9291 *need_unmap = FALSE;
9292 vm_object_lock(object);
9293 return KERN_MEMORY_ERROR;
9294 }
9295 page->vmp_pmapped = TRUE;
9296
9297 //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
9298 PMAP_ENTER(kernel_pmap,
9299 *address + page_map_offset,
9300 page,
9301 protection,
9302 VM_PROT_NONE,
9303 0,
9304 TRUE,
9305 kr);
9306 assert(kr == KERN_SUCCESS);
9307 #if KASAN
9308 kasan_notify_address(*address + page_map_offset, PAGE_SIZE);
9309 #endif
9310 }
9311
9312 vm_paging_objects_mapped_slow++;
9313 vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
9314
9315 *need_unmap = TRUE;
9316
9317 return KERN_SUCCESS;
9318 }
9319
9320 /*
9321 * vm_paging_unmap_object:
9322 * Unmaps part of a VM object's pages from the kernel
9323 * virtual address space.
9324 * Context:
9325 * The VM object is locked. This lock will get
9326 * dropped and re-acquired though.
9327 */
9328 void
9329 vm_paging_unmap_object(
9330 vm_object_t object,
9331 vm_map_offset_t start,
9332 vm_map_offset_t end)
9333 {
9334 kern_return_t kr;
9335 int i;
9336
9337 if ((vm_paging_base_address == 0) ||
9338 (start < vm_paging_base_address) ||
9339 (end > (vm_paging_base_address
9340 + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
9341 /*
9342 * We didn't use our pre-allocated pool of
9343 * kernel virtual addresses. Deallocate the
9344 * virtual memory.
9345 */
9346 if (object != VM_OBJECT_NULL) {
9347 vm_object_unlock(object);
9348 }
9349 kr = vm_map_remove(kernel_map, start, end,
9350 VM_MAP_REMOVE_NO_FLAGS);
9351 if (object != VM_OBJECT_NULL) {
9352 vm_object_lock(object);
9353 }
9354 assert(kr == KERN_SUCCESS);
9355 } else {
9356 /*
9357 * We used a kernel virtual address from our
9358 * pre-allocated pool. Put it back in the pool
9359 * for next time.
9360 */
9361 assert(end - start == PAGE_SIZE);
9362 i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
9363 assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
9364
9365 /* undo the pmap mapping */
9366 pmap_remove(kernel_pmap, start, end);
9367
9368 simple_lock(&vm_paging_lock, &vm_pageout_lck_grp);
9369 vm_paging_page_inuse[i] = FALSE;
9370 if (vm_paging_page_waiter) {
9371 thread_wakeup(&vm_paging_page_waiter);
9372 }
9373 simple_unlock(&vm_paging_lock);
9374 }
9375 }
9376
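/*
 * Illustrative sketch (not code from this file) of how the two routines
 * above pair up: a caller holding the VM object lock and a busy page
 * maps the page, uses the mapping, then undoes it if required.
 *
 *	vm_map_size_t	size = PAGE_SIZE;
 *	vm_map_offset_t	kva;
 *	boolean_t	need_unmap;
 *	kern_return_t	kr;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	    FALSE, &size, &kva, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... read up to "size" bytes at (void *)kva ...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(object, kva, kva + size);
 *		}
 *	}
 */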
9377
9378 /*
9379 * page->vmp_object must be locked
9380 */
9381 void
9382 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
9383 {
9384 if (!queues_locked) {
9385 vm_page_lockspin_queues();
9386 }
9387
9388 page->vmp_free_when_done = FALSE;
9389 /*
9390 * We need to drop the laundry count and may also
9391 * need to remove the page from the I/O paging
9392 * queue; vm_pageout_throttle_up handles both
9393 * cases.
9394 *
9395 * The laundry and pageout_queue flags are cleared.
9396 */
9397 vm_pageout_throttle_up(page);
9398
9399 if (!queues_locked) {
9400 vm_page_unlock_queues();
9401 }
9402 }
9403
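/*
 * vector_upl_create:
 * Allocate a "vector" UPL: a UPL created with the UPL_VECTOR flag plus
 * a _vector_upl structure that tracks its sub-UPLs, their aggregate
 * size/offset and per-sub-UPL I/O state.  The pagelist stays NULL
 * until vector_upl_set_pagelist() is called.
 */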
9404 upl_t
9405 vector_upl_create(vm_offset_t upl_offset)
9406 {
9407 int vector_upl_size = sizeof(struct _vector_upl);
9408 int i = 0;
9409 upl_t upl;
9410 vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
9411
9412 upl = upl_create(0, UPL_VECTOR, 0);
9413 upl->vector_upl = vector_upl;
9414 upl->offset = upl_offset;
9415 vector_upl->size = 0;
9416 vector_upl->offset = upl_offset;
9417 vector_upl->invalid_upls = 0;
9418 vector_upl->num_upls = 0;
9419 vector_upl->pagelist = NULL;
9420
9421 for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
9422 vector_upl->upl_iostates[i].size = 0;
9423 vector_upl->upl_iostates[i].offset = 0;
9424 }
9425 return upl;
9426 }
9427
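/*
 * vector_upl_deallocate:
 * Tear down a vector UPL once every sub-UPL has been invalidated
 * (invalid_upls == num_upls), freeing the aggregated pagelist and the
 * _vector_upl structure.  Panics if the vector UPL is still non-empty,
 * is not a vector UPL, or is NULL.
 */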
9428 void
9429 vector_upl_deallocate(upl_t upl)
9430 {
9431 if (upl) {
9432 vector_upl_t vector_upl = upl->vector_upl;
9433 if (vector_upl) {
9434 if (vector_upl->invalid_upls != vector_upl->num_upls) {
9435 panic("Deallocating non-empty Vectored UPL\n");
9436 }
9437 kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
9438 vector_upl->invalid_upls = 0;
9439 vector_upl->num_upls = 0;
9440 vector_upl->pagelist = NULL;
9441 vector_upl->size = 0;
9442 vector_upl->offset = 0;
9443 kfree(vector_upl, sizeof(struct _vector_upl));
9444 vector_upl = (vector_upl_t)0xfeedfeed;
9445 } else {
9446 panic("vector_upl_deallocate was passed a non-vectored upl\n");
9447 }
9448 } else {
9449 panic("vector_upl_deallocate was passed a NULL upl\n");
9450 }
9451 }
9452
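/*
 * vector_upl_is_valid:
 * TRUE if "upl" carries the UPL_VECTOR flag and its vector_upl pointer
 * still looks sane (not NULL and not one of the 0xfeedfeed/0xfeedbeef
 * "already freed" markers).
 */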
9453 boolean_t
9454 vector_upl_is_valid(upl_t upl)
9455 {
9456 if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
9457 vector_upl_t vector_upl = upl->vector_upl;
9458 if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
9459 return FALSE;
9460 } else {
9461 return TRUE;
9462 }
9463 }
9464 return FALSE;
9465 }
9466
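/*
 * vector_upl_set_subupl:
 * With a non-zero io_size, append "subupl" to the vector UPL and grow
 * its size (io_size is rounded up to at least one page).  With
 * io_size == 0, mark that sub-UPL slot invalid instead; returns TRUE
 * once every sub-UPL has been invalidated, telling the caller that the
 * vector UPL itself can be torn down.
 */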
9467 boolean_t
9468 vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
9469 {
9470 if (vector_upl_is_valid(upl)) {
9471 vector_upl_t vector_upl = upl->vector_upl;
9472
9473 if (vector_upl) {
9474 if (subupl) {
9475 if (io_size) {
9476 if (io_size < PAGE_SIZE) {
9477 io_size = PAGE_SIZE;
9478 }
9479 subupl->vector_upl = (void*)vector_upl;
9480 vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
9481 vector_upl->size += io_size;
9482 upl->size += io_size;
9483 } else {
9484 uint32_t i = 0, invalid_upls = 0;
9485 for (i = 0; i < vector_upl->num_upls; i++) {
9486 if (vector_upl->upl_elems[i] == subupl) {
9487 break;
9488 }
9489 }
9490 if (i == vector_upl->num_upls) {
9491 panic("Trying to remove sub-upl when none exists");
9492 }
9493
9494 vector_upl->upl_elems[i] = NULL;
9495 invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
9496 if (invalid_upls == vector_upl->num_upls) {
9497 return TRUE;
9498 } else {
9499 return FALSE;
9500 }
9501 }
9502 } else {
9503 panic("vector_upl_set_subupl was passed a NULL upl element\n");
9504 }
9505 } else {
9506 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
9507 }
9508 } else {
9509 panic("vector_upl_set_subupl was passed a NULL upl\n");
9510 }
9511
9512 return FALSE;
9513 }
9514
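/*
 * vector_upl_set_pagelist:
 * Build the vector UPL's aggregate pagelist by concatenating the
 * internal pagelists of all its sub-UPLs, and propagate the highest
 * physical page seen to the vector UPL.
 */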
9515 void
9516 vector_upl_set_pagelist(upl_t upl)
9517 {
9518 if (vector_upl_is_valid(upl)) {
9519 uint32_t i = 0;
9520 vector_upl_t vector_upl = upl->vector_upl;
9521
9522 if (vector_upl) {
9523 vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;
9524
9525 vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE));
9526
9527 for (i = 0; i < vector_upl->num_upls; i++) {
9528 cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size / PAGE_SIZE;
9529 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
9530 pagelist_size += cur_upl_pagelist_size;
9531 if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) {
9532 upl->highest_page = vector_upl->upl_elems[i]->highest_page;
9533 }
9534 }
9535 assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
9536 } else {
9537 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
9538 }
9539 } else {
9540 panic("vector_upl_set_pagelist was passed a NULL upl\n");
9541 }
9542 }
9543
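/*
 * vector_upl_subupl_byindex:
 * Return the index'th sub-UPL of a vector UPL, or NULL if the index is
 * out of range or the UPL is not a valid vector UPL.
 */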
9544 upl_t
9545 vector_upl_subupl_byindex(upl_t upl, uint32_t index)
9546 {
9547 if (vector_upl_is_valid(upl)) {
9548 vector_upl_t vector_upl = upl->vector_upl;
9549 if (vector_upl) {
9550 if (index < vector_upl->num_upls) {
9551 return vector_upl->upl_elems[index];
9552 }
9553 } else {
9554 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
9555 }
9556 }
9557 return NULL;
9558 }
9559
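/*
 * vector_upl_subupl_byoffset:
 * Find the sub-UPL covering *upl_offset and translate the caller's
 * offset/size pair into that sub-UPL's terms: *upl_offset becomes
 * relative to the sub-UPL and *upl_size is clipped to the sub-UPL's
 * recorded I/O state.  Returns NULL if the matching sub-UPL has
 * already been committed or aborted.
 */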
9560 upl_t
9561 vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
9562 {
9563 if (vector_upl_is_valid(upl)) {
9564 uint32_t i = 0;
9565 vector_upl_t vector_upl = upl->vector_upl;
9566
9567 if (vector_upl) {
9568 upl_t subupl = NULL;
9569 vector_upl_iostates_t subupl_state;
9570
9571 for (i = 0; i < vector_upl->num_upls; i++) {
9572 subupl = vector_upl->upl_elems[i];
9573 subupl_state = vector_upl->upl_iostates[i];
9574 if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
9575 /* We could have been passed an offset/size pair that belongs
9576 * to a UPL element that has already been committed/aborted.
9577 * If so, return NULL.
9578 */
9579 if (subupl == NULL) {
9580 return NULL;
9581 }
9582 if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
9583 *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
9584 if (*upl_size > subupl_state.size) {
9585 *upl_size = subupl_state.size;
9586 }
9587 }
9588 if (*upl_offset >= subupl_state.offset) {
9589 *upl_offset -= subupl_state.offset;
9590 } else if (i) {
9591 panic("Vector UPL offset miscalculation\n");
9592 }
9593 return subupl;
9594 }
9595 }
9596 } else {
9597 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
9598 }
9599 }
9600 return NULL;
9601 }
9602
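/*
 * vector_upl_get_submap / vector_upl_set_submap:
 * Accessors for the submap and destination address that the vector UPL
 * was (or will be) mapped into.
 */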
9603 void
9604 vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
9605 {
9606 *v_upl_submap = NULL;
9607
9608 if (vector_upl_is_valid(upl)) {
9609 vector_upl_t vector_upl = upl->vector_upl;
9610 if (vector_upl) {
9611 *v_upl_submap = vector_upl->submap;
9612 *submap_dst_addr = vector_upl->submap_dst_addr;
9613 } else {
9614 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
9615 }
9616 } else {
9617 panic("vector_upl_get_submap was passed a null UPL\n");
9618 }
9619 }
9620
9621 void
9622 vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
9623 {
9624 if (vector_upl_is_valid(upl)) {
9625 vector_upl_t vector_upl = upl->vector_upl;
9626 if (vector_upl) {
9627 vector_upl->submap = submap;
9628 vector_upl->submap_dst_addr = submap_dst_addr;
9629 } else {
9630 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
9631 }
9632 } else {
9633 panic("vector_upl_get_submap was passed a NULL UPL\n");
9634 }
9635 }
9636
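/*
 * vector_upl_set_iostate / vector_upl_get_iostate /
 * vector_upl_get_iostate_byindex:
 * Record or retrieve the offset/size of the I/O issued against a given
 * sub-UPL (looked up by pointer or by index).  Sizes smaller than a
 * page are rounded up to PAGE_SIZE when recorded.
 */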
9637 void
9638 vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
9639 {
9640 if (vector_upl_is_valid(upl)) {
9641 uint32_t i = 0;
9642 vector_upl_t vector_upl = upl->vector_upl;
9643
9644 if (vector_upl) {
9645 for (i = 0; i < vector_upl->num_upls; i++) {
9646 if (vector_upl->upl_elems[i] == subupl) {
9647 break;
9648 }
9649 }
9650
9651 if (i == vector_upl->num_upls) {
9652 panic("setting sub-upl iostate when none exists");
9653 }
9654
9655 vector_upl->upl_iostates[i].offset = offset;
9656 if (size < PAGE_SIZE) {
9657 size = PAGE_SIZE;
9658 }
9659 vector_upl->upl_iostates[i].size = size;
9660 } else {
9661 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
9662 }
9663 } else {
9664 panic("vector_upl_set_iostate was passed a NULL UPL\n");
9665 }
9666 }
9667
9668 void
9669 vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
9670 {
9671 if (vector_upl_is_valid(upl)) {
9672 uint32_t i = 0;
9673 vector_upl_t vector_upl = upl->vector_upl;
9674
9675 if (vector_upl) {
9676 for (i = 0; i < vector_upl->num_upls; i++) {
9677 if (vector_upl->upl_elems[i] == subupl) {
9678 break;
9679 }
9680 }
9681
9682 if (i == vector_upl->num_upls) {
9683 panic("getting sub-upl iostate when none exists");
9684 }
9685
9686 *offset = vector_upl->upl_iostates[i].offset;
9687 *size = vector_upl->upl_iostates[i].size;
9688 } else {
9689 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
9690 }
9691 } else {
9692 panic("vector_upl_get_iostate was passed a NULL UPL\n");
9693 }
9694 }
9695
9696 void
9697 vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
9698 {
9699 if (vector_upl_is_valid(upl)) {
9700 vector_upl_t vector_upl = upl->vector_upl;
9701 if (vector_upl) {
9702 if (index < vector_upl->num_upls) {
9703 *offset = vector_upl->upl_iostates[index].offset;
9704 *size = vector_upl->upl_iostates[index].size;
9705 } else {
9706 *offset = *size = 0;
9707 }
9708 } else {
9709 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
9710 }
9711 } else {
9712 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
9713 }
9714 }
9715
9716 upl_page_info_t *
9717 upl_get_internal_vectorupl_pagelist(upl_t upl)
9718 {
9719 return ((vector_upl_t)(upl->vector_upl))->pagelist;
9720 }
9721
9722 void *
9723 upl_get_internal_vectorupl(upl_t upl)
9724 {
9725 return upl->vector_upl;
9726 }
9727
9728 vm_size_t
9729 upl_get_internal_pagelist_offset(void)
9730 {
9731 return sizeof(struct upl);
9732 }
9733
9734 void
9735 upl_clear_dirty(
9736 upl_t upl,
9737 boolean_t value)
9738 {
9739 if (value) {
9740 upl->flags |= UPL_CLEAR_DIRTY;
9741 } else {
9742 upl->flags &= ~UPL_CLEAR_DIRTY;
9743 }
9744 }
9745
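/*
 * upl_set_referenced:
 * Take (value == TRUE) or drop (value == FALSE) an external reference
 * on the UPL, under the UPL lock.  Dropping a reference that was never
 * taken is a panic.
 */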
9746 void
9747 upl_set_referenced(
9748 upl_t upl,
9749 boolean_t value)
9750 {
9751 upl_lock(upl);
9752 if (value) {
9753 upl->ext_ref_count++;
9754 } else {
9755 if (!upl->ext_ref_count) {
9756 panic("upl_set_referenced not %p\n", upl);
9757 }
9758 upl->ext_ref_count--;
9759 }
9760 upl_unlock(upl);
9761 }
9762
9763 #if CONFIG_IOSCHED
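/*
 * upl_set_blkno:
 * For UPLs that support I/O expedites, record the starting block
 * number and I/O size in the re-prioritization info of every page
 * covered by [upl_offset, upl_offset + io_size).
 */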
9764 void
9765 upl_set_blkno(
9766 upl_t upl,
9767 vm_offset_t upl_offset,
9768 int io_size,
9769 int64_t blkno)
9770 {
9771 int i, j;
9772 if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
9773 return;
9774 }
9775
9776 assert(upl->upl_reprio_info != 0);
9777 for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
9778 UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
9779 }
9780 }
9781 #endif
9782
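/*
 * memoryshot:
 * Emit a VM-pressure kdebug tracepoint carrying the current
 * active/inactive/free/speculative/throttled page counts, but only
 * when vm_debug_events is enabled.
 */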
9783 inline void
9784 memoryshot(unsigned int event, unsigned int control)
9785 {
9786 if (vm_debug_events) {
9787 KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
9788 vm_page_active_count, vm_page_inactive_count,
9789 vm_page_free_count, vm_page_speculative_count,
9790 vm_page_throttled_count);
9791 } else {
9792 (void) event;
9793 (void) control;
9794 }
9795 }
9796
9797 #ifdef MACH_BSD
9798
9799 boolean_t
9800 upl_device_page(upl_page_info_t *upl)
9801 {
9802 return UPL_DEVICE_PAGE(upl);
9803 }
9804 boolean_t
9805 upl_page_present(upl_page_info_t *upl, int index)
9806 {
9807 return UPL_PAGE_PRESENT(upl, index);
9808 }
9809 boolean_t
9810 upl_speculative_page(upl_page_info_t *upl, int index)
9811 {
9812 return UPL_SPECULATIVE_PAGE(upl, index);
9813 }
9814 boolean_t
9815 upl_dirty_page(upl_page_info_t *upl, int index)
9816 {
9817 return UPL_DIRTY_PAGE(upl, index);
9818 }
9819 boolean_t
9820 upl_valid_page(upl_page_info_t *upl, int index)
9821 {
9822 return UPL_VALID_PAGE(upl, index);
9823 }
9824 ppnum_t
9825 upl_phys_page(upl_page_info_t *upl, int index)
9826 {
9827 return UPL_PHYS_PAGE(upl, index);
9828 }
9829
9830 void
9831 upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
9832 {
9833 upl[index].mark = v;
9834 }
9835
9836 boolean_t
9837 upl_page_get_mark(upl_page_info_t *upl, int index)
9838 {
9839 return upl[index].mark;
9840 }
9841
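/*
 * vm_countdirtypages:
 * Debugging helper: walk the inactive, throttled and anonymous queues,
 * then the active queue, counting dirty, "free when done" and precious
 * pages, and printf the two sets of totals.
 */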
9842 void
9843 vm_countdirtypages(void)
9844 {
9845 vm_page_t m;
9846 int dpages;
9847 int pgopages;
9848 int precpages;
9849
9850
9851 dpages = 0;
9852 pgopages = 0;
9853 precpages = 0;
9854
9855 vm_page_lock_queues();
9856 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
9857 do {
9858 if (m == (vm_page_t)0) {
9859 break;
9860 }
9861
9862 if (m->vmp_dirty) {
9863 dpages++;
9864 }
9865 if (m->vmp_free_when_done) {
9866 pgopages++;
9867 }
9868 if (m->vmp_precious) {
9869 precpages++;
9870 }
9871
9872 assert(VM_PAGE_OBJECT(m) != kernel_object);
9873 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
9874 if (m == (vm_page_t)0) {
9875 break;
9876 }
9877 } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
9878 vm_page_unlock_queues();
9879
9880 vm_page_lock_queues();
9881 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
9882 do {
9883 if (m == (vm_page_t)0) {
9884 break;
9885 }
9886
9887 dpages++;
9888 assert(m->vmp_dirty);
9889 assert(!m->vmp_free_when_done);
9890 assert(VM_PAGE_OBJECT(m) != kernel_object);
9891 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
9892 if (m == (vm_page_t)0) {
9893 break;
9894 }
9895 } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
9896 vm_page_unlock_queues();
9897
9898 vm_page_lock_queues();
9899 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
9900 do {
9901 if (m == (vm_page_t)0) {
9902 break;
9903 }
9904
9905 if (m->vmp_dirty) {
9906 dpages++;
9907 }
9908 if (m->vmp_free_when_done) {
9909 pgopages++;
9910 }
9911 if (m->vmp_precious) {
9912 precpages++;
9913 }
9914
9915 assert(VM_PAGE_OBJECT(m) != kernel_object);
9916 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
9917 if (m == (vm_page_t)0) {
9918 break;
9919 }
9920 } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
9921 vm_page_unlock_queues();
9922
9923 printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
9924
9925 dpages = 0;
9926 pgopages = 0;
9927 precpages = 0;
9928
9929 vm_page_lock_queues();
9930 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
9931
9932 do {
9933 if (m == (vm_page_t)0) {
9934 break;
9935 }
9936 if (m->vmp_dirty) {
9937 dpages++;
9938 }
9939 if (m->vmp_free_when_done) {
9940 pgopages++;
9941 }
9942 if (m->vmp_precious) {
9943 precpages++;
9944 }
9945
9946 assert(VM_PAGE_OBJECT(m) != kernel_object);
9947 m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
9948 if (m == (vm_page_t)0) {
9949 break;
9950 }
9951 } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
9952 vm_page_unlock_queues();
9953
9954 printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
9955 }
9956 #endif /* MACH_BSD */
9957
9958
9959 #if CONFIG_IOSCHED
9960 int
9961 upl_get_cached_tier(upl_t upl)
9962 {
9963 assert(upl);
9964 if (upl->flags & UPL_TRACKED_BY_OBJECT) {
9965 return upl->upl_priority;
9966 }
9967 return -1;
9968 }
9969 #endif /* CONFIG_IOSCHED */
9970
9971
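/*
 * upl_callout_iodone:
 * If an I/O-completion context was attached to the UPL with
 * upl_set_iodone(), invoke its io_done callback with the saved context
 * pointer and error code.
 */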
9972 void
9973 upl_callout_iodone(upl_t upl)
9974 {
9975 struct upl_io_completion *upl_ctx = upl->upl_iodone;
9976
9977 if (upl_ctx) {
9978 void (*iodone_func)(void *, int) = upl_ctx->io_done;
9979
9980 assert(upl_ctx->io_done);
9981
9982 (*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
9983 }
9984 }
9985
9986 void
9987 upl_set_iodone(upl_t upl, void *upl_iodone)
9988 {
9989 upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
9990 }
9991
9992 void
9993 upl_set_iodone_error(upl_t upl, int error)
9994 {
9995 struct upl_io_completion *upl_ctx = upl->upl_iodone;
9996
9997 if (upl_ctx) {
9998 upl_ctx->io_error = error;
9999 }
10000 }
10001
10002
10003 ppnum_t
10004 upl_get_highest_page(
10005 upl_t upl)
10006 {
10007 return upl->highest_page;
10008 }
10009
10010 upl_size_t
10011 upl_get_size(
10012 upl_t upl)
10013 {
10014 return upl->size;
10015 }
10016
10017 upl_t
10018 upl_associated_upl(upl_t upl)
10019 {
10020 return upl->associated_upl;
10021 }
10022
10023 void
10024 upl_set_associated_upl(upl_t upl, upl_t associated_upl)
10025 {
10026 upl->associated_upl = associated_upl;
10027 }
10028
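/*
 * upl_lookup_vnode:
 * Return the vnode backing the UPL's map object via its pager, or NULL
 * if the object is internal (anonymous) memory.
 */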
10029 struct vnode *
10030 upl_lookup_vnode(upl_t upl)
10031 {
10032 if (!upl->map_object->internal) {
10033 return vnode_pager_lookup_vnode(upl->map_object->pager);
10034 } else {
10035 return NULL;
10036 }
10037 }
10038
10039 #if UPL_DEBUG
10040 kern_return_t
10041 upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
10042 {
10043 upl->ubc_alias1 = alias1;
10044 upl->ubc_alias2 = alias2;
10045 return KERN_SUCCESS;
10046 }
10047 int
10048 upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
10049 {
10050 if (al) {
10051 *al = upl->ubc_alias1;
10052 }
10053 if (al2) {
10054 *al2 = upl->ubc_alias2;
10055 }
10056 return KERN_SUCCESS;
10057 }
10058 #endif /* UPL_DEBUG */
10059
10060 #if VM_PRESSURE_EVENTS
10061 /*
10062 * Upward trajectory.
10063 */
10064 extern boolean_t vm_compressor_low_on_space(void);
10065
10066 boolean_t
10067 VM_PRESSURE_NORMAL_TO_WARNING(void)
10068 {
10069 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10070 /* Available pages below our threshold */
10071 if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
10072 /* No frozen processes to kill */
10073 if (memorystatus_frozen_count == 0) {
10074 /* Not enough suspended processes available. */
10075 if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
10076 return TRUE;
10077 }
10078 }
10079 }
10080 return FALSE;
10081 } else {
10082 return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
10083 }
10084 }
10085
10086 boolean_t
10087 VM_PRESSURE_WARNING_TO_CRITICAL(void)
10088 {
10089 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10090 /* Available pages below our threshold */
10091 if (memorystatus_available_pages < memorystatus_available_pages_critical) {
10092 return TRUE;
10093 }
10094 return FALSE;
10095 } else {
10096 return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
10097 }
10098 }
10099
10100 /*
10101 * Downward trajectory.
10102 */
10103 boolean_t
10104 VM_PRESSURE_WARNING_TO_NORMAL(void)
10105 {
10106 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10107 /* Available pages above our threshold */
10108 unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
10109 if (memorystatus_available_pages > target_threshold) {
10110 return TRUE;
10111 }
10112 return FALSE;
10113 } else {
10114 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
10115 }
10116 }
10117
10118 boolean_t
10119 VM_PRESSURE_CRITICAL_TO_WARNING(void)
10120 {
10121 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
10122 /* Available pages above our threshold */
10123 unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
10124 if (memorystatus_available_pages > target_threshold) {
10125 return TRUE;
10126 }
10127 return FALSE;
10128 } else {
10129 return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
10130 }
10131 }
10132 #endif /* VM_PRESSURE_EVENTS */
10133
10134
10135
10136 #define VM_TEST_COLLAPSE_COMPRESSOR 0
10137 #define VM_TEST_WIRE_AND_EXTRACT 0
10138 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
10139 #if __arm64__
10140 #define VM_TEST_KERNEL_OBJECT_FAULT 0
10141 #endif /* __arm64__ */
10142 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
10143
10144 #if VM_TEST_COLLAPSE_COMPRESSOR
10145 extern boolean_t vm_object_collapse_compressor_allowed;
10146 #include <IOKit/IOLib.h>
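/*
 * vm_test_collapse_compressor:
 * Self-test: build a backing object and a top object shadowing it,
 * push some of their pages to the compressor, collapse the top object,
 * and verify that the mapping shows the expected mix of resident,
 * compressed and absent pages.
 */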
10147 static void
10148 vm_test_collapse_compressor(void)
10149 {
10150 vm_object_size_t backing_size, top_size;
10151 vm_object_t backing_object, top_object;
10152 vm_map_offset_t backing_offset, top_offset;
10153 unsigned char *backing_address, *top_address;
10154 kern_return_t kr;
10155
10156 printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
10157
10158 /* create backing object */
10159 backing_size = 15 * PAGE_SIZE;
10160 backing_object = vm_object_allocate(backing_size);
10161 assert(backing_object != VM_OBJECT_NULL);
10162 printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
10163 backing_object);
10164 /* map backing object */
10165 backing_offset = 0;
10166 kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
10167 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
10168 backing_object, 0, FALSE,
10169 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
10170 assert(kr == KERN_SUCCESS);
10171 backing_address = (unsigned char *) backing_offset;
10172 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10173 "mapped backing object %p at 0x%llx\n",
10174 backing_object, (uint64_t) backing_offset);
10175 /* populate with pages to be compressed in backing object */
10176 backing_address[0x1 * PAGE_SIZE] = 0xB1;
10177 backing_address[0x4 * PAGE_SIZE] = 0xB4;
10178 backing_address[0x7 * PAGE_SIZE] = 0xB7;
10179 backing_address[0xa * PAGE_SIZE] = 0xBA;
10180 backing_address[0xd * PAGE_SIZE] = 0xBD;
10181 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10182 "populated pages to be compressed in "
10183 "backing_object %p\n", backing_object);
10184 /* compress backing object */
10185 vm_object_pageout(backing_object);
10186 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
10187 backing_object);
10188 /* wait for all the pages to be gone */
10189 while (*(volatile int *)&backing_object->resident_page_count != 0) {
10190 IODelay(10);
10191 }
10192 printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
10193 backing_object);
10194 /* populate with pages to be resident in backing object */
10195 backing_address[0x0 * PAGE_SIZE] = 0xB0;
10196 backing_address[0x3 * PAGE_SIZE] = 0xB3;
10197 backing_address[0x6 * PAGE_SIZE] = 0xB6;
10198 backing_address[0x9 * PAGE_SIZE] = 0xB9;
10199 backing_address[0xc * PAGE_SIZE] = 0xBC;
10200 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10201 "populated pages to be resident in "
10202 "backing_object %p\n", backing_object);
10203 /* leave the other pages absent */
10204 /* mess with the paging_offset of the backing_object */
10205 assert(backing_object->paging_offset == 0);
10206 backing_object->paging_offset = 0x3000;
10207
10208 /* create top object */
10209 top_size = 9 * PAGE_SIZE;
10210 top_object = vm_object_allocate(top_size);
10211 assert(top_object != VM_OBJECT_NULL);
10212 printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
10213 top_object);
10214 /* map top object */
10215 top_offset = 0;
10216 kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
10217 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
10218 top_object, 0, FALSE,
10219 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
10220 assert(kr == KERN_SUCCESS);
10221 top_address = (unsigned char *) top_offset;
10222 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10223 "mapped top object %p at 0x%llx\n",
10224 top_object, (uint64_t) top_offset);
10225 /* populate with pages to be compressed in top object */
10226 top_address[0x3 * PAGE_SIZE] = 0xA3;
10227 top_address[0x4 * PAGE_SIZE] = 0xA4;
10228 top_address[0x5 * PAGE_SIZE] = 0xA5;
10229 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10230 "populated pages to be compressed in "
10231 "top_object %p\n", top_object);
10232 /* compress top object */
10233 vm_object_pageout(top_object);
10234 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
10235 top_object);
10236 /* wait for all the pages to be gone */
10237 while (top_object->resident_page_count != 0) {
10238 IODelay(10);
10239 }
10240 printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
10241 top_object);
10242 /* populate with pages to be resident in top object */
10243 top_address[0x0 * PAGE_SIZE] = 0xA0;
10244 top_address[0x1 * PAGE_SIZE] = 0xA1;
10245 top_address[0x2 * PAGE_SIZE] = 0xA2;
10246 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10247 "populated pages to be resident in "
10248 "top_object %p\n", top_object);
10249 /* leave the other pages absent */
10250
10251 /* link the 2 objects */
10252 vm_object_reference(backing_object);
10253 top_object->shadow = backing_object;
10254 top_object->vo_shadow_offset = 0x3000;
10255 printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
10256 top_object, backing_object);
10257
10258 /* unmap backing object */
10259 vm_map_remove(kernel_map,
10260 backing_offset,
10261 backing_offset + backing_size,
10262 VM_MAP_REMOVE_NO_FLAGS);
10263 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10264 "unmapped backing_object %p [0x%llx:0x%llx]\n",
10265 backing_object,
10266 (uint64_t) backing_offset,
10267 (uint64_t) (backing_offset + backing_size));
10268
10269 /* collapse */
10270 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
10271 vm_object_lock(top_object);
10272 vm_object_collapse(top_object, 0, FALSE);
10273 vm_object_unlock(top_object);
10274 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
10275
10276 /* did it work? */
10277 if (top_object->shadow != VM_OBJECT_NULL) {
10278 printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
10279 printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10280 if (vm_object_collapse_compressor_allowed) {
10281 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10282 }
10283 } else {
10284 /* check the contents of the mapping */
10285 unsigned char expect[9] =
10286 { 0xA0, 0xA1, 0xA2, /* resident in top */
10287 0xA3, 0xA4, 0xA5, /* compressed in top */
10288 0xB9, /* resident in backing + shadow_offset */
10289 0xBD, /* compressed in backing + shadow_offset + paging_offset */
10290 0x00 }; /* absent in both */
10291 unsigned char actual[9];
10292 unsigned int i, errors;
10293
10294 errors = 0;
10295 for (i = 0; i < sizeof(actual); i++) {
10296 actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
10297 if (actual[i] != expect[i]) {
10298 errors++;
10299 }
10300 }
10301 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10302 "actual [%x %x %x %x %x %x %x %x %x] "
10303 "expect [%x %x %x %x %x %x %x %x %x] "
10304 "%d errors\n",
10305 actual[0], actual[1], actual[2], actual[3],
10306 actual[4], actual[5], actual[6], actual[7],
10307 actual[8],
10308 expect[0], expect[1], expect[2], expect[3],
10309 expect[4], expect[5], expect[6], expect[7],
10310 expect[8],
10311 errors);
10312 if (errors) {
10313 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10314 } else {
10315 printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
10316 }
10317 }
10318 }
10319 #else /* VM_TEST_COLLAPSE_COMPRESSOR */
10320 #define vm_test_collapse_compressor()
10321 #endif /* VM_TEST_COLLAPSE_COMPRESSOR */
10322
10323 #if VM_TEST_WIRE_AND_EXTRACT
10324 extern ledger_template_t task_ledger_template;
10325 #include <mach/mach_vm.h>
10326 extern ppnum_t vm_map_get_phys_page(vm_map_t map,
10327 vm_offset_t offset);
10328 static void
10329 vm_test_wire_and_extract(void)
10330 {
10331 ledger_t ledger;
10332 vm_map_t user_map, wire_map;
10333 mach_vm_address_t user_addr, wire_addr;
10334 mach_vm_size_t user_size, wire_size;
10335 mach_vm_offset_t cur_offset;
10336 vm_prot_t cur_prot, max_prot;
10337 ppnum_t user_ppnum, wire_ppnum;
10338 kern_return_t kr;
10339
10340 ledger = ledger_instantiate(task_ledger_template,
10341 LEDGER_CREATE_ACTIVE_ENTRIES);
10342 user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT),
10343 0x100000000ULL,
10344 0x200000000ULL,
10345 TRUE);
10346 wire_map = vm_map_create(NULL,
10347 0x100000000ULL,
10348 0x200000000ULL,
10349 TRUE);
10350 user_addr = 0;
10351 user_size = 0x10000;
10352 kr = mach_vm_allocate(user_map,
10353 &user_addr,
10354 user_size,
10355 VM_FLAGS_ANYWHERE);
10356 assert(kr == KERN_SUCCESS);
10357 wire_addr = 0;
10358 wire_size = user_size;
10359 kr = mach_vm_remap(wire_map,
10360 &wire_addr,
10361 wire_size,
10362 0,
10363 VM_FLAGS_ANYWHERE,
10364 user_map,
10365 user_addr,
10366 FALSE,
10367 &cur_prot,
10368 &max_prot,
10369 VM_INHERIT_NONE);
10370 assert(kr == KERN_SUCCESS);
10371 for (cur_offset = 0;
10372 cur_offset < wire_size;
10373 cur_offset += PAGE_SIZE) {
10374 kr = vm_map_wire_and_extract(wire_map,
10375 wire_addr + cur_offset,
10376 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
10377 TRUE,
10378 &wire_ppnum);
10379 assert(kr == KERN_SUCCESS);
10380 user_ppnum = vm_map_get_phys_page(user_map,
10381 user_addr + cur_offset);
10382 printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
10383 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10384 kr,
10385 user_map, user_addr + cur_offset, user_ppnum,
10386 wire_map, wire_addr + cur_offset, wire_ppnum);
10387 if (kr != KERN_SUCCESS ||
10388 wire_ppnum == 0 ||
10389 wire_ppnum != user_ppnum) {
10390 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10391 }
10392 }
10393 cur_offset -= PAGE_SIZE;
10394 kr = vm_map_wire_and_extract(wire_map,
10395 wire_addr + cur_offset,
10396 VM_PROT_DEFAULT,
10397 TRUE,
10398 &wire_ppnum);
10399 assert(kr == KERN_SUCCESS);
10400 printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
10401 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10402 kr,
10403 user_map, user_addr + cur_offset, user_ppnum,
10404 wire_map, wire_addr + cur_offset, wire_ppnum);
10405 if (kr != KERN_SUCCESS ||
10406 wire_ppnum == 0 ||
10407 wire_ppnum != user_ppnum) {
10408 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10409 }
10410
10411 printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
10412 }
10413 #else /* VM_TEST_WIRE_AND_EXTRACT */
10414 #define vm_test_wire_and_extract()
10415 #endif /* VM_TEST_WIRE_AND_EXTRACT */
10416
10417 #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
10418 static void
10419 vm_test_page_wire_overflow_panic(void)
10420 {
10421 vm_object_t object;
10422 vm_page_t page;
10423
10424 printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
10425
10426 object = vm_object_allocate(PAGE_SIZE);
10427 vm_object_lock(object);
10428 page = vm_page_alloc(object, 0x0);
10429 vm_page_lock_queues();
10430 do {
10431 vm_page_wire(page, 1, FALSE);
10432 } while (page->wire_count != 0);
10433 vm_page_unlock_queues();
10434 vm_object_unlock(object);
10435 panic("FBDP(%p,%p): wire_count overflow not detected\n",
10436 object, page);
10437 }
10438 #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10439 #define vm_test_page_wire_overflow_panic()
10440 #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10441
10442 #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
10443 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
10444 static void
10445 vm_test_kernel_object_fault(void)
10446 {
10447 kern_return_t kr;
10448 vm_offset_t stack;
10449 uintptr_t frameb[2];
10450 int ret;
10451
10452 kr = kernel_memory_allocate(kernel_map, &stack,
10453 kernel_stack_size + (2 * PAGE_SIZE),
10454 0,
10455 (KMA_KSTACK | KMA_KOBJECT |
10456 KMA_GUARD_FIRST | KMA_GUARD_LAST),
10457 VM_KERN_MEMORY_STACK);
10458 if (kr != KERN_SUCCESS) {
10459 panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
10460 }
10461 ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
10462 if (ret != 0) {
10463 printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
10464 } else {
10465 printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
10466 }
10467 vm_map_remove(kernel_map,
10468 stack,
10469 stack + kernel_stack_size + (2 * PAGE_SIZE),
10470 VM_MAP_REMOVE_KUNWIRE);
10471 stack = 0;
10472 }
10473 #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10474 #define vm_test_kernel_object_fault()
10475 #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10476
10477 #if VM_TEST_DEVICE_PAGER_TRANSPOSE
10478 static void
10479 vm_test_device_pager_transpose(void)
10480 {
10481 memory_object_t device_pager;
10482 vm_object_t anon_object, device_object;
10483 vm_size_t size;
10484 vm_map_offset_t device_mapping;
10485 kern_return_t kr;
10486
10487 size = 3 * PAGE_SIZE;
10488 anon_object = vm_object_allocate(size);
10489 assert(anon_object != VM_OBJECT_NULL);
10490 device_pager = device_pager_setup(NULL, 0, size, 0);
10491 assert(device_pager != NULL);
10492 device_object = memory_object_to_vm_object(device_pager);
10493 assert(device_object != VM_OBJECT_NULL);
10494 #if 0
10495 /*
10496 * Can't actually map this, since another thread might do a
10497 * vm_map_enter() that gets coalesced into this object, which
10498 * would cause the test to fail.
10499 */
10500 vm_map_offset_t anon_mapping = 0;
10501 kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
10502 VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
10503 anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
10504 VM_INHERIT_DEFAULT);
10505 assert(kr == KERN_SUCCESS);
10506 #endif
10507 device_mapping = 0;
10508 kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
10509 VM_FLAGS_ANYWHERE,
10510 VM_MAP_KERNEL_FLAGS_NONE,
10511 VM_KERN_MEMORY_NONE,
10512 (void *)device_pager, 0, FALSE,
10513 VM_PROT_DEFAULT, VM_PROT_ALL,
10514 VM_INHERIT_DEFAULT);
10515 assert(kr == KERN_SUCCESS);
10516 memory_object_deallocate(device_pager);
10517
10518 vm_object_lock(anon_object);
10519 vm_object_activity_begin(anon_object);
10520 anon_object->blocked_access = TRUE;
10521 vm_object_unlock(anon_object);
10522 vm_object_lock(device_object);
10523 vm_object_activity_begin(device_object);
10524 device_object->blocked_access = TRUE;
10525 vm_object_unlock(device_object);
10526
10527 assert(anon_object->ref_count == 1);
10528 assert(!anon_object->named);
10529 assert(device_object->ref_count == 2);
10530 assert(device_object->named);
10531
10532 kr = vm_object_transpose(device_object, anon_object, size);
10533 assert(kr == KERN_SUCCESS);
10534
10535 vm_object_lock(anon_object);
10536 vm_object_activity_end(anon_object);
10537 anon_object->blocked_access = FALSE;
10538 vm_object_unlock(anon_object);
10539 vm_object_lock(device_object);
10540 vm_object_activity_end(device_object);
10541 device_object->blocked_access = FALSE;
10542 vm_object_unlock(device_object);
10543
10544 assert(anon_object->ref_count == 2);
10545 assert(anon_object->named);
10546 #if 0
10547 kr = vm_deallocate(kernel_map, anon_mapping, size);
10548 assert(kr == KERN_SUCCESS);
10549 #endif
10550 assert(device_object->ref_count == 1);
10551 assert(!device_object->named);
10552 kr = vm_deallocate(kernel_map, device_mapping, size);
10553 assert(kr == KERN_SUCCESS);
10554
10555 printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
10556 }
10557 #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
10558 #define vm_test_device_pager_transpose()
10559 #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
10560
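/*
 * vm_tests:
 * Run whichever of the VM self-tests above were enabled at compile
 * time; each call expands to a no-op when its VM_TEST_* flag is 0.
 */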
10561 void
10562 vm_tests(void)
10563 {
10564 vm_test_collapse_compressor();
10565 vm_test_wire_and_extract();
10566 vm_test_page_wire_overflow_panic();
10567 vm_test_kernel_object_fault();
10568 vm_test_device_pager_transpose();
10569 }