2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * Page fault handling module.
65 #include <mach_cluster_stats.h>
66 #include <mach_pagemap.h>
67 #include <libkern/OSAtomic.h>
69 #include <mach/mach_types.h>
70 #include <mach/kern_return.h>
71 #include <mach/message.h> /* for error codes */
72 #include <mach/vm_param.h>
73 #include <mach/vm_behavior.h>
74 #include <mach/memory_object.h>
75 /* For memory_object_data_{request,unlock} */
78 #include <kern/kern_types.h>
79 #include <kern/host_statistics.h>
80 #include <kern/counters.h>
81 #include <kern/task.h>
82 #include <kern/thread.h>
83 #include <kern/sched_prim.h>
84 #include <kern/host.h>
85 #include <kern/mach_param.h>
86 #include <kern/macro_help.h>
87 #include <kern/zalloc.h>
88 #include <kern/misc_protos.h>
89 #include <kern/policy_internal.h>
91 #include <vm/vm_compressor.h>
92 #include <vm/vm_compressor_pager.h>
93 #include <vm/vm_fault.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_page.h>
97 #include <vm/vm_kern.h>
99 #include <vm/vm_pageout.h>
100 #include <vm/vm_protos.h>
101 #include <vm/vm_external.h>
102 #include <vm/memory_object.h>
103 #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
104 #include <vm/vm_shared_region.h>
106 #include <sys/codesign.h>
107 #include <sys/reason.h>
108 #include <sys/signalvar.h>
110 #include <san/kasan.h>
112 #define VM_FAULT_CLASSIFY 0
114 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
116 int vm_protect_privileged_from_untrusted
= 1;
118 unsigned int vm_object_pagein_throttle
= 16;
121 * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which
122 * kicks in when swap space runs out. 64-bit programs have massive address spaces and can leak enormous amounts
123 * of memory if they're buggy and can run the system completely out of swap space. If this happens, we
124 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
125 * keep the UI active so that the user has a chance to kill the offending task before the system
128 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
129 * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold
130 * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a
131 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
134 extern void throttle_lowpri_io(int);
136 extern struct vnode
*vnode_pager_lookup_vnode(memory_object_t
);
138 uint64_t vm_hard_throttle_threshold
;
143 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
145 return vm_wants_task_throttled(current_task()) ||
146 ((vm_page_free_count
< vm_page_throttle_limit
||
147 HARD_THROTTLE_LIMIT_REACHED()) &&
148 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO
) >= THROTTLE_LEVEL_THROTTLED
);
151 #define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
152 #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
154 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6
155 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
158 #define VM_STAT_DECOMPRESSIONS() \
160 VM_STAT_INCR(decompressions); \
161 current_thread()->decompressions++; \
164 boolean_t
current_thread_aborted(void);
166 /* Forward declarations of internal routines. */
167 static kern_return_t
vm_fault_wire_fast(
172 vm_map_entry_t entry
,
174 vm_map_offset_t pmap_addr
,
175 ppnum_t
*physpage_p
);
177 static kern_return_t
vm_fault_internal(
179 vm_map_offset_t vaddr
,
180 vm_prot_t caller_prot
,
181 boolean_t change_wiring
,
185 vm_map_offset_t pmap_addr
,
186 ppnum_t
*physpage_p
);
188 static void vm_fault_copy_cleanup(
192 static void vm_fault_copy_dst_cleanup(
195 #if VM_FAULT_CLASSIFY
196 extern void vm_fault_classify(vm_object_t object
,
197 vm_object_offset_t offset
,
198 vm_prot_t fault_type
);
200 extern void vm_fault_classify_init(void);
203 unsigned long vm_pmap_enter_blocked
= 0;
204 unsigned long vm_pmap_enter_retried
= 0;
206 unsigned long vm_cs_validates
= 0;
207 unsigned long vm_cs_revalidates
= 0;
208 unsigned long vm_cs_query_modified
= 0;
209 unsigned long vm_cs_validated_dirtied
= 0;
210 unsigned long vm_cs_bitmap_validated
= 0;
212 uint64_t vm_cs_defer_to_pmap_cs
= 0;
213 uint64_t vm_cs_defer_to_pmap_cs_not
= 0;
216 void vm_pre_fault(vm_map_offset_t
, vm_prot_t
);
218 extern char *kdp_compressor_decompressed_page
;
219 extern addr64_t kdp_compressor_decompressed_page_paddr
;
220 extern ppnum_t kdp_compressor_decompressed_page_ppnum
;
226 vm_rtfault_record_t
*vm_rtf_records
;
228 #define VMRTF_DEFAULT_BUFSIZE (4096)
229 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
230 int vmrtf_num_records
= VMRTF_NUM_RECORDS_DEFAULT
;
232 static void vm_rtfrecord_lock(void);
233 static void vm_rtfrecord_unlock(void);
234 static void vm_record_rtfault(thread_t
, uint64_t, vm_map_offset_t
, int);
236 lck_spin_t vm_rtfr_slock
;
237 extern lck_grp_t vm_page_lck_grp_bucket
;
238 extern lck_attr_t vm_page_lck_attr
;
241 * Routine: vm_fault_init
243 * Initialize our private data structures.
248 int i
, vm_compressor_temp
;
249 boolean_t need_default_val
= TRUE
;
251 * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is
252 * computed as a percentage of available memory, and the percentage used is scaled inversely with
253 * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
254 * and reduce the value down to 10% for very large memory configurations. This helps give us a
255 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
256 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
259 vm_hard_throttle_threshold
= sane_size
* (35 - MIN((int)(sane_size
/ (1024 * 1024 * 1024)), 25)) / 100;
262 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
265 if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp
, sizeof(vm_compressor_temp
))) {
266 for (i
= 0; i
< VM_PAGER_MAX_MODES
; i
++) {
267 if (vm_compressor_temp
> 0 &&
268 ((vm_compressor_temp
& (1 << i
)) == vm_compressor_temp
)) {
269 need_default_val
= FALSE
;
270 vm_compressor_mode
= vm_compressor_temp
;
274 if (need_default_val
) {
275 printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp
);
278 if (need_default_val
) {
279 /* If no boot arg or incorrect boot arg, try device tree. */
280 PE_get_default("kern.vm_compressor", &vm_compressor_mode
, sizeof(vm_compressor_mode
));
282 printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode
);
284 PE_parse_boot_argn("vm_protect_privileged_from_untrusted", &vm_protect_privileged_from_untrusted
, sizeof(vm_protect_privileged_from_untrusted
));
288 vm_rtfault_record_init(void)
290 PE_parse_boot_argn("vm_rtfault_records", &vmrtf_num_records
, sizeof(vmrtf_num_records
));
292 assert(vmrtf_num_records
>= 1);
293 vmrtf_num_records
= MAX(vmrtf_num_records
, 1);
294 size_t kallocsz
= vmrtf_num_records
* sizeof(vm_rtfault_record_t
);
295 vmrtfrs
.vm_rtf_records
= kalloc(kallocsz
);
296 bzero(vmrtfrs
.vm_rtf_records
, kallocsz
);
297 vmrtfrs
.vmrtfr_maxi
= vmrtf_num_records
- 1;
298 lck_spin_init(&vm_rtfr_slock
, &vm_page_lck_grp_bucket
, &vm_page_lck_attr
);
301 * Routine: vm_fault_cleanup
303 * Clean up the result of vm_fault_page.
305 * The paging reference for "object" is released.
306 * "object" is unlocked.
307 * If "top_page" is not null, "top_page" is
308 * freed and the paging reference for the object
309 * containing it is released.
312 * "object" must be locked.
319 vm_object_paging_end(object
);
320 vm_object_unlock(object
);
322 if (top_page
!= VM_PAGE_NULL
) {
323 object
= VM_PAGE_OBJECT(top_page
);
325 vm_object_lock(object
);
326 VM_PAGE_FREE(top_page
);
327 vm_object_paging_end(object
);
328 vm_object_unlock(object
);
332 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
335 boolean_t vm_page_deactivate_behind
= TRUE
;
337 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
339 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128
340 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */
341 /* we use it to size an array on the stack */
343 int vm_default_behind
= VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW
;
345 #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
348 * vm_page_is_sequential
350 * Determine if sequential access is in progress
351 * in accordance with the behavior specified.
352 * Update state to indicate current access pattern.
354 * object must have at least the shared lock held
358 vm_fault_is_sequential(
360 vm_object_offset_t offset
,
361 vm_behavior_t behavior
)
363 vm_object_offset_t last_alloc
;
367 last_alloc
= object
->last_alloc
;
368 sequential
= object
->sequential
;
369 orig_sequential
= sequential
;
372 case VM_BEHAVIOR_RANDOM
:
374 * reset indicator of sequential behavior
379 case VM_BEHAVIOR_SEQUENTIAL
:
380 if (offset
&& last_alloc
== offset
- PAGE_SIZE_64
) {
382 * advance indicator of sequential behavior
384 if (sequential
< MAX_SEQUENTIAL_RUN
) {
385 sequential
+= PAGE_SIZE
;
389 * reset indicator of sequential behavior
395 case VM_BEHAVIOR_RSEQNTL
:
396 if (last_alloc
&& last_alloc
== offset
+ PAGE_SIZE_64
) {
398 * advance indicator of sequential behavior
400 if (sequential
> -MAX_SEQUENTIAL_RUN
) {
401 sequential
-= PAGE_SIZE
;
405 * reset indicator of sequential behavior
411 case VM_BEHAVIOR_DEFAULT
:
413 if (offset
&& last_alloc
== (offset
- PAGE_SIZE_64
)) {
415 * advance indicator of sequential behavior
417 if (sequential
< 0) {
420 if (sequential
< MAX_SEQUENTIAL_RUN
) {
421 sequential
+= PAGE_SIZE
;
423 } else if (last_alloc
&& last_alloc
== (offset
+ PAGE_SIZE_64
)) {
425 * advance indicator of sequential behavior
427 if (sequential
> 0) {
430 if (sequential
> -MAX_SEQUENTIAL_RUN
) {
431 sequential
-= PAGE_SIZE
;
435 * reset indicator of sequential behavior
441 if (sequential
!= orig_sequential
) {
442 if (!OSCompareAndSwap(orig_sequential
, sequential
, (UInt32
*)&object
->sequential
)) {
444 * if someone else has already updated object->sequential
445 * don't bother trying to update it or object->last_alloc
451 * I'd like to do this with a OSCompareAndSwap64, but that
452 * doesn't exist for PPC... however, it shouldn't matter
453 * that much... last_alloc is maintained so that we can determine
454 * if a sequential access pattern is taking place... if only
455 * one thread is banging on this object, no problem with the unprotected
456 * update... if 2 or more threads are banging away, we run the risk of
457 * someone seeing a mangled update... however, in the face of multiple
458 * accesses, no sequential access pattern can develop anyway, so we
459 * haven't lost any real info.
461 object
->last_alloc
= offset
;
465 int vm_page_deactivate_behind_count
= 0;
468 * vm_page_deactivate_behind
470 * Determine if sequential access is in progress
471 * in accordance with the behavior specified. If
472 * so, compute a potential page to deactivate and
475 * object must be locked.
477 * return TRUE if we actually deactivate a page
481 vm_fault_deactivate_behind(
483 vm_object_offset_t offset
,
484 vm_behavior_t behavior
)
487 int pages_in_run
= 0;
488 int max_pages_in_run
= 0;
490 int sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
491 vm_object_offset_t run_offset
= 0;
492 vm_object_offset_t pg_offset
= 0;
494 vm_page_t page_run
[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER
];
498 dbgTrace(0xBEEF0018, (unsigned int) object
, (unsigned int) vm_fault_deactivate_behind
); /* (TEST/DEBUG) */
501 if (object
== kernel_object
|| vm_page_deactivate_behind
== FALSE
) {
503 * Do not deactivate pages from the kernel object: they
504 * are not intended to become pageable.
505 * or we've disabled the deactivate behind mechanism
509 if ((sequential_run
= object
->sequential
)) {
510 if (sequential_run
< 0) {
511 sequential_behavior
= VM_BEHAVIOR_RSEQNTL
;
512 sequential_run
= 0 - sequential_run
;
514 sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
518 case VM_BEHAVIOR_RANDOM
:
520 case VM_BEHAVIOR_SEQUENTIAL
:
521 if (sequential_run
>= (int)PAGE_SIZE
) {
522 run_offset
= 0 - PAGE_SIZE_64
;
523 max_pages_in_run
= 1;
526 case VM_BEHAVIOR_RSEQNTL
:
527 if (sequential_run
>= (int)PAGE_SIZE
) {
528 run_offset
= PAGE_SIZE_64
;
529 max_pages_in_run
= 1;
532 case VM_BEHAVIOR_DEFAULT
:
534 { vm_object_offset_t behind
= vm_default_behind
* PAGE_SIZE_64
;
537 * determine if the run of sequential accesss has been
538 * long enough on an object with default access behavior
539 * to consider it for deactivation
541 if ((uint64_t)sequential_run
>= behind
&& (sequential_run
% (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER
* PAGE_SIZE
)) == 0) {
543 * the comparisons between offset and behind are done
544 * in this kind of odd fashion in order to prevent wrap around
547 if (sequential_behavior
== VM_BEHAVIOR_SEQUENTIAL
) {
548 if (offset
>= behind
) {
549 run_offset
= 0 - behind
;
550 pg_offset
= PAGE_SIZE_64
;
551 max_pages_in_run
= VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER
;
554 if (offset
< -behind
) {
556 pg_offset
= 0 - PAGE_SIZE_64
;
557 max_pages_in_run
= VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER
;
563 for (n
= 0; n
< max_pages_in_run
; n
++) {
564 m
= vm_page_lookup(object
, offset
+ run_offset
+ (n
* pg_offset
));
566 if (m
&& !m
->vmp_laundry
&& !m
->vmp_busy
&& !m
->vmp_no_cache
&& (m
->vmp_q_state
!= VM_PAGE_ON_THROTTLED_Q
) && !m
->vmp_fictitious
&& !m
->vmp_absent
) {
567 page_run
[pages_in_run
++] = m
;
570 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
572 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
573 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
574 * new reference happens. If no futher references happen on the page after that remote TLB flushes
575 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
576 * by pageout_scan, which is just fine since the last reference would have happened quite far
577 * in the past (TLB caches don't hang around for very long), and of course could just as easily
578 * have happened before we did the deactivate_behind.
580 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m
), VM_MEM_REFERENCED
, PMAP_OPTIONS_NOFLUSH
, (void *)NULL
);
584 vm_page_lockspin_queues();
586 for (n
= 0; n
< pages_in_run
; n
++) {
589 vm_page_deactivate_internal(m
, FALSE
);
591 vm_page_deactivate_behind_count
++;
593 dbgTrace(0xBEEF0019, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
596 vm_page_unlock_queues();
604 #if (DEVELOPMENT || DEBUG)
605 uint32_t vm_page_creation_throttled_hard
= 0;
606 uint32_t vm_page_creation_throttled_soft
= 0;
607 uint64_t vm_page_creation_throttle_avoided
= 0;
608 #endif /* DEVELOPMENT || DEBUG */
611 vm_page_throttled(boolean_t page_kept
)
613 clock_sec_t elapsed_sec
;
615 clock_usec_t tv_usec
;
617 thread_t thread
= current_thread();
619 if (thread
->options
& TH_OPT_VMPRIV
) {
623 if (thread
->t_page_creation_throttled
) {
624 thread
->t_page_creation_throttled
= 0;
626 if (page_kept
== FALSE
) {
630 if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
631 #if (DEVELOPMENT || DEBUG)
632 thread
->t_page_creation_throttled_hard
++;
633 OSAddAtomic(1, &vm_page_creation_throttled_hard
);
634 #endif /* DEVELOPMENT || DEBUG */
635 return HARD_THROTTLE_DELAY
;
638 if ((vm_page_free_count
< vm_page_throttle_limit
|| (VM_CONFIG_COMPRESSOR_IS_PRESENT
&& SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
639 thread
->t_page_creation_count
> (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS
* VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
)) {
640 if (vm_page_free_wanted
== 0 && vm_page_free_wanted_privileged
== 0) {
641 #if (DEVELOPMENT || DEBUG)
642 OSAddAtomic64(1, &vm_page_creation_throttle_avoided
);
646 clock_get_system_microtime(&tv_sec
, &tv_usec
);
648 elapsed_sec
= tv_sec
- thread
->t_page_creation_time
;
650 if (elapsed_sec
<= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS
||
651 (thread
->t_page_creation_count
/ elapsed_sec
) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
) {
652 if (elapsed_sec
>= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS
)) {
654 * we'll reset our stats to give a well behaved app
655 * that was unlucky enough to accumulate a bunch of pages
656 * over a long period of time a chance to get out of
657 * the throttled state... we reset the counter and timestamp
658 * so that if it stays under the rate limit for the next second
659 * it will be back in our good graces... if it exceeds it, it
660 * will remain in the throttled state
662 thread
->t_page_creation_time
= tv_sec
;
663 thread
->t_page_creation_count
= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
* (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS
- 1);
665 VM_PAGEOUT_DEBUG(vm_page_throttle_count
, 1);
667 thread
->t_page_creation_throttled
= 1;
669 if (VM_CONFIG_COMPRESSOR_IS_PRESENT
&& HARD_THROTTLE_LIMIT_REACHED()) {
670 #if (DEVELOPMENT || DEBUG)
671 thread
->t_page_creation_throttled_hard
++;
672 OSAddAtomic(1, &vm_page_creation_throttled_hard
);
673 #endif /* DEVELOPMENT || DEBUG */
674 return HARD_THROTTLE_DELAY
;
676 #if (DEVELOPMENT || DEBUG)
677 thread
->t_page_creation_throttled_soft
++;
678 OSAddAtomic(1, &vm_page_creation_throttled_soft
);
679 #endif /* DEVELOPMENT || DEBUG */
680 return SOFT_THROTTLE_DELAY
;
683 thread
->t_page_creation_time
= tv_sec
;
684 thread
->t_page_creation_count
= 0;
687 thread
->t_page_creation_count
++;
694 * check for various conditions that would
695 * prevent us from creating a ZF page...
696 * cleanup is based on being called from vm_fault_page
698 * object must be locked
699 * object == m->vmp_object
701 static vm_fault_return_t
702 vm_fault_check(vm_object_t object
, vm_page_t m
, vm_page_t first_m
, wait_interrupt_t interruptible_state
, boolean_t page_throttle
)
706 if (object
->shadow_severed
||
707 VM_OBJECT_PURGEABLE_FAULT_ERROR(object
)) {
710 * 1. the shadow chain was severed,
711 * 2. the purgeable object is volatile or empty and is marked
712 * to fault on access while volatile.
713 * Just have to return an error at this point
715 if (m
!= VM_PAGE_NULL
) {
718 vm_fault_cleanup(object
, first_m
);
720 thread_interrupt_level(interruptible_state
);
722 return VM_FAULT_MEMORY_ERROR
;
724 if (page_throttle
== TRUE
) {
725 if ((throttle_delay
= vm_page_throttled(FALSE
))) {
727 * we're throttling zero-fills...
728 * treat this as if we couldn't grab a page
730 if (m
!= VM_PAGE_NULL
) {
733 vm_fault_cleanup(object
, first_m
);
735 VM_DEBUG_EVENT(vmf_check_zfdelay
, VMF_CHECK_ZFDELAY
, DBG_FUNC_NONE
, throttle_delay
, 0, 0, 0);
737 delay(throttle_delay
);
739 if (current_thread_aborted()) {
740 thread_interrupt_level(interruptible_state
);
741 return VM_FAULT_INTERRUPTED
;
743 thread_interrupt_level(interruptible_state
);
745 return VM_FAULT_MEMORY_SHORTAGE
;
748 return VM_FAULT_SUCCESS
;
753 * do the work to zero fill a page and
754 * inject it into the correct paging queue
756 * m->vmp_object must be locked
757 * page queue lock must NOT be held
760 vm_fault_zero_page(vm_page_t m
, boolean_t no_zero_fill
)
762 int my_fault
= DBG_ZERO_FILL_FAULT
;
765 object
= VM_PAGE_OBJECT(m
);
768 * This is is a zero-fill page fault...
770 * Checking the page lock is a waste of
771 * time; this page was absent, so
772 * it can't be page locked by a pager.
774 * we also consider it undefined
775 * with respect to instruction
776 * execution. i.e. it is the responsibility
777 * of higher layers to call for an instruction
778 * sync after changing the contents and before
779 * sending a program into this area. We
780 * choose this approach for performance
782 m
->vmp_pmapped
= TRUE
;
784 m
->vmp_cs_validated
= FALSE
;
785 m
->vmp_cs_tainted
= FALSE
;
786 m
->vmp_cs_nx
= FALSE
;
788 if (no_zero_fill
== TRUE
) {
789 my_fault
= DBG_NZF_PAGE_FAULT
;
791 if (m
->vmp_absent
&& m
->vmp_busy
) {
795 vm_page_zero_fill(m
);
797 VM_STAT_INCR(zero_fill_count
);
798 DTRACE_VM2(zfod
, int, 1, (uint64_t *), NULL
);
800 assert(!m
->vmp_laundry
);
801 assert(object
!= kernel_object
);
802 //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
804 if (!VM_DYNAMIC_PAGING_ENABLED() &&
805 (object
->purgable
== VM_PURGABLE_DENY
||
806 object
->purgable
== VM_PURGABLE_NONVOLATILE
||
807 object
->purgable
== VM_PURGABLE_VOLATILE
)) {
808 vm_page_lockspin_queues();
810 if (!VM_DYNAMIC_PAGING_ENABLED()) {
811 assert(!VM_PAGE_WIRED(m
));
814 * can't be on the pageout queue since we don't
815 * have a pager to try and clean to
817 vm_page_queues_remove(m
, TRUE
);
818 vm_page_check_pageable_safe(m
);
819 vm_page_queue_enter(&vm_page_queue_throttled
, m
, vmp_pageq
);
820 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
821 vm_page_throttled_count
++;
823 vm_page_unlock_queues();
830 * Routine: vm_fault_page
832 * Find the resident page for the virtual memory
833 * specified by the given virtual memory object
835 * Additional arguments:
836 * The required permissions for the page is given
837 * in "fault_type". Desired permissions are included
839 * fault_info is passed along to determine pagein cluster
840 * limits... it contains the expected reference pattern,
841 * cluster size if available, etc...
843 * If the desired page is known to be resident (for
844 * example, because it was previously wired down), asserting
845 * the "unwiring" parameter will speed the search.
847 * If the operation can be interrupted (by thread_abort
848 * or thread_terminate), then the "interruptible"
849 * parameter should be asserted.
852 * The page containing the proper data is returned
856 * The source object must be locked and referenced,
857 * and must donate one paging reference. The reference
858 * is not affected. The paging reference and lock are
861 * If the call succeeds, the object in which "result_page"
862 * resides is left locked and holding a paging reference.
863 * If this is not the original object, a busy page in the
864 * original object is returned in "top_page", to prevent other
865 * callers from pursuing this same data, along with a paging
866 * reference for the original object. The "top_page" should
867 * be destroyed when this guarantee is no longer required.
868 * The "result_page" is also left busy. It is not removed
869 * from the pageout queues.
871 * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
872 * fault succeeded but there's no VM page (i.e. the VM object
873 * does not actually hold VM pages, but device memory or
874 * large pages). The object is still locked and we still hold a
875 * paging_in_progress reference.
877 unsigned int vm_fault_page_blocked_access
= 0;
878 unsigned int vm_fault_page_forced_retry
= 0;
883 vm_object_t first_object
, /* Object to begin search */
884 vm_object_offset_t first_offset
, /* Offset into object */
885 vm_prot_t fault_type
, /* What access is requested */
886 boolean_t must_be_resident
,/* Must page be resident? */
887 boolean_t caller_lookup
, /* caller looked up page */
888 /* Modifies in place: */
889 vm_prot_t
*protection
, /* Protection for mapping */
890 vm_page_t
*result_page
, /* Page found, if successful */
892 vm_page_t
*top_page
, /* Page in top object, if
893 * not result_page. */
894 int *type_of_fault
, /* if non-null, fill in with type of fault
895 * COW, zero-fill, etc... returned in trace point */
896 /* More arguments: */
897 kern_return_t
*error_code
, /* code if page is in error */
898 boolean_t no_zero_fill
, /* don't zero fill absent pages */
899 boolean_t data_supply
, /* treat as data_supply if
900 * it is a write fault and a full
901 * page is provided */
902 vm_object_fault_info_t fault_info
)
906 vm_object_offset_t offset
;
908 vm_object_t next_object
;
909 vm_object_t copy_object
;
910 boolean_t look_for_page
;
911 boolean_t force_fault_retry
= FALSE
;
912 vm_prot_t access_required
= fault_type
;
913 vm_prot_t wants_copy_flag
;
914 kern_return_t wait_result
;
915 wait_interrupt_t interruptible_state
;
916 boolean_t data_already_requested
= FALSE
;
917 vm_behavior_t orig_behavior
;
918 vm_size_t orig_cluster_size
;
919 vm_fault_return_t error
;
921 uint32_t try_failed_count
;
922 int interruptible
; /* how may fault be interrupted? */
923 int external_state
= VM_EXTERNAL_STATE_UNKNOWN
;
924 memory_object_t pager
;
925 vm_fault_return_t retval
;
929 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
930 * marked as paged out in the compressor pager or the pager doesn't exist.
931 * Note also that if the pager for an internal object
932 * has not been created, the pager is not invoked regardless of the value
933 * of MUST_ASK_PAGER().
935 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
936 * is marked as paged out in the compressor pager.
937 * PAGED_OUT() is used to determine if a page has already been pushed
938 * into a copy object in order to avoid a redundant page out operation.
940 #define MUST_ASK_PAGER(o, f, s) \
941 ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
943 #define PAGED_OUT(o, f) \
944 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
949 #define RELEASE_PAGE(m) \
951 PAGE_WAKEUP_DONE(m); \
952 if ( !VM_PAGE_PAGEABLE(m)) { \
953 vm_page_lockspin_queues(); \
954 if ( !VM_PAGE_PAGEABLE(m)) { \
955 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \
956 vm_page_deactivate(m); \
958 vm_page_activate(m); \
960 vm_page_unlock_queues(); \
965 dbgTrace(0xBEEF0002, (unsigned int) first_object
, (unsigned int) first_offset
); /* (TEST/DEBUG) */
968 interruptible
= fault_info
->interruptible
;
969 interruptible_state
= thread_interrupt_level(interruptible
);
972 * INVARIANTS (through entire routine):
974 * 1) At all times, we must either have the object
975 * lock or a busy page in some object to prevent
976 * some other thread from trying to bring in
979 * Note that we cannot hold any locks during the
980 * pager access or when waiting for memory, so
981 * we use a busy page then.
983 * 2) To prevent another thread from racing us down the
984 * shadow chain and entering a new page in the top
985 * object before we do, we must keep a busy page in
986 * the top object while following the shadow chain.
988 * 3) We must increment paging_in_progress on any object
989 * for which we have a busy page before dropping
992 * 4) We leave busy pages on the pageout queues.
993 * If the pageout daemon comes across a busy page,
994 * it will remove the page from the pageout queues.
997 object
= first_object
;
998 offset
= first_offset
;
999 first_m
= VM_PAGE_NULL
;
1000 access_required
= fault_type
;
1003 * default type of fault
1005 my_fault
= DBG_CACHE_HIT_FAULT
;
1009 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1013 #if CONFIG_SECLUDED_MEMORY
1014 if (object
->can_grab_secluded
) {
1015 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
1017 #endif /* CONFIG_SECLUDED_MEMORY */
1019 if (!object
->alive
) {
1021 * object is no longer valid
1022 * clean up and return error
1024 vm_fault_cleanup(object
, first_m
);
1025 thread_interrupt_level(interruptible_state
);
1027 return VM_FAULT_MEMORY_ERROR
;
1030 if (!object
->pager_created
&& object
->phys_contiguous
) {
1032 * A physically-contiguous object without a pager:
1033 * must be a "large page" object. We do not deal
1034 * with VM pages for this object.
1036 caller_lookup
= FALSE
;
1038 goto phys_contig_object
;
1041 if (object
->blocked_access
) {
1043 * Access to this VM object has been blocked.
1044 * Replace our "paging_in_progress" reference with
1045 * a "activity_in_progress" reference and wait for
1046 * access to be unblocked.
1048 caller_lookup
= FALSE
; /* no longer valid after sleep */
1049 vm_object_activity_begin(object
);
1050 vm_object_paging_end(object
);
1051 while (object
->blocked_access
) {
1052 vm_object_sleep(object
,
1053 VM_OBJECT_EVENT_UNBLOCKED
,
1056 vm_fault_page_blocked_access
++;
1057 vm_object_paging_begin(object
);
1058 vm_object_activity_end(object
);
1062 * See whether the page at 'offset' is resident
1064 if (caller_lookup
== TRUE
) {
1066 * The caller has already looked up the page
1067 * and gave us the result in "result_page".
1068 * We can use this for the first lookup but
1069 * it loses its validity as soon as we unlock
1073 caller_lookup
= FALSE
; /* no longer valid after that */
1075 m
= vm_page_lookup(object
, offset
);
1078 dbgTrace(0xBEEF0004, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1080 if (m
!= VM_PAGE_NULL
) {
1083 * The page is being brought in,
1084 * wait for it and then retry.
1087 dbgTrace(0xBEEF0005, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1089 wait_result
= PAGE_SLEEP(object
, m
, interruptible
);
1091 counter(c_vm_fault_page_block_busy_kernel
++);
1093 if (wait_result
!= THREAD_AWAKENED
) {
1094 vm_fault_cleanup(object
, first_m
);
1095 thread_interrupt_level(interruptible_state
);
1097 if (wait_result
== THREAD_RESTART
) {
1098 return VM_FAULT_RETRY
;
1100 return VM_FAULT_INTERRUPTED
;
1105 if (m
->vmp_laundry
) {
1106 m
->vmp_free_when_done
= FALSE
;
1108 if (!m
->vmp_cleaning
) {
1109 vm_pageout_steal_laundry(m
, FALSE
);
1112 if (VM_PAGE_GET_PHYS_PAGE(m
) == vm_page_guard_addr
) {
1114 * Guard page: off limits !
1116 if (fault_type
== VM_PROT_NONE
) {
1118 * The fault is not requesting any
1119 * access to the guard page, so it must
1120 * be just to wire or unwire it.
1121 * Let's pretend it succeeded...
1125 assert(first_m
== VM_PAGE_NULL
);
1126 *top_page
= first_m
;
1127 if (type_of_fault
) {
1128 *type_of_fault
= DBG_GUARD_FAULT
;
1130 thread_interrupt_level(interruptible_state
);
1131 return VM_FAULT_SUCCESS
;
1134 * The fault requests access to the
1135 * guard page: let's deny that !
1137 vm_fault_cleanup(object
, first_m
);
1138 thread_interrupt_level(interruptible_state
);
1139 return VM_FAULT_MEMORY_ERROR
;
1145 * The page is in error, give up now.
1148 dbgTrace(0xBEEF0006, (unsigned int) m
, (unsigned int) error_code
); /* (TEST/DEBUG) */
1151 *error_code
= KERN_MEMORY_ERROR
;
1155 vm_fault_cleanup(object
, first_m
);
1156 thread_interrupt_level(interruptible_state
);
1158 return VM_FAULT_MEMORY_ERROR
;
1160 if (m
->vmp_restart
) {
1162 * The pager wants us to restart
1163 * at the top of the chain,
1164 * typically because it has moved the
1165 * page to another pager, then do so.
1168 dbgTrace(0xBEEF0007, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1172 vm_fault_cleanup(object
, first_m
);
1173 thread_interrupt_level(interruptible_state
);
1175 return VM_FAULT_RETRY
;
1177 if (m
->vmp_absent
) {
1179 * The page isn't busy, but is absent,
1180 * therefore it's deemed "unavailable".
1182 * Remove the non-existent page (unless it's
1183 * in the top object) and move on down to the
1184 * next object (if there is one).
1187 dbgTrace(0xBEEF0008, (unsigned int) m
, (unsigned int) object
->shadow
); /* (TEST/DEBUG) */
1189 next_object
= object
->shadow
;
1191 if (next_object
== VM_OBJECT_NULL
) {
1193 * Absent page at bottom of shadow
1194 * chain; zero fill the page we left
1195 * busy in the first object, and free
1198 assert(!must_be_resident
);
1201 * check for any conditions that prevent
1202 * us from creating a new zero-fill page
1203 * vm_fault_check will do all of the
1204 * fault cleanup in the case of an error condition
1205 * including resetting the thread_interrupt_level
1207 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
, (type_of_fault
== NULL
) ? TRUE
: FALSE
);
1209 if (error
!= VM_FAULT_SUCCESS
) {
1213 if (object
!= first_object
) {
1215 * free the absent page we just found
1220 * drop reference and lock on current object
1222 vm_object_paging_end(object
);
1223 vm_object_unlock(object
);
1226 * grab the original page we
1227 * 'soldered' in place and
1228 * retake lock on 'first_object'
1231 first_m
= VM_PAGE_NULL
;
1233 object
= first_object
;
1234 offset
= first_offset
;
1236 vm_object_lock(object
);
1239 * we're going to use the absent page we just found
1240 * so convert it to a 'busy' page
1242 m
->vmp_absent
= FALSE
;
1245 if (fault_info
->mark_zf_absent
&& no_zero_fill
== TRUE
) {
1246 m
->vmp_absent
= TRUE
;
1249 * zero-fill the page and put it on
1250 * the correct paging queue
1252 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
1256 if (must_be_resident
) {
1257 vm_object_paging_end(object
);
1258 } else if (object
!= first_object
) {
1259 vm_object_paging_end(object
);
1263 m
->vmp_absent
= FALSE
;
1266 vm_page_lockspin_queues();
1267 vm_page_queues_remove(m
, FALSE
);
1268 vm_page_unlock_queues();
1271 offset
+= object
->vo_shadow_offset
;
1272 fault_info
->lo_offset
+= object
->vo_shadow_offset
;
1273 fault_info
->hi_offset
+= object
->vo_shadow_offset
;
1274 access_required
= VM_PROT_READ
;
1276 vm_object_lock(next_object
);
1277 vm_object_unlock(object
);
1278 object
= next_object
;
1279 vm_object_paging_begin(object
);
1282 * reset to default type of fault
1284 my_fault
= DBG_CACHE_HIT_FAULT
;
1289 if ((m
->vmp_cleaning
)
1290 && ((object
!= first_object
) || (object
->copy
!= VM_OBJECT_NULL
))
1291 && (fault_type
& VM_PROT_WRITE
)) {
1293 * This is a copy-on-write fault that will
1294 * cause us to revoke access to this page, but
1295 * this page is in the process of being cleaned
1296 * in a clustered pageout. We must wait until
1297 * the cleaning operation completes before
1298 * revoking access to the original page,
1299 * otherwise we might attempt to remove a
1303 dbgTrace(0xBEEF0009, (unsigned int) m
, (unsigned int) offset
); /* (TEST/DEBUG) */
1306 * take an extra ref so that object won't die
1308 vm_object_reference_locked(object
);
1310 vm_fault_cleanup(object
, first_m
);
1312 counter(c_vm_fault_page_block_backoff_kernel
++);
1313 vm_object_lock(object
);
1314 assert(object
->ref_count
> 0);
1316 m
= vm_page_lookup(object
, offset
);
1318 if (m
!= VM_PAGE_NULL
&& m
->vmp_cleaning
) {
1319 PAGE_ASSERT_WAIT(m
, interruptible
);
1321 vm_object_unlock(object
);
1322 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1323 vm_object_deallocate(object
);
1327 vm_object_unlock(object
);
1329 vm_object_deallocate(object
);
1330 thread_interrupt_level(interruptible_state
);
1332 return VM_FAULT_RETRY
;
1335 if (type_of_fault
== NULL
&& (m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) &&
1336 !(fault_info
!= NULL
&& fault_info
->stealth
)) {
1338 * If we were passed a non-NULL pointer for
1339 * "type_of_fault", than we came from
1340 * vm_fault... we'll let it deal with
1341 * this condition, since it
1342 * needs to see m->vmp_speculative to correctly
1343 * account the pageins, otherwise...
1344 * take it off the speculative queue, we'll
1345 * let the caller of vm_fault_page deal
1346 * with getting it onto the correct queue
1348 * If the caller specified in fault_info that
1349 * it wants a "stealth" fault, we also leave
1350 * the page in the speculative queue.
1352 vm_page_lockspin_queues();
1353 if (m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) {
1354 vm_page_queues_remove(m
, FALSE
);
1356 vm_page_unlock_queues();
1358 assert(object
== VM_PAGE_OBJECT(m
));
1360 if (object
->code_signed
) {
1363 * We just paged in a page from a signed
1364 * memory object but we don't need to
1365 * validate it now. We'll validate it if
1366 * when it gets mapped into a user address
1367 * space for the first time or when the page
1368 * gets copied to another object as a result
1369 * of a copy-on-write.
1374 * We mark the page busy and leave it on
1375 * the pageout queues. If the pageout
1376 * deamon comes across it, then it will
1377 * remove the page from the queue, but not the object
1380 dbgTrace(0xBEEF000B, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1382 assert(!m
->vmp_busy
);
1383 assert(!m
->vmp_absent
);
1391 * we get here when there is no page present in the object at
1392 * the offset we're interested in... we'll allocate a page
1393 * at this point if the pager associated with
1394 * this object can provide the data or we're the top object...
1395 * object is locked; m == NULL
1398 if (must_be_resident
) {
1399 if (fault_type
== VM_PROT_NONE
&&
1400 object
== kernel_object
) {
1402 * We've been called from vm_fault_unwire()
1403 * while removing a map entry that was allocated
1404 * with KMA_KOBJECT and KMA_VAONLY. This page
1405 * is not present and there's nothing more to
1406 * do here (nothing to unwire).
1408 vm_fault_cleanup(object
, first_m
);
1409 thread_interrupt_level(interruptible_state
);
1411 return VM_FAULT_MEMORY_ERROR
;
1414 goto dont_look_for_page
;
1417 /* Don't expect to fault pages into the kernel object. */
1418 assert(object
!= kernel_object
);
1420 data_supply
= FALSE
;
1422 look_for_page
= (object
->pager_created
&& (MUST_ASK_PAGER(object
, offset
, external_state
) == TRUE
) && !data_supply
);
1425 dbgTrace(0xBEEF000C, (unsigned int) look_for_page
, (unsigned int) object
); /* (TEST/DEBUG) */
1427 if (!look_for_page
&& object
== first_object
&& !object
->phys_contiguous
) {
1429 * Allocate a new page for this object/offset pair as a placeholder
1431 m
= vm_page_grab_options(grab_options
);
1433 dbgTrace(0xBEEF000D, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1435 if (m
== VM_PAGE_NULL
) {
1436 vm_fault_cleanup(object
, first_m
);
1437 thread_interrupt_level(interruptible_state
);
1439 return VM_FAULT_MEMORY_SHORTAGE
;
1442 if (fault_info
&& fault_info
->batch_pmap_op
== TRUE
) {
1443 vm_page_insert_internal(m
, object
, offset
, VM_KERN_MEMORY_NONE
, FALSE
, TRUE
, TRUE
, FALSE
, NULL
);
1445 vm_page_insert(m
, object
, offset
);
1448 if (look_for_page
) {
1453 * If the memory manager is not ready, we
1454 * cannot make requests.
1456 if (!object
->pager_ready
) {
1458 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1460 if (m
!= VM_PAGE_NULL
) {
1465 * take an extra ref so object won't die
1467 vm_object_reference_locked(object
);
1468 vm_fault_cleanup(object
, first_m
);
1469 counter(c_vm_fault_page_block_backoff_kernel
++);
1471 vm_object_lock(object
);
1472 assert(object
->ref_count
> 0);
1474 if (!object
->pager_ready
) {
1475 wait_result
= vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGER_READY
, interruptible
);
1477 vm_object_unlock(object
);
1478 if (wait_result
== THREAD_WAITING
) {
1479 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1481 vm_object_deallocate(object
);
1485 vm_object_unlock(object
);
1486 vm_object_deallocate(object
);
1487 thread_interrupt_level(interruptible_state
);
1489 return VM_FAULT_RETRY
;
1492 if (!object
->internal
&& !object
->phys_contiguous
&& object
->paging_in_progress
> vm_object_pagein_throttle
) {
1494 * If there are too many outstanding page
1495 * requests pending on this external object, we
1496 * wait for them to be resolved now.
1499 dbgTrace(0xBEEF0010, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1501 if (m
!= VM_PAGE_NULL
) {
1505 * take an extra ref so object won't die
1507 vm_object_reference_locked(object
);
1509 vm_fault_cleanup(object
, first_m
);
1511 counter(c_vm_fault_page_block_backoff_kernel
++);
1513 vm_object_lock(object
);
1514 assert(object
->ref_count
> 0);
1516 if (object
->paging_in_progress
>= vm_object_pagein_throttle
) {
1517 vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS
, interruptible
);
1519 vm_object_unlock(object
);
1520 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1521 vm_object_deallocate(object
);
1525 vm_object_unlock(object
);
1526 vm_object_deallocate(object
);
1527 thread_interrupt_level(interruptible_state
);
1529 return VM_FAULT_RETRY
;
1532 if (object
->internal
) {
1533 int compressed_count_delta
;
1535 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT
);
1537 if (m
== VM_PAGE_NULL
) {
1539 * Allocate a new page for this object/offset pair as a placeholder
1541 m
= vm_page_grab_options(grab_options
);
1543 dbgTrace(0xBEEF000D, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1545 if (m
== VM_PAGE_NULL
) {
1546 vm_fault_cleanup(object
, first_m
);
1547 thread_interrupt_level(interruptible_state
);
1549 return VM_FAULT_MEMORY_SHORTAGE
;
1552 m
->vmp_absent
= TRUE
;
1553 if (fault_info
&& fault_info
->batch_pmap_op
== TRUE
) {
1554 vm_page_insert_internal(m
, object
, offset
, VM_KERN_MEMORY_NONE
, FALSE
, TRUE
, TRUE
, FALSE
, NULL
);
1556 vm_page_insert(m
, object
, offset
);
1559 assert(m
->vmp_busy
);
1561 m
->vmp_absent
= TRUE
;
1562 pager
= object
->pager
;
1564 assert(object
->paging_in_progress
> 0);
1565 vm_object_unlock(object
);
1567 rc
= vm_compressor_pager_get(
1569 offset
+ object
->paging_offset
,
1570 VM_PAGE_GET_PHYS_PAGE(m
),
1573 &compressed_count_delta
);
1575 if (type_of_fault
== NULL
) {
1579 * we weren't called from vm_fault, so we
1580 * need to apply page creation throttling
1581 * do it before we re-acquire any locks
1583 if (my_fault_type
== DBG_COMPRESSOR_FAULT
) {
1584 if ((throttle_delay
= vm_page_throttled(TRUE
))) {
1585 VM_DEBUG_EVENT(vmf_compressordelay
, VMF_COMPRESSORDELAY
, DBG_FUNC_NONE
, throttle_delay
, 0, 1, 0);
1586 delay(throttle_delay
);
1590 vm_object_lock(object
);
1591 assert(object
->paging_in_progress
> 0);
1593 vm_compressor_pager_count(
1595 compressed_count_delta
,
1596 FALSE
, /* shared_lock */
1601 m
->vmp_absent
= FALSE
;
1602 m
->vmp_dirty
= TRUE
;
1603 if ((object
->wimg_bits
&
1605 VM_WIMG_USE_DEFAULT
) {
1607 * If the page is not cacheable,
1608 * we can't let its contents
1609 * linger in the data cache
1610 * after the decompression.
1612 pmap_sync_page_attributes_phys(
1613 VM_PAGE_GET_PHYS_PAGE(m
));
1615 m
->vmp_written_by_kernel
= TRUE
;
1619 * If the object is purgeable, its
1620 * owner's purgeable ledgers have been
1621 * updated in vm_page_insert() but the
1622 * page was also accounted for in a
1623 * "compressed purgeable" ledger, so
1626 if (((object
->purgable
!=
1627 VM_PURGABLE_DENY
) ||
1628 object
->vo_ledger_tag
) &&
1629 (object
->vo_owner
!=
1632 * One less compressed
1633 * purgeable/tagged page.
1635 vm_object_owner_compressed_update(
1641 case KERN_MEMORY_FAILURE
:
1642 m
->vmp_unusual
= TRUE
;
1643 m
->vmp_error
= TRUE
;
1644 m
->vmp_absent
= FALSE
;
1646 case KERN_MEMORY_ERROR
:
1647 assert(m
->vmp_absent
);
1650 panic("vm_fault_page(): unexpected "
1652 "vm_compressor_pager_get()\n",
1655 PAGE_WAKEUP_DONE(m
);
1658 goto data_requested
;
1660 my_fault_type
= DBG_PAGEIN_FAULT
;
1662 if (m
!= VM_PAGE_NULL
) {
1668 dbgTrace(0xBEEF0012, (unsigned int) object
, (unsigned int) 0); /* (TEST/DEBUG) */
1672 * It's possible someone called vm_object_destroy while we weren't
1673 * holding the object lock. If that has happened, then bail out
1677 pager
= object
->pager
;
1679 if (pager
== MEMORY_OBJECT_NULL
) {
1680 vm_fault_cleanup(object
, first_m
);
1681 thread_interrupt_level(interruptible_state
);
1682 return VM_FAULT_MEMORY_ERROR
;
1686 * We have an absent page in place for the faulting offset,
1687 * so we can release the object lock.
1690 if (object
->object_is_shared_cache
) {
1691 set_thread_rwlock_boost();
1694 vm_object_unlock(object
);
1697 * If this object uses a copy_call strategy,
1698 * and we are interested in a copy of this object
1699 * (having gotten here only by following a
1700 * shadow chain), then tell the memory manager
1701 * via a flag added to the desired_access
1702 * parameter, so that it can detect a race
1703 * between our walking down the shadow chain
1704 * and its pushing pages up into a copy of
1705 * the object that it manages.
1707 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_CALL
&& object
!= first_object
) {
1708 wants_copy_flag
= VM_PROT_WANTS_COPY
;
1710 wants_copy_flag
= VM_PROT_NONE
;
1713 if (object
->copy
== first_object
) {
1715 * if we issue the memory_object_data_request in
1716 * this state, we are subject to a deadlock with
1717 * the underlying filesystem if it is trying to
1718 * shrink the file resulting in a push of pages
1719 * into the copy object... that push will stall
1720 * on the placeholder page, and if the pushing thread
1721 * is holding a lock that is required on the pagein
1722 * path (such as a truncate lock), we'll deadlock...
1723 * to avoid this potential deadlock, we throw away
1724 * our placeholder page before calling memory_object_data_request
1725 * and force this thread to retry the vm_fault_page after
1726 * we have issued the I/O. the second time through this path
1727 * we will find the page already in the cache (presumably still
1728 * busy waiting for the I/O to complete) and then complete
1729 * the fault w/o having to go through memory_object_data_request again
1731 assert(first_m
!= VM_PAGE_NULL
);
1732 assert(VM_PAGE_OBJECT(first_m
) == first_object
);
1734 vm_object_lock(first_object
);
1735 VM_PAGE_FREE(first_m
);
1736 vm_object_paging_end(first_object
);
1737 vm_object_unlock(first_object
);
1739 first_m
= VM_PAGE_NULL
;
1740 force_fault_retry
= TRUE
;
1742 vm_fault_page_forced_retry
++;
1745 if (data_already_requested
== TRUE
) {
1746 orig_behavior
= fault_info
->behavior
;
1747 orig_cluster_size
= fault_info
->cluster_size
;
1749 fault_info
->behavior
= VM_BEHAVIOR_RANDOM
;
1750 fault_info
->cluster_size
= PAGE_SIZE
;
1753 * Call the memory manager to retrieve the data.
1755 rc
= memory_object_data_request(
1757 offset
+ object
->paging_offset
,
1759 access_required
| wants_copy_flag
,
1760 (memory_object_fault_info_t
)fault_info
);
1762 if (data_already_requested
== TRUE
) {
1763 fault_info
->behavior
= orig_behavior
;
1764 fault_info
->cluster_size
= orig_cluster_size
;
1766 data_already_requested
= TRUE
;
1769 DTRACE_VM2(maj_fault
, int, 1, (uint64_t *), NULL
);
1771 dbgTrace(0xBEEF0013, (unsigned int) object
, (unsigned int) rc
); /* (TEST/DEBUG) */
1773 vm_object_lock(object
);
1775 if (object
->object_is_shared_cache
) {
1776 clear_thread_rwlock_boost();
1780 if (rc
!= KERN_SUCCESS
) {
1781 vm_fault_cleanup(object
, first_m
);
1782 thread_interrupt_level(interruptible_state
);
1784 return (rc
== MACH_SEND_INTERRUPTED
) ?
1785 VM_FAULT_INTERRUPTED
:
1786 VM_FAULT_MEMORY_ERROR
;
1789 clock_usec_t tv_usec
;
1791 if (my_fault_type
== DBG_PAGEIN_FAULT
) {
1792 clock_get_system_microtime(&tv_sec
, &tv_usec
);
1793 current_thread()->t_page_creation_time
= tv_sec
;
1794 current_thread()->t_page_creation_count
= 0;
1797 if ((interruptible
!= THREAD_UNINT
) && (current_thread()->sched_flags
& TH_SFLAG_ABORT
)) {
1798 vm_fault_cleanup(object
, first_m
);
1799 thread_interrupt_level(interruptible_state
);
1801 return VM_FAULT_INTERRUPTED
;
1803 if (force_fault_retry
== TRUE
) {
1804 vm_fault_cleanup(object
, first_m
);
1805 thread_interrupt_level(interruptible_state
);
1807 return VM_FAULT_RETRY
;
1809 if (m
== VM_PAGE_NULL
&& object
->phys_contiguous
) {
1811 * No page here means that the object we
1812 * initially looked up was "physically
1813 * contiguous" (i.e. device memory). However,
1814 * with Virtual VRAM, the object might not
1815 * be backed by that device memory anymore,
1816 * so we're done here only if the object is
1817 * still "phys_contiguous".
1818 * Otherwise, if the object is no longer
1819 * "phys_contiguous", we need to retry the
1820 * page fault against the object's new backing
1821 * store (different memory object).
1827 * potentially a pagein fault
1828 * if we make it through the state checks
1829 * above, than we'll count it as such
1831 my_fault
= my_fault_type
;
1834 * Retry with same object/offset, since new data may
1835 * be in a different page (i.e., m is meaningless at
1842 * We get here if the object has no pager, or an existence map
1843 * exists and indicates the page isn't present on the pager
1844 * or we're unwiring a page. If a pager exists, but there
1845 * is no existence map, then the m->vmp_absent case above handles
1846 * the ZF case when the pager can't provide the page
1849 dbgTrace(0xBEEF0014, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1851 if (object
== first_object
) {
1854 assert(m
== VM_PAGE_NULL
);
1857 next_object
= object
->shadow
;
1859 if (next_object
== VM_OBJECT_NULL
) {
1861 * we've hit the bottom of the shadown chain,
1862 * fill the page in the top object with zeros.
1864 assert(!must_be_resident
);
1866 if (object
!= first_object
) {
1867 vm_object_paging_end(object
);
1868 vm_object_unlock(object
);
1870 object
= first_object
;
1871 offset
= first_offset
;
1872 vm_object_lock(object
);
1875 assert(VM_PAGE_OBJECT(m
) == object
);
1876 first_m
= VM_PAGE_NULL
;
1879 * check for any conditions that prevent
1880 * us from creating a new zero-fill page
1881 * vm_fault_check will do all of the
1882 * fault cleanup in the case of an error condition
1883 * including resetting the thread_interrupt_level
1885 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
, (type_of_fault
== NULL
) ? TRUE
: FALSE
);
1887 if (error
!= VM_FAULT_SUCCESS
) {
1891 if (m
== VM_PAGE_NULL
) {
1892 m
= vm_page_grab_options(grab_options
);
1894 if (m
== VM_PAGE_NULL
) {
1895 vm_fault_cleanup(object
, VM_PAGE_NULL
);
1896 thread_interrupt_level(interruptible_state
);
1898 return VM_FAULT_MEMORY_SHORTAGE
;
1900 vm_page_insert(m
, object
, offset
);
1902 if (fault_info
->mark_zf_absent
&& no_zero_fill
== TRUE
) {
1903 m
->vmp_absent
= TRUE
;
1906 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
1911 * Move on to the next object. Lock the next
1912 * object before unlocking the current one.
1914 if ((object
!= first_object
) || must_be_resident
) {
1915 vm_object_paging_end(object
);
1918 offset
+= object
->vo_shadow_offset
;
1919 fault_info
->lo_offset
+= object
->vo_shadow_offset
;
1920 fault_info
->hi_offset
+= object
->vo_shadow_offset
;
1921 access_required
= VM_PROT_READ
;
1923 vm_object_lock(next_object
);
1924 vm_object_unlock(object
);
1926 object
= next_object
;
1927 vm_object_paging_begin(object
);
1932 * PAGE HAS BEEN FOUND.
1935 * busy, so that we can play with it;
1936 * not absent, so that nobody else will fill it;
1937 * possibly eligible for pageout;
1939 * The top-level page (first_m) is:
1940 * VM_PAGE_NULL if the page was found in the
1942 * busy, not absent, and ineligible for pageout.
1944 * The current object (object) is locked. A paging
1945 * reference is held for the current and top-level
1950 dbgTrace(0xBEEF0015, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1952 #if EXTRA_ASSERTIONS
1953 assert(m
->vmp_busy
&& !m
->vmp_absent
);
1954 assert((first_m
== VM_PAGE_NULL
) ||
1955 (first_m
->vmp_busy
&& !first_m
->vmp_absent
&&
1956 !first_m
->vmp_active
&& !first_m
->vmp_inactive
&& !first_m
->vmp_secluded
));
1957 #endif /* EXTRA_ASSERTIONS */
1960 * If the page is being written, but isn't
1961 * already owned by the top-level object,
1962 * we have to copy it into a new page owned
1963 * by the top-level object.
1965 if (object
!= first_object
) {
1967 dbgTrace(0xBEEF0016, (unsigned int) object
, (unsigned int) fault_type
); /* (TEST/DEBUG) */
1969 if (fault_type
& VM_PROT_WRITE
) {
1973 * We only really need to copy if we
1976 assert(!must_be_resident
);
1979 * If we try to collapse first_object at this
1980 * point, we may deadlock when we try to get
1981 * the lock on an intermediate object (since we
1982 * have the bottom object locked). We can't
1983 * unlock the bottom object, because the page
1984 * we found may move (by collapse) if we do.
1986 * Instead, we first copy the page. Then, when
1987 * we have no more use for the bottom object,
1988 * we unlock it and try to collapse.
1990 * Note that we copy the page even if we didn't
1991 * need to... that's the breaks.
1995 * Allocate a page for the copy
1997 copy_m
= vm_page_grab_options(grab_options
);
1999 if (copy_m
== VM_PAGE_NULL
) {
2002 vm_fault_cleanup(object
, first_m
);
2003 thread_interrupt_level(interruptible_state
);
2005 return VM_FAULT_MEMORY_SHORTAGE
;
2008 vm_page_copy(m
, copy_m
);
2011 * If another map is truly sharing this
2012 * page with us, we have to flush all
2013 * uses of the original page, since we
2014 * can't distinguish those which want the
2015 * original from those which need the
2018 * XXXO If we know that only one map has
2019 * access to this page, then we could
2020 * avoid the pmap_disconnect() call.
2022 if (m
->vmp_pmapped
) {
2023 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
2026 if (m
->vmp_clustered
) {
2027 VM_PAGE_COUNT_AS_PAGEIN(m
);
2028 VM_PAGE_CONSUME_CLUSTERED(m
);
2030 assert(!m
->vmp_cleaning
);
2033 * We no longer need the old page or object.
2038 * This check helps with marking the object as having a sequential pattern
2039 * Normally we'll miss doing this below because this fault is about COW to
2040 * the first_object i.e. bring page in from disk, push to object above but
2041 * don't update the file object's sequential pattern.
2043 if (object
->internal
== FALSE
) {
2044 vm_fault_is_sequential(object
, offset
, fault_info
->behavior
);
2047 vm_object_paging_end(object
);
2048 vm_object_unlock(object
);
2050 my_fault
= DBG_COW_FAULT
;
2051 VM_STAT_INCR(cow_faults
);
2052 DTRACE_VM2(cow_fault
, int, 1, (uint64_t *), NULL
);
2053 current_task()->cow_faults
++;
2055 object
= first_object
;
2056 offset
= first_offset
;
2058 vm_object_lock(object
);
2060 * get rid of the place holder
2061 * page that we soldered in earlier
2063 VM_PAGE_FREE(first_m
);
2064 first_m
= VM_PAGE_NULL
;
2067 * and replace it with the
2068 * page we just copied into
2070 assert(copy_m
->vmp_busy
);
2071 vm_page_insert(copy_m
, object
, offset
);
2072 SET_PAGE_DIRTY(copy_m
, TRUE
);
2076 * Now that we've gotten the copy out of the
2077 * way, let's try to collapse the top object.
2078 * But we have to play ugly games with
2079 * paging_in_progress to do that...
2081 vm_object_paging_end(object
);
2082 vm_object_collapse(object
, offset
, TRUE
);
2083 vm_object_paging_begin(object
);
2085 *protection
&= (~VM_PROT_WRITE
);
2089 * Now check whether the page needs to be pushed into the
2090 * copy object. The use of asymmetric copy on write for
2091 * shared temporary objects means that we may do two copies to
2092 * satisfy the fault; one above to get the page from a
2093 * shadowed object, and one here to push it into the copy.
2095 try_failed_count
= 0;
2097 while ((copy_object
= first_object
->copy
) != VM_OBJECT_NULL
) {
2098 vm_object_offset_t copy_offset
;
2102 dbgTrace(0xBEEF0017, (unsigned int) copy_object
, (unsigned int) fault_type
); /* (TEST/DEBUG) */
2105 * If the page is being written, but hasn't been
2106 * copied to the copy-object, we have to copy it there.
2108 if ((fault_type
& VM_PROT_WRITE
) == 0) {
2109 *protection
&= ~VM_PROT_WRITE
;
2114 * If the page was guaranteed to be resident,
2115 * we must have already performed the copy.
2117 if (must_be_resident
) {
2122 * Try to get the lock on the copy_object.
2124 if (!vm_object_lock_try(copy_object
)) {
2125 vm_object_unlock(object
);
2128 mutex_pause(try_failed_count
); /* wait a bit */
2129 vm_object_lock(object
);
2133 try_failed_count
= 0;
2136 * Make another reference to the copy-object,
2137 * to keep it from disappearing during the
2140 vm_object_reference_locked(copy_object
);
2143 * Does the page exist in the copy?
2145 copy_offset
= first_offset
- copy_object
->vo_shadow_offset
;
2147 if (copy_object
->vo_size
<= copy_offset
) {
2149 * Copy object doesn't cover this page -- do nothing.
2152 } else if ((copy_m
= vm_page_lookup(copy_object
, copy_offset
)) != VM_PAGE_NULL
) {
2154 * Page currently exists in the copy object
2156 if (copy_m
->vmp_busy
) {
2158 * If the page is being brought
2159 * in, wait for it and then retry.
2164 * take an extra ref so object won't die
2166 vm_object_reference_locked(copy_object
);
2167 vm_object_unlock(copy_object
);
2168 vm_fault_cleanup(object
, first_m
);
2169 counter(c_vm_fault_page_block_backoff_kernel
++);
2171 vm_object_lock(copy_object
);
2172 assert(copy_object
->ref_count
> 0);
2173 VM_OBJ_RES_DECR(copy_object
);
2174 vm_object_lock_assert_exclusive(copy_object
);
2175 copy_object
->ref_count
--;
2176 assert(copy_object
->ref_count
> 0);
2177 copy_m
= vm_page_lookup(copy_object
, copy_offset
);
2179 if (copy_m
!= VM_PAGE_NULL
&& copy_m
->vmp_busy
) {
2180 PAGE_ASSERT_WAIT(copy_m
, interruptible
);
2182 vm_object_unlock(copy_object
);
2183 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
2184 vm_object_deallocate(copy_object
);
2188 vm_object_unlock(copy_object
);
2189 vm_object_deallocate(copy_object
);
2190 thread_interrupt_level(interruptible_state
);
2192 return VM_FAULT_RETRY
;
2195 } else if (!PAGED_OUT(copy_object
, copy_offset
)) {
2197 * If PAGED_OUT is TRUE, then the page used to exist
2198 * in the copy-object, and has already been paged out.
2199 * We don't need to repeat this. If PAGED_OUT is
2200 * FALSE, then either we don't know (!pager_created,
2201 * for example) or it hasn't been paged out.
2202 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2203 * We must copy the page to the copy object.
2205 * Allocate a page for the copy
2207 copy_m
= vm_page_alloc(copy_object
, copy_offset
);
2209 if (copy_m
== VM_PAGE_NULL
) {
2212 VM_OBJ_RES_DECR(copy_object
);
2213 vm_object_lock_assert_exclusive(copy_object
);
2214 copy_object
->ref_count
--;
2215 assert(copy_object
->ref_count
> 0);
2217 vm_object_unlock(copy_object
);
2218 vm_fault_cleanup(object
, first_m
);
2219 thread_interrupt_level(interruptible_state
);
2221 return VM_FAULT_MEMORY_SHORTAGE
;
2224 * Must copy page into copy-object.
2226 vm_page_copy(m
, copy_m
);
2229 * If the old page was in use by any users
2230 * of the copy-object, it must be removed
2231 * from all pmaps. (We can't know which
2234 if (m
->vmp_pmapped
) {
2235 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
2238 if (m
->vmp_clustered
) {
2239 VM_PAGE_COUNT_AS_PAGEIN(m
);
2240 VM_PAGE_CONSUME_CLUSTERED(m
);
2243 * If there's a pager, then immediately
2244 * page out this page, using the "initialize"
2245 * option. Else, we use the copy.
2247 if ((!copy_object
->pager_ready
)
2248 || VM_COMPRESSOR_PAGER_STATE_GET(copy_object
, copy_offset
) == VM_EXTERNAL_STATE_ABSENT
2250 vm_page_lockspin_queues();
2251 assert(!m
->vmp_cleaning
);
2252 vm_page_activate(copy_m
);
2253 vm_page_unlock_queues();
2255 SET_PAGE_DIRTY(copy_m
, TRUE
);
2256 PAGE_WAKEUP_DONE(copy_m
);
2258 assert(copy_m
->vmp_busy
== TRUE
);
2259 assert(!m
->vmp_cleaning
);
2262 * dirty is protected by the object lock
2264 SET_PAGE_DIRTY(copy_m
, TRUE
);
2267 * The page is already ready for pageout:
2268 * not on pageout queues and busy.
2269 * Unlock everything except the
2270 * copy_object itself.
2272 vm_object_unlock(object
);
2275 * Write the page to the copy-object,
2276 * flushing it from the kernel.
2278 vm_pageout_initialize_page(copy_m
);
2281 * Since the pageout may have
2282 * temporarily dropped the
2283 * copy_object's lock, we
2284 * check whether we'll have
2285 * to deallocate the hard way.
2287 if ((copy_object
->shadow
!= object
) || (copy_object
->ref_count
== 1)) {
2288 vm_object_unlock(copy_object
);
2289 vm_object_deallocate(copy_object
);
2290 vm_object_lock(object
);
2295 * Pick back up the old object's
2296 * lock. [It is safe to do so,
2297 * since it must be deeper in the
2300 vm_object_lock(object
);
2304 * Because we're pushing a page upward
2305 * in the object tree, we must restart
2306 * any faults that are waiting here.
2307 * [Note that this is an expansion of
2308 * PAGE_WAKEUP that uses the THREAD_RESTART
2309 * wait result]. Can't turn off the page's
2310 * busy bit because we're not done with it.
2312 if (m
->vmp_wanted
) {
2313 m
->vmp_wanted
= FALSE
;
2314 thread_wakeup_with_result((event_t
) m
, THREAD_RESTART
);
2318 * The reference count on copy_object must be
2319 * at least 2: one for our extra reference,
2320 * and at least one from the outside world
2321 * (we checked that when we last locked
2324 vm_object_lock_assert_exclusive(copy_object
);
2325 copy_object
->ref_count
--;
2326 assert(copy_object
->ref_count
> 0);
2328 VM_OBJ_RES_DECR(copy_object
);
2329 vm_object_unlock(copy_object
);
2336 *top_page
= first_m
;
2338 if (m
!= VM_PAGE_NULL
) {
2339 assert(VM_PAGE_OBJECT(m
) == object
);
2341 retval
= VM_FAULT_SUCCESS
;
2343 if (my_fault
== DBG_PAGEIN_FAULT
) {
2344 VM_PAGE_COUNT_AS_PAGEIN(m
);
2346 if (object
->internal
) {
2347 my_fault
= DBG_PAGEIND_FAULT
;
2349 my_fault
= DBG_PAGEINV_FAULT
;
2353 * evaluate access pattern and update state
2354 * vm_fault_deactivate_behind depends on the
2355 * state being up to date
2357 vm_fault_is_sequential(object
, offset
, fault_info
->behavior
);
2358 vm_fault_deactivate_behind(object
, offset
, fault_info
->behavior
);
2359 } else if (type_of_fault
== NULL
&& my_fault
== DBG_CACHE_HIT_FAULT
) {
2361 * we weren't called from vm_fault, so handle the
2362 * accounting here for hits in the cache
2364 if (m
->vmp_clustered
) {
2365 VM_PAGE_COUNT_AS_PAGEIN(m
);
2366 VM_PAGE_CONSUME_CLUSTERED(m
);
2368 vm_fault_is_sequential(object
, offset
, fault_info
->behavior
);
2369 vm_fault_deactivate_behind(object
, offset
, fault_info
->behavior
);
2370 } else if (my_fault
== DBG_COMPRESSOR_FAULT
|| my_fault
== DBG_COMPRESSOR_SWAPIN_FAULT
) {
2371 VM_STAT_DECOMPRESSIONS();
2373 if (type_of_fault
) {
2374 *type_of_fault
= my_fault
;
2377 retval
= VM_FAULT_SUCCESS_NO_VM_PAGE
;
2378 assert(first_m
== VM_PAGE_NULL
);
2379 assert(object
== first_object
);
2382 thread_interrupt_level(interruptible_state
);
2385 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS
, 0); /* (TEST/DEBUG) */
2390 thread_interrupt_level(interruptible_state
);
2392 if (wait_result
== THREAD_INTERRUPTED
) {
2393 return VM_FAULT_INTERRUPTED
;
2395 return VM_FAULT_RETRY
;
2404 * When soft faulting a page, we have to validate the page if:
2405 * 1. the page is being mapped in user space
2406 * 2. the page hasn't already been found to be "tainted"
2407 * 3. the page belongs to a code-signed object
2408 * 4. the page has not been validated yet or has been mapped for write.
2410 #define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \
2411 ((pmap) != kernel_pmap /*1*/ && \
2412 !(page)->vmp_cs_tainted /*2*/ && \
2413 (page_obj)->code_signed /*3*/ && \
2414 (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/ ))
2418 * page queue lock must NOT be held
2419 * m->vmp_object must be locked
2421 * NOTE: m->vmp_object could be locked "shared" only if we are called
2422 * from vm_fault() as part of a soft fault. If so, we must be
2423 * careful not to modify the VM object in any way that is not
2424 * legal under a shared lock...
2426 extern int panic_on_cs_killed
;
2427 extern int proc_selfpid(void);
2428 extern char *proc_name_address(void *p
);
2429 unsigned long cs_enter_tainted_rejected
= 0;
2430 unsigned long cs_enter_tainted_accepted
= 0;
2432 vm_fault_enter(vm_page_t m
,
2434 vm_map_offset_t vaddr
,
2436 vm_prot_t caller_prot
,
2438 boolean_t change_wiring
,
2440 vm_object_fault_info_t fault_info
,
2441 boolean_t
*need_retry
,
2444 kern_return_t kr
, pe_result
;
2445 boolean_t previously_pmapped
= m
->vmp_pmapped
;
2446 boolean_t must_disconnect
= 0;
2447 boolean_t map_is_switched
, map_is_switch_protected
;
2448 boolean_t cs_violation
;
2449 int cs_enforcement_enabled
;
2450 vm_prot_t fault_type
;
2452 boolean_t no_cache
= fault_info
->no_cache
;
2453 boolean_t cs_bypass
= fault_info
->cs_bypass
;
2454 int pmap_options
= fault_info
->pmap_options
;
2456 fault_type
= change_wiring
? VM_PROT_NONE
: caller_prot
;
2457 object
= VM_PAGE_OBJECT(m
);
2459 vm_object_lock_assert_held(object
);
2462 if (pmap
== kernel_pmap
) {
2463 kasan_notify_address(vaddr
, PAGE_SIZE
);
2467 LCK_MTX_ASSERT(&vm_page_queue_lock
, LCK_MTX_ASSERT_NOTOWNED
);
2469 if (VM_PAGE_GET_PHYS_PAGE(m
) == vm_page_guard_addr
) {
2470 assert(m
->vmp_fictitious
);
2471 return KERN_SUCCESS
;
2474 if (*type_of_fault
== DBG_ZERO_FILL_FAULT
) {
2475 vm_object_lock_assert_exclusive(object
);
2476 } else if ((fault_type
& VM_PROT_WRITE
) == 0 &&
2478 #if VM_OBJECT_ACCESS_TRACKING
2479 || object
->access_tracking
2480 #endif /* VM_OBJECT_ACCESS_TRACKING */
2483 * This is not a "write" fault, so we
2484 * might not have taken the object lock
2485 * exclusively and we might not be able
2486 * to update the "wpmapped" bit in
2488 * Let's just grant read access to
2489 * the page for now and we'll
2490 * soft-fault again if we need write
2494 /* This had better not be a JIT page. */
2495 if (!pmap_has_prot_policy(prot
)) {
2496 prot
&= ~VM_PROT_WRITE
;
2501 if (m
->vmp_pmapped
== FALSE
) {
2502 if (m
->vmp_clustered
) {
2503 if (*type_of_fault
== DBG_CACHE_HIT_FAULT
) {
2505 * found it in the cache, but this
2506 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
2507 * so it must have come in as part of
2508 * a cluster... account 1 pagein against it
2510 if (object
->internal
) {
2511 *type_of_fault
= DBG_PAGEIND_FAULT
;
2513 *type_of_fault
= DBG_PAGEINV_FAULT
;
2516 VM_PAGE_COUNT_AS_PAGEIN(m
);
2518 VM_PAGE_CONSUME_CLUSTERED(m
);
2522 if (*type_of_fault
!= DBG_COW_FAULT
) {
2523 DTRACE_VM2(as_fault
, int, 1, (uint64_t *), NULL
);
2525 if (pmap
== kernel_pmap
) {
2526 DTRACE_VM2(kernel_asflt
, int, 1, (uint64_t *), NULL
);
2530 /* Validate code signature if necessary. */
2532 VM_FAULT_NEED_CS_VALIDATION(pmap
, m
, object
)) {
2533 vm_object_lock_assert_exclusive(object
);
2535 if (m
->vmp_cs_validated
) {
2536 vm_cs_revalidates
++;
2539 /* VM map is locked, so 1 ref will remain on VM object -
2540 * so no harm if vm_page_validate_cs drops the object lock */
2543 if (fault_info
->pmap_cs_associated
&&
2544 pmap_cs_enforced(pmap
) &&
2545 !m
->vmp_cs_validated
&&
2546 !m
->vmp_cs_tainted
&&
2548 (prot
& VM_PROT_EXECUTE
) &&
2549 (caller_prot
& VM_PROT_EXECUTE
)) {
2551 * With pmap_cs, the pmap layer will validate the
2552 * code signature for any executable pmap mapping.
2553 * No need for us to validate this page too:
2554 * in pmap_cs we trust...
2556 vm_cs_defer_to_pmap_cs
++;
2558 vm_cs_defer_to_pmap_cs_not
++;
2559 vm_page_validate_cs(m
);
2562 vm_page_validate_cs(m
);
2563 #endif /* PMAP_CS */
2566 #define page_immutable(m, prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/ )
2567 #define page_nx(m) ((m)->vmp_cs_nx)
2569 map_is_switched
= ((pmap
!= vm_map_pmap(current_task()->map
)) &&
2570 (pmap
== vm_map_pmap(current_thread()->map
)));
2571 map_is_switch_protected
= current_thread()->map
->switch_protect
;
2573 /* If the map is switched, and is switch-protected, we must protect
2574 * some pages from being write-faulted: immutable pages because by
2575 * definition they may not be written, and executable pages because that
2576 * would provide a way to inject unsigned code.
2577 * If the page is immutable, we can simply return. However, we can't
2578 * immediately determine whether a page is executable anywhere. But,
2579 * we can disconnect it everywhere and remove the executable protection
2580 * from the current map. We do that below right before we do the
2583 cs_enforcement_enabled
= cs_process_enforcement(NULL
);
2585 if (cs_enforcement_enabled
&& map_is_switched
&&
2586 map_is_switch_protected
&& page_immutable(m
, prot
) &&
2587 (prot
& VM_PROT_WRITE
)) {
2588 return KERN_CODESIGN_ERROR
;
2591 if (cs_enforcement_enabled
&& page_nx(m
) && (prot
& VM_PROT_EXECUTE
)) {
2593 printf("page marked to be NX, not letting it be mapped EXEC\n");
2595 return KERN_CODESIGN_ERROR
;
2598 /* A page could be tainted, or pose a risk of being tainted later.
2599 * Check whether the receiving process wants it, and make it feel
2600 * the consequences (that hapens in cs_invalid_page()).
2601 * For CS Enforcement, two other conditions will
2602 * cause that page to be tainted as well:
2603 * - pmapping an unsigned page executable - this means unsigned code;
2604 * - writeable mapping of a validated page - the content of that page
2605 * can be changed without the kernel noticing, therefore unsigned
2606 * code can be created
2609 /* code-signing is bypassed */
2610 cs_violation
= FALSE
;
2611 } else if (m
->vmp_cs_tainted
) {
2613 cs_violation
= TRUE
;
2614 } else if (!cs_enforcement_enabled
) {
2615 /* no further code-signing enforcement */
2616 cs_violation
= FALSE
;
2617 } else if (page_immutable(m
, prot
) &&
2618 ((prot
& VM_PROT_WRITE
) ||
2621 * The page should be immutable, but is in danger of being
2623 * This is the case where we want policy from the code
2624 * directory - is the page immutable or not? For now we have
2625 * to assume that code pages will be immutable, data pages not.
2626 * We'll assume a page is a code page if it has a code directory
2627 * and we fault for execution.
2628 * That is good enough since if we faulted the code page for
2629 * writing in another map before, it is wpmapped; if we fault
2630 * it for writing in this map later it will also be faulted for
2631 * executing at the same time; and if we fault for writing in
2632 * another map later, we will disconnect it from this pmap so
2633 * we'll notice the change.
2635 cs_violation
= TRUE
;
2636 } else if (!m
->vmp_cs_validated
&&
2637 (prot
& VM_PROT_EXECUTE
)
2640 * Executable pages will be validated by pmap_cs;
2641 * in pmap_cs we trust...
2642 * If pmap_cs is turned off, this is a code-signing
2645 && !(pmap_cs_enforced(pmap
))
2646 #endif /* PMAP_CS */
2648 cs_violation
= TRUE
;
2650 cs_violation
= FALSE
;
2654 /* We will have a tainted page. Have to handle the special case
2655 * of a switched map now. If the map is not switched, standard
2656 * procedure applies - call cs_invalid_page().
2657 * If the map is switched, the real owner is invalid already.
2658 * There is no point in invalidating the switching process since
2659 * it will not be executing from the map. So we don't call
2660 * cs_invalid_page() in that case. */
2661 boolean_t reject_page
, cs_killed
;
2662 if (map_is_switched
) {
2663 assert(pmap
== vm_map_pmap(current_thread()->map
));
2664 assert(!(prot
& VM_PROT_WRITE
) || (map_is_switch_protected
== FALSE
));
2665 reject_page
= FALSE
;
2668 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2669 object
->code_signed
? "yes" : "no",
2670 m
->vmp_cs_validated
? "yes" : "no",
2671 m
->vmp_cs_tainted
? "yes" : "no",
2672 m
->vmp_wpmapped
? "yes" : "no",
2675 reject_page
= cs_invalid_page((addr64_t
) vaddr
, &cs_killed
);
2679 /* reject the invalid page: abort the page fault */
2681 const char *procname
;
2683 vm_object_t file_object
, shadow
;
2684 vm_object_offset_t file_offset
;
2685 char *pathname
, *filename
;
2686 vm_size_t pathname_len
, filename_len
;
2687 boolean_t truncated_path
;
2688 #define __PATH_MAX 1024
2689 struct timespec mtime
, cs_mtime
;
2691 os_reason_t codesigning_exit_reason
= OS_REASON_NULL
;
2693 kr
= KERN_CODESIGN_ERROR
;
2694 cs_enter_tainted_rejected
++;
2696 /* get process name and pid */
2698 task
= current_task();
2699 pid
= proc_selfpid();
2700 if (task
->bsd_info
!= NULL
) {
2701 procname
= proc_name_address(task
->bsd_info
);
2704 /* get file's VM object */
2705 file_object
= object
;
2706 file_offset
= m
->vmp_offset
;
2707 for (shadow
= file_object
->shadow
,
2709 shadow
!= VM_OBJECT_NULL
;
2710 shadow
= file_object
->shadow
,
2712 vm_object_lock_shared(shadow
);
2713 if (file_object
!= object
) {
2714 vm_object_unlock(file_object
);
2716 file_offset
+= file_object
->vo_shadow_offset
;
2717 file_object
= shadow
;
2722 cs_mtime
.tv_sec
= 0;
2723 cs_mtime
.tv_nsec
= 0;
2725 /* get file's pathname and/or filename */
2730 truncated_path
= FALSE
;
2731 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2732 if (file_object
->pager
!= NULL
) {
2733 pathname
= (char *)kalloc(__PATH_MAX
* 2);
2736 pathname_len
= __PATH_MAX
;
2737 filename
= pathname
+ pathname_len
;
2738 filename_len
= __PATH_MAX
;
2740 if (vnode_pager_get_object_name(file_object
->pager
,
2745 &truncated_path
) == KERN_SUCCESS
) {
2746 /* safety first... */
2747 pathname
[__PATH_MAX
- 1] = '\0';
2748 filename
[__PATH_MAX
- 1] = '\0';
2750 vnode_pager_get_object_mtime(file_object
->pager
,
2754 kfree(pathname
, __PATH_MAX
* 2);
2759 truncated_path
= FALSE
;
2763 printf("CODE SIGNING: process %d[%s]: "
2764 "rejecting invalid page at address 0x%llx "
2765 "from offset 0x%llx in file \"%s%s%s\" "
2766 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2767 "(signed:%d validated:%d tainted:%d nx:%d "
2768 "wpmapped:%d dirty:%d depth:%d)\n",
2769 pid
, procname
, (addr64_t
) vaddr
,
2771 (pathname
? pathname
: "<nil>"),
2772 (truncated_path
? "/.../" : ""),
2773 (truncated_path
? filename
: ""),
2774 cs_mtime
.tv_sec
, cs_mtime
.tv_nsec
,
2775 ((cs_mtime
.tv_sec
== mtime
.tv_sec
&&
2776 cs_mtime
.tv_nsec
== mtime
.tv_nsec
)
2779 mtime
.tv_sec
, mtime
.tv_nsec
,
2780 object
->code_signed
,
2781 m
->vmp_cs_validated
,
2789 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
2790 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
2791 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
2792 * will deal with the segmentation fault.
2795 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC
, BSD_PROC_EXITREASON_CREATE
) | DBG_FUNC_NONE
,
2796 pid
, OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_INVALID_PAGE
, 0, 0);
2798 codesigning_exit_reason
= os_reason_create(OS_REASON_CODESIGNING
, CODESIGNING_EXIT_REASON_INVALID_PAGE
);
2799 if (codesigning_exit_reason
== NULL
) {
2800 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
2802 mach_vm_address_t data_addr
= 0;
2803 struct codesigning_exit_reason_info
*ceri
= NULL
;
2804 uint32_t reason_buffer_size_estimate
= kcdata_estimate_required_buffer_size(1, sizeof(*ceri
));
2806 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason
, reason_buffer_size_estimate
)) {
2807 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
2809 if (KERN_SUCCESS
== kcdata_get_memory_addr(&codesigning_exit_reason
->osr_kcd_descriptor
,
2810 EXIT_REASON_CODESIGNING_INFO
, sizeof(*ceri
), &data_addr
)) {
2811 ceri
= (struct codesigning_exit_reason_info
*)data_addr
;
2812 static_assert(__PATH_MAX
== sizeof(ceri
->ceri_pathname
));
2814 ceri
->ceri_virt_addr
= vaddr
;
2815 ceri
->ceri_file_offset
= file_offset
;
2817 strncpy((char *)&ceri
->ceri_pathname
, pathname
, sizeof(ceri
->ceri_pathname
));
2819 ceri
->ceri_pathname
[0] = '\0';
2822 strncpy((char *)&ceri
->ceri_filename
, filename
, sizeof(ceri
->ceri_filename
));
2824 ceri
->ceri_filename
[0] = '\0';
2826 ceri
->ceri_path_truncated
= (truncated_path
);
2827 ceri
->ceri_codesig_modtime_secs
= cs_mtime
.tv_sec
;
2828 ceri
->ceri_codesig_modtime_nsecs
= cs_mtime
.tv_nsec
;
2829 ceri
->ceri_page_modtime_secs
= mtime
.tv_sec
;
2830 ceri
->ceri_page_modtime_nsecs
= mtime
.tv_nsec
;
2831 ceri
->ceri_object_codesigned
= (object
->code_signed
);
2832 ceri
->ceri_page_codesig_validated
= (m
->vmp_cs_validated
);
2833 ceri
->ceri_page_codesig_tainted
= (m
->vmp_cs_tainted
);
2834 ceri
->ceri_page_codesig_nx
= (m
->vmp_cs_nx
);
2835 ceri
->ceri_page_wpmapped
= (m
->vmp_wpmapped
);
2836 ceri
->ceri_page_slid
= 0;
2837 ceri
->ceri_page_dirty
= (m
->vmp_dirty
);
2838 ceri
->ceri_page_shadow_depth
= shadow_depth
;
2840 #if DEBUG || DEVELOPMENT
2841 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
2843 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
2844 #endif /* DEBUG || DEVELOPMENT */
2845 /* Free the buffer */
2846 os_reason_alloc_buffer_noblock(codesigning_exit_reason
, 0);
2851 set_thread_exit_reason(current_thread(), codesigning_exit_reason
, FALSE
);
2853 if (panic_on_cs_killed
&&
2854 object
->object_is_shared_cache
) {
2855 char *tainted_contents
;
2856 vm_map_offset_t src_vaddr
;
2857 src_vaddr
= (vm_map_offset_t
) phystokv((pmap_paddr_t
)VM_PAGE_GET_PHYS_PAGE(m
) << PAGE_SHIFT
);
2858 tainted_contents
= kalloc(PAGE_SIZE
);
2859 bcopy((const char *)src_vaddr
, tainted_contents
, PAGE_SIZE
);
2860 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m
, VM_PAGE_GET_PHYS_PAGE(m
), (uint64_t)src_vaddr
, tainted_contents
);
2861 panic("CODE SIGNING: process %d[%s]: "
2862 "rejecting invalid page (phys#0x%x) at address 0x%llx "
2863 "from offset 0x%llx in file \"%s%s%s\" "
2864 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2865 "(signed:%d validated:%d tainted:%d nx:%d"
2866 "wpmapped:%d dirty:%d depth:%d)\n",
2868 VM_PAGE_GET_PHYS_PAGE(m
),
2871 (pathname
? pathname
: "<nil>"),
2872 (truncated_path
? "/.../" : ""),
2873 (truncated_path
? filename
: ""),
2874 cs_mtime
.tv_sec
, cs_mtime
.tv_nsec
,
2875 ((cs_mtime
.tv_sec
== mtime
.tv_sec
&&
2876 cs_mtime
.tv_nsec
== mtime
.tv_nsec
)
2879 mtime
.tv_sec
, mtime
.tv_nsec
,
2880 object
->code_signed
,
2881 m
->vmp_cs_validated
,
2889 if (file_object
!= object
) {
2890 vm_object_unlock(file_object
);
2892 if (pathname_len
!= 0) {
2893 kfree(pathname
, __PATH_MAX
* 2);
2898 /* proceed with the invalid page */
2900 if (!m
->vmp_cs_validated
&&
2901 !object
->code_signed
) {
2903 * This page has not been (fully) validated but
2904 * does not belong to a code-signed object
2905 * so it should not be forcefully considered
2907 * We're just concerned about it here because
2908 * we've been asked to "execute" it but that
2909 * does not mean that it should cause other
2911 * This happens when a debugger sets a
2912 * breakpoint and we then execute code in
2913 * that page. Marking the page as "tainted"
2914 * would cause any inspection tool ("leaks",
2915 * "vmmap", "CrashReporter", ...) to get killed
2916 * due to code-signing violation on that page,
2917 * even though they're just reading it and not
2918 * executing from it.
2922 * Page might have been tainted before or not;
2923 * now it definitively is. If the page wasn't
2924 * tainted, we must disconnect it from all
2925 * pmaps later, to force existing mappings
2926 * through that code path for re-consideration
2927 * of the validity of that page.
2929 must_disconnect
= !m
->vmp_cs_tainted
;
2930 m
->vmp_cs_tainted
= TRUE
;
2932 cs_enter_tainted_accepted
++;
2934 if (kr
!= KERN_SUCCESS
) {
2936 printf("CODESIGNING: vm_fault_enter(0x%llx): "
2937 "*** INVALID PAGE ***\n",
2941 if (cs_enforcement_panic
) {
2942 panic("CODESIGNING: panicking on invalid page\n");
2947 /* proceed with the valid page */
2951 boolean_t page_queues_locked
= FALSE
;
2952 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
2954 if (! page_queues_locked) { \
2955 page_queues_locked = TRUE; \
2956 vm_page_lockspin_queues(); \
2959 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
2961 if (page_queues_locked) { \
2962 page_queues_locked = FALSE; \
2963 vm_page_unlock_queues(); \
2968 * Hold queues lock to manipulate
2969 * the page queues. Change wiring
2972 assert((m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) || object
!= compressor_object
);
2974 #if CONFIG_BACKGROUND_QUEUE
2975 vm_page_update_background_state(m
);
2977 if (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
2979 * Compressor pages are neither wired
2980 * nor pageable and should never change.
2982 assert(object
== compressor_object
);
2983 } else if (change_wiring
) {
2984 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
2987 if (kr
== KERN_SUCCESS
) {
2988 vm_page_wire(m
, wire_tag
, TRUE
);
2991 vm_page_unwire(m
, TRUE
);
2993 /* we keep the page queues lock, if we need it later */
2995 if (object
->internal
== TRUE
) {
2997 * don't allow anonymous pages on
2998 * the speculative queues
3002 if (kr
!= KERN_SUCCESS
) {
3003 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3004 vm_page_deactivate(m
);
3005 /* we keep the page queues lock, if we need it later */
3006 } else if (((m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
) ||
3007 (m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ||
3008 (m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) ||
3009 ((m
->vmp_q_state
!= VM_PAGE_ON_THROTTLED_Q
) && no_cache
)) &&
3010 !VM_PAGE_WIRED(m
)) {
3011 if (vm_page_local_q
&&
3012 (*type_of_fault
== DBG_COW_FAULT
||
3013 *type_of_fault
== DBG_ZERO_FILL_FAULT
)) {
3017 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
3019 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3020 vm_object_lock_assert_exclusive(object
);
3023 * we got a local queue to stuff this
3025 * its safe to manipulate local and
3026 * local_id at this point since we're
3027 * behind an exclusive object lock and
3028 * the page is not on any global queue.
3030 * we'll use the current cpu number to
3031 * select the queue note that we don't
3032 * need to disable preemption... we're
3033 * going to be behind the local queue's
3034 * lock to do the real work
3038 lq
= &vm_page_local_q
[lid
].vpl_un
.vpl
;
3040 VPL_LOCK(&lq
->vpl_lock
);
3042 vm_page_check_pageable_safe(m
);
3043 vm_page_queue_enter(&lq
->vpl_queue
, m
, vmp_pageq
);
3044 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_LOCAL_Q
;
3045 m
->vmp_local_id
= lid
;
3048 if (object
->internal
) {
3049 lq
->vpl_internal_count
++;
3051 lq
->vpl_external_count
++;
3054 VPL_UNLOCK(&lq
->vpl_lock
);
3056 if (lq
->vpl_count
> vm_page_local_q_soft_limit
) {
3058 * we're beyond the soft limit
3059 * for the local queue
3060 * vm_page_reactivate_local will
3061 * 'try' to take the global page
3062 * queue lock... if it can't
3063 * that's ok... we'll let the
3064 * queue continue to grow up
3065 * to the hard limit... at that
3066 * point we'll wait for the
3067 * lock... once we've got the
3068 * lock, we'll transfer all of
3069 * the pages from the local
3070 * queue to the global active
3073 vm_page_reactivate_local(lid
, FALSE
, FALSE
);
3076 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3079 * test again now that we hold the
3082 if (!VM_PAGE_WIRED(m
)) {
3083 if (m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
3084 vm_page_queues_remove(m
, FALSE
);
3086 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated
, 1);
3087 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated
, 1);
3090 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m
) ||
3093 * If this is a no_cache mapping
3094 * and the page has never been
3095 * mapped before or was
3096 * previously a no_cache page,
3097 * then we want to leave pages
3098 * in the speculative state so
3099 * that they can be readily
3100 * recycled if free memory runs
3101 * low. Otherwise the page is
3102 * activated as normal.
3106 (!previously_pmapped
||
3108 m
->vmp_no_cache
= TRUE
;
3110 if (m
->vmp_q_state
!= VM_PAGE_ON_SPECULATIVE_Q
) {
3111 vm_page_speculate(m
, FALSE
);
3113 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m
)) {
3114 vm_page_activate(m
);
3118 /* we keep the page queues lock, if we need it later */
3122 /* we're done with the page queues lock, if we ever took it */
3123 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3126 /* If we have a KERN_SUCCESS from the previous checks, we either have
3127 * a good page, or a tainted page that has been accepted by the process.
3128 * In both cases the page will be entered into the pmap.
3129 * If the page is writeable, we need to disconnect it from other pmaps
3130 * now so those processes can take note.
3132 if (kr
== KERN_SUCCESS
) {
3134 * NOTE: we may only hold the vm_object lock SHARED
3135 * at this point, so we need the phys_page lock to
3136 * properly serialize updating the pmapped and
3139 if ((prot
& VM_PROT_EXECUTE
) && !m
->vmp_xpmapped
) {
3140 ppnum_t phys_page
= VM_PAGE_GET_PHYS_PAGE(m
);
3142 pmap_lock_phys_page(phys_page
);
3144 * go ahead and take the opportunity
3145 * to set 'pmapped' here so that we don't
3146 * need to grab this lock a 2nd time
3149 m
->vmp_pmapped
= TRUE
;
3151 if (!m
->vmp_xpmapped
) {
3152 m
->vmp_xpmapped
= TRUE
;
3154 pmap_unlock_phys_page(phys_page
);
3156 if (!object
->internal
) {
3157 OSAddAtomic(1, &vm_page_xpmapped_external_count
);
3160 #if defined(__arm__) || defined(__arm64__)
3161 pmap_sync_page_data_phys(phys_page
);
3163 if (object
->internal
&&
3164 object
->pager
!= NULL
) {
3166 * This page could have been
3167 * uncompressed by the
3168 * compressor pager and its
3169 * contents might be only in
3171 * Since it's being mapped for
3172 * "execute" for the fist time,
3173 * make sure the icache is in
3176 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT
);
3177 pmap_sync_page_data_phys(phys_page
);
3181 pmap_unlock_phys_page(phys_page
);
3184 if (m
->vmp_pmapped
== FALSE
) {
3185 ppnum_t phys_page
= VM_PAGE_GET_PHYS_PAGE(m
);
3187 pmap_lock_phys_page(phys_page
);
3188 m
->vmp_pmapped
= TRUE
;
3189 pmap_unlock_phys_page(phys_page
);
3193 if (fault_type
& VM_PROT_WRITE
) {
3194 if (m
->vmp_wpmapped
== FALSE
) {
3195 vm_object_lock_assert_exclusive(object
);
3196 if (!object
->internal
&& object
->pager
) {
3197 task_update_logical_writes(current_task(), PAGE_SIZE
, TASK_WRITE_DEFERRED
, vnode_pager_lookup_vnode(object
->pager
));
3199 m
->vmp_wpmapped
= TRUE
;
3201 if (must_disconnect
) {
3203 * We can only get here
3204 * because of the CSE logic
3206 assert(cs_enforcement_enabled
);
3207 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
3209 * If we are faulting for a write, we can clear
3210 * the execute bit - that will ensure the page is
3211 * checked again before being executable, which
3212 * protects against a map switch.
3213 * This only happens the first time the page
3214 * gets tainted, so we won't get stuck here
3215 * to make an already writeable page executable.
3218 assert(!pmap_has_prot_policy(prot
));
3219 prot
&= ~VM_PROT_EXECUTE
;
3223 assert(VM_PAGE_OBJECT(m
) == object
);
3225 #if VM_OBJECT_ACCESS_TRACKING
3226 if (object
->access_tracking
) {
3227 DTRACE_VM2(access_tracking
, vm_map_offset_t
, vaddr
, int, fault_type
);
3228 if (fault_type
& VM_PROT_WRITE
) {
3229 object
->access_tracking_writes
++;
3230 vm_object_access_tracking_writes
++;
3232 object
->access_tracking_reads
++;
3233 vm_object_access_tracking_reads
++;
3236 #endif /* VM_OBJECT_ACCESS_TRACKING */
3242 /* Prevent a deadlock by not
3243 * holding the object lock if we need to wait for a page in
3244 * pmap_enter() - <rdar://problem/7138958> */
3245 PMAP_ENTER_OPTIONS(pmap
, vaddr
, m
, prot
, fault_type
, 0,
3247 pmap_options
| PMAP_OPTIONS_NOWAIT
,
3251 * Retry without execute permission if we encountered a codesigning
3252 * failure on a non-execute fault. This allows applications which
3253 * don't actually need to execute code to still map it for read access.
3255 if ((pe_result
== KERN_CODESIGN_ERROR
) && pmap_cs_enforced(pmap
) &&
3256 (prot
& VM_PROT_EXECUTE
) && !(caller_prot
& VM_PROT_EXECUTE
)) {
3257 prot
&= ~VM_PROT_EXECUTE
;
3258 goto pmap_enter_retry
;
3262 if (pe_result
== KERN_INVALID_ARGUMENT
&&
3263 pmap
== PMAP_NULL
&&
3266 * Wiring a page in a pmap-less VM map:
3267 * VMware's "vmmon" kernel extension does this
3269 * Let it proceed even though the PMAP_ENTER() failed.
3271 pe_result
= KERN_SUCCESS
;
3273 #endif /* __x86_64__ */
3275 if (pe_result
== KERN_RESOURCE_SHORTAGE
) {
3278 * this will be non-null in the case where we hold the lock
3279 * on the top-object in this chain... we can't just drop
3280 * the lock on the object we're inserting the page into
3281 * and recall the PMAP_ENTER since we can still cause
3282 * a deadlock if one of the critical paths tries to
3283 * acquire the lock on the top-object and we're blocked
3284 * in PMAP_ENTER waiting for memory... our only recourse
3285 * is to deal with it at a higher level where we can
3289 vm_pmap_enter_retried
++;
3290 goto after_the_pmap_enter
;
3292 /* The nonblocking version of pmap_enter did not succeed.
3293 * and we don't need to drop other locks and retry
3294 * at the level above us, so
3295 * use the blocking version instead. Requires marking
3296 * the page busy and unlocking the object */
3297 boolean_t was_busy
= m
->vmp_busy
;
3299 vm_object_lock_assert_exclusive(object
);
3302 vm_object_unlock(object
);
3304 PMAP_ENTER_OPTIONS(pmap
, vaddr
, m
, prot
, fault_type
,
3306 pmap_options
, pe_result
);
3308 assert(VM_PAGE_OBJECT(m
) == object
);
3310 /* Take the object lock again. */
3311 vm_object_lock(object
);
3313 /* If the page was busy, someone else will wake it up.
3314 * Otherwise, we have to do it now. */
3315 assert(m
->vmp_busy
);
3317 PAGE_WAKEUP_DONE(m
);
3319 vm_pmap_enter_blocked
++;
3325 after_the_pmap_enter
:
3330 vm_pre_fault(vm_map_offset_t vaddr
, vm_prot_t prot
)
3332 if (pmap_find_phys(current_map()->pmap
, vaddr
) == 0) {
3333 vm_fault(current_map(), /* map */
3335 prot
, /* fault_type */
3336 FALSE
, /* change_wiring */
3337 VM_KERN_MEMORY_NONE
, /* tag - not wiring */
3338 THREAD_UNINT
, /* interruptible */
3339 NULL
, /* caller_pmap */
3340 0 /* caller_pmap_addr */);
3348 * Handle page faults, including pseudo-faults
3349 * used to change the wiring status of pages.
3351 * Explicit continuations have been removed.
3353 * vm_fault and vm_fault_page save mucho state
3354 * in the moral equivalent of a closure. The state
3355 * structure is allocated when first entering vm_fault
3356 * and deallocated when leaving vm_fault.
3359 extern int _map_enter_debug
;
3360 extern uint64_t get_current_unique_pid(void);
3362 unsigned long vm_fault_collapse_total
= 0;
3363 unsigned long vm_fault_collapse_skipped
= 0;
3369 vm_map_offset_t vaddr
,
3370 vm_prot_t fault_type
,
3371 boolean_t change_wiring
,
3374 vm_map_offset_t caller_pmap_addr
)
3376 return vm_fault_internal(map
, vaddr
, fault_type
, change_wiring
, vm_tag_bt(),
3377 interruptible
, caller_pmap
, caller_pmap_addr
,
3384 vm_map_offset_t vaddr
,
3385 vm_prot_t fault_type
,
3386 boolean_t change_wiring
,
3387 vm_tag_t wire_tag
, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
3390 vm_map_offset_t caller_pmap_addr
)
3392 return vm_fault_internal(map
, vaddr
, fault_type
, change_wiring
, wire_tag
,
3393 interruptible
, caller_pmap
, caller_pmap_addr
,
3398 current_proc_is_privileged(void)
3400 return csproc_get_platform_binary(current_proc());
3403 uint64_t vm_copied_on_read
= 0;
3408 vm_map_offset_t vaddr
,
3409 vm_prot_t caller_prot
,
3410 boolean_t change_wiring
,
3411 vm_tag_t wire_tag
, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
3414 vm_map_offset_t caller_pmap_addr
,
3415 ppnum_t
*physpage_p
)
3417 vm_map_version_t version
; /* Map version for verificiation */
3418 boolean_t wired
; /* Should mapping be wired down? */
3419 vm_object_t object
; /* Top-level object */
3420 vm_object_offset_t offset
; /* Top-level offset */
3421 vm_prot_t prot
; /* Protection for mapping */
3422 vm_object_t old_copy_object
; /* Saved copy object */
3423 vm_page_t result_page
; /* Result of vm_fault_page */
3424 vm_page_t top_page
; /* Placeholder page */
3427 vm_page_t m
; /* Fast access to result_page */
3428 kern_return_t error_code
;
3429 vm_object_t cur_object
;
3430 vm_object_t m_object
= NULL
;
3431 vm_object_offset_t cur_offset
;
3433 vm_object_t new_object
;
3436 wait_interrupt_t interruptible_state
;
3437 vm_map_t real_map
= map
;
3438 vm_map_t original_map
= map
;
3439 boolean_t object_locks_dropped
= FALSE
;
3440 vm_prot_t fault_type
;
3441 vm_prot_t original_fault_type
;
3442 struct vm_object_fault_info fault_info
= {};
3443 boolean_t need_collapse
= FALSE
;
3444 boolean_t need_retry
= FALSE
;
3445 boolean_t
*need_retry_ptr
= NULL
;
3446 int object_lock_type
= 0;
3447 int cur_object_lock_type
;
3448 vm_object_t top_object
= VM_OBJECT_NULL
;
3449 vm_object_t written_on_object
= VM_OBJECT_NULL
;
3450 memory_object_t written_on_pager
= NULL
;
3451 vm_object_offset_t written_on_offset
= 0;
3453 int compressed_count_delta
;
3455 boolean_t need_copy
;
3456 boolean_t need_copy_on_read
;
3457 vm_map_offset_t trace_vaddr
;
3458 vm_map_offset_t trace_real_vaddr
;
3459 vm_map_offset_t real_vaddr
;
3460 boolean_t resilient_media_retry
= FALSE
;
3461 vm_object_t resilient_media_object
= VM_OBJECT_NULL
;
3462 vm_object_offset_t resilient_media_offset
= (vm_object_offset_t
)-1;
3465 trace_real_vaddr
= vaddr
;
3466 vaddr
= vm_map_trunc_page(vaddr
, PAGE_MASK
);
3468 if (map
== kernel_map
) {
3469 trace_vaddr
= VM_KERNEL_ADDRHIDE(vaddr
);
3470 trace_real_vaddr
= VM_KERNEL_ADDRHIDE(trace_real_vaddr
);
3472 trace_vaddr
= vaddr
;
3475 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
3476 (MACHDBG_CODE(DBG_MACH_VM
, 2)) | DBG_FUNC_START
,
3477 ((uint64_t)trace_vaddr
>> 32),
3479 (map
== kernel_map
),
3483 if (get_preemption_level() != 0) {
3484 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
3485 (MACHDBG_CODE(DBG_MACH_VM
, 2)) | DBG_FUNC_END
,
3486 ((uint64_t)trace_vaddr
>> 32),
3492 return KERN_FAILURE
;
3495 thread_t cthread
= current_thread();
3496 boolean_t rtfault
= (cthread
->sched_mode
== TH_MODE_REALTIME
);
3497 uint64_t fstart
= 0;
3500 fstart
= mach_continuous_time();
3503 interruptible_state
= thread_interrupt_level(interruptible
);
3505 fault_type
= (change_wiring
? VM_PROT_NONE
: caller_prot
);
3507 VM_STAT_INCR(faults
);
3508 current_task()->faults
++;
3509 original_fault_type
= fault_type
;
3512 if (fault_type
& VM_PROT_WRITE
) {
3517 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3519 object_lock_type
= OBJECT_LOCK_SHARED
;
3522 cur_object_lock_type
= OBJECT_LOCK_SHARED
;
3524 if ((map
== kernel_map
) && (caller_prot
& VM_PROT_WRITE
)) {
3525 if (compressor_map
) {
3526 if ((vaddr
>= vm_map_min(compressor_map
)) && (vaddr
< vm_map_max(compressor_map
))) {
3527 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr
, caller_prot
, (void *) vm_map_min(compressor_map
), (void *) vm_map_max(compressor_map
));
3532 assert(written_on_object
== VM_OBJECT_NULL
);
3535 * assume we will hit a page in the cache
3536 * otherwise, explicitly override with
3537 * the real fault type once we determine it
3539 type_of_fault
= DBG_CACHE_HIT_FAULT
;
3542 * Find the backing store object and offset into
3543 * it to begin the search.
3545 fault_type
= original_fault_type
;
3547 vm_map_lock_read(map
);
3549 if (resilient_media_retry
) {
3551 * If we have to insert a fake zero-filled page to hide
3552 * a media failure to provide the real page, we need to
3553 * resolve any pending copy-on-write on this mapping.
3554 * VM_PROT_COPY tells vm_map_lookup_locked() to deal
3555 * with that even if this is not a "write" fault.
3558 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3561 kr
= vm_map_lookup_locked(&map
, vaddr
,
3562 (fault_type
| (need_copy
? VM_PROT_COPY
: 0)),
3563 object_lock_type
, &version
,
3564 &object
, &offset
, &prot
, &wired
,
3568 if (kr
!= KERN_SUCCESS
) {
3569 vm_map_unlock_read(map
);
3572 pmap
= real_map
->pmap
;
3573 fault_info
.interruptible
= interruptible
;
3574 fault_info
.stealth
= FALSE
;
3575 fault_info
.io_sync
= FALSE
;
3576 fault_info
.mark_zf_absent
= FALSE
;
3577 fault_info
.batch_pmap_op
= FALSE
;
3579 if (resilient_media_retry
) {
3581 * We're retrying this fault after having detected a media
3582 * failure from a "resilient_media" mapping.
3583 * Check that the mapping is still pointing at the object
3584 * that just failed to provide a page.
3586 assert(resilient_media_object
!= VM_OBJECT_NULL
);
3587 assert(resilient_media_offset
!= (vm_object_offset_t
)-1);
3588 if (object
!= VM_OBJECT_NULL
&&
3589 object
== resilient_media_object
&&
3590 offset
== resilient_media_offset
&&
3591 fault_info
.resilient_media
) {
3593 * This mapping still points at the same object
3594 * and is still "resilient_media": proceed in
3595 * "recovery-from-media-failure" mode, where we'll
3596 * insert a zero-filled page in the top object.
3598 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
3600 /* not recovering: reset state */
3601 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
3602 resilient_media_retry
= FALSE
;
3603 /* release our extra reference on failed object */
3604 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
3605 vm_object_deallocate(resilient_media_object
);
3606 resilient_media_object
= VM_OBJECT_NULL
;
3607 resilient_media_offset
= (vm_object_offset_t
)-1;
3610 assert(resilient_media_object
== VM_OBJECT_NULL
);
3611 resilient_media_offset
= (vm_object_offset_t
)-1;
3615 * If the page is wired, we must fault for the current protection
3616 * value, to avoid further faults.
3619 fault_type
= prot
| VM_PROT_WRITE
;
3621 if (wired
|| need_copy
) {
3623 * since we're treating this fault as a 'write'
3624 * we must hold the top object lock exclusively
3626 if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3627 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3629 if (vm_object_lock_upgrade(object
) == FALSE
) {
3631 * couldn't upgrade, so explictly
3632 * take the lock exclusively
3634 vm_object_lock(object
);
3639 #if VM_FAULT_CLASSIFY
3641 * Temporary data gathering code
3643 vm_fault_classify(object
, offset
, fault_type
);
3646 * Fast fault code. The basic idea is to do as much as
3647 * possible while holding the map lock and object locks.
3648 * Busy pages are not used until the object lock has to
3649 * be dropped to do something (copy, zero fill, pmap enter).
3650 * Similarly, paging references aren't acquired until that
3651 * point, and object references aren't used.
3653 * If we can figure out what to do
3654 * (zero fill, copy on write, pmap enter) while holding
3655 * the locks, then it gets done. Otherwise, we give up,
3656 * and use the original fault path (which doesn't hold
3657 * the map lock, and relies on busy pages).
3658 * The give up cases include:
3659 * - Have to talk to pager.
3660 * - Page is busy, absent or in error.
3661 * - Pager has locked out desired access.
3662 * - Fault needs to be restarted.
3663 * - Have to push page into copy object.
3665 * The code is an infinite loop that moves one level down
3666 * the shadow chain each time. cur_object and cur_offset
3667 * refer to the current object being examined. object and offset
3668 * are the original object from the map. The loop is at the
3669 * top level if and only if object and cur_object are the same.
3671 * Invariants: Map lock is held throughout. Lock is held on
3672 * original object and cur_object (if different) when
3673 * continuing or exiting loop.
3677 #if defined(__arm64__)
3679 * Fail if reading an execute-only page in a
3680 * pmap that enforces execute-only protection.
3682 if (fault_type
== VM_PROT_READ
&&
3683 (prot
& VM_PROT_EXECUTE
) &&
3684 !(prot
& VM_PROT_READ
) &&
3685 pmap_enforces_execute_only(pmap
)) {
3686 vm_object_unlock(object
);
3687 vm_map_unlock_read(map
);
3688 if (real_map
!= map
) {
3689 vm_map_unlock(real_map
);
3691 kr
= KERN_PROTECTION_FAILURE
;
3697 * If this page is to be inserted in a copy delay object
3698 * for writing, and if the object has a copy, then the
3699 * copy delay strategy is implemented in the slow fault page.
3701 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_DELAY
&&
3702 object
->copy
!= VM_OBJECT_NULL
&& (fault_type
& VM_PROT_WRITE
)) {
3703 goto handle_copy_delay
;
3706 cur_object
= object
;
3707 cur_offset
= offset
;
3710 #if CONFIG_SECLUDED_MEMORY
3711 if (object
->can_grab_secluded
) {
3712 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
3714 #endif /* CONFIG_SECLUDED_MEMORY */
3717 if (!cur_object
->pager_created
&&
3718 cur_object
->phys_contiguous
) { /* superpage */
3722 if (cur_object
->blocked_access
) {
3724 * Access to this VM object has been blocked.
3725 * Let the slow path handle it.
3730 m
= vm_page_lookup(cur_object
, cur_offset
);
3733 if (m
!= VM_PAGE_NULL
) {
3734 m_object
= cur_object
;
3737 wait_result_t result
;
3740 * in order to do the PAGE_ASSERT_WAIT, we must
3741 * have object that 'm' belongs to locked exclusively
3743 if (object
!= cur_object
) {
3744 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3745 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3747 if (vm_object_lock_upgrade(cur_object
) == FALSE
) {
3749 * couldn't upgrade so go do a full retry
3750 * immediately since we can no longer be
3751 * certain about cur_object (since we
3752 * don't hold a reference on it)...
3753 * first drop the top object lock
3755 vm_object_unlock(object
);
3757 vm_map_unlock_read(map
);
3758 if (real_map
!= map
) {
3759 vm_map_unlock(real_map
);
3765 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3766 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3768 if (vm_object_lock_upgrade(object
) == FALSE
) {
3770 * couldn't upgrade, so explictly take the lock
3771 * exclusively and go relookup the page since we
3772 * will have dropped the object lock and
3773 * a different thread could have inserted
3774 * a page at this offset
3775 * no need for a full retry since we're
3776 * at the top level of the object chain
3778 vm_object_lock(object
);
3783 if ((m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) && m_object
->internal
) {
3785 * m->vmp_busy == TRUE and the object is locked exclusively
3786 * if m->pageout_queue == TRUE after we acquire the
3787 * queues lock, we are guaranteed that it is stable on
3788 * the pageout queue and therefore reclaimable
3790 * NOTE: this is only true for the internal pageout queue
3791 * in the compressor world
3793 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT
);
3795 vm_page_lock_queues();
3797 if (m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) {
3798 vm_pageout_throttle_up(m
);
3799 vm_page_unlock_queues();
3801 PAGE_WAKEUP_DONE(m
);
3802 goto reclaimed_from_pageout
;
3804 vm_page_unlock_queues();
3806 if (object
!= cur_object
) {
3807 vm_object_unlock(object
);
3810 vm_map_unlock_read(map
);
3811 if (real_map
!= map
) {
3812 vm_map_unlock(real_map
);
3815 result
= PAGE_ASSERT_WAIT(m
, interruptible
);
3817 vm_object_unlock(cur_object
);
3819 if (result
== THREAD_WAITING
) {
3820 result
= thread_block(THREAD_CONTINUE_NULL
);
3822 counter(c_vm_fault_page_block_busy_kernel
++);
3824 if (result
== THREAD_AWAKENED
|| result
== THREAD_RESTART
) {
3831 reclaimed_from_pageout
:
3832 if (m
->vmp_laundry
) {
3833 if (object
!= cur_object
) {
3834 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3835 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3837 vm_object_unlock(object
);
3838 vm_object_unlock(cur_object
);
3840 vm_map_unlock_read(map
);
3841 if (real_map
!= map
) {
3842 vm_map_unlock(real_map
);
3847 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3848 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3850 if (vm_object_lock_upgrade(object
) == FALSE
) {
3852 * couldn't upgrade, so explictly take the lock
3853 * exclusively and go relookup the page since we
3854 * will have dropped the object lock and
3855 * a different thread could have inserted
3856 * a page at this offset
3857 * no need for a full retry since we're
3858 * at the top level of the object chain
3860 vm_object_lock(object
);
3865 vm_pageout_steal_laundry(m
, FALSE
);
3868 if (VM_PAGE_GET_PHYS_PAGE(m
) == vm_page_guard_addr
) {
3870 * Guard page: let the slow path deal with it
3874 if (m
->vmp_unusual
&& (m
->vmp_error
|| m
->vmp_restart
|| m
->vmp_private
|| m
->vmp_absent
)) {
3876 * Unusual case... let the slow path deal with it
3880 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object
)) {
3881 if (object
!= cur_object
) {
3882 vm_object_unlock(object
);
3884 vm_map_unlock_read(map
);
3885 if (real_map
!= map
) {
3886 vm_map_unlock(real_map
);
3888 vm_object_unlock(cur_object
);
3889 kr
= KERN_MEMORY_ERROR
;
3892 assert(m_object
== VM_PAGE_OBJECT(m
));
3894 if (VM_FAULT_NEED_CS_VALIDATION(map
->pmap
, m
, m_object
) ||
3895 (physpage_p
!= NULL
&& (prot
& VM_PROT_WRITE
))) {
3896 upgrade_lock_and_retry
:
3898 * We might need to validate this page
3899 * against its code signature, so we
3900 * want to hold the VM object exclusively.
3902 if (object
!= cur_object
) {
3903 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3904 vm_object_unlock(object
);
3905 vm_object_unlock(cur_object
);
3907 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3909 vm_map_unlock_read(map
);
3910 if (real_map
!= map
) {
3911 vm_map_unlock(real_map
);
3916 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3917 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3919 if (vm_object_lock_upgrade(object
) == FALSE
) {
3921 * couldn't upgrade, so explictly take the lock
3922 * exclusively and go relookup the page since we
3923 * will have dropped the object lock and
3924 * a different thread could have inserted
3925 * a page at this offset
3926 * no need for a full retry since we're
3927 * at the top level of the object chain
3929 vm_object_lock(object
);
3936 * Two cases of map in faults:
3937 * - At top level w/o copy object.
3938 * - Read fault anywhere.
3939 * --> must disallow write.
3942 if (object
== cur_object
&& object
->copy
== VM_OBJECT_NULL
) {
3947 !fault_info
.no_copy_on_read
&&
3948 cur_object
!= object
&&
3949 !cur_object
->internal
&&
3950 !cur_object
->pager_trusted
&&
3951 vm_protect_privileged_from_untrusted
&&
3952 !((prot
& VM_PROT_EXECUTE
) &&
3953 cur_object
->code_signed
&&
3954 cs_process_enforcement(NULL
)) &&
3955 current_proc_is_privileged()) {
3957 * We're faulting on a page in "object" and
3958 * went down the shadow chain to "cur_object"
3959 * to find out that "cur_object"'s pager
3960 * is not "trusted", i.e. we can not trust it
3961 * to always return the same contents.
3962 * Since the target is a "privileged" process,
3963 * let's treat this as a copy-on-read fault, as
3964 * if it was a copy-on-write fault.
3965 * Once "object" gets a copy of this page, it
3966 * won't have to rely on "cur_object" to
3967 * provide the contents again.
3969 * This is done by setting "need_copy" and
3970 * retrying the fault from the top with the
3971 * appropriate locking.
3973 * Special case: if the mapping is executable
3974 * and the untrusted object is code-signed and
3975 * the process is "cs_enforced", we do not
3976 * copy-on-read because that would break
3977 * code-signing enforcement expectations (an
3978 * executable page must belong to a code-signed
3979 * object) and we can rely on code-signing
3980 * to re-validate the page if it gets evicted
3981 * and paged back in.
3983 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
3984 vm_copied_on_read
++;
3987 vm_object_unlock(object
);
3988 vm_object_unlock(cur_object
);
3989 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3990 vm_map_unlock_read(map
);
3991 if (real_map
!= map
) {
3992 vm_map_unlock(real_map
);
3997 if (!(fault_type
& VM_PROT_WRITE
) && !need_copy
) {
3998 if (!pmap_has_prot_policy(prot
)) {
3999 prot
&= ~VM_PROT_WRITE
;
4002 * For a protection that the pmap cares
4003 * about, we must hand over the full
4004 * set of protections (so that the pmap
4005 * layer can apply any desired policy).
4006 * This means that cs_bypass must be
4007 * set, as this can force us to pass
4010 assert(fault_info
.cs_bypass
);
4013 if (object
!= cur_object
) {
4015 * We still need to hold the top object
4016 * lock here to prevent a race between
4017 * a read fault (taking only "shared"
4018 * locks) and a write fault (taking
4019 * an "exclusive" lock on the top
4021 * Otherwise, as soon as we release the
4022 * top lock, the write fault could
4023 * proceed and actually complete before
4024 * the read fault, and the copied page's
4025 * translation could then be overwritten
4026 * by the read fault's translation for
4027 * the original page.
4029 * Let's just record what the top object
4030 * is and we'll release it later.
4032 top_object
= object
;
4035 * switch to the object that has the new page
4037 object
= cur_object
;
4038 object_lock_type
= cur_object_lock_type
;
4041 assert(m_object
== VM_PAGE_OBJECT(m
));
4044 * prepare for the pmap_enter...
4045 * object and map are both locked
4046 * m contains valid data
4047 * object == m->vmp_object
4048 * cur_object == NULL or it's been unlocked
4049 * no paging references on either object or cur_object
4051 if (top_object
!= VM_OBJECT_NULL
|| object_lock_type
!= OBJECT_LOCK_EXCLUSIVE
) {
4052 need_retry_ptr
= &need_retry
;
4054 need_retry_ptr
= NULL
;
4058 kr
= vm_fault_enter(m
,
4070 kr
= vm_fault_enter(m
,
4085 if (m_object
->internal
) {
4086 event_code
= (MACHDBG_CODE(DBG_MACH_WORKINGSET
, VM_REAL_FAULT_ADDR_INTERNAL
));
4087 } else if (m_object
->object_is_shared_cache
) {
4088 event_code
= (MACHDBG_CODE(DBG_MACH_WORKINGSET
, VM_REAL_FAULT_ADDR_SHAREDCACHE
));
4090 event_code
= (MACHDBG_CODE(DBG_MACH_WORKINGSET
, VM_REAL_FAULT_ADDR_EXTERNAL
));
4093 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
, event_code
, trace_real_vaddr
, (fault_info
.user_tag
<< 16) | (caller_prot
<< 8) | type_of_fault
, m
->vmp_offset
, get_current_unique_pid(), 0);
4094 if (need_retry
== FALSE
) {
4095 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET
, VM_REAL_FAULT_FAST
), get_current_unique_pid(), 0, 0, 0, 0);
4097 DTRACE_VM6(real_fault
, vm_map_offset_t
, real_vaddr
, vm_map_offset_t
, m
->vmp_offset
, int, event_code
, int, caller_prot
, int, type_of_fault
, int, fault_info
.user_tag
);
4099 if (kr
== KERN_SUCCESS
&&
4100 physpage_p
!= NULL
) {
4101 /* for vm_map_wire_and_extract() */
4102 *physpage_p
= VM_PAGE_GET_PHYS_PAGE(m
);
4103 if (prot
& VM_PROT_WRITE
) {
4104 vm_object_lock_assert_exclusive(m_object
);
4105 m
->vmp_dirty
= TRUE
;
4109 if (top_object
!= VM_OBJECT_NULL
) {
4111 * It's safe to drop the top object
4112 * now that we've done our
4113 * vm_fault_enter(). Any other fault
4114 * in progress for that virtual
4115 * address will either find our page
4116 * and translation or put in a new page
4119 vm_object_unlock(top_object
);
4120 top_object
= VM_OBJECT_NULL
;
4123 if (need_collapse
== TRUE
) {
4124 vm_object_collapse(object
, offset
, TRUE
);
4127 if (need_retry
== FALSE
&&
4128 (type_of_fault
== DBG_PAGEIND_FAULT
|| type_of_fault
== DBG_PAGEINV_FAULT
|| type_of_fault
== DBG_CACHE_HIT_FAULT
)) {
4130 * evaluate access pattern and update state
4131 * vm_fault_deactivate_behind depends on the
4132 * state being up to date
4134 vm_fault_is_sequential(m_object
, cur_offset
, fault_info
.behavior
);
4136 vm_fault_deactivate_behind(m_object
, cur_offset
, fault_info
.behavior
);
4139 * That's it, clean up and return.
4142 PAGE_WAKEUP_DONE(m
);
4145 if (need_retry
== FALSE
&& !m_object
->internal
&& (fault_type
& VM_PROT_WRITE
)) {
4146 vm_object_paging_begin(m_object
);
4148 assert(written_on_object
== VM_OBJECT_NULL
);
4149 written_on_object
= m_object
;
4150 written_on_pager
= m_object
->pager
;
4151 written_on_offset
= m_object
->paging_offset
+ m
->vmp_offset
;
4153 vm_object_unlock(object
);
4155 vm_map_unlock_read(map
);
4156 if (real_map
!= map
) {
4157 vm_map_unlock(real_map
);
4160 if (need_retry
== TRUE
) {
4162 * vm_fault_enter couldn't complete the PMAP_ENTER...
4163 * at this point we don't hold any locks so it's safe
4164 * to ask the pmap layer to expand the page table to
4165 * accommodate this mapping... once expanded, we'll
4166 * re-drive the fault which should result in vm_fault_enter
4167 * being able to successfully enter the mapping this time around
4169 (void)pmap_enter_options(
4170 pmap
, vaddr
, 0, 0, 0, 0, 0,
4171 PMAP_OPTIONS_NOENTER
, NULL
);
			/*
			 * COPY ON WRITE FAULT
			 */
			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);

			/*
			 * If objects match, then
			 * object->copy must not be NULL (else control
			 * would be in previous code block), and we
			 * have a potential push into the copy object
			 * which we can't cope with here.
			 */
			if (cur_object == object) {
				/*
				 * must take the slow path to
				 * deal with the copy push
				 */
				break;
			}

			/*
			 * This is now a shadow based copy on write
			 * fault -- it requires a copy up the shadow
			 * chain.
			 */
			assert(m_object == VM_PAGE_OBJECT(m));

			if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
			    VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) {
				goto upgrade_lock_and_retry;
			}

			/*
			 * Allocate a page in the original top level
			 * object. Give up if allocate fails.  Also
			 * need to remember current page, as it's the
			 * source of the copy.
			 *
			 * at this point we hold locks on both
			 * object and cur_object... no need to take
			 * paging refs or mark pages BUSY since
			 * we don't drop either object lock until
			 * the page has been copied and inserted
			 */
			cur_m = m;
			m = vm_page_grab_options(grab_options);
			m_object = NULL;

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}
			/*
			 * Now do the copy.  Mark the source page busy...
			 *
			 * NOTE: This code holds the map lock across
			 * the page copy.
			 */
			vm_page_copy(cur_m, m);
			vm_page_insert(m, object, offset);
			m_object = object;
			SET_PAGE_DIRTY(m, FALSE);

			/*
			 * Now cope with the source page and object
			 */
			if (object->ref_count > 1 && cur_m->vmp_pmapped) {
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
			}

			if (cur_m->vmp_clustered) {
				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
				VM_PAGE_CONSUME_CLUSTERED(cur_m);

				vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
			}
			need_collapse = TRUE;

			if (!cur_object->internal &&
			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
				/*
				 * The object from which we've just
				 * copied a page is most probably backed
				 * by a vnode.  We don't want to waste too
				 * much time trying to collapse the VM objects
				 * and create a bottleneck when several tasks
				 * map the same file.
				 */
				if (cur_object->copy == object) {
					/*
					 * Shared mapping or no COW yet.
					 * We can never collapse a copy
					 * object into its backing object.
					 */
					need_collapse = FALSE;
				} else if (cur_object->copy == object->shadow &&
				    object->shadow->resident_page_count == 0) {
					/*
					 * Shared mapping after a COW occurred.
					 */
					need_collapse = FALSE;
				}
			}
			vm_object_unlock(cur_object);

			if (need_collapse == FALSE) {
				vm_fault_collapse_skipped++;
			}
			vm_fault_collapse_total++;

			type_of_fault = DBG_COW_FAULT;
			VM_STAT_INCR(cow_faults);
			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
			current_task()->cow_faults++;

			m_object = object;

			goto FastPmapEnter;
		}
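		/*
		 * Illustrative sketch (not part of the original source): the
		 * COW fast path above reduces to four steps once both object
		 * locks are held -- grab a fresh page, copy the source page
		 * into it, insert the copy at the faulting offset in the top
		 * object, and mark it dirty so it cannot be silently reclaimed.
		 */
#if 0	/* illustrative only, assumes both object locks are held */
		vm_page_t copy_page = vm_page_grab_options(grab_options);

		if (copy_page != VM_PAGE_NULL) {
			vm_page_copy(cur_m, copy_page);			/* physical copy */
			vm_page_insert(copy_page, object, offset);	/* visible at top level */
			SET_PAGE_DIRTY(copy_page, FALSE);		/* don't lose the data */
		}
#endif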
		/*
		 * No page at cur_object, cur_offset... m == NULL
		 */
		if (cur_object->pager_created) {
			int	compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

			if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
				int		my_fault_type;
				int		c_flags = C_DONT_BLOCK;
				boolean_t	insert_cur_object = FALSE;

				/*
				 * May have to talk to a pager...
				 * if so, take the slow path by
				 * doing a 'break' from the while (TRUE) loop
				 *
				 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
				 * if the compressor is active and the page exists there
				 */
				if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
					break;
				}

				if (map == kernel_map || real_map == kernel_map) {
					/*
					 * can't call into the compressor with the kernel_map
					 * lock held, since the compressor may try to operate
					 * on the kernel map in order to return an empty c_segment
					 */
					break;
				}
				if (object != cur_object) {
					if (fault_type & VM_PROT_WRITE) {
						c_flags |= C_KEEP;
					} else {
						insert_cur_object = TRUE;
					}
				}
				if (insert_cur_object == TRUE) {
					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we can no longer be
							 * certain about cur_object (since we
							 * don't hold a reference on it)...
							 * first drop the top object lock
							 */
							vm_object_unlock(object);

							vm_map_unlock_read(map);
							if (real_map != map) {
								vm_map_unlock(real_map);
							}

							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (object != cur_object) {
						/*
						 * we can't go for the upgrade on the top
						 * lock since the upgrade may block waiting
						 * for readers to drain... since we hold
						 * cur_object locked at this point, waiting
						 * for the readers to drain would represent
						 * a lock order inversion since the lock order
						 * for objects is the reference order in the
						 * shadow chain
						 */
						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						vm_map_unlock_read(map);
						if (real_map != map) {
							vm_map_unlock(real_map);
						}

						goto RetryFault;
					}
					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				m = vm_page_grab_options(grab_options);
				m_object = NULL;

				if (m == VM_PAGE_NULL) {
					/*
					 * no free page currently available...
					 * must take the slow path
					 */
					break;
				}

				/*
				 * The object is and remains locked
				 * so no need to take a
				 * "paging_in_progress" reference.
				 */
				boolean_t	shared_lock;
				if ((object == cur_object &&
				    object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
				    (object != cur_object &&
				    cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
					shared_lock = FALSE;
				} else {
					shared_lock = TRUE;
				}

				kr = vm_compressor_pager_get(
					cur_object->pager,
					(cur_offset +
					cur_object->paging_offset),
					VM_PAGE_GET_PHYS_PAGE(m),
					&my_fault_type,
					c_flags,
					&compressed_count_delta);

				vm_compressor_pager_count(
					cur_object->pager,
					compressed_count_delta,
					shared_lock,
					cur_object);

				if (kr != KERN_SUCCESS) {
					vm_page_release(m, FALSE);
					m = VM_PAGE_NULL;
					break;
				}
				m->vmp_dirty = TRUE;

				/*
				 * If the object is purgeable, its
				 * owner's purgeable ledgers will be
				 * updated in vm_page_insert() but the
				 * page was also accounted for in a
				 * "compressed purgeable" ledger, so
				 * update that now.
				 */
				if (object != cur_object &&
				    !insert_cur_object) {
					/*
					 * We're not going to insert
					 * the decompressed page into
					 * the object it came from.
					 *
					 * We're dealing with a
					 * copy-on-write fault on
					 * "object".
					 * We're going to decompress
					 * the page directly into the
					 * target "object" while
					 * keeping the compressed
					 * page for "cur_object", so
					 * no ledger update in that
					 * case.
					 */
				} else if (((cur_object->purgable ==
				    VM_PURGABLE_DENY) &&
				    (!cur_object->vo_ledger_tag)) ||
				    (cur_object->vo_owner ==
				    NULL)) {
					/*
					 * "cur_object" is not purgeable
					 * and is not ledger-tagged, or
					 * there's no owner for it,
					 * so no owner's ledgers to
					 * update.
					 */
				} else {
					/*
					 * One less compressed
					 * purgeable/tagged page for
					 * cur_object's owner.
					 */
					vm_object_owner_compressed_update(
						cur_object,
						-1);
				}

				if (insert_cur_object) {
					vm_page_insert(m, cur_object, cur_offset);
					m_object = cur_object;
				} else {
					vm_page_insert(m, object, offset);
					m_object = object;
				}

				if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
					/*
					 * If the page is not cacheable,
					 * we can't let its contents
					 * linger in the data cache
					 * after the decompression.
					 */
					pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
				}

				type_of_fault = my_fault_type;

				VM_STAT_DECOMPRESSIONS();

				if (cur_object != object) {
					if (insert_cur_object) {
						top_object = object;
						/*
						 * switch to the object that has the new page
						 */
						object = cur_object;
						object_lock_type = cur_object_lock_type;
					} else {
						vm_object_unlock(cur_object);
						cur_object = object;
					}
				}
				goto FastPmapEnter;
			}
			/*
			 * existence map present and indicates
			 * that the pager doesn't have this page
			 */
		}
		if (cur_object->shadow == VM_OBJECT_NULL ||
		    resilient_media_retry) {
			/*
			 * Zero fill fault.  Page gets
			 * inserted into the original object.
			 */
			if (cur_object->shadow_severed ||
			    VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
			    cur_object == compressor_object ||
			    cur_object == kernel_object ||
			    cur_object == vm_submap_object) {
				if (object != cur_object) {
					vm_object_unlock(cur_object);
				}
				vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map) {
					vm_map_unlock(real_map);
				}

				kr = KERN_MEMORY_ERROR;
				goto done;
			}
			if (cur_object != object) {
				vm_object_unlock(cur_object);

				cur_object = object;
			}
			if (object_lock_type == OBJECT_LOCK_SHARED) {
				object_lock_type = OBJECT_LOCK_EXCLUSIVE;

				if (vm_object_lock_upgrade(object) == FALSE) {
					/*
					 * couldn't upgrade so do a full retry on the fault
					 * since we dropped the object lock which
					 * could allow another thread to insert
					 * a page at this offset
					 */
					vm_map_unlock_read(map);
					if (real_map != map) {
						vm_map_unlock(real_map);
					}

					goto RetryFault;
				}
			}
			if (!object->internal) {
				panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
			}
			m = vm_page_alloc(object, offset);
			m_object = NULL;

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}
			m_object = object;

			/*
			 * Now zero fill page...
			 * the page is probably going to
			 * be written soon, so don't bother
			 * to clear the modified bit
			 *
			 * NOTE: This code holds the map
			 * lock across the zero fill.
			 */
			type_of_fault = vm_fault_zero_page(m, map->no_zero_fill);

			goto FastPmapEnter;
		}
		/*
		 * On to the next level in the shadow chain
		 */
		cur_offset += cur_object->vo_shadow_offset;
		new_object = cur_object->shadow;

		/*
		 * take the new_object's lock with the indicated state
		 */
		if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
			vm_object_lock_shared(new_object);
		} else {
			vm_object_lock(new_object);
		}

		if (cur_object != object) {
			vm_object_unlock(cur_object);
		}

		cur_object = new_object;

		continue;
	}
	/*
	 * Cleanup from fast fault failure.  Drop any object
	 * lock other than original and drop map lock.
	 */
	if (object != cur_object) {
		vm_object_unlock(cur_object);
	}

	/*
	 * must own the object lock exclusively at this point
	 */
	if (object_lock_type == OBJECT_LOCK_SHARED) {
		object_lock_type = OBJECT_LOCK_EXCLUSIVE;

		if (vm_object_lock_upgrade(object) == FALSE) {
			/*
			 * couldn't upgrade, so explicitly
			 * take the lock exclusively
			 * no need to retry the fault at this
			 * point since "vm_fault_page" will
			 * completely re-evaluate the state
			 */
			vm_object_lock(object);
		}
	}

	vm_map_unlock_read(map);
	if (real_map != map) {
		vm_map_unlock(real_map);
	}
	if (__improbable(object == compressor_object ||
	    object == kernel_object ||
	    object == vm_submap_object)) {
		/*
		 * These objects are explicitly managed and populated by the
		 * kernel.  The virtual ranges backed by these objects should
		 * either have wired pages or "holes" that are not supposed to
		 * be accessed at all until they get explicitly populated.
		 * We should never have to resolve a fault on a mapping backed
		 * by one of these VM objects and providing a zero-filled page
		 * would be wrong here, so let's fail the fault and let the
		 * caller crash or recover.
		 */
		vm_object_unlock(object);
		kr = KERN_MEMORY_ERROR;
		goto done;
	}

	assert(object != compressor_object);
	assert(object != kernel_object);
	assert(object != vm_submap_object);

	if (resilient_media_retry) {
		/*
		 * We could get here if we failed to get a free page
		 * to zero-fill and had to take the slow path again.
		 * Reset our "recovery-from-failed-media" state.
		 */
		assert(resilient_media_object != VM_OBJECT_NULL);
		assert(resilient_media_offset != (vm_object_offset_t)-1);
		/* release our extra reference on failed object */
//		printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
		vm_object_deallocate(resilient_media_object);
		resilient_media_object = VM_OBJECT_NULL;
		resilient_media_offset = (vm_object_offset_t)-1;
		resilient_media_retry = FALSE;
	}

	/*
	 * Make a reference to this object to
	 * prevent its disposal while we are messing with
	 * it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their
	 * shadows (and copies), they will stay around as well.
	 */
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	set_thread_pagein_error(cthread, 0);

	result_page = VM_PAGE_NULL;
	kr = vm_fault_page(object, offset, fault_type,
	    (change_wiring && !wired),
	    FALSE,                     /* page not looked up */
	    &prot, &result_page, &top_page,
	    &type_of_fault,
	    &error_code, map->no_zero_fill,
	    FALSE, &fault_info);
	/*
	 * if kr != VM_FAULT_SUCCESS, then the paging reference
	 * has been dropped and the object unlocked... the ref_count
	 * is still held
	 *
	 * if kr == VM_FAULT_SUCCESS, then the paging reference
	 * is still held along with the ref_count on the original object
	 *
	 * the object is returned locked with a paging reference
	 *
	 * if top_page != NULL, then it's BUSY and the
	 * object it belongs to has a paging reference
	 * but is returned unlocked
	 */
	if (kr != VM_FAULT_SUCCESS &&
	    kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
		if (kr == VM_FAULT_MEMORY_ERROR &&
		    fault_info.resilient_media) {
			assertf(object->internal, "object %p", object);
			/*
			 * This fault failed but the mapping was
			 * "media resilient", so we'll retry the fault in
			 * recovery mode to get a zero-filled page in the
			 * mapping.
			 * Keep the reference on the failing object so
			 * that we can check that the mapping is still
			 * pointing to it when we retry the fault.
			 */
//			printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
			assert(!resilient_media_retry); /* no double retry */
			assert(resilient_media_object == VM_OBJECT_NULL);
			assert(resilient_media_offset == (vm_object_offset_t)-1);
			resilient_media_retry = TRUE;
			resilient_media_object = object;
			resilient_media_offset = offset;
//			printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
		} else {
			/*
			 * we didn't succeed, lose the object reference
			 * immediately.
			 */
			vm_object_deallocate(object);
		}
		object = VM_OBJECT_NULL; /* no longer valid */

		/*
		 * See why we failed, and take corrective action.
		 */
		switch (kr) {
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait((change_wiring) ?
			    THREAD_UNINT :
			    THREAD_ABORTSAFE)) {
				goto RetryFault;
			}
		/*
		 * fall thru
		 */
		case VM_FAULT_INTERRUPTED:
			kr = KERN_ABORTED;
			goto done;
		case VM_FAULT_RETRY:
			goto RetryFault;
		case VM_FAULT_MEMORY_ERROR:
			if (error_code) {
				kr = error_code;
			} else {
				kr = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("vm_fault: unexpected error 0x%x from "
			    "vm_fault_page()\n", kr);
		}
	}
	m = result_page;
	if (m != VM_PAGE_NULL) {
		m_object = VM_PAGE_OBJECT(m);
		assert((change_wiring && !wired) ?
		    (top_page == VM_PAGE_NULL) :
		    ((top_page == VM_PAGE_NULL) == (m_object == object)));
	}

	/*
	 * What to do with the resulting page from vm_fault_page
	 * if it doesn't get entered into the physical map:
	 */
#define RELEASE_PAGE(m)                         \
	MACRO_BEGIN                             \
	PAGE_WAKEUP_DONE(m);                    \
	if ( !VM_PAGE_PAGEABLE(m)) {            \
	        vm_page_lockspin_queues();      \
	        if ( !VM_PAGE_PAGEABLE(m))      \
	                vm_page_activate(m);    \
	        vm_page_unlock_queues();        \
	}                                       \
	MACRO_END
	object_locks_dropped = FALSE;
	/*
	 * We must verify that the maps have not changed
	 * since our last lookup. vm_map_verify() needs the
	 * map lock (shared) but we are holding object locks.
	 * So we do a try_lock() first and, if that fails, we
	 * drop the object locks and go in for the map lock again.
	 */
	if (!vm_map_try_lock_read(original_map)) {
		if (m != VM_PAGE_NULL) {
			old_copy_object = m_object->copy;
			vm_object_unlock(m_object);
		} else {
			old_copy_object = VM_OBJECT_NULL;
			vm_object_unlock(object);
		}

		object_locks_dropped = TRUE;

		vm_map_lock_read(original_map);
	}

	if ((map != original_map) || !vm_map_verify(map, &version)) {
		if (object_locks_dropped == FALSE) {
			if (m != VM_PAGE_NULL) {
				old_copy_object = m_object->copy;
				vm_object_unlock(m_object);
			} else {
				old_copy_object = VM_OBJECT_NULL;
				vm_object_unlock(object);
			}

			object_locks_dropped = TRUE;
		}
		/*
		 * no object locks are held at this point
		 */
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		/*
		 * To avoid trying to write_lock the map while another
		 * thread has it read_locked (in vm_map_pageable), we
		 * do not try for write permission.  If the page is
		 * still writable, we will get write permission.  If it
		 * is not, or has been marked needs_copy, we enter the
		 * mapping without write permission, and will merely
		 * take another fault.
		 */
		map = original_map;

		kr = vm_map_lookup_locked(&map, vaddr,
		    fault_type & ~VM_PROT_WRITE,
		    OBJECT_LOCK_EXCLUSIVE, &version,
		    &retry_object, &retry_offset, &retry_prot,
		    &wired,
		    &fault_info,
		    &real_map);
		pmap = real_map->pmap;

		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(map);

			if (m != VM_PAGE_NULL) {
				assert(VM_PAGE_OBJECT(m) == m_object);

				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup and do the
				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
				 */
				vm_object_lock(m_object);

				RELEASE_PAGE(m);

				vm_fault_cleanup(m_object, top_page);
			} else {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup
				 */
				vm_object_lock(object);

				vm_fault_cleanup(object, top_page);
			}
			vm_object_deallocate(object);

			goto done;
		}
		vm_object_unlock(retry_object);

		if ((retry_object != object) || (retry_offset != offset)) {
			vm_map_unlock_read(map);
			if (real_map != map) {
				vm_map_unlock(real_map);
			}

			if (m != VM_PAGE_NULL) {
				assert(VM_PAGE_OBJECT(m) == m_object);

				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup and do the
				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
				 */
				vm_object_lock(m_object);

				RELEASE_PAGE(m);

				vm_fault_cleanup(m_object, top_page);
			} else {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup
				 */
				vm_object_lock(object);

				vm_fault_cleanup(object, top_page);
			}
			vm_object_deallocate(object);

			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object
		 * has been copied while we left the map unlocked.
		 */
		if (pmap_has_prot_policy(retry_prot)) {
			/* If the pmap layer cares, pass the full set. */
			prot = retry_prot;
		} else {
			prot &= retry_prot;
		}
	}

	if (object_locks_dropped == TRUE) {
		if (m != VM_PAGE_NULL) {
			vm_object_lock(m_object);

			if (m_object->copy != old_copy_object) {
				/*
				 * The copy object changed while the top-level object
				 * was unlocked, so take away write permission.
				 */
				assert(!pmap_has_prot_policy(prot));
				prot &= ~VM_PROT_WRITE;
			}
		} else {
			vm_object_lock(object);
		}

		object_locks_dropped = FALSE;
	}

	if (!need_copy &&
	    !fault_info.no_copy_on_read &&
	    m != VM_PAGE_NULL &&
	    VM_PAGE_OBJECT(m) != object &&
	    !VM_PAGE_OBJECT(m)->pager_trusted &&
	    vm_protect_privileged_from_untrusted &&
	    !((prot & VM_PROT_EXECUTE) &&
	    VM_PAGE_OBJECT(m)->code_signed &&
	    cs_process_enforcement(NULL)) &&
	    current_proc_is_privileged()) {
		/*
		 * We found the page we want in an "untrusted" VM object
		 * down the shadow chain.  Since the target is "privileged"
		 * we want to perform a copy-on-read of that page, so that the
		 * mapped object gets a stable copy and does not have to
		 * rely on the "untrusted" object to provide the same
		 * contents if the page gets reclaimed and has to be paged
		 * in again later on.
		 *
		 * Special case: if the mapping is executable and the untrusted
		 * object is code-signed and the process is "cs_enforced", we
		 * do not copy-on-read because that would break code-signing
		 * enforcement expectations (an executable page must belong
		 * to a code-signed object) and we can rely on code-signing
		 * to re-validate the page if it gets evicted and paged back in.
		 */
//		printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
		vm_copied_on_read++;
		need_copy_on_read = TRUE;
	} else {
		need_copy_on_read = FALSE;
	}
	/*
	 * If we want to wire down this page, but no longer have
	 * adequate permissions, we must start all over.
	 * If we decided to copy-on-read, we must also start all over.
	 */
	if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
	    need_copy_on_read) {
		vm_map_unlock_read(map);
		if (real_map != map) {
			vm_map_unlock(real_map);
		}

		if (m != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(m) == m_object);

			RELEASE_PAGE(m);

			vm_fault_cleanup(m_object, top_page);
		} else {
			vm_fault_cleanup(object, top_page);
		}

		vm_object_deallocate(object);

		goto RetryFault;
	}
	if (m != VM_PAGE_NULL) {
		/*
		 * Put this page into the physical map.
		 * We had to do the unlock above because pmap_enter
		 * may cause other faults.  The page may be on
		 * the pageout queues.  If the pageout daemon comes
		 * across the page, it will remove it from the queues.
		 */
		if (caller_pmap) {
			kr = vm_fault_enter(m,
			    caller_pmap,
			    caller_pmap_addr,
			    prot,
			    caller_prot,
			    wired,
			    change_wiring,
			    wire_tag,
			    &fault_info,
			    NULL,
			    &type_of_fault);
		} else {
			kr = vm_fault_enter(m,
			    pmap,
			    vaddr,
			    prot,
			    caller_prot,
			    wired,
			    change_wiring,
			    wire_tag,
			    &fault_info,
			    NULL,
			    &type_of_fault);
		}
		assert(VM_PAGE_OBJECT(m) == m_object);

		{
			int	event_code = 0;

			if (m_object->internal) {
				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
			} else if (m_object->object_is_shared_cache) {
				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
			} else {
				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
			}

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
			KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0);

			DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
		}
		if (kr != KERN_SUCCESS) {
			/* abort this page fault */
			vm_map_unlock_read(map);
			if (real_map != map) {
				vm_map_unlock(real_map);
			}
			PAGE_WAKEUP_DONE(m);
			vm_fault_cleanup(m_object, top_page);
			vm_object_deallocate(object);
			goto done;
		}
		if (physpage_p != NULL) {
			/* for vm_map_wire_and_extract() */
			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
			if (prot & VM_PROT_WRITE) {
				vm_object_lock_assert_exclusive(m_object);
				m->vmp_dirty = TRUE;
			}
		}
	} else {
		vm_map_entry_t		entry;
		vm_map_offset_t		laddr;
		vm_map_offset_t		ldelta, hdelta;
		int			superpage;

		/*
		 * do a pmap block mapping from the physical address
		 * in the device
		 */

		if (real_map != map) {
			vm_map_unlock(real_map);
		}

		if (original_map != map) {
			vm_map_unlock_read(map);
			vm_map_lock_read(original_map);
			map = original_map;
		}
		real_map = map;

		laddr = vaddr;
		hdelta = 0xFFFFF000;
		ldelta = 0xFFFFF000;

		while (vm_map_lookup_entry(map, laddr, &entry)) {
			if (ldelta > (laddr - entry->vme_start)) {
				ldelta = laddr - entry->vme_start;
			}
			if (hdelta > (entry->vme_end - laddr)) {
				hdelta = entry->vme_end - laddr;
			}
			if (entry->is_sub_map) {
				laddr = ((laddr - entry->vme_start)
				    + VME_OFFSET(entry));
				vm_map_lock_read(VME_SUBMAP(entry));

				if (map != real_map) {
					vm_map_unlock_read(map);
				}
				if (entry->use_pmap) {
					vm_map_unlock_read(real_map);
					real_map = VME_SUBMAP(entry);
				}
				map = VME_SUBMAP(entry);
			} else {
				break;
			}
		}

		if (vm_map_lookup_entry(map, laddr, &entry) &&
		    (VME_OBJECT(entry) != NULL) &&
		    (VME_OBJECT(entry) == object)) {
			if (!object->pager_created &&
			    object->phys_contiguous &&
			    VME_OFFSET(entry) == 0 &&
			    (entry->vme_end - entry->vme_start == object->vo_size) &&
			    VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
				superpage = VM_MEM_SUPERPAGE;
			} else {
				superpage = 0;
			}

			if (superpage && physpage_p) {
				/* for vm_map_wire_and_extract() */
				*physpage_p = (ppnum_t)
				    ((((vm_map_offset_t)
				    object->vo_shadow_offset)
				    + VME_OFFSET(entry)
				    + (laddr - entry->vme_start))
				    >> PAGE_SHIFT);
			}

			if (caller_pmap) {
				/*
				 * Set up a block mapped area
				 */
				assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
				kr = pmap_map_block(caller_pmap,
				    (addr64_t)(caller_pmap_addr - ldelta),
				    (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
				    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
				    (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
				    (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);

				if (kr != KERN_SUCCESS) {
					goto cleanup;
				}
			} else {
				/*
				 * Set up a block mapped area
				 */
				assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
				kr = pmap_map_block(real_map->pmap,
				    (addr64_t)(vaddr - ldelta),
				    (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
				    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
				    (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
				    (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);

				if (kr != KERN_SUCCESS) {
					goto cleanup;
				}
			}
		}
	}
	/*
	 * TODO: could most of the done cases just use cleanup?
	 */
cleanup:
	/*
	 * Unlock everything, and return
	 */
	vm_map_unlock_read(map);
	if (real_map != map) {
		vm_map_unlock(real_map);
	}

	if (m != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(m) == m_object);

		if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
			vm_object_paging_begin(m_object);

			assert(written_on_object == VM_OBJECT_NULL);
			written_on_object = m_object;
			written_on_pager = m_object->pager;
			written_on_offset = m_object->paging_offset + m->vmp_offset;
		}
		PAGE_WAKEUP_DONE(m);

		vm_fault_cleanup(m_object, top_page);
	} else {
		vm_fault_cleanup(object, top_page);
	}

	vm_object_deallocate(object);

#undef  RELEASE_PAGE

done:
	thread_interrupt_level(interruptible_state);

	if (resilient_media_object != VM_OBJECT_NULL) {
		assert(resilient_media_retry);
		assert(resilient_media_offset != (vm_object_offset_t)-1);
		/* release extra reference on failed object */
//		printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
		vm_object_deallocate(resilient_media_object);
		resilient_media_object = VM_OBJECT_NULL;
		resilient_media_offset = (vm_object_offset_t)-1;
		resilient_media_retry = FALSE;
	}
	assert(!resilient_media_retry);

	/*
	 * Only I/O throttle on faults which cause a pagein/swapin.
	 */
	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
		throttle_lowpri_io(1);
	} else {
		if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
			if ((throttle_delay = vm_page_throttled(TRUE))) {
				if (vm_debug_events) {
					if (type_of_fault == DBG_COMPRESSOR_FAULT) {
						VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
					} else if (type_of_fault == DBG_COW_FAULT) {
						VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
					} else {
						VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
					}
				}
				delay(throttle_delay);
			}
		}
	}

	if (written_on_object) {
		vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);

		vm_object_lock(written_on_object);
		vm_object_paging_end(written_on_object);
		vm_object_unlock(written_on_object);

		written_on_object = VM_OBJECT_NULL;
	}

	if (rtfault) {
		vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
	    ((uint64_t)trace_vaddr >> 32),
	    trace_vaddr,
	    kr,
	    type_of_fault,
	    0);

	return kr;
}
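/*
 * Illustrative sketch (not part of the original source): an in-kernel
 * caller resolves a missing translation by driving this fault path
 * directly, as vm_fault_unwire() does further below.  The argument
 * list mirrors the vm_fault() call visible in that routine;
 * "some_map"/"some_va" are placeholders.
 */
#if 0	/* illustrative only */
	kern_return_t fkr;

	fkr = vm_fault(some_map, some_va, VM_PROT_READ,
	    FALSE,			/* not a change_wiring fault */
	    VM_KERN_MEMORY_NONE, THREAD_UNINT,
	    NULL, 0);			/* no alternate pmap */
	if (fkr != KERN_SUCCESS) {
		/* address not backed, protection failure, etc. */
	}
#endif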
/*
 * vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
kern_return_t
vm_fault_wire(
	vm_map_t        map,
	vm_map_entry_t  entry,
	vm_prot_t       prot,
	vm_tag_t        wire_tag,
	pmap_t          pmap,
	vm_map_offset_t pmap_addr,
	ppnum_t         *physpage_p)
{
	vm_map_offset_t va;
	vm_map_offset_t end_addr = entry->vme_end;
	kern_return_t   rc;

	assert(entry->in_transition);

	if ((VME_OBJECT(entry) != NULL) &&
	    !entry->is_sub_map &&
	    VME_OBJECT(entry)->phys_contiguous) {
		return KERN_SUCCESS;
	}

	/*
	 * Inform the physical mapping system that the
	 * range of addresses may not fault, so that
	 * page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, pmap_addr,
	    pmap_addr + (end_addr - entry->vme_start), FALSE);

	/*
	 * We simulate a fault to get the page and enter it
	 * in the physical map.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
		    pmap_addr + (va - entry->vme_start),
		    physpage_p);
		if (rc != KERN_SUCCESS) {
			rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
			    ((pmap == kernel_pmap)
			    ? THREAD_UNINT
			    : THREAD_ABORTSAFE),
			    pmap,
			    (pmap_addr +
			    (va - entry->vme_start)),
			    physpage_p);
			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
		}

		if (rc != KERN_SUCCESS) {
			struct vm_map_entry     tmp_entry = *entry;

			/* unwire wired pages */
			tmp_entry.vme_end = va;
			vm_fault_unwire(map,
			    &tmp_entry, FALSE, pmap, pmap_addr);

			return rc;
		}
	}
	return KERN_SUCCESS;
}
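/*
 * Illustrative sketch (not part of the original source): wiring and
 * unwiring must be paired on the same entry/pmap/pmap_addr triple,
 * which is exactly what the partial-failure path above does with its
 * temporary entry.  A hedged caller-side view, with VM_KERN_MEMORY_NONE
 * standing in for a real wire tag:
 */
#if 0	/* illustrative only, "entry" assumed to be in transition */
	kern_return_t krc;

	krc = vm_fault_wire(map, entry, VM_PROT_READ, VM_KERN_MEMORY_NONE,
	    map->pmap, entry->vme_start, NULL);
	if (krc == KERN_SUCCESS) {
		/* later: undo with the same pmap/pmap_addr */
		vm_fault_unwire(map, entry, FALSE, map->pmap, entry->vme_start);
	}
#endif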
/*
 * vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(
	vm_map_t        map,
	vm_map_entry_t  entry,
	boolean_t       deallocate,
	pmap_t          pmap,
	vm_map_offset_t pmap_addr)
{
	vm_map_offset_t va;
	vm_map_offset_t end_addr = entry->vme_end;
	vm_object_t     object;
	vm_prot_t       prot;
	struct vm_object_fault_info fault_info = {};
	unsigned int    unwired_pages = 0;

	object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);

	/*
	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
	 * do anything since such memory is wired by default.  So we don't have
	 * anything to undo here.
	 */

	if (object != VM_OBJECT_NULL && object->phys_contiguous) {
		return;
	}

	fault_info.interruptible = THREAD_UNINT;
	fault_info.behavior = entry->behavior;
	fault_info.user_tag = VME_ALIAS(entry);
	if (entry->iokit_acct ||
	    (!entry->is_sub_map && !entry->use_pmap)) {
		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
	}
	fault_info.lo_offset = VME_OFFSET(entry);
	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
	fault_info.no_cache = entry->no_cache;
	fault_info.stealth = TRUE;

	/*
	 * Since the pages are wired down, we must be able to
	 * get their mappings from the physical map system.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		if (object == VM_OBJECT_NULL) {
			if (pmap) {
				pmap_change_wiring(pmap,
				    pmap_addr + (va - entry->vme_start), FALSE);
			}
			(void) vm_fault(map, va, VM_PROT_NONE,
			    TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap,
			    pmap_addr + (va - entry->vme_start));
		} else {
			vm_page_t       result_page;
			vm_page_t       top_page;
			vm_object_t     result_object;
			vm_fault_return_t result;

			/* cap cluster size at maximum UPL size */
			upl_size_t cluster_size;
			if (os_sub_overflow(end_addr, va, &cluster_size)) {
				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
			}
			fault_info.cluster_size = cluster_size;

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					object,
					(VME_OFFSET(entry) +
					(va - entry->vme_start)),
					VM_PROT_NONE, TRUE,
					FALSE, /* page not looked up */
					&prot, &result_page, &top_page,
					(int *)0,
					NULL, map->no_zero_fill,
					FALSE, &fault_info);
			} while (result == VM_FAULT_RETRY);

			/*
			 * If this was a mapping to a file on a device that has been forcibly
			 * unmounted, then we won't get a page back from vm_fault_page().  Just
			 * move on to the next one in case the remaining pages are mapped from
			 * different objects.  During a forced unmount, the object is terminated
			 * so the alive flag will be false if this happens.  A forced unmount
			 * will occur when an external disk is unplugged before the user does an
			 * eject, so we don't want to panic in that situation.
			 */

			if (result == VM_FAULT_MEMORY_ERROR && !object->alive) {
				continue;
			}

			if (result == VM_FAULT_MEMORY_ERROR &&
			    object == kernel_object) {
				/*
				 * This must have been allocated with
				 * KMA_KOBJECT and KMA_VAONLY and there's
				 * no physical page at this offset.
				 * We're done (no page to free).
				 */
				assert(deallocate);
				continue;
			}

			if (result != VM_FAULT_SUCCESS) {
				panic("vm_fault_unwire: failure");
			}

			result_object = VM_PAGE_OBJECT(result_page);

			if (deallocate) {
				assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
				    vm_page_fictitious_addr);
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
				if (VM_PAGE_WIRED(result_page)) {
					unwired_pages++;
				}
				VM_PAGE_FREE(result_page);
			} else {
				if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) {
					pmap_change_wiring(pmap,
					    pmap_addr + (va - entry->vme_start), FALSE);
				}

				if (VM_PAGE_WIRED(result_page)) {
					vm_page_lockspin_queues();
					vm_page_unwire(result_page, TRUE);
					vm_page_unlock_queues();
					unwired_pages++;
				}
				if (entry->zero_wired_pages) {
					pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
					entry->zero_wired_pages = FALSE;
				}

				PAGE_WAKEUP_DONE(result_page);
			}
			vm_fault_cleanup(result_object, top_page);
		}
	}

	/*
	 * Inform the physical mapping system that the range
	 * of addresses may fault, so that page tables and
	 * such may be unwired themselves.
	 */

	pmap_pageable(pmap, pmap_addr,
	    pmap_addr + (end_addr - entry->vme_start), TRUE);

	if (kernel_object == object) {
		vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages));
	}
}
/*
 * vm_fault_wire_fast:
 *
 *	Handle common case of a wire down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
static kern_return_t
vm_fault_wire_fast(
	__unused vm_map_t       map,
	vm_map_offset_t         va,
	__unused vm_prot_t      caller_prot,
	vm_tag_t                wire_tag,
	vm_map_entry_t          entry,
	pmap_t                  pmap,
	vm_map_offset_t         pmap_addr,
	ppnum_t                 *physpage_p)
{
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_page_t               m;
	vm_prot_t               prot;
	thread_t                thread = current_thread();
	int                     type_of_fault;
	kern_return_t           kr;
	struct vm_object_fault_info fault_info = {};

	VM_STAT_INCR(faults);

	if (thread != THREAD_NULL && thread->task != TASK_NULL) {
		thread->task->faults++;
	}

	/*
	 *	Recovery actions
	 */

#undef  RELEASE_PAGE
#define RELEASE_PAGE(m) {                               \
	PAGE_WAKEUP_DONE(m);                            \
	vm_page_lockspin_queues();                      \
	vm_page_unwire(m, TRUE);                        \
	vm_page_unlock_queues();                        \
}

#undef  UNLOCK_THINGS
#define UNLOCK_THINGS   {                               \
	vm_object_paging_end(object);                   \
	vm_object_unlock(object);                       \
}

#undef  UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE   {                       \
	UNLOCK_THINGS;                                  \
	vm_object_deallocate(object);                   \
}
/*
 *	Give up and have caller do things the hard way.
 */

#define GIVE_UP {                                       \
	UNLOCK_AND_DEALLOCATE;                          \
	return(KERN_FAILURE);                           \
}

	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map) {
		assert(physpage_p == NULL);
		return KERN_FAILURE;
	}

	/*
	 *	Find the backing store object and offset into it.
	 */

	object = VME_OBJECT(entry);
	offset = (va - entry->vme_start) + VME_OFFSET(entry);
	prot = entry->protection;

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */

	vm_object_lock(object);
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 */

	/*
	 *	Look for page in top-level object.  If it's not there or
	 *	there's something going on, give up.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
	    (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
		GIVE_UP;
	}
	if (m->vmp_fictitious &&
	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
		/*
		 *	Guard pages are fictitious pages and are never
		 *	entered into a pmap, so let's say it's been wired...
		 */
		kr = KERN_SUCCESS;
		goto done;
	}

	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */

	vm_page_lockspin_queues();
	vm_page_wire(m, wire_tag, TRUE);
	vm_page_unlock_queues();

	/*
	 *	Mark page busy for other threads.
	 */
	assert(!m->vmp_busy);
	m->vmp_busy = TRUE;
	assert(!m->vmp_absent);

	/*
	 *	Give up if the page is being written and there's a copy object
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

	fault_info.user_tag = VME_ALIAS(entry);
	fault_info.pmap_options = 0;
	if (entry->iokit_acct ||
	    (!entry->is_sub_map && !entry->use_pmap)) {
		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
	}

	/*
	 *	Put this page into the physical map.
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;
	kr = vm_fault_enter(m,
	    pmap,
	    pmap_addr,
	    prot,
	    prot,
	    TRUE,  /* wired */
	    FALSE, /* change_wiring */
	    wire_tag,
	    &fault_info,
	    NULL,
	    &type_of_fault);
	if (kr != KERN_SUCCESS) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

done:
	/*
	 *	Unlock everything, and return
	 */

	if (physpage_p) {
		/* for vm_map_wire_and_extract() */
		if (kr == KERN_SUCCESS) {
			assert(object == VM_PAGE_OBJECT(m));
			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
			if (prot & VM_PROT_WRITE) {
				vm_object_lock_assert_exclusive(object);
				m->vmp_dirty = TRUE;
			}
		} else {
			*physpage_p = 0;
		}
	}

	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return kr;
}
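/*
 * Illustrative sketch (not part of the original source): the
 * RELEASE_PAGE/UNLOCK_AND_DEALLOCATE/GIVE_UP macros above encode a
 * common error-unwind discipline -- every bail-out after the page is
 * wired must undo the wire, then drop the paging reference, the object
 * lock, and the object reference, in that order.  Generic shape:
 */
#if 0	/* illustrative only */
	if (something_failed) {		/* hypothetical condition */
		RELEASE_PAGE(m);	/* wake waiters, unwire */
		GIVE_UP;		/* paging_end, unlock, deallocate, KERN_FAILURE */
	}
#endif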
/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */

static void
vm_fault_copy_cleanup(
	vm_page_t       page,
	vm_page_t       top_page)
{
	vm_object_t     object = VM_PAGE_OBJECT(page);

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	if (!VM_PAGE_PAGEABLE(page)) {
		vm_page_lockspin_queues();
		if (!VM_PAGE_PAGEABLE(page)) {
			vm_page_activate(page);
		}
		vm_page_unlock_queues();
	}
	vm_fault_cleanup(object, top_page);
}

static void
vm_fault_copy_dst_cleanup(
	vm_page_t       page)
{
	vm_object_t     object;

	if (page != VM_PAGE_NULL) {
		object = VM_PAGE_OBJECT(page);
		vm_object_lock(object);
		vm_page_lockspin_queues();
		vm_page_unwire(page, TRUE);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"copy_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		of data requested to be copied.
 */
kern_return_t
vm_fault_copy(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_map_size_t           *copy_size,             /* INOUT */
	vm_object_t             dst_object,
	vm_object_offset_t      dst_offset,
	vm_map_t                dst_map,
	vm_map_version_t        *dst_version,
	int                     interruptible)
{
	vm_page_t               result_page;

	vm_page_t               src_page;
	vm_page_t               src_top_page;
	vm_prot_t               src_prot;

	vm_page_t               dst_page;
	vm_page_t               dst_top_page;
	vm_prot_t               dst_prot;

	vm_map_size_t           amount_left;
	vm_object_t             old_copy_object;
	vm_object_t             result_page_object = NULL;
	kern_return_t           error = 0;
	vm_fault_return_t       result;

	vm_map_size_t           part_size;
	struct vm_object_fault_info fault_info_src = {};
	struct vm_object_fault_info fault_info_dst = {};

	/*
	 * In order not to confuse the clustered pageins, align
	 * the different offsets on a page boundary.
	 */

#define RETURN(x)                                       \
	MACRO_BEGIN                                     \
	*copy_size -= amount_left;                      \
	MACRO_RETURN(x);                                \
	MACRO_END

	amount_left = *copy_size;

	fault_info_src.interruptible = interruptible;
	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
	fault_info_src.stealth = TRUE;

	fault_info_dst.interruptible = interruptible;
	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
	fault_info_dst.stealth = TRUE;

	do { /* while (amount_left > 0) */
		/*
		 * There may be a deadlock if both source and destination
		 * pages are the same. To avoid this deadlock, the copy must
		 * start by getting the destination page in order to apply
		 * COW semantics if any.
		 */

RetryDestinationFault:;

		dst_prot = VM_PROT_WRITE | VM_PROT_READ;

		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		/* cap cluster size at maximum UPL size */
		upl_size_t cluster_size;
		if (os_convert_overflow(amount_left, &cluster_size)) {
			cluster_size = 0 - (upl_size_t)PAGE_SIZE;
		}
		fault_info_dst.cluster_size = cluster_size;

		dst_page = VM_PAGE_NULL;
		result = vm_fault_page(dst_object,
		    vm_object_trunc_page(dst_offset),
		    VM_PROT_WRITE | VM_PROT_READ,
		    FALSE,
		    FALSE, /* page not looked up */
		    &dst_prot, &dst_page, &dst_top_page,
		    (int *)0,
		    &error,
		    dst_map->no_zero_fill,
		    FALSE, &fault_info_dst);

		switch (result) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto RetryDestinationFault;
			}
		/* fall thru */
		case VM_FAULT_INTERRUPTED:
			RETURN(MACH_SEND_INTERRUPTED);
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail the copy */
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		/* fall thru */
		case VM_FAULT_MEMORY_ERROR:
			if (error) {
				return error;
			} else {
				return KERN_MEMORY_ERROR;
			}
		default:
			panic("vm_fault_copy: unexpected error 0x%x from "
			    "vm_fault_page()\n", result);
		}

		assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);

		assert(dst_object == VM_PAGE_OBJECT(dst_page));
		old_copy_object = dst_object->copy;

		/*
		 * There exists the possibility that the source and
		 * destination page are the same.  But we can't
		 * easily determine that now.  If they are the
		 * same, the call to vm_fault_page() for the
		 * destination page will deadlock.  To prevent this we
		 * wire the page so we can drop busy without having
		 * the page daemon steal the page.  We clean up the
		 * top page but keep the paging reference on the object
		 * holding the dest page so it doesn't go away.
		 */

		vm_page_lockspin_queues();
		vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
		vm_page_unlock_queues();
		PAGE_WAKEUP_DONE(dst_page);
		vm_object_unlock(dst_object);

		if (dst_top_page != VM_PAGE_NULL) {
			vm_object_lock(dst_object);
			VM_PAGE_FREE(dst_top_page);
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		}

RetrySourceFault:;

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */
			src_page = VM_PAGE_NULL;
			result_page = VM_PAGE_NULL;
		} else {
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object,
			    vm_object_trunc_page(src_offset));
			if (src_page == dst_page) {
				src_prot = dst_prot;
				result_page = VM_PAGE_NULL;
			} else {
				src_prot = VM_PROT_READ;
				vm_object_paging_begin(src_object);

				/* cap cluster size at maximum UPL size */
				if (os_convert_overflow(amount_left, &cluster_size)) {
					cluster_size = 0 - (upl_size_t)PAGE_SIZE;
				}
				fault_info_src.cluster_size = cluster_size;

				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					src_object,
					vm_object_trunc_page(src_offset),
					VM_PROT_READ, FALSE,
					FALSE, /* page not looked up */
					&src_prot,
					&result_page, &src_top_page,
					(int *)0, &error, FALSE,
					FALSE, &fault_info_src);

				switch (result) {
				case VM_FAULT_SUCCESS:
					break;
				case VM_FAULT_RETRY:
					goto RetrySourceFault;
				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible)) {
						goto RetrySourceFault;
					}
				/* fall thru */
				case VM_FAULT_INTERRUPTED:
					vm_fault_copy_dst_cleanup(dst_page);
					RETURN(MACH_SEND_INTERRUPTED);
				case VM_FAULT_SUCCESS_NO_VM_PAGE:
					/* success but no VM page: fail */
					vm_object_paging_end(src_object);
					vm_object_unlock(src_object);
				/* fall thru */
				case VM_FAULT_MEMORY_ERROR:
					vm_fault_copy_dst_cleanup(dst_page);
					if (error) {
						return error;
					} else {
						return KERN_MEMORY_ERROR;
					}
				default:
					panic("vm_fault_copy(2): unexpected "
					    "error 0x%x from "
					    "vm_fault_page()\n", result);
				}

				result_page_object = VM_PAGE_OBJECT(result_page);
				assert((src_top_page == VM_PAGE_NULL) ==
				    (result_page_object == src_object));
			}
			assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
			vm_object_unlock(result_page_object);
		}

		vm_map_lock_read(dst_map);

		if (!vm_map_verify(dst_map, dst_version)) {
			vm_map_unlock_read(dst_map);
			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
				vm_fault_copy_cleanup(result_page, src_top_page);
			}
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		assert(dst_object == VM_PAGE_OBJECT(dst_page));

		vm_object_lock(dst_object);

		if (dst_object->copy != old_copy_object) {
			vm_object_unlock(dst_object);
			vm_map_unlock_read(dst_map);
			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
				vm_fault_copy_cleanup(result_page, src_top_page);
			}
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_unlock(dst_object);

		/*
		 *	Copy the page, and note that it is dirty
		 *	immediately.
		 */

		if (!page_aligned(src_offset) ||
		    !page_aligned(dst_offset) ||
		    !page_aligned(amount_left)) {
			vm_object_offset_t      src_po,
			    dst_po;

			src_po = src_offset - vm_object_trunc_page(src_offset);
			dst_po = dst_offset - vm_object_trunc_page(dst_offset);

			if (dst_po > src_po) {
				part_size = PAGE_SIZE - dst_po;
			} else {
				part_size = PAGE_SIZE - src_po;
			}
			if (part_size > (amount_left)) {
				part_size = amount_left;
			}

			if (result_page == VM_PAGE_NULL) {
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_zero_fill(dst_page,
				    (vm_offset_t) dst_po,
				    (vm_size_t) part_size);
			} else {
				assert((vm_offset_t) src_po == src_po);
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_copy(result_page,
				    (vm_offset_t) src_po,
				    dst_page,
				    (vm_offset_t) dst_po,
				    (vm_size_t)part_size);

				if (!dst_page->vmp_dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_object);
				}
			}
		} else {
			part_size = PAGE_SIZE;

			if (result_page == VM_PAGE_NULL) {
				vm_page_zero_fill(dst_page);
			} else {
				vm_object_lock(result_page_object);
				vm_page_copy(result_page, dst_page);
				vm_object_unlock(result_page_object);

				if (!dst_page->vmp_dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_object);
				}
			}
		}

		/*
		 *	Unlock everything, and return
		 */

		vm_map_unlock_read(dst_map);

		if (result_page != VM_PAGE_NULL && src_page != dst_page) {
			vm_fault_copy_cleanup(result_page, src_top_page);
		}
		vm_fault_copy_dst_cleanup(dst_page);

		amount_left -= part_size;
		src_offset += part_size;
		dst_offset += part_size;
	} while (amount_left > 0);

	RETURN(KERN_SUCCESS);
#undef  RETURN

	/*NOTREACHED*/
}
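/*
 * Illustrative sketch (not part of the original source): per the header
 * comment above, a caller of vm_fault_copy() holds references (not
 * locks) on both objects, snapshots the destination map version, and
 * checks the returned size, which may be short if the map changed.
 * "len"/"dst_version" are placeholders for caller state.
 */
#if 0	/* illustrative only */
	vm_map_size_t   copied = len;
	kern_return_t   ckr;

	ckr = vm_fault_copy(src_object, src_offset, &copied,
	    dst_object, dst_offset, dst_map, &dst_version,
	    THREAD_UNINT);
	if (ckr == KERN_SUCCESS && copied < len) {
		/* destination map verification failed partway: retry the rest */
	}
#endif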
#if     VM_FAULT_CLASSIFY
/*
 * Temporary statistics gathering support.
 */

/*
 *	Statistics arrays:
 */
#define VM_FAULT_TYPES_MAX      5
#define VM_FAULT_LEVEL_MAX      8

int     vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];

#define VM_FAULT_TYPE_ZERO_FILL 0
#define VM_FAULT_TYPE_MAP_IN    1
#define VM_FAULT_TYPE_PAGER     2
#define VM_FAULT_TYPE_COPY      3
#define VM_FAULT_TYPE_OTHER     4

void
vm_fault_classify(vm_object_t object,
    vm_object_offset_t offset,
    vm_prot_t fault_type)
{
	int             type, level = 0;
	vm_page_t       m;

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
				type = VM_FAULT_TYPE_OTHER;
				break;
			}
			if (((fault_type & VM_PROT_WRITE) == 0) ||
			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
				type = VM_FAULT_TYPE_MAP_IN;
				break;
			}
			type = VM_FAULT_TYPE_COPY;
			break;
		} else {
			if (object->pager_created) {
				type = VM_FAULT_TYPE_PAGER;
				break;
			}
			if (object->shadow == VM_OBJECT_NULL) {
				type = VM_FAULT_TYPE_ZERO_FILL;
				break;
			}

			offset += object->vo_shadow_offset;
			object = object->shadow;
			level++;
			continue;
		}
	}

	if (level > VM_FAULT_LEVEL_MAX) {
		level = VM_FAULT_LEVEL_MAX;
	}

	vm_fault_stats[type][level] += 1;

	return;
}

/* cleanup routine to call from debugger */
void
vm_fault_classify_init(void)
{
	int type, level;

	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
			vm_fault_stats[type][level] = 0;
		}
	}

	return;
}
#endif  /* VM_FAULT_CLASSIFY */
vm_offset_t
kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
{
	vm_map_entry_t  entry;
	vm_object_t     object;
	vm_offset_t     object_offset;
	vm_page_t       m;
	int             compressor_external_state, compressed_count_delta;
	int             compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
	int             my_fault_type = VM_PROT_READ;
	kern_return_t   kr;

	if (not_in_kdp) {
		panic("kdp_lightweight_fault called from outside of debugger context");
	}

	assert(map != VM_MAP_NULL);

	assert((cur_target_addr & PAGE_MASK) == 0);
	if ((cur_target_addr & PAGE_MASK) != 0) {
		return 0;
	}

	if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
		return 0;
	}

	if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
		return 0;
	}

	if (entry->is_sub_map) {
		return 0;
	}

	object = VME_OBJECT(entry);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}

	object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);

	while (TRUE) {
		if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
			return 0;
		}

		if (object->pager_created && (object->paging_in_progress ||
		    object->activity_in_progress)) {
			return 0;
		}

		m = kdp_vm_page_lookup(object, object_offset);

		if (m != VM_PAGE_NULL) {
			if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				return 0;
			}

			if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning ||
			    m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
				return 0;
			}

			assert(!m->vmp_private);
			if (m->vmp_private) {
				return 0;
			}

			assert(!m->vmp_fictitious);
			if (m->vmp_fictitious) {
				return 0;
			}

			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
			if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
				return 0;
			}

			return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
		}

		compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

		if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
			if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
				kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
				    kdp_compressor_decompressed_page_ppnum, &my_fault_type,
				    compressor_flags, &compressed_count_delta);
				if (kr == KERN_SUCCESS) {
					return kdp_compressor_decompressed_page_paddr;
				} else {
					return 0;
				}
			}
		}

		if (object->shadow == VM_OBJECT_NULL) {
			return 0;
		}

		object_offset += object->vo_shadow_offset;
		object = object->shadow;
	}
}
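/*
 * Illustrative sketch (not part of the original source): this routine
 * is only legal from debugger context; a kdp-side consumer would treat
 * a zero return as "page not resident / not safely readable".
 * "task_map"/"target_va" are placeholders.
 */
#if 0	/* illustrative only, debugger context assumed */
	vm_offset_t paddr;

	paddr = kdp_lightweight_fault(task_map, trunc_page(target_va));
	if (paddr == 0) {
		/* fall back to marking the word as unreadable */
	}
#endif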
/*
 * vm_page_validate_cs_fast():
 * Performs a few quick checks to determine if the page's code signature
 * really needs to be fully validated.  It could:
 *	1. have been modified (i.e. automatically tainted),
 *	2. have already been validated,
 *	3. have already been found to be tainted,
 *	4. no longer have a backing store.
 * Returns FALSE if the page needs to be fully validated.
 */
static boolean_t
vm_page_validate_cs_fast(
	vm_page_t       page)
{
	vm_object_t     object;

	object = VM_PAGE_OBJECT(page);
	vm_object_lock_assert_held(object);

	if (page->vmp_wpmapped && !page->vmp_cs_tainted) {
		/*
		 * This page was mapped for "write" access sometime in the
		 * past and could still be modifiable in the future.
		 * Consider it tainted.
		 * [ If the page was already found to be "tainted", no
		 * need to re-validate. ]
		 */
		vm_object_lock_assert_exclusive(object);
		page->vmp_cs_validated = TRUE;
		page->vmp_cs_tainted = TRUE;
		if (cs_debug) {
			printf("CODESIGNING: %s: "
			    "page %p obj %p off 0x%llx "
			    "was modified\n",
			    __FUNCTION__,
			    page, object, page->vmp_offset);
		}
		vm_cs_validated_dirtied++;
	}

	if (page->vmp_cs_validated || page->vmp_cs_tainted) {
		return TRUE;
	}
	vm_object_lock_assert_exclusive(object);

#if CHECK_CS_VALIDATION_BITMAP
	kern_return_t kr;

	kr = vnode_pager_cs_check_validation_bitmap(
		object->pager,
		page->vmp_offset + object->paging_offset,
		CS_BITMAP_CHECK);
	if (kr == KERN_SUCCESS) {
		page->vmp_cs_validated = TRUE;
		page->vmp_cs_tainted = FALSE;
		vm_cs_bitmap_validated++;
		return TRUE;
	}
#endif /* CHECK_CS_VALIDATION_BITMAP */

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		return TRUE;
	}

	/* we need to really validate this page */
	vm_object_lock_assert_exclusive(object);
	return FALSE;
}

void
vm_page_validate_cs_mapped_slow(
	vm_page_t       page,
	const void      *kaddr)
{
	vm_object_t             object;
	memory_object_offset_t  mo_offset;
	memory_object_t         pager;
	struct vnode            *vnode;
	boolean_t               validated;
	unsigned                tainted;

	assert(page->vmp_busy);
	object = VM_PAGE_OBJECT(page);
	vm_object_lock_assert_exclusive(object);

	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * by now.
	 */
	assert(object->code_signed);
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	pager = object->pager;
	assert(object->paging_in_progress);
	vnode = vnode_pager_lookup_vnode(pager);
	mo_offset = page->vmp_offset + object->paging_offset;

	/* verify the SHA1 hash for this page */
	tainted = 0;
	validated = cs_validate_range(vnode,
	    pager,
	    mo_offset,
	    (const void *)((const char *)kaddr),
	    PAGE_SIZE_64,
	    &tainted);

	if (tainted & CS_VALIDATE_TAINTED) {
		page->vmp_cs_tainted = TRUE;
	}
	if (tainted & CS_VALIDATE_NX) {
		page->vmp_cs_nx = TRUE;
	}
	if (validated) {
		page->vmp_cs_validated = TRUE;
	}

#if CHECK_CS_VALIDATION_BITMAP
	if (page->vmp_cs_validated && !page->vmp_cs_tainted) {
		vnode_pager_cs_check_validation_bitmap(object->pager,
		    mo_offset,
		    CS_BITMAP_SET);
	}
#endif /* CHECK_CS_VALIDATION_BITMAP */
}

void
vm_page_validate_cs_mapped(
	vm_page_t       page,
	const void      *kaddr)
{
	if (!vm_page_validate_cs_fast(page)) {
		vm_page_validate_cs_mapped_slow(page, kaddr);
	}
}
void
vm_page_validate_cs(
	vm_page_t       page)
{
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_map_offset_t         koffset;
	vm_map_size_t           ksize;
	vm_offset_t             kaddr;
	kern_return_t           kr;
	boolean_t               busy_page;
	boolean_t               need_unmap;

	object = VM_PAGE_OBJECT(page);
	vm_object_lock_assert_held(object);

	if (vm_page_validate_cs_fast(page)) {
		return;
	}
	vm_object_lock_assert_exclusive(object);

	assert(object->code_signed);
	offset = page->vmp_offset;

	busy_page = page->vmp_busy;
	if (!busy_page) {
		/* keep page busy while we map (and unlock) the VM object */
		page->vmp_busy = TRUE;
	}

	/*
	 * Take a paging reference on the VM object
	 * to protect it from collapse or bypass,
	 * and keep it from disappearing too.
	 */
	vm_object_paging_begin(object);

	/* map the page in the kernel address space */
	ksize = PAGE_SIZE_64;
	koffset = 0;
	need_unmap = FALSE;
	kr = vm_paging_map_object(page,
	    object,
	    offset,
	    VM_PROT_READ,
	    FALSE, /* can't unlock object ! */
	    &ksize,
	    &koffset,
	    &need_unmap);
	if (kr != KERN_SUCCESS) {
		panic("%s: could not map page: 0x%x\n", __FUNCTION__, kr);
	}
	kaddr = CAST_DOWN(vm_offset_t, koffset);

	/* validate the mapped page */
	vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);

	assert(page->vmp_busy);
	assert(object == VM_PAGE_OBJECT(page));
	vm_object_lock_assert_exclusive(object);

	if (!busy_page) {
		PAGE_WAKEUP_DONE(page);
	}
	if (need_unmap) {
		/* unmap the map from the kernel address space */
		vm_paging_unmap_object(object, koffset, koffset + ksize);
		koffset = 0;
		ksize = 0;
		kaddr = 0;
	}
	vm_object_paging_end(object);
}
void
vm_page_validate_cs_mapped_chunk(
	vm_page_t       page,
	const void      *kaddr,
	vm_offset_t     chunk_offset,
	vm_size_t       chunk_size,
	boolean_t       *validated_p,
	unsigned        *tainted_p)
{
	vm_object_t             object;
	vm_object_offset_t      offset, offset_in_page;
	memory_object_t         pager;
	struct vnode            *vnode;
	boolean_t               validated;
	unsigned                tainted;

	*validated_p = FALSE;
	*tainted_p = 0;

	assert(page->vmp_busy);
	object = VM_PAGE_OBJECT(page);
	vm_object_lock_assert_exclusive(object);

	assert(object->code_signed);
	offset = page->vmp_offset;

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		return;
	}
	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * by now.
	 */
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	pager = object->pager;
	assert(object->paging_in_progress);
	vnode = vnode_pager_lookup_vnode(pager);

	/* verify the signature for this chunk */
	offset_in_page = chunk_offset;
	assert(offset_in_page < PAGE_SIZE);

	tainted = 0;
	validated = cs_validate_range(vnode,
	    pager,
	    (object->paging_offset +
	    offset +
	    offset_in_page),
	    (const void *)((const char *)kaddr
	    + offset_in_page),
	    chunk_size,
	    &tainted);
	if (validated) {
		*validated_p = TRUE;
	}
	if (tainted) {
		*tainted_p = tainted;
	}
}
static void
vm_rtfrecord_lock(void)
{
	lck_spin_lock(&vm_rtfr_slock);
}

static void
vm_rtfrecord_unlock(void)
{
	lck_spin_unlock(&vm_rtfr_slock);
}

int
vmrtfaultinfo_bufsz(void)
{
	return vmrtf_num_records * sizeof(vm_rtfault_record_t);
}
#include <kern/backtrace.h>

static void
vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
{
	uint64_t fend = mach_continuous_time();

	uint64_t cfpc = 0;
	uint64_t ctid = cthread->thread_id;
	uint64_t cupid = get_current_unique_pid();

	uintptr_t bpc = 0;
	int btr = 0;
	bool u64 = false;

	/* Capture a single-frame backtrace; this extracts just the program
	 * counter at the point of the fault into "bpc", and should perform no
	 * further user stack traversals, thus avoiding copyin()s and further
	 * faults.
	 */
	unsigned int bfrs = backtrace_thread_user(cthread, &bpc, 1U, &btr, &u64, NULL);

	if ((btr == 0) && (bfrs > 0)) {
		cfpc = bpc;
	}

	assert((fstart != 0) && fend >= fstart);
	vm_rtfrecord_lock();
	assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);

	vmrtfrs.vmrtf_total++;
	vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];

	cvmr->rtfabstime = fstart;
	cvmr->rtfduration = fend - fstart;
	cvmr->rtfaddr = fault_vaddr;
	cvmr->rtfpc = cfpc;
	cvmr->rtftype = type_of_fault;
	cvmr->rtfupid = cupid;
	cvmr->rtftid = ctid;

	if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
		vmrtfrs.vmrtfr_curi = 0;
	}

	vm_rtfrecord_unlock();
}
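/*
 * Illustrative sketch (not part of the original source): a consumer of
 * the fault-record ring sizes its buffer with vmrtfaultinfo_bufsz() and
 * then pulls the records for a given unique pid via vmrtf_extract()
 * (defined below); a non-zero return signals a truncated extraction.
 * The kalloc() usage here is a hedged assumption, not the original
 * call site.
 */
#if 0	/* illustrative only */
	int     nrecords = 0;
	int     truncated;
	void    *buf = kalloc(vmrtfaultinfo_bufsz());

	truncated = vmrtf_extract(target_upid, FALSE,
	    vmrtfaultinfo_bufsz(), buf, &nrecords);
	/* the first "nrecords" vm_rtfault_record_t entries in buf are valid */
#endif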
int
vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv)
{
	vm_rtfault_record_t *cvmrd = vrecords;
	size_t residue = vrecordsz;
	int numextracted = 0;
	boolean_t early_exit = FALSE;

	vm_rtfrecord_lock();

	for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
		if (residue < sizeof(vm_rtfault_record_t)) {
			early_exit = TRUE;
			break;
		}

		if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
#if     DEVELOPMENT || DEBUG
			if (isroot == FALSE) {
				continue;
			}
#else
			continue;
#endif /* DEVDEBUG */
		}

		*cvmrd = vmrtfrs.vm_rtf_records[vmfi];
		cvmrd++;
		residue -= sizeof(vm_rtfault_record_t);
		numextracted++;
	}

	vm_rtfrecord_unlock();

	*vmrtfrv = numextracted;
	return early_exit;
}
;