/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Page fault handling module.
 */
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>
#include <libkern/OSAtomic.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/message.h>	/* for error codes */
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
				/* For memory_object_data_{request,unlock} */

#include <kern/kern_types.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>

#include <vm/vm_compressor.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_external.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <sys/codesign.h>

#include <libsa/sys/timers.h>	/* for struct timespec */
#define VM_FAULT_CLASSIFY	0

#define TRACEFAULTPAGE		0	/* (TEST/DEBUG) */

unsigned int	vm_object_pagein_throttle = 16;
/*
 * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which
 * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
 * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
 * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
 * keep the UI active so that the user has a chance to kill the offending task before the system
 * completely hangs.
 *
 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
 * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
 * will be throttled.  The throttling is done by giving the thread that's trying to demand zero a page a
 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
 */
extern void throttle_lowpri_io(int);

uint64_t vm_hard_throttle_threshold;
#define NEED_TO_HARD_THROTTLE_THIS_TASK()	(vm_wants_task_throttled(current_task()) ||	\
	(vm_page_free_count < vm_page_throttle_limit &&						\
	 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED))
#define HARD_THROTTLE_DELAY	5000	/* 5000 us == 5 ms */
#define SOFT_THROTTLE_DELAY	200	/* 200 us == .2 ms */

#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS	6
#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC	20000
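/*
 * Taken together, the two tunables above bound page creation: a thread must
 * first accumulate more than 6 * 20000 == 120,000 created pages (roughly
 * 470 MB with 4 KB pages) before vm_page_throttled() will even consider
 * applying a soft or hard delay.
 */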
boolean_t current_thread_aborted(void);

/* Forward declarations of internal routines. */
static kern_return_t vm_fault_wire_fast(
		vm_map_t	map,
		vm_map_offset_t	va,
		vm_map_entry_t	entry,
		pmap_t		pmap,
		vm_map_offset_t	pmap_addr,
		ppnum_t		*physpage_p);

static kern_return_t vm_fault_internal(
		vm_map_t	map,
		vm_map_offset_t	vaddr,
		vm_prot_t	caller_prot,
		boolean_t	change_wiring,
		int		interruptible,
		pmap_t		pmap,
		vm_map_offset_t	pmap_addr,
		ppnum_t		*physpage_p);
static void vm_fault_copy_cleanup(
		vm_page_t	page,
		vm_page_t	top_page);

static void vm_fault_copy_dst_cleanup(
		vm_page_t	page);

#if VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t	object,
			      vm_object_offset_t	offset,
			      vm_prot_t		fault_type);

extern void vm_fault_classify_init(void);
#endif
unsigned long vm_pmap_enter_blocked = 0;
unsigned long vm_pmap_enter_retried = 0;

unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
unsigned long vm_cs_bitmap_validated = 0;

void vm_pre_fault(vm_map_offset_t);

extern int not_in_kdp;
extern char *kdp_compressor_decompressed_page;
extern addr64_t	kdp_compressor_decompressed_page_paddr;
extern ppnum_t	kdp_compressor_decompressed_page_ppnum;
/*
 *	Routine:	vm_fault_init
 *	Purpose:
 *		Initialize our private data structures.
 */
void
vm_fault_init(void)
{
    int i, vm_compressor_temp;
    boolean_t need_default_val = TRUE;
    /*
     * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
     * computed as a percentage of available memory, and the percentage used is scaled inversely with
     * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
     * and reduce the value down to 10% for very large memory configurations.  This helps give us a
     * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
     * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
     */
    vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100;
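    /*
     * Worked example of the formula above: with 8 GB of ram the scaling
     * term is MIN(8, 25), so the percentage is 35 - 8 == 27 and the
     * threshold works out to roughly 2.16 GB; at 25 GB of ram or more the
     * percentage bottoms out at the 10% floor.
     */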
    /*
     * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
     */
    if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
        for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
            if (vm_compressor_temp > 0 &&
                ((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
                need_default_val = FALSE;
                vm_compressor_mode = vm_compressor_temp;
                break;
            }
        }
        if (need_default_val)
            printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
    }
    if (need_default_val) {
        /* If no boot arg or incorrect boot arg, try device tree. */
        PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
    }
    PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof(vm_compressor_thread_count));

    if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp)))
        vm_compressor_immediate_preferred_override = TRUE;
    else {
        if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp)))
            vm_compressor_immediate_preferred_override = TRUE;
    }
    if (vm_compressor_immediate_preferred_override == TRUE) {
        if (vm_compressor_temp)
            vm_compressor_immediate_preferred = TRUE;
        else
            vm_compressor_immediate_preferred = FALSE;
    }
    printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
}
/*
 *	Routine:	vm_fault_cleanup
 *	Purpose:
 *		Clean up the result of vm_fault_page.
 *	Results:
 *		The paging reference for "object" is released.
 *		"object" is unlocked.
 *		If "top_page" is not null, "top_page" is
 *		freed and the paging reference for the object
 *		containing it is released.
 *
 *	In/out conditions:
 *		"object" must be locked.
 */
void
vm_fault_cleanup(
    register vm_object_t	object,
    register vm_page_t	top_page)
{
    vm_object_paging_end(object);
    vm_object_unlock(object);

    if (top_page != VM_PAGE_NULL) {
        object = top_page->object;

        vm_object_lock(object);
        VM_PAGE_FREE(top_page);
        vm_object_paging_end(object);
        vm_object_unlock(object);
    }
}
#if MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES 16
struct {
    unsigned long pages_in_cluster;
    unsigned long pages_at_higher_offsets;
    unsigned long pages_at_lower_offsets;
} cluster_stats_in[MAXCLUSTERPAGES];
#define CLUSTER_STAT(clause)	clause
#define CLUSTER_STAT_HIGHER(x)	\
	((cluster_stats_in[(x)].pages_at_higher_offsets)++)
#define CLUSTER_STAT_LOWER(x)	\
	((cluster_stats_in[(x)].pages_at_lower_offsets)++)
#define CLUSTER_STAT_CLUSTER(x)	\
	((cluster_stats_in[(x)].pages_in_cluster)++)
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)

boolean_t	vm_page_deactivate_behind = TRUE;
/*
 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
 */
#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW	128
#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER	16	/* don't make this too big... */
							/* we use it to size an array on the stack */

int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
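/*
 * Together these defaults mean vm_fault_deactivate_behind() starts trailing
 * the faulting offset only after a sequential run of at least 128 pages,
 * and then deactivates pages 16 at a time; the page_run[] array below is
 * sized by the cluster constant, which is why it must stay small.
 */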
#define MAX_SEQUENTIAL_RUN	(1024 * 1024 * 1024)
/*
 * vm_fault_is_sequential
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.
 * Update state to indicate current access pattern.
 *
 * object must have at least the shared lock held
 */
static void
vm_fault_is_sequential(
    vm_object_t		object,
    vm_object_offset_t	offset,
    vm_behavior_t	behavior)
{
    vm_object_offset_t	last_alloc;
    int			sequential;
    int			orig_sequential;
    last_alloc = object->last_alloc;
    sequential = object->sequential;
    orig_sequential = sequential;

    switch (behavior) {
    case VM_BEHAVIOR_RANDOM:
        /*
         * reset indicator of sequential behavior
         */
        sequential = 0;
        break;

    case VM_BEHAVIOR_SEQUENTIAL:
        if (offset && last_alloc == offset - PAGE_SIZE_64) {
            /*
             * advance indicator of sequential behavior
             */
            if (sequential < MAX_SEQUENTIAL_RUN)
                sequential += PAGE_SIZE;
        } else {
            /*
             * reset indicator of sequential behavior
             */
            sequential = 0;
        }
        break;

    case VM_BEHAVIOR_RSEQNTL:
        if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
            /*
             * advance indicator of sequential behavior
             */
            if (sequential > -MAX_SEQUENTIAL_RUN)
                sequential -= PAGE_SIZE;
        } else {
            /*
             * reset indicator of sequential behavior
             */
            sequential = 0;
        }
        break;

    case VM_BEHAVIOR_DEFAULT:
    default:
        if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
            /*
             * advance indicator of sequential behavior
             */
            if (sequential < 0)
                sequential = 0;
            if (sequential < MAX_SEQUENTIAL_RUN)
                sequential += PAGE_SIZE;
        } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
            /*
             * advance indicator of sequential behavior
             */
            if (sequential > 0)
                sequential = 0;
            if (sequential > -MAX_SEQUENTIAL_RUN)
                sequential -= PAGE_SIZE;
        } else {
            /*
             * reset indicator of sequential behavior
             */
            sequential = 0;
        }
        break;
    }
    if (sequential != orig_sequential) {
        if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
            /*
             * if someone else has already updated object->sequential
             * don't bother trying to update it or object->last_alloc
             */
            return;
        }
    }
    /*
     * I'd like to do this with a OSCompareAndSwap64, but that
     * doesn't exist for PPC...  however, it shouldn't matter
     * that much... last_alloc is maintained so that we can determine
     * if a sequential access pattern is taking place... if only
     * one thread is banging on this object, no problem with the unprotected
     * update... if 2 or more threads are banging away, we run the risk of
     * someone seeing a mangled update... however, in the face of multiple
     * accesses, no sequential access pattern can develop anyway, so we
     * haven't lost any real info.
     */
    object->last_alloc = offset;
}
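/*
 * Net effect: object->sequential advances by PAGE_SIZE per contiguous
 * forward fault (capped at MAX_SEQUENTIAL_RUN), retreats by PAGE_SIZE per
 * contiguous reverse fault (floored at -MAX_SEQUENTIAL_RUN), and snaps
 * back to zero as soon as the access pattern breaks.
 */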
int vm_page_deactivate_behind_count = 0;

/*
 * vm_fault_deactivate_behind
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.  If
 * so, compute a potential page to deactivate and
 * deactivate it.
 *
 * object must be locked.
 *
 * return TRUE if we actually deactivate a page
 */
static boolean_t
vm_fault_deactivate_behind(
    vm_object_t		object,
    vm_object_offset_t	offset,
    vm_behavior_t	behavior)
{
    int		n;
    int		pages_in_run = 0;
    int		max_pages_in_run = 0;
    int		sequential_run;
    int		sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
    vm_object_offset_t	run_offset = 0;
    vm_object_offset_t	pg_offset = 0;
    vm_page_t	m;
    vm_page_t	page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];

#if TRACEFAULTPAGE
    dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind);	/* (TEST/DEBUG) */
#endif
    if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
        /*
         * Do not deactivate pages from the kernel object: they
         * are not intended to become pageable.
         * or we've disabled the deactivate behind mechanism
         */
        return FALSE;
    }
    if ((sequential_run = object->sequential)) {
        if (sequential_run < 0) {
            sequential_behavior = VM_BEHAVIOR_RSEQNTL;
            sequential_run = 0 - sequential_run;
        } else {
            sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
        }
    }
    switch (behavior) {
    case VM_BEHAVIOR_RANDOM:
        break;
    case VM_BEHAVIOR_SEQUENTIAL:
        if (sequential_run >= (int)PAGE_SIZE) {
            run_offset = 0 - PAGE_SIZE_64;
            max_pages_in_run = 1;
        }
        break;
    case VM_BEHAVIOR_RSEQNTL:
        if (sequential_run >= (int)PAGE_SIZE) {
            run_offset = PAGE_SIZE_64;
            max_pages_in_run = 1;
        }
        break;
    case VM_BEHAVIOR_DEFAULT:
    default:
    {   vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;

        /*
         * determine if the run of sequential access has been
         * long enough on an object with default access behavior
         * to consider it for deactivation
         */
        if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
            /*
             * the comparisons between offset and behind are done
             * in this kind of odd fashion in order to prevent wrap around
             * at the end points
             */
            if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
                if (offset >= behind) {
                    run_offset = 0 - behind;
                    pg_offset = PAGE_SIZE_64;
                    max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
                }
            } else {
                if (offset < -behind) {
                    run_offset = behind;
                    pg_offset = 0 - PAGE_SIZE_64;
                    max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
                }
            }
        }
        break;
    }
    }
    for (n = 0; n < max_pages_in_run; n++) {
        m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));

        if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
            page_run[pages_in_run++] = m;

            /*
             * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
             *
             * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
             * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
             * new reference happens. If no further references happen on the page after that remote TLB flushes
             * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
             * by pageout_scan, which is just fine since the last reference would have happened quite far
             * in the past (TLB caches don't hang around for very long), and of course could just as easily
             * have happened before we did the deactivate_behind.
             */
            pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
        }
    }
    if (pages_in_run) {
        vm_page_lockspin_queues();

        for (n = 0; n < pages_in_run; n++) {
            m = page_run[n];

            vm_page_deactivate_internal(m, FALSE);

            vm_page_deactivate_behind_count++;
#if TRACEFAULTPAGE
            dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
        }
        vm_page_unlock_queues();

        return TRUE;
    }
    return FALSE;
}
#if (DEVELOPMENT || DEBUG)
uint32_t	vm_page_creation_throttled_hard = 0;
uint32_t	vm_page_creation_throttled_soft = 0;
uint64_t	vm_page_creation_throttle_avoided = 0;
#endif /* DEVELOPMENT || DEBUG */
static int
vm_page_throttled(boolean_t page_kept)
{
    clock_sec_t	elapsed_sec;
    clock_sec_t	tv_sec;
    clock_usec_t	tv_usec;

    thread_t thread = current_thread();

    if (thread->options & TH_OPT_VMPRIV)
        return (0);

    if (thread->t_page_creation_throttled) {
        thread->t_page_creation_throttled = 0;

        if (page_kept == FALSE)
            goto no_throttle;
    }
    if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
#if (DEVELOPMENT || DEBUG)
        thread->t_page_creation_throttled_hard++;
        OSAddAtomic(1, &vm_page_creation_throttled_hard);
#endif /* DEVELOPMENT || DEBUG */
        return (HARD_THROTTLE_DELAY);
    }

    if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
        thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {

        if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
#if (DEVELOPMENT || DEBUG)
            OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
#endif
            goto no_throttle;
        }
        clock_get_system_microtime(&tv_sec, &tv_usec);

        elapsed_sec = tv_sec - thread->t_page_creation_time;

        if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
            (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {

            if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
                /*
                 * we'll reset our stats to give a well behaved app
                 * that was unlucky enough to accumulate a bunch of pages
                 * over a long period of time a chance to get out of
                 * the throttled state... we reset the counter and timestamp
                 * so that if it stays under the rate limit for the next second
                 * it will be back in our good graces... if it exceeds it, it
                 * will remain in the throttled state
                 */
                thread->t_page_creation_time = tv_sec;
                thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
            }
            ++vm_page_throttle_count;

            thread->t_page_creation_throttled = 1;

            if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED()) {
#if (DEVELOPMENT || DEBUG)
                thread->t_page_creation_throttled_hard++;
                OSAddAtomic(1, &vm_page_creation_throttled_hard);
#endif /* DEVELOPMENT || DEBUG */
                return (HARD_THROTTLE_DELAY);
            } else {
#if (DEVELOPMENT || DEBUG)
                thread->t_page_creation_throttled_soft++;
                OSAddAtomic(1, &vm_page_creation_throttled_soft);
#endif /* DEVELOPMENT || DEBUG */
                return (SOFT_THROTTLE_DELAY);
            }
        }
        thread->t_page_creation_time = tv_sec;
        thread->t_page_creation_count = 0;
    }
no_throttle:
    thread->t_page_creation_count++;

    return (0);
}
/*
 * check for various conditions that would
 * prevent us from creating a ZF page...
 * cleanup is based on being called from vm_fault_page
 *
 * object must be locked
 * object == m->object
 */
static vm_fault_return_t
vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle)
{
    int throttle_delay;
    if (object->shadow_severed ||
        VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
        /*
         * Either:
         * 1. the shadow chain was severed,
         * 2. the purgeable object is volatile or empty and is marked
         *    to fault on access while volatile.
         * Just have to return an error at this point
         */
        if (m != VM_PAGE_NULL)
            VM_PAGE_FREE(m);
        vm_fault_cleanup(object, first_m);

        thread_interrupt_level(interruptible_state);

        return (VM_FAULT_MEMORY_ERROR);
    }
    if (vm_backing_store_low) {
        /*
         * are we protecting the system from
         * backing store exhaustion.  If so
         * sleep unless we are privileged.
         */
        if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {

            if (m != VM_PAGE_NULL)
                VM_PAGE_FREE(m);
            vm_fault_cleanup(object, first_m);

            assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

            thread_block(THREAD_CONTINUE_NULL);
            thread_interrupt_level(interruptible_state);

            return (VM_FAULT_RETRY);
        }
    }
    if (page_throttle == TRUE) {
        if ((throttle_delay = vm_page_throttled(FALSE))) {
            /*
             * we're throttling zero-fills...
             * treat this as if we couldn't grab a page
             */
            if (m != VM_PAGE_NULL)
                VM_PAGE_FREE(m);
            vm_fault_cleanup(object, first_m);

            VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);

            delay(throttle_delay);

            if (current_thread_aborted()) {
                thread_interrupt_level(interruptible_state);
                return VM_FAULT_INTERRUPTED;
            }
            thread_interrupt_level(interruptible_state);

            return (VM_FAULT_MEMORY_SHORTAGE);
        }
    }
    return (VM_FAULT_SUCCESS);
}
/*
 * do the work to zero fill a page and
 * inject it into the correct paging queue
 *
 * m->object must be locked
 * page queue lock must NOT be held
 */
static int
vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
{
    int my_fault = DBG_ZERO_FILL_FAULT;
754 int my_fault
= DBG_ZERO_FILL_FAULT
;
757 * This is is a zero-fill page fault...
759 * Checking the page lock is a waste of
760 * time; this page was absent, so
761 * it can't be page locked by a pager.
763 * we also consider it undefined
764 * with respect to instruction
765 * execution. i.e. it is the responsibility
766 * of higher layers to call for an instruction
767 * sync after changing the contents and before
768 * sending a program into this area. We
769 * choose this approach for performance
773 m
->cs_validated
= FALSE
;
774 m
->cs_tainted
= FALSE
;
777 if (no_zero_fill
== TRUE
) {
778 my_fault
= DBG_NZF_PAGE_FAULT
;
780 if (m
->absent
&& m
->busy
)
783 vm_page_zero_fill(m
);
785 VM_STAT_INCR(zero_fill_count
);
786 DTRACE_VM2(zfod
, int, 1, (uint64_t *), NULL
);
789 assert(m
->object
!= kernel_object
);
790 //assert(m->pageq.next == NULL && m->pageq.prev == NULL);
    if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
        (m->object->purgable == VM_PURGABLE_DENY ||
         m->object->purgable == VM_PURGABLE_NONVOLATILE ||
         m->object->purgable == VM_PURGABLE_VOLATILE)) {

        vm_page_lockspin_queues();

        if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
            assert(!VM_PAGE_WIRED(m));

            /*
             * can't be on the pageout queue since we don't
             * have a pager to try and clean to
             */
            assert(!m->pageout_queue);

            vm_page_queues_remove(m);
            vm_page_check_pageable_safe(m);
            queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
            m->throttled = TRUE;
            vm_page_throttled_count++;
        }
        vm_page_unlock_queues();
    }
    return (my_fault);
}
/*
 *	Routine:	vm_fault_page
 *	Purpose:
 *		Find the resident page for the virtual memory
 *		specified by the given virtual memory object
 *		and offset.
 *	Additional arguments:
 *		The required permissions for the page is given
 *		in "fault_type".  Desired permissions are included
 *		in "protection".
 *		fault_info is passed along to determine pagein cluster
 *		limits... it contains the expected reference pattern,
 *		cluster size if available, etc...
 *
 *		If the desired page is known to be resident (for
 *		example, because it was previously wired down), asserting
 *		the "unwiring" parameter will speed the search.
 *
 *		If the operation can be interrupted (by thread_abort
 *		or thread_terminate), then the "interruptible"
 *		parameter should be asserted.
 *
 *	Results:
 *		The page containing the proper data is returned
 *		in "result_page".
 *
 *	In/out conditions:
 *		The source object must be locked and referenced,
 *		and must donate one paging reference.  The reference
 *		is not affected.  The paging reference and lock are
 *		consumed.
 *
 *		If the call succeeds, the object in which "result_page"
 *		resides is left locked and holding a paging reference.
 *		If this is not the original object, a busy page in the
 *		original object is returned in "top_page", to prevent other
 *		callers from pursuing this same data, along with a paging
 *		reference for the original object.  The "top_page" should
 *		be destroyed when this guarantee is no longer required.
 *		The "result_page" is also left busy.  It is not removed
 *		from the pageout queues.
 *	Special Case:
 *		A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
 *		fault succeeded but there's no VM page (i.e. the VM object
 *		does not actually hold VM pages, but device memory or
 *		large pages).  The object is still locked and we still hold a
 *		paging_in_progress reference.
 */
unsigned int vm_fault_page_blocked_access = 0;
unsigned int vm_fault_page_forced_retry = 0;
vm_fault_return_t
vm_fault_page(
    /* Arguments: */
    vm_object_t	first_object,		/* Object to begin search */
    vm_object_offset_t first_offset,	/* Offset into object */
    vm_prot_t	fault_type,		/* What access is requested */
    boolean_t	must_be_resident,	/* Must page be resident? */
    boolean_t	caller_lookup,		/* caller looked up page */
    /* Modifies in place: */
    vm_prot_t	*protection,		/* Protection for mapping */
    vm_page_t	*result_page,		/* Page found, if successful */
    /* Returns: */
    vm_page_t	*top_page,		/* Page in top object, if
					 * not result_page. */
    int		*type_of_fault,		/* if non-null, fill in with type of fault
					 * COW, zero-fill, etc... returned in trace point */
    /* More arguments: */
    kern_return_t	*error_code,	/* code if page is in error */
    boolean_t	no_zero_fill,		/* don't zero fill absent pages */
    boolean_t	data_supply,		/* treat as data_supply if
					 * it is a write fault and a full
					 * page is provided */
    vm_object_fault_info_t fault_info)
{
    vm_page_t		m;
    vm_object_t		object;
    vm_object_offset_t	offset;
    vm_page_t		first_m;
    vm_object_t		next_object;
    vm_object_t		copy_object;
    boolean_t		look_for_page;
    boolean_t		force_fault_retry = FALSE;
    vm_prot_t		access_required = fault_type;
    vm_prot_t		wants_copy_flag;
    CLUSTER_STAT(int pages_at_higher_offsets;)
    CLUSTER_STAT(int pages_at_lower_offsets;)
    kern_return_t	wait_result;
    boolean_t		interruptible_state;
    boolean_t		data_already_requested = FALSE;
    vm_behavior_t	orig_behavior;
    vm_size_t		orig_cluster_size;
    vm_fault_return_t	error;
    int			my_fault;
    uint32_t		try_failed_count;
    int			interruptible; /* how may fault be interrupted? */
    int			external_state = VM_EXTERNAL_STATE_UNKNOWN;
    memory_object_t	pager;
    vm_fault_return_t	retval;
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store.  This existence map
 * duplicates information maintained by the vnode pager.  It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created.  The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object.  MUST_ASK_PAGER() is one of the
 * criteria in the decision to invoke the pager.  It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation.  Note that MUST_ASK_PAGER() always evaluates to TRUE for
 * permanent objects.  Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object
 * for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.  PAGED_OUT()
 * is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#if MACH_PAGEMAP
#define MUST_ASK_PAGER(o, f, s)					\
	((vm_external_state_get((o)->existence_map, (f))	\
	  != VM_EXTERNAL_STATE_ABSENT) &&			\
	 (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))))	\
	  != VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f)						\
	((vm_external_state_get((o)->existence_map, (f))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||			\
	 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))		\
	  == VM_EXTERNAL_STATE_EXISTS))
#else /* MACH_PAGEMAP */
#define MUST_ASK_PAGER(o, f, s)					\
	((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) \
	(VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
#endif /* MACH_PAGEMAP */
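/*
 * In short: MUST_ASK_PAGER() gates the relatively expensive trip into the
 * pager/compressor -- it is skipped only when the backing store is known to
 * have never seen this page -- while PAGED_OUT() answers the narrower
 * question "does a copy already exist on backing store?", which the
 * copy-object push logic below uses to avoid a redundant pageout.
 */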
#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	if (!m->active && !m->inactive && !m->throttled) {	\
		vm_page_lockspin_queues();			\
		if (!m->active && !m->inactive && !m->throttled) { \
			if (COMPRESSED_PAGER_IS_ACTIVE)		\
				vm_page_deactivate(m);		\
			else					\
				vm_page_activate(m);		\
		}						\
		vm_page_unlock_queues();			\
	}							\
	MACRO_END
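/*
 * Note the double-checked pattern in RELEASE_PAGE(): the queue-membership
 * test is repeated after taking the page-queue lock, so the common case of
 * a page already on a queue never pays for the lock.
 */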
#if TRACEFAULTPAGE
    dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset);	/* (TEST/DEBUG) */
#endif

    interruptible = fault_info->interruptible;
    interruptible_state = thread_interrupt_level(interruptible);
    /*
     * INVARIANTS (through entire routine):
     *
     *	1)	At all times, we must either have the object
     *		lock or a busy page in some object to prevent
     *		some other thread from trying to bring in
     *		the same page.
     *
     *		Note that we cannot hold any locks during the
     *		pager access or when waiting for memory, so
     *		we use a busy page then.
     *
     *	2)	To prevent another thread from racing us down the
     *		shadow chain and entering a new page in the top
     *		object before we do, we must keep a busy page in
     *		the top object while following the shadow chain.
     *
     *	3)	We must increment paging_in_progress on any object
     *		for which we have a busy page before dropping
     *		the object lock.
     *
     *	4)	We leave busy pages on the pageout queues.
     *		If the pageout daemon comes across a busy page,
     *		it will remove the page from the pageout queues.
     */

    object = first_object;
    offset = first_offset;
    first_m = VM_PAGE_NULL;
    access_required = fault_type;

    XPR(XPR_VM_FAULT,
        "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
        object, offset, fault_type, *protection, 0);
    /*
     * default type of fault
     */
    my_fault = DBG_CACHE_HIT_FAULT;

    while (TRUE) {
#if TRACEFAULTPAGE
        dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
        if (!object->alive) {
            /*
             * object is no longer valid
             * clean up and return error
             */
            vm_fault_cleanup(object, first_m);
            thread_interrupt_level(interruptible_state);

            return (VM_FAULT_MEMORY_ERROR);
        }

        if (!object->pager_created && object->phys_contiguous) {
            /*
             * A physically-contiguous object without a pager:
             * must be a "large page" object. We do not deal
             * with VM pages for this object.
             */
            caller_lookup = FALSE;
            m = VM_PAGE_NULL;
            goto phys_contig_object;
        }
        if (object->blocked_access) {
            /*
             * Access to this VM object has been blocked.
             * Replace our "paging_in_progress" reference with
             * a "activity_in_progress" reference and wait for
             * access to be unblocked.
             */
            caller_lookup = FALSE; /* no longer valid after sleep */
            vm_object_activity_begin(object);
            vm_object_paging_end(object);
            while (object->blocked_access) {
                vm_object_sleep(object,
                                VM_OBJECT_EVENT_UNBLOCKED,
                                THREAD_UNINT);
            }
            vm_fault_page_blocked_access++;
            vm_object_paging_begin(object);
            vm_object_activity_end(object);
        }
        /*
         * See whether the page at 'offset' is resident
         */
        if (caller_lookup == TRUE) {
            /*
             * The caller has already looked up the page
             * and gave us the result in "result_page".
             * We can use this for the first lookup but
             * it loses its validity as soon as we unlock
             * the object.
             */
            m = *result_page;
            caller_lookup = FALSE; /* no longer valid after that */
        } else {
            m = vm_page_lookup(object, offset);
        }
#if TRACEFAULTPAGE
        dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
        if (m != VM_PAGE_NULL) {

            if (m->busy) {
                /*
                 * The page is being brought in,
                 * wait for it and then retry.
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
                wait_result = PAGE_SLEEP(object, m, interruptible);

                XPR(XPR_VM_FAULT,
                    "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
                    object, offset,
                    m, 0, 0);
                counter(c_vm_fault_page_block_busy_kernel++);

                if (wait_result != THREAD_AWAKENED) {
                    vm_fault_cleanup(object, first_m);
                    thread_interrupt_level(interruptible_state);

                    if (wait_result == THREAD_RESTART)
                        return (VM_FAULT_RETRY);
                    else
                        return (VM_FAULT_INTERRUPTED);
                }
                continue;
            }
            if (m->laundry) {
                m->pageout = FALSE;

                if (!m->cleaning)
                    vm_pageout_steal_laundry(m, FALSE);
            }
            if (m->phys_page == vm_page_guard_addr) {
                /*
                 * Guard page: off limits !
                 */
                if (fault_type == VM_PROT_NONE) {
                    /*
                     * The fault is not requesting any
                     * access to the guard page, so it must
                     * be just to wire or unwire it.
                     * Let's pretend it succeeded...
                     */
                    m->busy = TRUE;
                    *result_page = m;
                    assert(first_m == VM_PAGE_NULL);
                    *top_page = first_m;
                    if (type_of_fault)
                        *type_of_fault = DBG_GUARD_FAULT;
                    thread_interrupt_level(interruptible_state);
                    return VM_FAULT_SUCCESS;
                } else {
                    /*
                     * The fault requests access to the
                     * guard page: let's deny that !
                     */
                    vm_fault_cleanup(object, first_m);
                    thread_interrupt_level(interruptible_state);
                    return VM_FAULT_MEMORY_ERROR;
                }
            }
            if (m->error) {
                /*
                 * The page is in error, give up now.
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);	/* (TEST/DEBUG) */
#endif
                if (error_code)
                    *error_code = KERN_MEMORY_ERROR;
                VM_PAGE_FREE(m);

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_MEMORY_ERROR);
            }
            if (m->restart) {
                /*
                 * The pager wants us to restart
                 * at the top of the chain,
                 * typically because it has moved the
                 * page to another pager, then do so.
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
                VM_PAGE_FREE(m);

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_RETRY);
            }
            if (m->absent) {
                /*
                 * The page isn't busy, but is absent,
                 * therefore it's deemed "unavailable".
                 *
                 * Remove the non-existent page (unless it's
                 * in the top object) and move on down to the
                 * next object (if there is one).
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);	/* (TEST/DEBUG) */
#endif
                next_object = object->shadow;

                if (next_object == VM_OBJECT_NULL) {
                    /*
                     * Absent page at bottom of shadow
                     * chain; zero fill the page we left
                     * busy in the first object, and free
                     * the absent page.
                     */
                    assert(!must_be_resident);

                    /*
                     * check for any conditions that prevent
                     * us from creating a new zero-fill page
                     * vm_fault_check will do all of the
                     * fault cleanup in the case of an error condition
                     * including resetting the thread_interrupt_level
                     */
                    error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);

                    if (error != VM_FAULT_SUCCESS)
                        return (error);

                    XPR(XPR_VM_FAULT,
                        "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
                        object, offset,
                        m,
                        first_object, 0);
                    if (object != first_object) {
                        /*
                         * free the absent page we just found
                         */
                        VM_PAGE_FREE(m);

                        /*
                         * drop reference and lock on current object
                         */
                        vm_object_paging_end(object);
                        vm_object_unlock(object);

                        /*
                         * grab the original page we
                         * 'soldered' in place and
                         * retake lock on 'first_object'
                         */
                        m = first_m;
                        first_m = VM_PAGE_NULL;

                        object = first_object;
                        offset = first_offset;

                        vm_object_lock(object);
                    }
                    /*
                     * we're going to use the absent page we just found
                     * so convert it to a 'busy' page
                     */
                    m->absent = FALSE;
                    m->busy = TRUE;

                    if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
                        m->absent = TRUE;
                    /*
                     * zero-fill the page and put it on
                     * the correct paging queue
                     */
                    my_fault = vm_fault_zero_page(m, no_zero_fill);

                    break;
                } else {
                    if (must_be_resident)
                        vm_object_paging_end(object);
                    else if (object != first_object) {
                        vm_object_paging_end(object);
                        VM_PAGE_FREE(m);
                    } else {
                        first_m = m;
                        m->absent = FALSE;
                        m->busy = TRUE;

                        vm_page_lockspin_queues();

                        assert(!m->pageout_queue);
                        vm_page_queues_remove(m);

                        vm_page_unlock_queues();
                    }
                    XPR(XPR_VM_FAULT,
                        "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
                        object, offset,
                        next_object,
                        offset+object->vo_shadow_offset, 0);

                    offset += object->vo_shadow_offset;
                    fault_info->lo_offset += object->vo_shadow_offset;
                    fault_info->hi_offset += object->vo_shadow_offset;
                    access_required = VM_PROT_READ;

                    vm_object_lock(next_object);
                    vm_object_unlock(object);
                    object = next_object;
                    vm_object_paging_begin(object);

                    /*
                     * reset to default type of fault
                     */
                    my_fault = DBG_CACHE_HIT_FAULT;

                    continue;
                }
            }
            if ((m->cleaning)
                && ((object != first_object) || (object->copy != VM_OBJECT_NULL))
                && (fault_type & VM_PROT_WRITE)) {
                /*
                 * This is a copy-on-write fault that will
                 * cause us to revoke access to this page, but
                 * this page is in the process of being cleaned
                 * in a clustered pageout. We must wait until
                 * the cleaning operation completes before
                 * revoking access to the original page,
                 * otherwise we might attempt to remove a
                 * write access.
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);	/* (TEST/DEBUG) */
#endif
                XPR(XPR_VM_FAULT,
                    "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
                    object, offset,
                    m, 0, 0);
                /*
                 * take an extra ref so that object won't die
                 */
                vm_object_reference_locked(object);

                vm_fault_cleanup(object, first_m);

                counter(c_vm_fault_page_block_backoff_kernel++);
                vm_object_lock(object);
                assert(object->ref_count > 0);

                m = vm_page_lookup(object, offset);

                if (m != VM_PAGE_NULL && m->cleaning) {
                    PAGE_ASSERT_WAIT(m, interruptible);

                    vm_object_unlock(object);
                    wait_result = thread_block(THREAD_CONTINUE_NULL);
                    vm_object_deallocate(object);

                    goto backoff;
                } else {
                    vm_object_unlock(object);

                    vm_object_deallocate(object);
                    thread_interrupt_level(interruptible_state);

                    return (VM_FAULT_RETRY);
                }
            }
            if (type_of_fault == NULL && m->speculative &&
                !(fault_info != NULL && fault_info->stealth)) {
                /*
                 * If we were passed a non-NULL pointer for
                 * "type_of_fault", then we came from
                 * vm_fault... we'll let it deal with
                 * this condition, since it
                 * needs to see m->speculative to correctly
                 * account the pageins, otherwise...
                 * take it off the speculative queue, we'll
                 * let the caller of vm_fault_page deal
                 * with getting it onto the correct queue
                 *
                 * If the caller specified in fault_info that
                 * it wants a "stealth" fault, we also leave
                 * the page in the speculative queue.
                 */
                vm_page_lockspin_queues();
                if (m->speculative)
                    vm_page_queues_remove(m);
                vm_page_unlock_queues();
            }
            if (m->encrypted) {
                /*
                 * ENCRYPTED SWAP:
                 * the user needs access to a page that we
                 * encrypted before paging it out.
                 * Decrypt the page now.
                 * Keep it busy to prevent anyone from
                 * accessing it during the decryption.
                 */
                m->busy = TRUE;
                vm_page_decrypt(m, 0);
                assert(object == m->object);
                assert(m->busy);
                PAGE_WAKEUP_DONE(m);

                /*
                 * Retry from the top, in case
                 * something changed while we were
                 * decrypting.
                 */
                continue;
            }
            ASSERT_PAGE_DECRYPTED(m);
            if (m->object->code_signed) {
                /*
                 * CODE SIGNING:
                 * We just paged in a page from a signed
                 * memory object but we don't need to
                 * validate it now.  We'll validate it
                 * when it gets mapped into a user address
                 * space for the first time or when the page
                 * gets copied to another object as a result
                 * of a copy-on-write.
                 */
            }

            /*
             * We mark the page busy and leave it on
             * the pageout queues.  If the pageout
             * daemon comes across it, then it will
             * remove the page from the queue, but not the object
             */
#if TRACEFAULTPAGE
            dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
            XPR(XPR_VM_FAULT,
                "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
                object, offset, m, 0, 0);
            assert(!m->busy);
            assert(!m->absent);

            m->busy = TRUE;
            break;
        }
        /*
         * we get here when there is no page present in the object at
         * the offset we're interested in... we'll allocate a page
         * at this point if the pager associated with
         * this object can provide the data or we're the top object...
         * object is locked;  m == NULL
         */
        if (must_be_resident) {
            if (fault_type == VM_PROT_NONE &&
                object == kernel_object) {
                /*
                 * We've been called from vm_fault_unwire()
                 * while removing a map entry that was allocated
                 * with KMA_KOBJECT and KMA_VAONLY. This page
                 * is not present and there's nothing more to
                 * do here (nothing to unwire).
                 */
                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return VM_FAULT_MEMORY_ERROR;
            }
            goto dont_look_for_page;
        }

#if !MACH_PAGEMAP
        data_supply = FALSE;
#endif /* !MACH_PAGEMAP */
        look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);

#if TRACEFAULTPAGE
        dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
        if (!look_for_page && object == first_object && !object->phys_contiguous) {
            /*
             * Allocate a new page for this object/offset pair as a placeholder
             */
            m = vm_page_grab();
#if TRACEFAULTPAGE
            dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
            if (m == VM_PAGE_NULL) {

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_MEMORY_SHORTAGE);
            }

            if (fault_info && fault_info->batch_pmap_op == TRUE) {
                vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
            } else {
                vm_page_insert(m, object, offset);
            }
        }
        if (look_for_page) {
            kern_return_t	rc;
            int		my_fault_type;

            /*
             * If the memory manager is not ready, we
             * cannot make requests.
             */
            if (!object->pager_ready) {
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
                if (m != VM_PAGE_NULL)
                    VM_PAGE_FREE(m);

                XPR(XPR_VM_FAULT,
                    "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
                    object, offset, 0, 0, 0);

                /*
                 * take an extra ref so object won't die
                 */
                vm_object_reference_locked(object);
                vm_fault_cleanup(object, first_m);
                counter(c_vm_fault_page_block_backoff_kernel++);

                vm_object_lock(object);
                assert(object->ref_count > 0);

                if (!object->pager_ready) {
                    wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);

                    vm_object_unlock(object);
                    if (wait_result == THREAD_WAITING)
                        wait_result = thread_block(THREAD_CONTINUE_NULL);
                    vm_object_deallocate(object);

                    goto backoff;
                } else {
                    vm_object_unlock(object);
                    vm_object_deallocate(object);
                    thread_interrupt_level(interruptible_state);

                    return (VM_FAULT_RETRY);
                }
            }
            if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
                /*
                 * If there are too many outstanding page
                 * requests pending on this external object, we
                 * wait for them to be resolved now.
                 */
#if TRACEFAULTPAGE
                dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
                if (m != VM_PAGE_NULL)
                    VM_PAGE_FREE(m);
                /*
                 * take an extra ref so object won't die
                 */
                vm_object_reference_locked(object);

                vm_fault_cleanup(object, first_m);

                counter(c_vm_fault_page_block_backoff_kernel++);

                vm_object_lock(object);
                assert(object->ref_count > 0);

                if (object->paging_in_progress >= vm_object_pagein_throttle) {
                    vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);

                    vm_object_unlock(object);
                    wait_result = thread_block(THREAD_CONTINUE_NULL);
                    vm_object_deallocate(object);

                    goto backoff;
                } else {
                    vm_object_unlock(object);
                    vm_object_deallocate(object);
                    thread_interrupt_level(interruptible_state);

                    return (VM_FAULT_RETRY);
                }
            }
            if (object->internal &&
                (COMPRESSED_PAGER_IS_ACTIVE
                 || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)) {
                int compressed_count_delta;

                if (m == VM_PAGE_NULL) {
                    /*
                     * Allocate a new page for this object/offset pair as a placeholder
                     */
                    m = vm_page_grab();
#if TRACEFAULTPAGE
                    dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
                    if (m == VM_PAGE_NULL) {

                        vm_fault_cleanup(object, first_m);
                        thread_interrupt_level(interruptible_state);

                        return (VM_FAULT_MEMORY_SHORTAGE);
                    }
                    m->absent = TRUE;
                    if (fault_info && fault_info->batch_pmap_op == TRUE) {
                        vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
                    } else {
                        vm_page_insert(m, object, offset);
                    }
                }
                pager = object->pager;

                assert(object->paging_in_progress > 0);
                vm_object_unlock(object);

                rc = vm_compressor_pager_get(
                    pager,
                    offset + object->paging_offset,
                    m->phys_page,
                    &my_fault_type,
                    0,
                    &compressed_count_delta);
                if (type_of_fault == NULL) {
                    int	throttle_delay;

                    /*
                     * we weren't called from vm_fault, so we
                     * need to apply page creation throttling
                     * do it before we re-acquire any locks
                     */
                    if (my_fault_type == DBG_COMPRESSOR_FAULT) {
                        if ((throttle_delay = vm_page_throttled(TRUE))) {
                            VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
                            delay(throttle_delay);
                        }
                    }
                }
);
1649 assert(object
->paging_in_progress
> 0);
1651 vm_compressor_pager_count(
1653 compressed_count_delta
,
1654 FALSE
, /* shared_lock */
1661 if ((m
->object
->wimg_bits
&
1663 VM_WIMG_USE_DEFAULT
) {
1665 * If the page is not cacheable,
1666 * we can't let its contents
1667 * linger in the data cache
1668 * after the decompression.
1670 pmap_sync_page_attributes_phys(
1673 m
->written_by_kernel
= TRUE
;
1677 * If the object is purgeable, its
1678 * owner's purgeable ledgers have been
1679 * updated in vm_page_insert() but the
1680 * page was also accounted for in a
1681 * "compressed purgeable" ledger, so
1684 if ((object
->purgable
!=
1685 VM_PURGABLE_DENY
) &&
1686 (object
->vo_purgeable_owner
!=
1689 * One less compressed
1692 vm_purgeable_compressed_update(
1698 case KERN_MEMORY_FAILURE
:
1703 case KERN_MEMORY_ERROR
:
1707 panic("vm_fault_page(): unexpected "
1709 "vm_compressor_pager_get()\n",
1712 PAGE_WAKEUP_DONE(m
);
1715 goto data_requested
;
            my_fault_type = DBG_PAGEIN_FAULT;

            if (m != VM_PAGE_NULL) {
                VM_PAGE_FREE(m);
                m = VM_PAGE_NULL;
            }

#if TRACEFAULTPAGE
            dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif

            /*
             * It's possible someone called vm_object_destroy while we weren't
             * holding the object lock.  If that has happened, then bail out
             * here.
             */
            pager = object->pager;

            if (pager == MEMORY_OBJECT_NULL) {
                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);
                return VM_FAULT_MEMORY_ERROR;
            }

            /*
             * We have an absent page in place for the faulting offset,
             * so we can release the object lock.
             */
            vm_object_unlock(object);
            /*
             * If this object uses a copy_call strategy,
             * and we are interested in a copy of this object
             * (having gotten here only by following a
             * shadow chain), then tell the memory manager
             * via a flag added to the desired_access
             * parameter, so that it can detect a race
             * between our walking down the shadow chain
             * and its pushing pages up into a copy of
             * the object that it manages.
             */
            if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object)
                wants_copy_flag = VM_PROT_WANTS_COPY;
            else
                wants_copy_flag = VM_PROT_NONE;

            XPR(XPR_VM_FAULT,
                "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
                object, offset, m,
                access_required | wants_copy_flag, 0);
            if (object->copy == first_object) {
                /*
                 * if we issue the memory_object_data_request in
                 * this state, we are subject to a deadlock with
                 * the underlying filesystem if it is trying to
                 * shrink the file resulting in a push of pages
                 * into the copy object...  that push will stall
                 * on the placeholder page, and if the pushing thread
                 * is holding a lock that is required on the pagein
                 * path (such as a truncate lock), we'll deadlock...
                 * to avoid this potential deadlock, we throw away
                 * our placeholder page before calling memory_object_data_request
                 * and force this thread to retry the vm_fault_page after
                 * we have issued the I/O.  the second time through this path
                 * we will find the page already in the cache (presumably still
                 * busy waiting for the I/O to complete) and then complete
                 * the fault w/o having to go through memory_object_data_request again
                 */
                assert(first_m != VM_PAGE_NULL);
                assert(first_m->object == first_object);

                vm_object_lock(first_object);
                VM_PAGE_FREE(first_m);
                vm_object_paging_end(first_object);
                vm_object_unlock(first_object);

                first_m = VM_PAGE_NULL;
                force_fault_retry = TRUE;

                vm_fault_page_forced_retry++;
            }
            if (data_already_requested == TRUE) {
                orig_behavior = fault_info->behavior;
                orig_cluster_size = fault_info->cluster_size;

                fault_info->behavior = VM_BEHAVIOR_RANDOM;
                fault_info->cluster_size = PAGE_SIZE;
            }
            /*
             * Call the memory manager to retrieve the data.
             */
            rc = memory_object_data_request(
                pager,
                offset + object->paging_offset,
                PAGE_SIZE,
                access_required | wants_copy_flag,
                (memory_object_fault_info_t)fault_info);

            if (data_already_requested == TRUE) {
                fault_info->behavior = orig_behavior;
                fault_info->cluster_size = orig_cluster_size;
            } else {
                data_already_requested = TRUE;
            }
            DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
#if TRACEFAULTPAGE
            dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc);	/* (TEST/DEBUG) */
#endif
            vm_object_lock(object);
        data_requested:
            if (rc != KERN_SUCCESS) {

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return ((rc == MACH_SEND_INTERRUPTED) ?
                        VM_FAULT_INTERRUPTED :
                        VM_FAULT_MEMORY_ERROR);
            } else {
                clock_sec_t	tv_sec;
                clock_usec_t	tv_usec;

                if (my_fault_type == DBG_PAGEIN_FAULT) {
                    clock_get_system_microtime(&tv_sec, &tv_usec);
                    current_thread()->t_page_creation_time = tv_sec;
                    current_thread()->t_page_creation_count = 0;
                }
            }
            if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_INTERRUPTED);
            }
            if (force_fault_retry == TRUE) {

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_RETRY);
            }
            if (m == VM_PAGE_NULL && object->phys_contiguous) {
                /*
                 * No page here means that the object we
                 * initially looked up was "physically
                 * contiguous" (i.e. device memory).  However,
                 * with Virtual VRAM, the object might not
                 * be backed by that device memory anymore,
                 * so we're done here only if the object is
                 * still "phys_contiguous".
                 * Otherwise, if the object is no longer
                 * "phys_contiguous", we need to retry the
                 * page fault against the object's new backing
                 * store (different memory object).
                 */
                break;
            }
            /*
             * potentially a pagein fault
             * if we make it through the state checks
             * above, then we'll count it as such
             */
            my_fault = my_fault_type;

            /*
             * Retry with same object/offset, since new data may
             * be in a different page (i.e., m is meaningless at
             * this point).
             */
            continue;
        }
dont_look_for_page:
        /*
         * We get here if the object has no pager, or an existence map
         * exists and indicates the page isn't present on the pager
         * or we're unwiring a page.  If a pager exists, but there
         * is no existence map, then the m->absent case above handles
         * the ZF case when the pager can't provide the page
         */
#if TRACEFAULTPAGE
        dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
        if (object == first_object)
            first_m = m;
        else
            assert(m == VM_PAGE_NULL);

        XPR(XPR_VM_FAULT,
            "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
            object, offset, m,
            object->shadow, 0);

        next_object = object->shadow;
== VM_OBJECT_NULL
) {
1920 * we've hit the bottom of the shadown chain,
1921 * fill the page in the top object with zeros.
1923 assert(!must_be_resident
);
1925 if (object
!= first_object
) {
1926 vm_object_paging_end(object
);
1927 vm_object_unlock(object
);
1929 object
= first_object
;
1930 offset
= first_offset
;
1931 vm_object_lock(object
);
1934 assert(m
->object
== object
);
1935 first_m
= VM_PAGE_NULL
;
            /*
             * check for any conditions that prevent
             * us from creating a new zero-fill page
             * vm_fault_check will do all of the
             * fault cleanup in the case of an error condition
             * including resetting the thread_interrupt_level
             */
            error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);

            if (error != VM_FAULT_SUCCESS)
                return (error);

            if (m == VM_PAGE_NULL) {
                m = vm_page_grab();

                if (m == VM_PAGE_NULL) {
                    vm_fault_cleanup(object, VM_PAGE_NULL);
                    thread_interrupt_level(interruptible_state);

                    return (VM_FAULT_MEMORY_SHORTAGE);
                }
                vm_page_insert(m, object, offset);
            }
            if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
                m->absent = TRUE;

            my_fault = vm_fault_zero_page(m, no_zero_fill);

            break;
        } else {
            /*
             * Move on to the next object.  Lock the next
             * object before unlocking the current one.
             */
            if ((object != first_object) || must_be_resident)
                vm_object_paging_end(object);

            offset += object->vo_shadow_offset;
            fault_info->lo_offset += object->vo_shadow_offset;
            fault_info->hi_offset += object->vo_shadow_offset;
            access_required = VM_PROT_READ;

            vm_object_lock(next_object);
            vm_object_unlock(object);

            object = next_object;
            vm_object_paging_begin(object);
        }
    }
    /*
     * PAGE HAS BEEN FOUND.
     *
     * This page (m) is:
     *	busy, so that we can play with it;
     *	not absent, so that nobody else will fill it;
     *	possibly eligible for pageout;
     *
     * The top-level page (first_m) is:
     *	VM_PAGE_NULL if the page was found in the
     *	 top-level object;
     *	busy, not absent, and ineligible for pageout.
     *
     * The current object (object) is locked.  A paging
     * reference is held for the current and top-level
     * objects.
     */
#if TRACEFAULTPAGE
    dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
#if	EXTRA_ASSERTIONS
    assert(m->busy && !m->absent);
    assert((first_m == VM_PAGE_NULL) ||
           (first_m->busy && !first_m->absent &&
            !first_m->active && !first_m->inactive));
#endif	/* EXTRA_ASSERTIONS */
    /*
     * ENCRYPTED SWAP:
     * If we found a page, we must have decrypted it before we
     * get here...
     */
    ASSERT_PAGE_DECRYPTED(m);

    XPR(XPR_VM_FAULT,
        "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
        object, offset, m,
        first_object, first_m);
    /*
     * If the page is being written, but isn't
     * already owned by the top-level object,
     * we have to copy it into a new page owned
     * by the top-level object.
     */
    if (object != first_object) {

#if TRACEFAULTPAGE
        dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type);	/* (TEST/DEBUG) */
#endif
        if (fault_type & VM_PROT_WRITE) {
            vm_page_t copy_m;
            /*
             * We only really need to copy if we
             * want to write it.
             */
            assert(!must_be_resident);

            /*
             * are we protecting the system from
             * backing store exhaustion.  If so
             * sleep unless we are privileged.
             */
            if (vm_backing_store_low) {
                if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {

                    RELEASE_PAGE(m);
                    vm_fault_cleanup(object, first_m);

                    assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

                    thread_block(THREAD_CONTINUE_NULL);
                    thread_interrupt_level(interruptible_state);

                    return (VM_FAULT_RETRY);
                }
            }
            /*
             * If we try to collapse first_object at this
             * point, we may deadlock when we try to get
             * the lock on an intermediate object (since we
             * have the bottom object locked).  We can't
             * unlock the bottom object, because the page
             * we found may move (by collapse) if we do.
             *
             * Instead, we first copy the page.  Then, when
             * we have no more use for the bottom object,
             * we unlock it and try to collapse.
             *
             * Note that we copy the page even if we didn't
             * need to... that's the breaks.
             */

            /*
             * Allocate a page for the copy
             */
            copy_m = vm_page_grab();

            if (copy_m == VM_PAGE_NULL) {
                RELEASE_PAGE(m);

                vm_fault_cleanup(object, first_m);
                thread_interrupt_level(interruptible_state);

                return (VM_FAULT_MEMORY_SHORTAGE);
            }
2097 "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
2101 vm_page_copy(m
, copy_m
);
            /*
             * If another map is truly sharing this
             * page with us, we have to flush all
             * uses of the original page, since we
             * can't distinguish those which want the
             * original from those which need the
             * new copy.
             *
             * XXXO If we know that only one map has
             * access to this page, then we could
             * avoid the pmap_disconnect() call.
             */
            if (m->pmapped)
                pmap_disconnect(m->phys_page);

            if (m->clustered) {
                VM_PAGE_COUNT_AS_PAGEIN(m);
                VM_PAGE_CONSUME_CLUSTERED(m);
            }
            assert(!m->cleaning);
			/*
			 * We no longer need the old page or object.
			 */
			vm_object_paging_end(object);
			vm_object_unlock(object);

			my_fault = DBG_COW_FAULT;
			VM_STAT_INCR(cow_faults);
			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
			current_task()->cow_faults++;

			object = first_object;
			offset = first_offset;

			vm_object_lock(object);
			/*
			 * get rid of the place holder
			 * page that we soldered in earlier
			 */
			VM_PAGE_FREE(first_m);
			first_m = VM_PAGE_NULL;

			/*
			 * and replace it with the
			 * page we just copied into
			 */
			assert(copy_m->busy);
			vm_page_insert(copy_m, object, offset);
			SET_PAGE_DIRTY(copy_m, TRUE);

			m = copy_m;
			/*
			 * Now that we've gotten the copy out of the
			 * way, let's try to collapse the top object.
			 * But we have to play ugly games with
			 * paging_in_progress to do that...
			 */
			vm_object_paging_end(object);
			vm_object_collapse(object, offset, TRUE);
			vm_object_paging_begin(object);

		} else
			*protection &= (~VM_PROT_WRITE);
	}
	/*
	 * Now check whether the page needs to be pushed into the
	 * copy object.  The use of asymmetric copy on write for
	 * shared temporary objects means that we may do two copies to
	 * satisfy the fault; one above to get the page from a
	 * shadowed object, and one here to push it into the copy.
	 */
	try_failed_count = 0;

	while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
		vm_object_offset_t	copy_offset;
		vm_page_t		copy_m;

#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);	/* (TEST/DEBUG) */
#endif
		/*
		 * If the page is being written, but hasn't been
		 * copied to the copy-object, we have to copy it there.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			*protection &= ~VM_PROT_WRITE;
			break;
		}

		/*
		 * If the page was guaranteed to be resident,
		 * we must have already performed the copy.
		 */
		if (must_be_resident)
			break;
		/*
		 * Try to get the lock on the copy_object.
		 */
		if (!vm_object_lock_try(copy_object)) {

			vm_object_unlock(object);
			try_failed_count++;

			mutex_pause(try_failed_count);	/* wait a bit */
			vm_object_lock(object);

			continue;
		}
		try_failed_count = 0;
		/*
		 * Make another reference to the copy-object,
		 * to keep it from disappearing during the
		 * copy.
		 */
		vm_object_reference_locked(copy_object);

		/*
		 * Does the page exist in the copy?
		 */
		copy_offset = first_offset - copy_object->vo_shadow_offset;
		if (copy_object->vo_size <= copy_offset)
			/*
			 * Copy object doesn't cover this page -- do nothing.
			 */
			;
		else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
			/*
			 * Page currently exists in the copy object
			 */
			if (copy_m->busy) {
				/*
				 * If the page is being brought
				 * in, wait for it and then retry.
				 */

				/*
				 * take an extra ref so object won't die
				 */
				vm_object_reference_locked(copy_object);
				vm_object_unlock(copy_object);
				vm_fault_cleanup(object, first_m);
				counter(c_vm_fault_page_block_backoff_kernel++);

				vm_object_lock(copy_object);
				assert(copy_object->ref_count > 0);
				VM_OBJ_RES_DECR(copy_object);
				vm_object_lock_assert_exclusive(copy_object);
				copy_object->ref_count--;
				assert(copy_object->ref_count > 0);
				copy_m = vm_page_lookup(copy_object, copy_offset);
				/*
				 * it's OK if the "copy_m" page is encrypted,
				 * because we're not moving it nor handling its
				 * contents.
				 */
				if (copy_m != VM_PAGE_NULL && copy_m->busy) {
					PAGE_ASSERT_WAIT(copy_m, interruptible);

					vm_object_unlock(copy_object);
					wait_result = thread_block(THREAD_CONTINUE_NULL);
					vm_object_deallocate(copy_object);

					goto backoff;
				} else {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					thread_interrupt_level(interruptible_state);

					return (VM_FAULT_RETRY);
				}
			}
		}
		else if (!PAGED_OUT(copy_object, copy_offset)) {
			/*
			 * If PAGED_OUT is TRUE, then the page used to exist
			 * in the copy-object, and has already been paged out.
			 * We don't need to repeat this. If PAGED_OUT is
			 * FALSE, then either we don't know (!pager_created,
			 * for example) or it hasn't been paged out.
			 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
			 * We must copy the page to the copy object.
			 */
			if (vm_backing_store_low) {
				/*
				 * we are protecting the system from
				 * backing store exhaustion.  If so
				 * sleep unless we are privileged.
				 */
				if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
					assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

					VM_OBJ_RES_DECR(copy_object);
					vm_object_lock_assert_exclusive(copy_object);
					copy_object->ref_count--;
					assert(copy_object->ref_count > 0);

					vm_object_unlock(copy_object);
					vm_fault_cleanup(object, first_m);
					thread_block(THREAD_CONTINUE_NULL);
					thread_interrupt_level(interruptible_state);

					return (VM_FAULT_RETRY);
				}
			}
			/*
			 * Allocate a page for the copy
			 */
			copy_m = vm_page_alloc(copy_object, copy_offset);

			if (copy_m == VM_PAGE_NULL) {
				VM_OBJ_RES_DECR(copy_object);
				vm_object_lock_assert_exclusive(copy_object);
				copy_object->ref_count--;
				assert(copy_object->ref_count > 0);

				vm_object_unlock(copy_object);
				vm_fault_cleanup(object, first_m);
				thread_interrupt_level(interruptible_state);

				return (VM_FAULT_MEMORY_SHORTAGE);
			}
			/*
			 * Must copy page into copy-object.
			 */
			vm_page_copy(m, copy_m);

			/*
			 * If the old page was in use by any users
			 * of the copy-object, it must be removed
			 * from all pmaps.  (We can't know which
			 * pmaps use it.)
			 */
			if (m->pmapped)
				pmap_disconnect(m->phys_page);

			if (m->clustered) {
				VM_PAGE_COUNT_AS_PAGEIN(m);
				VM_PAGE_CONSUME_CLUSTERED(m);
			}
			/*
			 * If there's a pager, then immediately
			 * page out this page, using the "initialize"
			 * option.  Else, we use the copy.
			 */
			if ((!copy_object->pager_ready)
#if MACH_PAGEMAP
			    || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
#endif
			    || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
			    ) {

				vm_page_lockspin_queues();
				assert(!m->cleaning);
				vm_page_activate(copy_m);
				vm_page_unlock_queues();

				SET_PAGE_DIRTY(copy_m, TRUE);
				PAGE_WAKEUP_DONE(copy_m);
			} else if (copy_object->internal &&
				   (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) {
				/*
				 * For internal objects check with the pager to see
				 * if the page already exists in the backing store.
				 * If yes, then we can drop the copy page. If not,
				 * then we'll activate it, mark it dirty and keep it
				 * around.
				 */
				kern_return_t kr = KERN_SUCCESS;

				memory_object_t	copy_pager = copy_object->pager;
				assert(copy_pager != MEMORY_OBJECT_NULL);
				vm_object_paging_begin(copy_object);

				vm_object_unlock(copy_object);

				kr = memory_object_data_request(
					copy_pager,
					copy_offset + copy_object->paging_offset,
					0, /* Only query the pager. */
					VM_PROT_READ,
					NULL);

				vm_object_lock(copy_object);

				vm_object_paging_end(copy_object);

				/*
				 * Since we dropped the copy_object's lock,
				 * check whether we'll have to deallocate
				 * the hard way.
				 */
				if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					vm_object_lock(object);

					continue;
				}
				if (kr == KERN_SUCCESS) {
					/*
					 * The pager has the page. We don't want to overwrite
					 * that page by sending this one out to the backing store.
					 * So we drop the copy page.
					 */
					VM_PAGE_FREE(copy_m);

				} else {
					/*
					 * The pager doesn't have the page. We'll keep this one
					 * around in the copy object. It might get sent out to
					 * the backing store under memory pressure.
					 */
					vm_page_lockspin_queues();
					assert(!m->cleaning);
					vm_page_activate(copy_m);
					vm_page_unlock_queues();

					SET_PAGE_DIRTY(copy_m, TRUE);
					PAGE_WAKEUP_DONE(copy_m);
				}
			} else {

				assert(copy_m->busy == TRUE);
				assert(!m->cleaning);

				/*
				 * dirty is protected by the object lock
				 */
				SET_PAGE_DIRTY(copy_m, TRUE);

				/*
				 * The page is already ready for pageout:
				 * not on pageout queues and busy.
				 * Unlock everything except the
				 * copy_object itself.
				 */
				vm_object_unlock(object);

				/*
				 * Write the page to the copy-object,
				 * flushing it from the kernel.
				 */
				vm_pageout_initialize_page(copy_m);

				/*
				 * Since the pageout may have
				 * temporarily dropped the
				 * copy_object's lock, we
				 * check whether we'll have
				 * to deallocate the hard way.
				 */
				if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					vm_object_lock(object);

					continue;
				}
				/*
				 * Pick back up the old object's
				 * lock.  [It is safe to do so,
				 * since it must be deeper in the
				 * shadow chain.]
				 */
				vm_object_lock(object);
			}
			/*
			 * Because we're pushing a page upward
			 * in the object tree, we must restart
			 * any faults that are waiting here.
			 * [Note that this is an expansion of
			 * PAGE_WAKEUP that uses the THREAD_RESTART
			 * wait result].  Can't turn off the page's
			 * busy bit because we're not done with it.
			 */
			if (m->wanted) {
				m->wanted = FALSE;
				thread_wakeup_with_result((event_t) m, THREAD_RESTART);
			}
		}
		/*
		 * The reference count on copy_object must be
		 * at least 2: one for our extra reference,
		 * and at least one from the outside world
		 * (we checked that when we last locked
		 * copy_object).
		 */
		vm_object_lock_assert_exclusive(copy_object);
		copy_object->ref_count--;
		assert(copy_object->ref_count > 0);

		VM_OBJ_RES_DECR(copy_object);
		vm_object_unlock(copy_object);

		break;
	}
done:
	*top_page = first_m;

	XPR(XPR_VM_FAULT,
	    "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
	    object, offset, m, first_m, 0);

	if (m != VM_PAGE_NULL) {
		retval = VM_FAULT_SUCCESS;

		if (my_fault == DBG_PAGEIN_FAULT) {

			VM_PAGE_COUNT_AS_PAGEIN(m);

			if (m->object->internal)
				my_fault = DBG_PAGEIND_FAULT;
			else
				my_fault = DBG_PAGEINV_FAULT;

			/*
			 * evaluate access pattern and update state
			 * vm_fault_deactivate_behind depends on the
			 * state being up to date
			 */
			vm_fault_is_sequential(object, offset, fault_info->behavior);

			vm_fault_deactivate_behind(object, offset, fault_info->behavior);

		} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {

			VM_STAT_INCR(decompressions);
		}
		if (type_of_fault)
			*type_of_fault = my_fault;
	} else {
		retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
		assert(first_m == VM_PAGE_NULL);
		assert(object == first_object);
	}

	thread_interrupt_level(interruptible_state);

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);	/* (TEST/DEBUG) */
#endif
	return (retval);

backoff:
	thread_interrupt_level(interruptible_state);

	if (wait_result == THREAD_INTERRUPTED)
		return (VM_FAULT_INTERRUPTED);
	return (VM_FAULT_RETRY);
}
/*
 * CODE SIGNING:
 * When soft faulting a page, we have to validate the page if:
 * 1. the page is being mapped in user space
 * 2. the page hasn't already been found to be "tainted"
 * 3. the page belongs to a code-signed object
 * 4. the page has not been validated yet or has been mapped for write.
 */
#define VM_FAULT_NEED_CS_VALIDATION(pmap, page)				\
	((pmap) != kernel_pmap /*1*/ &&					\
	 !(page)->cs_tainted /*2*/ &&					\
	 (page)->object->code_signed /*3*/ &&				\
	 (!(page)->cs_validated || (page)->wpmapped /*4*/))
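/*
 * Illustrative sketch (not from the original source): this macro is a
 * cheap, short-circuiting guard intended to sit in front of the expensive
 * validation work, e.g.:
 *
 *	if (VM_FAULT_NEED_CS_VALIDATION(pmap, m))
 *		vm_page_validate_cs(m);
 *
 * which mirrors the call site in vm_fault_enter() below.  Kernel-pmap
 * mappings and already-tainted pages bail out before touching the
 * object's code-signing state at all.
 */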
/*
 * page queue lock must NOT be held
 * m->object must be locked
 *
 * NOTE: m->object could be locked "shared" only if we are called
 * from vm_fault() as part of a soft fault. If so, we must be
 * careful not to modify the VM object in any way that is not
 * legal under a shared lock...
 */
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);

unsigned long cs_enter_tainted_rejected = 0;
unsigned long cs_enter_tainted_accepted = 0;
kern_return_t
vm_fault_enter(vm_page_t m,
	       pmap_t pmap,
	       vm_map_offset_t vaddr,
	       vm_prot_t prot,
	       vm_prot_t caller_prot,
	       boolean_t wired,
	       boolean_t change_wiring,
	       boolean_t no_cache,
	       boolean_t cs_bypass,
	       __unused int user_tag,
	       int pmap_options,
	       boolean_t *need_retry,
	       int *type_of_fault)
{
	kern_return_t	kr, pe_result;
	boolean_t	previously_pmapped = m->pmapped;
	boolean_t	must_disconnect = 0;
	boolean_t	map_is_switched, map_is_switch_protected;
	int		cs_enforcement_enabled;
	vm_prot_t	fault_type;

	fault_type = change_wiring ? VM_PROT_NONE : caller_prot;

	vm_object_lock_assert_held(m->object);

#if DEBUG
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
#endif /* DEBUG */

	if (m->phys_page == vm_page_guard_addr) {
		assert(m->fictitious);
		return KERN_SUCCESS;
	}
	if (*type_of_fault == DBG_ZERO_FILL_FAULT) {

		vm_object_lock_assert_exclusive(m->object);

	} else if ((fault_type & VM_PROT_WRITE) == 0) {
		/*
		 * This is not a "write" fault, so we
		 * might not have taken the object lock
		 * exclusively and we might not be able
		 * to update the "wpmapped" bit in
		 * vm_fault_enter().
		 * Let's just grant read access to
		 * the page for now and we'll
		 * soft-fault again if we need write
		 * access later...
		 */
		prot &= ~VM_PROT_WRITE;
	}
	if (m->pmapped == FALSE) {

		if (m->clustered) {
			if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
				/*
				 * found it in the cache, but this
				 * is the first fault-in of the page (m->pmapped == FALSE)
				 * so it must have come in as part of
				 * a cluster... account 1 pagein against it
				 */
				if (m->object->internal)
					*type_of_fault = DBG_PAGEIND_FAULT;
				else
					*type_of_fault = DBG_PAGEINV_FAULT;

				VM_PAGE_COUNT_AS_PAGEIN(m);
			}
			VM_PAGE_CONSUME_CLUSTERED(m);
		}
	}

	if (*type_of_fault != DBG_COW_FAULT) {
		DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);

		if (pmap == kernel_pmap) {
			DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
		}
	}
	/* Validate code signature if necessary. */
	if (VM_FAULT_NEED_CS_VALIDATION(pmap, m)) {
		vm_object_lock_assert_exclusive(m->object);

		if (m->cs_validated) {
			vm_cs_revalidates++;
		}

		/* VM map is locked, so 1 ref will remain on VM object -
		 * so no harm if vm_page_validate_cs drops the object lock */
		vm_page_validate_cs(m);
	}
#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
#define page_nx(m) ((m)->cs_nx)

	map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
			   (pmap == vm_map_pmap(current_thread()->map)));
	map_is_switch_protected = current_thread()->map->switch_protect;
	/* If the map is switched, and is switch-protected, we must protect
	 * some pages from being write-faulted: immutable pages because by
	 * definition they may not be written, and executable pages because that
	 * would provide a way to inject unsigned code.
	 * If the page is immutable, we can simply return. However, we can't
	 * immediately determine whether a page is executable anywhere. But,
	 * we can disconnect it everywhere and remove the executable protection
	 * from the current map. We do that below right before we do the
	 * PMAP_ENTER.
	 */
	cs_enforcement_enabled = cs_enforcement(NULL);

	if (cs_enforcement_enabled && map_is_switched &&
	    map_is_switch_protected && page_immutable(m, prot) &&
	    (prot & VM_PROT_WRITE))
	{
		return KERN_CODESIGN_ERROR;
	}

	if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) {
		if (cs_debug)
			printf("page marked to be NX, not letting it be mapped EXEC\n");
		return KERN_CODESIGN_ERROR;
	}
	/* A page could be tainted, or pose a risk of being tainted later.
	 * Check whether the receiving process wants it, and make it feel
	 * the consequences (that happens in cs_invalid_page()).
	 * For CS Enforcement, two other conditions will
	 * cause that page to be tainted as well:
	 * - pmapping an unsigned page executable - this means unsigned code;
	 * - writeable mapping of a validated page - the content of that page
	 *   can be changed without the kernel noticing, therefore unsigned
	 *   code can be created
	 */
	if (m->cs_tainted ||
	    (cs_enforcement_enabled &&
	     (/* The page is unsigned and wants to be executable */
	      (!m->cs_validated && (prot & VM_PROT_EXECUTE)) ||
	      /* The page should be immutable, but is in danger of being modified
	       * This is the case where we want policy from the code directory -
	       * is the page immutable or not? For now we have to assume that
	       * code pages will be immutable, data pages not.
	       * We'll assume a page is a code page if it has a code directory
	       * and we fault for execution.
	       * That is good enough since if we faulted the code page for
	       * writing in another map before, it is wpmapped; if we fault
	       * it for writing in this map later it will also be faulted for executing
	       * at the same time; and if we fault for writing in another map
	       * later, we will disconnect it from this pmap so we'll notice
	       * the change.
	       */
	      (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
	      ))
		)
	{
		/* We will have a tainted page. Have to handle the special case
		 * of a switched map now. If the map is not switched, standard
		 * procedure applies - call cs_invalid_page().
		 * If the map is switched, the real owner is invalid already.
		 * There is no point in invalidating the switching process since
		 * it will not be executing from the map. So we don't call
		 * cs_invalid_page() in that case. */
		boolean_t reject_page;
		if (map_is_switched) {
			assert(pmap == vm_map_pmap(current_thread()->map));
			assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
			reject_page = FALSE;
		} else {
			if (cs_debug > 5)
				printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n",
				       m->object->code_signed ? "yes" : "no",
				       m->cs_validated ? "yes" : "no",
				       m->cs_tainted ? "yes" : "no",
				       m->wpmapped ? "yes" : "no",
				       m->slid ? "yes" : "no",
				       (int)prot);
			reject_page = cs_invalid_page((addr64_t) vaddr);
		}
		if (reject_page) {
			/* reject the invalid page: abort the page fault */
			int			pid;
			const char		*procname;
			task_t			task;
			vm_object_t		file_object, shadow;
			vm_object_offset_t	file_offset;
			char			*pathname, *filename;
			vm_size_t		pathname_len, filename_len;
			boolean_t		truncated_path;
#define __PATH_MAX 1024
			struct timespec		mtime, cs_mtime;

			kr = KERN_CODESIGN_ERROR;
			cs_enter_tainted_rejected++;
			/* get process name and pid */
			procname = "?";
			task = current_task();
			pid = proc_selfpid();
			if (task->bsd_info != NULL)
				procname = proc_name_address(task->bsd_info);
			/* get file's VM object */
			file_object = m->object;
			file_offset = m->offset;
			for (shadow = file_object->shadow;
			     shadow != VM_OBJECT_NULL;
			     shadow = file_object->shadow) {
				vm_object_lock_shared(shadow);
				if (file_object != m->object) {
					vm_object_unlock(file_object);
				}
				file_offset += file_object->vo_shadow_offset;
				file_object = shadow;
			}
			mtime.tv_sec = 0;
			mtime.tv_nsec = 0;
			cs_mtime.tv_sec = 0;
			cs_mtime.tv_nsec = 0;
			/* get file's pathname and/or filename */
			pathname = NULL;
			filename = NULL;
			pathname_len = 0;
			filename_len = 0;
			truncated_path = FALSE;
			/* no pager -> no file -> no pathname, use "<nil>" in that case */
			if (file_object->pager != NULL) {
				pathname = (char *)kalloc(__PATH_MAX * 2);
				if (pathname) {
					pathname[0] = '\0';
					pathname_len = __PATH_MAX;
					filename = pathname + pathname_len;
					filename_len = __PATH_MAX;
				}
				vnode_pager_get_object_name(file_object->pager,
							    pathname,
							    pathname_len,
							    filename,
							    filename_len,
							    &truncated_path);
				if (pathname) {
					/* safety first... */
					pathname[__PATH_MAX-1] = '\0';
					filename[__PATH_MAX-1] = '\0';
				}
				vnode_pager_get_object_mtime(file_object->pager,
							     &mtime,
							     &cs_mtime);
			}
2856 printf("CODE SIGNING: process %d[%s]: "
2857 "rejecting invalid page at address 0x%llx "
2858 "from offset 0x%llx in file \"%s%s%s\" "
2859 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2860 "(signed:%d validated:%d tainted:%d "
2861 "wpmapped:%d slid:%d)\n",
2862 pid
, procname
, (addr64_t
) vaddr
,
2864 (pathname
? pathname
: "<nil>"),
2865 (truncated_path
? "/.../" : ""),
2866 (truncated_path
? filename
: ""),
2867 cs_mtime
.tv_sec
, cs_mtime
.tv_nsec
,
2868 ((cs_mtime
.tv_sec
== mtime
.tv_sec
&&
2869 cs_mtime
.tv_nsec
== mtime
.tv_nsec
)
2872 mtime
.tv_sec
, mtime
.tv_nsec
,
2873 m
->object
->code_signed
,
			if (file_object != m->object) {
				vm_object_unlock(file_object);
			}
			if (pathname_len != 0) {
				kfree(pathname, __PATH_MAX * 2);
				pathname = NULL;
				filename = NULL;
			}
		} else {
			/* proceed with the invalid page */
			kr = KERN_SUCCESS;
			if (!m->cs_validated &&
			    !m->object->code_signed) {
				/*
				 * This page has not been (fully) validated but
				 * does not belong to a code-signed object
				 * so it should not be forcefully considered
				 * as tainted.
				 * We're just concerned about it here because
				 * we've been asked to "execute" it but that
				 * does not mean that it should cause other
				 * accesses to fail.
				 * This happens when a debugger sets a
				 * breakpoint and we then execute code in
				 * that page.  Marking the page as "tainted"
				 * would cause any inspection tool ("leaks",
				 * "vmmap", "CrashReporter", ...) to get killed
				 * due to code-signing violation on that page,
				 * even though they're just reading it and not
				 * executing from it.
				 */
			} else {
				/*
				 * Page might have been tainted before or not;
				 * now it definitively is. If the page wasn't
				 * tainted, we must disconnect it from all
				 * pmaps later, to force existing mappings
				 * through that code path for re-consideration
				 * of the validity of that page.
				 */
				must_disconnect = !m->cs_tainted;
				m->cs_tainted = TRUE;
			}
			cs_enter_tainted_accepted++;
		}
		if (kr != KERN_SUCCESS) {
			if (cs_debug) {
				printf("CODESIGNING: vm_fault_enter(0x%llx): "
				       "*** INVALID PAGE ***\n",
				       (long long)vaddr);
			}
#if !SECURE_KERNEL
			if (cs_enforcement_panic) {
				panic("CODESIGNING: panicking on invalid page\n");
			}
#endif
		}

	} else {
		/* proceed with the valid page */
		kr = KERN_SUCCESS;
	}
	boolean_t	page_queues_locked = FALSE;
#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()	\
MACRO_BEGIN					\
	if (! page_queues_locked) {		\
		page_queues_locked = TRUE;	\
		vm_page_lockspin_queues();	\
	}					\
MACRO_END
#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()	\
MACRO_BEGIN					\
	if (page_queues_locked) {		\
		page_queues_locked = FALSE;	\
		vm_page_unlock_queues();	\
	}					\
MACRO_END
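/*
 * Illustrative note (not from the original source): these two helpers
 * implement a "lock at most once, unlock exactly once" idiom.  Any of
 * the branches below may call __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()
 * without knowing whether an earlier branch already took the page-queues
 * lock; repeated calls are no-ops, and the single
 * __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() at the end of the section releases
 * the lock only if it was ever taken.
 */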
	/*
	 * Hold queues lock to manipulate
	 * the page queues.  Change wiring
	 * case is obvious.
	 */
	assert(m->compressor || m->object != compressor_object);
	if (m->compressor) {
		/*
		 * Compressor pages are neither wired
		 * nor pageable and should never change.
		 */
		assert(m->object == compressor_object);
	} else if (change_wiring) {
		__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();

		if (wired) {
			if (kr == KERN_SUCCESS) {
				vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
			}
		} else {
			vm_page_unwire(m, TRUE);
		}
		/* we keep the page queues lock, if we need it later */
	} else {
		if (kr != KERN_SUCCESS) {
			__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
			vm_page_deactivate(m);
			/* we keep the page queues lock, if we need it later */
		} else if (((!m->active && !m->inactive) ||
			    m->clean_queue ||
			    no_cache) &&
			   !VM_PAGE_WIRED(m) && !m->throttled) {

			if (vm_page_local_q &&
			    !no_cache &&
			    (*type_of_fault == DBG_COW_FAULT ||
			     *type_of_fault == DBG_ZERO_FILL_FAULT)) {
				struct vpl	*lq;
				uint32_t	lid;

				__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
				vm_object_lock_assert_exclusive(m->object);
);
3002 * we got a local queue to stuff this
3004 * its safe to manipulate local and
3005 * local_id at this point since we're
3006 * behind an exclusive object lock and
3007 * the page is not on any global queue.
3009 * we'll use the current cpu number to
3010 * select the queue note that we don't
3011 * need to disable preemption... we're
3012 * going to behind the local queue's
3013 * lock to do the real work
3017 lq
= &vm_page_local_q
[lid
].vpl_un
.vpl
;
3019 VPL_LOCK(&lq
->vpl_lock
);
3021 vm_page_check_pageable_safe(m
);
3022 queue_enter(&lq
->vpl_queue
, m
,
3028 if (m
->object
->internal
)
3029 lq
->vpl_internal_count
++;
3031 lq
->vpl_external_count
++;
3033 VPL_UNLOCK(&lq
->vpl_lock
);
3035 if (lq
->vpl_count
> vm_page_local_q_soft_limit
)
3038 * we're beyond the soft limit
3039 * for the local queue
3040 * vm_page_reactivate_local will
3041 * 'try' to take the global page
3042 * queue lock... if it can't
3043 * that's ok... we'll let the
3044 * queue continue to grow up
3045 * to the hard limit... at that
3046 * point we'll wait for the
3047 * lock... once we've got the
3048 * lock, we'll transfer all of
3049 * the pages from the local
3050 * queue to the global active
3053 vm_page_reactivate_local(lid
, FALSE
, FALSE
);
			} else {

				__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();

				/*
				 * test again now that we hold the
				 * page queue lock
				 */
				if (!VM_PAGE_WIRED(m)) {
					if (m->clean_queue) {
						vm_page_queues_remove(m);

						vm_pageout_cleaned_reactivated++;
						vm_pageout_cleaned_fault_reactivated++;
					}

					if ((!m->active && !m->inactive) ||
					    no_cache) {
						/*
						 * If this is a no_cache mapping
						 * and the page has never been
						 * mapped before or was
						 * previously a no_cache page,
						 * then we want to leave pages
						 * in the speculative state so
						 * that they can be readily
						 * recycled if free memory runs
						 * low.  Otherwise the page is
						 * activated as normal.
						 */
						if (no_cache &&
						    (!previously_pmapped ||
						     m->no_cache)) {
							m->no_cache = TRUE;

							if (!m->speculative)
								vm_page_speculate(m, FALSE);

						} else if (!m->active &&
							   !m->inactive) {

							vm_page_activate(m);
						}
					}
				}
				/* we keep the page queues lock, if we need it later */
			}
		}
	}
	/* we're done with the page queues lock, if we ever took it */
	__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
	/* If we have a KERN_SUCCESS from the previous checks, we either have
	 * a good page, or a tainted page that has been accepted by the process.
	 * In both cases the page will be entered into the pmap.
	 * If the page is writeable, we need to disconnect it from other pmaps
	 * now so those processes can take note.
	 */
	if (kr == KERN_SUCCESS) {
		/*
		 * NOTE: we may only hold the vm_object lock SHARED
		 * at this point, so we need the phys_page lock to
		 * properly serialize updating the pmapped and
		 * xpmapped bits
		 */
		if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) {

			pmap_lock_phys_page(m->phys_page);
			/*
			 * go ahead and take the opportunity
			 * to set 'pmapped' here so that we don't
			 * need to grab this lock a 2nd time
			 * just below
			 */
			m->pmapped = TRUE;

			if (!m->xpmapped) {

				m->xpmapped = TRUE;

				pmap_unlock_phys_page(m->phys_page);

				if (!m->object->internal)
					OSAddAtomic(1, &vm_page_xpmapped_external_count);

				if ((COMPRESSED_PAGER_IS_ACTIVE) &&
				    m->object->internal &&
				    m->object->pager != NULL) {
					/*
					 * This page could have been
					 * uncompressed by the
					 * compressor pager and its
					 * contents might be only in
					 * the data cache.
					 * Since it's being mapped for
					 * "execute" for the first time,
					 * make sure the icache is in
					 * sync.
					 */
					pmap_sync_page_data_phys(m->phys_page);
				}
			} else
				pmap_unlock_phys_page(m->phys_page);
		} else {
			if (m->pmapped == FALSE) {
				pmap_lock_phys_page(m->phys_page);
				m->pmapped = TRUE;
				pmap_unlock_phys_page(m->phys_page);
			}
		}
		if (vm_page_is_slideable(m)) {
			boolean_t was_busy = m->busy;

			vm_object_lock_assert_exclusive(m->object);

			m->busy = TRUE;
			kr = vm_page_slide(m, 0);
			assert(m->busy);
			if (!was_busy) {
				PAGE_WAKEUP_DONE(m);
			}
			if (kr != KERN_SUCCESS) {
				/*
				 * This page has not been slid correctly,
				 * do not do the pmap_enter() !
				 * Let vm_fault_enter() return the error
				 * so the caller can fail the fault.
				 */
				goto after_the_pmap_enter;
			}
		}
		if (fault_type & VM_PROT_WRITE) {

			if (m->wpmapped == FALSE) {
				vm_object_lock_assert_exclusive(m->object);
				if (!m->object->internal)
					task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED);
				m->wpmapped = TRUE;
			}
			if (must_disconnect) {
				/*
				 * We can only get here
				 * because of the CSE logic
				 */
				assert(cs_enforcement_enabled);
				pmap_disconnect(m->phys_page);
				/*
				 * If we are faulting for a write, we can clear
				 * the execute bit - that will ensure the page is
				 * checked again before being executable, which
				 * protects against a map switch.
				 * This only happens the first time the page
				 * gets tainted, so we won't get stuck here
				 * to make an already writeable page executable.
				 */
				if (!cs_bypass) {
					prot &= ~VM_PROT_EXECUTE;
				}
			}
		}
		/* Prevent a deadlock by not
		 * holding the object lock if we need to wait for a page in
		 * pmap_enter() - <rdar://problem/7138958> */
		PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
				   wired,
				   pmap_options | PMAP_OPTIONS_NOWAIT,
				   pe_result);

		if (pe_result == KERN_RESOURCE_SHORTAGE) {

			if (need_retry) {
				/*
				 * this will be non-null in the case where we hold the lock
				 * on the top-object in this chain... we can't just drop
				 * the lock on the object we're inserting the page into
				 * and recall the PMAP_ENTER since we can still cause
				 * a deadlock if one of the critical paths tries to
				 * acquire the lock on the top-object and we're blocked
				 * in PMAP_ENTER waiting for memory... our only recourse
				 * is to deal with it at a higher level where we can
				 * drop both locks.
				 */
				*need_retry = TRUE;
				vm_pmap_enter_retried++;
				goto after_the_pmap_enter;
			}
			/* The nonblocking version of pmap_enter did not succeed,
			 * and we don't need to drop other locks and retry
			 * at the level above us, so
			 * use the blocking version instead. Requires marking
			 * the page busy and unlocking the object */
			boolean_t was_busy = m->busy;

			vm_object_lock_assert_exclusive(m->object);

			m->busy = TRUE;
			vm_object_unlock(m->object);

			PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
					   0, wired,
					   pmap_options, pe_result);

			/* Take the object lock again. */
			vm_object_lock(m->object);

			/* If the page was busy, someone else will wake it up.
			 * Otherwise, we have to do it now. */
			assert(m->busy);
			if (!was_busy) {
				PAGE_WAKEUP_DONE(m);
			}
			vm_pmap_enter_blocked++;
		}
	}
after_the_pmap_enter:
	return kr;
}

void
vm_pre_fault(vm_map_offset_t vaddr)
{
	if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {

		vm_fault(current_map(),		/* map */
			 vaddr,			/* vaddr */
			 VM_PROT_READ,		/* fault_type */
			 FALSE,			/* change_wiring */
			 THREAD_UNINT,		/* interruptible */
			 NULL,			/* caller_pmap */
			 0			/* caller_pmap_addr */);
	}
}
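/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to warm an address range before touching it could drive vm_pre_fault()
 * once per page.  The helper name vm_pre_fault_range() is hypothetical and
 * is fenced out of the build.
 */
#if 0
static void
vm_pre_fault_range(vm_map_offset_t start, vm_map_size_t size)
{
	vm_map_offset_t va;

	for (va = vm_map_trunc_page(start, PAGE_MASK);
	     va < start + size;
	     va += PAGE_SIZE) {
		/* cheap no-op when the page is already resident */
		vm_pre_fault(va);
	}
}
#endif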
/*
 *	Routine:	vm_fault
 *	Purpose:
 *		Handle page faults, including pseudo-faults
 *		used to change the wiring status of pages.
 *	Returns:
 *		Explicit continuations have been removed.
 *	Implementation:
 *		vm_fault and vm_fault_page save mucho state
 *		in the moral equivalent of a closure.  The state
 *		structure is allocated when first entering vm_fault
 *		and deallocated when leaving vm_fault.
 */

extern int _map_enter_debug;

unsigned long vm_fault_collapse_total = 0;
unsigned long vm_fault_collapse_skipped = 0;
kern_return_t
vm_fault(vm_map_t map,
	 vm_map_offset_t vaddr,
	 vm_prot_t fault_type,
	 boolean_t change_wiring,
	 int interruptible,
	 pmap_t caller_pmap,
	 vm_map_offset_t caller_pmap_addr)
{
	return vm_fault_internal(map, vaddr, fault_type, change_wiring,
				 interruptible, caller_pmap, caller_pmap_addr,
				 NULL);
}

kern_return_t
vm_fault_internal(vm_map_t map,
		  vm_map_offset_t vaddr,
		  vm_prot_t caller_prot,
		  boolean_t change_wiring,
		  int interruptible,
		  pmap_t caller_pmap,
		  vm_map_offset_t caller_pmap_addr,
		  ppnum_t *physpage_p)
{
	vm_map_version_t	version;	/* Map version for verification */
	boolean_t		wired;		/* Should mapping be wired down? */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_object_t		old_copy_object; /* Saved copy object */
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	kern_return_t		kr;

	vm_page_t		m;		/* Fast access to result_page */
	kern_return_t		error_code;
	vm_object_t		cur_object;
	vm_object_offset_t	cur_offset;
	vm_page_t		cur_m;
	vm_object_t		new_object;
	int			type_of_fault;
	pmap_t			pmap;
	boolean_t		interruptible_state;
	vm_map_t		real_map = map;
	vm_map_t		original_map = map;
	vm_prot_t		fault_type;
	vm_prot_t		original_fault_type;
	struct vm_object_fault_info fault_info;
	boolean_t		need_collapse = FALSE;
	boolean_t		need_retry = FALSE;
	boolean_t		*need_retry_ptr = NULL;
	int			object_lock_type = 0;
	int			cur_object_lock_type;
	vm_object_t		top_object = VM_OBJECT_NULL;
	int			compressed_count_delta;
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			      (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
			      ((uint64_t)vaddr >> 32),
			      vaddr,
			      (map == kernel_map),
			      0,
			      0);

	if (get_preemption_level() != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				      (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
				      ((uint64_t)vaddr >> 32),
				      vaddr,
				      KERN_FAILURE,
				      0,
				      0);

		return (KERN_FAILURE);
	}
	interruptible_state = thread_interrupt_level(interruptible);

	fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);

	VM_STAT_INCR(faults);
	current_task()->faults++;
	original_fault_type = fault_type;

	if (fault_type & VM_PROT_WRITE)
		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
	else
		object_lock_type = OBJECT_LOCK_SHARED;

	cur_object_lock_type = OBJECT_LOCK_SHARED;

RetryFault:
	/*
	 * assume we will hit a page in the cache
	 * otherwise, explicitly override with
	 * the real fault type once we determine it
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;
	/*
	 * Find the backing store object and offset into
	 * it to begin the search.
	 */
	fault_type = original_fault_type;
	map = original_map;
	vm_map_lock_read(map);

	kr = vm_map_lookup_locked(&map, vaddr, fault_type,
				  object_lock_type, &version,
				  &object, &offset, &prot, &wired,
				  &fault_info,
				  &real_map);

	if (kr != KERN_SUCCESS) {
		vm_map_unlock_read(map);
		goto done;
	}
	pmap = real_map->pmap;
	fault_info.interruptible = interruptible;
	fault_info.stealth = FALSE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	/*
	 * If the page is wired, we must fault for the current protection
	 * value, to avoid further faults.
	 */
	if (wired) {
		fault_type = prot | VM_PROT_WRITE;
		/*
		 * since we're treating this fault as a 'write'
		 * we must hold the top object lock exclusively
		 */
		if (object_lock_type == OBJECT_LOCK_SHARED) {

			object_lock_type = OBJECT_LOCK_EXCLUSIVE;

			if (vm_object_lock_upgrade(object) == FALSE) {
				/*
				 * couldn't upgrade, so explicitly
				 * take the lock exclusively
				 */
				vm_object_lock(object);
			}
		}
	}
#if	VM_FAULT_CLASSIFY
	/*
	 *	Temporary data gathering code
	 */
	vm_fault_classify(object, offset, fault_type);
#endif
	/*
	 *	Fast fault code.  The basic idea is to do as much as
	 *	possible while holding the map lock and object locks.
	 *	Busy pages are not used until the object lock has to
	 *	be dropped to do something (copy, zero fill, pmap enter).
	 *	Similarly, paging references aren't acquired until that
	 *	point, and object references aren't used.
	 *
	 *	If we can figure out what to do
	 *	(zero fill, copy on write, pmap enter) while holding
	 *	the locks, then it gets done.  Otherwise, we give up,
	 *	and use the original fault path (which doesn't hold
	 *	the map lock, and relies on busy pages).
	 *	The give up cases include:
	 *	- Have to talk to pager.
	 *	- Page is busy, absent or in error.
	 *	- Pager has locked out desired access.
	 *	- Fault needs to be restarted.
	 *	- Have to push page into copy object.
	 *
	 *	The code is an infinite loop that moves one level down
	 *	the shadow chain each time.  cur_object and cur_offset
	 *	refer to the current object being examined. object and offset
	 *	are the original object from the map.  The loop is at the
	 *	top level if and only if object and cur_object are the same.
	 *
	 *	Invariants:  Map lock is held throughout.  Lock is held on
	 *		original object and cur_object (if different) when
	 *		continuing or exiting loop.
	 */
	/*
	 * If this page is to be inserted in a copy delay object
	 * for writing, and if the object has a copy, then the
	 * copy delay strategy is implemented in the slow fault page.
	 */
	if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
	    object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE))
		goto handle_copy_delay;

	cur_object = object;
	cur_offset = offset;

	while (TRUE) {
		if (!cur_object->pager_created &&
		    cur_object->phys_contiguous) /* superpage */
			break;

		if (cur_object->blocked_access) {
			/*
			 * Access to this VM object has been blocked.
			 * Let the slow path handle it.
			 */
			break;
		}

		m = vm_page_lookup(cur_object, cur_offset);

		if (m != VM_PAGE_NULL) {
			if (m->busy) {
				wait_result_t	result;
				/*
				 * in order to do the PAGE_ASSERT_WAIT, we must
				 * have object that 'm' belongs to locked exclusively
				 */
				if (object != cur_object) {

					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we can no longer be
							 * certain about cur_object (since we
							 * don't hold a reference on it)...
							 * first drop the top object lock
							 */
							vm_object_unlock(object);

							vm_map_unlock_read(map);
							if (real_map != map)
								vm_map_unlock(real_map);

							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				if (m->pageout_queue && m->object->internal && COMPRESSED_PAGER_IS_ACTIVE) {
					/*
					 * m->busy == TRUE and the object is locked exclusively
					 * if m->pageout_queue == TRUE after we acquire the
					 * queues lock, we are guaranteed that it is stable on
					 * the pageout queue and therefore reclaimable
					 *
					 * NOTE: this is only true for the internal pageout queue
					 * in the compressor world
					 */
					vm_page_lock_queues();

					if (m->pageout_queue) {
						vm_pageout_throttle_up(m);
						vm_page_unlock_queues();

						PAGE_WAKEUP_DONE(m);
						goto reclaimed_from_pageout;
					}
					vm_page_unlock_queues();
				}
				if (object != cur_object)
					vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				result = PAGE_ASSERT_WAIT(m, interruptible);

				vm_object_unlock(cur_object);

				if (result == THREAD_WAITING) {
					result = thread_block(THREAD_CONTINUE_NULL);

					counter(c_vm_fault_page_block_busy_kernel++);
				}
				if (result == THREAD_AWAKENED || result == THREAD_RESTART)
					goto RetryFault;

				kr = KERN_ABORTED;
				goto done;
			}
reclaimed_from_pageout:
			if (m->laundry) {
				if (object != cur_object) {
					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						vm_map_unlock_read(map);
						if (real_map != map)
							vm_map_unlock(real_map);

						goto RetryFault;
					}

				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				vm_pageout_steal_laundry(m, FALSE);
			}
			if (m->phys_page == vm_page_guard_addr) {
				/*
				 * Guard page: let the slow path deal with it
				 */
				break;
			}
			if (m->unusual && (m->error || m->restart || m->private || m->absent)) {
				/*
				 * Unusual case... let the slow path deal with it
				 */
				break;
			}
			if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m->object)) {
				if (object != cur_object)
					vm_object_unlock(object);
				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);
				vm_object_unlock(cur_object);
				kr = KERN_MEMORY_ERROR;
				goto done;
			}
			if (m->encrypted) {
				/*
				 * We've soft-faulted (because it's not in the page
				 * table) on an encrypted page.
				 * Keep the page "busy" so that no one messes with
				 * it during the decryption.
				 * Release the extra locks we're holding, keep only
				 * the page's VM object lock.
				 *
				 * in order to set 'busy' on 'm', we must
				 * have object that 'm' belongs to locked exclusively
				 */
				if (object != cur_object) {
					vm_object_unlock(object);

					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we've already dropped
							 * the top object lock associated with this page
							 * and the current one got dropped due to the
							 * failed upgrade... the state is no longer valid
							 */
							vm_map_unlock_read(map);
							if (real_map != map)
								vm_map_unlock(real_map);

							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				m->busy = TRUE;

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				vm_page_decrypt(m, 0);

				assert(m->busy);
				PAGE_WAKEUP_DONE(m);

				vm_object_unlock(cur_object);
				/*
				 * Retry from the top, in case anything
				 * changed while we were decrypting...
				 */
				goto RetryFault;
			}
			ASSERT_PAGE_DECRYPTED(m);
			if (vm_page_is_slideable(m)) {
				/*
				 * We might need to slide this page, and so,
				 * we want to hold the VM object exclusively.
				 */
				if (object != cur_object) {
					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						vm_map_unlock_read(map);
						if (real_map != map)
							vm_map_unlock(real_map);

						goto RetryFault;
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					vm_object_unlock(object);
					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
					vm_map_unlock_read(map);
					if (real_map != map)
						vm_map_unlock(real_map);

					goto RetryFault;
				}
			}
			if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m) ||
			    (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
upgrade_for_validation:
				/*
				 * We might need to validate this page
				 * against its code signature, so we
				 * want to hold the VM object exclusively.
				 */
				if (object != cur_object) {
					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						vm_map_unlock_read(map);
						if (real_map != map)
							vm_map_unlock(real_map);

						goto RetryFault;
					}

				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);
						continue;
					}
				}
			}

			/*
			 *	Two cases of map in faults:
			 *	    - At top level w/o copy object.
			 *	    - Read fault anywhere.
			 *		--> must disallow write.
			 */
			if (object == cur_object && object->copy == VM_OBJECT_NULL) {

				goto FastPmapEnter;
			}

			if ((fault_type & VM_PROT_WRITE) == 0) {

				if (object != cur_object) {
					/*
					 * We still need to hold the top object
					 * lock here to prevent a race between
					 * a read fault (taking only "shared"
					 * locks) and a write fault (taking
					 * an "exclusive" lock on the top
					 * object.
					 * Otherwise, as soon as we release the
					 * top lock, the write fault could
					 * proceed and actually complete before
					 * the read fault, and the copied page's
					 * translation could then be overwritten
					 * by the read fault's translation for
					 * the original page.
					 *
					 * Let's just record what the top object
					 * is and we'll release it later.
					 */
					top_object = object;

					/*
					 * switch to the object that has the new page
					 */
					object = cur_object;
					object_lock_type = cur_object_lock_type;
				}
FastPmapEnter:
				/*
				 * prepare for the pmap_enter...
				 * object and map are both locked
				 * m contains valid data
				 * object == m->object
				 * cur_object == NULL or it's been unlocked
				 * no paging references on either object or cur_object
				 */
				if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE)
					need_retry_ptr = &need_retry;
				else
					need_retry_ptr = NULL;

				if (caller_pmap) {
					kr = vm_fault_enter(m,
							    caller_pmap,
							    caller_pmap_addr,
							    prot,
							    caller_prot,
							    wired,
							    change_wiring,
							    fault_info.no_cache,
							    fault_info.cs_bypass,
							    fault_info.user_tag,
							    fault_info.pmap_options,
							    need_retry_ptr,
							    &type_of_fault);
				} else {
					kr = vm_fault_enter(m,
							    pmap,
							    vaddr,
							    prot,
							    caller_prot,
							    wired,
							    change_wiring,
							    fault_info.no_cache,
							    fault_info.cs_bypass,
							    fault_info.user_tag,
							    fault_info.pmap_options,
							    need_retry_ptr,
							    &type_of_fault);
				}
				if (kr == KERN_SUCCESS &&
				    physpage_p != NULL) {
					/* for vm_map_wire_and_extract() */
					*physpage_p = m->phys_page;
					if (prot & VM_PROT_WRITE) {
						vm_object_lock_assert_exclusive(
							m->object);
						m->dirty = TRUE;
					}
				}
				if (top_object != VM_OBJECT_NULL) {
					/*
					 * It's safe to drop the top object
					 * now that we've done our
					 * vm_fault_enter().  Any other fault
					 * in progress for that virtual
					 * address will either find our page
					 * and translation or put in a new page
					 * and translation.
					 */
					vm_object_unlock(top_object);
					top_object = VM_OBJECT_NULL;
				}

				if (need_collapse == TRUE)
					vm_object_collapse(object, offset, TRUE);
				if (need_retry == FALSE &&
				    (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
					/*
					 * evaluate access pattern and update state
					 * vm_fault_deactivate_behind depends on the
					 * state being up to date
					 */
					vm_fault_is_sequential(object, cur_offset, fault_info.behavior);

					vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior);
				}
				/*
				 * That's it, clean up and return.
				 */
				if (m->busy)
					PAGE_WAKEUP_DONE(m);

				vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				if (need_retry == TRUE) {
					/*
					 * vm_fault_enter couldn't complete the PMAP_ENTER...
					 * at this point we don't hold any locks so it's safe
					 * to ask the pmap layer to expand the page table to
					 * accommodate this mapping... once expanded, we'll
					 * re-drive the fault which should result in vm_fault_enter
					 * being able to successfully enter the mapping this time around
					 */
					(void)pmap_enter_options(
						pmap, vaddr, 0, 0, 0, 0, 0,
						PMAP_OPTIONS_NOENTER, NULL);
					need_retry = FALSE;

					goto RetryFault;
				}
				goto done;
			}
			/*
			 * COPY ON WRITE FAULT
			 */
			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);

			/*
			 * If objects match, then
			 * object->copy must not be NULL (else control
			 * would be in previous code block), and we
			 * have a potential push into the copy object
			 * which we can't cope with here.
			 */
			if (cur_object == object) {
				/*
				 * must take the slow path to
				 * deal with the copy push
				 */
				break;
			}

			/*
			 * This is now a shadow based copy on write
			 * fault -- it requires a copy up the shadow
			 * chain.
			 */
			if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
			    VM_FAULT_NEED_CS_VALIDATION(NULL, m)) {
				goto upgrade_for_validation;
			}

			/*
			 * Allocate a page in the original top level
			 * object. Give up if allocate fails.  Also
			 * need to remember current page, as it's the
			 * source of the copy.
			 *
			 * at this point we hold locks on both
			 * object and cur_object... no need to take
			 * paging refs or mark pages BUSY since
			 * we don't drop either object lock until
			 * the page has been copied and inserted
			 */
			cur_m = m;
			m = vm_page_grab();

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}
			/*
			 * Now do the copy.  Mark the source page busy...
			 *
			 *	NOTE: This code holds the map lock across
			 *	the page copy.
			 */
			vm_page_copy(cur_m, m);
			vm_page_insert(m, object, offset);
			SET_PAGE_DIRTY(m, FALSE);
			/*
			 * Now cope with the source page and object
			 */
			if (object->ref_count > 1 && cur_m->pmapped)
				pmap_disconnect(cur_m->phys_page);

			if (cur_m->clustered) {
				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
				VM_PAGE_CONSUME_CLUSTERED(cur_m);
			}
			need_collapse = TRUE;
			if (!cur_object->internal &&
			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
				/*
				 * The object from which we've just
				 * copied a page is most probably backed
				 * by a vnode.  We don't want to waste too
				 * much time trying to collapse the VM objects
				 * and create a bottleneck when several tasks
				 * map the same file.
				 */
				if (cur_object->copy == object) {
					/*
					 * Shared mapping or no COW yet.
					 * We can never collapse a copy
					 * object into its backing object.
					 */
					need_collapse = FALSE;
				} else if (cur_object->copy == object->shadow &&
					   object->shadow->resident_page_count == 0) {
					/*
					 * Shared mapping after a COW occurred.
					 */
					need_collapse = FALSE;
				}
			}
			vm_object_unlock(cur_object);

			if (need_collapse == FALSE)
				vm_fault_collapse_skipped++;
			vm_fault_collapse_total++;

			type_of_fault = DBG_COW_FAULT;
			VM_STAT_INCR(cow_faults);
			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
			current_task()->cow_faults++;
			goto FastPmapEnter;
		}
		/*
		 * No page at cur_object, cur_offset... m == NULL
		 */
		if (cur_object->pager_created) {
			int	compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

			if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
				int		my_fault_type;
				int		c_flags = C_DONT_BLOCK;
				boolean_t	insert_cur_object = FALSE;
				/*
				 * May have to talk to a pager...
				 * if so, take the slow path by
				 * doing a 'break' from the while (TRUE) loop
				 *
				 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
				 * if the compressor is active and the page exists there
				 */
				if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS)
					break;

				if (map == kernel_map || real_map == kernel_map) {
					/*
					 * can't call into the compressor with the kernel_map
					 * lock held, since the compressor may try to operate
					 * on the kernel map in order to return an empty c_segment
					 */
					break;
				}
				if (object != cur_object) {
					if (fault_type & VM_PROT_WRITE)
						c_flags |= C_KEEP;
					else
						insert_cur_object = TRUE;
				}
				if (insert_cur_object == TRUE) {

					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we can no longer be
							 * certain about cur_object (since we
							 * don't hold a reference on it)...
							 * first drop the top object lock
							 */
							vm_object_unlock(object);

							vm_map_unlock_read(map);
							if (real_map != map)
								vm_map_unlock(real_map);
							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (object != cur_object) {
						/*
						 * we can't go for the upgrade on the top
						 * lock since the upgrade may block waiting
						 * for readers to drain... since we hold
						 * cur_object locked at this point, waiting
						 * for the readers to drain would represent
						 * a lock order inversion since the lock order
						 * for objects is the reference order in the
						 * shadow chain
						 */
						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						vm_map_unlock_read(map);
						if (real_map != map)
							vm_map_unlock(real_map);

						goto RetryFault;
					}
					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);
						continue;
					}
				}
				m = vm_page_grab();

				if (m == VM_PAGE_NULL) {
					/*
					 * no free page currently available...
					 * must take the slow path
					 */
					break;
				}

				/*
				 * The object is and remains locked
				 * so no need to take a
				 * "paging_in_progress" reference.
				 */
				boolean_t	shared_lock;
				if ((object == cur_object &&
				     object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
				    (object != cur_object &&
				     cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
					shared_lock = FALSE;
				} else {
					shared_lock = TRUE;
				}

				kr = vm_compressor_pager_get(
					cur_object->pager,
					(cur_offset +
					 cur_object->paging_offset),
					m->phys_page,
					&my_fault_type,
					c_flags,
					&compressed_count_delta);

				vm_compressor_pager_count(
					cur_object->pager,
					compressed_count_delta,
					shared_lock,
					cur_object);
				if (kr != KERN_SUCCESS) {
					vm_page_release(m);
					break;
				}
				m->dirty = TRUE;

				/*
				 * If the object is purgeable, its
				 * owner's purgeable ledgers will be
				 * updated in vm_page_insert() but the
				 * page was also accounted for in a
				 * "compressed purgeable" ledger, so
				 * update that now.
				 */
				if (object != cur_object &&
				    !insert_cur_object) {
					/*
					 * We're not going to insert
					 * the decompressed page into
					 * the object it came from.
					 *
					 * We're dealing with a
					 * copy-on-write fault on
					 * "object".
					 * We're going to decompress
					 * the page directly into the
					 * target "object" while
					 * keeping the compressed
					 * page for "cur_object", so
					 * no ledger update in that
					 * case.
					 */
				} else if ((cur_object->purgable ==
					    VM_PURGABLE_DENY) ||
					   (cur_object->vo_purgeable_owner ==
					    NULL)) {
					/*
					 * "cur_object" is not purgeable
					 * or is not owned, so no
					 * purgeable ledgers to update.
					 */
				} else {
					/*
					 * One less compressed
					 * purgeable page for
					 * cur_object's owner.
					 */
					vm_purgeable_compressed_update(
						cur_object,
						-1);
				}
				if (insert_cur_object) {
					vm_page_insert(m, cur_object, cur_offset);
				} else {
					vm_page_insert(m, object, offset);
				}

				if ((m->object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
					/*
					 * If the page is not cacheable,
					 * we can't let its contents
					 * linger in the data cache
					 * after the decompression.
					 */
					pmap_sync_page_attributes_phys(m->phys_page);
				}
				type_of_fault = my_fault_type;

				VM_STAT_INCR(decompressions);

				if (cur_object != object) {
					if (insert_cur_object) {
						top_object = object;
						/*
						 * switch to the object that has the new page
						 */
						object = cur_object;
						object_lock_type = cur_object_lock_type;
					} else {
						vm_object_unlock(cur_object);
						cur_object = object;
					}
				}
				goto FastPmapEnter;
			}
			/*
			 * existence map present and indicates
			 * that the pager doesn't have this page
			 */
		}
		if (cur_object->shadow == VM_OBJECT_NULL) {
			/*
			 * Zero fill fault.  Page gets
			 * inserted into the original object.
			 */
			if (cur_object->shadow_severed ||
			    VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object))
			{
				if (object != cur_object)
					vm_object_unlock(cur_object);
				vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				kr = KERN_MEMORY_ERROR;
				goto done;
			}
			if (vm_backing_store_low) {
				/*
				 * we are protecting the system from
				 * backing store exhaustion...
				 * must take the slow path if we're
				 * not privileged
				 */
				if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV))
					break;
			}
			if (cur_object != object) {
				vm_object_unlock(cur_object);

				cur_object = object;
			}
			if (object_lock_type == OBJECT_LOCK_SHARED) {

				object_lock_type = OBJECT_LOCK_EXCLUSIVE;

				if (vm_object_lock_upgrade(object) == FALSE) {
					/*
					 * couldn't upgrade so do a full retry on the fault
					 * since we dropped the object lock which
					 * could allow another thread to insert
					 * a page at this offset
					 */
					vm_map_unlock_read(map);
					if (real_map != map)
						vm_map_unlock(real_map);

					goto RetryFault;
				}
			}
			m = vm_page_alloc(object, offset);

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}

			/*
			 * Now zero fill page...
			 * the page is probably going to
			 * be written soon, so don't bother
			 * to clear the modified bit
			 *
			 *	NOTE: This code holds the map
			 *	lock across the zero fill.
			 */
			type_of_fault = vm_fault_zero_page(m, map->no_zero_fill);
			goto FastPmapEnter;
		}
		/*
		 * On to the next level in the shadow chain
		 */
		cur_offset += cur_object->vo_shadow_offset;
		new_object = cur_object->shadow;

		/*
		 * take the new_object's lock with the indicated state
		 */
		if (cur_object_lock_type == OBJECT_LOCK_SHARED)
			vm_object_lock_shared(new_object);
		else
			vm_object_lock(new_object);

		if (cur_object != object)
			vm_object_unlock(cur_object);

		cur_object = new_object;
	}
	/*
	 * Cleanup from fast fault failure.  Drop any object
	 * lock other than original and drop map lock.
	 */
	if (object != cur_object)
		vm_object_unlock(cur_object);

	/*
	 * must own the object lock exclusively at this point
	 */
	if (object_lock_type == OBJECT_LOCK_SHARED) {
		object_lock_type = OBJECT_LOCK_EXCLUSIVE;

		if (vm_object_lock_upgrade(object) == FALSE) {
			/*
			 * couldn't upgrade, so explicitly
			 * take the lock exclusively
			 * no need to retry the fault at this
			 * point since "vm_fault_page" will
			 * completely re-evaluate the state
			 */
			vm_object_lock(object);
		}
	}

handle_copy_delay:
	vm_map_unlock_read(map);
	if (real_map != map)
		vm_map_unlock(real_map);
	/*
	 * Make a reference to this object to
	 * prevent its disposal while we are messing with
	 * it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their
	 * shadows (and copies), they will stay around as well.
	 */
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);

	error_code = 0;

	result_page = VM_PAGE_NULL;
	kr = vm_fault_page(object, offset, fault_type,
			   (change_wiring && !wired),
			   FALSE, /* page not looked up */
			   &prot, &result_page, &top_page,
			   &type_of_fault,
			   &error_code, map->no_zero_fill,
			   FALSE, &fault_info);
		/*
		 * if kr != VM_FAULT_SUCCESS, then the paging reference
		 * has been dropped and the object unlocked... the ref_count
		 * is still held
		 *
		 * if kr == VM_FAULT_SUCCESS, then the paging reference
		 * is still held along with the ref_count on the original object
		 *
		 * the object is returned locked with a paging reference
		 *
		 * if top_page != NULL, then it's BUSY and the
		 * object it belongs to has a paging reference
		 * but is returned unlocked
		 */
		if (kr != VM_FAULT_SUCCESS &&
		    kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
			/*
			 * we didn't succeed, lose the object reference immediately.
			 */
			vm_object_deallocate(object);

			/*
			 * See why we failed, and take corrective action.
			 */
			switch (kr) {
			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait((change_wiring) ?
						 THREAD_UNINT :
						 interruptible))
					goto RetryFault;
				/* fall thru */
			case VM_FAULT_INTERRUPTED:
				kr = KERN_ABORTED;
				goto done;
			case VM_FAULT_RETRY:
				goto RetryFault;
			case VM_FAULT_MEMORY_ERROR:
				if (error_code)
					kr = error_code;
				else
					kr = KERN_MEMORY_ERROR;
				goto done;
			default:
				panic("vm_fault: unexpected error 0x%x from "
				      "vm_fault_page()\n", kr);
			}
		}
		m = result_page;

		if (m != VM_PAGE_NULL) {
			assert((change_wiring && !wired) ?
			       (top_page == VM_PAGE_NULL) :
			       ((top_page == VM_PAGE_NULL) == (m->object == object)));
		}

		/*
		 * What to do with the resulting page from vm_fault_page
		 * if it doesn't get entered into the physical map:
		 */
#define RELEASE_PAGE(m)						\
	MACRO_BEGIN						\
	PAGE_WAKEUP_DONE(m);					\
	if (!m->active && !m->inactive && !m->throttled) {	\
		vm_page_lockspin_queues();			\
		if (!m->active && !m->inactive && !m->throttled)\
			vm_page_activate(m);			\
		vm_page_unlock_queues();			\
	}							\
	MACRO_END
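
/*
 * Note on the double test in RELEASE_PAGE above: the first, unlocked
 * check of active/inactive/throttled is only an optimization to skip
 * taking the page-queues lock in the common case.  Because the page
 * could be put on a queue by another thread between that check and
 * vm_page_lockspin_queues(), the same condition is re-evaluated under
 * the lock before vm_page_activate() is called.  A minimal sketch of
 * the idiom (hypothetical names, not part of this file):
 *
 *	if (!flag) {			// cheap, racy pre-check
 *		lock(q);
 *		if (!flag)		// authoritative re-check
 *			make_active();
 *		unlock(q);
 *	}
 */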
		/*
		 * We must verify that the maps have not changed
		 * since our last lookup.
		 */
		if (m != VM_PAGE_NULL) {
			old_copy_object = m->object->copy;
			vm_object_unlock(m->object);
		} else {
			old_copy_object = VM_OBJECT_NULL;
			vm_object_unlock(object);
		}
		/*
		 * no object locks are held at this point
		 */
		if ((map != original_map) || !vm_map_verify(map, &version)) {
			vm_object_t		retry_object;
			vm_object_offset_t	retry_offset;
			vm_prot_t		retry_prot;
			/*
			 * To avoid trying to write_lock the map while another
			 * thread has it read_locked (in vm_map_pageable), we
			 * do not try for write permission.  If the page is
			 * still writable, we will get write permission.  If it
			 * is not, or has been marked needs_copy, we enter the
			 * mapping without write permission, and will merely
			 * take another fault.
			 */
			map = original_map;
			vm_map_lock_read(map);

			kr = vm_map_lookup_locked(&map, vaddr,
						  fault_type & ~VM_PROT_WRITE,
						  OBJECT_LOCK_EXCLUSIVE, &version,
						  &retry_object, &retry_offset, &retry_prot,
						  &wired,
						  &fault_info,
						  &real_map);
			pmap = real_map->pmap;
			if (kr != KERN_SUCCESS) {
				vm_map_unlock_read(map);

				if (m != VM_PAGE_NULL) {
					/*
					 * retake the lock so that
					 * we can drop the paging reference
					 * in vm_fault_cleanup and do the
					 * PAGE_WAKEUP_DONE in RELEASE_PAGE
					 */
					vm_object_lock(m->object);

					RELEASE_PAGE(m);

					vm_fault_cleanup(m->object, top_page);
				} else {
					/*
					 * retake the lock so that
					 * we can drop the paging reference
					 * in vm_fault_cleanup
					 */
					vm_object_lock(object);

					vm_fault_cleanup(object, top_page);
				}
				vm_object_deallocate(object);

				goto done;
			}
			vm_object_unlock(retry_object);

			if ((retry_object != object) || (retry_offset != offset)) {

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				if (m != VM_PAGE_NULL) {
					/*
					 * retake the lock so that
					 * we can drop the paging reference
					 * in vm_fault_cleanup and do the
					 * PAGE_WAKEUP_DONE in RELEASE_PAGE
					 */
					vm_object_lock(m->object);

					RELEASE_PAGE(m);

					vm_fault_cleanup(m->object, top_page);
				} else {
					/*
					 * retake the lock so that
					 * we can drop the paging reference
					 * in vm_fault_cleanup
					 */
					vm_object_lock(object);

					vm_fault_cleanup(object, top_page);
				}
				vm_object_deallocate(object);

				goto RetryFault;
			}
			/*
			 * Check whether the protection has changed or the object
			 * has been copied while we left the map unlocked.
			 */
			prot &= retry_prot;
		}
		if (m != VM_PAGE_NULL) {
			vm_object_lock(m->object);

			if (m->object->copy != old_copy_object) {
				/*
				 * The copy object changed while the top-level object
				 * was unlocked, so take away write permission.
				 */
				prot &= ~VM_PROT_WRITE;
			}
		} else {
			vm_object_lock(object);
		}
		/*
		 * If we want to wire down this page, but no longer have
		 * adequate permissions, we must start all over.
		 */
		if (wired && (fault_type != (prot | VM_PROT_WRITE))) {

			vm_map_verify_done(map, &version);
			if (real_map != map)
				vm_map_unlock(real_map);

			if (m != VM_PAGE_NULL) {
				RELEASE_PAGE(m);

				vm_fault_cleanup(m->object, top_page);
			} else
				vm_fault_cleanup(object, top_page);

			vm_object_deallocate(object);

			goto RetryFault;
		}
		if (m != VM_PAGE_NULL) {
			/*
			 * Put this page into the physical map.
			 * We had to do the unlock above because pmap_enter
			 * may cause other faults.  The page may be on
			 * the pageout queues.  If the pageout daemon comes
			 * across the page, it will remove it from the queues.
			 */
			if (caller_pmap) {
				kr = vm_fault_enter(m,
						    caller_pmap,
						    caller_pmap_addr,
						    prot,
						    caller_prot,
						    wired,
						    change_wiring,
						    fault_info.no_cache,
						    fault_info.cs_bypass,
						    fault_info.user_tag,
						    fault_info.pmap_options,
						    NULL,
						    &type_of_fault);
			} else {
				kr = vm_fault_enter(m,
						    pmap,
						    vaddr,
						    prot,
						    caller_prot,
						    wired,
						    change_wiring,
						    fault_info.no_cache,
						    fault_info.cs_bypass,
						    fault_info.user_tag,
						    fault_info.pmap_options,
						    NULL,
						    &type_of_fault);
			}
			if (kr != KERN_SUCCESS) {
				/* abort this page fault */
				vm_map_verify_done(map, &version);
				if (real_map != map)
					vm_map_unlock(real_map);
				PAGE_WAKEUP_DONE(m);
				vm_fault_cleanup(m->object, top_page);
				vm_object_deallocate(object);
				goto done;
			}
			if (physpage_p != NULL) {
				/* for vm_map_wire_and_extract() */
				*physpage_p = m->phys_page;
				if (prot & VM_PROT_WRITE) {
					vm_object_lock_assert_exclusive(m->object);
					m->dirty = TRUE;
				}
			}
		} else {
			vm_map_entry_t		entry;
			vm_map_offset_t		laddr;
			vm_map_offset_t		ldelta, hdelta;

			/*
			 * do a pmap block mapping from the physical address
			 * in the object
			 */

			/* While we do not worry about execution protection in */
			/* general, certain pages may have instruction execution */
			/* disallowed.  We will check here, and if not allowed */
			/* to execute, we return with a protection failure. */

			if ((fault_type & VM_PROT_EXECUTE) &&
			    (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {

				vm_map_verify_done(map, &version);

				if (real_map != map)
					vm_map_unlock(real_map);

				vm_fault_cleanup(object, top_page);
				vm_object_deallocate(object);

				kr = KERN_PROTECTION_FAILURE;
				goto done;
			}
			if (real_map != map)
				vm_map_unlock(real_map);

			if (original_map != map) {
				vm_map_unlock_read(map);
				vm_map_lock_read(original_map);
				map = original_map;
			}
			real_map = map;

			laddr = vaddr;
			hdelta = 0xFFFFF000;
			ldelta = 0xFFFFF000;
			while (vm_map_lookup_entry(map, laddr, &entry)) {
				if (ldelta > (laddr - entry->vme_start))
					ldelta = laddr - entry->vme_start;
				if (hdelta > (entry->vme_end - laddr))
					hdelta = entry->vme_end - laddr;
				if (entry->is_sub_map) {

					laddr = ((laddr - entry->vme_start)
						 + VME_OFFSET(entry));
					vm_map_lock_read(VME_SUBMAP(entry));

					if (map != real_map)
						vm_map_unlock_read(map);
					if (entry->use_pmap) {
						vm_map_unlock_read(real_map);
						real_map = VME_SUBMAP(entry);
					}
					map = VME_SUBMAP(entry);

				} else
					break;
			}
			if (vm_map_lookup_entry(map, laddr, &entry) &&
			    (VME_OBJECT(entry) != NULL) &&
			    (VME_OBJECT(entry) == object)) {
				int superpage;

				if (!object->pager_created &&
				    object->phys_contiguous &&
				    VME_OFFSET(entry) == 0 &&
				    (entry->vme_end - entry->vme_start == object->vo_size) &&
				    VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size-1))) {
					superpage = VM_MEM_SUPERPAGE;
				} else {
					superpage = 0;
				}

				if (superpage && physpage_p) {
					/* for vm_map_wire_and_extract() */
					*physpage_p = (ppnum_t)
						((((vm_map_offset_t)
						   object->vo_shadow_offset)
						  + VME_OFFSET(entry)
						  + (laddr - entry->vme_start))
						 >> PAGE_SHIFT);
				}
				if (caller_pmap) {
					/*
					 * Set up a block mapped area
					 */
					assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
					pmap_map_block(caller_pmap,
						       (addr64_t)(caller_pmap_addr - ldelta),
						       (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
								  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
						       (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
						       (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
				} else {
					/*
					 * Set up a block mapped area
					 */
					assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
					pmap_map_block(real_map->pmap,
						       (addr64_t)(vaddr - ldelta),
						       (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
								  VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
						       (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
						       (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
				}
			}
		}
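
		/*
		 * Worked example of the ldelta/hdelta clamping above
		 * (hypothetical numbers, for illustration only): with an
		 * entry spanning [0x100000, 0x180000) and laddr == 0x120000,
		 * the loop clamps ldelta to 0x20000 and hdelta to 0x60000.
		 * pmap_map_block() then covers (ldelta + hdelta) >> PAGE_SHIFT
		 * = 0x80000 >> 12 = 128 pages, i.e. the whole entry, starting
		 * ldelta bytes below the faulting address.
		 */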
		/*
		 * Unlock everything, and return
		 */
		vm_map_verify_done(map, &version);
		if (real_map != map)
			vm_map_unlock(real_map);

		if (m != VM_PAGE_NULL) {
			PAGE_WAKEUP_DONE(m);

			vm_fault_cleanup(m->object, top_page);
		} else
			vm_fault_cleanup(object, top_page);

		vm_object_deallocate(object);

#undef	RELEASE_PAGE

		kr = KERN_SUCCESS;
done:
	thread_interrupt_level(interruptible_state);
	/*
	 * Only I/O throttle on faults which cause a pagein/swapin.
	 */
	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
		throttle_lowpri_io(1);
	} else {
		if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {

			if ((throttle_delay = vm_page_throttled(TRUE))) {

				if (vm_debug_events) {
					if (type_of_fault == DBG_COMPRESSOR_FAULT)
						VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
					else if (type_of_fault == DBG_COW_FAULT)
						VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
					else
						VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
				}
				delay(throttle_delay);
			}
		}
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				  (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
				  ((uint64_t)vaddr >> 32),
				  vaddr,
				  kr,
				  type_of_fault,
				  0);

	return (kr);
}
/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
kern_return_t
vm_fault_wire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_prot_t	prot,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr,
	ppnum_t		*physpage_p)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	register kern_return_t		rc;

	assert(entry->in_transition);
	if ((VME_OBJECT(entry) != NULL) &&
	    !entry->is_sub_map &&
	    VME_OBJECT(entry)->phys_contiguous) {
		return KERN_SUCCESS;
	}

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), FALSE);
	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		rc = vm_fault_wire_fast(map, va, prot, entry, pmap,
					pmap_addr + (va - entry->vme_start),
					physpage_p);
		if (rc != KERN_SUCCESS) {
			rc = vm_fault_internal(map, va, prot, TRUE,
					       ((pmap == kernel_pmap)
						? THREAD_UNINT
						: THREAD_ABORTSAFE),
					       pmap,
					       (pmap_addr +
						(va - entry->vme_start)),
					       physpage_p);
			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
		}

		if (rc != KERN_SUCCESS) {
			struct vm_map_entry	tmp_entry = *entry;

			/* unwire wired pages */
			tmp_entry.vme_end = va;
			vm_fault_unwire(map,
					&tmp_entry, FALSE, pmap, pmap_addr);

			return rc;
		}
	}
	return KERN_SUCCESS;
}
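
/*
 * Note on the structure of the loop above: vm_fault_wire_fast() is the
 * cheap attempt (page resident, no copy object, no tricky state); only
 * when it fails do we fall back to the full vm_fault_internal() path
 * for that address.  If even that fails, the pages already wired by
 * earlier iterations are unwired again via vm_fault_unwire() on a local
 * copy of the entry whose vme_end has been clipped to the first unwired
 * address, so the caller never sees a half-wired range.
 */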
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	boolean_t	deallocate,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	vm_object_t			object;
	struct vm_object_fault_info	fault_info;

	object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);

	/*
	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
	 * do anything since such memory is wired by default. So we don't have
	 * anything to undo here.
	 */
	if (object != VM_OBJECT_NULL && object->phys_contiguous)
		return;

	fault_info.interruptible = THREAD_UNINT;
	fault_info.behavior = entry->behavior;
	fault_info.user_tag = VME_ALIAS(entry);
	fault_info.pmap_options = 0;
	if (entry->iokit_acct ||
	    (!entry->is_sub_map && !entry->use_pmap)) {
		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
	}
	fault_info.lo_offset = VME_OFFSET(entry);
	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
	fault_info.no_cache = entry->no_cache;
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {

		if (object == VM_OBJECT_NULL) {
			if (pmap) {
				pmap_change_wiring(pmap,
						   pmap_addr + (va - entry->vme_start), FALSE);
			}
			(void) vm_fault(map, va, VM_PROT_NONE,
					TRUE, THREAD_UNINT, pmap, pmap_addr);
		} else {
			vm_page_t	result_page;
			vm_page_t	top_page;
			vm_object_t	result_object;
			vm_fault_return_t result;

			if (end_addr - va > (vm_size_t) -1) {
				/* 32-bit overflow */
				fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
			} else {
				fault_info.cluster_size = (vm_size_t) (end_addr - va);
				assert(fault_info.cluster_size == end_addr - va);
			}

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				XPR(XPR_VM_FAULT,
				    "vm_fault_unwire -> vm_fault_page\n",
				    0,0,0,0,0);
				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					object,
					(VME_OFFSET(entry) +
					 (va - entry->vme_start)),
					VM_PROT_NONE, TRUE,
					FALSE, /* page not looked up */
					&prot, &result_page, &top_page,
					(int *)0,
					NULL, map->no_zero_fill,
					FALSE, &fault_info);
			} while (result == VM_FAULT_RETRY);
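
			/*
			 * About the cluster_size clamp above: fault_info.cluster_size
			 * is a vm_size_t, which can be narrower than the 64-bit range
			 * being walked.  When the remaining range doesn't fit,
			 * (vm_size_t)(0 - PAGE_SIZE) wraps to the largest page-aligned
			 * value the type can hold (0xFFFFF000 for a 32-bit vm_size_t
			 * with 4K pages), i.e. an effectively unbounded cluster, and
			 * the assert in the other branch catches silent truncation.
			 */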
			/*
			 * If this was a mapping to a file on a device that has been forcibly
			 * unmounted, then we won't get a page back from vm_fault_page().  Just
			 * move on to the next one in case the remaining pages are mapped from
			 * different objects.  During a forced unmount, the object is terminated
			 * so the alive flag will be false if this happens.  A forced unmount
			 * will occur when an external disk is unplugged before the user does an
			 * eject, so we don't want to panic in that situation.
			 */
			if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
				continue;

			if (result == VM_FAULT_MEMORY_ERROR &&
			    object == kernel_object) {
				/*
				 * This must have been allocated with
				 * KMA_KOBJECT and KMA_VAONLY and there's
				 * no physical page at this offset.
				 * We're done (no page to free).
				 */
				continue;
			}

			if (result != VM_FAULT_SUCCESS)
				panic("vm_fault_unwire: failure");
			result_object = result_page->object;

			if (deallocate) {
				assert(result_page->phys_page !=
				       vm_page_fictitious_addr);
				pmap_disconnect(result_page->phys_page);
				VM_PAGE_FREE(result_page);
			} else {
				if ((pmap) && (result_page->phys_page != vm_page_guard_addr))
					pmap_change_wiring(pmap,
							   pmap_addr + (va - entry->vme_start), FALSE);

				if (VM_PAGE_WIRED(result_page)) {
					vm_page_lockspin_queues();
					vm_page_unwire(result_page, TRUE);
					vm_page_unlock_queues();
				}
				if (entry->zero_wired_pages) {
					pmap_zero_page(result_page->phys_page);
					entry->zero_wired_pages = FALSE;
				}

				PAGE_WAKEUP_DONE(result_page);
			}
			vm_fault_cleanup(result_object, top_page);
		}
	}
	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), TRUE);
}
/*
 *	vm_fault_wire_fast:
 *
 *	Handle common case of a wire down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
static kern_return_t
vm_fault_wire_fast(
	__unused vm_map_t	map,
	vm_map_offset_t		va,
	vm_prot_t		caller_prot,
	vm_map_entry_t		entry,
	pmap_t			pmap,
	vm_map_offset_t		pmap_addr,
	ppnum_t			*physpage_p)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	register vm_page_t	m;
	vm_prot_t		prot;
	thread_t		thread = current_thread();
	int			type_of_fault;
	kern_return_t		kr;

	VM_STAT_INCR(faults);

	if (thread != THREAD_NULL && thread->task != TASK_NULL)
		thread->task->faults++;
#undef	RELEASE_PAGE
#define RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lockspin_queues();			\
	vm_page_unwire(m, TRUE);			\
	vm_page_unlock_queues();			\
}

#undef	UNLOCK_THINGS
#define UNLOCK_THINGS	{				\
	vm_object_paging_end(object);			\
	vm_object_unlock(object);			\
}

#undef	UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(object);			\
}
/*
 * Give up and have caller do things the hard way.
 */

#define GIVE_UP {					\
	UNLOCK_AND_DEALLOCATE;				\
	return(KERN_FAILURE);				\
}
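
/*
 * For readability, GIVE_UP expands (via the macros above) to:
 *
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 *	vm_object_deallocate(object);
 *	return(KERN_FAILURE);
 *
 * i.e. every bail-out below both releases the paging reference taken on
 * entry and tells the caller to retry through the full vm_fault() path.
 */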
	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map) {
		assert(physpage_p == NULL);
		return(KERN_FAILURE);
	}

	/*
	 *	Find the backing store object and offset into it.
	 */

	object = VME_OBJECT(entry);
	offset = (va - entry->vme_start) + VME_OFFSET(entry);
	prot = entry->protection;

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */

	vm_object_lock(object);
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);
	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 */

	/*
	 *	Look for page in top-level object.  If it's not there or
	 *	there's something going on, give up.
	 *	ENCRYPTED SWAP: use the slow fault path, since we'll need to
	 *	decrypt the page before wiring it down.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
	    (m->unusual && ( m->error || m->restart || m->absent))) {

		GIVE_UP;
	}
	ASSERT_PAGE_DECRYPTED(m);
	if (m->fictitious &&
	    m->phys_page == vm_page_guard_addr) {
		/*
		 *	Guard pages are fictitious pages and are never
		 *	entered into a pmap, so let's say it's been wired...
		 */
		kr = KERN_SUCCESS;
		goto done;
	}

	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */

	vm_page_lockspin_queues();
	vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE);
	vm_page_unlock_queues();
	/*
	 *	Mark page busy for other threads.
	 */
	assert(!m->busy);
	m->busy = TRUE;

	/*
	 *	Give up if the page is being written and there's a copy object
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

	/*
	 *	Put this page into the physical map.
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;
	kr = vm_fault_enter(m,
			    pmap,
			    pmap_addr,
			    prot,
			    caller_prot,
			    TRUE,  /* wired */
			    FALSE, /* change_wiring */
			    FALSE, /* no_cache */
			    FALSE, /* cs_bypass */
			    VME_ALIAS(entry),
			    ((entry->iokit_acct ||
			      (!entry->is_sub_map && !entry->use_pmap))
			     ? PMAP_OPTIONS_ALT_ACCT
			     : 0),
			    NULL,
			    &type_of_fault);
done:
	/*
	 *	Unlock everything, and return
	 */

	if (physpage_p) {
		/* for vm_map_wire_and_extract() */
		if (kr == KERN_SUCCESS) {
			*physpage_p = m->phys_page;
			if (prot & VM_PROT_WRITE) {
				vm_object_lock_assert_exclusive(m->object);
				m->dirty = TRUE;
			}
		} else {
			*physpage_p = 0;
		}
	}

	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return kr;
}
/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */
static void
vm_fault_copy_cleanup(
	vm_page_t	page,
	vm_page_t	top_page)
{
	vm_object_t	object = page->object;

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	if (!page->active && !page->inactive && !page->throttled) {
		vm_page_lockspin_queues();
		if (!page->active && !page->inactive && !page->throttled)
			vm_page_activate(page);
		vm_page_unlock_queues();
	}
	vm_fault_cleanup(object, top_page);
}
static void
vm_fault_copy_dst_cleanup(
	vm_page_t	page)
{
	vm_object_t	object;

	if (page != VM_PAGE_NULL) {
		object = page->object;
		vm_object_lock(object);
		vm_page_lockspin_queues();
		vm_page_unwire(page, TRUE);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"copy_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		requested.
 */
kern_return_t
vm_fault_copy(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_map_size_t		*copy_size,		/* INOUT */
	vm_object_t		dst_object,
	vm_object_offset_t	dst_offset,
	vm_map_t		dst_map,
	vm_map_version_t	*dst_version,
	int			interruptible)
{
	vm_page_t		result_page;

	vm_page_t		src_page;
	vm_page_t		src_top_page;
	vm_prot_t		src_prot;

	vm_page_t		dst_page;
	vm_page_t		dst_top_page;
	vm_prot_t		dst_prot;

	vm_map_size_t		amount_left;
	vm_object_t		old_copy_object;
	kern_return_t		error = 0;
	vm_fault_return_t	result;

	vm_map_size_t		part_size;
	struct vm_object_fault_info fault_info_src;
	struct vm_object_fault_info fault_info_dst;
	/*
	 *	In order not to confuse the clustered pageins, align
	 *	the different offsets on a page boundary.
	 */

#define	RETURN(x)					\
	MACRO_BEGIN					\
	*copy_size -= amount_left;			\
	MACRO_RETURN(x);				\
	MACRO_END

	amount_left = *copy_size;
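
	/*
	 * Bookkeeping note on RETURN(x): amount_left always holds the number
	 * of bytes not yet copied, so subtracting it from *copy_size on any
	 * exit leaves the number of bytes actually copied in the INOUT
	 * "copy_size" argument, exactly as promised in the routine's header
	 * comment.  E.g. if the caller asked for 0x3000 bytes and the map
	 * verification fails after one full page, *copy_size comes back as
	 * 0x1000.
	 */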
	fault_info_src.interruptible = interruptible;
	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_src.user_tag = 0;
	fault_info_src.pmap_options = 0;
	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
	fault_info_src.no_cache = FALSE;
	fault_info_src.stealth = TRUE;
	fault_info_src.io_sync = FALSE;
	fault_info_src.cs_bypass = FALSE;
	fault_info_src.mark_zf_absent = FALSE;
	fault_info_src.batch_pmap_op = FALSE;

	fault_info_dst.interruptible = interruptible;
	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_dst.user_tag = 0;
	fault_info_dst.pmap_options = 0;
	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
	fault_info_dst.no_cache = FALSE;
	fault_info_dst.stealth = TRUE;
	fault_info_dst.io_sync = FALSE;
	fault_info_dst.cs_bypass = FALSE;
	fault_info_dst.mark_zf_absent = FALSE;
	fault_info_dst.batch_pmap_op = FALSE;
	do { /* while (amount_left > 0) */
		/*
		 * There may be a deadlock if both source and destination
		 * pages are the same. To avoid this deadlock, the copy must
		 * start by getting the destination page in order to apply
		 * COW semantics if any.
		 */

	RetryDestinationFault: ;
		dst_prot = VM_PROT_WRITE|VM_PROT_READ;

		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		if (amount_left > (vm_size_t) -1) {
			/* 32-bit overflow */
			fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
		} else {
			fault_info_dst.cluster_size = (vm_size_t) amount_left;
			assert(fault_info_dst.cluster_size == amount_left);
		}

		XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
		dst_page = VM_PAGE_NULL;
		result = vm_fault_page(dst_object,
				       vm_object_trunc_page(dst_offset),
				       VM_PROT_WRITE|VM_PROT_READ,
				       FALSE,
				       FALSE, /* page not looked up */
				       &dst_prot, &dst_page, &dst_top_page,
				       (int *)0,
				       &error,
				       dst_map->no_zero_fill,
				       FALSE, &fault_info_dst);

		switch (result) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible))
				goto RetryDestinationFault;
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			RETURN(MACH_SEND_INTERRUPTED);
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail the copy */
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
			/*FALLTHROUGH*/
		case VM_FAULT_MEMORY_ERROR:
			if (error)
				return (error);
			else
				return (KERN_MEMORY_ERROR);
		default:
			panic("vm_fault_copy: unexpected error 0x%x from "
			      "vm_fault_page()\n", result);
		}
		assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);

		old_copy_object = dst_page->object->copy;

		/*
		 * There exists the possibility that the source and
		 * destination page are the same.  But we can't
		 * easily determine that now.  If they are the
		 * same, the call to vm_fault_page() for the
		 * destination page will deadlock.  To prevent this we
		 * wire the page so we can drop busy without having
		 * the page daemon steal the page.  We clean up the
		 * top page but keep the paging reference on the object
		 * holding the dest page so it doesn't go away.
		 */

		vm_page_lockspin_queues();
		vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
		vm_page_unlock_queues();
		PAGE_WAKEUP_DONE(dst_page);
		vm_object_unlock(dst_page->object);

		if (dst_top_page != VM_PAGE_NULL) {
			vm_object_lock(dst_object);
			VM_PAGE_FREE(dst_top_page);
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		}
	RetrySourceFault: ;

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */
			src_page = VM_PAGE_NULL;
			result_page = VM_PAGE_NULL;
		} else {
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object,
						  vm_object_trunc_page(src_offset));
			if (src_page == dst_page) {
				src_prot = dst_prot;
				result_page = VM_PAGE_NULL;
			} else {
				src_prot = VM_PROT_READ;
				vm_object_paging_begin(src_object);

				if (amount_left > (vm_size_t) -1) {
					/* 32-bit overflow */
					fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
				} else {
					fault_info_src.cluster_size = (vm_size_t) amount_left;
					assert(fault_info_src.cluster_size == amount_left);
				}

				XPR(XPR_VM_FAULT,
				    "vm_fault_copy(2) -> vm_fault_page\n",
				    0,0,0,0,0);
				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					src_object,
					vm_object_trunc_page(src_offset),
					VM_PROT_READ, FALSE,
					FALSE, /* page not looked up */
					&src_prot,
					&result_page, &src_top_page,
					(int *)0, &error, FALSE,
					FALSE, &fault_info_src);
				switch (result) {
				case VM_FAULT_SUCCESS:
					break;
				case VM_FAULT_RETRY:
					goto RetrySourceFault;
				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible))
						goto RetrySourceFault;
					/* fall thru */
				case VM_FAULT_INTERRUPTED:
					vm_fault_copy_dst_cleanup(dst_page);
					RETURN(MACH_SEND_INTERRUPTED);
				case VM_FAULT_SUCCESS_NO_VM_PAGE:
					/* success but no VM page: fail */
					vm_object_paging_end(src_object);
					vm_object_unlock(src_object);
					/*FALLTHROUGH*/
				case VM_FAULT_MEMORY_ERROR:
					vm_fault_copy_dst_cleanup(dst_page);
					if (error)
						return (error);
					else
						return (KERN_MEMORY_ERROR);
				default:
					panic("vm_fault_copy(2): unexpected "
					      "error 0x%x from "
					      "vm_fault_page()\n", result);
				}
				assert((src_top_page == VM_PAGE_NULL) ==
				       (result_page->object == src_object));
			}
			assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
			vm_object_unlock(result_page->object);
		}
		if (!vm_map_verify(dst_map, dst_version)) {
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}

		vm_object_lock(dst_page->object);

		if (dst_page->object->copy != old_copy_object) {
			vm_object_unlock(dst_page->object);
			vm_map_verify_done(dst_map, dst_version);
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_unlock(dst_page->object);
		/*
		 *	Copy the page, and note that it is dirty
		 *	immediately.
		 */

		if (!page_aligned(src_offset) ||
		    !page_aligned(dst_offset) ||
		    !page_aligned(amount_left)) {

			vm_object_offset_t	src_po,
						dst_po;

			src_po = src_offset - vm_object_trunc_page(src_offset);
			dst_po = dst_offset - vm_object_trunc_page(dst_offset);

			if (dst_po > src_po) {
				part_size = PAGE_SIZE - dst_po;
			} else {
				part_size = PAGE_SIZE - src_po;
			}
			if (part_size > (amount_left)) {
				part_size = amount_left;
			}

			if (result_page == VM_PAGE_NULL) {
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_zero_fill(dst_page,
						       (vm_offset_t) dst_po,
						       (vm_size_t) part_size);
			} else {
				assert((vm_offset_t) src_po == src_po);
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_copy(result_page,
						  (vm_offset_t) src_po,
						  dst_page,
						  (vm_offset_t) dst_po,
						  (vm_size_t)part_size);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_page->object);
				}
			}
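
			/*
			 * Worked example of the partial-copy arithmetic above
			 * (hypothetical offsets, PAGE_SIZE == 0x1000): with
			 * src_offset == 0x10200 and dst_offset == 0x20800,
			 * src_po == 0x200 and dst_po == 0x800.  dst_po is the
			 * larger, so part_size == 0x1000 - 0x800 == 0x800 --
			 * the copy can only run to the end of whichever page
			 * fills up first -- and is then clamped to amount_left
			 * if fewer bytes remain.
			 */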
		} else {
			part_size = PAGE_SIZE;

			if (result_page == VM_PAGE_NULL)
				vm_page_zero_fill(dst_page);
			else {
				vm_object_lock(result_page->object);
				vm_page_copy(result_page, dst_page);
				vm_object_unlock(result_page->object);

				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_page->object);
				}
			}
		}
		/*
		 *	Unlock everything, and return
		 */

		vm_map_verify_done(dst_map, dst_version);

		if (result_page != VM_PAGE_NULL && src_page != dst_page)
			vm_fault_copy_cleanup(result_page, src_top_page);
		vm_fault_copy_dst_cleanup(dst_page);

		amount_left -= part_size;
		src_offset += part_size;
		dst_offset += part_size;
	} while (amount_left > 0);

	RETURN(KERN_SUCCESS);
#undef	RETURN

	/*NOTREACHED*/
}
#if	VM_FAULT_CLASSIFY
/*
 *	Temporary statistics gathering support.
 */

/*
 *	Statistics arrays:
 */
#define VM_FAULT_TYPES_MAX	5
#define	VM_FAULT_LEVEL_MAX	8

int	vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];

#define	VM_FAULT_TYPE_ZERO_FILL	0
#define	VM_FAULT_TYPE_MAP_IN	1
#define	VM_FAULT_TYPE_PAGER	2
#define	VM_FAULT_TYPE_COPY	3
#define	VM_FAULT_TYPE_OTHER	4
void
vm_fault_classify(vm_object_t		object,
		  vm_object_offset_t	offset,
		  vm_prot_t		fault_type)
{
	int		type, level = 0;
	vm_page_t	m;

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			if (m->busy || m->error || m->restart || m->absent) {
				type = VM_FAULT_TYPE_OTHER;
				break;
			}
			if (((fault_type & VM_PROT_WRITE) == 0) ||
			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
				type = VM_FAULT_TYPE_MAP_IN;
				break;
			}
			type = VM_FAULT_TYPE_COPY;
			break;
		} else {
			if (object->pager_created) {
				type = VM_FAULT_TYPE_PAGER;
				break;
			}
			if (object->shadow == VM_OBJECT_NULL) {
				type = VM_FAULT_TYPE_ZERO_FILL;
				break;
			}

			offset += object->vo_shadow_offset;
			object = object->shadow;
			level++;
			continue;
		}
	}

	if (level > VM_FAULT_LEVEL_MAX)
		level = VM_FAULT_LEVEL_MAX;

	vm_fault_stats[type][level] += 1;

	return;
}

/* cleanup routine to call from debugger */
void
vm_fault_classify_init(void)
{
	int	type, level;

	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
			vm_fault_stats[type][level] = 0;
		}
	}

	return;
}
#endif	/* VM_FAULT_CLASSIFY */
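
/*
 * A minimal sketch of how these counters might be read when the option
 * is compiled in (illustrative only; "dump_fault_stats" is not part of
 * xnu).  Each cell counts faults of one type resolved at one
 * shadow-chain depth, so a COW-heavy workload shows up as a tall
 * VM_FAULT_TYPE_COPY column at level > 0:
 *
 *	void
 *	dump_fault_stats(void)
 *	{
 *		int type, level;
 *
 *		for (type = 0; type < VM_FAULT_TYPES_MAX; type++)
 *			for (level = 0; level < VM_FAULT_LEVEL_MAX; level++)
 *				if (vm_fault_stats[type][level])
 *					printf("type %d level %d: %d\n",
 *					       type, level,
 *					       vm_fault_stats[type][level]);
 *	}
 */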
vm_offset_t
kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault_results)
{
#pragma unused(map, cur_target_addr, fault_results)

	vm_map_entry_t	entry;
	vm_object_t	object;
	vm_offset_t	object_offset;
	vm_page_t	m;
	int		compressor_external_state, compressed_count_delta;
	int		compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
	int		my_fault_type = VM_PROT_READ;
	kern_return_t	kr;

	if (not_in_kdp) {
		panic("kdp_lightweight_fault called from outside of debugger context");
	}

	assert(map != VM_MAP_NULL);

	assert((cur_target_addr & PAGE_MASK) == 0);
	if ((cur_target_addr & PAGE_MASK) != 0) {
		return 0;
	}

	if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
		return 0;
	}

	if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
		return 0;
	}

	if (entry->is_sub_map) {
		return 0;
	}

	object = VME_OBJECT(entry);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}

	object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);

	while (TRUE) {
		if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
			return 0;
		}

		if (object->pager_created && (object->paging_in_progress ||
			object->activity_in_progress)) {
			return 0;
		}

		m = kdp_vm_page_lookup(object, object_offset);

		if (m != VM_PAGE_NULL) {

			if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				return 0;
			}

			if (m->laundry || m->busy || m->pageout || m->absent || m->error || m->cleaning ||
				m->overwriting || m->restart || m->unusual) {
				return 0;
			}

			assert(!m->private);
			if (m->private) {
				return 0;
			}

			assert(!m->fictitious);
			if (m->fictitious) {
				return 0;
			}

			assert(!m->encrypted);
			if (m->encrypted) {
				return 0;
			}

			assert(!m->encrypted_cleaning);
			if (m->encrypted_cleaning) {
				return 0;
			}

			assert(!m->compressor);
			if (m->compressor) {
				return 0;
			}

			if (fault_results) {
				*fault_results |= kThreadFaultedBT;
			}
			return ptoa(m->phys_page);
		}

		compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

		if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
			if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
				kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
								kdp_compressor_decompressed_page_ppnum, &my_fault_type,
								compressor_flags, &compressed_count_delta);
				if (kr == KERN_SUCCESS) {
					if (fault_results) {
						*fault_results |= kThreadDecompressedBT;
					}
					return kdp_compressor_decompressed_page_paddr;
				} else {
					return 0;
				}
			}
		}

		if (object->shadow == VM_OBJECT_NULL) {
			return 0;
		}

		object_offset += object->vo_shadow_offset;
		object = object->shadow;
	}
}
#define CODE_SIGNING_CHUNK_SIZE 4096

void
vm_page_validate_cs_mapped(
	vm_page_t	page,
	const void	*kaddr)
{
	vm_object_t		object;
	vm_object_offset_t	offset, offset_in_page;
	kern_return_t		kr;
	memory_object_t		pager;
	void			*blobs;
	boolean_t		validated;
	unsigned		tainted;
	int			num_chunks, num_chunks_validated;

	assert(page->busy);
	vm_object_lock_assert_exclusive(page->object);

	if (page->wpmapped && !page->cs_tainted) {
		/*
		 * This page was mapped for "write" access sometime in the
		 * past and could still be modifiable in the future.
		 * Consider it tainted.
		 * [ If the page was already found to be "tainted", no
		 * need to re-validate. ]
		 */
		page->cs_validated = TRUE;
		page->cs_tainted = TRUE;
		if (cs_debug) {
			printf("CODESIGNING: vm_page_validate_cs: "
			       "page %p obj %p off 0x%llx "
			       "was modified\n",
			       page, page->object, page->offset);
		}
		vm_cs_validated_dirtied++;
	}

	if (page->cs_validated || page->cs_tainted) {
		return;
	}

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		return;
	}
	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * by now.
	 */
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	pager = object->pager;
	assert(object->paging_in_progress);
	kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
	if (kr != KERN_SUCCESS) {
		blobs = NULL;
	}

	/* verify the SHA1 hash for this page */
	num_chunks_validated = 0;
	for (offset_in_page = 0, num_chunks = 0;
	     offset_in_page < PAGE_SIZE_64;
	     offset_in_page += CODE_SIGNING_CHUNK_SIZE, num_chunks++) {
		tainted = 0;
		validated = cs_validate_page(blobs,
					     pager,
					     (object->paging_offset +
					      offset +
					      offset_in_page),
					     (const void *)((const char *)kaddr
							    + offset_in_page),
					     &tainted);
		if (validated) {
			num_chunks_validated++;
		}
		if (tainted & CS_VALIDATE_TAINTED) {
			page->cs_tainted = TRUE;
		}
		if (tainted & CS_VALIDATE_NX) {
			page->cs_nx = TRUE;
		}
	}
	/* page is validated only if all its chunks are */
	if (num_chunks_validated == num_chunks) {
		page->cs_validated = TRUE;
	}
}
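
/*
 * Chunk arithmetic for the loop above: with 4K VM pages,
 * PAGE_SIZE_64 / CODE_SIGNING_CHUNK_SIZE == 1, so a page is a single
 * chunk and num_chunks_validated must equal 1.  On a configuration with
 * 16K VM pages the loop would run 4 times, and cs_validated is set only
 * if all 4 chunks validate; a single bad chunk leaves the whole page
 * unvalidated while still recording any taint it reported.
 */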
void
vm_page_validate_cs(
	vm_page_t	page)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		koffset;
	vm_map_size_t		ksize;
	vm_offset_t		kaddr;
	kern_return_t		kr;
	boolean_t		busy_page;
	boolean_t		need_unmap;

	vm_object_lock_assert_held(page->object);

	if (page->wpmapped && !page->cs_tainted) {
		vm_object_lock_assert_exclusive(page->object);

		/*
		 * This page was mapped for "write" access sometime in the
		 * past and could still be modifiable in the future.
		 * Consider it tainted.
		 * [ If the page was already found to be "tainted", no
		 * need to re-validate. ]
		 */
		page->cs_validated = TRUE;
		page->cs_tainted = TRUE;
		if (cs_debug) {
			printf("CODESIGNING: vm_page_validate_cs: "
			       "page %p obj %p off 0x%llx "
			       "was modified\n",
			       page, page->object, page->offset);
		}
		vm_cs_validated_dirtied++;
	}

	if (page->cs_validated || page->cs_tainted) {
		return;
	}

	if (page->slid) {
		panic("vm_page_validate_cs(%p): page is slid\n", page);
	}
	assert(!page->slid);
#if CHECK_CS_VALIDATION_BITMAP
	if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
		page->cs_validated = TRUE;
		page->cs_tainted = FALSE;
		vm_cs_bitmap_validated++;
		return;
	}
#endif
	vm_object_lock_assert_exclusive(page->object);

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	busy_page = page->busy;
	if (!busy_page) {
		/* keep page busy while we map (and unlock) the VM object */
		page->busy = TRUE;
	}

	/*
	 * Take a paging reference on the VM object
	 * to protect it from collapse or bypass,
	 * and keep it from disappearing too.
	 */
	vm_object_paging_begin(object);

	/* map the page in the kernel address space */
	ksize = PAGE_SIZE_64;
	koffset = 0;
	need_unmap = FALSE;
	kr = vm_paging_map_object(page,
				  object,
				  offset,
				  VM_PROT_READ,
				  FALSE, /* can't unlock object ! */
				  &ksize,
				  &koffset,
				  &need_unmap);
	if (kr != KERN_SUCCESS) {
		panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
	}
	kaddr = CAST_DOWN(vm_offset_t, koffset);

	/* validate the mapped page */
	vm_page_validate_cs_mapped(page, (const void *) kaddr);

#if CHECK_CS_VALIDATION_BITMAP
	if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
		vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
	}
#endif
	assert(page->busy);
	assert(object == page->object);
	vm_object_lock_assert_exclusive(object);

	if (!busy_page) {
		PAGE_WAKEUP_DONE(page);
	}
	if (need_unmap) {
		/* unmap the map from the kernel address space */
		vm_paging_unmap_object(object, koffset, koffset + ksize);
	}
	vm_object_paging_end(object);
}
void
vm_page_validate_cs_mapped_chunk(
	vm_page_t	page,
	const void	*kaddr,
	vm_offset_t	chunk_offset,
	boolean_t	*validated_p,
	unsigned	*tainted_p)
{
	vm_object_t		object;
	vm_object_offset_t	offset, offset_in_page;
	kern_return_t		kr;
	memory_object_t		pager;
	void			*blobs;
	boolean_t		validated;
	unsigned		tainted;

	*validated_p = FALSE;
	*tainted_p = 0;

	assert(page->busy);
	vm_object_lock_assert_exclusive(page->object);

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		return;
	}
	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * by now.
	 */
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	pager = object->pager;
	assert(object->paging_in_progress);
	kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
	if (kr != KERN_SUCCESS) {
		blobs = NULL;
	}

	/* verify the signature for this chunk */
	offset_in_page = chunk_offset;
	assert(offset_in_page < PAGE_SIZE);
	assert((offset_in_page & (CODE_SIGNING_CHUNK_SIZE-1)) == 0);

	tainted = 0;
	validated = cs_validate_page(blobs,
				     pager,
				     (object->paging_offset +
				      offset +
				      offset_in_page),
				     (const void *)((const char *)kaddr
						    + offset_in_page),
				     &tainted);
	if (validated) {
		*validated_p = TRUE;
	}
	if (tainted) {
		*tainted_p = tainted;
	}
}
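
/*
 * A minimal usage sketch (illustrative only -- this caller is not part
 * of this file): code that already has the page mapped and busy can
 * validate a single CODE_SIGNING_CHUNK_SIZE-aligned slice and inspect
 * the per-chunk results independently of the whole-page
 * vm_page_validate_cs() path:
 *
 *	boolean_t	chunk_valid;
 *	unsigned	chunk_tainted;
 *
 *	vm_page_validate_cs_mapped_chunk(page, kaddr,
 *					 0 * CODE_SIGNING_CHUNK_SIZE,
 *					 &chunk_valid, &chunk_tainted);
 *	if (chunk_valid && !(chunk_tainted & CS_VALIDATE_TAINTED)) {
 *		// this 4K slice matched its code-signing hash
 *	}
 */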