/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm_fault.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Page fault handling module.
 */
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>
#include <libkern/OSAtomic.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/message.h>	/* for error codes */
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
				/* For memory_object_data_{request,unlock} */
#include <mach/sdt.h>

#include <kern/kern_types.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/xpr.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>

#include <vm/vm_compressor.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_external.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <sys/codesign.h>

#include <libsa/sys/timers.h>	/* for struct timespec */
#define VM_FAULT_CLASSIFY	0

#define TRACEFAULTPAGE	0	/* (TEST/DEBUG) */

unsigned int	vm_object_pagein_throttle = 16;

/*
 * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control, which
 * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
 * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
 * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
 * keep the UI active so that the user has a chance to kill the offending task before the system
 * completely hangs.
 *
 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
 * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
 * will be throttled.  The throttling is done by giving the thread that's trying to demand-zero a page a
 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
 */

extern void throttle_lowpri_io(int);

uint64_t vm_hard_throttle_threshold;

#define NEED_TO_HARD_THROTTLE_THIS_TASK()	(vm_wants_task_throttled(current_task()) ||	\
						 (vm_page_free_count < vm_page_throttle_limit && \
						  proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED))

#define HARD_THROTTLE_DELAY	20000	/* 20000 us == 20 ms */
#define SOFT_THROTTLE_DELAY	2000	/* 2000 us == 2 ms */
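/*
 * Illustrative sketch only (not an additional code path): these knobs are
 * consumed by vm_page_throttled() below, and a caller that gets a non-zero
 * delay back is expected to back off before retrying the fault, roughly:
 *
 *	int throttle_delay;
 *
 *	if ((throttle_delay = vm_page_throttled()))
 *		delay(throttle_delay);		delay is in microseconds, i.e.
 *						HARD_THROTTLE_DELAY or SOFT_THROTTLE_DELAY
 *
 * See vm_fault_check() below for the real use of this pattern.
 */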
boolean_t current_thread_aborted(void);

/* Forward declarations of internal routines. */
extern kern_return_t vm_fault_wire_fast(
				vm_map_t	map,
				vm_map_offset_t	va,
				vm_map_entry_t	entry,
				pmap_t		pmap,
				vm_map_offset_t	pmap_addr,
				ppnum_t		*physpage_p);

extern void vm_fault_continue(void);

extern void vm_fault_copy_cleanup(
				vm_page_t	page,
				vm_page_t	top_page);

extern void vm_fault_copy_dst_cleanup(
				vm_page_t	page);

#if	VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t	object,
			      vm_object_offset_t	offset,
			      vm_prot_t		fault_type);

extern void vm_fault_classify_init(void);
#endif	/* VM_FAULT_CLASSIFY */

unsigned long vm_pmap_enter_blocked = 0;
unsigned long vm_pmap_enter_retried = 0;

unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
unsigned long vm_cs_bitmap_validated = 0;

void vm_pre_fault(vm_map_offset_t);
/*
 *	Routine:	vm_fault_init
 *	Purpose:
 *		Initialize our private data structures.
 */
void
vm_fault_init(void)
{
	int i, vm_compressor_temp;
	boolean_t need_default_val = TRUE;
	/*
	 * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
	 * computed as a percentage of available memory, and the percentage used is scaled inversely with
	 * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
	 * and reduce the value down to 10% for very large memory configurations.  This helps give us a
	 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
	 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
	 */

	vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100;

	/*
	 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
	 */

	if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) {
		for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
			if (vm_compressor_temp > 0 &&
			    ((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
				need_default_val = FALSE;
				vm_compressor_mode = vm_compressor_temp;
				break;
			}
		}
		if (need_default_val)
			printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
	}
	if (need_default_val) {
		/* If no boot arg or incorrect boot arg, try device tree. */
		PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
	}
	PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
	printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
}
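/*
 * Worked example of the threshold formula above (numbers are illustrative
 * only and assume the stated RAM sizes):
 *
 *	sane_size =  4 GB -> MIN(4, 25)  =  4 -> 31% of  4 GB ~= 1.24 GB
 *	sane_size = 16 GB -> MIN(16, 25) = 16 -> 19% of 16 GB ~= 3.04 GB
 *	sane_size = 64 GB -> MIN(64, 25) = 25 -> 10% of 64 GB  =  6.4 GB
 *
 * i.e. the percentage shrinks as RAM grows, but the absolute threshold for
 * considering a task bloated still rises with the amount of RAM.
 */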
/*
 *	Routine:	vm_fault_cleanup
 *	Purpose:
 *		Clean up the result of vm_fault_page.
 *	Results:
 *		The paging reference for "object" is released.
 *		"object" is unlocked.
 *		If "top_page" is not null, "top_page" is
 *		freed and the paging reference for the object
 *		containing it is released.
 *
 *	In/out conditions:
 *		"object" must be locked.
 */
void
vm_fault_cleanup(
	register vm_object_t	object,
	register vm_page_t	top_page)
{
	vm_object_paging_end(object);
	vm_object_unlock(object);

	if (top_page != VM_PAGE_NULL) {
		object = top_page->object;

		vm_object_lock(object);
		VM_PAGE_FREE(top_page);
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats_in[MAXCLUSTERPAGES];
#define CLUSTER_STAT(clause)	clause
#define CLUSTER_STAT_HIGHER(x)	\
	((cluster_stats_in[(x)].pages_at_higher_offsets)++)
#define CLUSTER_STAT_LOWER(x)	\
	((cluster_stats_in[(x)].pages_at_lower_offsets)++)
#define CLUSTER_STAT_CLUSTER(x)	\
	((cluster_stats_in[(x)].pages_in_cluster)++)
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */

#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)

boolean_t	vm_page_deactivate_behind = TRUE;
/*
 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
 */
#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW	128
#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER	16	/* don't make this too big... */
							/* we use it to size an array on the stack */

int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;

#define MAX_SEQUENTIAL_RUN	(1024 * 1024 * 1024)
/*
 * vm_fault_is_sequential
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.
 * Update state to indicate current access pattern.
 *
 * object must have at least the shared lock held
 */
static
void
vm_fault_is_sequential(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_behavior_t		behavior)
{
	vm_object_offset_t	last_alloc;
	int			sequential;
	int			orig_sequential;

	last_alloc = object->last_alloc;
	sequential = object->sequential;
	orig_sequential = sequential;

	switch (behavior) {
	case VM_BEHAVIOR_RANDOM:
		/*
		 * reset indicator of sequential behavior
		 */
		sequential = 0;
		break;

	case VM_BEHAVIOR_SEQUENTIAL:
		if (offset && last_alloc == offset - PAGE_SIZE_64) {
			/*
			 * advance indicator of sequential behavior
			 */
			if (sequential < MAX_SEQUENTIAL_RUN)
				sequential += PAGE_SIZE;
		} else {
			/*
			 * reset indicator of sequential behavior
			 */
			sequential = 0;
		}
		break;

	case VM_BEHAVIOR_RSEQNTL:
		if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
			/*
			 * advance indicator of sequential behavior
			 */
			if (sequential > -MAX_SEQUENTIAL_RUN)
				sequential -= PAGE_SIZE;
		} else {
			/*
			 * reset indicator of sequential behavior
			 */
			sequential = 0;
		}
		break;

	case VM_BEHAVIOR_DEFAULT:
	default:
		if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
			/*
			 * advance indicator of sequential behavior
			 */
			if (sequential < 0)
				sequential = 0;
			if (sequential < MAX_SEQUENTIAL_RUN)
				sequential += PAGE_SIZE;

		} else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
			/*
			 * advance indicator of sequential behavior
			 */
			if (sequential > 0)
				sequential = 0;
			if (sequential > -MAX_SEQUENTIAL_RUN)
				sequential -= PAGE_SIZE;
		} else {
			/*
			 * reset indicator of sequential behavior
			 */
			sequential = 0;
		}
		break;
	}
	if (sequential != orig_sequential) {
		if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
			/*
			 * if someone else has already updated object->sequential
			 * don't bother trying to update it or object->last_alloc
			 */
			return;
		}
	}
	/*
	 * I'd like to do this with a OSCompareAndSwap64, but that
	 * doesn't exist for PPC...  however, it shouldn't matter
	 * that much... last_alloc is maintained so that we can determine
	 * if a sequential access pattern is taking place... if only
	 * one thread is banging on this object, no problem with the unprotected
	 * update... if 2 or more threads are banging away, we run the risk of
	 * someone seeing a mangled update... however, in the face of multiple
	 * accesses, no sequential access pattern can develop anyway, so we
	 * haven't lost any real info.
	 */
	object->last_alloc = offset;
}
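/*
 * Example of the state machine above (illustrative only): with
 * VM_BEHAVIOR_DEFAULT and 4 KB pages, faults at offsets 0x0000, 0x1000 and
 * 0x2000 make object->sequential grow 0 -> 0x1000 -> 0x2000 (a forward run),
 * while a following fault at, say, 0x8000 is neither last_alloc + PAGE_SIZE
 * nor last_alloc - PAGE_SIZE and resets the run to 0.  Negative values of
 * object->sequential track a backward (VM_BEHAVIOR_RSEQNTL-style) run in the
 * same way.
 */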
int vm_page_deactivate_behind_count = 0;

/*
 * vm_fault_deactivate_behind
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.  If
 * so, compute a potential page to deactivate and
 * deactivate it.
 *
 * object must be locked.
 *
 * return TRUE if we actually deactivate a page
 */
static
boolean_t
vm_fault_deactivate_behind(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_behavior_t		behavior)
{
	int		n;
	int		pages_in_run = 0;
	int		max_pages_in_run = 0;
	int		sequential_run;
	int		sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
	vm_object_offset_t	run_offset = 0;
	vm_object_offset_t	pg_offset = 0;
	vm_page_t	m;
	vm_page_t	page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind);	/* (TEST/DEBUG) */
#endif

	if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
		/*
		 * Do not deactivate pages from the kernel object: they
		 * are not intended to become pageable,
		 * or we've disabled the deactivate behind mechanism.
		 */
		return FALSE;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {
	case VM_BEHAVIOR_RANDOM:
		break;
	case VM_BEHAVIOR_SEQUENTIAL:
		if (sequential_run >= (int)PAGE_SIZE) {
			run_offset = 0 - PAGE_SIZE_64;
			max_pages_in_run = 1;
		}
		break;
	case VM_BEHAVIOR_RSEQNTL:
		if (sequential_run >= (int)PAGE_SIZE) {
			run_offset = PAGE_SIZE_64;
			max_pages_in_run = 1;
		}
		break;
	case VM_BEHAVIOR_DEFAULT:
	default:
	{	vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;

		/*
		 * determine if the run of sequential access has been
		 * long enough on an object with default access behavior
		 * to consider it for deactivation
		 */
		if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
			/*
			 * the comparisons between offset and behind are done
			 * in this kind of odd fashion in order to prevent wrap around
			 * at the end points
			 */
			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
				if (offset >= behind) {
					run_offset = 0 - behind;
					pg_offset = PAGE_SIZE_64;
					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
				}
			} else {
				if (offset < -behind) {
					run_offset = behind;
					pg_offset = 0 - PAGE_SIZE_64;
					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
				}
			}
		}
		break;
	}
	}
	for (n = 0; n < max_pages_in_run; n++) {
		m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));

		if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
			page_run[pages_in_run++] = m;

			/*
			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
			 *
			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
			 * new reference happens. If no further references happen on the page after that remote TLB flushes
			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
			 * by pageout_scan, which is just fine since the last reference would have happened quite far
			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
			 * have happened before we did the deactivate_behind.
			 */
			pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
		}
	}
	if (pages_in_run) {
		vm_page_lockspin_queues();

		for (n = 0; n < pages_in_run; n++) {

			m = page_run[n];

			vm_page_deactivate_internal(m, FALSE);

			vm_page_deactivate_behind_count++;
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
		}
		vm_page_unlock_queues();

		return TRUE;
	}
	return FALSE;
}
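/*
 * Illustrative numbers for the default window/cluster sizing above: with
 * vm_default_behind = 128 pages and a 16-page cluster, a forward sequential
 * run only triggers deactivation once it is at least 128 pages long, and then
 * only on every 16th page; at that point the 16 pages sitting one window
 * (128 pages) behind the current fault offset become candidates for
 * deactivation.  This is a restatement of the constants above, not an
 * additional policy.
 */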
static int
vm_page_throttled(void)
{
	clock_sec_t	elapsed_sec;
	clock_sec_t	tv_sec;
	clock_usec_t	tv_usec;

	thread_t thread = current_thread();

	if (thread->options & TH_OPT_VMPRIV)
		return (0);

	thread->t_page_creation_count++;

	if (NEED_TO_HARD_THROTTLE_THIS_TASK())
		return (HARD_THROTTLE_DELAY);

	if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
	    thread->t_page_creation_count > vm_page_creation_throttle) {

		clock_get_system_microtime(&tv_sec, &tv_usec);

		elapsed_sec = tv_sec - thread->t_page_creation_time;

		if (elapsed_sec <= 6 || (thread->t_page_creation_count / elapsed_sec) >= (vm_page_creation_throttle / 6)) {

			if (elapsed_sec >= 60) {
				/*
				 * we'll reset our stats to give a well behaved app
				 * that was unlucky enough to accumulate a bunch of pages
				 * over a long period of time a chance to get out of
				 * the throttled state... we reset the counter and timestamp
				 * so that if it stays under the rate limit for the next second
				 * it will be back in our good graces... if it exceeds it, it
				 * will remain in the throttled state
				 */
				thread->t_page_creation_time = tv_sec;
				thread->t_page_creation_count = (vm_page_creation_throttle / 6) * 5;
			}
			++vm_page_throttle_count;

			if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED())
				return (HARD_THROTTLE_DELAY);
			else
				return (SOFT_THROTTLE_DELAY);
		}
		thread->t_page_creation_time = tv_sec;
		thread->t_page_creation_count = 0;
	}
	return (0);
}
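/*
 * Example of the rate test above (the 3000-page figure is made up purely for
 * illustration; the real limit is vm_page_creation_throttle): with a limit of
 * 3000 pages, a thread keeps getting delays while it has created more than
 * 3000 pages and is still averaging at least 3000 / 6 = 500 new pages per
 * second (or while fewer than ~6 seconds have elapsed since its timestamp).
 * Once it has been throttled for 60 seconds, the counter is knocked back to
 * 5/6 of the limit so a now well-behaved thread can drop under the rate
 * within about a second, while a thread whose average rate has fallen below
 * the limit has its counter and timestamp cleared entirely.
 */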
/*
 * check for various conditions that would
 * prevent us from creating a ZF page...
 * cleanup is based on being called from vm_fault_page
 *
 * object must be locked
 * object == m->object
 */
static vm_fault_return_t
vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
{
	int throttle_delay;

	if (object->shadow_severed ||
	    VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
		/*
		 * Either:
		 * 1. the shadow chain was severed,
		 * 2. the purgeable object is volatile or empty and is marked
		 *    to fault on access while volatile.
		 * Just have to return an error at this point
		 */
		if (m != VM_PAGE_NULL)
			VM_PAGE_FREE(m);
		vm_fault_cleanup(object, first_m);

		thread_interrupt_level(interruptible_state);

		return (VM_FAULT_MEMORY_ERROR);
	}
	if (vm_backing_store_low) {
		/*
		 * are we protecting the system from
		 * backing store exhaustion.  If so
		 * sleep unless we are privileged.
		 */
		if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {

			if (m != VM_PAGE_NULL)
				VM_PAGE_FREE(m);
			vm_fault_cleanup(object, first_m);

			assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

			thread_block(THREAD_CONTINUE_NULL);
			thread_interrupt_level(interruptible_state);

			return (VM_FAULT_RETRY);
		}
	}
	if ((throttle_delay = vm_page_throttled())) {
		/*
		 * we're throttling zero-fills...
		 * treat this as if we couldn't grab a page
		 */
		if (m != VM_PAGE_NULL)
			VM_PAGE_FREE(m);
		vm_fault_cleanup(object, first_m);

		VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);

		delay(throttle_delay);

		if (current_thread_aborted()) {
			thread_interrupt_level(interruptible_state);
			return VM_FAULT_INTERRUPTED;
		}
		thread_interrupt_level(interruptible_state);

		return (VM_FAULT_MEMORY_SHORTAGE);
	}
	return (VM_FAULT_SUCCESS);
}
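/*
 * Typical use of vm_fault_check(), as seen at its call sites later in this
 * file (shown here only as a usage sketch):
 *
 *	error = vm_fault_check(object, m, first_m, interruptible_state);
 *	if (error != VM_FAULT_SUCCESS)
 *		return (error);
 *
 * i.e. on anything but VM_FAULT_SUCCESS the page, the paging references and
 * the interrupt level have already been cleaned up and the caller simply
 * propagates the error.
 */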
/*
 * do the work to zero fill a page and
 * inject it into the correct paging queue
 *
 * m->object must be locked
 * page queue lock must NOT be held
 */
static int
vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
{
	int my_fault = DBG_ZERO_FILL_FAULT;

	/*
	 * This is a zero-fill page fault...
	 *
	 * Checking the page lock is a waste of
	 * time;  this page was absent, so
	 * it can't be page locked by a pager.
	 *
	 * we also consider it undefined
	 * with respect to instruction
	 * execution.  i.e. it is the responsibility
	 * of higher layers to call for an instruction
	 * sync after changing the contents and before
	 * sending a program into this area.  We
	 * choose this approach for performance
	 */
	m->pmapped = TRUE;

	m->cs_validated = FALSE;
	m->cs_tainted = FALSE;

	if (no_zero_fill == TRUE) {
		my_fault = DBG_NZF_PAGE_FAULT;

		if (m->absent && m->busy)
			return (my_fault);
	} else {
		vm_page_zero_fill(m);

		VM_STAT_INCR(zero_fill_count);
		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
	}
	assert(!m->laundry);
	assert(m->object != kernel_object);
	//assert(m->pageq.next == NULL && m->pageq.prev == NULL);

	if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
	    (m->object->purgable == VM_PURGABLE_DENY ||
	     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
	     m->object->purgable == VM_PURGABLE_VOLATILE )) {

		vm_page_lockspin_queues();

		if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
			assert(!VM_PAGE_WIRED(m));

			/*
			 * can't be on the pageout queue since we don't
			 * have a pager to try and clean to
			 */
			assert(!m->pageout_queue);

			VM_PAGE_QUEUES_REMOVE(m);

			queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
			m->throttled = TRUE;
			vm_page_throttled_count++;
		}
		vm_page_unlock_queues();
	}
	return (my_fault);
}
/*
 *	Routine:	vm_fault_page
 *	Purpose:
 *		Find the resident page for the virtual memory
 *		specified by the given virtual memory object
 *		and offset.
 *	Additional arguments:
 *		The required permissions for the page are given
 *		in "fault_type".  Desired permissions are included
 *		in "protection".
 *		fault_info is passed along to determine pagein cluster
 *		limits... it contains the expected reference pattern,
 *		cluster size if available, etc...
 *
 *		If the desired page is known to be resident (for
 *		example, because it was previously wired down), asserting
 *		the "unwiring" parameter will speed the search.
 *
 *		If the operation can be interrupted (by thread_abort
 *		or thread_terminate), then the "interruptible"
 *		parameter should be asserted.
 *
 *	Results:
 *		The page containing the proper data is returned
 *		in "result_page".
 *
 *	In/out conditions:
 *		The source object must be locked and referenced,
 *		and must donate one paging reference.  The reference
 *		is not affected.  The paging reference and lock are
 *		consumed.
 *
 *		If the call succeeds, the object in which "result_page"
 *		resides is left locked and holding a paging reference.
 *		If this is not the original object, a busy page in the
 *		original object is returned in "top_page", to prevent other
 *		callers from pursuing this same data, along with a paging
 *		reference for the original object.  The "top_page" should
 *		be destroyed when this guarantee is no longer required.
 *		The "result_page" is also left busy.  It is not removed
 *		from the pageout queues.
 *
 *		A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
 *		fault succeeded but there's no VM page (i.e. the VM object
 *		does not actually hold VM pages, but device memory or
 *		large pages).  The object is still locked and we still hold a
 *		paging_in_progress reference.
 */
unsigned int vm_fault_page_blocked_access = 0;
unsigned int vm_fault_page_forced_retry = 0;
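/*
 * Caller-side sketch of the contract described above (illustration only;
 * the real callers in this file add their own retry and wiring handling):
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);		donate one paging reference
 *	kr = vm_fault_page(object, offset, fault_type, FALSE, FALSE,
 *			   &prot, &result_page, &top_page, &type_of_fault,
 *			   &error_code, no_zero_fill, FALSE, &fault_info);
 *	if (kr == VM_FAULT_SUCCESS) {
 *		result_page's object is now locked with a paging reference;
 *		when done with the page:
 *			PAGE_WAKEUP_DONE(result_page);
 *			vm_fault_cleanup(result_page->object, top_page);
 *	}
 */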
vm_fault_return_t
vm_fault_page(
	/* Arguments: */
	vm_object_t	first_object,	/* Object to begin search */
	vm_object_offset_t first_offset,	/* Offset into object */
	vm_prot_t	fault_type,	/* What access is requested */
	boolean_t	must_be_resident,/* Must page be resident? */
	boolean_t	caller_lookup,	/* caller looked up page */
	/* Modifies in place: */
	vm_prot_t	*protection,	/* Protection for mapping */
	vm_page_t	*result_page,	/* Page found, if successful */
	/* Returns: */
	vm_page_t	*top_page,	/* Page in top object, if
					 * not result_page.  */
	int		*type_of_fault, /* if non-null, fill in with type of fault
					 * COW, zero-fill, etc... returned in trace point */
	/* More arguments: */
	kern_return_t	*error_code,	/* code if page is in error */
	boolean_t	no_zero_fill,	/* don't zero fill absent pages */
	boolean_t	data_supply,	/* treat as data_supply if
					 * it is a write fault and a full
					 * page is provided */
	vm_object_fault_info_t fault_info)
{
	vm_page_t		m;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_page_t		first_m;
	vm_object_t		next_object;
	vm_object_t		copy_object;
	boolean_t		look_for_page;
	boolean_t		force_fault_retry = FALSE;
	vm_prot_t		access_required = fault_type;
	vm_prot_t		wants_copy_flag;
	CLUSTER_STAT(int pages_at_higher_offsets;)
	CLUSTER_STAT(int pages_at_lower_offsets;)
	kern_return_t		wait_result;
	boolean_t		interruptible_state;
	boolean_t		data_already_requested = FALSE;
	vm_behavior_t		orig_behavior;
	vm_size_t		orig_cluster_size;
	vm_fault_return_t	error;
	int			my_fault;
	uint32_t		try_failed_count;
	int			interruptible; /* how may fault be interrupted? */
	int			external_state = VM_EXTERNAL_STATE_UNKNOWN;
	memory_object_t		pager;
	vm_fault_return_t	retval;
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store.  This existence map
 * duplicates information maintained by the vnode pager.  It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created.  The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object.  MUST_ASK_PAGER() is one of the
 * criteria in the decision to invoke the pager.  It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation.  Note that MUST_ASK_PAGER() always evaluates to TRUE for
 * permanent objects.  Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object
 * for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.
 * PAGED_OUT() is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#if MACH_PAGEMAP
#define MUST_ASK_PAGER(o, f, s)					\
	((vm_external_state_get((o)->existence_map, (f))	\
	  != VM_EXTERNAL_STATE_ABSENT) &&			\
	 (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))))	\
	 != VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f)						\
	((vm_external_state_get((o)->existence_map, (f))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||			\
	 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f))		\
	  == VM_EXTERNAL_STATE_EXISTS))
#else /* MACH_PAGEMAP */
#define MUST_ASK_PAGER(o, f, s)					\
	((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) \
	(VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
#endif /* MACH_PAGEMAP */
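/*
 * Usage note (a restatement of the decision made further down, not new
 * policy): MUST_ASK_PAGER() feeds the "look_for_page" decision, i.e. the
 * pager is only consulted when one exists and the existence/compressor state
 * does not already say the page is absent:
 *
 *	look_for_page = (object->pager_created &&
 *			 (MUST_ASK_PAGER(object, offset, external_state) == TRUE) &&
 *			 !data_supply);
 *
 * PAGED_OUT() is used later to decide whether a page still needs to be pushed
 * into a copy object or has already been paged out there.
 */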
#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	if (!m->active && !m->inactive && !m->throttled) {		\
		vm_page_lockspin_queues();				\
		if (!m->active && !m->inactive && !m->throttled) {	\
			if (COMPRESSED_PAGER_IS_ACTIVE)			\
				vm_page_deactivate(m);			\
			else						\
				vm_page_activate(m);			\
		}							\
		vm_page_unlock_queues();				\
	}								\
	MACRO_END
913 dbgTrace(0xBEEF0002, (unsigned int) first_object
, (unsigned int) first_offset
); /* (TEST/DEBUG) */
916 interruptible
= fault_info
->interruptible
;
917 interruptible_state
= thread_interrupt_level(interruptible
);
920 * INVARIANTS (through entire routine):
922 * 1) At all times, we must either have the object
923 * lock or a busy page in some object to prevent
924 * some other thread from trying to bring in
927 * Note that we cannot hold any locks during the
928 * pager access or when waiting for memory, so
929 * we use a busy page then.
931 * 2) To prevent another thread from racing us down the
932 * shadow chain and entering a new page in the top
933 * object before we do, we must keep a busy page in
934 * the top object while following the shadow chain.
936 * 3) We must increment paging_in_progress on any object
937 * for which we have a busy page before dropping
940 * 4) We leave busy pages on the pageout queues.
941 * If the pageout daemon comes across a busy page,
942 * it will remove the page from the pageout queues.
945 object
= first_object
;
946 offset
= first_offset
;
947 first_m
= VM_PAGE_NULL
;
948 access_required
= fault_type
;
952 "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
953 object
, offset
, fault_type
, *protection
, 0);
956 * default type of fault
958 my_fault
= DBG_CACHE_HIT_FAULT
;
962 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
964 if (!object
->alive
) {
966 * object is no longer valid
967 * clean up and return error
969 vm_fault_cleanup(object
, first_m
);
970 thread_interrupt_level(interruptible_state
);
972 return (VM_FAULT_MEMORY_ERROR
);
975 if (!object
->pager_created
&& object
->phys_contiguous
) {
977 * A physically-contiguous object without a pager:
978 * must be a "large page" object. We do not deal
979 * with VM pages for this object.
981 caller_lookup
= FALSE
;
983 goto phys_contig_object
;
986 if (object
->blocked_access
) {
988 * Access to this VM object has been blocked.
989 * Replace our "paging_in_progress" reference with
990 * a "activity_in_progress" reference and wait for
991 * access to be unblocked.
993 caller_lookup
= FALSE
; /* no longer valid after sleep */
994 vm_object_activity_begin(object
);
995 vm_object_paging_end(object
);
996 while (object
->blocked_access
) {
997 vm_object_sleep(object
,
998 VM_OBJECT_EVENT_UNBLOCKED
,
1001 vm_fault_page_blocked_access
++;
1002 vm_object_paging_begin(object
);
1003 vm_object_activity_end(object
);
1007 * See whether the page at 'offset' is resident
1009 if (caller_lookup
== TRUE
) {
1011 * The caller has already looked up the page
1012 * and gave us the result in "result_page".
1013 * We can use this for the first lookup but
1014 * it loses its validity as soon as we unlock
1018 caller_lookup
= FALSE
; /* no longer valid after that */
1020 m
= vm_page_lookup(object
, offset
);
1023 dbgTrace(0xBEEF0004, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1025 if (m
!= VM_PAGE_NULL
) {
1029 * The page is being brought in,
1030 * wait for it and then retry.
1033 dbgTrace(0xBEEF0005, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1035 wait_result
= PAGE_SLEEP(object
, m
, interruptible
);
1038 "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
1041 counter(c_vm_fault_page_block_busy_kernel
++);
1043 if (wait_result
!= THREAD_AWAKENED
) {
1044 vm_fault_cleanup(object
, first_m
);
1045 thread_interrupt_level(interruptible_state
);
1047 if (wait_result
== THREAD_RESTART
)
1048 return (VM_FAULT_RETRY
);
1050 return (VM_FAULT_INTERRUPTED
);
1058 vm_pageout_steal_laundry(m
, FALSE
);
1060 if (m
->phys_page
== vm_page_guard_addr
) {
1062 * Guard page: off limits !
1064 if (fault_type
== VM_PROT_NONE
) {
1066 * The fault is not requesting any
1067 * access to the guard page, so it must
1068 * be just to wire or unwire it.
1069 * Let's pretend it succeeded...
1073 assert(first_m
== VM_PAGE_NULL
);
1074 *top_page
= first_m
;
1076 *type_of_fault
= DBG_GUARD_FAULT
;
1077 thread_interrupt_level(interruptible_state
);
1078 return VM_FAULT_SUCCESS
;
1081 * The fault requests access to the
1082 * guard page: let's deny that !
1084 vm_fault_cleanup(object
, first_m
);
1085 thread_interrupt_level(interruptible_state
);
1086 return VM_FAULT_MEMORY_ERROR
;
1092 * The page is in error, give up now.
1095 dbgTrace(0xBEEF0006, (unsigned int) m
, (unsigned int) error_code
); /* (TEST/DEBUG) */
1098 *error_code
= KERN_MEMORY_ERROR
;
1101 vm_fault_cleanup(object
, first_m
);
1102 thread_interrupt_level(interruptible_state
);
1104 return (VM_FAULT_MEMORY_ERROR
);
1108 * The pager wants us to restart
1109 * at the top of the chain,
1110 * typically because it has moved the
1111 * page to another pager, then do so.
1114 dbgTrace(0xBEEF0007, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1118 vm_fault_cleanup(object
, first_m
);
1119 thread_interrupt_level(interruptible_state
);
1121 return (VM_FAULT_RETRY
);
1125 * The page isn't busy, but is absent,
1126 * therefore it's deemed "unavailable".
1128 * Remove the non-existent page (unless it's
1129 * in the top object) and move on down to the
1130 * next object (if there is one).
1133 dbgTrace(0xBEEF0008, (unsigned int) m
, (unsigned int) object
->shadow
); /* (TEST/DEBUG) */
1135 next_object
= object
->shadow
;
1137 if (next_object
== VM_OBJECT_NULL
) {
1139 * Absent page at bottom of shadow
1140 * chain; zero fill the page we left
1141 * busy in the first object, and free
1144 assert(!must_be_resident
);
1147 * check for any conditions that prevent
1148 * us from creating a new zero-fill page
1149 * vm_fault_check will do all of the
1150 * fault cleanup in the case of an error condition
1151 * including resetting the thread_interrupt_level
1153 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
);
1155 if (error
!= VM_FAULT_SUCCESS
)
1159 "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
1164 if (object
!= first_object
) {
1166 * free the absent page we just found
1171 * drop reference and lock on current object
1173 vm_object_paging_end(object
);
1174 vm_object_unlock(object
);
1177 * grab the original page we
1178 * 'soldered' in place and
1179 * retake lock on 'first_object'
1182 first_m
= VM_PAGE_NULL
;
1184 object
= first_object
;
1185 offset
= first_offset
;
1187 vm_object_lock(object
);
1190 * we're going to use the absent page we just found
1191 * so convert it to a 'busy' page
1196 if (fault_info
->mark_zf_absent
&& no_zero_fill
== TRUE
)
1199 * zero-fill the page and put it on
1200 * the correct paging queue
1202 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
1206 if (must_be_resident
)
1207 vm_object_paging_end(object
);
1208 else if (object
!= first_object
) {
1209 vm_object_paging_end(object
);
1216 vm_page_lockspin_queues();
1218 assert(!m
->pageout_queue
);
1219 VM_PAGE_QUEUES_REMOVE(m
);
1221 vm_page_unlock_queues();
1224 "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
1227 offset
+object
->vo_shadow_offset
,0);
1229 offset
+= object
->vo_shadow_offset
;
1230 fault_info
->lo_offset
+= object
->vo_shadow_offset
;
1231 fault_info
->hi_offset
+= object
->vo_shadow_offset
;
1232 access_required
= VM_PROT_READ
;
1234 vm_object_lock(next_object
);
1235 vm_object_unlock(object
);
1236 object
= next_object
;
1237 vm_object_paging_begin(object
);
1240 * reset to default type of fault
1242 my_fault
= DBG_CACHE_HIT_FAULT
;
1248 && ((object
!= first_object
) || (object
->copy
!= VM_OBJECT_NULL
))
1249 && (fault_type
& VM_PROT_WRITE
)) {
1251 * This is a copy-on-write fault that will
1252 * cause us to revoke access to this page, but
1253 * this page is in the process of being cleaned
1254 * in a clustered pageout. We must wait until
1255 * the cleaning operation completes before
1256 * revoking access to the original page,
1257 * otherwise we might attempt to remove a
1261 dbgTrace(0xBEEF0009, (unsigned int) m
, (unsigned int) offset
); /* (TEST/DEBUG) */
1264 "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
1268 * take an extra ref so that object won't die
1270 vm_object_reference_locked(object
);
1272 vm_fault_cleanup(object
, first_m
);
1274 counter(c_vm_fault_page_block_backoff_kernel
++);
1275 vm_object_lock(object
);
1276 assert(object
->ref_count
> 0);
1278 m
= vm_page_lookup(object
, offset
);
1280 if (m
!= VM_PAGE_NULL
&& m
->cleaning
) {
1281 PAGE_ASSERT_WAIT(m
, interruptible
);
1283 vm_object_unlock(object
);
1284 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1285 vm_object_deallocate(object
);
1289 vm_object_unlock(object
);
1291 vm_object_deallocate(object
);
1292 thread_interrupt_level(interruptible_state
);
1294 return (VM_FAULT_RETRY
);
1297 if (type_of_fault
== NULL
&& m
->speculative
&&
1298 !(fault_info
!= NULL
&& fault_info
->stealth
)) {
1300 * If we were passed a non-NULL pointer for
1301 * "type_of_fault", than we came from
1302 * vm_fault... we'll let it deal with
1303 * this condition, since it
1304 * needs to see m->speculative to correctly
1305 * account the pageins, otherwise...
1306 * take it off the speculative queue, we'll
1307 * let the caller of vm_fault_page deal
1308 * with getting it onto the correct queue
1310 * If the caller specified in fault_info that
1311 * it wants a "stealth" fault, we also leave
1312 * the page in the speculative queue.
1314 vm_page_lockspin_queues();
1316 VM_PAGE_QUEUES_REMOVE(m
);
1317 vm_page_unlock_queues();
1323 * the user needs access to a page that we
1324 * encrypted before paging it out.
1325 * Decrypt the page now.
1326 * Keep it busy to prevent anyone from
1327 * accessing it during the decryption.
1330 vm_page_decrypt(m
, 0);
1331 assert(object
== m
->object
);
1333 PAGE_WAKEUP_DONE(m
);
1336 * Retry from the top, in case
1337 * something changed while we were
1342 ASSERT_PAGE_DECRYPTED(m
);
1344 if (m
->object
->code_signed
) {
1347 * We just paged in a page from a signed
1348 * memory object but we don't need to
1349 * validate it now. We'll validate it if
1350 * when it gets mapped into a user address
1351 * space for the first time or when the page
1352 * gets copied to another object as a result
1353 * of a copy-on-write.
1358 * We mark the page busy and leave it on
1359 * the pageout queues. If the pageout
1360 * deamon comes across it, then it will
1361 * remove the page from the queue, but not the object
1364 dbgTrace(0xBEEF000B, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1367 "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
1368 object
, offset
, m
, 0, 0);
1378 * we get here when there is no page present in the object at
1379 * the offset we're interested in... we'll allocate a page
1380 * at this point if the pager associated with
1381 * this object can provide the data or we're the top object...
1382 * object is locked; m == NULL
1384 if (must_be_resident
) {
1385 if (fault_type
== VM_PROT_NONE
&&
1386 object
== kernel_object
) {
1388 * We've been called from vm_fault_unwire()
1389 * while removing a map entry that was allocated
1390 * with KMA_KOBJECT and KMA_VAONLY. This page
1391 * is not present and there's nothing more to
1392 * do here (nothing to unwire).
1394 vm_fault_cleanup(object
, first_m
);
1395 thread_interrupt_level(interruptible_state
);
1397 return VM_FAULT_MEMORY_ERROR
;
1400 goto dont_look_for_page
;
1404 data_supply
= FALSE
;
1405 #endif /* !MACH_PAGEMAP */
1407 look_for_page
= (object
->pager_created
&& (MUST_ASK_PAGER(object
, offset
, external_state
) == TRUE
) && !data_supply
);
1410 dbgTrace(0xBEEF000C, (unsigned int) look_for_page
, (unsigned int) object
); /* (TEST/DEBUG) */
1412 if (!look_for_page
&& object
== first_object
&& !object
->phys_contiguous
) {
1414 * Allocate a new page for this object/offset pair as a placeholder
1418 dbgTrace(0xBEEF000D, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1420 if (m
== VM_PAGE_NULL
) {
1422 vm_fault_cleanup(object
, first_m
);
1423 thread_interrupt_level(interruptible_state
);
1425 return (VM_FAULT_MEMORY_SHORTAGE
);
1428 if (fault_info
&& fault_info
->batch_pmap_op
== TRUE
) {
1429 vm_page_insert_internal(m
, object
, offset
, FALSE
, TRUE
, TRUE
);
1431 vm_page_insert(m
, object
, offset
);
1434 if (look_for_page
) {
1439 * If the memory manager is not ready, we
1440 * cannot make requests.
1442 if (!object
->pager_ready
) {
1444 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1446 if (m
!= VM_PAGE_NULL
)
1450 "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
1451 object
, offset
, 0, 0, 0);
1454 * take an extra ref so object won't die
1456 vm_object_reference_locked(object
);
1457 vm_fault_cleanup(object
, first_m
);
1458 counter(c_vm_fault_page_block_backoff_kernel
++);
1460 vm_object_lock(object
);
1461 assert(object
->ref_count
> 0);
1463 if (!object
->pager_ready
) {
1464 wait_result
= vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGER_READY
, interruptible
);
1466 vm_object_unlock(object
);
1467 if (wait_result
== THREAD_WAITING
)
1468 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1469 vm_object_deallocate(object
);
1473 vm_object_unlock(object
);
1474 vm_object_deallocate(object
);
1475 thread_interrupt_level(interruptible_state
);
1477 return (VM_FAULT_RETRY
);
1480 if (!object
->internal
&& !object
->phys_contiguous
&& object
->paging_in_progress
> vm_object_pagein_throttle
) {
1482 * If there are too many outstanding page
1483 * requests pending on this external object, we
1484 * wait for them to be resolved now.
1487 dbgTrace(0xBEEF0010, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1489 if (m
!= VM_PAGE_NULL
)
1492 * take an extra ref so object won't die
1494 vm_object_reference_locked(object
);
1496 vm_fault_cleanup(object
, first_m
);
1498 counter(c_vm_fault_page_block_backoff_kernel
++);
1500 vm_object_lock(object
);
1501 assert(object
->ref_count
> 0);
1503 if (object
->paging_in_progress
>= vm_object_pagein_throttle
) {
1504 vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS
, interruptible
);
1506 vm_object_unlock(object
);
1507 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1508 vm_object_deallocate(object
);
1512 vm_object_unlock(object
);
1513 vm_object_deallocate(object
);
1514 thread_interrupt_level(interruptible_state
);
1516 return (VM_FAULT_RETRY
);
1519 if (object
->internal
&&
1520 (COMPRESSED_PAGER_IS_ACTIVE
1521 || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
)) {
1522 int compressed_count_delta
;
1524 if (m
== VM_PAGE_NULL
) {
1526 * Allocate a new page for this object/offset pair as a placeholder
1530 dbgTrace(0xBEEF000D, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1532 if (m
== VM_PAGE_NULL
) {
1534 vm_fault_cleanup(object
, first_m
);
1535 thread_interrupt_level(interruptible_state
);
1537 return (VM_FAULT_MEMORY_SHORTAGE
);
1541 if (fault_info
&& fault_info
->batch_pmap_op
== TRUE
) {
1542 vm_page_insert_internal(m
, object
, offset
, FALSE
, TRUE
, TRUE
);
1544 vm_page_insert(m
, object
, offset
);
1550 pager
= object
->pager
;
1552 assert(object
->paging_in_progress
> 0);
1553 vm_object_unlock(object
);
1555 rc
= vm_compressor_pager_get(
1557 offset
+ object
->paging_offset
,
1561 &compressed_count_delta
);
1563 vm_object_lock(object
);
1564 assert(object
->paging_in_progress
> 0);
1566 vm_compressor_pager_count(
1568 compressed_count_delta
,
1569 FALSE
, /* shared_lock */
1576 if ((m
->object
->wimg_bits
&
1578 VM_WIMG_USE_DEFAULT
) {
1580 * If the page is not cacheable,
1581 * we can't let its contents
1582 * linger in the data cache
1583 * after the decompression.
1585 pmap_sync_page_attributes_phys(
1588 m
->written_by_kernel
= TRUE
;
1592 * If the object is purgeable, its
1593 * owner's purgeable ledgers have been
1594 * updated in vm_page_insert() but the
1595 * page was also accounted for in a
1596 * "compressed purgeable" ledger, so
1599 if ((object
->purgable
!=
1600 VM_PURGABLE_DENY
) &&
1601 (object
->vo_purgeable_owner
!=
1604 * One less compressed
1607 vm_purgeable_compressed_update(
1613 case KERN_MEMORY_FAILURE
:
1618 case KERN_MEMORY_ERROR
:
1622 panic("vm_fault_page(): unexpected "
1624 "vm_compressor_pager_get()\n",
1627 PAGE_WAKEUP_DONE(m
);
1630 goto data_requested
;
1632 my_fault_type
= DBG_PAGEIN_FAULT
;
1634 if (m
!= VM_PAGE_NULL
) {
1640 dbgTrace(0xBEEF0012, (unsigned int) object
, (unsigned int) 0); /* (TEST/DEBUG) */
1644 * It's possible someone called vm_object_destroy while we weren't
1645 * holding the object lock. If that has happened, then bail out
1649 pager
= object
->pager
;
1651 if (pager
== MEMORY_OBJECT_NULL
) {
1652 vm_fault_cleanup(object
, first_m
);
1653 thread_interrupt_level(interruptible_state
);
1654 return VM_FAULT_MEMORY_ERROR
;
1658 * We have an absent page in place for the faulting offset,
1659 * so we can release the object lock.
1662 vm_object_unlock(object
);
1665 * If this object uses a copy_call strategy,
1666 * and we are interested in a copy of this object
1667 * (having gotten here only by following a
1668 * shadow chain), then tell the memory manager
1669 * via a flag added to the desired_access
1670 * parameter, so that it can detect a race
1671 * between our walking down the shadow chain
1672 * and its pushing pages up into a copy of
1673 * the object that it manages.
1675 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_CALL
&& object
!= first_object
)
1676 wants_copy_flag
= VM_PROT_WANTS_COPY
;
1678 wants_copy_flag
= VM_PROT_NONE
;
1681 "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
1683 access_required
| wants_copy_flag
, 0);
1685 if (object
->copy
== first_object
) {
1687 * if we issue the memory_object_data_request in
1688 * this state, we are subject to a deadlock with
1689 * the underlying filesystem if it is trying to
1690 * shrink the file resulting in a push of pages
1691 * into the copy object... that push will stall
1692 * on the placeholder page, and if the pushing thread
1693 * is holding a lock that is required on the pagein
1694 * path (such as a truncate lock), we'll deadlock...
1695 * to avoid this potential deadlock, we throw away
1696 * our placeholder page before calling memory_object_data_request
1697 * and force this thread to retry the vm_fault_page after
1698 * we have issued the I/O. the second time through this path
1699 * we will find the page already in the cache (presumably still
1700 * busy waiting for the I/O to complete) and then complete
1701 * the fault w/o having to go through memory_object_data_request again
1703 assert(first_m
!= VM_PAGE_NULL
);
1704 assert(first_m
->object
== first_object
);
1706 vm_object_lock(first_object
);
1707 VM_PAGE_FREE(first_m
);
1708 vm_object_paging_end(first_object
);
1709 vm_object_unlock(first_object
);
1711 first_m
= VM_PAGE_NULL
;
1712 force_fault_retry
= TRUE
;
1714 vm_fault_page_forced_retry
++;
1717 if (data_already_requested
== TRUE
) {
1718 orig_behavior
= fault_info
->behavior
;
1719 orig_cluster_size
= fault_info
->cluster_size
;
1721 fault_info
->behavior
= VM_BEHAVIOR_RANDOM
;
1722 fault_info
->cluster_size
= PAGE_SIZE
;
1725 * Call the memory manager to retrieve the data.
1727 rc
= memory_object_data_request(
1729 offset
+ object
->paging_offset
,
1731 access_required
| wants_copy_flag
,
1732 (memory_object_fault_info_t
)fault_info
);
1734 if (data_already_requested
== TRUE
) {
1735 fault_info
->behavior
= orig_behavior
;
1736 fault_info
->cluster_size
= orig_cluster_size
;
1738 data_already_requested
= TRUE
;
1740 DTRACE_VM2(maj_fault
, int, 1, (uint64_t *), NULL
);
1742 dbgTrace(0xBEEF0013, (unsigned int) object
, (unsigned int) rc
); /* (TEST/DEBUG) */
1744 vm_object_lock(object
);
1747 if (rc
!= KERN_SUCCESS
) {
1749 vm_fault_cleanup(object
, first_m
);
1750 thread_interrupt_level(interruptible_state
);
1752 return ((rc
== MACH_SEND_INTERRUPTED
) ?
1753 VM_FAULT_INTERRUPTED
:
1754 VM_FAULT_MEMORY_ERROR
);
1757 clock_usec_t tv_usec
;
1759 if (my_fault_type
== DBG_PAGEIN_FAULT
) {
1760 clock_get_system_microtime(&tv_sec
, &tv_usec
);
1761 current_thread()->t_page_creation_time
= tv_sec
;
1762 current_thread()->t_page_creation_count
= 0;
1765 if ((interruptible
!= THREAD_UNINT
) && (current_thread()->sched_flags
& TH_SFLAG_ABORT
)) {
1767 vm_fault_cleanup(object
, first_m
);
1768 thread_interrupt_level(interruptible_state
);
1770 return (VM_FAULT_INTERRUPTED
);
1772 if (force_fault_retry
== TRUE
) {
1774 vm_fault_cleanup(object
, first_m
);
1775 thread_interrupt_level(interruptible_state
);
1777 return (VM_FAULT_RETRY
);
1779 if (m
== VM_PAGE_NULL
&& object
->phys_contiguous
) {
1781 * No page here means that the object we
1782 * initially looked up was "physically
1783 * contiguous" (i.e. device memory). However,
1784 * with Virtual VRAM, the object might not
1785 * be backed by that device memory anymore,
1786 * so we're done here only if the object is
1787 * still "phys_contiguous".
1788 * Otherwise, if the object is no longer
1789 * "phys_contiguous", we need to retry the
1790 * page fault against the object's new backing
1791 * store (different memory object).
1797 * potentially a pagein fault
1798 * if we make it through the state checks
1799 * above, than we'll count it as such
1801 my_fault
= my_fault_type
;
1804 * Retry with same object/offset, since new data may
1805 * be in a different page (i.e., m is meaningless at
1812 * We get here if the object has no pager, or an existence map
1813 * exists and indicates the page isn't present on the pager
1814 * or we're unwiring a page. If a pager exists, but there
1815 * is no existence map, then the m->absent case above handles
1816 * the ZF case when the pager can't provide the page
1819 dbgTrace(0xBEEF0014, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1821 if (object
== first_object
)
1824 assert(m
== VM_PAGE_NULL
);
1827 "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
1831 next_object
= object
->shadow
;
1833 if (next_object
== VM_OBJECT_NULL
) {
1835 * we've hit the bottom of the shadown chain,
1836 * fill the page in the top object with zeros.
1838 assert(!must_be_resident
);
1840 if (object
!= first_object
) {
1841 vm_object_paging_end(object
);
1842 vm_object_unlock(object
);
1844 object
= first_object
;
1845 offset
= first_offset
;
1846 vm_object_lock(object
);
1849 assert(m
->object
== object
);
1850 first_m
= VM_PAGE_NULL
;
1853 * check for any conditions that prevent
1854 * us from creating a new zero-fill page
1855 * vm_fault_check will do all of the
1856 * fault cleanup in the case of an error condition
1857 * including resetting the thread_interrupt_level
1859 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
);
1861 if (error
!= VM_FAULT_SUCCESS
)
1864 if (m
== VM_PAGE_NULL
) {
1867 if (m
== VM_PAGE_NULL
) {
1868 vm_fault_cleanup(object
, VM_PAGE_NULL
);
1869 thread_interrupt_level(interruptible_state
);
1871 return (VM_FAULT_MEMORY_SHORTAGE
);
1873 vm_page_insert(m
, object
, offset
);
1875 if (fault_info
->mark_zf_absent
&& no_zero_fill
== TRUE
)
1878 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
1884 * Move on to the next object. Lock the next
1885 * object before unlocking the current one.
1887 if ((object
!= first_object
) || must_be_resident
)
1888 vm_object_paging_end(object
);
1890 offset
+= object
->vo_shadow_offset
;
1891 fault_info
->lo_offset
+= object
->vo_shadow_offset
;
1892 fault_info
->hi_offset
+= object
->vo_shadow_offset
;
1893 access_required
= VM_PROT_READ
;
1895 vm_object_lock(next_object
);
1896 vm_object_unlock(object
);
1898 object
= next_object
;
1899 vm_object_paging_begin(object
);
1904 * PAGE HAS BEEN FOUND.
1907 * busy, so that we can play with it;
1908 * not absent, so that nobody else will fill it;
1909 * possibly eligible for pageout;
1911 * The top-level page (first_m) is:
1912 * VM_PAGE_NULL if the page was found in the
1914 * busy, not absent, and ineligible for pageout.
1916 * The current object (object) is locked. A paging
1917 * reference is held for the current and top-level
1922 dbgTrace(0xBEEF0015, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1924 #if EXTRA_ASSERTIONS
1925 assert(m
->busy
&& !m
->absent
);
1926 assert((first_m
== VM_PAGE_NULL
) ||
1927 (first_m
->busy
&& !first_m
->absent
&&
1928 !first_m
->active
&& !first_m
->inactive
));
1929 #endif /* EXTRA_ASSERTIONS */
1933 * If we found a page, we must have decrypted it before we
1936 ASSERT_PAGE_DECRYPTED(m
);
1939 "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
1941 first_object
, first_m
);
1944 * If the page is being written, but isn't
1945 * already owned by the top-level object,
1946 * we have to copy it into a new page owned
1947 * by the top-level object.
1949 if (object
!= first_object
) {
1952 dbgTrace(0xBEEF0016, (unsigned int) object
, (unsigned int) fault_type
); /* (TEST/DEBUG) */
1954 if (fault_type
& VM_PROT_WRITE
) {
1958 * We only really need to copy if we
1961 assert(!must_be_resident
);
1964 * are we protecting the system from
1965 * backing store exhaustion. If so
1966 * sleep unless we are privileged.
1968 if (vm_backing_store_low
) {
1969 if (!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV
)) {
1972 vm_fault_cleanup(object
, first_m
);
1974 assert_wait((event_t
)&vm_backing_store_low
, THREAD_UNINT
);
1976 thread_block(THREAD_CONTINUE_NULL
);
1977 thread_interrupt_level(interruptible_state
);
1979 return (VM_FAULT_RETRY
);
1983 * If we try to collapse first_object at this
1984 * point, we may deadlock when we try to get
1985 * the lock on an intermediate object (since we
1986 * have the bottom object locked). We can't
1987 * unlock the bottom object, because the page
1988 * we found may move (by collapse) if we do.
1990 * Instead, we first copy the page. Then, when
1991 * we have no more use for the bottom object,
1992 * we unlock it and try to collapse.
1994 * Note that we copy the page even if we didn't
1995 * need to... that's the breaks.
1999 * Allocate a page for the copy
2001 copy_m
= vm_page_grab();
2003 if (copy_m
== VM_PAGE_NULL
) {
2006 vm_fault_cleanup(object
, first_m
);
2007 thread_interrupt_level(interruptible_state
);
2009 return (VM_FAULT_MEMORY_SHORTAGE
);
2012 "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
2016 vm_page_copy(m
, copy_m
);
2019 * If another map is truly sharing this
2020 * page with us, we have to flush all
2021 * uses of the original page, since we
2022 * can't distinguish those which want the
2023 * original from those which need the
2026 * XXXO If we know that only one map has
2027 * access to this page, then we could
2028 * avoid the pmap_disconnect() call.
2031 pmap_disconnect(m
->phys_page
);
2034 VM_PAGE_COUNT_AS_PAGEIN(m
);
2035 VM_PAGE_CONSUME_CLUSTERED(m
);
2037 assert(!m
->cleaning
);
2040 * We no longer need the old page or object.
2044 vm_object_paging_end(object
);
2045 vm_object_unlock(object
);
2047 my_fault
= DBG_COW_FAULT
;
2048 VM_STAT_INCR(cow_faults
);
2049 DTRACE_VM2(cow_fault
, int, 1, (uint64_t *), NULL
);
2050 current_task()->cow_faults
++;
2052 object
= first_object
;
2053 offset
= first_offset
;
2055 vm_object_lock(object
);
2057 * get rid of the place holder
2058 * page that we soldered in earlier
2060 VM_PAGE_FREE(first_m
);
2061 first_m
= VM_PAGE_NULL
;
2064 * and replace it with the
2065 * page we just copied into
2067 assert(copy_m
->busy
);
2068 vm_page_insert(copy_m
, object
, offset
);
2069 SET_PAGE_DIRTY(copy_m
, TRUE
);
2073 * Now that we've gotten the copy out of the
2074 * way, let's try to collapse the top object.
2075 * But we have to play ugly games with
2076 * paging_in_progress to do that...
2078 vm_object_paging_end(object
);
2079 vm_object_collapse(object
, offset
, TRUE
);
2080 vm_object_paging_begin(object
);
2083 *protection
&= (~VM_PROT_WRITE
);
	/*
	 * Now check whether the page needs to be pushed into the
	 * copy object.  The use of asymmetric copy on write for
	 * shared temporary objects means that we may do two copies to
	 * satisfy the fault; one above to get the page from a
	 * shadowed object, and one here to push it into the copy.
	 */
	try_failed_count = 0;

	while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
		vm_object_offset_t	copy_offset;

		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);	/* (TEST/DEBUG) */

		/*
		 * If the page is being written, but hasn't been
		 * copied to the copy-object, we have to copy it there.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			*protection &= ~VM_PROT_WRITE;
		}
		/*
		 * If the page was guaranteed to be resident,
		 * we must have already performed the copy.
		 */
		if (must_be_resident)
			break;

		/*
		 * Try to get the lock on the copy_object.
		 */
		if (!vm_object_lock_try(copy_object)) {

			vm_object_unlock(object);
			try_failed_count++;

			mutex_pause(try_failed_count);	/* wait a bit */

			vm_object_lock(object);
			continue;
		}
		try_failed_count = 0;

		/*
		 * Make another reference to the copy-object,
		 * to keep it from disappearing during the copy.
		 */
		vm_object_reference_locked(copy_object);

		/*
		 * Does the page exist in the copy?
		 */
		copy_offset = first_offset - copy_object->vo_shadow_offset;

		if (copy_object->vo_size <= copy_offset)
			/*
			 * Copy object doesn't cover this page -- do nothing.
			 */
			;
		else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
			/*
			 * Page currently exists in the copy object
			 */

			/*
			 * If the page is being brought
			 * in, wait for it and then retry.
			 */

			/*
			 * take an extra ref so object won't die
			 */
			vm_object_reference_locked(copy_object);
			vm_object_unlock(copy_object);
			vm_fault_cleanup(object, first_m);
			counter(c_vm_fault_page_block_backoff_kernel++);

			vm_object_lock(copy_object);
			assert(copy_object->ref_count > 0);
			VM_OBJ_RES_DECR(copy_object);
			vm_object_lock_assert_exclusive(copy_object);
			copy_object->ref_count--;
			assert(copy_object->ref_count > 0);
			copy_m = vm_page_lookup(copy_object, copy_offset);

			/*
			 * it's OK if the "copy_m" page is encrypted,
			 * because we're not moving it nor handling its
			 * contents.
			 */
			if (copy_m != VM_PAGE_NULL && copy_m->busy) {
				PAGE_ASSERT_WAIT(copy_m, interruptible);

				vm_object_unlock(copy_object);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				vm_object_deallocate(copy_object);
			}
			vm_object_unlock(copy_object);
			vm_object_deallocate(copy_object);
			thread_interrupt_level(interruptible_state);

			return (VM_FAULT_RETRY);
		}
		else if (!PAGED_OUT(copy_object, copy_offset)) {
			/*
			 * If PAGED_OUT is TRUE, then the page used to exist
			 * in the copy-object, and has already been paged out.
			 * We don't need to repeat this.  If PAGED_OUT is
			 * FALSE, then either we don't know (!pager_created,
			 * for example) or it hasn't been paged out.
			 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
			 * We must copy the page to the copy object.
			 */

			if (vm_backing_store_low) {
				/*
				 * we are protecting the system from
				 * backing store exhaustion.  If so
				 * sleep unless we are privileged.
				 */
				if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
					assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

					VM_OBJ_RES_DECR(copy_object);
					vm_object_lock_assert_exclusive(copy_object);
					copy_object->ref_count--;
					assert(copy_object->ref_count > 0);

					vm_object_unlock(copy_object);
					vm_fault_cleanup(object, first_m);
					thread_block(THREAD_CONTINUE_NULL);
					thread_interrupt_level(interruptible_state);

					return (VM_FAULT_RETRY);
				}
			}
			/*
			 * Allocate a page for the copy
			 */
			copy_m = vm_page_alloc(copy_object, copy_offset);

			if (copy_m == VM_PAGE_NULL) {
				VM_OBJ_RES_DECR(copy_object);
				vm_object_lock_assert_exclusive(copy_object);
				copy_object->ref_count--;
				assert(copy_object->ref_count > 0);

				vm_object_unlock(copy_object);
				vm_fault_cleanup(object, first_m);
				thread_interrupt_level(interruptible_state);

				return (VM_FAULT_MEMORY_SHORTAGE);
			}
			/*
			 * Must copy page into copy-object.
			 */
			vm_page_copy(m, copy_m);

			/*
			 * If the old page was in use by any users
			 * of the copy-object, it must be removed
			 * from all pmaps.  (We can't know which
			 * pmaps use it.)
			 */
			pmap_disconnect(m->phys_page);

			VM_PAGE_COUNT_AS_PAGEIN(m);
			VM_PAGE_CONSUME_CLUSTERED(m);
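			/*
			 * (Descriptive note, added for clarity: the branches below
			 * decide what to do with the freshly copied page.  If the copy
			 * object's pager isn't ready, or the backing store is known not
			 * to hold this offset, the copy is simply activated and left
			 * dirty in memory.  For an internal object with an active
			 * default pager/freezer, the pager is queried first so an
			 * already-stored copy isn't clobbered.  Otherwise the copy is
			 * pushed to the pager right away via
			 * vm_pageout_initialize_page().)
			 */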
			/*
			 * If there's a pager, then immediately
			 * page out this page, using the "initialize"
			 * option.  Else, we use the copy.
			 */
			if ((!copy_object->pager_ready)
			    || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
			    || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
			    ) {

				vm_page_lockspin_queues();
				assert(!m->cleaning);
				vm_page_activate(copy_m);
				vm_page_unlock_queues();

				SET_PAGE_DIRTY(copy_m, TRUE);
				PAGE_WAKEUP_DONE(copy_m);
			} else if (copy_object->internal &&
				   (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) {
				/*
				 * For internal objects check with the pager to see
				 * if the page already exists in the backing store.
				 * If yes, then we can drop the copy page. If not,
				 * then we'll activate it, mark it dirty and keep it
				 * around.
				 */
				kern_return_t	kr = KERN_SUCCESS;

				memory_object_t	copy_pager = copy_object->pager;
				assert(copy_pager != MEMORY_OBJECT_NULL);
				vm_object_paging_begin(copy_object);

				vm_object_unlock(copy_object);

				kr = memory_object_data_request(
					copy_pager,
					copy_offset + copy_object->paging_offset,
					0, /* Only query the pager. */

				vm_object_lock(copy_object);

				vm_object_paging_end(copy_object);

				/*
				 * Since we dropped the copy_object's lock,
				 * check whether we'll have to deallocate
				 * the hard way.
				 */
				if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					vm_object_lock(object);
				}
				if (kr == KERN_SUCCESS) {
					/*
					 * The pager has the page. We don't want to overwrite
					 * that page by sending this one out to the backing store.
					 * So we drop the copy page.
					 */
					VM_PAGE_FREE(copy_m);

				} else {
					/*
					 * The pager doesn't have the page. We'll keep this one
					 * around in the copy object. It might get sent out to
					 * the backing store under memory pressure.
					 */
					vm_page_lockspin_queues();
					assert(!m->cleaning);
					vm_page_activate(copy_m);
					vm_page_unlock_queues();

					SET_PAGE_DIRTY(copy_m, TRUE);
					PAGE_WAKEUP_DONE(copy_m);
				}
			} else {
				assert(copy_m->busy == TRUE);
				assert(!m->cleaning);

				/*
				 * dirty is protected by the object lock
				 */
				SET_PAGE_DIRTY(copy_m, TRUE);

				/*
				 * The page is already ready for pageout:
				 * not on pageout queues and busy.
				 * Unlock everything except the
				 * copy_object itself.
				 */
				vm_object_unlock(object);

				/*
				 * Write the page to the copy-object,
				 * flushing it from the kernel.
				 */
				vm_pageout_initialize_page(copy_m);

				/*
				 * Since the pageout may have
				 * temporarily dropped the
				 * copy_object's lock, we
				 * check whether we'll have
				 * to deallocate the hard way.
				 */
				if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					vm_object_lock(object);
				}
				/*
				 * Pick back up the old object's
				 * lock.  [It is safe to do so,
				 * since it must be deeper in the
				 * shadow chain.]
				 */
				vm_object_lock(object);
			}
			/*
			 * Because we're pushing a page upward
			 * in the object tree, we must restart
			 * any faults that are waiting here.
			 * [Note that this is an expansion of
			 * PAGE_WAKEUP that uses the THREAD_RESTART
			 * wait result].  Can't turn off the page's
			 * busy bit because we're not done with it.
			 */
			thread_wakeup_with_result((event_t) m, THREAD_RESTART);

			/*
			 * The reference count on copy_object must be
			 * at least 2: one for our extra reference,
			 * and at least one from the outside world
			 * (we checked that when we last locked
			 * copy_object).
			 */
			vm_object_lock_assert_exclusive(copy_object);
			copy_object->ref_count--;
			assert(copy_object->ref_count > 0);

			VM_OBJ_RES_DECR(copy_object);
			vm_object_unlock(copy_object);

			break;
		}
	*top_page = first_m;

	XPR(XPR_VM_FAULT,
	    "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
	    object, offset, m, first_m, 0);

	if (m != VM_PAGE_NULL) {
		retval = VM_FAULT_SUCCESS;

		if (my_fault == DBG_PAGEIN_FAULT) {

			VM_PAGE_COUNT_AS_PAGEIN(m);

			if (m->object->internal)
				my_fault = DBG_PAGEIND_FAULT;
			else
				my_fault = DBG_PAGEINV_FAULT;

			/*
			 * evaluate access pattern and update state
			 * vm_fault_deactivate_behind depends on the
			 * state being up to date
			 */
			vm_fault_is_sequential(object, offset, fault_info->behavior);

			vm_fault_deactivate_behind(object, offset, fault_info->behavior);

		} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {

			VM_STAT_INCR(decompressions);
		}
		*type_of_fault = my_fault;
	} else {
		retval = VM_FAULT_SUCCESS_NO_VM_PAGE;

		assert(first_m == VM_PAGE_NULL);
		assert(object == first_object);
	}

	thread_interrupt_level(interruptible_state);

	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);	/* (TEST/DEBUG) */

	return (retval);

backoff:
	thread_interrupt_level(interruptible_state);

	if (wait_result == THREAD_INTERRUPTED)
		return (VM_FAULT_INTERRUPTED);
	return (VM_FAULT_RETRY);
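/*
 * Illustrative aside (not part of the original source): the
 * vm_object_lock_try()/mutex_pause(try_failed_count) dance in the copy-push
 * loop above is a try-lock-with-backoff pattern: drop the lock you already
 * hold, pause for a period that grows with the number of failed attempts,
 * then retry, so two threads acquiring locks in opposite order cannot
 * deadlock.  A minimal user-space sketch of the same idea (all names below
 * are hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled */
#include <pthread.h>
#include <unistd.h>

/* Acquire 'second' while already holding 'first', without risking deadlock. */
static void
lock_second_with_backoff(pthread_mutex_t *first, pthread_mutex_t *second)
{
	unsigned int try_failed_count = 0;

	while (pthread_mutex_trylock(second) != 0) {
		pthread_mutex_unlock(first);		/* give the other thread a chance */
		try_failed_count++;
		usleep(10 * try_failed_count);		/* back off a little longer each time */
		pthread_mutex_lock(first);		/* re-take our own lock and retry */
	}
	/* both 'first' and 'second' are now held */
}
#endif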
/*
 * When soft faulting a page, we have to validate the page if:
 * 1. the page is being mapped in user space
 * 2. the page hasn't already been found to be "tainted"
 * 3. the page belongs to a code-signed object
 * 4. the page has not been validated yet or has been mapped for write.
 */
#define VM_FAULT_NEED_CS_VALIDATION(pmap, page)				\
	((pmap) != kernel_pmap /*1*/ &&					\
	 !(page)->cs_tainted /*2*/ &&					\
	 (page)->object->code_signed /*3*/ &&				\
	 (!(page)->cs_validated || (page)->wpmapped /*4*/))
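/*
 * Illustrative aside (not part of the original source): the predicate above
 * reads as "user-space mapping, not already tainted, backed by a code-signed
 * object, and either never validated or possibly modified since validation".
 * A small stand-alone model of the same decision, written against a
 * hypothetical struct rather than the real vm_page_t, just to make the four
 * conditions explicit:
 */
#if 0	/* illustrative sketch only -- never compiled */
#include <stdbool.h>

struct fault_page_model {
	bool user_space_mapping;	/* 1. mapped by a user pmap, not kernel_pmap */
	bool cs_tainted;		/* 2. already known to be invalid */
	bool object_code_signed;	/* 3. belongs to a code-signed VM object */
	bool cs_validated;		/* 4a. signature already checked */
	bool wpmapped;			/* 4b. has ever been mapped writable */
};

static bool
needs_cs_validation(const struct fault_page_model *p)
{
	return p->user_space_mapping &&			/* 1 */
	       !p->cs_tainted &&			/* 2 */
	       p->object_code_signed &&			/* 3 */
	       (!p->cs_validated || p->wpmapped);	/* 4 */
}
#endif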
/*
 * page queue lock must NOT be held
 * m->object must be locked
 *
 * NOTE: m->object could be locked "shared" only if we are called
 * from vm_fault() as part of a soft fault.  If so, we must be
 * careful not to modify the VM object in any way that is not
 * legal under a shared lock...
 */
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);

unsigned long cs_enter_tainted_rejected = 0;
unsigned long cs_enter_tainted_accepted = 0;
kern_return_t
vm_fault_enter(vm_page_t m,
	       pmap_t pmap,
	       vm_map_offset_t vaddr,
	       vm_prot_t prot,
	       vm_prot_t fault_type,
	       boolean_t wired,
	       boolean_t change_wiring,
	       boolean_t no_cache,
	       boolean_t cs_bypass,
	       __unused int user_tag,
	       int pmap_options,
	       boolean_t *need_retry,
	       int *type_of_fault)
{
	kern_return_t	kr, pe_result;
	boolean_t	previously_pmapped = m->pmapped;
	boolean_t	must_disconnect = 0;
	boolean_t	map_is_switched, map_is_switch_protected;
	int		cs_enforcement_enabled;

	vm_object_lock_assert_held(m->object);

	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	if (m->phys_page == vm_page_guard_addr) {
		assert(m->fictitious);
		return KERN_SUCCESS;
	}
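	/*
	 * (Descriptive note, added for clarity: the remainder of
	 * vm_fault_enter() proceeds in three broad phases that can be seen
	 * below -- first, code-signing validation and taint handling for
	 * user mappings of code-signed objects; second, placing the page on
	 * the appropriate paging queue (wired, local, speculative or active)
	 * under the page-queues lock; third, entering the translation with
	 * PMAP_ENTER_OPTIONS, retrying in blocking mode if the non-blocking
	 * attempt reports a resource shortage.)
	 */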
2548 if (*type_of_fault
== DBG_ZERO_FILL_FAULT
) {
2550 vm_object_lock_assert_exclusive(m
->object
);
2552 } else if ((fault_type
& VM_PROT_WRITE
) == 0) {
2554 * This is not a "write" fault, so we
2555 * might not have taken the object lock
2556 * exclusively and we might not be able
2557 * to update the "wpmapped" bit in
2559 * Let's just grant read access to
2560 * the page for now and we'll
2561 * soft-fault again if we need write
2564 prot
&= ~VM_PROT_WRITE
;
2566 if (m
->pmapped
== FALSE
) {
2569 if (*type_of_fault
== DBG_CACHE_HIT_FAULT
) {
2571 * found it in the cache, but this
2572 * is the first fault-in of the page (m->pmapped == FALSE)
2573 * so it must have come in as part of
2574 * a cluster... account 1 pagein against it
2576 if (m
->object
->internal
)
2577 *type_of_fault
= DBG_PAGEIND_FAULT
;
2579 *type_of_fault
= DBG_PAGEINV_FAULT
;
2581 VM_PAGE_COUNT_AS_PAGEIN(m
);
2583 VM_PAGE_CONSUME_CLUSTERED(m
);
2587 if (*type_of_fault
!= DBG_COW_FAULT
) {
2588 DTRACE_VM2(as_fault
, int, 1, (uint64_t *), NULL
);
2590 if (pmap
== kernel_pmap
) {
2591 DTRACE_VM2(kernel_asflt
, int, 1, (uint64_t *), NULL
);
2595 /* Validate code signature if necessary. */
2596 if (VM_FAULT_NEED_CS_VALIDATION(pmap
, m
)) {
2597 vm_object_lock_assert_exclusive(m
->object
);
2599 if (m
->cs_validated
) {
2600 vm_cs_revalidates
++;
2603 /* VM map is locked, so 1 ref will remain on VM object -
2604 * so no harm if vm_page_validate_cs drops the object lock */
2605 vm_page_validate_cs(m
);
2608 #define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
2610 map_is_switched
= ((pmap
!= vm_map_pmap(current_task()->map
)) &&
2611 (pmap
== vm_map_pmap(current_thread()->map
)));
2612 map_is_switch_protected
= current_thread()->map
->switch_protect
;
2614 /* If the map is switched, and is switch-protected, we must protect
2615 * some pages from being write-faulted: immutable pages because by
2616 * definition they may not be written, and executable pages because that
2617 * would provide a way to inject unsigned code.
2618 * If the page is immutable, we can simply return. However, we can't
2619 * immediately determine whether a page is executable anywhere. But,
2620 * we can disconnect it everywhere and remove the executable protection
2621 * from the current map. We do that below right before we do the
2624 cs_enforcement_enabled
= cs_enforcement(NULL
);
2626 if(cs_enforcement_enabled
&& map_is_switched
&&
2627 map_is_switch_protected
&& page_immutable(m
, prot
) &&
2628 (prot
& VM_PROT_WRITE
))
2630 return KERN_CODESIGN_ERROR
;
2633 /* A page could be tainted, or pose a risk of being tainted later.
2634 * Check whether the receiving process wants it, and make it feel
2635 * the consequences (that hapens in cs_invalid_page()).
2636 * For CS Enforcement, two other conditions will
2637 * cause that page to be tainted as well:
2638 * - pmapping an unsigned page executable - this means unsigned code;
2639 * - writeable mapping of a validated page - the content of that page
2640 * can be changed without the kernel noticing, therefore unsigned
2641 * code can be created
2643 if (m
->cs_tainted
||
2644 ((cs_enforcement_enabled
&& !cs_bypass
) &&
2645 (/* The page is unsigned and wants to be executable */
2646 (!m
->cs_validated
&& (prot
& VM_PROT_EXECUTE
)) ||
2647 /* The page should be immutable, but is in danger of being modified
2648 * This is the case where we want policy from the code directory -
2649 * is the page immutable or not? For now we have to assume that
2650 * code pages will be immutable, data pages not.
2651 * We'll assume a page is a code page if it has a code directory
2652 * and we fault for execution.
2653 * That is good enough since if we faulted the code page for
2654 * writing in another map before, it is wpmapped; if we fault
2655 * it for writing in this map later it will also be faulted for executing
2656 * at the same time; and if we fault for writing in another map
2657 * later, we will disconnect it from this pmap so we'll notice
2660 (page_immutable(m
, prot
) && ((prot
& VM_PROT_WRITE
) || m
->wpmapped
))
2664 /* We will have a tainted page. Have to handle the special case
2665 * of a switched map now. If the map is not switched, standard
2666 * procedure applies - call cs_invalid_page().
2667 * If the map is switched, the real owner is invalid already.
2668 * There is no point in invalidating the switching process since
2669 * it will not be executing from the map. So we don't call
2670 * cs_invalid_page() in that case. */
2671 boolean_t reject_page
;
2672 if(map_is_switched
) {
2673 assert(pmap
==vm_map_pmap(current_thread()->map
));
2674 assert(!(prot
& VM_PROT_WRITE
) || (map_is_switch_protected
== FALSE
));
2675 reject_page
= FALSE
;
2678 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n",
2679 m
->object
->code_signed
? "yes" : "no",
2680 m
->cs_validated
? "yes" : "no",
2681 m
->cs_tainted
? "yes" : "no",
2682 m
->wpmapped
? "yes" : "no",
2683 m
->slid
? "yes" : "no",
2685 reject_page
= cs_invalid_page((addr64_t
) vaddr
);
2689 /* reject the invalid page: abort the page fault */
2691 const char *procname
;
2693 vm_object_t file_object
, shadow
;
2694 vm_object_offset_t file_offset
;
2695 char *pathname
, *filename
;
2696 vm_size_t pathname_len
, filename_len
;
2697 boolean_t truncated_path
;
2698 #define __PATH_MAX 1024
2699 struct timespec mtime
, cs_mtime
;
2701 kr
= KERN_CODESIGN_ERROR
;
2702 cs_enter_tainted_rejected
++;
2704 /* get process name and pid */
2706 task
= current_task();
2707 pid
= proc_selfpid();
2708 if (task
->bsd_info
!= NULL
)
2709 procname
= proc_name_address(task
->bsd_info
);
2711 /* get file's VM object */
2712 file_object
= m
->object
;
2713 file_offset
= m
->offset
;
2714 for (shadow
= file_object
->shadow
;
2715 shadow
!= VM_OBJECT_NULL
;
2716 shadow
= file_object
->shadow
) {
2717 vm_object_lock_shared(shadow
);
2718 if (file_object
!= m
->object
) {
2719 vm_object_unlock(file_object
);
2721 file_offset
+= file_object
->vo_shadow_offset
;
2722 file_object
= shadow
;
2727 cs_mtime
.tv_sec
= 0;
2728 cs_mtime
.tv_nsec
= 0;
2730 /* get file's pathname and/or filename */
2735 truncated_path
= FALSE
;
2736 if (file_object
->pager
== NULL
) {
2737 /* no pager -> no file -> no pathname */
2738 pathname
= (char *) "<nil>";
2740 pathname
= (char *)kalloc(__PATH_MAX
* 2);
2743 pathname_len
= __PATH_MAX
;
2744 filename
= pathname
+ pathname_len
;
2745 filename_len
= __PATH_MAX
;
2747 vnode_pager_get_object_name(file_object
->pager
,
2753 vnode_pager_get_object_mtime(file_object
->pager
,
2757 printf("CODE SIGNING: process %d[%s]: "
2758 "rejecting invalid page at address 0x%llx "
2759 "from offset 0x%llx in file \"%s%s%s\" "
2760 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2761 "(signed:%d validated:%d tainted:%d "
2762 "wpmapped:%d slid:%d)\n",
2763 pid
, procname
, (addr64_t
) vaddr
,
2765 (pathname
? pathname
: ""),
2766 (truncated_path
? "/.../" : ""),
2767 (truncated_path
? filename
: ""),
2768 cs_mtime
.tv_sec
, cs_mtime
.tv_nsec
,
2769 ((cs_mtime
.tv_sec
== mtime
.tv_sec
&&
2770 cs_mtime
.tv_nsec
== mtime
.tv_nsec
)
2773 mtime
.tv_sec
, mtime
.tv_nsec
,
2774 m
->object
->code_signed
,
2779 if (file_object
!= m
->object
) {
2780 vm_object_unlock(file_object
);
2782 if (pathname_len
!= 0) {
2783 kfree(pathname
, __PATH_MAX
* 2);
2788 /* proceed with the invalid page */
2790 if (!m
->cs_validated
) {
2792 * This page has not been validated, so it
2793 * must not belong to a code-signed object
2794 * and should not be forcefully considered
2796 * We're just concerned about it here because
2797 * we've been asked to "execute" it but that
2798 * does not mean that it should cause other
2800 * This happens when a debugger sets a
2801 * breakpoint and we then execute code in
2802 * that page. Marking the page as "tainted"
2803 * would cause any inspection tool ("leaks",
2804 * "vmmap", "CrashReporter", ...) to get killed
2805 * due to code-signing violation on that page,
2806 * even though they're just reading it and not
2807 * executing from it.
2809 assert(!m
->object
->code_signed
);
2812 * Page might have been tainted before or not;
2813 * now it definitively is. If the page wasn't
2814 * tainted, we must disconnect it from all
2815 * pmaps later, to force existing mappings
2816 * through that code path for re-consideration
2817 * of the validity of that page.
2819 must_disconnect
= !m
->cs_tainted
;
2820 m
->cs_tainted
= TRUE
;
2822 cs_enter_tainted_accepted
++;
2824 if (kr
!= KERN_SUCCESS
) {
2826 printf("CODESIGNING: vm_fault_enter(0x%llx): "
2827 "*** INVALID PAGE ***\n",
2831 if (cs_enforcement_panic
) {
2832 panic("CODESIGNING: panicking on invalid page\n");
	/* proceed with the valid page */

	boolean_t	page_queues_locked = FALSE;

#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()	\
	if (! page_queues_locked) {		\
		page_queues_locked = TRUE;	\
		vm_page_lockspin_queues();	\
	}

#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()	\
	if (page_queues_locked) {		\
		page_queues_locked = FALSE;	\
		vm_page_unlock_queues();	\
	}
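/*
 * Illustrative aside (not part of the original source): the pair of macros
 * above implements a "take the lock at most once, remember whether you hold
 * it, release it at most once" idiom, so later code can demand the
 * page-queues lock wherever it turns out to be needed without double-locking
 * or double-unlocking.  The same idiom in a stand-alone user-space sketch
 * (hypothetical names, a pthread mutex standing in for the page-queues lock):
 */
#if 0	/* illustrative sketch only -- never compiled */
#include <pthread.h>
#include <stdbool.h>

static void
lock_if_needed(pthread_mutex_t *lock, bool *held)
{
	if (!*held) {
		pthread_mutex_lock(lock);
		*held = true;
	}
}

static void
unlock_if_needed(pthread_mutex_t *lock, bool *held)
{
	if (*held) {
		pthread_mutex_unlock(lock);
		*held = false;
	}
}
#endif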
2859 * Hold queues lock to manipulate
2860 * the page queues. Change wiring
2863 assert(m
->compressor
|| m
->object
!= compressor_object
);
2864 if (m
->compressor
) {
2866 * Compressor pages are neither wired
2867 * nor pageable and should never change.
2869 assert(m
->object
== compressor_object
);
2870 } else if (change_wiring
) {
2871 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
2874 if (kr
== KERN_SUCCESS
) {
2878 vm_page_unwire(m
, TRUE
);
2880 /* we keep the page queues lock, if we need it later */
2883 if (kr
!= KERN_SUCCESS
) {
2884 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
2885 vm_page_deactivate(m
);
2886 /* we keep the page queues lock, if we need it later */
2887 } else if (((!m
->active
&& !m
->inactive
) ||
2890 !VM_PAGE_WIRED(m
) && !m
->throttled
) {
2892 if (vm_page_local_q
&&
2894 (*type_of_fault
== DBG_COW_FAULT
||
2895 *type_of_fault
== DBG_ZERO_FILL_FAULT
) ) {
2899 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
2900 vm_object_lock_assert_exclusive(m
->object
);
2903 * we got a local queue to stuff this
2905 * its safe to manipulate local and
2906 * local_id at this point since we're
2907 * behind an exclusive object lock and
2908 * the page is not on any global queue.
2910 * we'll use the current cpu number to
2911 * select the queue note that we don't
2912 * need to disable preemption... we're
2913 * going to behind the local queue's
2914 * lock to do the real work
2918 lq
= &vm_page_local_q
[lid
].vpl_un
.vpl
;
2920 VPL_LOCK(&lq
->vpl_lock
);
2922 queue_enter(&lq
->vpl_queue
, m
,
2928 if (m
->object
->internal
)
2929 lq
->vpl_internal_count
++;
2931 lq
->vpl_external_count
++;
2933 VPL_UNLOCK(&lq
->vpl_lock
);
2935 if (lq
->vpl_count
> vm_page_local_q_soft_limit
)
2938 * we're beyond the soft limit
2939 * for the local queue
2940 * vm_page_reactivate_local will
2941 * 'try' to take the global page
2942 * queue lock... if it can't
2943 * that's ok... we'll let the
2944 * queue continue to grow up
2945 * to the hard limit... at that
2946 * point we'll wait for the
2947 * lock... once we've got the
2948 * lock, we'll transfer all of
2949 * the pages from the local
2950 * queue to the global active
2953 vm_page_reactivate_local(lid
, FALSE
, FALSE
);
2957 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
2960 * test again now that we hold the
2963 if (!VM_PAGE_WIRED(m
)) {
2964 if (m
->clean_queue
) {
2965 VM_PAGE_QUEUES_REMOVE(m
);
2967 vm_pageout_cleaned_reactivated
++;
2968 vm_pageout_cleaned_fault_reactivated
++;
2975 * If this is a no_cache mapping
2976 * and the page has never been
2977 * mapped before or was
2978 * previously a no_cache page,
2979 * then we want to leave pages
2980 * in the speculative state so
2981 * that they can be readily
2982 * recycled if free memory runs
2983 * low. Otherwise the page is
2984 * activated as normal.
2988 (!previously_pmapped
||
2992 if (!m
->speculative
)
2993 vm_page_speculate(m
, FALSE
);
2995 } else if (!m
->active
&&
2998 vm_page_activate(m
);
3002 /* we keep the page queues lock, if we need it later */
3006 /* we're done with the page queues lock, if we ever took it */
3007 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
	/* If we have a KERN_SUCCESS from the previous checks, we either have
	 * a good page, or a tainted page that has been accepted by the process.
	 * In both cases the page will be entered into the pmap.
	 * If the page is writeable, we need to disconnect it from other pmaps
	 * now so those processes can take note.
	 */
3016 if (kr
== KERN_SUCCESS
) {
3019 * NOTE: we may only hold the vm_object lock SHARED
3020 * at this point, so we need the phys_page lock to
3021 * properly serialize updating the pmapped and
3024 if ((prot
& VM_PROT_EXECUTE
) && !m
->xpmapped
) {
3026 pmap_lock_phys_page(m
->phys_page
);
3028 * go ahead and take the opportunity
3029 * to set 'pmapped' here so that we don't
3030 * need to grab this lock a 2nd time
3039 pmap_unlock_phys_page(m
->phys_page
);
3041 if (!m
->object
->internal
)
3042 OSAddAtomic(1, &vm_page_xpmapped_external_count
);
3044 if ((COMPRESSED_PAGER_IS_ACTIVE
) &&
3045 m
->object
->internal
&&
3046 m
->object
->pager
!= NULL
) {
3048 * This page could have been
3049 * uncompressed by the
3050 * compressor pager and its
3051 * contents might be only in
3053 * Since it's being mapped for
3054 * "execute" for the fist time,
3055 * make sure the icache is in
3058 pmap_sync_page_data_phys(m
->phys_page
);
3061 pmap_unlock_phys_page(m
->phys_page
);
3063 if (m
->pmapped
== FALSE
) {
3064 pmap_lock_phys_page(m
->phys_page
);
3066 pmap_unlock_phys_page(m
->phys_page
);
3069 if (vm_page_is_slideable(m
)) {
3070 boolean_t was_busy
= m
->busy
;
3072 vm_object_lock_assert_exclusive(m
->object
);
3075 kr
= vm_page_slide(m
, 0);
3078 PAGE_WAKEUP_DONE(m
);
3080 if (kr
!= KERN_SUCCESS
) {
3082 * This page has not been slid correctly,
3083 * do not do the pmap_enter() !
3084 * Let vm_fault_enter() return the error
3085 * so the caller can fail the fault.
3087 goto after_the_pmap_enter
;
3091 if (fault_type
& VM_PROT_WRITE
) {
3093 if (m
->wpmapped
== FALSE
) {
3094 vm_object_lock_assert_exclusive(m
->object
);
3098 if (must_disconnect
) {
3100 * We can only get here
3101 * because of the CSE logic
3103 assert(cs_enforcement_enabled
);
3104 pmap_disconnect(m
->phys_page
);
3106 * If we are faulting for a write, we can clear
3107 * the execute bit - that will ensure the page is
3108 * checked again before being executable, which
3109 * protects against a map switch.
3110 * This only happens the first time the page
3111 * gets tainted, so we won't get stuck here
3112 * to make an already writeable page executable.
3115 prot
&= ~VM_PROT_EXECUTE
;
3120 /* Prevent a deadlock by not
3121 * holding the object lock if we need to wait for a page in
3122 * pmap_enter() - <rdar://problem/7138958> */
3123 PMAP_ENTER_OPTIONS(pmap
, vaddr
, m
, prot
, fault_type
, 0,
3125 pmap_options
| PMAP_OPTIONS_NOWAIT
,
3128 if(pe_result
== KERN_RESOURCE_SHORTAGE
) {
3132 * this will be non-null in the case where we hold the lock
3133 * on the top-object in this chain... we can't just drop
3134 * the lock on the object we're inserting the page into
3135 * and recall the PMAP_ENTER since we can still cause
3136 * a deadlock if one of the critical paths tries to
3137 * acquire the lock on the top-object and we're blocked
3138 * in PMAP_ENTER waiting for memory... our only recourse
3139 * is to deal with it at a higher level where we can
3143 vm_pmap_enter_retried
++;
3144 goto after_the_pmap_enter
;
3146 /* The nonblocking version of pmap_enter did not succeed.
3147 * and we don't need to drop other locks and retry
3148 * at the level above us, so
3149 * use the blocking version instead. Requires marking
3150 * the page busy and unlocking the object */
3151 boolean_t was_busy
= m
->busy
;
3153 vm_object_lock_assert_exclusive(m
->object
);
3156 vm_object_unlock(m
->object
);
3158 PMAP_ENTER_OPTIONS(pmap
, vaddr
, m
, prot
, fault_type
,
3160 pmap_options
, pe_result
);
3162 /* Take the object lock again. */
3163 vm_object_lock(m
->object
);
3165 /* If the page was busy, someone else will wake it up.
3166 * Otherwise, we have to do it now. */
3169 PAGE_WAKEUP_DONE(m
);
3171 vm_pmap_enter_blocked
++;
3175 after_the_pmap_enter
:
void
vm_pre_fault(vm_map_offset_t vaddr)
{
	if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {

		vm_fault(current_map(),		/* map */
			 vaddr,			/* vaddr */
			 VM_PROT_READ,		/* fault_type */
			 FALSE,			/* change_wiring */
			 THREAD_UNINT,		/* interruptible */
			 NULL,			/* caller_pmap */
			 0 /* caller_pmap_addr */);
	}
}
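/*
 * Illustrative aside (not part of the original source): vm_pre_fault() takes
 * a single address and faults the page in for read if it has no physical
 * translation yet.  A hypothetical in-kernel caller that wants an entire
 * buffer resident before touching it could simply walk the range page by
 * page (sketch only; the loop and its name are not from this file):
 */
#if 0	/* illustrative sketch only -- never compiled */
static void
pre_fault_range(vm_map_offset_t start, vm_map_size_t size)
{
	vm_map_offset_t	addr = trunc_page(start);
	vm_map_offset_t	end  = round_page(start + size);

	for (; addr < end; addr += PAGE_SIZE)
		vm_pre_fault(addr);
}
#endif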
/*
 * Handle page faults, including pseudo-faults
 * used to change the wiring status of pages.
 *
 * Explicit continuations have been removed.
 *
 * vm_fault and vm_fault_page save mucho state
 * in the moral equivalent of a closure.  The state
 * structure is allocated when first entering vm_fault
 * and deallocated when leaving vm_fault.
 */

extern int _map_enter_debug;

unsigned long vm_fault_collapse_total = 0;
unsigned long vm_fault_collapse_skipped = 0;
kern_return_t
vm_fault(vm_map_t map,
	 vm_map_offset_t vaddr,
	 vm_prot_t fault_type,
	 boolean_t change_wiring,
	 int interruptible,
	 pmap_t caller_pmap,
	 vm_map_offset_t caller_pmap_addr)
{
	return vm_fault_internal(map, vaddr, fault_type, change_wiring,
				 interruptible, caller_pmap, caller_pmap_addr,
				 NULL);
}

kern_return_t
vm_fault_internal(vm_map_t map,
		  vm_map_offset_t vaddr,
		  vm_prot_t fault_type,
		  boolean_t change_wiring,
		  int interruptible,
		  pmap_t caller_pmap,
		  vm_map_offset_t caller_pmap_addr,
		  ppnum_t *physpage_p)
{
	vm_map_version_t	version;	/* Map version for verification */
	boolean_t		wired;		/* Should mapping be wired down? */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_object_t		old_copy_object; /* Saved copy object */
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	vm_page_t		m;		/* Fast access to result_page */
	kern_return_t		error_code;
	vm_object_t		cur_object;
	vm_object_offset_t	cur_offset;
	vm_object_t		new_object;
	boolean_t		interruptible_state;
	vm_map_t		real_map = map;
	vm_map_t		original_map = map;
	vm_prot_t		original_fault_type;
	struct vm_object_fault_info fault_info;
	boolean_t		need_collapse = FALSE;
	boolean_t		need_retry = FALSE;
	boolean_t		*need_retry_ptr = NULL;
	int			object_lock_type = 0;
	int			cur_object_lock_type;
	vm_object_t		top_object = VM_OBJECT_NULL;
	int			compressed_count_delta;
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
		((uint64_t)vaddr >> 32),
		(map == kernel_map),

	if (get_preemption_level() != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
			((uint64_t)vaddr >> 32),

		return (KERN_FAILURE);
	}

	interruptible_state = thread_interrupt_level(interruptible);

	VM_STAT_INCR(faults);
	current_task()->faults++;
	original_fault_type = fault_type;
	if (fault_type & VM_PROT_WRITE)
		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
	else
		object_lock_type = OBJECT_LOCK_SHARED;

	cur_object_lock_type = OBJECT_LOCK_SHARED;
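/*
 * Illustrative aside (not part of the original source): read faults take the
 * top object lock shared so many readers can fault concurrently, while write
 * faults take it exclusive because they may modify the object (COW, dirty
 * bits).  The same reader/writer split in a stand-alone user-space sketch
 * (hypothetical names, a pthread rwlock standing in for the object lock):
 */
#if 0	/* illustrative sketch only -- never compiled */
#include <pthread.h>
#include <stdbool.h>

static void
lock_object_for_fault(pthread_rwlock_t *object_lock, bool write_fault)
{
	if (write_fault)
		pthread_rwlock_wrlock(object_lock);	/* exclusive, like OBJECT_LOCK_EXCLUSIVE */
	else
		pthread_rwlock_rdlock(object_lock);	/* shared, like OBJECT_LOCK_SHARED */
}
#endif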
	/*
	 * assume we will hit a page in the cache
	 * otherwise, explicitly override with
	 * the real fault type once we determine it
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;

	/*
	 * Find the backing store object and offset into
	 * it to begin the search.
	 */
	fault_type = original_fault_type;

	vm_map_lock_read(map);
3323 kr
= vm_map_lookup_locked(&map
, vaddr
, fault_type
,
3324 object_lock_type
, &version
,
3325 &object
, &offset
, &prot
, &wired
,
3329 if (kr
!= KERN_SUCCESS
) {
3330 vm_map_unlock_read(map
);
3333 pmap
= real_map
->pmap
;
3334 fault_info
.interruptible
= interruptible
;
3335 fault_info
.stealth
= FALSE
;
3336 fault_info
.io_sync
= FALSE
;
3337 fault_info
.mark_zf_absent
= FALSE
;
3338 fault_info
.batch_pmap_op
= FALSE
;
3341 * If the page is wired, we must fault for the current protection
3342 * value, to avoid further faults.
3345 fault_type
= prot
| VM_PROT_WRITE
;
3347 * since we're treating this fault as a 'write'
3348 * we must hold the top object lock exclusively
3350 if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3352 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3354 if (vm_object_lock_upgrade(object
) == FALSE
) {
3356 * couldn't upgrade, so explictly
3357 * take the lock exclusively
3359 vm_object_lock(object
);
#if	VM_FAULT_CLASSIFY
	/*
	 * Temporary data gathering code
	 */
	vm_fault_classify(object, offset, fault_type);
#endif
	/*
	 * Fast fault code.  The basic idea is to do as much as
	 * possible while holding the map lock and object locks.
	 * Busy pages are not used until the object lock has to
	 * be dropped to do something (copy, zero fill, pmap enter).
	 * Similarly, paging references aren't acquired until that
	 * point, and object references aren't used.
	 *
	 * If we can figure out what to do
	 * (zero fill, copy on write, pmap enter) while holding
	 * the locks, then it gets done.  Otherwise, we give up,
	 * and use the original fault path (which doesn't hold
	 * the map lock, and relies on busy pages).
	 * The give up cases include:
	 *	- Have to talk to pager.
	 *	- Page is busy, absent or in error.
	 *	- Pager has locked out desired access.
	 *	- Fault needs to be restarted.
	 *	- Have to push page into copy object.
	 *
	 * The code is an infinite loop that moves one level down
	 * the shadow chain each time.  cur_object and cur_offset
	 * refer to the current object being examined. object and offset
	 * are the original object from the map.  The loop is at the
	 * top level if and only if object and cur_object are the same.
	 *
	 * Invariants:  Map lock is held throughout.  Lock is held on
	 *		original object and cur_object (if different) when
	 *		continuing or exiting loop.
	 */
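/*
 * Illustrative aside (not part of the original source): stripped of the
 * locking detail, the fast path described above has this shape -- walk the
 * shadow chain under the locks already held, handle the easy cases inline,
 * and bail out to the slow path the moment anything complicated shows up.
 * This is a structural sketch only; the helpers and labels named below are
 * hypothetical and do not exist in this file:
 */
#if 0	/* illustrative sketch only -- never compiled */
	cur_object = object;
	cur_offset = offset;

	while (TRUE) {
		m = vm_page_lookup(cur_object, cur_offset);

		if (m != VM_PAGE_NULL) {
			if (page_is_busy_absent_or_in_error(m))
				break;			/* give up: slow path */
			if (read_fault_or_top_level_write(m))
				goto enter_pmap;	/* easy case: map it now */
			goto copy_on_write;		/* copy into the top object */
		}
		if (cur_object->pager_created)
			break;				/* have to talk to a pager: slow path */
		if (cur_object->shadow == VM_OBJECT_NULL)
			goto zero_fill;			/* no backing page anywhere: zero fill */

		/* move one level down the shadow chain and keep looking */
		cur_offset += cur_object->vo_shadow_offset;
		cur_object = cur_object->shadow;
	}
	/* slow path: vm_fault_page() with busy pages and paging references */
#endif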
3404 * If this page is to be inserted in a copy delay object
3405 * for writing, and if the object has a copy, then the
3406 * copy delay strategy is implemented in the slow fault page.
3408 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_DELAY
&&
3409 object
->copy
!= VM_OBJECT_NULL
&& (fault_type
& VM_PROT_WRITE
))
3410 goto handle_copy_delay
;
3412 cur_object
= object
;
3413 cur_offset
= offset
;
3416 if (!cur_object
->pager_created
&&
3417 cur_object
->phys_contiguous
) /* superpage */
3420 if (cur_object
->blocked_access
) {
3422 * Access to this VM object has been blocked.
3423 * Let the slow path handle it.
3428 m
= vm_page_lookup(cur_object
, cur_offset
);
3430 if (m
!= VM_PAGE_NULL
) {
3432 wait_result_t result
;
3435 * in order to do the PAGE_ASSERT_WAIT, we must
3436 * have object that 'm' belongs to locked exclusively
3438 if (object
!= cur_object
) {
3440 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3442 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3444 if (vm_object_lock_upgrade(cur_object
) == FALSE
) {
3446 * couldn't upgrade so go do a full retry
3447 * immediately since we can no longer be
3448 * certain about cur_object (since we
3449 * don't hold a reference on it)...
3450 * first drop the top object lock
3452 vm_object_unlock(object
);
3454 vm_map_unlock_read(map
);
3455 if (real_map
!= map
)
3456 vm_map_unlock(real_map
);
3461 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3463 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3465 if (vm_object_lock_upgrade(object
) == FALSE
) {
3467 * couldn't upgrade, so explictly take the lock
3468 * exclusively and go relookup the page since we
3469 * will have dropped the object lock and
3470 * a different thread could have inserted
3471 * a page at this offset
3472 * no need for a full retry since we're
3473 * at the top level of the object chain
3475 vm_object_lock(object
);
3480 if (m
->pageout_queue
&& m
->object
->internal
&& COMPRESSED_PAGER_IS_ACTIVE
) {
3482 * m->busy == TRUE and the object is locked exclusively
3483 * if m->pageout_queue == TRUE after we acquire the
3484 * queues lock, we are guaranteed that it is stable on
3485 * the pageout queue and therefore reclaimable
3487 * NOTE: this is only true for the internal pageout queue
3488 * in the compressor world
3490 vm_page_lock_queues();
3492 if (m
->pageout_queue
) {
3493 vm_pageout_throttle_up(m
);
3494 vm_page_unlock_queues();
3496 PAGE_WAKEUP_DONE(m
);
3497 goto reclaimed_from_pageout
;
3499 vm_page_unlock_queues();
3501 if (object
!= cur_object
)
3502 vm_object_unlock(object
);
3504 vm_map_unlock_read(map
);
3505 if (real_map
!= map
)
3506 vm_map_unlock(real_map
);
3508 result
= PAGE_ASSERT_WAIT(m
, interruptible
);
3510 vm_object_unlock(cur_object
);
3512 if (result
== THREAD_WAITING
) {
3513 result
= thread_block(THREAD_CONTINUE_NULL
);
3515 counter(c_vm_fault_page_block_busy_kernel
++);
3517 if (result
== THREAD_AWAKENED
|| result
== THREAD_RESTART
)
3523 reclaimed_from_pageout
:
3525 if (object
!= cur_object
) {
3526 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3527 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3529 vm_object_unlock(object
);
3530 vm_object_unlock(cur_object
);
3532 vm_map_unlock_read(map
);
3533 if (real_map
!= map
)
3534 vm_map_unlock(real_map
);
3539 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3541 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3543 if (vm_object_lock_upgrade(object
) == FALSE
) {
3545 * couldn't upgrade, so explictly take the lock
3546 * exclusively and go relookup the page since we
3547 * will have dropped the object lock and
3548 * a different thread could have inserted
3549 * a page at this offset
3550 * no need for a full retry since we're
3551 * at the top level of the object chain
3553 vm_object_lock(object
);
3560 vm_pageout_steal_laundry(m
, FALSE
);
3563 if (m
->phys_page
== vm_page_guard_addr
) {
3565 * Guard page: let the slow path deal with it
3569 if (m
->unusual
&& (m
->error
|| m
->restart
|| m
->private || m
->absent
)) {
3571 * Unusual case... let the slow path deal with it
3575 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m
->object
)) {
3576 if (object
!= cur_object
)
3577 vm_object_unlock(object
);
3578 vm_map_unlock_read(map
);
3579 if (real_map
!= map
)
3580 vm_map_unlock(real_map
);
3581 vm_object_unlock(cur_object
);
3582 kr
= KERN_MEMORY_ERROR
;
3589 * We've soft-faulted (because it's not in the page
3590 * table) on an encrypted page.
3591 * Keep the page "busy" so that no one messes with
3592 * it during the decryption.
3593 * Release the extra locks we're holding, keep only
3594 * the page's VM object lock.
3596 * in order to set 'busy' on 'm', we must
3597 * have object that 'm' belongs to locked exclusively
3599 if (object
!= cur_object
) {
3600 vm_object_unlock(object
);
3602 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3604 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3606 if (vm_object_lock_upgrade(cur_object
) == FALSE
) {
3608 * couldn't upgrade so go do a full retry
3609 * immediately since we've already dropped
3610 * the top object lock associated with this page
3611 * and the current one got dropped due to the
3612 * failed upgrade... the state is no longer valid
3614 vm_map_unlock_read(map
);
3615 if (real_map
!= map
)
3616 vm_map_unlock(real_map
);
3621 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3623 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3625 if (vm_object_lock_upgrade(object
) == FALSE
) {
3627 * couldn't upgrade, so explictly take the lock
3628 * exclusively and go relookup the page since we
3629 * will have dropped the object lock and
3630 * a different thread could have inserted
3631 * a page at this offset
3632 * no need for a full retry since we're
3633 * at the top level of the object chain
3635 vm_object_lock(object
);
3642 vm_map_unlock_read(map
);
3643 if (real_map
!= map
)
3644 vm_map_unlock(real_map
);
3646 vm_page_decrypt(m
, 0);
3649 PAGE_WAKEUP_DONE(m
);
3651 vm_object_unlock(cur_object
);
3653 * Retry from the top, in case anything
3654 * changed while we were decrypting...
3658 ASSERT_PAGE_DECRYPTED(m
);
3660 if(vm_page_is_slideable(m
)) {
3662 * We might need to slide this page, and so,
3663 * we want to hold the VM object exclusively.
3665 if (object
!= cur_object
) {
3666 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3667 vm_object_unlock(object
);
3668 vm_object_unlock(cur_object
);
3670 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3672 vm_map_unlock_read(map
);
3673 if (real_map
!= map
)
3674 vm_map_unlock(real_map
);
3678 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3680 vm_object_unlock(object
);
3681 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3682 vm_map_unlock_read(map
);
3687 if (VM_FAULT_NEED_CS_VALIDATION(map
->pmap
, m
) ||
3688 (physpage_p
!= NULL
&& (prot
& VM_PROT_WRITE
))) {
3689 upgrade_for_validation
:
3691 * We might need to validate this page
3692 * against its code signature, so we
3693 * want to hold the VM object exclusively.
3695 if (object
!= cur_object
) {
3696 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
3697 vm_object_unlock(object
);
3698 vm_object_unlock(cur_object
);
3700 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3702 vm_map_unlock_read(map
);
3703 if (real_map
!= map
)
3704 vm_map_unlock(real_map
);
3709 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
3711 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
3713 if (vm_object_lock_upgrade(object
) == FALSE
) {
3715 * couldn't upgrade, so explictly take the lock
3716 * exclusively and go relookup the page since we
3717 * will have dropped the object lock and
3718 * a different thread could have inserted
3719 * a page at this offset
3720 * no need for a full retry since we're
3721 * at the top level of the object chain
3723 vm_object_lock(object
);
		/*
		 * Two cases of map in faults:
		 *	- At top level w/o copy object.
		 *	- Read fault anywhere.
		 *		--> must disallow write.
		 */

		if (object == cur_object && object->copy == VM_OBJECT_NULL) {
3741 if ((fault_type
& VM_PROT_WRITE
) == 0) {
3743 if (object
!= cur_object
) {
3745 * We still need to hold the top object
3746 * lock here to prevent a race between
3747 * a read fault (taking only "shared"
3748 * locks) and a write fault (taking
3749 * an "exclusive" lock on the top
3751 * Otherwise, as soon as we release the
3752 * top lock, the write fault could
3753 * proceed and actually complete before
3754 * the read fault, and the copied page's
3755 * translation could then be overwritten
3756 * by the read fault's translation for
3757 * the original page.
3759 * Let's just record what the top object
3760 * is and we'll release it later.
3762 top_object
= object
;
3765 * switch to the object that has the new page
3767 object
= cur_object
;
3768 object_lock_type
= cur_object_lock_type
;
3772 * prepare for the pmap_enter...
3773 * object and map are both locked
3774 * m contains valid data
3775 * object == m->object
3776 * cur_object == NULL or it's been unlocked
3777 * no paging references on either object or cur_object
3779 if (top_object
!= VM_OBJECT_NULL
|| object_lock_type
!= OBJECT_LOCK_EXCLUSIVE
)
3780 need_retry_ptr
= &need_retry
;
3782 need_retry_ptr
= NULL
;
3785 kr
= vm_fault_enter(m
,
3792 fault_info
.no_cache
,
3793 fault_info
.cs_bypass
,
3794 fault_info
.user_tag
,
3795 fault_info
.pmap_options
,
3799 kr
= vm_fault_enter(m
,
3806 fault_info
.no_cache
,
3807 fault_info
.cs_bypass
,
3808 fault_info
.user_tag
,
3809 fault_info
.pmap_options
,
3814 if (kr
== KERN_SUCCESS
&&
3815 physpage_p
!= NULL
) {
3816 /* for vm_map_wire_and_extract() */
3817 *physpage_p
= m
->phys_page
;
3818 if (prot
& VM_PROT_WRITE
) {
3819 vm_object_lock_assert_exclusive(
3825 if (top_object
!= VM_OBJECT_NULL
) {
3827 * It's safe to drop the top object
3828 * now that we've done our
3829 * vm_fault_enter(). Any other fault
3830 * in progress for that virtual
3831 * address will either find our page
3832 * and translation or put in a new page
3835 vm_object_unlock(top_object
);
3836 top_object
= VM_OBJECT_NULL
;
3839 if (need_collapse
== TRUE
)
3840 vm_object_collapse(object
, offset
, TRUE
);
3842 if (need_retry
== FALSE
&&
3843 (type_of_fault
== DBG_PAGEIND_FAULT
|| type_of_fault
== DBG_PAGEINV_FAULT
|| type_of_fault
== DBG_CACHE_HIT_FAULT
)) {
3845 * evaluate access pattern and update state
3846 * vm_fault_deactivate_behind depends on the
3847 * state being up to date
3849 vm_fault_is_sequential(object
, cur_offset
, fault_info
.behavior
);
3851 vm_fault_deactivate_behind(object
, cur_offset
, fault_info
.behavior
);
3854 * That's it, clean up and return.
3857 PAGE_WAKEUP_DONE(m
);
3859 vm_object_unlock(object
);
3861 vm_map_unlock_read(map
);
3862 if (real_map
!= map
)
3863 vm_map_unlock(real_map
);
3865 if (need_retry
== TRUE
) {
3867 * vm_fault_enter couldn't complete the PMAP_ENTER...
3868 * at this point we don't hold any locks so it's safe
3869 * to ask the pmap layer to expand the page table to
3870 * accommodate this mapping... once expanded, we'll
3871 * re-drive the fault which should result in vm_fault_enter
3872 * being able to successfully enter the mapping this time around
3874 (void)pmap_enter_options(
3875 pmap
, vaddr
, 0, 0, 0, 0, 0,
3876 PMAP_OPTIONS_NOENTER
, NULL
);
3884 * COPY ON WRITE FAULT
3886 assert(object_lock_type
== OBJECT_LOCK_EXCLUSIVE
);
3888 if ((throttle_delay
= vm_page_throttled())) {
3890 * drop all of our locks...
3891 * wait until the free queue is
3892 * pumped back up and then
3895 if (object
!= cur_object
)
3896 vm_object_unlock(cur_object
);
3897 vm_object_unlock(object
);
3898 vm_map_unlock_read(map
);
3899 if (real_map
!= map
)
3900 vm_map_unlock(real_map
);
3902 VM_DEBUG_EVENT(vmf_cowdelay
, VMF_COWDELAY
, DBG_FUNC_NONE
, throttle_delay
, 0, 0, 0);
3904 delay(throttle_delay
);
3906 if (!current_thread_aborted() && vm_page_wait((change_wiring
) ?
3914 * If objects match, then
3915 * object->copy must not be NULL (else control
3916 * would be in previous code block), and we
3917 * have a potential push into the copy object
3918 * with which we can't cope with here.
3920 if (cur_object
== object
) {
3922 * must take the slow path to
3923 * deal with the copy push
3929 * This is now a shadow based copy on write
3930 * fault -- it requires a copy up the shadow
3934 if ((cur_object_lock_type
== OBJECT_LOCK_SHARED
) &&
3935 VM_FAULT_NEED_CS_VALIDATION(NULL
, m
)) {
3936 goto upgrade_for_validation
;
3940 * Allocate a page in the original top level
3941 * object. Give up if allocate fails. Also
3942 * need to remember current page, as it's the
3943 * source of the copy.
3945 * at this point we hold locks on both
3946 * object and cur_object... no need to take
3947 * paging refs or mark pages BUSY since
3948 * we don't drop either object lock until
3949 * the page has been copied and inserted
3954 if (m
== VM_PAGE_NULL
) {
3956 * no free page currently available...
3957 * must take the slow path
3962 * Now do the copy. Mark the source page busy...
3964 * NOTE: This code holds the map lock across
3967 vm_page_copy(cur_m
, m
);
3968 vm_page_insert(m
, object
, offset
);
3969 SET_PAGE_DIRTY(m
, FALSE
);
3972 * Now cope with the source page and object
3974 if (object
->ref_count
> 1 && cur_m
->pmapped
)
3975 pmap_disconnect(cur_m
->phys_page
);
3977 if (cur_m
->clustered
) {
3978 VM_PAGE_COUNT_AS_PAGEIN(cur_m
);
3979 VM_PAGE_CONSUME_CLUSTERED(cur_m
);
3981 need_collapse
= TRUE
;
3983 if (!cur_object
->internal
&&
3984 cur_object
->copy_strategy
== MEMORY_OBJECT_COPY_DELAY
) {
3986 * The object from which we've just
3987 * copied a page is most probably backed
3988 * by a vnode. We don't want to waste too
3989 * much time trying to collapse the VM objects
3990 * and create a bottleneck when several tasks
3991 * map the same file.
3993 if (cur_object
->copy
== object
) {
3995 * Shared mapping or no COW yet.
3996 * We can never collapse a copy
3997 * object into its backing object.
3999 need_collapse
= FALSE
;
4000 } else if (cur_object
->copy
== object
->shadow
&&
4001 object
->shadow
->resident_page_count
== 0) {
4003 * Shared mapping after a COW occurred.
4005 need_collapse
= FALSE
;
4008 vm_object_unlock(cur_object
);
4010 if (need_collapse
== FALSE
)
4011 vm_fault_collapse_skipped
++;
4012 vm_fault_collapse_total
++;
4014 type_of_fault
= DBG_COW_FAULT
;
4015 VM_STAT_INCR(cow_faults
);
4016 DTRACE_VM2(cow_fault
, int, 1, (uint64_t *), NULL
);
4017 current_task()->cow_faults
++;
4023 * No page at cur_object, cur_offset... m == NULL
4025 if (cur_object
->pager_created
) {
4026 int compressor_external_state
= VM_EXTERNAL_STATE_UNKNOWN
;
4028 if (MUST_ASK_PAGER(cur_object
, cur_offset
, compressor_external_state
) == TRUE
) {
4030 int c_flags
= C_DONT_BLOCK
;
4031 boolean_t insert_cur_object
= FALSE
;
4034 * May have to talk to a pager...
4035 * if so, take the slow path by
4036 * doing a 'break' from the while (TRUE) loop
4038 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
4039 * if the compressor is active and the page exists there
4041 if (compressor_external_state
!= VM_EXTERNAL_STATE_EXISTS
)
4044 if (map
== kernel_map
|| real_map
== kernel_map
) {
4046 * can't call into the compressor with the kernel_map
4047 * lock held, since the compressor may try to operate
4048 * on the kernel map in order to return an empty c_segment
4052 if (object
!= cur_object
) {
4053 if (fault_type
& VM_PROT_WRITE
)
4056 insert_cur_object
= TRUE
;
4058 if (insert_cur_object
== TRUE
) {
4060 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
) {
4062 cur_object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
4064 if (vm_object_lock_upgrade(cur_object
) == FALSE
) {
4066 * couldn't upgrade so go do a full retry
4067 * immediately since we can no longer be
4068 * certain about cur_object (since we
4069 * don't hold a reference on it)...
4070 * first drop the top object lock
4072 vm_object_unlock(object
);
4074 vm_map_unlock_read(map
);
4075 if (real_map
!= map
)
4076 vm_map_unlock(real_map
);
4081 } else if (object_lock_type
== OBJECT_LOCK_SHARED
) {
4083 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
4085 if (object
!= cur_object
) {
4087 * we can't go for the upgrade on the top
4088 * lock since the upgrade may block waiting
4089 * for readers to drain... since we hold
4090 * cur_object locked at this point, waiting
4091 * for the readers to drain would represent
4092 * a lock order inversion since the lock order
4093 * for objects is the reference order in the
4096 vm_object_unlock(object
);
4097 vm_object_unlock(cur_object
);
4099 vm_map_unlock_read(map
);
4100 if (real_map
!= map
)
4101 vm_map_unlock(real_map
);
4105 if (vm_object_lock_upgrade(object
) == FALSE
) {
4107 * couldn't upgrade, so explictly take the lock
4108 * exclusively and go relookup the page since we
4109 * will have dropped the object lock and
4110 * a different thread could have inserted
4111 * a page at this offset
4112 * no need for a full retry since we're
4113 * at the top level of the object chain
4115 vm_object_lock(object
);
4122 if (m
== VM_PAGE_NULL
) {
4124 * no free page currently available...
4125 * must take the slow path
4131 * The object is and remains locked
4132 * so no need to take a
4133 * "paging_in_progress" reference.
4135 boolean_t shared_lock
;
4136 if ((object
== cur_object
&&
4137 object_lock_type
== OBJECT_LOCK_EXCLUSIVE
) ||
4138 (object
!= cur_object
&&
4139 cur_object_lock_type
== OBJECT_LOCK_EXCLUSIVE
)) {
4140 shared_lock
= FALSE
;
4145 kr
= vm_compressor_pager_get(
4148 cur_object
->paging_offset
),
4152 &compressed_count_delta
);
4154 vm_compressor_pager_count(
4156 compressed_count_delta
,
4160 if (kr
!= KERN_SUCCESS
) {
4167 * If the object is purgeable, its
4168 * owner's purgeable ledgers will be
4169 * updated in vm_page_insert() but the
4170 * page was also accounted for in a
4171 * "compressed purgeable" ledger, so
4174 if (object
!= cur_object
&&
4175 !insert_cur_object
) {
4177 * We're not going to insert
4178 * the decompressed page into
4179 * the object it came from.
4181 * We're dealing with a
4182 * copy-on-write fault on
4184 * We're going to decompress
4185 * the page directly into the
4186 * target "object" while
4187 * keepin the compressed
4188 * page for "cur_object", so
4189 * no ledger update in that
4192 } else if ((cur_object
->purgable
==
4193 VM_PURGABLE_DENY
) ||
4194 (cur_object
->vo_purgeable_owner
==
4197 * "cur_object" is not purgeable
4198 * or is not owned, so no
4199 * purgeable ledgers to update.
4203 * One less compressed
4204 * purgeable page for
4205 * cur_object's owner.
4207 vm_purgeable_compressed_update(
4212 if (insert_cur_object
) {
4213 vm_page_insert(m
, cur_object
, cur_offset
);
4215 vm_page_insert(m
, object
, offset
);
4218 if ((m
->object
->wimg_bits
& VM_WIMG_MASK
) != VM_WIMG_USE_DEFAULT
) {
4220 * If the page is not cacheable,
4221 * we can't let its contents
4222 * linger in the data cache
4223 * after the decompression.
4225 pmap_sync_page_attributes_phys(m
->phys_page
);
4228 type_of_fault
= my_fault_type
;
4230 VM_STAT_INCR(decompressions
);
4232 if (cur_object
!= object
) {
4233 if (insert_cur_object
) {
4234 top_object
= object
;
4236 * switch to the object that has the new page
4238 object
= cur_object
;
4239 object_lock_type
= cur_object_lock_type
;
4241 vm_object_unlock(cur_object
);
4242 cur_object
= object
;
4248 * existence map present and indicates
4249 * that the pager doesn't have this page
4252 if (cur_object
->shadow
== VM_OBJECT_NULL
) {
4254 * Zero fill fault. Page gets
4255 * inserted into the original object.
4257 if (cur_object
->shadow_severed
||
4258 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object
))
4260 if (object
!= cur_object
)
4261 vm_object_unlock(cur_object
);
4262 vm_object_unlock(object
);
4264 vm_map_unlock_read(map
);
4265 if (real_map
!= map
)
4266 vm_map_unlock(real_map
);
4268 kr
= KERN_MEMORY_ERROR
;
4271 if ((throttle_delay
= vm_page_throttled())) {
4273 * drop all of our locks...
4274 * wait until the free queue is
4275 * pumped back up and then
4278 if (object
!= cur_object
)
4279 vm_object_unlock(cur_object
);
4280 vm_object_unlock(object
);
4281 vm_map_unlock_read(map
);
4282 if (real_map
!= map
)
4283 vm_map_unlock(real_map
);
4285 VM_DEBUG_EVENT(vmf_zfdelay
, VMF_ZFDELAY
, DBG_FUNC_NONE
, throttle_delay
, 0, 0, 0);
4287 delay(throttle_delay
);
4289 if (!current_thread_aborted() && vm_page_wait((change_wiring
) ?
4296 if (vm_backing_store_low
) {
4298 * we are protecting the system from
4299 * backing store exhaustion...
4300 * must take the slow path if we're
4303 if (!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV
))
4306 if (cur_object
!= object
) {
4307 vm_object_unlock(cur_object
);
4309 cur_object
= object
;
4311 if (object_lock_type
== OBJECT_LOCK_SHARED
) {
4313 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
4315 if (vm_object_lock_upgrade(object
) == FALSE
) {
4317 * couldn't upgrade so do a full retry on the fault
4318 * since we dropped the object lock which
4319 * could allow another thread to insert
4320 * a page at this offset
4322 vm_map_unlock_read(map
);
4323 if (real_map
!= map
)
4324 vm_map_unlock(real_map
);
4329 m
= vm_page_alloc(object
, offset
);
4331 if (m
== VM_PAGE_NULL
) {
4333 * no free page currently available...
4334 * must take the slow path
4340 * Now zero fill page...
4341 * the page is probably going to
4342 * be written soon, so don't bother
4343 * to clear the modified bit
4345 * NOTE: This code holds the map
4346 * lock across the zero fill.
4348 type_of_fault
= vm_fault_zero_page(m
, map
->no_zero_fill
);
4353 * On to the next level in the shadow chain
4355 cur_offset
+= cur_object
->vo_shadow_offset
;
4356 new_object
= cur_object
->shadow
;
4359 * take the new_object's lock with the indicated state
4361 if (cur_object_lock_type
== OBJECT_LOCK_SHARED
)
4362 vm_object_lock_shared(new_object
);
4364 vm_object_lock(new_object
);
4366 if (cur_object
!= object
)
4367 vm_object_unlock(cur_object
);
4369 cur_object
= new_object
;
4375 * Cleanup from fast fault failure. Drop any object
4376 * lock other than original and drop map lock.
4378 if (object
!= cur_object
)
4379 vm_object_unlock(cur_object
);
4382 * must own the object lock exclusively at this point
4384 if (object_lock_type
== OBJECT_LOCK_SHARED
) {
4385 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
4387 if (vm_object_lock_upgrade(object
) == FALSE
) {
4389 * couldn't upgrade, so explictly
4390 * take the lock exclusively
4391 * no need to retry the fault at this
4392 * point since "vm_fault_page" will
4393 * completely re-evaluate the state
4395 vm_object_lock(object
);
4400 vm_map_unlock_read(map
);
4401 if (real_map
!= map
)
4402 vm_map_unlock(real_map
);
4405 * Make a reference to this object to
4406 * prevent its disposal while we are messing with
4407 * it. Once we have the reference, the map is free
4408 * to be diddled. Since objects reference their
4409 * shadows (and copies), they will stay around as well.
4411 vm_object_reference_locked(object
);
4412 vm_object_paging_begin(object
);
4414 XPR(XPR_VM_FAULT
,"vm_fault -> vm_fault_page\n",0,0,0,0,0);
4418 result_page
= VM_PAGE_NULL
;
4419 kr
= vm_fault_page(object
, offset
, fault_type
,
4420 (change_wiring
&& !wired
),
4421 FALSE
, /* page not looked up */
4422 &prot
, &result_page
, &top_page
,
4424 &error_code
, map
->no_zero_fill
,
4425 FALSE
, &fault_info
);
	/*
	 * if kr != VM_FAULT_SUCCESS, then the paging reference
	 * has been dropped and the object unlocked... the ref_count
	 * is still retained
	 *
	 * if kr == VM_FAULT_SUCCESS, then the paging reference
	 * is still held along with the ref_count on the original object
	 *
	 *	the object is returned locked with a paging reference
	 *
	 *	if top_page != NULL, then it's BUSY and the
	 *	object it belongs to has a paging reference
	 *	but is returned unlocked
	 */
	if (kr != VM_FAULT_SUCCESS &&
	    kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
		/*
		 * we didn't succeed, lose the object reference immediately.
		 */
		vm_object_deallocate(object);

		/*
		 * See why we failed, and take corrective action.
		 */
4449 * See why we failed, and take corrective action.
4452 case VM_FAULT_MEMORY_SHORTAGE
:
4453 if (vm_page_wait((change_wiring
) ?
4460 case VM_FAULT_INTERRUPTED
:
4463 case VM_FAULT_RETRY
:
4465 case VM_FAULT_MEMORY_ERROR
:
4469 kr
= KERN_MEMORY_ERROR
;
4472 panic("vm_fault: unexpected error 0x%x from "
4473 "vm_fault_page()\n", kr
);
4478 if (m
!= VM_PAGE_NULL
) {
4479 assert((change_wiring
&& !wired
) ?
4480 (top_page
== VM_PAGE_NULL
) :
4481 ((top_page
== VM_PAGE_NULL
) == (m
->object
== object
)));
	/*
	 * What to do with the resulting page from vm_fault_page
	 * if it doesn't get entered into the physical map:
	 */
#define RELEASE_PAGE(m)						\
	PAGE_WAKEUP_DONE(m);					\
	if (!m->active && !m->inactive && !m->throttled) {	\
		vm_page_lockspin_queues();			\
		if (!m->active && !m->inactive && !m->throttled)\
			vm_page_activate(m);			\
		vm_page_unlock_queues();			\
	}
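/*
 * Illustrative aside (not part of the original source): RELEASE_PAGE() uses a
 * double-checked pattern -- test the page's queue state without the lock,
 * and only if it looks off-queue take the page-queues spinlock and test
 * again before activating, so the common case pays nothing for the lock.
 * The same shape in a stand-alone user-space sketch (all names hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled */
#include <pthread.h>
#include <stdbool.h>

struct page_model {
	bool on_any_queue;
};

static void enqueue_active(struct page_model *p);	/* hypothetical helper */

static pthread_mutex_t	queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void
activate_if_unqueued(struct page_model *p)
{
	if (!p->on_any_queue) {			/* cheap unlocked check */
		pthread_mutex_lock(&queue_lock);
		if (!p->on_any_queue)		/* re-check under the lock */
			enqueue_active(p);
		pthread_mutex_unlock(&queue_lock);
	}
}
#endif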
	/*
	 * We must verify that the maps have not changed
	 * since our last lookup.
	 */
4503 if (m
!= VM_PAGE_NULL
) {
4504 old_copy_object
= m
->object
->copy
;
4505 vm_object_unlock(m
->object
);
4507 old_copy_object
= VM_OBJECT_NULL
;
4508 vm_object_unlock(object
);
4512 * no object locks are held at this point
4514 if ((map
!= original_map
) || !vm_map_verify(map
, &version
)) {
4515 vm_object_t retry_object
;
4516 vm_object_offset_t retry_offset
;
4517 vm_prot_t retry_prot
;
		/*
		 * To avoid trying to write_lock the map while another
		 * thread has it read_locked (in vm_map_pageable), we
		 * do not try for write permission.  If the page is
		 * still writable, we will get write permission.  If it
		 * is not, or has been marked needs_copy, we enter the
		 * mapping without write permission, and will merely
		 * take another fault.
		 */
4529 vm_map_lock_read(map
);
4531 kr
= vm_map_lookup_locked(&map
, vaddr
,
4532 fault_type
& ~VM_PROT_WRITE
,
4533 OBJECT_LOCK_EXCLUSIVE
, &version
,
4534 &retry_object
, &retry_offset
, &retry_prot
,
4538 pmap
= real_map
->pmap
;
4540 if (kr
!= KERN_SUCCESS
) {
4541 vm_map_unlock_read(map
);
4543 if (m
!= VM_PAGE_NULL
) {
4545 * retake the lock so that
4546 * we can drop the paging reference
4547 * in vm_fault_cleanup and do the
4548 * PAGE_WAKEUP_DONE in RELEASE_PAGE
4550 vm_object_lock(m
->object
);
4554 vm_fault_cleanup(m
->object
, top_page
);
4557 * retake the lock so that
4558 * we can drop the paging reference
4559 * in vm_fault_cleanup
4561 vm_object_lock(object
);
4563 vm_fault_cleanup(object
, top_page
);
4565 vm_object_deallocate(object
);
4569 vm_object_unlock(retry_object
);
4571 if ((retry_object
!= object
) || (retry_offset
!= offset
)) {
4573 vm_map_unlock_read(map
);
4574 if (real_map
!= map
)
4575 vm_map_unlock(real_map
);
4577 if (m
!= VM_PAGE_NULL
) {
4579 * retake the lock so that
4580 * we can drop the paging reference
4581 * in vm_fault_cleanup and do the
4582 * PAGE_WAKEUP_DONE in RELEASE_PAGE
4584 vm_object_lock(m
->object
);
4588 vm_fault_cleanup(m
->object
, top_page
);
4591 * retake the lock so that
4592 * we can drop the paging reference
4593 * in vm_fault_cleanup
4595 vm_object_lock(object
);
4597 vm_fault_cleanup(object
, top_page
);
4599 vm_object_deallocate(object
);
		/*
		 * Check whether the protection has changed or the object
		 * has been copied while we left the map unlocked.
		 */
		prot &= retry_prot;
	}
	if (m != VM_PAGE_NULL) {
		vm_object_lock(m->object);

		if (m->object->copy != old_copy_object) {
			/*
			 * The copy object changed while the top-level object
			 * was unlocked, so take away write permission.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	} else
		vm_object_lock(object);

	/*
	 * If we want to wire down this page, but no longer have
	 * adequate permissions, we must start all over.
	 */
	if (wired && (fault_type != (prot | VM_PROT_WRITE))) {

		vm_map_verify_done(map, &version);
		if (real_map != map)
			vm_map_unlock(real_map);

		if (m != VM_PAGE_NULL) {
			RELEASE_PAGE(m);

			vm_fault_cleanup(m->object, top_page);
		} else
			vm_fault_cleanup(object, top_page);

		vm_object_deallocate(object);

		goto RetryFault;
	}
	if (m != VM_PAGE_NULL) {
		/*
		 * Put this page into the physical map.
		 * We had to do the unlock above because pmap_enter
		 * may cause other faults.  The page may be on
		 * the pageout queues.  If the pageout daemon comes
		 * across the page, it will remove it from the queues.
		 */
		if (caller_pmap) {
			kr = vm_fault_enter(m,
					    caller_pmap,
					    caller_pmap_addr,
					    prot,
					    fault_type,
					    wired,
					    change_wiring,
					    fault_info.no_cache,
					    fault_info.cs_bypass,
					    fault_info.user_tag,
					    fault_info.pmap_options,
					    NULL,
					    &type_of_fault);
		} else {
			kr = vm_fault_enter(m,
					    pmap,
					    vaddr,
					    prot,
					    fault_type,
					    wired,
					    change_wiring,
					    fault_info.no_cache,
					    fault_info.cs_bypass,
					    fault_info.user_tag,
					    fault_info.pmap_options,
					    NULL,
					    &type_of_fault);
		}
		if (kr != KERN_SUCCESS) {
			/* abort this page fault */
			vm_map_verify_done(map, &version);
			if (real_map != map)
				vm_map_unlock(real_map);
			PAGE_WAKEUP_DONE(m);
			vm_fault_cleanup(m->object, top_page);
			vm_object_deallocate(object);
			goto done;
		}
		if (physpage_p != NULL) {
			/* for vm_map_wire_and_extract() */
			*physpage_p = m->phys_page;
			if (prot & VM_PROT_WRITE) {
				vm_object_lock_assert_exclusive(m->object);
				m->dirty = TRUE;
			}
		}
	} else {

		vm_map_entry_t		entry;
		vm_map_offset_t		laddr;
		vm_map_offset_t		ldelta, hdelta;
		/*
		 * do a pmap block mapping from the physical address
		 * in the device
		 */

#ifdef ppc
		/* While we do not worry about execution protection in   */
		/* general, certain pages may have instruction execution */
		/* disallowed.  We will check here, and if not allowed   */
		/* to execute, we return with a protection failure.      */

		if ((fault_type & VM_PROT_EXECUTE) &&
		    (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {

			vm_map_verify_done(map, &version);

			if (real_map != map)
				vm_map_unlock(real_map);

			vm_fault_cleanup(object, top_page);
			vm_object_deallocate(object);

			kr = KERN_PROTECTION_FAILURE;
			goto done;
		}
#endif	/* ppc */
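
		/*
		 * No VM page to enter: this is a physically contiguous
		 * (device-like) object.  Walk down any submaps to the map
		 * entry backing this address, tracking how far the entry
		 * extends below (ldelta) and above (hdelta) the faulting
		 * address, so that one block mapping can cover the range.
		 */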
		if (real_map != map)
			vm_map_unlock(real_map);

		if (original_map != map) {
			vm_map_unlock_read(map);
			vm_map_lock_read(original_map);
			map = original_map;
		}
		real_map = map;

		laddr = vaddr;
		hdelta = 0xFFFFF000;
		ldelta = 0xFFFFF000;

		while (vm_map_lookup_entry(map, laddr, &entry)) {
			if (ldelta > (laddr - entry->vme_start))
				ldelta = laddr - entry->vme_start;
			if (hdelta > (entry->vme_end - laddr))
				hdelta = entry->vme_end - laddr;
			if (entry->is_sub_map) {

				laddr = (laddr - entry->vme_start)
					+ entry->offset;
				vm_map_lock_read(entry->object.sub_map);

				if (map != real_map)
					vm_map_unlock_read(map);
				if (entry->use_pmap) {
					vm_map_unlock_read(real_map);
					real_map = entry->object.sub_map;
				}
				map = entry->object.sub_map;

			} else {
				break;
			}
		}

		if (vm_map_lookup_entry(map, laddr, &entry) &&
		    (entry->object.vm_object != NULL) &&
		    (entry->object.vm_object == object)) {

			int superpage = (!object->pager_created && object->phys_contiguous)? VM_MEM_SUPERPAGE : 0;
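
			/*
			 * Memory that is physically contiguous and has never
			 * been paged qualifies for a superpage mapping.
			 */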
			if (superpage && physpage_p) {
				/* for vm_map_wire_and_extract() */
				*physpage_p = (ppnum_t) ((((vm_map_offset_t) entry->object.vm_object->vo_shadow_offset)
							  + entry->offset
							  + (laddr - entry->vme_start))
							 >> PAGE_SHIFT);
			}

			if (caller_pmap) {
				/*
				 * Set up a block mapped area
				 */
				assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
				pmap_map_block(caller_pmap,
					       (addr64_t)(caller_pmap_addr - ldelta),
					       (ppnum_t)((((vm_map_offset_t) (entry->object.vm_object->vo_shadow_offset)) +
							  entry->offset + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
					       (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
					       (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
			} else {
				/*
				 * Set up a block mapped area
				 */
				assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT));
				pmap_map_block(real_map->pmap,
					       (addr64_t)(vaddr - ldelta),
					       (ppnum_t)((((vm_map_offset_t)(entry->object.vm_object->vo_shadow_offset)) +
							  entry->offset + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT),
					       (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot,
					       (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
			}
		}
	}
	/*
	 * Unlock everything, and return
	 */
	vm_map_verify_done(map, &version);
	if (real_map != map)
		vm_map_unlock(real_map);

	if (m != VM_PAGE_NULL) {
		PAGE_WAKEUP_DONE(m);

		vm_fault_cleanup(m->object, top_page);
	} else
		vm_fault_cleanup(object, top_page);

	vm_object_deallocate(object);

#undef	RELEASE_PAGE

	kr = KERN_SUCCESS;
done:
	thread_interrupt_level(interruptible_state);

	/*
	 * Only throttle on faults which cause a pagein.
	 */
	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
		throttle_lowpri_io(1);
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				  (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
				  ((uint64_t)vaddr >> 32),
				  vaddr,
				  kr,
				  type_of_fault,
				  0);

	return (kr);
}
/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
kern_return_t
vm_fault_wire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr,
	ppnum_t		*physpage_p)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	register kern_return_t	rc;

	assert(entry->in_transition);

	if ((entry->object.vm_object != NULL) &&
	    !entry->is_sub_map &&
	    entry->object.vm_object->phys_contiguous) {
		return KERN_SUCCESS;
	}

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		rc = vm_fault_wire_fast(map, va, entry, pmap,
					pmap_addr + (va - entry->vme_start),
					physpage_p);
		if (rc != KERN_SUCCESS) {
			rc = vm_fault_internal(map, va, VM_PROT_NONE, TRUE,
					       ((pmap == kernel_pmap)
						? THREAD_UNINT
						: THREAD_ABORTSAFE),
					       pmap,
					       (pmap_addr +
						(va - entry->vme_start)),
					       physpage_p);
			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
		}

		if (rc != KERN_SUCCESS) {
			struct vm_map_entry	tmp_entry = *entry;

			/* unwire wired pages */
			tmp_entry.vme_end = va;
			vm_fault_unwire(map,
					&tmp_entry, FALSE, pmap, pmap_addr);

			return rc;
		}
	}
	return KERN_SUCCESS;
}
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	boolean_t	deallocate,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	vm_object_t		object;
	struct vm_object_fault_info fault_info;

	object = (entry->is_sub_map)
			? VM_OBJECT_NULL : entry->object.vm_object;

	/*
	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
	 * do anything since such memory is wired by default.  So we don't have
	 * anything to undo here.
	 */
	if (object != VM_OBJECT_NULL && object->phys_contiguous)
		return;

	fault_info.interruptible = THREAD_UNINT;
	fault_info.behavior = entry->behavior;
	fault_info.user_tag = entry->alias;
	fault_info.pmap_options = 0;
	if (entry->iokit_acct ||
	    (!entry->is_sub_map && !entry->use_pmap)) {
		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
	}
	fault_info.lo_offset = entry->offset;
	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
	fault_info.no_cache = entry->no_cache;
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
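
	/*
	 * For each page in the range, look the page up with a stealth
	 * vm_fault_page() call and either free it (deallocate == TRUE) or
	 * just drop its wire count and clear the pmap-level wiring.
	 */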
	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {

		if (object == VM_OBJECT_NULL) {
			if (pmap) {
				pmap_change_wiring(pmap,
						   pmap_addr + (va - entry->vme_start), FALSE);
			}
			(void) vm_fault(map, va, VM_PROT_NONE,
					TRUE, THREAD_UNINT, pmap, pmap_addr);
		} else {
			vm_prot_t	prot;
			vm_page_t	result_page;
			vm_page_t	top_page;
			vm_object_t	result_object;
			vm_fault_return_t result;

			if (end_addr - va > (vm_size_t) -1) {
				/* 32-bit overflow */
				fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
			} else {
				fault_info.cluster_size = (vm_size_t) (end_addr - va);
				assert(fault_info.cluster_size == end_addr - va);
			}

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				XPR(XPR_VM_FAULT,
				    "vm_fault_unwire -> vm_fault_page\n",
				    0,0,0,0,0);
				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					object,
					entry->offset + (va - entry->vme_start),
					VM_PROT_NONE, TRUE,
					FALSE, /* page not looked up */
					&prot, &result_page, &top_page,
					(int *)0,
					NULL, map->no_zero_fill,
					FALSE, &fault_info);
			} while (result == VM_FAULT_RETRY);

			/*
			 * If this was a mapping to a file on a device that has been forcibly
			 * unmounted, then we won't get a page back from vm_fault_page().  Just
			 * move on to the next one in case the remaining pages are mapped from
			 * different objects.  During a forced unmount, the object is terminated
			 * so the alive flag will be false if this happens.  A forced unmount
			 * will occur when an external disk is unplugged before the user does an
			 * eject, so we don't want to panic in that situation.
			 */
			if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
				continue;

			if (result == VM_FAULT_MEMORY_ERROR &&
			    object == kernel_object) {
				/*
				 * This must have been allocated with
				 * KMA_KOBJECT and KMA_VAONLY and there's
				 * no physical page at this offset.
				 * We're done (no page to free).
				 */
				assert(deallocate);
				continue;
			}

			if (result != VM_FAULT_SUCCESS)
				panic("vm_fault_unwire: failure");

			result_object = result_page->object;

			if (deallocate) {
				assert(result_page->phys_page !=
				       vm_page_fictitious_addr);
				pmap_disconnect(result_page->phys_page);
				VM_PAGE_FREE(result_page);
			} else {
				if ((pmap) && (result_page->phys_page != vm_page_guard_addr))
					pmap_change_wiring(pmap,
							   pmap_addr + (va - entry->vme_start), FALSE);

				if (VM_PAGE_WIRED(result_page)) {
					vm_page_lockspin_queues();
					vm_page_unwire(result_page, TRUE);
					vm_page_unlock_queues();
				}
				if (entry->zero_wired_pages) {
					pmap_zero_page(result_page->phys_page);
					entry->zero_wired_pages = FALSE;
				}

				PAGE_WAKEUP_DONE(result_page);
			}
			vm_fault_cleanup(result_object, top_page);
		}
	}

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), TRUE);
}
/*
 *	vm_fault_wire_fast:
 *
 *	Handle common case of a wire down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
static kern_return_t
vm_fault_wire_fast(
	__unused vm_map_t	map,
	vm_map_offset_t	va,
	vm_map_entry_t	entry,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr,
	ppnum_t		*physpage_p)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	register vm_page_t	m;
	vm_prot_t		prot;
	thread_t		thread = current_thread();
	int			type_of_fault;
	kern_return_t		kr;

	VM_STAT_INCR(faults);

	if (thread != THREAD_NULL && thread->task != TASK_NULL)
		thread->task->faults++;

	/*
	 *	Recovery actions
	 */

#undef	RELEASE_PAGE
#define RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lockspin_queues();			\
	vm_page_unwire(m, TRUE);			\
	vm_page_unlock_queues();			\
}

#undef	UNLOCK_THINGS
#define UNLOCK_THINGS	{				\
	vm_object_paging_end(object);			\
	vm_object_unlock(object);			\
}

#undef	UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(object);			\
}
/*
 *	Give up and have caller do things the hard way.
 */

#define GIVE_UP {					\
	UNLOCK_AND_DEALLOCATE;				\
	return(KERN_FAILURE);				\
}

	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map) {
		assert(physpage_p == NULL);
		return(KERN_FAILURE);
	}

	/*
	 *	Find the backing store object and offset into it.
	 */

	object = entry->object.vm_object;
	offset = (va - entry->vme_start) + entry->offset;
	prot = entry->protection;

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */

	vm_object_lock(object);
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 */

	/*
	 *	Look for page in top-level object.  If it's not there or
	 *	there's something going on, give up.
	 * ENCRYPTED SWAP: use the slow fault path, since we'll need to
	 * decrypt the page before wiring it down.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
	    (m->unusual && ( m->error || m->restart || m->absent))) {

		GIVE_UP;
	}
	ASSERT_PAGE_DECRYPTED(m);

	if (m->fictitious &&
	    m->phys_page == vm_page_guard_addr) {
		/*
		 * Guard pages are fictitious pages and are never
		 * entered into a pmap, so let's say it's been wired...
		 */
		kr = KERN_SUCCESS;
		goto done;
	}

	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */

	vm_page_lockspin_queues();
	vm_page_wire(m);
	vm_page_unlock_queues();

	/*
	 *	Mark page busy for other threads.
	 */
	assert(!m->busy);
	m->busy = TRUE;

	/*
	 *	Give up if the page is being written and there's a copy object
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

	/*
	 *	Put this page into the physical map.
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;
	kr = vm_fault_enter(m,
			    pmap,
			    pmap_addr,
			    prot,
			    prot,
			    TRUE,
			    FALSE,
			    FALSE,
			    FALSE,
			    entry->alias,
			    ((entry->iokit_acct ||
			      (!entry->is_sub_map && !entry->use_pmap))
			     ? PMAP_OPTIONS_ALT_ACCT
			     : 0),
			    NULL,
			    &type_of_fault);

done:
	/*
	 *	Unlock everything, and return
	 */

	if (physpage_p) {
		/* for vm_map_wire_and_extract() */
		if (kr == KERN_SUCCESS) {
			*physpage_p = m->phys_page;
			if (prot & VM_PROT_WRITE) {
				vm_object_lock_assert_exclusive(m->object);
				m->dirty = TRUE;
			}
		} else {
			*physpage_p = 0;
		}
	}

	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return kr;
}
/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */
static void
vm_fault_copy_cleanup(
	vm_page_t	page,
	vm_page_t	top_page)
{
	vm_object_t	object = page->object;

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	if (!page->active && !page->inactive && !page->throttled) {
		vm_page_lockspin_queues();
		if (!page->active && !page->inactive && !page->throttled)
			vm_page_activate(page);
		vm_page_unlock_queues();
	}
	vm_fault_cleanup(object, top_page);
}

static void
vm_fault_copy_dst_cleanup(
	vm_page_t	page)
{
	vm_object_t	object;

	if (page != VM_PAGE_NULL) {
		object = page->object;
		vm_object_lock(object);
		vm_page_lockspin_queues();
		vm_page_unwire(page, TRUE);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"copy_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		requested.
 */
kern_return_t
vm_fault_copy(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_map_size_t		*copy_size,		/* INOUT */
	vm_object_t		dst_object,
	vm_object_offset_t	dst_offset,
	vm_map_t		dst_map,
	vm_map_version_t	*dst_version,
	int			interruptible)
{
	vm_page_t		result_page;

	vm_page_t		src_page;
	vm_page_t		src_top_page;
	vm_prot_t		src_prot;

	vm_page_t		dst_page;
	vm_page_t		dst_top_page;
	vm_prot_t		dst_prot;

	vm_map_size_t		amount_left;
	vm_object_t		old_copy_object;
	kern_return_t		error = 0;
	vm_fault_return_t	result;

	vm_map_size_t		part_size;
	struct vm_object_fault_info fault_info_src;
	struct vm_object_fault_info fault_info_dst;

	/*
	 * In order not to confuse the clustered pageins, align
	 * the different offsets on a page boundary.
	 */

#define	RETURN(x)					\
	MACRO_BEGIN					\
	*copy_size -= amount_left;			\
	MACRO_RETURN(x);				\
	MACRO_END

	amount_left = *copy_size;

	fault_info_src.interruptible = interruptible;
	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_src.user_tag = 0;
	fault_info_src.pmap_options = 0;
	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
	fault_info_src.no_cache = FALSE;
	fault_info_src.stealth = TRUE;
	fault_info_src.io_sync = FALSE;
	fault_info_src.cs_bypass = FALSE;
	fault_info_src.mark_zf_absent = FALSE;
	fault_info_src.batch_pmap_op = FALSE;

	fault_info_dst.interruptible = interruptible;
	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_dst.user_tag = 0;
	fault_info_dst.pmap_options = 0;
	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
	fault_info_dst.no_cache = FALSE;
	fault_info_dst.stealth = TRUE;
	fault_info_dst.io_sync = FALSE;
	fault_info_dst.cs_bypass = FALSE;
	fault_info_dst.mark_zf_absent = FALSE;
	fault_info_dst.batch_pmap_op = FALSE;
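
	/*
	 * Both fault_info blocks use VM_BEHAVIOR_SEQUENTIAL so that
	 * vm_fault_page() can cluster page-ins across the copy range.
	 */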
	do { /* while (amount_left > 0) */
		/*
		 * There may be a deadlock if both source and destination
		 * pages are the same. To avoid this deadlock, the copy must
		 * start by getting the destination page in order to apply
		 * COW semantics if any.
		 */

	RetryDestinationFault: ;

		dst_prot = VM_PROT_WRITE|VM_PROT_READ;

		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		if (amount_left > (vm_size_t) -1) {
			/* 32-bit overflow */
			fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
		} else {
			fault_info_dst.cluster_size = (vm_size_t) amount_left;
			assert(fault_info_dst.cluster_size == amount_left);
		}

		XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
		dst_page = VM_PAGE_NULL;
		result = vm_fault_page(dst_object,
				       vm_object_trunc_page(dst_offset),
				       VM_PROT_WRITE|VM_PROT_READ,
				       FALSE,
				       FALSE, /* page not looked up */
				       &dst_prot, &dst_page, &dst_top_page,
				       (int *)0,
				       &error,
				       dst_map->no_zero_fill,
				       FALSE, &fault_info_dst);

		switch (result) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible))
				goto RetryDestinationFault;
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			RETURN(MACH_SEND_INTERRUPTED);
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail the copy */
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
			/*FALLTHROUGH*/
		case VM_FAULT_MEMORY_ERROR:
			if (error)
				return (error);
			else
				return(KERN_MEMORY_ERROR);
		default:
			panic("vm_fault_copy: unexpected error 0x%x from "
			      "vm_fault_page()\n", result);
		}
		assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);

		old_copy_object = dst_page->object->copy;

		/*
		 * There exists the possibility that the source and
		 * destination page are the same.  But we can't
		 * easily determine that now.  If they are the
		 * same, the call to vm_fault_page() for the
		 * destination page will deadlock.  To prevent this we
		 * wire the page so we can drop busy without having
		 * the page daemon steal the page.  We clean up the
		 * top page but keep the paging reference on the object
		 * holding the dest page so it doesn't go away.
		 */

		vm_page_lockspin_queues();
		vm_page_wire(dst_page);
		vm_page_unlock_queues();
		PAGE_WAKEUP_DONE(dst_page);
		vm_object_unlock(dst_page->object);

		if (dst_top_page != VM_PAGE_NULL) {
			vm_object_lock(dst_object);
			VM_PAGE_FREE(dst_top_page);
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		}
	RetrySourceFault: ;

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */
			src_page = VM_PAGE_NULL;
			result_page = VM_PAGE_NULL;
		} else {
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object,
						  vm_object_trunc_page(src_offset));
			if (src_page == dst_page) {
				src_prot = dst_prot;
				result_page = VM_PAGE_NULL;
			} else {
				src_prot = VM_PROT_READ;
				vm_object_paging_begin(src_object);

				if (amount_left > (vm_size_t) -1) {
					/* 32-bit overflow */
					fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
				} else {
					fault_info_src.cluster_size = (vm_size_t) amount_left;
					assert(fault_info_src.cluster_size == amount_left);
				}

				XPR(XPR_VM_FAULT,
				    "vm_fault_copy(2) -> vm_fault_page\n",
				    0,0,0,0,0);
				result_page = VM_PAGE_NULL;
				result = vm_fault_page(
					src_object,
					vm_object_trunc_page(src_offset),
					VM_PROT_READ, FALSE,
					FALSE, /* page not looked up */
					&src_prot,
					&result_page, &src_top_page,
					(int *)0, &error, FALSE,
					FALSE, &fault_info_src);

				switch (result) {
				case VM_FAULT_SUCCESS:
					break;
				case VM_FAULT_RETRY:
					goto RetrySourceFault;
				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible))
						goto RetrySourceFault;
					/* fall thru */
				case VM_FAULT_INTERRUPTED:
					vm_fault_copy_dst_cleanup(dst_page);
					RETURN(MACH_SEND_INTERRUPTED);
				case VM_FAULT_SUCCESS_NO_VM_PAGE:
					/* success but no VM page: fail */
					vm_object_paging_end(src_object);
					vm_object_unlock(src_object);
					/*FALLTHROUGH*/
				case VM_FAULT_MEMORY_ERROR:
					vm_fault_copy_dst_cleanup(dst_page);
					if (error)
						return (error);
					else
						return(KERN_MEMORY_ERROR);
				default:
					panic("vm_fault_copy(2): unexpected "
					      "error 0x%x from "
					      "vm_fault_page()\n", result);
				}

				assert((src_top_page == VM_PAGE_NULL) ==
				       (result_page->object == src_object));
			}
			assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
			vm_object_unlock(result_page->object);
		}
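
		/*
		 * We now hold the (wired) destination page and, if any, the
		 * busy source page.  Re-check the destination map version
		 * and the copy object before committing the copy.
		 */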
		if (!vm_map_verify(dst_map, dst_version)) {
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}

		vm_object_lock(dst_page->object);

		if (dst_page->object->copy != old_copy_object) {
			vm_object_unlock(dst_page->object);
			vm_map_verify_done(dst_map, dst_version);
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_unlock(dst_page->object);

		/*
		 *	Copy the page, and note that it is dirty
		 *	immediately.
		 */

		if (!page_aligned(src_offset) ||
		    !page_aligned(dst_offset) ||
		    !page_aligned(amount_left)) {

			vm_object_offset_t	src_po,
						dst_po;

			src_po = src_offset - vm_object_trunc_page(src_offset);
			dst_po = dst_offset - vm_object_trunc_page(dst_offset);

			if (dst_po > src_po) {
				part_size = PAGE_SIZE - dst_po;
			} else {
				part_size = PAGE_SIZE - src_po;
			}
			if (part_size > (amount_left)) {
				part_size = amount_left;
			}

			if (result_page == VM_PAGE_NULL) {
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_zero_fill(dst_page,
						       (vm_offset_t) dst_po,
						       (vm_size_t) part_size);
			} else {
				assert((vm_offset_t) src_po == src_po);
				assert((vm_offset_t) dst_po == dst_po);
				assert((vm_size_t) part_size == part_size);
				vm_page_part_copy(result_page,
						  (vm_offset_t) src_po,
						  dst_page,
						  (vm_offset_t) dst_po,
						  (vm_size_t)part_size);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_page->object);
				}
			}
		} else {
			part_size = PAGE_SIZE;

			if (result_page == VM_PAGE_NULL)
				vm_page_zero_fill(dst_page);
			else {
				vm_object_lock(result_page->object);
				vm_page_copy(result_page, dst_page);
				vm_object_unlock(result_page->object);

				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					SET_PAGE_DIRTY(dst_page, TRUE);
					vm_object_unlock(dst_page->object);
				}
			}
		}

		/*
		 *	Unlock everything, and return
		 */

		vm_map_verify_done(dst_map, dst_version);

		if (result_page != VM_PAGE_NULL && src_page != dst_page)
			vm_fault_copy_cleanup(result_page, src_top_page);
		vm_fault_copy_dst_cleanup(dst_page);

		amount_left -= part_size;
		src_offset += part_size;
		dst_offset += part_size;
	} while (amount_left > 0);

	RETURN(KERN_SUCCESS);
#undef	RETURN

	/*NOTREACHED*/
}
#if	VM_FAULT_CLASSIFY
/*
 * Temporary statistics gathering support.
 */

/*
 *	Statistics arrays:
 */
#define VM_FAULT_TYPES_MAX	5
#define	VM_FAULT_LEVEL_MAX	8

int	vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];

#define	VM_FAULT_TYPE_ZERO_FILL	0
#define	VM_FAULT_TYPE_MAP_IN	1
#define	VM_FAULT_TYPE_PAGER	2
#define	VM_FAULT_TYPE_COPY	3
#define	VM_FAULT_TYPE_OTHER	4

void
vm_fault_classify(vm_object_t		object,
		  vm_object_offset_t	offset,
		  vm_prot_t		fault_type)
{
	int		type, level = 0;
	vm_page_t	m;

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			if (m->busy || m->error || m->restart || m->absent) {
				type = VM_FAULT_TYPE_OTHER;
				break;
			}
			if (((fault_type & VM_PROT_WRITE) == 0) ||
			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
				type = VM_FAULT_TYPE_MAP_IN;
				break;
			}
			type = VM_FAULT_TYPE_COPY;
			break;
		} else {
			if (object->pager_created) {
				type = VM_FAULT_TYPE_PAGER;
				break;
			}
			if (object->shadow == VM_OBJECT_NULL) {
				type = VM_FAULT_TYPE_ZERO_FILL;
				break;
			}

			offset += object->vo_shadow_offset;
			object = object->shadow;
			level++;
			continue;
		}
	}

	if (level > VM_FAULT_LEVEL_MAX)
		level = VM_FAULT_LEVEL_MAX;

	vm_fault_stats[type][level] += 1;

	return;
}

/* cleanup routine to call from debugger */
void
vm_fault_classify_init(void)
{
	int type, level;

	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
			vm_fault_stats[type][level] = 0;
		}
	}

	return;
}
#endif	/* VM_FAULT_CLASSIFY */
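
/*
 * vm_page_validate_cs_mapped:
 *
 * Validate the code signature of a page that is already mapped at
 * "kaddr" in the kernel address space.  The page must be busy and its
 * object must be locked exclusively by the caller.
 */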
void
vm_page_validate_cs_mapped(
	vm_page_t	page,
	const void	*kaddr)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	kern_return_t		kr;
	memory_object_t		pager;
	void			*blobs;
	boolean_t		validated, tainted;

	assert(page->busy);
	vm_object_lock_assert_exclusive(page->object);

	if (!cs_validation) {
		return;
	}

	if (page->wpmapped && !page->cs_tainted) {
		/*
		 * This page was mapped for "write" access sometime in the
		 * past and could still be modifiable in the future.
		 * Consider it tainted.
		 * [ If the page was already found to be "tainted", no
		 * need to re-validate. ]
		 */
		page->cs_validated = TRUE;
		page->cs_tainted = TRUE;
		if (cs_debug) {
			printf("CODESIGNING: vm_page_validate_cs: "
			       "page %p obj %p off 0x%llx "
			       "was modified\n",
			       page, page->object, page->offset);
		}
		vm_cs_validated_dirtied++;
	}

	if (page->cs_validated) {
		return;
	}

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		return;
	}
	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * to go.
	 */
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	pager = object->pager;
	assert(object->paging_in_progress);
	kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
	if (kr != KERN_SUCCESS) {
		blobs = NULL;
	}

	/* verify the SHA1 hash for this page */
	validated = cs_validate_page(blobs,
				     pager,
				     offset + object->paging_offset,
				     (const void *)kaddr,
				     &tainted);

	page->cs_validated = validated;
	if (validated) {
		page->cs_tainted = tainted;
	}
}
void
vm_page_validate_cs(
	vm_page_t	page)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		koffset;
	vm_map_size_t		ksize;
	vm_offset_t		kaddr;
	kern_return_t		kr;
	boolean_t		busy_page;
	boolean_t		need_unmap;

	vm_object_lock_assert_held(page->object);

	if (!cs_validation) {
		return;
	}

	if (page->wpmapped && !page->cs_tainted) {
		vm_object_lock_assert_exclusive(page->object);

		/*
		 * This page was mapped for "write" access sometime in the
		 * past and could still be modifiable in the future.
		 * Consider it tainted.
		 * [ If the page was already found to be "tainted", no
		 * need to re-validate. ]
		 */
		page->cs_validated = TRUE;
		page->cs_tainted = TRUE;
		if (cs_debug) {
			printf("CODESIGNING: vm_page_validate_cs: "
			       "page %p obj %p off 0x%llx "
			       "was modified\n",
			       page, page->object, page->offset);
		}
		vm_cs_validated_dirtied++;
	}

	if (page->cs_validated) {
		return;
	}

	if (page->slid) {
		panic("vm_page_validate_cs(%p): page is slid\n", page);
	}
	assert(!page->slid);

#if CHECK_CS_VALIDATION_BITMAP
	if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
		page->cs_validated = TRUE;
		page->cs_tainted = FALSE;
		vm_cs_bitmap_validated++;
		return;
	}
#endif
	vm_object_lock_assert_exclusive(page->object);

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	busy_page = page->busy;
	if (!busy_page) {
		/* keep page busy while we map (and unlock) the VM object */
		page->busy = TRUE;
	}

	/*
	 * Take a paging reference on the VM object
	 * to protect it from collapse or bypass,
	 * and keep it from disappearing too.
	 */
	vm_object_paging_begin(object);

	/* map the page in the kernel address space */
	ksize = PAGE_SIZE_64;
	koffset = 0;
	need_unmap = FALSE;
	kr = vm_paging_map_object(page,
				  object,
				  offset,
				  VM_PROT_READ,
				  FALSE, /* can't unlock object ! */
				  &ksize,
				  &koffset,
				  &need_unmap);
	if (kr != KERN_SUCCESS) {
		panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
	}
	kaddr = CAST_DOWN(vm_offset_t, koffset);

	/* validate the mapped page */
	vm_page_validate_cs_mapped(page, (const void *) kaddr);

#if CHECK_CS_VALIDATION_BITMAP
	if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
		vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
	}
#endif
	assert(page->busy);
	assert(object == page->object);
	vm_object_lock_assert_exclusive(object);

	if (!busy_page) {
		PAGE_WAKEUP_DONE(page);
	}
	if (need_unmap) {
		/* unmap the map from the kernel address space */
		vm_paging_unmap_object(object, koffset, koffset + ksize);
		koffset = 0;
		ksize = 0;
		kaddr = 0;
	}
	vm_object_paging_end(object);
);