/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Page fault handling module.
 */
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>

#include <libkern/OSAtomic.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/message.h>	/* for error codes */
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
				/* For memory_object_data_{request,unlock} */

#include <kern/kern_types.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>

#include <ppc/proc_reg.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_external.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */

#include <sys/kdebug.h>
#define VM_FAULT_CLASSIFY	0

/*
 * Zero-filled pages are marked "m->zero_fill" and put on the
 * special zero-fill inactive queue only if they belong to
 * an object at least this big.
 */
#define	VM_ZF_OBJECT_SIZE_THRESHOLD	(0x200000)

#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */

int	vm_object_pagein_throttle = 16;

#if	MACH_KDB
extern struct db_watchpoint	*db_watchpoint_list;
#endif	/* MACH_KDB */
/* Forward declarations of internal routines. */
extern kern_return_t vm_fault_wire_fast(
				vm_map_t	map,
				vm_map_offset_t	va,
				vm_map_entry_t	entry,
				pmap_t		pmap,
				vm_map_offset_t	pmap_addr);

extern void vm_fault_continue(void);

extern void vm_fault_copy_cleanup(
				vm_page_t	page,
				vm_page_t	top_page);

extern void vm_fault_copy_dst_cleanup(
				vm_page_t	page);

#if	VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t	object,
			      vm_object_offset_t	offset,
			      vm_prot_t		fault_type);

extern void vm_fault_classify_init(void);
#endif

unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
/*
 *	Routine:	vm_fault_init
 *	Purpose:
 *		Initialize our private data structures.
 */
void
vm_fault_init(void)
{
}

/*
 *	Routine:	vm_fault_cleanup
 *	Purpose:
 *		Clean up the result of vm_fault_page.
 *	Results:
 *		The paging reference for "object" is released.
 *		"object" is unlocked.
 *		If "top_page" is not null, "top_page" is
 *		freed and the paging reference for the object
 *		containing it is released.
 *
 *	In/out conditions:
 *		"object" must be locked.
 */
void
vm_fault_cleanup(
	register vm_object_t	object,
	register vm_page_t	top_page)
{
	vm_object_paging_end(object);
	vm_object_unlock(object);

	if (top_page != VM_PAGE_NULL) {
		object = top_page->object;

		vm_object_lock(object);
		VM_PAGE_FREE(top_page);
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats_in[MAXCLUSTERPAGES];
#define CLUSTER_STAT(clause)	clause
#define CLUSTER_STAT_HIGHER(x)	\
	((cluster_stats_in[(x)].pages_at_higher_offsets)++)
#define CLUSTER_STAT_LOWER(x)	\
	((cluster_stats_in[(x)].pages_at_lower_offsets)++)
#define CLUSTER_STAT_CLUSTER(x)	\
	((cluster_stats_in[(x)].pages_in_cluster)++)
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)

boolean_t	vm_page_deactivate_behind = TRUE;
/*
 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
 */
int vm_default_ahead = 0;
int vm_default_behind = MAX_UPL_TRANSFER;

#define MAX_SEQUENTIAL_RUN	(1024 * 1024 * 1024)
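/*
 * Note: 1024 * 1024 * 1024 bytes == 1 GB.  The per-object "sequential"
 * counter maintained by vm_fault_is_sequential() below saturates at
 * +/- MAX_SEQUENTIAL_RUN, so a detected run is capped at 1 GB in either
 * direction.
 */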
/*
 * vm_fault_is_sequential
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.
 * Update state to indicate current access pattern.
 *
 * object must have at least the shared lock held
 */
static void
vm_fault_is_sequential(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_behavior_t		behavior)
{
	vm_object_offset_t	last_alloc;
	int			sequential;
	int			orig_sequential;

	last_alloc = object->last_alloc;
	sequential = object->sequential;
	orig_sequential = sequential;

	switch (behavior) {
	case VM_BEHAVIOR_RANDOM:
		/* reset indicator of sequential behavior */
		sequential = 0;
		break;

	case VM_BEHAVIOR_SEQUENTIAL:
		if (offset && last_alloc == offset - PAGE_SIZE_64) {
			/* advance indicator of sequential behavior */
			if (sequential < MAX_SEQUENTIAL_RUN)
				sequential += PAGE_SIZE;
		} else {
			/* reset indicator of sequential behavior */
			sequential = 0;
		}
		break;

	case VM_BEHAVIOR_RSEQNTL:
		if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
			/* advance indicator of sequential behavior */
			if (sequential > -MAX_SEQUENTIAL_RUN)
				sequential -= PAGE_SIZE;
		} else {
			/* reset indicator of sequential behavior */
			sequential = 0;
		}
		break;

	case VM_BEHAVIOR_DEFAULT:
	default:
		if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
			/* advance indicator of sequential behavior */
			if (sequential < MAX_SEQUENTIAL_RUN)
				sequential += PAGE_SIZE;

		} else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
			/* advance indicator of sequential behavior */
			if (sequential > -MAX_SEQUENTIAL_RUN)
				sequential -= PAGE_SIZE;
		} else {
			/* reset indicator of sequential behavior */
			sequential = 0;
		}
		break;
	}
	if (sequential != orig_sequential) {
		if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
			/*
			 * if someone else has already updated object->sequential
			 * don't bother trying to update it or object->last_alloc
			 */
			return;
		}
	}
	/*
	 * I'd like to do this with a OSCompareAndSwap64, but that
	 * doesn't exist for PPC...  however, it shouldn't matter
	 * that much... last_alloc is maintained so that we can determine
	 * if a sequential access pattern is taking place... if only
	 * one thread is banging on this object, no problem with the unprotected
	 * update... if 2 or more threads are banging away, we run the risk of
	 * someone seeing a mangled update... however, in the face of multiple
	 * accesses, no sequential access pattern can develop anyway, so we
	 * haven't lost any real info.
	 */
	object->last_alloc = offset;
}
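/*
 * Note: object->sequential grows positive for forward sequential runs and
 * negative for reverse runs; vm_fault_deactivate_behind() below uses the
 * sign to decide in which direction to look for a page to deactivate.
 */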
/*
 * vm_fault_deactivate_behind
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified.  If
 * so, compute a potential page to deactivate and
 * deactivate it.
 *
 * object must be locked.
 *
 * return TRUE if we actually deactivate a page
 */
static boolean_t
vm_fault_deactivate_behind(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_behavior_t		behavior)
{
	vm_page_t	m = NULL;
	int		sequential_run;
	int		sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind);	/* (TEST/DEBUG) */
#endif
	if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
		/*
		 * Do not deactivate pages from the kernel object: they
		 * are not intended to become pageable,
		 * or we've disabled the deactivate behind mechanism.
		 */
		return FALSE;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {
	case VM_BEHAVIOR_RANDOM:
		break;
	case VM_BEHAVIOR_SEQUENTIAL:
		if (sequential_run >= (int)PAGE_SIZE)
			m = vm_page_lookup(object, offset - PAGE_SIZE_64);
		break;
	case VM_BEHAVIOR_RSEQNTL:
		if (sequential_run >= (int)PAGE_SIZE)
			m = vm_page_lookup(object, offset + PAGE_SIZE_64);
		break;
	case VM_BEHAVIOR_DEFAULT:
	default:
	{	vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;

		/*
		 * determine if the run of sequential accesses has been
		 * long enough on an object with default access behavior
		 * to consider it for deactivation
		 */
		if ((uint64_t)sequential_run >= behind) {
			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
				if (offset >= behind)
					m = vm_page_lookup(object, offset - behind);
			} else {
				if (offset < -behind)
					m = vm_page_lookup(object, offset + behind);
			}
		}
		break;
	}
	}
	if (m) {
		if (!m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
			pmap_clear_reference(m->phys_page);
			m->deactivated = TRUE;
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * check for various conditions that would
 * prevent us from creating a ZF page...
 * cleanup is based on being called from vm_fault_page
 *
 * object must be locked
 * object == m->object
 */
static vm_fault_return_t
vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
{
	if (object->shadow_severed) {
		/*
		 * the shadow chain was severed
		 * just have to return an error at this point
		 */
		if (m != VM_PAGE_NULL)
			VM_PAGE_FREE(m);
		vm_fault_cleanup(object, first_m);

		thread_interrupt_level(interruptible_state);

		return (VM_FAULT_MEMORY_ERROR);
	}
	if (vm_backing_store_low) {
		/*
		 * are we protecting the system from
		 * backing store exhaustion?  If so,
		 * sleep unless we are privileged.
		 */
		if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {

			if (m != VM_PAGE_NULL)
				VM_PAGE_FREE(m);
			vm_fault_cleanup(object, first_m);

			assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);

			thread_block(THREAD_CONTINUE_NULL);
			thread_interrupt_level(interruptible_state);

			return (VM_FAULT_RETRY);
		}
	}
	if (VM_PAGE_ZFILL_THROTTLED()) {
		/*
		 * we're throttling zero-fills...
		 * treat this as if we couldn't grab a page
		 */
		if (m != VM_PAGE_NULL)
			VM_PAGE_FREE(m);
		vm_fault_cleanup(object, first_m);

		thread_interrupt_level(interruptible_state);

		return (VM_FAULT_MEMORY_SHORTAGE);
	}
	return (VM_FAULT_SUCCESS);
}
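/*
 * To summarize vm_fault_check(): it returns VM_FAULT_MEMORY_ERROR if the
 * shadow chain was severed, VM_FAULT_RETRY after sleeping when backing
 * store is low and the task is not privileged, VM_FAULT_MEMORY_SHORTAGE
 * when zero-fills are throttled, and VM_FAULT_SUCCESS otherwise.
 */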
/*
 * do the work to zero fill a page and
 * inject it into the correct paging queue
 *
 * m->object must be locked
 * page queue lock must NOT be held
 */
static int
vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
{
	int my_fault = DBG_ZERO_FILL_FAULT;

	/*
	 * This is a zero-fill page fault...
	 *
	 * Checking the page lock is a waste of
	 * time;  this page was absent, so
	 * it can't be page locked by a pager.
	 *
	 * we also consider it undefined
	 * with respect to instruction
	 * execution.  i.e. it is the responsibility
	 * of higher layers to call for an instruction
	 * sync after changing the contents and before
	 * sending a program into this area.  We
	 * choose this approach for performance
	 */
	m->cs_validated = FALSE;
	m->cs_tainted = FALSE;

	if (no_zero_fill == TRUE)
		my_fault = DBG_NZF_PAGE_FAULT;
	else {
		vm_page_zero_fill(m);

		VM_STAT_INCR(zero_fill_count);
		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
	}
	assert(m->object != kernel_object);
	//assert(m->pageq.next == NULL && m->pageq.prev == NULL);

	if (!IP_VALID(memory_manager_default) &&
	    (m->object->purgable == VM_PURGABLE_DENY ||
	     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
	     m->object->purgable == VM_PURGABLE_VOLATILE)) {
		vm_page_lock_queues();

		queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
		m->throttled = TRUE;
		vm_page_throttled_count++;

		vm_page_unlock_queues();
	} else {
		if (m->object->size > VM_ZF_OBJECT_SIZE_THRESHOLD) {
			m->zero_fill = TRUE;
			OSAddAtomic(1, (SInt32 *)&vm_zf_count);
		}
	}
	return (my_fault);
}
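/*
 * Note: a zero-filled page is charged against vm_zf_count only when its
 * object is larger than VM_ZF_OBJECT_SIZE_THRESHOLD; when no default pager
 * is registered, freshly zero-filled pages for the purgable states checked
 * above are placed on the throttled queue instead.
 */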
/*
 *	Routine:	vm_fault_page
 *	Purpose:
 *		Find the resident page for the virtual memory
 *		specified by the given virtual memory object
 *		and offset.
 *	Additional arguments:
 *		The required permissions for the page are given
 *		in "fault_type".  Desired permissions are included
 *		in "protection".
 *		fault_info is passed along to determine pagein cluster
 *		limits... it contains the expected reference pattern,
 *		cluster size if available, etc...
 *
 *		If the desired page is known to be resident (for
 *		example, because it was previously wired down), asserting
 *		the "unwiring" parameter will speed the search.
 *
 *		If the operation can be interrupted (by thread_abort
 *		or thread_terminate), then the "interruptible"
 *		parameter should be asserted.
 *
 *	Results:
 *		The page containing the proper data is returned
 *		in "result_page".
 *
 *	In/out conditions:
 *		The source object must be locked and referenced,
 *		and must donate one paging reference.  The reference
 *		is not affected.  The paging reference and lock are
 *		consumed.
 *
 *		If the call succeeds, the object in which "result_page"
 *		resides is left locked and holding a paging reference.
 *		If this is not the original object, a busy page in the
 *		original object is returned in "top_page", to prevent other
 *		callers from pursuing this same data, along with a paging
 *		reference for the original object.  The "top_page" should
 *		be destroyed when this guarantee is no longer required.
 *		The "result_page" is also left busy.  It is not removed
 *		from the pageout queues.
 */
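/*
 * Callers dispatch on the vm_fault_return_t produced here: the values
 * returned below are VM_FAULT_SUCCESS, VM_FAULT_RETRY, VM_FAULT_INTERRUPTED,
 * VM_FAULT_MEMORY_SHORTAGE and VM_FAULT_MEMORY_ERROR (the latter optionally
 * setting "error_code").
 */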
vm_fault_return_t
vm_fault_page(
	/* Arguments: */
	vm_object_t	first_object,	/* Object to begin search */
	vm_object_offset_t first_offset,	/* Offset into object */
	vm_prot_t	fault_type,	/* What access is requested */
	boolean_t	must_be_resident,/* Must page be resident? */
	/* Modifies in place: */
	vm_prot_t	*protection,	/* Protection for mapping */
	/* Returns: */
	vm_page_t	*result_page,	/* Page found, if successful */
	vm_page_t	*top_page,	/* Page in top object, if
					 * not result_page.  */
	int		*type_of_fault, /* if non-null, fill in with type of fault
					 * COW, zero-fill, etc... returned in trace point */
	/* More arguments: */
	kern_return_t	*error_code,	/* code if page is in error */
	boolean_t	no_zero_fill,	/* don't zero fill absent pages */
#if MACH_PAGEMAP
	boolean_t	data_supply,	/* treat as data_supply if
					 * it is a write fault and a full
					 * page is provided */
#else
	__unused boolean_t data_supply,
#endif
	vm_object_fault_info_t fault_info)
{
	vm_page_t		m;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_page_t		first_m;
	vm_object_t		next_object;
	vm_object_t		copy_object;
	boolean_t		look_for_page;
	vm_prot_t		access_required = fault_type;
	vm_prot_t		wants_copy_flag;
	CLUSTER_STAT(int pages_at_higher_offsets;)
	CLUSTER_STAT(int pages_at_lower_offsets;)
	kern_return_t		wait_result;
	boolean_t		interruptible_state;
	vm_fault_return_t	error;
	int			my_fault;
	uint32_t		try_failed_count;
	int			interruptible; /* how may fault be interrupted? */
	memory_object_t		pager;
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store.  This existence map
 * duplicates information maintained by the vnode pager.  It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created.  The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object.  MUST_ASK_PAGER() is one of the
 * criteria in the decision to invoke the pager.  It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation.  Note that MUST_ASK_PAGER() always evaluates to TRUE for
 * permanent objects.  Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of MUST_ASK_PAGER(), and that clustered pagein scans are only done on an
 * object for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.  PAGED_OUT()
 * is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#if MACH_PAGEMAP
#define MUST_ASK_PAGER(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			== VM_EXTERNAL_STATE_EXISTS)
#else
#define MUST_ASK_PAGER(o, f) (TRUE)
#define PAGED_OUT(o, f) (FALSE)
#endif
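/*
 * Without MACH_PAGEMAP there is no existence map, so the fallback
 * definitions above make us always ask the pager (MUST_ASK_PAGER() is
 * always TRUE) and never assume a page has already been paged out
 * (PAGED_OUT() is always FALSE).
 */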
#define PREPARE_RELEASE_PAGE(m)				\
	MACRO_BEGIN					\
	vm_page_lock_queues();				\
	MACRO_END

#define DO_RELEASE_PAGE(m)				\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	if (!m->active && !m->inactive && !m->throttled)\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PREPARE_RELEASE_PAGE(m);			\
	DO_RELEASE_PAGE(m);				\
	MACRO_END

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset);	/* (TEST/DEBUG) */
#endif
#if	MACH_KDB
	/*
	 * If there are watchpoints set, then
	 * we don't want to give away write permission
	 * on a read fault.  Make the task write fault,
	 * so that the watchpoint code notices the access.
	 */
	if (db_watchpoint_list) {
		/*
		 * If we aren't asking for write permission,
		 * then don't give it away.  We're using write
		 * faults to set the dirty bit.
		 */
		if (!(fault_type & VM_PROT_WRITE))
			*protection &= ~VM_PROT_WRITE;
	}
#endif	/* MACH_KDB */

	interruptible = fault_info->interruptible;
	interruptible_state = thread_interrupt_level(interruptible);
	/*
	 * INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *	2)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	3)	We must increment paging_in_progress on any object
	 *		for which we have a busy page before dropping
	 *		its lock.
	 *
	 *	4)	We leave busy pages on the pageout queues.
	 *		If the pageout daemon comes across a busy page,
	 *		it will remove the page from the pageout queues.
	 */

	object = first_object;
	offset = first_offset;
	first_m = VM_PAGE_NULL;
	access_required = fault_type;
764 "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
765 (integer_t
)object
, offset
, fault_type
, *protection
, 0);
768 * default type of fault
770 my_fault
= DBG_CACHE_HIT_FAULT
;
774 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
776 if (!object
->alive
) {
778 * object is no longer valid
779 * clean up and return error
781 vm_fault_cleanup(object
, first_m
);
782 thread_interrupt_level(interruptible_state
);
784 return (VM_FAULT_MEMORY_ERROR
);
788 * See whether the page at 'offset' is resident
790 m
= vm_page_lookup(object
, offset
);
792 dbgTrace(0xBEEF0004, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
794 if (m
!= VM_PAGE_NULL
) {
798 * The page is being brought in,
799 * wait for it and then retry.
801 * A possible optimization: if the page
802 * is known to be resident, we can ignore
803 * pages that are absent (regardless of
804 * whether they're busy).
807 dbgTrace(0xBEEF0005, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
809 wait_result
= PAGE_SLEEP(object
, m
, interruptible
);
811 "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
812 (integer_t
)object
, offset
,
814 counter(c_vm_fault_page_block_busy_kernel
++);
816 if (wait_result
!= THREAD_AWAKENED
) {
817 vm_fault_cleanup(object
, first_m
);
818 thread_interrupt_level(interruptible_state
);
820 if (wait_result
== THREAD_RESTART
)
821 return (VM_FAULT_RETRY
);
823 return (VM_FAULT_INTERRUPTED
);
828 if (m
->phys_page
== vm_page_guard_addr
) {
830 * Guard page: off limits !
832 if (fault_type
== VM_PROT_NONE
) {
834 * The fault is not requesting any
835 * access to the guard page, so it must
836 * be just to wire or unwire it.
837 * Let's pretend it succeeded...
841 assert(first_m
== VM_PAGE_NULL
);
844 *type_of_fault
= DBG_GUARD_FAULT
;
845 return VM_FAULT_SUCCESS
;
848 * The fault requests access to the
849 * guard page: let's deny that !
851 vm_fault_cleanup(object
, first_m
);
852 thread_interrupt_level(interruptible_state
);
853 return VM_FAULT_MEMORY_ERROR
;
859 * The page is in error, give up now.
862 dbgTrace(0xBEEF0006, (unsigned int) m
, (unsigned int) error_code
); /* (TEST/DEBUG) */
865 *error_code
= KERN_MEMORY_ERROR
;
868 vm_fault_cleanup(object
, first_m
);
869 thread_interrupt_level(interruptible_state
);
871 return (VM_FAULT_MEMORY_ERROR
);
875 * The pager wants us to restart
876 * at the top of the chain,
877 * typically because it has moved the
878 * page to another pager, then do so.
881 dbgTrace(0xBEEF0007, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
885 vm_fault_cleanup(object
, first_m
);
886 thread_interrupt_level(interruptible_state
);
888 return (VM_FAULT_RETRY
);
892 * The page isn't busy, but is absent,
893 * therefore it's deemed "unavailable".
895 * Remove the non-existent page (unless it's
896 * in the top object) and move on down to the
897 * next object (if there is one).
900 dbgTrace(0xBEEF0008, (unsigned int) m
, (unsigned int) object
->shadow
); /* (TEST/DEBUG) */
902 next_object
= object
->shadow
;
904 if (next_object
== VM_OBJECT_NULL
) {
906 * Absent page at bottom of shadow
907 * chain; zero fill the page we left
908 * busy in the first object, and free
911 assert(!must_be_resident
);
914 * check for any conditions that prevent
915 * us from creating a new zero-fill page
916 * vm_fault_check will do all of the
917 * fault cleanup in the case of an error condition
918 * including resetting the thread_interrupt_level
920 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
);
922 if (error
!= VM_FAULT_SUCCESS
)
926 "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
927 (integer_t
)object
, offset
,
929 (integer_t
)first_object
, 0);
931 if (object
!= first_object
) {
933 * free the absent page we just found
938 * drop reference and lock on current object
940 vm_object_paging_end(object
);
941 vm_object_unlock(object
);
944 * grab the original page we
945 * 'soldered' in place and
946 * retake lock on 'first_object'
949 first_m
= VM_PAGE_NULL
;
951 object
= first_object
;
952 offset
= first_offset
;
954 vm_object_lock(object
);
957 * we're going to use the absent page we just found
958 * so convert it to a 'busy' page
964 * zero-fill the page and put it on
965 * the correct paging queue
967 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
971 if (must_be_resident
)
972 vm_object_paging_end(object
);
973 else if (object
!= first_object
) {
974 vm_object_paging_end(object
);
981 vm_page_lockspin_queues();
982 VM_PAGE_QUEUES_REMOVE(m
);
983 vm_page_unlock_queues();
986 "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
987 (integer_t
)object
, offset
,
988 (integer_t
)next_object
,
989 offset
+object
->shadow_offset
,0);
991 offset
+= object
->shadow_offset
;
992 fault_info
->lo_offset
+= object
->shadow_offset
;
993 fault_info
->hi_offset
+= object
->shadow_offset
;
994 access_required
= VM_PROT_READ
;
996 vm_object_lock(next_object
);
997 vm_object_unlock(object
);
998 object
= next_object
;
999 vm_object_paging_begin(object
);
1002 * reset to default type of fault
1004 my_fault
= DBG_CACHE_HIT_FAULT
;
1010 && ((object
!= first_object
) || (object
->copy
!= VM_OBJECT_NULL
))
1011 && (fault_type
& VM_PROT_WRITE
)) {
1013 * This is a copy-on-write fault that will
1014 * cause us to revoke access to this page, but
1015 * this page is in the process of being cleaned
1016 * in a clustered pageout. We must wait until
1017 * the cleaning operation completes before
1018 * revoking access to the original page,
1019 * otherwise we might attempt to remove a
1023 dbgTrace(0xBEEF0009, (unsigned int) m
, (unsigned int) offset
); /* (TEST/DEBUG) */
1026 "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
1027 (integer_t
)object
, offset
,
1028 (integer_t
)m
, 0, 0);
1030 * take an extra ref so that object won't die
1032 vm_object_reference_locked(object
);
1034 vm_fault_cleanup(object
, first_m
);
1036 counter(c_vm_fault_page_block_backoff_kernel
++);
1037 vm_object_lock(object
);
1038 assert(object
->ref_count
> 0);
1040 m
= vm_page_lookup(object
, offset
);
1042 if (m
!= VM_PAGE_NULL
&& m
->cleaning
) {
1043 PAGE_ASSERT_WAIT(m
, interruptible
);
1045 vm_object_unlock(object
);
1046 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1047 vm_object_deallocate(object
);
1051 vm_object_unlock(object
);
1053 vm_object_deallocate(object
);
1054 thread_interrupt_level(interruptible_state
);
1056 return (VM_FAULT_RETRY
);
1059 if (type_of_fault
== NULL
&& m
->speculative
) {
1061 * If we were passed a non-NULL pointer for
1062 * "type_of_fault", than we came from
1063 * vm_fault... we'll let it deal with
1064 * this condition, since it
1065 * needs to see m->speculative to correctly
1066 * account the pageins, otherwise...
1067 * take it off the speculative queue, we'll
1068 * let the caller of vm_fault_page deal
1069 * with getting it onto the correct queue
1071 vm_page_lockspin_queues();
1072 VM_PAGE_QUEUES_REMOVE(m
);
1073 vm_page_unlock_queues();
1079 * the user needs access to a page that we
1080 * encrypted before paging it out.
1081 * Decrypt the page now.
1082 * Keep it busy to prevent anyone from
1083 * accessing it during the decryption.
1086 vm_page_decrypt(m
, 0);
1087 assert(object
== m
->object
);
1089 PAGE_WAKEUP_DONE(m
);
1092 * Retry from the top, in case
1093 * something changed while we were
1098 ASSERT_PAGE_DECRYPTED(m
);
1100 if (m
->object
->code_signed
) {
1103 * We just paged in a page from a signed
1104 * memory object but we don't need to
1105 * validate it now. We'll validate it if
1106 * when it gets mapped into a user address
1107 * space for the first time or when the page
1108 * gets copied to another object as a result
1109 * of a copy-on-write.
1114 * We mark the page busy and leave it on
1115 * the pageout queues. If the pageout
1116 * deamon comes across it, then it will
1117 * remove the page from the queue, but not the object
1120 dbgTrace(0xBEEF000B, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1123 "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
1124 (integer_t
)object
, offset
, (integer_t
)m
, 0, 0);
1134 * we get here when there is no page present in the object at
1135 * the offset we're interested in... we'll allocate a page
1136 * at this point if the pager associated with
1137 * this object can provide the data or we're the top object...
1138 * object is locked; m == NULL
1140 look_for_page
= (object
->pager_created
&& (MUST_ASK_PAGER(object
, offset
) == TRUE
) && !data_supply
);
1143 dbgTrace(0xBEEF000C, (unsigned int) look_for_page
, (unsigned int) object
); /* (TEST/DEBUG) */
1145 if ((look_for_page
|| (object
== first_object
)) && !must_be_resident
&& !object
->phys_contiguous
) {
1147 * Allocate a new page for this object/offset pair
1151 dbgTrace(0xBEEF000D, (unsigned int) m
, (unsigned int) object
); /* (TEST/DEBUG) */
1153 if (m
== VM_PAGE_NULL
) {
1155 vm_fault_cleanup(object
, first_m
);
1156 thread_interrupt_level(interruptible_state
);
1158 return (VM_FAULT_MEMORY_SHORTAGE
);
1160 vm_page_insert(m
, object
, offset
);
1162 if (look_for_page
&& !must_be_resident
) {
1166 * If the memory manager is not ready, we
1167 * cannot make requests.
1169 if (!object
->pager_ready
) {
1171 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1173 if (m
!= VM_PAGE_NULL
)
1177 "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
1178 (integer_t
)object
, offset
, 0, 0, 0);
1181 * take an extra ref so object won't die
1183 vm_object_reference_locked(object
);
1184 vm_fault_cleanup(object
, first_m
);
1185 counter(c_vm_fault_page_block_backoff_kernel
++);
1187 vm_object_lock(object
);
1188 assert(object
->ref_count
> 0);
1190 if (!object
->pager_ready
) {
1191 wait_result
= vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGER_READY
, interruptible
);
1193 vm_object_unlock(object
);
1194 if (wait_result
== THREAD_WAITING
)
1195 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1196 vm_object_deallocate(object
);
1200 vm_object_unlock(object
);
1201 vm_object_deallocate(object
);
1202 thread_interrupt_level(interruptible_state
);
1204 return (VM_FAULT_RETRY
);
1207 if (!object
->internal
&& !object
->phys_contiguous
&& object
->paging_in_progress
> vm_object_pagein_throttle
) {
1209 * If there are too many outstanding page
1210 * requests pending on this external object, we
1211 * wait for them to be resolved now.
1214 dbgTrace(0xBEEF0010, (unsigned int) m
, (unsigned int) 0); /* (TEST/DEBUG) */
1216 if (m
!= VM_PAGE_NULL
)
1219 * take an extra ref so object won't die
1221 vm_object_reference_locked(object
);
1223 vm_fault_cleanup(object
, first_m
);
1225 counter(c_vm_fault_page_block_backoff_kernel
++);
1227 vm_object_lock(object
);
1228 assert(object
->ref_count
> 0);
1230 if (object
->paging_in_progress
> vm_object_pagein_throttle
) {
1231 vm_object_assert_wait(object
, VM_OBJECT_EVENT_PAGING_IN_PROGRESS
, interruptible
);
1233 vm_object_unlock(object
);
1234 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1235 vm_object_deallocate(object
);
1239 vm_object_unlock(object
);
1240 vm_object_deallocate(object
);
1241 thread_interrupt_level(interruptible_state
);
1243 return (VM_FAULT_RETRY
);
1246 if (m
!= VM_PAGE_NULL
) {
1248 * Indicate that the page is waiting for data
1249 * from the memory manager.
1251 m
->list_req_pending
= TRUE
;
1256 dbgTrace(0xBEEF0012, (unsigned int) object
, (unsigned int) 0); /* (TEST/DEBUG) */
1260 * It's possible someone called vm_object_destroy while we weren't
1261 * holding the object lock. If that has happened, then bail out
1265 pager
= object
->pager
;
1267 if (pager
== MEMORY_OBJECT_NULL
) {
1268 vm_fault_cleanup(object
, first_m
);
1269 thread_interrupt_level(interruptible_state
);
1270 return VM_FAULT_MEMORY_ERROR
;
1274 * We have an absent page in place for the faulting offset,
1275 * so we can release the object lock.
1278 vm_object_unlock(object
);
1281 * If this object uses a copy_call strategy,
1282 * and we are interested in a copy of this object
1283 * (having gotten here only by following a
1284 * shadow chain), then tell the memory manager
1285 * via a flag added to the desired_access
1286 * parameter, so that it can detect a race
1287 * between our walking down the shadow chain
1288 * and its pushing pages up into a copy of
1289 * the object that it manages.
1291 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_CALL
&& object
!= first_object
)
1292 wants_copy_flag
= VM_PROT_WANTS_COPY
;
1294 wants_copy_flag
= VM_PROT_NONE
;
1297 "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
1298 (integer_t
)object
, offset
, (integer_t
)m
,
1299 access_required
| wants_copy_flag
, 0);
1302 * Call the memory manager to retrieve the data.
1304 rc
= memory_object_data_request(
1306 offset
+ object
->paging_offset
,
1308 access_required
| wants_copy_flag
,
1309 (memory_object_fault_info_t
)fault_info
);
1312 dbgTrace(0xBEEF0013, (unsigned int) object
, (unsigned int) rc
); /* (TEST/DEBUG) */
1314 vm_object_lock(object
);
1316 if (rc
!= KERN_SUCCESS
) {
1318 vm_fault_cleanup(object
, first_m
);
1319 thread_interrupt_level(interruptible_state
);
1321 return ((rc
== MACH_SEND_INTERRUPTED
) ?
1322 VM_FAULT_INTERRUPTED
:
1323 VM_FAULT_MEMORY_ERROR
);
1325 if ((interruptible
!= THREAD_UNINT
) && (current_thread()->sched_mode
& TH_MODE_ABORT
)) {
1327 vm_fault_cleanup(object
, first_m
);
1328 thread_interrupt_level(interruptible_state
);
1330 return (VM_FAULT_INTERRUPTED
);
1332 if (m
== VM_PAGE_NULL
&& object
->phys_contiguous
) {
1334 * No page here means that the object we
1335 * initially looked up was "physically
1336 * contiguous" (i.e. device memory). However,
1337 * with Virtual VRAM, the object might not
1338 * be backed by that device memory anymore,
1339 * so we're done here only if the object is
1340 * still "phys_contiguous".
1341 * Otherwise, if the object is no longer
1342 * "phys_contiguous", we need to retry the
1343 * page fault against the object's new backing
1344 * store (different memory object).
1349 * potentially a pagein fault
1350 * if we make it through the state checks
1351 * above, than we'll count it as such
1353 my_fault
= DBG_PAGEIN_FAULT
;
1356 * Retry with same object/offset, since new data may
1357 * be in a different page (i.e., m is meaningless at
1364 * We get here if the object has no pager, or an existence map
1365 * exists and indicates the page isn't present on the pager
1366 * or we're unwiring a page. If a pager exists, but there
1367 * is no existence map, then the m->absent case above handles
1368 * the ZF case when the pager can't provide the page
1371 dbgTrace(0xBEEF0014, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1373 if (object
== first_object
)
1376 assert(m
== VM_PAGE_NULL
);
1379 "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
1380 (integer_t
)object
, offset
, (integer_t
)m
,
1381 (integer_t
)object
->shadow
, 0);
1383 next_object
= object
->shadow
;
1385 if (next_object
== VM_OBJECT_NULL
) {
1387 * we've hit the bottom of the shadown chain,
1388 * fill the page in the top object with zeros.
1390 assert(!must_be_resident
);
1392 if (object
!= first_object
) {
1393 vm_object_paging_end(object
);
1394 vm_object_unlock(object
);
1396 object
= first_object
;
1397 offset
= first_offset
;
1398 vm_object_lock(object
);
1401 assert(m
->object
== object
);
1402 first_m
= VM_PAGE_NULL
;
1405 * check for any conditions that prevent
1406 * us from creating a new zero-fill page
1407 * vm_fault_check will do all of the
1408 * fault cleanup in the case of an error condition
1409 * including resetting the thread_interrupt_level
1411 error
= vm_fault_check(object
, m
, first_m
, interruptible_state
);
1413 if (error
!= VM_FAULT_SUCCESS
)
1416 if (m
== VM_PAGE_NULL
) {
1419 if (m
== VM_PAGE_NULL
) {
1420 vm_fault_cleanup(object
, VM_PAGE_NULL
);
1421 thread_interrupt_level(interruptible_state
);
1423 return (VM_FAULT_MEMORY_SHORTAGE
);
1425 vm_page_insert(m
, object
, offset
);
1427 my_fault
= vm_fault_zero_page(m
, no_zero_fill
);
1433 * Move on to the next object. Lock the next
1434 * object before unlocking the current one.
1436 if ((object
!= first_object
) || must_be_resident
)
1437 vm_object_paging_end(object
);
1439 offset
+= object
->shadow_offset
;
1440 fault_info
->lo_offset
+= object
->shadow_offset
;
1441 fault_info
->hi_offset
+= object
->shadow_offset
;
1442 access_required
= VM_PROT_READ
;
1444 vm_object_lock(next_object
);
1445 vm_object_unlock(object
);
1447 object
= next_object
;
1448 vm_object_paging_begin(object
);
1453 * PAGE HAS BEEN FOUND.
1456 * busy, so that we can play with it;
1457 * not absent, so that nobody else will fill it;
1458 * possibly eligible for pageout;
1460 * The top-level page (first_m) is:
1461 * VM_PAGE_NULL if the page was found in the
1463 * busy, not absent, and ineligible for pageout.
1465 * The current object (object) is locked. A paging
1466 * reference is held for the current and top-level
1471 dbgTrace(0xBEEF0015, (unsigned int) object
, (unsigned int) m
); /* (TEST/DEBUG) */
1473 #if EXTRA_ASSERTIONS
1474 if (m
!= VM_PAGE_NULL
) {
1475 assert(m
->busy
&& !m
->absent
);
1476 assert((first_m
== VM_PAGE_NULL
) ||
1477 (first_m
->busy
&& !first_m
->absent
&&
1478 !first_m
->active
&& !first_m
->inactive
));
1480 #endif /* EXTRA_ASSERTIONS */
1484 * If we found a page, we must have decrypted it before we
1487 if (m
!= VM_PAGE_NULL
) {
1488 ASSERT_PAGE_DECRYPTED(m
);
1492 "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
1493 (integer_t
)object
, offset
, (integer_t
)m
,
1494 (integer_t
)first_object
, (integer_t
)first_m
);
1497 * If the page is being written, but isn't
1498 * already owned by the top-level object,
1499 * we have to copy it into a new page owned
1500 * by the top-level object.
1502 if ((object
!= first_object
) && (m
!= VM_PAGE_NULL
)) {
1505 dbgTrace(0xBEEF0016, (unsigned int) object
, (unsigned int) fault_type
); /* (TEST/DEBUG) */
1507 if (fault_type
& VM_PROT_WRITE
) {
1511 * We only really need to copy if we
1514 assert(!must_be_resident
);
1517 * are we protecting the system from
1518 * backing store exhaustion. If so
1519 * sleep unless we are privileged.
1521 if (vm_backing_store_low
) {
1522 if (!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV
)) {
1525 vm_fault_cleanup(object
, first_m
);
1527 assert_wait((event_t
)&vm_backing_store_low
, THREAD_UNINT
);
1529 thread_block(THREAD_CONTINUE_NULL
);
1530 thread_interrupt_level(interruptible_state
);
1532 return (VM_FAULT_RETRY
);
1536 * If we try to collapse first_object at this
1537 * point, we may deadlock when we try to get
1538 * the lock on an intermediate object (since we
1539 * have the bottom object locked). We can't
1540 * unlock the bottom object, because the page
1541 * we found may move (by collapse) if we do.
1543 * Instead, we first copy the page. Then, when
1544 * we have no more use for the bottom object,
1545 * we unlock it and try to collapse.
1547 * Note that we copy the page even if we didn't
1548 * need to... that's the breaks.
1552 * Allocate a page for the copy
1554 copy_m
= vm_page_grab();
1556 if (copy_m
== VM_PAGE_NULL
) {
1559 vm_fault_cleanup(object
, first_m
);
1560 thread_interrupt_level(interruptible_state
);
1562 return (VM_FAULT_MEMORY_SHORTAGE
);
1565 "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
1566 (integer_t
)object
, offset
,
1567 (integer_t
)m
, (integer_t
)copy_m
, 0);
1569 vm_page_copy(m
, copy_m
);
1572 * If another map is truly sharing this
1573 * page with us, we have to flush all
1574 * uses of the original page, since we
1575 * can't distinguish those which want the
1576 * original from those which need the
1579 * XXXO If we know that only one map has
1580 * access to this page, then we could
1581 * avoid the pmap_disconnect() call.
1584 pmap_disconnect(m
->phys_page
);
1586 assert(!m
->cleaning
);
1589 * We no longer need the old page or object.
1591 PAGE_WAKEUP_DONE(m
);
1592 vm_object_paging_end(object
);
1593 vm_object_unlock(object
);
1595 my_fault
= DBG_COW_FAULT
;
1596 VM_STAT_INCR(cow_faults
);
1597 DTRACE_VM2(cow_fault
, int, 1, (uint64_t *), NULL
);
1598 current_task()->cow_faults
++;
1600 object
= first_object
;
1601 offset
= first_offset
;
1603 vm_object_lock(object
);
1605 * get rid of the place holder
1606 * page that we soldered in earlier
1608 VM_PAGE_FREE(first_m
);
1609 first_m
= VM_PAGE_NULL
;
1612 * and replace it with the
1613 * page we just copied into
1615 assert(copy_m
->busy
);
1616 vm_page_insert(copy_m
, object
, offset
);
1617 copy_m
->dirty
= TRUE
;
1621 * Now that we've gotten the copy out of the
1622 * way, let's try to collapse the top object.
1623 * But we have to play ugly games with
1624 * paging_in_progress to do that...
1626 vm_object_paging_end(object
);
1627 vm_object_collapse(object
, offset
, TRUE
);
1628 vm_object_paging_begin(object
);
1631 *protection
&= (~VM_PROT_WRITE
);
1634 * Now check whether the page needs to be pushed into the
1635 * copy object. The use of asymmetric copy on write for
1636 * shared temporary objects means that we may do two copies to
1637 * satisfy the fault; one above to get the page from a
1638 * shadowed object, and one here to push it into the copy.
1640 try_failed_count
= 0;
1642 while ((copy_object
= first_object
->copy
) != VM_OBJECT_NULL
&& (m
!= VM_PAGE_NULL
)) {
1643 vm_object_offset_t copy_offset
;
1647 dbgTrace(0xBEEF0017, (unsigned int) copy_object
, (unsigned int) fault_type
); /* (TEST/DEBUG) */
1650 * If the page is being written, but hasn't been
1651 * copied to the copy-object, we have to copy it there.
1653 if ((fault_type
& VM_PROT_WRITE
) == 0) {
1654 *protection
&= ~VM_PROT_WRITE
;
1659 * If the page was guaranteed to be resident,
1660 * we must have already performed the copy.
1662 if (must_be_resident
)
1666 * Try to get the lock on the copy_object.
1668 if (!vm_object_lock_try(copy_object
)) {
1670 vm_object_unlock(object
);
1673 mutex_pause(try_failed_count
); /* wait a bit */
1674 vm_object_lock(object
);
1678 try_failed_count
= 0;
1681 * Make another reference to the copy-object,
1682 * to keep it from disappearing during the
1685 vm_object_reference_locked(copy_object
);
1688 * Does the page exist in the copy?
1690 copy_offset
= first_offset
- copy_object
->shadow_offset
;
1692 if (copy_object
->size
<= copy_offset
)
1694 * Copy object doesn't cover this page -- do nothing.
1697 else if ((copy_m
= vm_page_lookup(copy_object
, copy_offset
)) != VM_PAGE_NULL
) {
1699 * Page currently exists in the copy object
1703 * If the page is being brought
1704 * in, wait for it and then retry.
1709 * take an extra ref so object won't die
1711 vm_object_reference_locked(copy_object
);
1712 vm_object_unlock(copy_object
);
1713 vm_fault_cleanup(object
, first_m
);
1714 counter(c_vm_fault_page_block_backoff_kernel
++);
1716 vm_object_lock(copy_object
);
1717 assert(copy_object
->ref_count
> 0);
1718 VM_OBJ_RES_DECR(copy_object
);
1719 vm_object_lock_assert_exclusive(copy_object
);
1720 copy_object
->ref_count
--;
1721 assert(copy_object
->ref_count
> 0);
1722 copy_m
= vm_page_lookup(copy_object
, copy_offset
);
1725 * it's OK if the "copy_m" page is encrypted,
1726 * because we're not moving it nor handling its
1729 if (copy_m
!= VM_PAGE_NULL
&& copy_m
->busy
) {
1730 PAGE_ASSERT_WAIT(copy_m
, interruptible
);
1732 vm_object_unlock(copy_object
);
1733 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
1734 vm_object_deallocate(copy_object
);
1738 vm_object_unlock(copy_object
);
1739 vm_object_deallocate(copy_object
);
1740 thread_interrupt_level(interruptible_state
);
1742 return (VM_FAULT_RETRY
);
1746 else if (!PAGED_OUT(copy_object
, copy_offset
)) {
1748 * If PAGED_OUT is TRUE, then the page used to exist
1749 * in the copy-object, and has already been paged out.
1750 * We don't need to repeat this. If PAGED_OUT is
1751 * FALSE, then either we don't know (!pager_created,
1752 * for example) or it hasn't been paged out.
1753 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
1754 * We must copy the page to the copy object.
1757 if (vm_backing_store_low
) {
1759 * we are protecting the system from
1760 * backing store exhaustion. If so
1761 * sleep unless we are privileged.
1763 if (!(current_task()->priv_flags
& VM_BACKING_STORE_PRIV
)) {
1764 assert_wait((event_t
)&vm_backing_store_low
, THREAD_UNINT
);
1767 VM_OBJ_RES_DECR(copy_object
);
1768 vm_object_lock_assert_exclusive(copy_object
);
1769 copy_object
->ref_count
--;
1770 assert(copy_object
->ref_count
> 0);
1772 vm_object_unlock(copy_object
);
1773 vm_fault_cleanup(object
, first_m
);
1774 thread_block(THREAD_CONTINUE_NULL
);
1775 thread_interrupt_level(interruptible_state
);
1777 return (VM_FAULT_RETRY
);
1781 * Allocate a page for the copy
1783 copy_m
= vm_page_alloc(copy_object
, copy_offset
);
1785 if (copy_m
== VM_PAGE_NULL
) {
1788 VM_OBJ_RES_DECR(copy_object
);
1789 vm_object_lock_assert_exclusive(copy_object
);
1790 copy_object
->ref_count
--;
1791 assert(copy_object
->ref_count
> 0);
1793 vm_object_unlock(copy_object
);
1794 vm_fault_cleanup(object
, first_m
);
1795 thread_interrupt_level(interruptible_state
);
1797 return (VM_FAULT_MEMORY_SHORTAGE
);
1800 * Must copy page into copy-object.
1802 vm_page_copy(m
, copy_m
);
1805 * If the old page was in use by any users
1806 * of the copy-object, it must be removed
1807 * from all pmaps. (We can't know which
1811 pmap_disconnect(m
->phys_page
);
1814 * If there's a pager, then immediately
1815 * page out this page, using the "initialize"
1816 * option. Else, we use the copy.
1818 if ((!copy_object
->pager_created
)
1820 || vm_external_state_get(copy_object
->existence_map
, copy_offset
) == VM_EXTERNAL_STATE_ABSENT
1824 vm_page_lockspin_queues();
1825 assert(!m
->cleaning
);
1826 vm_page_activate(copy_m
);
1827 vm_page_unlock_queues();
1829 copy_m
->dirty
= TRUE
;
1830 PAGE_WAKEUP_DONE(copy_m
);
1833 assert(copy_m
->busy
== TRUE
);
1834 assert(!m
->cleaning
);
1837 * dirty is protected by the object lock
1839 copy_m
->dirty
= TRUE
;
1842 * The page is already ready for pageout:
1843 * not on pageout queues and busy.
1844 * Unlock everything except the
1845 * copy_object itself.
1847 vm_object_unlock(object
);
1850 * Write the page to the copy-object,
1851 * flushing it from the kernel.
1853 vm_pageout_initialize_page(copy_m
);
1856 * Since the pageout may have
1857 * temporarily dropped the
1858 * copy_object's lock, we
1859 * check whether we'll have
1860 * to deallocate the hard way.
1862 if ((copy_object
->shadow
!= object
) || (copy_object
->ref_count
== 1)) {
1863 vm_object_unlock(copy_object
);
1864 vm_object_deallocate(copy_object
);
1865 vm_object_lock(object
);
1870 * Pick back up the old object's
1871 * lock. [It is safe to do so,
1872 * since it must be deeper in the
1875 vm_object_lock(object
);
1878 * Because we're pushing a page upward
1879 * in the object tree, we must restart
1880 * any faults that are waiting here.
1881 * [Note that this is an expansion of
1882 * PAGE_WAKEUP that uses the THREAD_RESTART
1883 * wait result]. Can't turn off the page's
1884 * busy bit because we're not done with it.
1888 thread_wakeup_with_result((event_t
) m
, THREAD_RESTART
);
1892 * The reference count on copy_object must be
1893 * at least 2: one for our extra reference,
1894 * and at least one from the outside world
1895 * (we checked that when we last locked
1898 vm_object_lock_assert_exclusive(copy_object
);
1899 copy_object
->ref_count
--;
1900 assert(copy_object
->ref_count
> 0);
1902 VM_OBJ_RES_DECR(copy_object
);
1903 vm_object_unlock(copy_object
);
1908 *top_page
= first_m
;
1911 "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
1912 (integer_t
)object
, offset
, (integer_t
)m
, (integer_t
)first_m
, 0);
1914 if (m
!= VM_PAGE_NULL
) {
1915 if (my_fault
== DBG_PAGEIN_FAULT
) {
1917 VM_STAT_INCR(pageins
);
1918 DTRACE_VM2(pgin
, int, 1, (uint64_t *), NULL
);
1919 DTRACE_VM2(maj_fault
, int, 1, (uint64_t *), NULL
);
1920 current_task()->pageins
++;
1922 if (m
->object
->internal
) {
1923 DTRACE_VM2(anonpgin
, int, 1, (uint64_t *), NULL
);
1925 DTRACE_VM2(fspgin
, int, 1, (uint64_t *), NULL
);
1929 * evaluate access pattern and update state
1930 * vm_fault_deactivate_behind depends on the
1931 * state being up to date
1933 vm_fault_is_sequential(object
, offset
, fault_info
->behavior
);
1935 vm_fault_deactivate_behind(object
, offset
, fault_info
->behavior
);
1938 *type_of_fault
= my_fault
;
1940 vm_object_unlock(object
);
1942 thread_interrupt_level(interruptible_state
);
1945 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS
, 0); /* (TEST/DEBUG) */
1947 return (VM_FAULT_SUCCESS
);
1950 thread_interrupt_level(interruptible_state
);
1952 if (wait_result
== THREAD_INTERRUPTED
)
1953 return (VM_FAULT_INTERRUPTED
);
1954 return (VM_FAULT_RETRY
);
/*
 * page queue lock must NOT be held
 * m->object must be locked
 *
 * NOTE: m->object could be locked "shared" only if we are called
 * from vm_fault() as part of a soft fault.  If so, we must be
 * careful not to modify the VM object in any way that is not
 * legal under a shared lock...
 */
unsigned long cs_enter_tainted_rejected = 0;
unsigned long cs_enter_tainted_accepted = 0;
1973 vm_fault_enter(vm_page_t m
,
1975 vm_map_offset_t vaddr
,
1978 boolean_t change_wiring
,
1982 unsigned int cache_attr
;
1984 boolean_t previously_pmapped
= m
->pmapped
;
1986 vm_object_lock_assert_held(m
->object
);
1988 mutex_assert(&vm_page_queue_lock
, MA_NOTOWNED
);
1991 if (m
->phys_page
== vm_page_guard_addr
) {
1992 assert(m
->fictitious
);
1993 return KERN_SUCCESS
;
1996 cache_attr
= ((unsigned int)m
->object
->wimg_bits
) & VM_WIMG_MASK
;
1998 if (m
->object
->code_signed
&& pmap
!= kernel_pmap
&&
1999 (!m
->cs_validated
|| m
->wpmapped
)) {
2000 vm_object_lock_assert_exclusive(m
->object
);
2002 if (m
->cs_validated
&& m
->wpmapped
) {
2003 vm_cs_revalidates
++;
2008 * This page comes from a VM object backed by a signed
2009 * memory object. We are about to enter it into a process
2010 * address space, so we need to validate its signature.
2012 /* VM map is locked, so 1 ref will remain on VM object */
2013 vm_page_validate_cs(m
);
2016 if (m
->pmapped
== FALSE
) {
2018 * This is the first time this page is being
2019 * mapped in an address space (pmapped == FALSE).
2021 * Part of that page may still be in the data cache
2022 * and not flushed to memory. In case we end up
2023 * accessing that page via the instruction cache,
2024 * we need to ensure that the 2 caches are in sync.
2026 pmap_sync_page_data_phys(m
->phys_page
);
2028 if ((*type_of_fault
== DBG_CACHE_HIT_FAULT
) && m
->clustered
) {
2030 * found it in the cache, but this
2031 * is the first fault-in of the page (m->pmapped == FALSE)
2032 * so it must have come in as part of
2033 * a cluster... account 1 pagein against it
2035 VM_STAT_INCR(pageins
);
2036 DTRACE_VM2(pgin
, int, 1, (uint64_t *), NULL
);
2038 if (m
->object
->internal
) {
2039 DTRACE_VM2(anonpgin
, int, 1, (uint64_t *), NULL
);
2041 DTRACE_VM2(fspgin
, int, 1, (uint64_t *), NULL
);
2044 current_task()->pageins
++;
2046 *type_of_fault
= DBG_PAGEIN_FAULT
;
2048 VM_PAGE_CONSUME_CLUSTERED(m
);
2050 } else if (cache_attr
!= VM_WIMG_DEFAULT
)
2051 pmap_sync_page_attributes_phys(m
->phys_page
);
2053 if (*type_of_fault
!= DBG_COW_FAULT
) {
2054 DTRACE_VM2(as_fault
, int, 1, (uint64_t *), NULL
);
2056 if (pmap
== kernel_pmap
) {
2057 DTRACE_VM2(kernel_asflt
, int, 1, (uint64_t *), NULL
);
2061 if (m
->cs_tainted
) {
2064 * This page has been tainted and can not be trusted.
2065 * Let's notify the current process and let it take any
2066 * necessary precautions before we enter the tainted page
2067 * into its address space.
2069 if (cs_invalid_page()) {
2070 /* reject the tainted page: abort the page fault */
2071 kr
= KERN_MEMORY_ERROR
;
2072 cs_enter_tainted_rejected
++;
2074 /* proceed with the tainted page */
2076 cs_enter_tainted_accepted
++;
2078 if (cs_debug
|| kr
!= KERN_SUCCESS
) {
2079 printf("CODESIGNING: vm_fault_enter(0x%llx): "
2080 "page %p obj %p off 0x%llx *** TAINTED ***\n",
2081 (long long)vaddr
, m
, m
->object
, m
->offset
);
2084 /* proceed with the valid page */
2088 if (kr
== KERN_SUCCESS
) {
2090 * NOTE: we may only hold the vm_object lock SHARED
2091 * at this point, but the update of pmapped is ok
2092 * since this is the ONLY bit updated behind the SHARED
2093 * lock... however, we need to figure out how to do an atomic
2094 * update on a bit field to make this less fragile... right
2095 * now I don'w know how to coerce 'C' to give me the offset info
2096 * that's needed for an AtomicCompareAndSwap
2099 if (prot
& VM_PROT_WRITE
) {
2100 vm_object_lock_assert_exclusive(m
->object
);
2104 PMAP_ENTER(pmap
, vaddr
, m
, prot
, cache_attr
, wired
);
2108 * Hold queues lock to manipulate
2109 * the page queues. Change wiring
2112 if (change_wiring
) {
2113 vm_page_lockspin_queues();
2116 if (kr
== KERN_SUCCESS
) {
2122 vm_page_unlock_queues();
2125 if (kr
!= KERN_SUCCESS
) {
2126 vm_page_lock_queues();
2127 vm_page_deactivate(m
);
2128 vm_page_unlock_queues();
2130 if (((!m
->active
&& !m
->inactive
) || no_cache
) && !m
->wire_count
&& !m
->throttled
) {
2131 vm_page_lockspin_queues();
2133 * test again now that we hold the page queue lock
2135 if (((!m
->active
&& !m
->inactive
) || no_cache
) && !m
->wire_count
) {
2138 * If this is a no_cache mapping and the page has never been
2139 * mapped before or was previously a no_cache page, then we
2140 * want to leave pages in the speculative state so that they
2141 * can be readily recycled if free memory runs low. Otherwise
2142 * the page is activated as normal.
2145 if (no_cache
&& (!previously_pmapped
|| m
->no_cache
)) {
2148 if (m
->active
|| m
->inactive
)
2149 VM_PAGE_QUEUES_REMOVE(m
);
2151 if (!m
->speculative
)
2152 vm_page_speculate(m
, TRUE
);
2154 } else if (!m
->active
&& !m
->inactive
)
2155 vm_page_activate(m
);
2159 vm_page_unlock_queues();
/*
 * Handle page faults, including pseudo-faults
 * used to change the wiring status of pages.
 *
 * Explicit continuations have been removed.
 *
 * vm_fault and vm_fault_page save mucho state
 * in the moral equivalent of a closure.  The state
 * structure is allocated when first entering vm_fault
 * and deallocated when leaving vm_fault.
 */

extern int _map_enter_debug;

unsigned long vm_fault_collapse_total = 0;
unsigned long vm_fault_collapse_skipped = 0;
2189 vm_map_offset_t vaddr
,
2190 vm_prot_t fault_type
,
2191 boolean_t change_wiring
,
2194 vm_map_offset_t caller_pmap_addr
)
2196 vm_map_version_t version
; /* Map version for verificiation */
2197 boolean_t wired
; /* Should mapping be wired down? */
2198 vm_object_t object
; /* Top-level object */
2199 vm_object_offset_t offset
; /* Top-level offset */
2200 vm_prot_t prot
; /* Protection for mapping */
2201 vm_object_t old_copy_object
; /* Saved copy object */
2202 vm_page_t result_page
; /* Result of vm_fault_page */
2203 vm_page_t top_page
; /* Placeholder page */
2206 vm_page_t m
; /* Fast access to result_page */
2207 kern_return_t error_code
;
2208 vm_object_t cur_object
;
2209 vm_object_offset_t cur_offset
;
2211 vm_object_t new_object
;
2214 boolean_t interruptible_state
;
2215 vm_map_t real_map
= map
;
2216 vm_map_t original_map
= map
;
2217 vm_prot_t original_fault_type
;
2218 struct vm_object_fault_info fault_info
;
2219 boolean_t need_collapse
= FALSE
;
2220 int object_lock_type
= 0;
2221 int cur_object_lock_type
;
2224 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM
, 2)) | DBG_FUNC_START
,
2225 (int)((uint64_t)vaddr
>> 32),
2231 if (get_preemption_level() != 0) {
2232 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM
, 2)) | DBG_FUNC_END
,
2233 (int)((uint64_t)vaddr
>> 32),
2239 return (KERN_FAILURE
);
2241 interruptible_state
= thread_interrupt_level(interruptible
);
2243 VM_STAT_INCR(faults
);
2244 current_task()->faults
++;
2245 original_fault_type
= fault_type
;
2247 if (fault_type
& VM_PROT_WRITE
)
2248 object_lock_type
= OBJECT_LOCK_EXCLUSIVE
;
2250 object_lock_type
= OBJECT_LOCK_SHARED
;
2252 cur_object_lock_type
= OBJECT_LOCK_SHARED
;
2256 * assume we will hit a page in the cache
2257 * otherwise, explicitly override with
2258 * the real fault type once we determine it
2260 type_of_fault
= DBG_CACHE_HIT_FAULT
;
	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */
	fault_type = original_fault_type;
	map = original_map;
	vm_map_lock_read(map);

	kr = vm_map_lookup_locked(&map, vaddr, fault_type,
				  object_lock_type, &version,
				  &object, &offset, &prot, &wired,
				  &fault_info,
				  &real_map);

	if (kr != KERN_SUCCESS) {
		vm_map_unlock_read(map);
		goto done;
	}
	pmap = real_map->pmap;
	fault_info.interruptible = interruptible;

	/*
	 * If the page is wired, we must fault for the current protection
	 * value, to avoid further faults.
	 */
	if (wired) {
		fault_type = prot | VM_PROT_WRITE;

		/*
		 * since we're treating this fault as a 'write'
		 * we must hold the top object lock exclusively
		 */
		if (object_lock_type == OBJECT_LOCK_SHARED) {

			object_lock_type = OBJECT_LOCK_EXCLUSIVE;

			if (vm_object_lock_upgrade(object) == FALSE) {
				/*
				 * couldn't upgrade, so explicitly
				 * take the lock exclusively
				 */
				vm_object_lock(object);
			}
		}
	}

#if	VM_FAULT_CLASSIFY
	/*
	 *	Temporary data gathering code
	 */
	vm_fault_classify(object, offset, fault_type);
#endif
	/*
	 * Fast fault code.  The basic idea is to do as much as
	 * possible while holding the map lock and object locks.
	 * Busy pages are not used until the object lock has to
	 * be dropped to do something (copy, zero fill, pmap enter).
	 * Similarly, paging references aren't acquired until that
	 * point, and object references aren't used.
	 *
	 * If we can figure out what to do
	 * (zero fill, copy on write, pmap enter) while holding
	 * the locks, then it gets done.  Otherwise, we give up,
	 * and use the original fault path (which doesn't hold
	 * the map lock, and relies on busy pages).
	 * The give up cases include:
	 *	- Have to talk to pager.
	 *	- Page is busy, absent or in error.
	 *	- Pager has locked out desired access.
	 *	- Fault needs to be restarted.
	 *	- Have to push page into copy object.
	 *
	 * The code is an infinite loop that moves one level down
	 * the shadow chain each time.  cur_object and cur_offset
	 * refer to the current object being examined.  object and offset
	 * are the original object from the map.  The loop is at the
	 * top level if and only if object and cur_object are the same.
	 *
	 * Invariants:  Map lock is held throughout.  Lock is held on
	 *	original object and cur_object (if different) when
	 *	continuing or exiting loop.
	 */
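/*
 * Illustrative sketch (added for exposition, not part of the original source):
 * the skeleton of the shadow-chain walk described above, with the busy/error,
 * copy-on-write and code-signing handling elided.  The names match the locals
 * used below.
 */
#if 0	/* example only -- never compiled */
	cur_object = object;
	cur_offset = offset;

	while (TRUE) {
		m = vm_page_lookup(cur_object, cur_offset);

		if (m != VM_PAGE_NULL)
			break;				/* resident page found: map it in */

		if (cur_object->pager_created &&
		    MUST_ASK_PAGER(cur_object, cur_offset) == TRUE)
			break;				/* would have to talk to the pager: slow path */

		if (cur_object->shadow == VM_OBJECT_NULL)
			break;				/* bottom of the chain: zero-fill fault */

		cur_offset += cur_object->shadow_offset;
		cur_object = cur_object->shadow;	/* descend one level and try again */
	}
#endif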
	/*
	 * If this page is to be inserted in a copy delay object
	 * for writing, and if the object has a copy, then the
	 * copy delay strategy is implemented in the slow fault page.
	 */
	if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
	    object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE))
		goto handle_copy_delay;

	cur_object = object;
	cur_offset = offset;

	while (TRUE) {
		m = vm_page_lookup(cur_object, cur_offset);

		if (m != VM_PAGE_NULL) {
			if (m->busy) {
				wait_result_t	result;

				/*
				 * in order to do the PAGE_ASSERT_WAIT, we must
				 * have object that 'm' belongs to locked exclusively
				 */
				if (object != cur_object) {
					vm_object_unlock(object);

					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we've already dropped
							 * the top object lock associated with this page
							 * and the current one got dropped due to the
							 * failed upgrade... the state is no longer valid
							 */
							vm_map_unlock_read(map);
							if (real_map != map)
								vm_map_unlock(real_map);

							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				result = PAGE_ASSERT_WAIT(m, interruptible);

				vm_object_unlock(cur_object);

				if (result == THREAD_WAITING) {
					result = thread_block(THREAD_CONTINUE_NULL);

					counter(c_vm_fault_page_block_busy_kernel++);
				}
				if (result == THREAD_AWAKENED || result == THREAD_RESTART)
					goto RetryFault;

				kr = KERN_ABORTED;
				goto done;
			}
			if (m->phys_page == vm_page_guard_addr) {
				/*
				 * Guard page: let the slow path deal with it
				 */
				break;
			}
			if (m->unusual && (m->error || m->restart || m->private || m->absent)) {
				/*
				 * Unusual case... let the slow path deal with it
				 */
				break;
			}
			if (m->encrypted) {
				/*
				 * ENCRYPTED SWAP:
				 * We've soft-faulted (because it's not in the page
				 * table) on an encrypted page.
				 * Keep the page "busy" so that no one messes with
				 * it during the decryption.
				 * Release the extra locks we're holding, keep only
				 * the page's VM object lock.
				 *
				 * in order to set 'busy' on 'm', we must
				 * have object that 'm' belongs to locked exclusively
				 */
				if (object != cur_object) {
					vm_object_unlock(object);

					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						if (vm_object_lock_upgrade(cur_object) == FALSE) {
							/*
							 * couldn't upgrade so go do a full retry
							 * immediately since we've already dropped
							 * the top object lock associated with this page
							 * and the current one got dropped due to the
							 * failed upgrade... the state is no longer valid
							 */
							vm_map_unlock_read(map);
							if (real_map != map)
								vm_map_unlock(real_map);

							goto RetryFault;
						}
					}
				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
				m->busy = TRUE;

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				vm_page_decrypt(m, 0);

				assert(m->busy);
				PAGE_WAKEUP_DONE(m);

				vm_object_unlock(cur_object);
				/*
				 * Retry from the top, in case anything
				 * changed while we were decrypting...
				 */
				goto RetryFault;
			}
			ASSERT_PAGE_DECRYPTED(m);

			if (m->object->code_signed && map != kernel_map &&
			    (!m->cs_validated || m->wpmapped)) {
				/*
				 * We might need to validate this page
				 * against its code signature, so we
				 * want to hold the VM object exclusively.
				 */
				if (object != cur_object) {
					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
						vm_object_unlock(object);
						vm_object_unlock(cur_object);

						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

						vm_map_unlock_read(map);
						if (real_map != map)
							vm_map_unlock(real_map);

						goto RetryFault;
					}

				} else if (object_lock_type == OBJECT_LOCK_SHARED) {

					object_lock_type = OBJECT_LOCK_EXCLUSIVE;

					if (vm_object_lock_upgrade(object) == FALSE) {
						/*
						 * couldn't upgrade, so explicitly take the lock
						 * exclusively and go relookup the page since we
						 * will have dropped the object lock and
						 * a different thread could have inserted
						 * a page at this offset
						 * no need for a full retry since we're
						 * at the top level of the object chain
						 */
						vm_object_lock(object);

						continue;
					}
				}
			}
			/*
			 * Two cases of map in faults:
			 *	- At top level w/o copy object.
			 *	- Read fault anywhere.
			 *		--> must disallow write.
			 */
			if (object == cur_object && object->copy == VM_OBJECT_NULL) {
				if ((fault_type & VM_PROT_WRITE) == 0) {
					/*
					 * This is not a "write" fault, so we
					 * might not have taken the object lock
					 * exclusively and we might not be able
					 * to update the "wpmapped" bit in
					 * vm_fault_enter().
					 * Let's just grant read access to
					 * the page for now and we'll
					 * soft-fault again if we need write
					 * access later...
					 */
					prot &= ~VM_PROT_WRITE;
				}
				goto FastPmapEnter;
			}

			if ((fault_type & VM_PROT_WRITE) == 0) {

				prot &= ~VM_PROT_WRITE;

				/*
				 * Set up to map the page...
				 * mark the page busy, drop
				 * unneeded object lock
				 */
				if (object != cur_object) {
					/*
					 * don't need the original object anymore
					 */
					vm_object_unlock(object);

					/*
					 * switch to the object that has the new page
					 */
					object = cur_object;
					object_lock_type = cur_object_lock_type;
				}
FastPmapEnter:
				/*
				 * prepare for the pmap_enter...
				 * object and map are both locked
				 * m contains valid data
				 * object == m->object
				 * cur_object == NULL or it's been unlocked
				 * no paging references on either object or cur_object
				 */
#if	MACH_KDB
				if (db_watchpoint_list && (fault_type & VM_PROT_WRITE) == 0)
					prot &= ~VM_PROT_WRITE;
#endif
				if (caller_pmap) {
					kr = vm_fault_enter(m,
							    caller_pmap,
							    caller_pmap_addr,
							    prot,
							    wired,
							    change_wiring,
							    fault_info.no_cache,
							    &type_of_fault);
				} else {
					kr = vm_fault_enter(m,
							    pmap,
							    vaddr,
							    prot,
							    wired,
							    change_wiring,
							    fault_info.no_cache,
							    &type_of_fault);
				}

				if (need_collapse == TRUE)
					vm_object_collapse(object, offset, TRUE);

				if (type_of_fault == DBG_PAGEIN_FAULT) {
					/*
					 * evaluate access pattern and update state
					 * vm_fault_deactivate_behind depends on the
					 * state being up to date
					 */
					vm_fault_is_sequential(object, cur_offset, fault_info.behavior);

					vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior);
				}
				/*
				 * That's it, clean up and return.
				 */
				PAGE_WAKEUP_DONE(m);

				vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				goto done;
			}
			/*
			 * COPY ON WRITE FAULT
			 *
			 * If objects match, then
			 * object->copy must not be NULL (else control
			 * would be in previous code block), and we
			 * have a potential push into the copy object
			 * which we can't cope with here.
			 */
			if (cur_object == object) {
				/*
				 * must take the slow path to
				 * deal with the copy push
				 */
				break;
			}
			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
			assert(cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE);

			/*
			 * This is now a shadow based copy on write
			 * fault -- it requires a copy up the shadow
			 * chain.
			 *
			 * Allocate a page in the original top level
			 * object.  Give up if allocate fails.  Also
			 * need to remember current page, as it's the
			 * source of the copy.
			 *
			 * at this point we hold locks on both
			 * object and cur_object... no need to take
			 * paging refs or mark pages BUSY since
			 * we don't drop either object lock until
			 * the page has been copied and inserted
			 */
			cur_m = m;
			m = vm_page_grab();

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}
			/*
			 * Now do the copy.  Mark the source page busy...
			 *
			 *	NOTE: This code holds the map lock across
			 *	the page copy.
			 */
			vm_page_copy(cur_m, m);
			vm_page_insert(m, object, offset);
			m->dirty = TRUE;

			/*
			 * Now cope with the source page and object
			 */
			if (object->ref_count > 1 && cur_m->pmapped)
				pmap_disconnect(cur_m->phys_page);

			need_collapse = TRUE;

			if (!cur_object->internal &&
			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
				/*
				 * The object from which we've just
				 * copied a page is most probably backed
				 * by a vnode.  We don't want to waste too
				 * much time trying to collapse the VM objects
				 * and create a bottleneck when several tasks
				 * map the same file.
				 */
				if (cur_object->copy == object) {
					/*
					 * Shared mapping or no COW yet.
					 * We can never collapse a copy
					 * object into its backing object.
					 */
					need_collapse = FALSE;
				} else if (cur_object->copy == object->shadow &&
					   object->shadow->resident_page_count == 0) {
					/*
					 * Shared mapping after a COW occurred.
					 */
					need_collapse = FALSE;
				}
			}
			vm_object_unlock(cur_object);

			if (need_collapse == FALSE)
				vm_fault_collapse_skipped++;
			vm_fault_collapse_total++;

			type_of_fault = DBG_COW_FAULT;
			VM_STAT_INCR(cow_faults);
			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
			current_task()->cow_faults++;

			goto FastPmapEnter;
		}
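/*
 * Illustrative sketch (added for exposition, not part of the original source):
 * the essential steps of the shadow-chain copy-on-write push performed above.
 */
#if 0	/* example only -- never compiled */
	cur_m = m;				/* page found in the backing (shadow) object      */
	m = vm_page_grab();			/* fresh page for the faulting top-level object   */
	vm_page_copy(cur_m, m);			/* copy the contents                              */
	vm_page_insert(m, object, offset);	/* later faults at this offset now hit the copy   */
	if (object->ref_count > 1 && cur_m->pmapped)
		pmap_disconnect(cur_m->phys_page);	/* drop existing mappings of the source so
							 * other users of it take their own fault */
#endif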
		/*
		 * No page at cur_object, cur_offset... m == NULL
		 */
		if (cur_object->pager_created) {
			if (MUST_ASK_PAGER(cur_object, cur_offset) == TRUE) {
				/*
				 * May have to talk to a pager...
				 * take the slow path.
				 */
				break;
			}
			/*
			 * existence map present and indicates
			 * that the pager doesn't have this page
			 */
		}
		if (cur_object->shadow == VM_OBJECT_NULL) {
			/*
			 * Zero fill fault.  Page gets
			 * inserted into the original object.
			 */
			if (cur_object->shadow_severed) {

				if (object != cur_object)
					vm_object_unlock(cur_object);
				vm_object_unlock(object);

				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				kr = KERN_MEMORY_ERROR;
				goto done;
			}
			if (VM_PAGE_ZFILL_THROTTLED()) {
				/*
				 * drop all of our locks...
				 * wait until the free queue is
				 * pumped back up and then
				 * retry the fault
				 */
				if (object != cur_object)
					vm_object_unlock(cur_object);
				vm_object_unlock(object);
				vm_map_unlock_read(map);
				if (real_map != map)
					vm_map_unlock(real_map);

				if (vm_page_wait((change_wiring) ?
						 THREAD_UNINT :
						 THREAD_ABORTSAFE))
					goto RetryFault;

				kr = KERN_ABORTED;
				goto done;
			}
			if (vm_backing_store_low) {
				/*
				 * we are protecting the system from
				 * backing store exhaustion...
				 * must take the slow path if we're
				 * not privileged
				 */
				if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV))
					break;
			}
			if (cur_object != object) {
				vm_object_unlock(cur_object);

				cur_object = object;
			}
			if (object_lock_type == OBJECT_LOCK_SHARED) {

				object_lock_type = OBJECT_LOCK_EXCLUSIVE;

				if (vm_object_lock_upgrade(object) == FALSE) {
					/*
					 * couldn't upgrade so do a full retry on the fault
					 * since we dropped the object lock which
					 * could allow another thread to insert
					 * a page at this offset
					 */
					vm_map_unlock_read(map);
					if (real_map != map)
						vm_map_unlock(real_map);

					goto RetryFault;
				}
			}
			m = vm_page_alloc(object, offset);

			if (m == VM_PAGE_NULL) {
				/*
				 * no free page currently available...
				 * must take the slow path
				 */
				break;
			}
			/*
			 * Now zero fill page...
			 * the page is probably going to
			 * be written soon, so don't bother
			 * to clear the modified bit
			 *
			 *	NOTE: This code holds the map
			 *	lock across the zero fill.
			 */
			type_of_fault = vm_fault_zero_page(m, map->no_zero_fill);

			goto FastPmapEnter;
		}
		/*
		 * On to the next level in the shadow chain
		 */
		cur_offset += cur_object->shadow_offset;
		new_object = cur_object->shadow;

		/*
		 * take the new_object's lock with the indicated state
		 */
		if (cur_object_lock_type == OBJECT_LOCK_SHARED)
			vm_object_lock_shared(new_object);
		else
			vm_object_lock(new_object);

		if (cur_object != object)
			vm_object_unlock(cur_object);

		cur_object = new_object;
	}
	/*
	 * Cleanup from fast fault failure.  Drop any object
	 * lock other than original and drop map lock.
	 */
	if (object != cur_object)
		vm_object_unlock(cur_object);

	/*
	 * must own the object lock exclusively at this point
	 */
	if (object_lock_type == OBJECT_LOCK_SHARED) {
		object_lock_type = OBJECT_LOCK_EXCLUSIVE;

		if (vm_object_lock_upgrade(object) == FALSE) {
			/*
			 * couldn't upgrade, so explicitly
			 * take the lock exclusively
			 * no need to retry the fault at this
			 * point since "vm_fault_page" will
			 * completely re-evaluate the state
			 */
			vm_object_lock(object);
		}
	}

handle_copy_delay:
	vm_map_unlock_read(map);
	if (real_map != map)
		vm_map_unlock(real_map);

	/*
	 * Make a reference to this object to
	 * prevent its disposal while we are messing with
	 * it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their
	 * shadows (and copies), they will stay around as well.
	 */
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);

	kr = vm_fault_page(object, offset, fault_type,
			   (change_wiring && !wired),
			   &prot, &result_page, &top_page,
			   &type_of_fault,
			   &error_code, map->no_zero_fill,
			   FALSE, &fault_info);

	/*
	 * if kr != VM_FAULT_SUCCESS, then the paging reference
	 * has been dropped and the object unlocked... the ref_count
	 * is still held
	 *
	 * if kr == VM_FAULT_SUCCESS, then the paging reference
	 * is still held along with the ref_count on the original object
	 *
	 * if m != NULL, then the object it belongs to
	 * is returned locked with a paging reference
	 *
	 * if top_page != NULL, then it's BUSY and the
	 * object it belongs to has a paging reference
	 * but is returned unlocked
	 */
	if (kr != VM_FAULT_SUCCESS) {
		/*
		 * we didn't succeed, lose the object reference immediately.
		 */
		vm_object_deallocate(object);

		/*
		 * See why we failed, and take corrective action.
		 */
		switch (kr) {
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait((change_wiring) ?
					 THREAD_UNINT :
					 THREAD_ABORTSAFE))
				goto RetryFault;
			/*
			 * fall thru
			 */
		case VM_FAULT_INTERRUPTED:
			kr = KERN_ABORTED;
			goto done;
		case VM_FAULT_RETRY:
			goto RetryFault;
		case VM_FAULT_MEMORY_ERROR:
			if (error_code)
				kr = error_code;
			else
				kr = KERN_MEMORY_ERROR;
			goto done;
		}
	}
	m = result_page;

	if (m != VM_PAGE_NULL) {
		assert((change_wiring && !wired) ?
		       (top_page == VM_PAGE_NULL) :
		       ((top_page == VM_PAGE_NULL) == (m->object == object)));
	}

	/*
	 * What to do with the resulting page from vm_fault_page
	 * if it doesn't get entered into the physical map:
	 */
#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lockspin_queues();			\
	if (!m->active && !m->inactive && !m->throttled)\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END
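/*
 * Note (added for exposition): MACRO_BEGIN/MACRO_END wrap the expansion in a
 * do { ... } while (0)-style statement, so RELEASE_PAGE() can be used wherever
 * a single statement is expected, e.g.:
 *
 *	if (m != VM_PAGE_NULL)
 *		RELEASE_PAGE(m);
 */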
	/*
	 * We must verify that the maps have not changed
	 * since our last lookup.
	 */
	if (m != VM_PAGE_NULL) {
		old_copy_object = m->object->copy;
		vm_object_unlock(m->object);
	} else
		old_copy_object = VM_OBJECT_NULL;

	/*
	 * no object locks are held at this point
	 */
	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		/*
		 * To avoid trying to write_lock the map while another
		 * thread has it read_locked (in vm_map_pageable), we
		 * do not try for write permission.  If the page is
		 * still writable, we will get write permission.  If it
		 * is not, or has been marked needs_copy, we enter the
		 * mapping without write permission, and will merely
		 * take another fault.
		 */
		map = original_map;
		vm_map_lock_read(map);

		kr = vm_map_lookup_locked(&map, vaddr,
					  fault_type & ~VM_PROT_WRITE,
					  OBJECT_LOCK_EXCLUSIVE, &version,
					  &retry_object, &retry_offset, &retry_prot,
					  &wired,
					  &fault_info,
					  &real_map);
		pmap = real_map->pmap;

		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(map);

			if (m != VM_PAGE_NULL) {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup and do the
				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
				 */
				vm_object_lock(m->object);

				RELEASE_PAGE(m);

				vm_fault_cleanup(m->object, top_page);
			} else {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup
				 */
				vm_object_lock(object);

				vm_fault_cleanup(object, top_page);
			}
			vm_object_deallocate(object);

			goto done;
		}
		vm_object_unlock(retry_object);

		if ((retry_object != object) || (retry_offset != offset)) {

			vm_map_unlock_read(map);
			if (real_map != map)
				vm_map_unlock(real_map);

			if (m != VM_PAGE_NULL) {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup and do the
				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
				 */
				vm_object_lock(m->object);

				RELEASE_PAGE(m);

				vm_fault_cleanup(m->object, top_page);
			} else {
				/*
				 * retake the lock so that
				 * we can drop the paging reference
				 * in vm_fault_cleanup
				 */
				vm_object_lock(object);

				vm_fault_cleanup(object, top_page);
			}
			vm_object_deallocate(object);

			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object
		 * has been copied while we left the map unlocked.
		 */
		prot &= retry_prot;
	}
	if (m != VM_PAGE_NULL) {
		vm_object_lock(m->object);

		if (m->object->copy != old_copy_object) {
			/*
			 * The copy object changed while the top-level object
			 * was unlocked, so take away write permission.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	} else
		vm_object_lock(object);

	/*
	 * If we want to wire down this page, but no longer have
	 * adequate permissions, we must start all over.
	 */
	if (wired && (fault_type != (prot | VM_PROT_WRITE))) {

		vm_map_verify_done(map, &version);
		if (real_map != map)
			vm_map_unlock(real_map);

		if (m != VM_PAGE_NULL) {
			RELEASE_PAGE(m);

			vm_fault_cleanup(m->object, top_page);
		} else
			vm_fault_cleanup(object, top_page);

		vm_object_deallocate(object);

		goto RetryFault;
	}
	if (m != VM_PAGE_NULL) {
		/*
		 * Put this page into the physical map.
		 * We had to do the unlock above because pmap_enter
		 * may cause other faults.  The page may be on
		 * the pageout queues.  If the pageout daemon comes
		 * across the page, it will remove it from the queues.
		 */
		if (caller_pmap) {
			kr = vm_fault_enter(m,
					    caller_pmap,
					    caller_pmap_addr,
					    prot,
					    wired,
					    change_wiring,
					    fault_info.no_cache,
					    &type_of_fault);
		} else {
			kr = vm_fault_enter(m,
					    pmap,
					    vaddr,
					    prot,
					    wired,
					    change_wiring,
					    fault_info.no_cache,
					    &type_of_fault);
		}
		if (kr != KERN_SUCCESS) {
			/* abort this page fault */
			vm_map_verify_done(map, &version);
			if (real_map != map)
				vm_map_unlock(real_map);
			PAGE_WAKEUP_DONE(m);
			vm_fault_cleanup(m->object, top_page);
			vm_object_deallocate(object);

			goto done;
		}
	} else {

		vm_map_entry_t		entry;
		vm_map_offset_t		laddr;
		vm_map_offset_t		ldelta, hdelta;

		/*
		 * do a pmap block mapping from the physical address
		 */

#ifdef	ppc
		/* While we do not worry about execution protection in   */
		/* general, certain pages may have instruction execution */
		/* disallowed.  We will check here, and if not allowed   */
		/* to execute, we return with a protection failure.      */

		if ((fault_type & VM_PROT_EXECUTE) &&
		    (!pmap_eligible_for_execute((ppnum_t)(object->shadow_offset >> 12)))) {

			vm_map_verify_done(map, &version);

			if (real_map != map)
				vm_map_unlock(real_map);

			vm_fault_cleanup(object, top_page);
			vm_object_deallocate(object);

			kr = KERN_PROTECTION_FAILURE;
			goto done;
		}
#endif	/* ppc */

		if (real_map != map)
			vm_map_unlock(real_map);

		if (original_map != map) {
			vm_map_unlock_read(map);
			vm_map_lock_read(original_map);
			map = original_map;
		}
		real_map = map;

		laddr = vaddr;
		hdelta = 0xFFFFF000;
		ldelta = 0xFFFFF000;

		while (vm_map_lookup_entry(map, laddr, &entry)) {
			if (ldelta > (laddr - entry->vme_start))
				ldelta = laddr - entry->vme_start;
			if (hdelta > (entry->vme_end - laddr))
				hdelta = entry->vme_end - laddr;
			if (entry->is_sub_map) {

				laddr = (laddr - entry->vme_start)
					+ entry->offset;
				vm_map_lock_read(entry->object.sub_map);

				if (map != real_map)
					vm_map_unlock_read(map);
				if (entry->use_pmap) {
					vm_map_unlock_read(real_map);
					real_map = entry->object.sub_map;
				}
				map = entry->object.sub_map;

			} else {
				break;
			}
		}

		if (vm_map_lookup_entry(map, laddr, &entry) &&
		    (entry->object.vm_object != NULL) &&
		    (entry->object.vm_object == object)) {

			if (caller_pmap) {
				/*
				 * Set up a block mapped area
				 */
				pmap_map_block(caller_pmap,
					       (addr64_t)(caller_pmap_addr - ldelta),
					       (((vm_map_offset_t) (entry->object.vm_object->shadow_offset)) +
						entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
					       ((ldelta + hdelta) >> 12), prot,
					       (VM_WIMG_MASK & (int)object->wimg_bits), 0);
			} else {
				/*
				 * Set up a block mapped area
				 */
				pmap_map_block(real_map->pmap,
					       (addr64_t)(vaddr - ldelta),
					       (((vm_map_offset_t)(entry->object.vm_object->shadow_offset)) +
						entry->offset + (laddr - entry->vme_start) - ldelta) >> 12,
					       ((ldelta + hdelta) >> 12), prot,
					       (VM_WIMG_MASK & (int)object->wimg_bits), 0);
			}
		}
	}

	/*
	 * Unlock everything, and return
	 */
	vm_map_verify_done(map, &version);
	if (real_map != map)
		vm_map_unlock(real_map);

	if (m != VM_PAGE_NULL) {
		PAGE_WAKEUP_DONE(m);

		vm_fault_cleanup(m->object, top_page);
	} else
		vm_fault_cleanup(object, top_page);

	vm_object_deallocate(object);

#undef	RELEASE_PAGE

	kr = KERN_SUCCESS;
done:
	thread_interrupt_level(interruptible_state);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
			      (int)((uint64_t)vaddr >> 32),
			      (int)vaddr,
			      kr,
			      type_of_fault,
			      0);

	return (kr);
}
/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
kern_return_t
vm_fault_wire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	register kern_return_t	rc;

	assert(entry->in_transition);

	if ((entry->object.vm_object != NULL) &&
	    !entry->is_sub_map &&
	    entry->object.vm_object->phys_contiguous) {
		return KERN_SUCCESS;
	}

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */
	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */
	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		if ((rc = vm_fault_wire_fast(
			map, va, entry, pmap,
			pmap_addr + (va - entry->vme_start)
			)) != KERN_SUCCESS) {
			rc = vm_fault(map, va, VM_PROT_NONE, TRUE,
				      (pmap == kernel_pmap) ?
				      THREAD_UNINT : THREAD_ABORTSAFE,
				      pmap, pmap_addr + (va - entry->vme_start));
			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
		}

		if (rc != KERN_SUCCESS) {
			struct vm_map_entry	tmp_entry = *entry;

			/* unwire wired pages */
			tmp_entry.vme_end = va;
			vm_fault_unwire(map,
					&tmp_entry, FALSE, pmap, pmap_addr);

			return rc;
		}
	}
	return KERN_SUCCESS;
}
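/*
 * Illustrative usage (added for exposition, not from the original source): a
 * hypothetical caller that wires an in-transition entry and later undoes the
 * wiring would pair the two routines roughly like this ("entry", "pmap" and
 * "pmap_addr" are placeholder names):
 */
#if 0	/* example only -- never compiled */
	kr = vm_fault_wire(map, entry, pmap, pmap_addr);
	if (kr != KERN_SUCCESS)
		return kr;	/* vm_fault_wire already unwired the partially wired range */
	/* ... use the wired range ... */
	vm_fault_unwire(map, entry, FALSE, pmap, pmap_addr);	/* FALSE: unwire, don't free the pages */
#endif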
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	boolean_t	deallocate,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr)
{
	register vm_map_offset_t	va;
	register vm_map_offset_t	end_addr = entry->vme_end;
	vm_object_t		object;
	struct vm_object_fault_info fault_info;

	object = (entry->is_sub_map)
			? VM_OBJECT_NULL : entry->object.vm_object;

	/*
	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
	 * do anything since such memory is wired by default.  So we don't have
	 * anything to undo here.
	 */
	if (object != VM_OBJECT_NULL && object->phys_contiguous)
		return;

	fault_info.interruptible = THREAD_UNINT;
	fault_info.behavior = entry->behavior;
	fault_info.user_tag = entry->alias;
	fault_info.lo_offset = entry->offset;
	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
	fault_info.no_cache = entry->no_cache;

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */
	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {

		pmap_change_wiring(pmap,
				   pmap_addr + (va - entry->vme_start), FALSE);

		if (object == VM_OBJECT_NULL) {
			(void) vm_fault(map, va, VM_PROT_NONE,
					TRUE, THREAD_UNINT, pmap, pmap_addr);
		} else {
			vm_prot_t	prot;
			vm_page_t	result_page;
			vm_page_t	top_page;
			vm_object_t	result_object;
			vm_fault_return_t result;

			fault_info.cluster_size = end_addr - va;

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				XPR(XPR_VM_FAULT,
				    "vm_fault_unwire -> vm_fault_page\n",
				    0,0,0,0,0);
				result = vm_fault_page(
					object,
					entry->offset + (va - entry->vme_start),
					VM_PROT_NONE, TRUE,
					&prot, &result_page, &top_page,
					(int *)0,
					NULL, map->no_zero_fill,
					FALSE, &fault_info);
			} while (result == VM_FAULT_RETRY);

			/*
			 * If this was a mapping to a file on a device that has been forcibly
			 * unmounted, then we won't get a page back from vm_fault_page().  Just
			 * move on to the next one in case the remaining pages are mapped from
			 * different objects.  During a forced unmount, the object is terminated
			 * so the alive flag will be false if this happens.  A forced unmount
			 * will occur when an external disk is unplugged before the user does an
			 * eject, so we don't want to panic in that situation.
			 */
			if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
				continue;

			if (result != VM_FAULT_SUCCESS)
				panic("vm_fault_unwire: failure");

			result_object = result_page->object;

			if (deallocate) {
				assert(result_page->phys_page !=
				       vm_page_fictitious_addr);
				pmap_disconnect(result_page->phys_page);
				VM_PAGE_FREE(result_page);
			} else {
				vm_page_lockspin_queues();
				vm_page_unwire(result_page);
				vm_page_unlock_queues();
				PAGE_WAKEUP_DONE(result_page);
			}
			vm_fault_cleanup(result_object, top_page);
		}
	}

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */
	pmap_pageable(pmap, pmap_addr,
		      pmap_addr + (end_addr - entry->vme_start), TRUE);
}
/*
 *	vm_fault_wire_fast:
 *
 *	Handle common case of a wire down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
kern_return_t
vm_fault_wire_fast(
	__unused vm_map_t	map,
	vm_map_offset_t	va,
	vm_map_entry_t	entry,
	pmap_t		pmap,
	vm_map_offset_t	pmap_addr)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	register vm_page_t	m;
	vm_prot_t		prot;
	thread_t		thread = current_thread();
	int			type_of_fault;
	kern_return_t		kr;

	VM_STAT_INCR(faults);

	if (thread != THREAD_NULL && thread->task != TASK_NULL)
		thread->task->faults++;

/*
 *	Recovery actions
 */
#undef	RELEASE_PAGE
#define RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lockspin_queues();			\
	vm_page_unwire(m);				\
	vm_page_unlock_queues();			\
}

#undef	UNLOCK_THINGS
#define UNLOCK_THINGS	{				\
	vm_object_paging_end(object);			\
	vm_object_unlock(object);			\
}

#undef	UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(object);			\
}
/*
 *	Give up and have caller do things the hard way.
 */
#define GIVE_UP {					\
	UNLOCK_AND_DEALLOCATE;				\
	return(KERN_FAILURE);				\
}

	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map)
		return(KERN_FAILURE);

	/*
	 *	Find the backing store object and offset into it.
	 */
	object = entry->object.vm_object;
	offset = (va - entry->vme_start) + entry->offset;
	prot = entry->protection;

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */
	vm_object_lock(object);
	vm_object_reference_locked(object);
	vm_object_paging_begin(object);

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 */

	/*
	 *	Look for page in top-level object.  If it's not there or
	 *	there's something going on, give up.
	 *	ENCRYPTED SWAP: use the slow fault path, since we'll need to
	 *	decrypt the page before wiring it down.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
	    (m->unusual && ( m->error || m->restart || m->absent))) {

		GIVE_UP;
	}
	ASSERT_PAGE_DECRYPTED(m);

	if (m->fictitious &&
	    m->phys_page == vm_page_guard_addr) {
		/*
		 * Guard pages are fictitious pages and are never
		 * entered into a pmap, so let's say it's been wired...
		 */
		kr = KERN_SUCCESS;
		goto done;
	}

	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */
	vm_page_lockspin_queues();
	vm_page_wire(m);
	vm_page_unlock_queues();

	/*
	 *	Mark page busy for other threads.
	 */
	assert(!m->busy);
	m->busy = TRUE;

	/*
	 *	Give up if the page is being written and there's a copy object
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

	/*
	 *	Put this page into the physical map.
	 */
	type_of_fault = DBG_CACHE_HIT_FAULT;
	kr = vm_fault_enter(m,
			    pmap,
			    pmap_addr,
			    prot,
			    TRUE,
			    FALSE,
			    FALSE,
			    &type_of_fault);

done:
	/*
	 *	Unlock everything, and return
	 */
	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return kr;
}
/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */
static void
vm_fault_copy_cleanup(
	vm_page_t	page,
	vm_page_t	top_page)
{
	vm_object_t	object = page->object;

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	vm_page_lockspin_queues();
	if (!page->active && !page->inactive && !page->throttled)
		vm_page_activate(page);
	vm_page_unlock_queues();
	vm_fault_cleanup(object, top_page);
}

static void
vm_fault_copy_dst_cleanup(
	vm_page_t	page)
{
	vm_object_t	object;

	if (page != VM_PAGE_NULL) {
		object = page->object;
		vm_object_lock(object);
		vm_page_lockspin_queues();
		vm_page_unwire(page);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}

/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"copy_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		requested.
 */
kern_return_t
vm_fault_copy(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_map_size_t		*copy_size,		/* INOUT */
	vm_object_t		dst_object,
	vm_object_offset_t	dst_offset,
	vm_map_t		dst_map,
	vm_map_version_t	*dst_version,
	int			interruptible)
{
	vm_page_t		result_page;

	vm_page_t		src_page;
	vm_page_t		src_top_page;
	vm_prot_t		src_prot;

	vm_page_t		dst_page;
	vm_page_t		dst_top_page;
	vm_prot_t		dst_prot;

	vm_map_size_t		amount_left;
	vm_object_t		old_copy_object;
	kern_return_t		error = 0;

	vm_map_size_t		part_size;
	struct vm_object_fault_info fault_info_src;
	struct vm_object_fault_info fault_info_dst;

	/*
	 * In order not to confuse the clustered pageins, align
	 * the different offsets on a page boundary.
	 */

#define	RETURN(x)					\
	MACRO_BEGIN					\
	*copy_size -= amount_left;			\
	MACRO_RETURN(x);				\
	MACRO_END

	amount_left = *copy_size;

	fault_info_src.interruptible = interruptible;
	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_src.user_tag = 0;
	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
	fault_info_src.no_cache = FALSE;

	fault_info_dst.interruptible = interruptible;
	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info_dst.user_tag = 0;
	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
	fault_info_dst.no_cache = FALSE;
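/*
 * Worked example (added for exposition): amount_left starts at *copy_size and
 * is decremented by part_size on every trip through the loop below.  If the
 * copy is abandoned early, RETURN() reports the progress made so far, e.g.
 * with *copy_size == 3 * PAGE_SIZE on entry and an interruption after one
 * full page has been copied:
 *
 *	amount_left == 2 * PAGE_SIZE
 *	RETURN(MACH_SEND_INTERRUPTED)  =>  *copy_size == 3 * PAGE_SIZE - amount_left
 *	                                               == 1 * PAGE_SIZE
 */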
	do { /* while (amount_left > 0) */
		/*
		 * There may be a deadlock if both source and destination
		 * pages are the same.  To avoid this deadlock, the copy must
		 * start by getting the destination page in order to apply
		 * COW semantics if any.
		 */

	RetryDestinationFault: ;

		dst_prot = VM_PROT_WRITE|VM_PROT_READ;

		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		fault_info_dst.cluster_size = amount_left;

		XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
		switch (vm_fault_page(dst_object,
				      vm_object_trunc_page(dst_offset),
				      VM_PROT_WRITE|VM_PROT_READ,
				      FALSE,
				      &dst_prot, &dst_page, &dst_top_page,
				      (int *)0,
				      &error,
				      dst_map->no_zero_fill,
				      FALSE, &fault_info_dst)) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible))
				goto RetryDestinationFault;
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			RETURN(MACH_SEND_INTERRUPTED);
		case VM_FAULT_MEMORY_ERROR:
			if (error)
				return (error);
			else
				return(KERN_MEMORY_ERROR);
		}
		assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);

		old_copy_object = dst_page->object->copy;

		/*
		 * There exists the possibility that the source and
		 * destination page are the same.  But we can't
		 * easily determine that now.  If they are the
		 * same, the call to vm_fault_page() for the
		 * destination page will deadlock.  To prevent this we
		 * wire the page so we can drop busy without having
		 * the page daemon steal the page.  We clean up the
		 * top page but keep the paging reference on the object
		 * holding the dest page so it doesn't go away.
		 */
		vm_page_lockspin_queues();
		vm_page_wire(dst_page);
		vm_page_unlock_queues();
		PAGE_WAKEUP_DONE(dst_page);
		vm_object_unlock(dst_page->object);

		if (dst_top_page != VM_PAGE_NULL) {
			vm_object_lock(dst_object);
			VM_PAGE_FREE(dst_top_page);
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		}

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */
			src_page = VM_PAGE_NULL;
			result_page = VM_PAGE_NULL;
		} else {
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object,
						  vm_object_trunc_page(src_offset));
			if (src_page == dst_page) {
				src_prot = dst_prot;
				result_page = VM_PAGE_NULL;
			} else {
				src_prot = VM_PROT_READ;
				vm_object_paging_begin(src_object);

				fault_info_src.cluster_size = amount_left;

			RetrySourceFault:
				XPR(XPR_VM_FAULT,
				    "vm_fault_copy(2) -> vm_fault_page\n",
				    0,0,0,0,0);
				switch (vm_fault_page(
						src_object,
						vm_object_trunc_page(src_offset),
						VM_PROT_READ, FALSE,
						&src_prot,
						&result_page, &src_top_page,
						(int *)0, &error, FALSE,
						FALSE, &fault_info_src)) {

				case VM_FAULT_SUCCESS:
					break;
				case VM_FAULT_RETRY:
					goto RetrySourceFault;
				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible))
						goto RetrySourceFault;
					/* fall thru */
				case VM_FAULT_INTERRUPTED:
					vm_fault_copy_dst_cleanup(dst_page);
					RETURN(MACH_SEND_INTERRUPTED);
				case VM_FAULT_MEMORY_ERROR:
					vm_fault_copy_dst_cleanup(dst_page);
					if (error)
						return (error);
					else
						return(KERN_MEMORY_ERROR);
				}

				assert((src_top_page == VM_PAGE_NULL) ==
				       (result_page->object == src_object));

				assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
				vm_object_unlock(result_page->object);
			}
		}
		/*
		 *	Verify that the map has not changed
		 *	while the copy was being prepared.
		 */
		if (!vm_map_verify(dst_map, dst_version)) {
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_lock(dst_page->object);

		if (dst_page->object->copy != old_copy_object) {
			vm_object_unlock(dst_page->object);
			vm_map_verify_done(dst_map, dst_version);
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_unlock(dst_page->object);

		/*
		 *	Copy the page, and note that it is dirty.
		 */
		if (!page_aligned(src_offset) ||
		    !page_aligned(dst_offset) ||
		    !page_aligned(amount_left)) {

			vm_object_offset_t	src_po,
						dst_po;

			src_po = src_offset - vm_object_trunc_page(src_offset);
			dst_po = dst_offset - vm_object_trunc_page(dst_offset);

			if (dst_po > src_po) {
				part_size = PAGE_SIZE - dst_po;
			} else {
				part_size = PAGE_SIZE - src_po;
			}
			if (part_size > (amount_left)) {
				part_size = amount_left;
			}
			if (result_page == VM_PAGE_NULL) {
				vm_page_part_zero_fill(dst_page,
						       dst_po, part_size);
			} else {
				vm_page_part_copy(result_page, src_po,
						  dst_page, dst_po, part_size);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					dst_page->dirty = TRUE;
					vm_object_unlock(dst_page->object);
				}
			}
		} else {
			part_size = PAGE_SIZE;

			if (result_page == VM_PAGE_NULL)
				vm_page_zero_fill(dst_page);
			else {
				vm_page_copy(result_page, dst_page);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					dst_page->dirty = TRUE;
					vm_object_unlock(dst_page->object);
				}
			}
		}

		/*
		 *	Unlock everything, and return
		 */
		vm_map_verify_done(dst_map, dst_version);

		if (result_page != VM_PAGE_NULL && src_page != dst_page)
			vm_fault_copy_cleanup(result_page, src_top_page);
		vm_fault_copy_dst_cleanup(dst_page);

		amount_left -= part_size;
		src_offset += part_size;
		dst_offset += part_size;

	} while (amount_left > 0);
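/*
 * Worked example (added for exposition) for the partially aligned case above:
 * with PAGE_SIZE == 0x1000, src_offset == 0x10200 and dst_offset == 0x20800,
 *
 *	src_po    == 0x200				(offset within the source page)
 *	dst_po    == 0x800				(offset within the destination page)
 *	part_size == PAGE_SIZE - dst_po == 0x800	(since dst_po > src_po)
 *
 * so only the bytes up to the end of the current destination page are copied;
 * src_offset and dst_offset then advance by part_size and the next iteration
 * handles the remainder (part_size is also capped by amount_left when less
 * data remains).
 */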
	RETURN(KERN_SUCCESS);
#undef	RETURN

	/*NOTREACHED*/
}

#if	VM_FAULT_CLASSIFY
/*
 *	Temporary statistics gathering support.
 */

/*
 *	Statistics arrays:
 */
#define VM_FAULT_TYPES_MAX	5
#define	VM_FAULT_LEVEL_MAX	8

int	vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];

#define	VM_FAULT_TYPE_ZERO_FILL	0
#define	VM_FAULT_TYPE_MAP_IN	1
#define	VM_FAULT_TYPE_PAGER	2
#define	VM_FAULT_TYPE_COPY	3
#define	VM_FAULT_TYPE_OTHER	4

void
vm_fault_classify(vm_object_t		object,
		  vm_object_offset_t	offset,
		  vm_prot_t		fault_type)
{
	int		type, level = 0;
	vm_page_t	m;

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			if (m->busy || m->error || m->restart || m->absent) {
				type = VM_FAULT_TYPE_OTHER;
				break;
			}
			if (((fault_type & VM_PROT_WRITE) == 0) ||
			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
				type = VM_FAULT_TYPE_MAP_IN;
				break;
			}
			type = VM_FAULT_TYPE_COPY;
			break;
		} else {
			if (object->pager_created) {
				type = VM_FAULT_TYPE_PAGER;
				break;
			}
			if (object->shadow == VM_OBJECT_NULL) {
				type = VM_FAULT_TYPE_ZERO_FILL;
				break;
			}

			offset += object->shadow_offset;
			object = object->shadow;
			level++;
			continue;
		}
	}

	if (level > VM_FAULT_LEVEL_MAX)
		level = VM_FAULT_LEVEL_MAX;

	vm_fault_stats[type][level] += 1;

	return;
}

/* cleanup routine to call from debugger */
void
vm_fault_classify_init(void)
{
	int type, level;

	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
			vm_fault_stats[type][level] = 0;
		}
	}

	return;
}
#endif	/* VM_FAULT_CLASSIFY */
extern int cs_validation;

void
vm_page_validate_cs(
	vm_page_t	page)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		koffset;
	vm_map_size_t		ksize;
	vm_offset_t		kaddr;
	kern_return_t		kr;
	memory_object_t		pager;
	void			*blobs;
	boolean_t		validated, tainted;
	boolean_t		busy_page;

	vm_object_lock_assert_held(page->object);

	if (!cs_validation) {
		return;
	}

	if (page->cs_validated && !page->cs_tainted && page->wpmapped) {
		vm_object_lock_assert_exclusive(page->object);

		/*
		 * This page has already been validated and found to
		 * be valid.  However, it was mapped for "write" access
		 * sometime in the past, so we have to check if it was
		 * modified.  If so, it needs to be revalidated.
		 * If the page was already found to be "tainted", no
		 * need to re-validate.
		 */
		vm_cs_query_modified++;
		page->dirty = pmap_is_modified(page->phys_page);

		if (page->dirty) {
			/*
			 * The page is dirty, so let's clear its
			 * "validated" bit and re-validate it.
			 */
			if (cs_debug) {
				printf("CODESIGNING: vm_page_validate_cs: "
				       "page %p obj %p off 0x%llx "
				       "was modified\n",
				       page, page->object, page->offset);
			}
			page->cs_validated = FALSE;
			vm_cs_validated_dirtied++;
		}
	}

	if (page->cs_validated) {
		return;
	}

	vm_object_lock_assert_exclusive(page->object);

	object = page->object;
	assert(object->code_signed);
	offset = page->offset;

	busy_page = page->busy;
	if (!busy_page) {
		/* keep page busy while we map (and unlock) the VM object */
		page->busy = TRUE;
	}

	/*
	 * Take a paging reference on the VM object
	 * to protect it from collapse or bypass,
	 * and keep it from disappearing too.
	 */
	vm_object_paging_begin(object);

	/* map the page in the kernel address space */
	ksize = PAGE_SIZE_64;
	kr = vm_paging_map_object(&koffset,
				  page,
				  object,
				  offset,
				  &ksize,
				  FALSE); /* can't unlock object ! */
	if (kr != KERN_SUCCESS) {
		panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
	}
	kaddr = CAST_DOWN(vm_offset_t, koffset);

	/*
	 * Since we get here to validate a page that was brought in by
	 * the pager, we know that this pager is all setup and ready
	 * by now.
	 */
	assert(!object->internal);
	assert(object->pager != NULL);
	assert(object->pager_ready);

	if (!object->alive || object->terminating || object->pager == NULL) {
		/*
		 * The object is terminating and we don't have its pager
		 * so we can't validate the data...
		 */
		goto out;
	}

	pager = object->pager;
	assert(pager != NULL);

	kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
	if (kr != KERN_SUCCESS) {
		blobs = NULL;
	}

	/* verify the SHA1 hash for this page */
	validated = cs_validate_page(blobs,
				     offset + object->paging_offset,
				     (const void *)kaddr,
				     &tainted);

	assert(object == page->object);
	vm_object_lock_assert_exclusive(object);

	page->cs_validated = validated;
	if (validated) {
		page->cs_tainted = tainted;
	}

out:
	if (!busy_page) {
		PAGE_WAKEUP_DONE(page);
	}

	/* unmap the map from the kernel address space */
	vm_paging_unmap_object(object, koffset, koffset + ksize);

	vm_object_paging_end(object);