/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Page fault handling module.
 */
/* remove after component interface available */
extern int	vnode_pager_workaround;
extern int	device_pager_workaround;
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>

#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <mach/message.h>	/* for error codes */
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <ppc/proc_reg.h>
#include <ppc/pmap_internals.h>
#include <vm/task_working_set.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
				/* For memory_object_data_{request,unlock} */
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>
#define VM_FAULT_CLASSIFY	0
#define VM_FAULT_STATIC_CONFIG	1

#define TRACEFAULTPAGE 0	/* (TEST/DEBUG) */

int		vm_object_absent_max = 50;

int		vm_fault_debug = 0;
boolean_t	vm_page_deactivate_behind = TRUE;

#if	!VM_FAULT_STATIC_CONFIG
boolean_t	vm_fault_dirty_handling = FALSE;
boolean_t	vm_fault_interruptible = FALSE;
boolean_t	software_reference_bits = TRUE;
#endif	/* !VM_FAULT_STATIC_CONFIG */

#if	MACH_KDB
extern struct db_watchpoint *db_watchpoint_list;
#endif	/* MACH_KDB */
/* Forward declarations of internal routines. */
extern kern_return_t vm_fault_wire_fast(
		vm_map_t	map,
		vm_offset_t	va,
		vm_map_entry_t	entry,
		pmap_t		pmap);

extern void vm_fault_continue(void);

extern void vm_fault_copy_cleanup(
		vm_page_t	page,
		vm_page_t	top_page);

extern void vm_fault_copy_dst_cleanup(
		vm_page_t	page);

#if	VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t	object,
			  vm_object_offset_t	offset,
			  vm_prot_t		fault_type);

extern void vm_fault_classify_init(void);
#endif	/* VM_FAULT_CLASSIFY */
/*
 *	Routine:	vm_fault_init
 *	Purpose:
 *		Initialize our private data structures.
 */
void
vm_fault_init(void)
{
}

/*
 *	Routine:	vm_fault_cleanup
 *	Purpose:
 *		Clean up the result of vm_fault_page.
 *	Results:
 *		The paging reference for "object" is released.
 *		"object" is unlocked.
 *		If "top_page" is not null, "top_page" is
 *		freed and the paging reference for the object
 *		containing it is released.
 *
 *	In/out conditions:
 *		"object" must be locked.
 */
void
vm_fault_cleanup(
	register vm_object_t	object,
	register vm_page_t	top_page)
{
	vm_object_paging_end(object);
	vm_object_unlock(object);

	if (top_page != VM_PAGE_NULL) {
		object = top_page->object;
		vm_object_lock(object);
		VM_PAGE_FREE(top_page);
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats_in[MAXCLUSTERPAGES];
#define CLUSTER_STAT(clause)	clause
#define CLUSTER_STAT_HIGHER(x)	\
	((cluster_stats_in[(x)].pages_at_higher_offsets)++)
#define CLUSTER_STAT_LOWER(x)	\
	((cluster_stats_in[(x)].pages_at_lower_offsets)++)
#define CLUSTER_STAT_CLUSTER(x)	\
	((cluster_stats_in[(x)].pages_in_cluster)++)
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
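/*
 * Illustrative note (not in the original source): with MACH_CLUSTER_STATS
 * disabled, CLUSTER_STAT(clause) expands to nothing, so statements such as
 *	CLUSTER_STAT(pages_at_lower_offsets++;)
 * in the cluster scans below compile away entirely.
 */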
/* XXX - temporary */
boolean_t vm_allow_clustered_pagein = FALSE;
int vm_pagein_cluster_used = 0;

/*
 * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior
 */
int vm_default_ahead = 1;	/* Number of pages to prepage ahead */
int vm_default_behind = 0;	/* Number of pages to prepage behind */
#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
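/*
 * Illustrative note (not in the original source), assuming the usual 4K
 * page size: PAGE_SIZE_64 - 1 == 0xFFF, so ALIGNED(0x3000) is TRUE while
 * ALIGNED(0x3010) is FALSE; ALIGNED() simply tests page alignment of a
 * 64-bit object offset.
 */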
/*
 *	Routine:	vm_fault_page
 *	Purpose:
 *		Find the resident page for the virtual memory
 *		specified by the given virtual memory object
 *		and offset.
 *	Additional arguments:
 *		The required permissions for the page are given
 *		in "fault_type".  Desired permissions are included
 *		in "protection".  The minimum and maximum valid offsets
 *		within the object for the relevant map entry are
 *		passed in "lo_offset" and "hi_offset" respectively and
 *		the expected page reference pattern is passed in "behavior".
 *		These three parameters are used to determine pagein cluster
 *		limits.
 *
 *		If the desired page is known to be resident (for
 *		example, because it was previously wired down), asserting
 *		the "unwiring" parameter will speed the search.
 *
 *		If the operation can be interrupted (by thread_abort
 *		or thread_terminate), then the "interruptible"
 *		parameter should be asserted.
 *
 *	Results:
 *		The page containing the proper data is returned
 *		in "result_page".
 *
 *	In/out conditions:
 *		The source object must be locked and referenced,
 *		and must donate one paging reference.  The reference
 *		is not affected.  The paging reference and lock are
 *		consumed.
 *
 *		If the call succeeds, the object in which "result_page"
 *		resides is left locked and holding a paging reference.
 *		If this is not the original object, a busy page in the
 *		original object is returned in "top_page", to prevent other
 *		callers from pursuing this same data, along with a paging
 *		reference for the original object.  The "top_page" should
 *		be destroyed when this guarantee is no longer required.
 *		The "result_page" is also left busy.  It is not removed
 *		from the pageout queues.
 */
vm_fault_return_t
vm_fault_page(
	/* Arguments: */
	vm_object_t	first_object,	/* Object to begin search */
	vm_object_offset_t first_offset,	/* Offset into object */
	vm_prot_t	fault_type,	/* What access is requested */
	boolean_t	must_be_resident,/* Must page be resident? */
	int		interruptible,	/* how may fault be interrupted? */
	vm_object_offset_t lo_offset,	/* Map entry start */
	vm_object_offset_t hi_offset,	/* Map entry end */
	vm_behavior_t	behavior,	/* Page reference behavior */
	/* Modifies in place: */
	vm_prot_t	*protection,	/* Protection for mapping */
	/* Returns: */
	vm_page_t	*result_page,	/* Page found, if successful */
	vm_page_t	*top_page,	/* Page in top object, if
					 * not result_page.  */
	int		*type_of_fault,	/* if non-null, fill in with type of fault
					 * COW, zero-fill, etc... returned in trace point */
	/* More arguments: */
	kern_return_t	*error_code,	/* code if page is in error */
	boolean_t	no_zero_fill,	/* don't zero fill absent pages */
	boolean_t	data_supply,	/* treat as data_supply if
					 * it is a write fault and a full
					 * page is provided */
	vm_map_t	map,
	vm_offset_t	vaddr)
{
	register
	vm_page_t		m;
	register
	vm_object_t		object;
	register
	vm_object_offset_t	offset;
	vm_page_t		first_m;
	vm_object_t		next_object;
	vm_object_t		copy_object;
	boolean_t		look_for_page;
	vm_prot_t		access_required = fault_type;
	vm_prot_t		wants_copy_flag;
	vm_size_t		cluster_size, length;
	vm_object_offset_t	cluster_offset;
	vm_object_offset_t	cluster_start, cluster_end, paging_offset;
	vm_object_offset_t	align_offset;
	CLUSTER_STAT(int	pages_at_higher_offsets;)
	CLUSTER_STAT(int	pages_at_lower_offsets;)
	kern_return_t		wait_result;
	thread_t		cur_thread;
	boolean_t		interruptible_state;
	boolean_t		bumped_pagein = FALSE;
#if	MACH_PAGEMAP
	/*
	 * MACH page map - an optional optimization where a bit map is maintained
	 * by the VM subsystem for internal objects to indicate which pages of
	 * the object currently reside on backing store.  This existence map
	 * duplicates information maintained by the vnode pager.  It is
	 * created at the time of the first pageout against the object, i.e.
	 * at the same time the pager for the object is created.  The optimization
	 * is designed to eliminate pager interaction overhead, if it is
	 * 'known' that the page does not exist on backing store.
	 *
	 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
	 * either marked as paged out in the existence map for the object or no
	 * existence map exists for the object.  LOOK_FOR() is one of the
	 * criteria in the decision to invoke the pager.  It is also used as one
	 * of the criteria to terminate the scan for adjacent pages in a clustered
	 * pagein operation.  Note that LOOK_FOR() always evaluates to TRUE for
	 * permanent objects.  Note also that if the pager for an internal object
	 * has not been created, the pager is not invoked regardless of the value
	 * of LOOK_FOR() and that clustered pagein scans are only done on an object
	 * for which a pager has been created.
	 *
	 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
	 * is marked as paged out in the existence map for the object.
	 * PAGED_OUT() is used to determine if a page has already been pushed
	 * into a copy object in order to avoid a redundant page out operation.
	 */
#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			== VM_EXTERNAL_STATE_EXISTS)
#else /* MACH_PAGEMAP */
	/*
	 * If the MACH page map optimization is not enabled,
	 * LOOK_FOR() always evaluates to TRUE.  The pager will always be
	 * invoked to resolve missing pages in an object, assuming the pager
	 * has been created for the object.  In a clustered page operation, the
	 * absence of a page on backing store cannot be used to terminate
	 * a scan for adjacent pages since that information is available only in
	 * the pager.  Hence pages that may not be paged out are potentially
	 * included in a clustered request.  The vnode pager is coded to deal
	 * with any combination of absent/present pages in a clustered
	 * pagein request.  PAGED_OUT() always evaluates to FALSE, i.e. the pager
	 * will always be invoked to push a dirty page into a copy object assuming
	 * a pager has been created.  If the page has already been pushed, the
	 * pager will ignore the new request.
	 */
#define LOOK_FOR(o, f) TRUE
#define PAGED_OUT(o, f) FALSE
#endif /* MACH_PAGEMAP */
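/*
 * Illustrative note (not in the original source): the fault path below
 * combines these macros with the pager state roughly as
 *
 *	look_for_page = (object->pager_created) &&
 *			LOOK_FOR(object, offset);
 *
 * so the pager is consulted only once it exists and, with MACH_PAGEMAP
 * enabled, only if the existence map does not already rule the page out.
 */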
#define PREPARE_RELEASE_PAGE(m)				\
	MACRO_BEGIN					\
	vm_page_lock_queues();				\
	MACRO_END

#define DO_RELEASE_PAGE(m)				\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PREPARE_RELEASE_PAGE(m);			\
	DO_RELEASE_PAGE(m);				\
	MACRO_END
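/*
 * Illustrative note (an assumption, not in the original source):
 * MACRO_BEGIN/MACRO_END from <kern/macro_help.h> are conventionally
 * do { ... } while (0) wrappers, so RELEASE_PAGE(m) behaves as a single
 * statement: it wakes any waiters on the busy page, activates it if it is
 * on neither paging queue, and drops the page-queues lock.
 */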
#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset);	/* (TEST/DEBUG) */
#endif
#if	!VM_FAULT_STATIC_CONFIG
	if (vm_fault_dirty_handling
#if	MACH_KDB
		/*
		 *	If there are watchpoints set, then
		 *	we don't want to give away write permission
		 *	on a read fault.  Make the task write fault,
		 *	so that the watchpoint code notices the access.
		 */
	    || db_watchpoint_list
#endif	/* MACH_KDB */
	    ) {
		/*
		 *	If we aren't asking for write permission,
		 *	then don't give it away.  We're using write
		 *	faults to set the dirty bit.
		 */
		if (!(fault_type & VM_PROT_WRITE))
			*protection &= ~VM_PROT_WRITE;
	}

	if (!vm_fault_interruptible)
		interruptible = THREAD_UNINT;
#else	/* STATIC_CONFIG */
#if	MACH_KDB
	/*
	 *	If there are watchpoints set, then
	 *	we don't want to give away write permission
	 *	on a read fault.  Make the task write fault,
	 *	so that the watchpoint code notices the access.
	 */
	if (db_watchpoint_list) {
		/*
		 *	If we aren't asking for write permission,
		 *	then don't give it away.  We're using write
		 *	faults to set the dirty bit.
		 */
		if (!(fault_type & VM_PROT_WRITE))
			*protection &= ~VM_PROT_WRITE;
	}
#endif	/* MACH_KDB */
#endif	/* STATIC_CONFIG */
	cur_thread = current_thread();

	interruptible_state = cur_thread->interruptible;
	if (interruptible == THREAD_UNINT)
		cur_thread->interruptible = FALSE;
	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about more than
	 *		one thread attempting to memory_object_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	3)	We must increment paging_in_progress on any object
	 *		for which we have a busy page
	 *
	 *	4)	We leave busy pages on the pageout queues.
	 *		If the pageout daemon comes across a busy page,
	 *		it will remove the page from the pageout queues.
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;
	first_m = VM_PAGE_NULL;
	access_required = fault_type;

	XPR(XPR_VM_FAULT,
		"vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
		(integer_t)object, offset, fault_type, *protection, 0);
	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
		if (!object->alive) {
			vm_fault_cleanup(object, first_m);
			cur_thread->interruptible = interruptible_state;
			return(VM_FAULT_MEMORY_ERROR);
		}
		m = vm_page_lookup(object, offset);
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
		if (m != VM_PAGE_NULL) {
			/*
			 *	If the page was pre-paged as part of a
			 *	cluster, record the fact.
			 */
			if (m->clustered) {
				vm_pagein_cluster_used++;
				m->clustered = FALSE;
			}
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 *
			 *	A possible optimization: if the page
			 *	is known to be resident, we can ignore
			 *	pages that are absent (regardless of
			 *	whether they're busy).
			 */

			if (m->busy) {
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
				PAGE_ASSERT_WAIT(m, interruptible);
				vm_object_unlock(object);
				XPR(XPR_VM_FAULT,
				    "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
					(integer_t)object, offset,
					(integer_t)m, 0, 0);
				counter(c_vm_fault_page_block_busy_kernel++);
				wait_result = thread_block((void (*)(void))0);

				vm_object_lock(object);
				if (wait_result != THREAD_AWAKENED) {
					vm_fault_cleanup(object, first_m);
					cur_thread->interruptible = interruptible_state;
					if (wait_result == THREAD_RESTART)
						return(VM_FAULT_RETRY);
					else
						return(VM_FAULT_INTERRUPTED);
				}
				continue;
			}
			/*
			 *	If the page is in error, give up now.
			 */

			if (m->error) {
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);	/* (TEST/DEBUG) */
#endif
				if (error_code)
					*error_code = m->page_error;
				VM_PAGE_FREE(m);
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_MEMORY_ERROR);
			}

			/*
			 *	If the pager wants us to restart
			 *	at the top of the chain,
			 *	typically because it has moved the
			 *	page to another pager, then do so.
			 */

			if (m->restart) {
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
				VM_PAGE_FREE(m);
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_RETRY);
			}
			/*
			 *	If the page isn't busy, but is absent,
			 *	then it was deemed "unavailable".
			 */

			if (m->absent) {
				/*
				 * Remove the non-existent page (unless it's
				 * in the top object) and move on down to the
				 * next object (if there is one).
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);	/* (TEST/DEBUG) */
#endif

				next_object = object->shadow;
				if (next_object == VM_OBJECT_NULL) {
					vm_page_t real_m;

					assert(!must_be_resident);

					if (object->shadow_severed) {
						vm_fault_cleanup(object, first_m);
						cur_thread->interruptible = interruptible_state;
						return VM_FAULT_MEMORY_ERROR;
					}

					/*
					 * Absent page at bottom of shadow
					 * chain; zero fill the page we left
					 * busy in the first object, and flush
					 * the absent page.  But first we
					 * need to allocate a real page.
					 */
					if (VM_PAGE_THROTTLED() ||
					    (real_m = vm_page_grab()) == VM_PAGE_NULL) {
						vm_fault_cleanup(object, first_m);
						cur_thread->interruptible = interruptible_state;
						return(VM_FAULT_MEMORY_SHORTAGE);
					}
626 "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
627 (integer_t
)object
, offset
,
629 (integer_t
)first_object
, 0);
630 if (object
!= first_object
) {
632 vm_object_paging_end(object
);
633 vm_object_unlock(object
);
634 object
= first_object
;
635 offset
= first_offset
;
637 first_m
= VM_PAGE_NULL
;
638 vm_object_lock(object
);
642 assert(real_m
->busy
);
643 vm_page_insert(real_m
, object
, offset
);
647 * Drop the lock while zero filling
648 * page. Then break because this
649 * is the page we wanted. Checking
650 * the page lock is a waste of time;
651 * this page was either absent or
652 * newly allocated -- in both cases
653 * it can't be page locked by a pager.
658 vm_object_unlock(object
);
659 vm_page_zero_fill(m
);
661 *type_of_fault
= DBG_ZERO_FILL_FAULT
;
662 VM_STAT(zero_fill_count
++);
664 if (bumped_pagein
== TRUE
) {
666 current_task()->pageins
--;
668 vm_object_lock(object
);
					pmap_clear_modify(m->phys_addr);
					vm_page_lock_queues();
					VM_PAGE_QUEUES_REMOVE(m);
					m->page_ticket = vm_page_ticket;
					vm_page_ticket_roll++;
					if(vm_page_ticket_roll ==
						VM_PAGE_TICKETS_IN_ROLL) {
						vm_page_ticket_roll = 0;
						if(vm_page_ticket ==
							VM_PAGE_TICKET_ROLL_IDS)
							vm_page_ticket = 0;
						else
							vm_page_ticket++;
					}
					queue_enter(&vm_page_queue_inactive,
						m, vm_page_t, pageq);
					m->inactive = TRUE;
					vm_page_inactive_count++;
					vm_page_unlock_queues();
					break;
				} else {
					if (must_be_resident) {
						vm_object_paging_end(object);
					} else if (object != first_object) {
						vm_object_paging_end(object);
						VM_PAGE_FREE(m);
					} else {
						first_m = m;
						m->absent = FALSE;
						m->unusual = FALSE;
						vm_object_absent_release(object);
						m->busy = TRUE;

						vm_page_lock_queues();
						VM_PAGE_QUEUES_REMOVE(m);
						vm_page_unlock_queues();
					}
					XPR(XPR_VM_FAULT,
					    "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
						(integer_t)object, offset,
						(integer_t)next_object,
						offset+object->shadow_offset, 0);
					offset += object->shadow_offset;
					hi_offset += object->shadow_offset;
					lo_offset += object->shadow_offset;
					access_required = VM_PROT_READ;
					vm_object_lock(next_object);
					vm_object_unlock(object);
					object = next_object;
					vm_object_paging_begin(object);
					continue;
				}
			}
			if ((m->cleaning)
				&& ((object != first_object) ||
				    (object->copy != VM_OBJECT_NULL))
				&& (fault_type & VM_PROT_WRITE)) {
				/*
				 * This is a copy-on-write fault that will
				 * cause us to revoke access to this page, but
				 * this page is in the process of being cleaned
				 * in a clustered pageout. We must wait until
				 * the cleaning operation completes before
				 * revoking access to the original page,
				 * otherwise we might attempt to remove a
				 * write access from the original page
				 * before the cleaning completes.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);	/* (TEST/DEBUG) */
#endif
				XPR(XPR_VM_FAULT,
				    "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
					(integer_t)object, offset,
					(integer_t)m, 0, 0);
				/* take an extra ref so that object won't die */
				assert(object->ref_count > 0);
				object->ref_count++;
				vm_object_res_reference(object);
				vm_fault_cleanup(object, first_m);
				counter(c_vm_fault_page_block_backoff_kernel++);
				vm_object_lock(object);
				assert(object->ref_count > 0);
				m = vm_page_lookup(object, offset);
				if (m != VM_PAGE_NULL && m->cleaning) {
					PAGE_ASSERT_WAIT(m, interruptible);
					vm_object_unlock(object);
					wait_result = thread_block((void (*)(void)) 0);
					vm_object_deallocate(object);
					goto backoff;
				} else {
					vm_object_unlock(object);
					vm_object_deallocate(object);
					cur_thread->interruptible = interruptible_state;
					return VM_FAULT_RETRY;
				}
			}
			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (access_required & m->page_lock) {
				if ((access_required & m->unlock_request) != access_required) {
					vm_prot_t	new_unlock_request;
					kern_return_t	rc;

#if TRACEFAULTPAGE
					dbgTrace(0xBEEF000A, (unsigned int) m, (unsigned int) object->pager_ready);	/* (TEST/DEBUG) */
#endif
					if (!object->pager_ready) {
						XPR(XPR_VM_FAULT,
						    "vm_f_page: ready wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n",
							access_required,
							(integer_t)object, offset,
							(integer_t)m, 0);
						/* take an extra ref */
						assert(object->ref_count > 0);
						object->ref_count++;
						vm_object_res_reference(object);
						vm_fault_cleanup(object,
								 first_m);
						counter(c_vm_fault_page_block_backoff_kernel++);
						vm_object_lock(object);
						assert(object->ref_count > 0);
						if (!object->pager_ready) {
							vm_object_assert_wait(
								object,
								VM_OBJECT_EVENT_PAGER_READY,
								interruptible);
							vm_object_unlock(object);
							wait_result = thread_block((void (*)(void))0);
							vm_object_deallocate(object);
							goto backoff;
						} else {
							vm_object_unlock(object);
							vm_object_deallocate(object);
							cur_thread->interruptible = interruptible_state;
							return VM_FAULT_RETRY;
						}
					}
					new_unlock_request = m->unlock_request =
						(access_required | m->unlock_request);
					vm_object_unlock(object);
					XPR(XPR_VM_FAULT,
					    "vm_f_page: unlock obj 0x%X, offset 0x%X, page 0x%X, unl_req %d\n",
						(integer_t)object, offset,
						(integer_t)m, new_unlock_request, 0);
					if ((rc = memory_object_data_unlock(
						object->pager,
						offset + object->paging_offset,
						PAGE_SIZE,
						new_unlock_request))
					     != KERN_SUCCESS) {
						if (vm_fault_debug)
							printf("vm_fault: memory_object_data_unlock failed\n");
						vm_object_lock(object);
						vm_fault_cleanup(object, first_m);
						cur_thread->interruptible = interruptible_state;
						return((rc == MACH_SEND_INTERRUPTED) ?
							VM_FAULT_INTERRUPTED :
							VM_FAULT_MEMORY_ERROR);
					}
					vm_object_lock(object);
					continue;
				}
840 "vm_f_page: access wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n",
841 access_required
, (integer_t
)object
,
842 offset
, (integer_t
)m
, 0);
843 /* take an extra ref so object won't die */
844 assert(object
->ref_count
> 0);
846 vm_object_res_reference(object
);
847 vm_fault_cleanup(object
, first_m
);
848 counter(c_vm_fault_page_block_backoff_kernel
++);
849 vm_object_lock(object
);
850 assert(object
->ref_count
> 0);
851 m
= vm_page_lookup(object
, offset
);
852 if (m
!= VM_PAGE_NULL
&&
853 (access_required
& m
->page_lock
) &&
854 !((access_required
& m
->unlock_request
) != access_required
)) {
855 PAGE_ASSERT_WAIT(m
, interruptible
);
856 vm_object_unlock(object
);
857 wait_result
= thread_block((void (*)(void)) 0);
858 vm_object_deallocate(object
);
861 vm_object_unlock(object
);
862 vm_object_deallocate(object
);
863 cur_thread
->interruptible
= interruptible_state
;
864 return VM_FAULT_RETRY
;
			/*
			 *	We mark the page busy and leave it on
			 *	the pageout queues.  If the pageout
			 *	daemon comes across it, then it will
			 *	remove the page.
			 */

#if TRACEFAULTPAGE
			dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif

#if	!VM_FAULT_STATIC_CONFIG
			if (!software_reference_bits) {
				vm_page_lock_queues();
				if (m->inactive)
					vm_stat.reactivations++;

				VM_PAGE_QUEUES_REMOVE(m);
				vm_page_unlock_queues();
			}
#endif
			XPR(XPR_VM_FAULT,
			    "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
				(integer_t)object, offset, (integer_t)m, 0, 0);
			assert(!m->busy);
			m->busy = TRUE;
			assert(!m->absent);
			break;
		}
		look_for_page =
			(object->pager_created) &&
			LOOK_FOR(object, offset) &&
			(!data_supply);

#if TRACEFAULTPAGE
		dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
		if ((look_for_page || (object == first_object))
				&& !must_be_resident
				&& !(object->phys_contiguous)) {
			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_grab_fictitious();
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);	/* (TEST/DEBUG) */
#endif
			if (m == VM_PAGE_NULL) {
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_FICTITIOUS_SHORTAGE);
			}
			vm_page_insert(m, object, offset);
		}
		if ((look_for_page && !must_be_resident)) {
			kern_return_t	rc;

			/*
			 *	If the memory manager is not ready, we
			 *	cannot make requests.
			 */
			if (!object->pager_ready) {
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
				if(m != VM_PAGE_NULL)
					VM_PAGE_FREE(m);
				XPR(XPR_VM_FAULT,
				    "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
					(integer_t)object, offset, 0, 0, 0);
				/* take an extra ref so object won't die */
				assert(object->ref_count > 0);
				object->ref_count++;
				vm_object_res_reference(object);
				vm_fault_cleanup(object, first_m);
				counter(c_vm_fault_page_block_backoff_kernel++);
				vm_object_lock(object);
				assert(object->ref_count > 0);
				if (!object->pager_ready) {
					vm_object_assert_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
					vm_object_unlock(object);
					wait_result = thread_block((void (*)(void))0);
					vm_object_deallocate(object);
					goto backoff;
				} else {
					vm_object_unlock(object);
					vm_object_deallocate(object);
					cur_thread->interruptible = interruptible_state;
					return VM_FAULT_RETRY;
				}
			}
			if(object->phys_contiguous) {
				if(m != VM_PAGE_NULL) {
					VM_PAGE_FREE(m);
					m = VM_PAGE_NULL;
				}
			}
			if (object->internal) {
				/*
				 *	Requests to the default pager
				 *	must reserve a real page in advance,
				 *	because the pager's data-provided
				 *	call won't block for pages.  IMPORTANT:
				 *	this acts as a throttling mechanism
				 *	for data_requests to the default
				 *	pager.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF000F, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
				if (m->fictitious && !vm_page_convert(m)) {
					VM_PAGE_FREE(m);
					vm_fault_cleanup(object, first_m);
					cur_thread->interruptible = interruptible_state;
					return(VM_FAULT_MEMORY_SHORTAGE);
				}
			} else if (object->absent_count >
						vm_object_absent_max) {
				/*
				 * If there are too many outstanding page
				 * requests pending on this object, we
				 * wait for them to be resolved now.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
				if(m != VM_PAGE_NULL)
					VM_PAGE_FREE(m);
				/* take an extra ref so object won't die */
				assert(object->ref_count > 0);
				object->ref_count++;
				vm_object_res_reference(object);
				vm_fault_cleanup(object, first_m);
				counter(c_vm_fault_page_block_backoff_kernel++);
				vm_object_lock(object);
				assert(object->ref_count > 0);
				if (object->absent_count > vm_object_absent_max) {
					vm_object_absent_assert_wait(object,
							interruptible);
					vm_object_unlock(object);
					wait_result = thread_block((void (*)(void))0);
					vm_object_deallocate(object);
					goto backoff;
				} else {
					vm_object_unlock(object);
					vm_object_deallocate(object);
					cur_thread->interruptible = interruptible_state;
					return VM_FAULT_RETRY;
				}
			}
			/*
			 * Indicate that the page is waiting for data
			 * from the memory manager.
			 */

			if(m != VM_PAGE_NULL) {
				m->list_req_pending = TRUE;
				m->absent = TRUE;
				m->unusual = TRUE;
				object->absent_count++;
			}

			cluster_start = offset;
			cluster_size = object->cluster_size;
			/*
			 * Skip clustered pagein if it is globally disabled
			 * or random page reference behavior is expected
			 * for the address range containing the faulting
			 * address or the object paging block size is
			 * equal to the page size.
			 */
			if (!vm_allow_clustered_pagein ||
			    behavior == VM_BEHAVIOR_RANDOM ||
			    m == VM_PAGE_NULL ||
			    cluster_size == PAGE_SIZE) {
				cluster_start = trunc_page_64(cluster_start);
				cluster_end = cluster_start + PAGE_SIZE_64;
				goto no_clustering;
			}

			assert(offset >= lo_offset);
			assert(offset < hi_offset);
			assert(ALIGNED(object->paging_offset));
			assert(cluster_size >= PAGE_SIZE);
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0011, (unsigned int) m, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
			/*
			 * Decide whether to scan ahead or behind for
			 * additional pages contiguous to the faulted
			 * page in the same paging block. The decision
			 * is based on system wide globals and the
			 * expected page reference behavior of the
			 * address range containing the faulting address.
			 * First calculate some constants.
			 */
			paging_offset = offset + object->paging_offset;
			cluster_offset = paging_offset & (cluster_size - 1);
			align_offset = paging_offset & (PAGE_SIZE_64 - 1);
			if (align_offset != 0) {
				cluster_offset = trunc_page_64(cluster_offset);
			}

#define SPANS_CLUSTER(x) ((((x) - align_offset) & (vm_object_offset_t)(cluster_size - 1)) == 0)
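/*
 * Illustrative note (not in the original source), assuming 4K pages and a
 * cluster_size of 32K: with align_offset == 0, SPANS_CLUSTER(x) is TRUE
 * exactly when x is a multiple of 32K, i.e. when page x would be the first
 * page of the next paging block.  The scans below use it to stop a cluster
 * at a paging-block boundary.
 */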
			/*
			 * Backward scan only if reverse sequential
			 * behavior has been specified
			 */
			CLUSTER_STAT(pages_at_lower_offsets = 0;)
			if (((vm_default_behind != 0 &&
			    behavior == VM_BEHAVIOR_DEFAULT) ||
			    behavior == VM_BEHAVIOR_RSEQNTL) && offset) {
				vm_object_offset_t cluster_bot;

				/*
				 * Calculate lower search boundary.
				 * Exclude pages that span a cluster boundary.
				 * Clip to start of map entry.
				 * For default page reference behavior, scan
				 * default pages behind.
				 */
				cluster_bot = (offset > cluster_offset) ?
						offset - cluster_offset : offset;
				if (align_offset != 0) {
					if ((cluster_bot < offset) &&
					    SPANS_CLUSTER(cluster_bot)) {
						cluster_bot += PAGE_SIZE_64;
					}
				}
				if (behavior == VM_BEHAVIOR_DEFAULT) {
					vm_object_offset_t
						bot = (vm_object_offset_t)
						      (vm_default_behind * PAGE_SIZE);

					if (cluster_bot < (offset - bot))
						cluster_bot = offset - bot;
				}
				if (lo_offset > cluster_bot)
					cluster_bot = lo_offset;

				for ( cluster_start = offset - PAGE_SIZE_64;
				      (cluster_start >= cluster_bot) &&
				      (cluster_start !=
					(align_offset - PAGE_SIZE_64));
				      cluster_start -= PAGE_SIZE_64) {
					assert(cluster_size > PAGE_SIZE_64);
retry_cluster_backw:
					if (!LOOK_FOR(object, cluster_start) ||
					    vm_page_lookup(object, cluster_start)
						!= VM_PAGE_NULL) {
						break;
					}
					if (object->internal) {
						/*
						 * need to acquire a real page in
						 * advance because this acts as
						 * a throttling mechanism for
						 * data_requests to the default
						 * pager. If this fails, give up
						 * trying to find any more pages
						 * in the cluster and send off the
						 * request for what we already have.
						 */
						if ((m = vm_page_grab())
								== VM_PAGE_NULL) {
							cluster_start += PAGE_SIZE_64;
							cluster_end = offset + PAGE_SIZE_64;
							goto no_clustering;
						}
					} else if ((m = vm_page_grab_fictitious())
							== VM_PAGE_NULL) {
						vm_object_unlock(object);
						vm_page_more_fictitious();
						vm_object_lock(object);
						goto retry_cluster_backw;
					}
					m->absent = TRUE;
					m->unusual = TRUE;
					m->clustered = TRUE;
					m->list_req_pending = TRUE;

					vm_page_insert(m, object, cluster_start);
					CLUSTER_STAT(pages_at_lower_offsets++;)
					object->absent_count++;
				}
				cluster_start += PAGE_SIZE_64;
				assert(cluster_start >= cluster_bot);
			}
			assert(cluster_start <= offset);
			/*
			 * Forward scan if default or sequential behavior
			 * specified
			 */
			CLUSTER_STAT(pages_at_higher_offsets = 0;)
			if ((behavior == VM_BEHAVIOR_DEFAULT &&
			     vm_default_ahead != 0) ||
			    behavior == VM_BEHAVIOR_SEQUENTIAL) {
				vm_object_offset_t cluster_top;

				/*
				 * Calculate upper search boundary.
				 * Exclude pages that span a cluster boundary.
				 * Clip to end of map entry.
				 * For default page reference behavior, scan
				 * default pages ahead.
				 */
				cluster_top = (offset + cluster_size) -
					      cluster_offset;
				if (align_offset != 0) {
					if ((cluster_top > (offset + PAGE_SIZE_64)) &&
					    SPANS_CLUSTER(cluster_top)) {
						cluster_top -= PAGE_SIZE_64;
					}
				}
				if (behavior == VM_BEHAVIOR_DEFAULT) {
					vm_object_offset_t top = (vm_object_offset_t)
						((vm_default_ahead*PAGE_SIZE)+PAGE_SIZE);

					if (cluster_top > (offset + top))
						cluster_top = offset + top;
				}
				if (cluster_top > hi_offset)
					cluster_top = hi_offset;

				for (cluster_end = offset + PAGE_SIZE_64;
				     cluster_end < cluster_top;
				     cluster_end += PAGE_SIZE_64) {
					assert(cluster_size > PAGE_SIZE);
retry_cluster_forw:
					if (!LOOK_FOR(object, cluster_end) ||
					    vm_page_lookup(object, cluster_end)
						!= VM_PAGE_NULL) {
						break;
					}
					if (object->internal) {
						/*
						 * need to acquire a real page in
						 * advance because this acts as
						 * a throttling mechanism for
						 * data_requests to the default
						 * pager. If this fails, give up
						 * trying to find any more pages
						 * in the cluster and send off the
						 * request for what we already have.
						 */
						if ((m = vm_page_grab())
								== VM_PAGE_NULL) {
							break;
						}
					} else if ((m = vm_page_grab_fictitious())
							== VM_PAGE_NULL) {
						vm_object_unlock(object);
						vm_page_more_fictitious();
						vm_object_lock(object);
						goto retry_cluster_forw;
					}
					m->absent = TRUE;
					m->unusual = TRUE;
					m->clustered = TRUE;
					m->list_req_pending = TRUE;

					vm_page_insert(m, object, cluster_end);
					CLUSTER_STAT(pages_at_higher_offsets++;)
					object->absent_count++;
				}
				assert(cluster_end <= cluster_top);
			}
			else {
				cluster_end = offset + PAGE_SIZE_64;
			}
no_clustering:
			assert(cluster_end >= offset + PAGE_SIZE_64);
			length = cluster_end - cluster_start;
#if	MACH_CLUSTER_STATS
			CLUSTER_STAT_HIGHER(pages_at_higher_offsets);
			CLUSTER_STAT_LOWER(pages_at_lower_offsets);
			CLUSTER_STAT_CLUSTER(length/PAGE_SIZE);
#endif	/* MACH_CLUSTER_STATS */
			/*
			 * lengthen the cluster by the pages in the working set
			 */
			if((map != NULL) &&
				(current_task()->dynamic_working_set != 0)) {
				cluster_end = cluster_start + length;
				/* tws values for start and end are just
				 * suggestions.  Therefore, as long as
				 * build_cluster does not use pointers or
				 * take action based on values that
				 * could be affected by re-entrance we
				 * do not need to take the map lock.
				 */
				tws_build_cluster((tws_hash_t)
					current_task()->dynamic_working_set,
					object, &cluster_start,
					&cluster_end, 0x16000);
				length = cluster_end - cluster_start;
			}
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);	/* (TEST/DEBUG) */
#endif
			/*
			 *	We have a busy page, so we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the memory manager to retrieve the data.
			 */

			if (type_of_fault)
				*type_of_fault = DBG_PAGEIN_FAULT;
			VM_STAT(pageins++);
			current_task()->pageins++;
			bumped_pagein = TRUE;
			/*
			 *	If this object uses a copy_call strategy,
			 *	and we are interested in a copy of this object
			 *	(having gotten here only by following a
			 *	shadow chain), then tell the memory manager
			 *	via a flag added to the desired_access
			 *	parameter, so that it can detect a race
			 *	between our walking down the shadow chain
			 *	and its pushing pages up into a copy of
			 *	the object that it manages.
			 */

			if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL &&
			    object != first_object) {
				wants_copy_flag = VM_PROT_WANTS_COPY;
			} else {
				wants_copy_flag = VM_PROT_NONE;
			}

			XPR(XPR_VM_FAULT,
			    "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
				(integer_t)object, offset, (integer_t)m,
				access_required | wants_copy_flag, 0);

			rc = memory_object_data_request(object->pager,
					cluster_start + object->paging_offset,
					length,
					access_required | wants_copy_flag);
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc);	/* (TEST/DEBUG) */
#endif
			if (rc != KERN_SUCCESS) {
				if (rc != MACH_SEND_INTERRUPTED
				    && vm_fault_debug)
					printf("%s(0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d\n",
						"memory_object_data_request",
						object->pager,
						cluster_start + object->paging_offset,
						length, access_required, rc);
				/*
				 *	Don't want to leave a busy page around,
				 *	but the data request may have blocked,
				 *	so check if it's still there and busy.
				 */
				if(!object->phys_contiguous) {
					vm_object_lock(object);
					for (; length;
					     length -= PAGE_SIZE,
					     cluster_start += PAGE_SIZE_64) {
						vm_page_t p;

						if ((p = vm_page_lookup(object,
								cluster_start))
						    && p->absent && p->busy
						    && p != first_m) {
							VM_PAGE_FREE(p);
						}
					}
				} else {
					vm_object_lock(object);
				}
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return((rc == MACH_SEND_INTERRUPTED) ?
					VM_FAULT_INTERRUPTED :
					VM_FAULT_MEMORY_ERROR);
			}
			{
				tws_hash_line_t	line;
				task_t		task;

				task = current_task();

				if((map != NULL) &&
					(task->dynamic_working_set != 0)) {
					if(tws_lookup((tws_hash_t)
						task->dynamic_working_set,
						offset, object,
						&line) == KERN_SUCCESS) {
						tws_line_signal((tws_hash_t)
							task->dynamic_working_set,
							map, line, vaddr);
					}
				}
			}
			/*
			 * Retry with same object/offset, since new data may
			 * be in a different page (i.e., m is meaningless at
			 * this point).
			 */
			vm_object_lock(object);
			if ((interruptible != THREAD_UNINT) &&
			    (current_thread()->state & TH_ABORT)) {
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_INTERRUPTED);
			}
			if(m == VM_PAGE_NULL)
				break;
			continue;
		}
		/*
		 * The only case in which we get here is if
		 * object has no pager (or unwiring).  If the pager doesn't
		 * have the page this is handled in the m->absent case above
		 * (and if you change things here you should look above).
		 */
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
		if (object == first_object)
			first_m = m;
		else
			assert(m == VM_PAGE_NULL);

		XPR(XPR_VM_FAULT,
		    "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
			(integer_t)object, offset, (integer_t)m,
			(integer_t)object->shadow, 0);
		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */
		next_object = object->shadow;
		if (next_object == VM_OBJECT_NULL) {
			assert(!must_be_resident);

			/*
			 * If there's no object left, fill the page
			 * in the top object with zeros.  But first we
			 * need to allocate a real page.
			 */

			if (object != first_object) {
				vm_object_paging_end(object);
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				vm_object_lock(object);
			}

			m = first_m;
			assert(m->object == object);
			first_m = VM_PAGE_NULL;

			if (object->shadow_severed) {
				VM_PAGE_FREE(m);
				vm_fault_cleanup(object, VM_PAGE_NULL);
				cur_thread->interruptible = interruptible_state;
				return VM_FAULT_MEMORY_ERROR;
			}
			if (VM_PAGE_THROTTLED() ||
			    (m->fictitious && !vm_page_convert(m))) {
				VM_PAGE_FREE(m);
				vm_fault_cleanup(object, VM_PAGE_NULL);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_MEMORY_SHORTAGE);
			}
			m->no_isync = FALSE;

			if (!no_zero_fill) {
				vm_object_unlock(object);
				vm_page_zero_fill(m);
				if (type_of_fault)
					*type_of_fault = DBG_ZERO_FILL_FAULT;
				VM_STAT(zero_fill_count++);

				if (bumped_pagein == TRUE) {
					VM_STAT(pageins--);
					current_task()->pageins--;
					bumped_pagein = FALSE;
				}
				vm_object_lock(object);
			}
			vm_page_lock_queues();
			VM_PAGE_QUEUES_REMOVE(m);
			m->page_ticket = vm_page_ticket;
			vm_page_ticket_roll++;
			if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
				vm_page_ticket_roll = 0;
				if(vm_page_ticket ==
					VM_PAGE_TICKET_ROLL_IDS)
					vm_page_ticket = 0;
				else
					vm_page_ticket++;
			}
			queue_enter(&vm_page_queue_inactive,
				m, vm_page_t, pageq);
			m->inactive = TRUE;
			vm_page_inactive_count++;
			vm_page_unlock_queues();
			pmap_clear_modify(m->phys_addr);
			break;
		}
		else {
			if ((object != first_object) || must_be_resident)
				vm_object_paging_end(object);
			offset += object->shadow_offset;
			hi_offset += object->shadow_offset;
			lo_offset += object->shadow_offset;
			access_required = VM_PROT_READ;
			vm_object_lock(next_object);
			vm_object_unlock(object);
			object = next_object;
			vm_object_paging_begin(object);
		}
	}
	/*
	 *	PAGE HAS BEEN FOUND.
	 *
	 *	This page (m) is:
	 *		busy, so that we can play with it;
	 *		not absent, so that nobody else will fill it;
	 *		possibly eligible for pageout;
	 *
	 *	The top-level page (first_m) is:
	 *		VM_PAGE_NULL if the page was found in the
	 *		 top-level object;
	 *		busy, not absent, and ineligible for pageout.
	 *
	 *	The current object (object) is locked.  A paging
	 *	reference is held for the current and top-level
	 *	objects.
	 */

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
#if	EXTRA_ASSERTIONS
	if(m != VM_PAGE_NULL) {
		assert(m->busy && !m->absent);
		assert((first_m == VM_PAGE_NULL) ||
			(first_m->busy && !first_m->absent &&
			 !first_m->active && !first_m->inactive));
	}
#endif	/* EXTRA_ASSERTIONS */

	XPR(XPR_VM_FAULT,
	    "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
		(integer_t)object, offset, (integer_t)m,
		(integer_t)first_object, (integer_t)first_m);
	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if ((object != first_object) && (m != VM_PAGE_NULL)) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type);	/* (TEST/DEBUG) */
#endif
		if (fault_type & VM_PROT_WRITE) {
			vm_page_t copy_m;

			assert(!must_be_resident);

			/*
			 * If we try to collapse first_object at this
			 * point, we may deadlock when we try to get
			 * the lock on an intermediate object (since we
			 * have the bottom object locked).  We can't
			 * unlock the bottom object, because the page
			 * we found may move (by collapse) if we do.
			 *
			 * Instead, we first copy the page.  Then, when
			 * we have no more use for the bottom object,
			 * we unlock it and try to collapse.
			 *
			 * Note that we copy the page even if we didn't
			 * need to... that's the breaks.
			 */

			/*
			 *	Allocate a page for the copy
			 */
			copy_m = vm_page_grab();
			if (copy_m == VM_PAGE_NULL) {
				RELEASE_PAGE(m);
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_MEMORY_SHORTAGE);
			}

			XPR(XPR_VM_FAULT,
			    "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
				(integer_t)object, offset,
				(integer_t)m, (integer_t)copy_m, 0);
			vm_page_copy(m, copy_m);
			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXXO If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			assert(!m->cleaning);
			pmap_page_protect(m->phys_addr, VM_PROT_NONE);
			vm_page_deactivate(m);
			copy_m->dirty = TRUE;
			/*
			 * Setting reference here prevents this fault from
			 * being counted as a (per-thread) reactivate as well
			 * as a copy-on-write.
			 */
			first_m->reference = TRUE;
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */

			PAGE_WAKEUP_DONE(m);
			vm_object_paging_end(object);
			vm_object_unlock(object);

			if (type_of_fault)
				*type_of_fault = DBG_COW_FAULT;
			VM_STAT(cow_faults++);
			current_task()->cow_faults++;
			object = first_object;
			offset = first_offset;

			vm_object_lock(object);
			VM_PAGE_FREE(first_m);
			first_m = VM_PAGE_NULL;
			assert(copy_m->busy);
			vm_page_insert(copy_m, object, offset);
			m = copy_m;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */

			vm_object_paging_end(object);
			vm_object_collapse(object);
			vm_object_paging_begin(object);
		}
		else {
			*protection &= (~VM_PROT_WRITE);
		}
	}
	/*
	 *	Now check whether the page needs to be pushed into the
	 *	copy object.  The use of asymmetric copy on write for
	 *	shared temporary objects means that we may do two copies to
	 *	satisfy the fault; one above to get the page from a
	 *	shadowed object, and one here to push it into the copy.
	 */

	while (first_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
	       (copy_object = first_object->copy) != VM_OBJECT_NULL &&
		   (m != VM_PAGE_NULL)) {
		vm_object_offset_t	copy_offset;
		vm_page_t		copy_m;
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);	/* (TEST/DEBUG) */
#endif
		/*
		 *	If the page is being written, but hasn't been
		 *	copied to the copy-object, we have to copy it there.
		 */

		if ((fault_type & VM_PROT_WRITE) == 0) {
			*protection &= ~VM_PROT_WRITE;
			break;
		}

		/*
		 *	If the page was guaranteed to be resident,
		 *	we must have already performed the copy.
		 */

		if (must_be_resident)
			break;

		/*
		 *	Try to get the lock on the copy_object.
		 */
		if (!vm_object_lock_try(copy_object)) {
			vm_object_unlock(object);

			mutex_pause();	/* wait a bit */

			vm_object_lock(object);
			continue;
		}

		/*
		 *	Make another reference to the copy-object,
		 *	to keep it from disappearing during the
		 *	copy.
		 */
		assert(copy_object->ref_count > 0);
		copy_object->ref_count++;
		VM_OBJ_RES_INCR(copy_object);

		/*
		 *	Does the page exist in the copy?
		 */
		copy_offset = first_offset - copy_object->shadow_offset;
		if (copy_object->size <= copy_offset)
			/*
			 * Copy object doesn't cover this page -- do nothing.
			 */
			;
		else if ((copy_m =
			vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
			/* Page currently exists in the copy object */
			if (copy_m->busy) {
				/*
				 *	If the page is being brought
				 *	in, wait for it and then retry.
				 */
				RELEASE_PAGE(m);
				/* take an extra ref so object won't die */
				assert(copy_object->ref_count > 0);
				copy_object->ref_count++;
				vm_object_res_reference(copy_object);
				vm_object_unlock(copy_object);
				vm_fault_cleanup(object, first_m);
				counter(c_vm_fault_page_block_backoff_kernel++);
				vm_object_lock(copy_object);
				assert(copy_object->ref_count > 0);
				VM_OBJ_RES_DECR(copy_object);
				copy_object->ref_count--;
				assert(copy_object->ref_count > 0);
				copy_m = vm_page_lookup(copy_object, copy_offset);
				if (copy_m != VM_PAGE_NULL && copy_m->busy) {
					PAGE_ASSERT_WAIT(copy_m, interruptible);
					vm_object_unlock(copy_object);
					wait_result = thread_block((void (*)(void))0);
					vm_object_deallocate(copy_object);
					goto backoff;
				} else {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					cur_thread->interruptible = interruptible_state;
					return VM_FAULT_RETRY;
				}
			}
		}
		else if (!PAGED_OUT(copy_object, copy_offset)) {
			/*
			 * If PAGED_OUT is TRUE, then the page used to exist
			 * in the copy-object, and has already been paged out.
			 * We don't need to repeat this. If PAGED_OUT is
			 * FALSE, then either we don't know (!pager_created,
			 * for example) or it hasn't been paged out.
			 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
			 * We must copy the page to the copy object.
			 */

			/*
			 *	Allocate a page for the copy
			 */
			copy_m = vm_page_alloc(copy_object, copy_offset);
			if (copy_m == VM_PAGE_NULL) {
				RELEASE_PAGE(m);
				VM_OBJ_RES_DECR(copy_object);
				copy_object->ref_count--;
				assert(copy_object->ref_count > 0);
				vm_object_unlock(copy_object);
				vm_fault_cleanup(object, first_m);
				cur_thread->interruptible = interruptible_state;
				return(VM_FAULT_MEMORY_SHORTAGE);
			}

			/*
			 *	Must copy page into copy-object.
			 */

			vm_page_copy(m, copy_m);

			/*
			 *	If the old page was in use by any users
			 *	of the copy-object, it must be removed
			 *	from all pmaps.  (We can't know which
			 *	pmaps use it.)
			 */

			vm_page_lock_queues();
			assert(!m->cleaning);
			pmap_page_protect(m->phys_addr, VM_PROT_NONE);
			copy_m->dirty = TRUE;
			vm_page_unlock_queues();
			/*
			 *	If there's a pager, then immediately
			 *	page out this page, using the "initialize"
			 *	option.  Else, we use the copy.
			 */

			if
#if	MACH_PAGEMAP
			  ((!copy_object->pager_created) ||
				vm_external_state_get(
					copy_object->existence_map, copy_offset)
				== VM_EXTERNAL_STATE_ABSENT)
#else
			  (!copy_object->pager_created)
#endif
				{
				vm_page_lock_queues();
				vm_page_activate(copy_m);
				vm_page_unlock_queues();
				PAGE_WAKEUP_DONE(copy_m);
			}
			else {
				assert(copy_m->busy == TRUE);

				/*
				 *	The page is already ready for pageout:
				 *	not on pageout queues and busy.
				 *	Unlock everything except the
				 *	copy_object itself.
				 */

				vm_object_unlock(object);

				/*
				 *	Write the page to the copy-object,
				 *	flushing it from the kernel.
				 */

				vm_pageout_initialize_page(copy_m);

				/*
				 *	Since the pageout may have
				 *	temporarily dropped the
				 *	copy_object's lock, we
				 *	check whether we'll have
				 *	to deallocate the hard way.
				 */

				if ((copy_object->shadow != object) ||
				    (copy_object->ref_count == 1)) {
					vm_object_unlock(copy_object);
					vm_object_deallocate(copy_object);
					vm_object_lock(object);
					continue;
				}

				/*
				 *	Pick back up the old object's
				 *	lock.  [It is safe to do so,
				 *	since it must be deeper in the
				 *	shadow chain.]
				 */

				vm_object_lock(object);
			}

			/*
			 *	Because we're pushing a page upward
			 *	in the object tree, we must restart
			 *	any faults that are waiting here.
			 *	[Note that this is an expansion of
			 *	PAGE_WAKEUP that uses the THREAD_RESTART
			 *	wait result].  Can't turn off the page's
			 *	busy bit because we're not done with it.
			 */

			if (m->wanted) {
				m->wanted = FALSE;
				thread_wakeup_with_result((event_t) m,
					THREAD_RESTART);
			}
		}

		/*
		 *	The reference count on copy_object must be
		 *	at least 2: one for our extra reference,
		 *	and at least one from the outside world
		 *	(we checked that when we last locked
		 *	copy_object).
		 */
		copy_object->ref_count--;
		assert(copy_object->ref_count > 0);
		VM_OBJ_RES_DECR(copy_object);
		vm_object_unlock(copy_object);

		break;
	}
	*result_page = m;
	*top_page = first_m;

	XPR(XPR_VM_FAULT,
	    "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
		(integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0);
	/*
	 *	If the page can be written, assume that it will be.
	 *	[Earlier, we restrict the permission to allow write
	 *	access only if the fault so required, so we don't
	 *	mark read-only data as dirty.]
	 */

#if	!VM_FAULT_STATIC_CONFIG
	if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE) &&
	    (m != VM_PAGE_NULL)) {
		m->dirty = TRUE;
	}
#endif
#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind);	/* (TEST/DEBUG) */
#endif
	if (vm_page_deactivate_behind) {
		if (offset && /* don't underflow */
			(object->last_alloc == (offset - PAGE_SIZE_64))) {
			m = vm_page_lookup(object, object->last_alloc);
			if ((m != VM_PAGE_NULL) && !m->busy) {
				vm_page_lock_queues();
				vm_page_deactivate(m);
				vm_page_unlock_queues();
			}
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);	/* (TEST/DEBUG) */
#endif
		}
		object->last_alloc = offset;
	}
#if TRACEFAULTPAGE
	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);	/* (TEST/DEBUG) */
#endif
	cur_thread->interruptible = interruptible_state;
	if(*result_page == VM_PAGE_NULL) {
		vm_object_unlock(object);
	}
	return(VM_FAULT_SUCCESS);
    block_and_backoff:
	vm_fault_cleanup(object, first_m);

	counter(c_vm_fault_page_block_backoff_kernel++);
	thread_block((void (*)(void))0);

    backoff:
	cur_thread->interruptible = interruptible_state;
	if (wait_result == THREAD_INTERRUPTED)
		return VM_FAULT_INTERRUPTED;
	return VM_FAULT_RETRY;
}
/*
 *	Routine:	vm_fault
 *	Purpose:
 *		Handle page faults, including pseudo-faults
 *		used to change the wiring status of pages.
 *	Returns:
 *		Explicit continuations have been removed.
 *	Implementation:
 *		vm_fault and vm_fault_page save mucho state
 *		in the moral equivalent of a closure.  The state
 *		structure is allocated when first entering vm_fault
 *		and deallocated when leaving vm_fault.
 */
kern_return_t
vm_fault(
	vm_map_t	map,
	vm_offset_t	vaddr,
	vm_prot_t	fault_type,
	boolean_t	change_wiring,
	int		interruptible)
{
	vm_map_version_t	version;	/* Map version for verification */
	boolean_t		wired;		/* Should mapping be wired down? */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_behavior_t		behavior;	/* Expected paging behavior */
	vm_object_offset_t	lo_offset, hi_offset;
	vm_object_t		old_copy_object; /* Saved copy object */
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	kern_return_t		kr;

	register
	vm_page_t		m;	/* Fast access to result_page */
	kern_return_t		error_code;	/* page error reasons */
	register
	vm_object_t		cur_object;
	register
	vm_object_offset_t	cur_offset;
	vm_page_t		cur_m;
	vm_object_t		new_object;
	int			type_of_fault;
	vm_map_t		pmap_map = map;
	vm_map_t		original_map = map;
	pmap_t			pmap = NULL;
	boolean_t		funnel_set = FALSE;
	funnel_t		*curflock;
	thread_t		cur_thread;
	boolean_t		interruptible_state;

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
			      vaddr,
			      0,
			      0,
			      0,
			      0);
= current_thread();
2036 interruptible_state
= cur_thread
->interruptible
;
2037 if (interruptible
== THREAD_UNINT
)
2038 cur_thread
->interruptible
= FALSE
;
2041 * assume we will hit a page in the cache
2042 * otherwise, explicitly override with
2043 * the real fault type once we determine it
2045 type_of_fault
= DBG_CACHE_HIT_FAULT
;
2048 current_task()->faults
++;
	/*
	 * drop funnel if it is already held. Then restore while returning
	 */
	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock , FALSE);
	}
    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */
	map = original_map;
	vm_map_lock_read(map);
	kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
				&object, &offset,
				&prot, &wired,
				&behavior, &lo_offset, &hi_offset, &pmap_map);

	pmap = pmap_map->pmap;

	if (kr != KERN_SUCCESS) {
		vm_map_unlock_read(map);
		goto done;
	}

	/*
	 *	If the page is wired, we must fault for the current protection
	 *	value, to avoid further faults.
	 */

	if (wired)
		fault_type = prot | VM_PROT_WRITE;

#if	VM_FAULT_CLASSIFY
	/*
	 *	Temporary data gathering code
	 */
	vm_fault_classify(object, offset, fault_type);
#endif
	/*
	 *	Fast fault code.  The basic idea is to do as much as
	 *	possible while holding the map lock and object locks.
	 *	Busy pages are not used until the object lock has to
	 *	be dropped to do something (copy, zero fill, pmap enter).
	 *	Similarly, paging references aren't acquired until that
	 *	point, and object references aren't used.
	 *
	 *	If we can figure out what to do
	 *	(zero fill, copy on write, pmap enter) while holding
	 *	the locks, then it gets done.  Otherwise, we give up,
	 *	and use the original fault path (which doesn't hold
	 *	the map lock, and relies on busy pages).
	 *	The give up cases include:
	 *	- Have to talk to pager.
	 *	- Page is busy, absent or in error.
	 *	- Pager has locked out desired access.
	 *	- Fault needs to be restarted.
	 *	- Have to push page into copy object.
	 *
	 *	The code is an infinite loop that moves one level down
	 *	the shadow chain each time.  cur_object and cur_offset
	 *	refer to the current object being examined. object and offset
	 *	are the original object from the map.  The loop is at the
	 *	top level if and only if object and cur_object are the same.
	 *
	 *	Invariants:  Map lock is held throughout.  Lock is held on
	 *		original object and cur_object (if different) when
	 *		continuing or exiting loop.
	 */
	/*
	 *	If this page is to be inserted in a copy delay object
	 *	for writing, and if the object has a copy, then the
	 *	copy delay strategy is implemented in the slow fault page.
	 */
	if (object->copy_strategy != MEMORY_OBJECT_COPY_DELAY ||
	    object->copy == VM_OBJECT_NULL ||
	    (fault_type & VM_PROT_WRITE) == 0) {
	cur_object = object;
	cur_offset = offset;

	while (TRUE) {
		m = vm_page_lookup(cur_object, cur_offset);
		if (m != VM_PAGE_NULL) {
			if (m->busy)
				break;

			if (m->unusual && (m->error || m->restart || m->private
			    || m->absent || (fault_type & m->page_lock))) {
				/*
				 *	Unusual case. Give up.
				 */
				break;
			}

			/*
			 *	Two cases of map in faults:
			 *	    - At top level w/o copy object.
			 *	    - Read fault anywhere.
			 *		--> must disallow write.
			 */

			if (object == cur_object &&
			    object->copy == VM_OBJECT_NULL)
				goto FastMapInFault;

			if ((fault_type & VM_PROT_WRITE) == 0) {

				prot &= ~VM_PROT_WRITE;

				/*
				 *	Set up to map the page ...
				 *	mark the page busy, drop
				 *	locks and take a paging reference
				 *	on the object with the page.
				 */

				if (object != cur_object) {
					vm_object_unlock(object);
					object = cur_object;
				}
FastMapInFault:
				m->busy = TRUE;

				vm_object_paging_begin(object);
				vm_object_unlock(object);
				/*
				 *	Check a couple of global reasons to
				 *	be conservative about write access.
				 *	Then do the pmap_enter.
				 */
#if	!VM_FAULT_STATIC_CONFIG
				if (vm_fault_dirty_handling
#if	MACH_KDB
				    || db_watchpoint_list
#endif
				    && (fault_type & VM_PROT_WRITE) == 0)
					prot &= ~VM_PROT_WRITE;
#else	/* STATIC_CONFIG */
#if	MACH_KDB
				if (db_watchpoint_list
				    && (fault_type & VM_PROT_WRITE) == 0)
					prot &= ~VM_PROT_WRITE;
#endif	/* MACH_KDB */
#endif	/* STATIC_CONFIG */
				if (m->no_isync == TRUE)
					pmap_sync_caches_phys(m->phys_addr);

				PMAP_ENTER(pmap, vaddr, m, prot, wired);
				{
					tws_hash_line_t	line;
					task_t		task;

					task = current_task();
					if ((map != NULL) &&
					    (task->dynamic_working_set != 0)) {
						if (tws_lookup((tws_hash_t)
							task->dynamic_working_set,
							cur_offset, object,
							&line) != KERN_SUCCESS) {
							if (tws_insert((tws_hash_t)
								task->dynamic_working_set,
								m->offset, m->object,
								vaddr, pmap_map)
								    == KERN_NO_SPACE) {
								tws_expand_working_set(
								    task->dynamic_working_set,
								    TWS_HASH_LINE_COUNT);
							}
						}
					}
				}
				/*
				 *	Grab the object lock to manipulate
				 *	the page queues.  Change wiring
				 *	case is obvious.  In soft ref bits
				 *	case activate page only if it fell
				 *	off paging queues, otherwise just
				 *	activate it if it's inactive.
				 *
				 *	NOTE: original vm_fault code will
				 *	move active page to back of active
				 *	queue.  This code doesn't.
				 */
				vm_object_lock(object);
				vm_page_lock_queues();

				if (m->clustered) {
					vm_pagein_cluster_used++;
					m->clustered = FALSE;
				}
				/*
				 * We did the isync above (if needed)... we're
				 * clearing the flag here to avoid holding a
				 * lock while calling pmap functions; however,
				 * we need to hold the object lock before we
				 * can modify the flag.
				 */
				m->no_isync = FALSE;
				m->reference = TRUE;
				if (change_wiring) {
					if (wired)
						vm_page_wire(m);
					else
						vm_page_unwire(m);
				}
#if	VM_FAULT_STATIC_CONFIG
				else {
					if (!m->active && !m->inactive)
						vm_page_activate(m);
				}
#else
				else if (software_reference_bits) {
					if (!m->active && !m->inactive)
						vm_page_activate(m);
				}
				else if (!m->active) {
					vm_page_activate(m);
				}
#endif
				vm_page_unlock_queues();
				/*
				 *	That's it, clean up and return.
				 */
				PAGE_WAKEUP_DONE(m);
				vm_object_paging_end(object);
				vm_object_unlock(object);
				vm_map_unlock_read(map);
				if (pmap_map != map)
					vm_map_unlock(pmap_map);

				if (funnel_set) {
					thread_funnel_set(curflock, TRUE);
					funnel_set = FALSE;
				}
				cur_thread->interruptible = interruptible_state;

				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
						      vaddr,
						      type_of_fault,
						      KERN_SUCCESS,
						      0,
						      0);
				return KERN_SUCCESS;
			}
			/*
			 *	Copy-on-write fault.  If objects match, then
			 *	object->copy must not be NULL (else control
			 *	would be in the previous code block), and we
			 *	have a potential push into the copy object
			 *	with which we won't cope here.
			 */

			if (cur_object == object)
				break;

			/*
			 *	This is now a shadow-based copy-on-write
			 *	fault -- it requires a copy up the shadow
			 *	chain.
			 *
			 *	Allocate a page in the original top-level
			 *	object.  Give up if the allocation fails.
			 *	Also need to remember the current page, as
			 *	it's the source of the copy.
			 */
			cur_m = m;
			m = vm_page_grab();
			if (m == VM_PAGE_NULL) {
				break;
			}
			/*
			 *	Now do the copy.  Mark the source busy
			 *	and take out paging references on both
			 *	objects.
			 *
			 *	NOTE: This code holds the map lock across
			 *	the page copy.
			 */

			cur_m->busy = TRUE;
			vm_page_copy(cur_m, m);
			vm_page_insert(m, object, offset);

			vm_object_paging_begin(cur_object);
			vm_object_paging_begin(object);

			type_of_fault = DBG_COW_FAULT;
			VM_STAT(cow_faults++);
			current_task()->cow_faults++;
			/*
			 *	Now cope with the source page and object.
			 *	If the top object has a ref count of 1,
			 *	then no other map can access it, and hence
			 *	it's not necessary to do the pmap_page_protect.
			 */

			vm_page_lock_queues();
			vm_page_deactivate(cur_m);
			m->dirty = TRUE;
			pmap_page_protect(cur_m->phys_addr,
					  VM_PROT_NONE);
			vm_page_unlock_queues();

			PAGE_WAKEUP_DONE(cur_m);
			vm_object_paging_end(cur_object);
			vm_object_unlock(cur_object);
			/*
			 *	Slight hack to call vm_object_collapse()
			 *	and then reuse the common map-in code.
			 *	Note that the object lock was taken above.
			 */

			vm_object_paging_end(object);
			vm_object_collapse(object);
			vm_object_paging_begin(object);
			vm_object_unlock(object);

			goto FastPmapEnter;
		}
		else {
			/*
			 *	No page at cur_object, cur_offset.
			 */

			if (cur_object->pager_created) {
				/*
				 *	Have to talk to the pager.  Give up.
				 */
				break;
			}

			if (cur_object->shadow == VM_OBJECT_NULL) {
				if (cur_object->shadow_severed) {
					vm_object_paging_end(object);
					vm_object_unlock(object);
					vm_map_unlock_read(map);
					if (pmap_map != map)
						vm_map_unlock(pmap_map);

					if (funnel_set) {
						thread_funnel_set(curflock, TRUE);
						funnel_set = FALSE;
					}
					cur_thread->interruptible = interruptible_state;

					return KERN_MEMORY_ERROR;
				}
				/*
				 *	Zero fill fault.  Page gets
				 *	filled in top object.  Insert
				 *	page, then drop any lower lock.
				 *	Give up if no page.
				 */
				if ((vm_page_free_target -
				     ((vm_page_free_target - vm_page_free_min) >> 2))
				    > vm_page_free_count) {
					break;
				}
				m = vm_page_alloc(object, offset);
				if (m == VM_PAGE_NULL) {
					break;
				}
				/*
				 * This is a zero-fill or initial fill
				 * page fault.  As such, we consider it
				 * undefined with respect to instruction
				 * execution, i.e. it is the responsibility
				 * of higher layers to call for an instruction
				 * sync after changing the contents and before
				 * sending a program into this area.  We
				 * choose this approach for performance.
				 */

				m->no_isync = FALSE;
				if (cur_object != object)
					vm_object_unlock(cur_object);

				vm_object_paging_begin(object);
				vm_object_unlock(object);
				/*
				 *	Now zero fill the page and map it.
				 *	The page is probably going to
				 *	be written soon, so don't bother
				 *	to clear the modified bit.
				 *
				 *	NOTE: This code holds the map
				 *	lock across the zero fill.
				 */

				if (!map->no_zero_fill) {
					vm_page_zero_fill(m);
					type_of_fault = DBG_ZERO_FILL_FAULT;
					VM_STAT(zero_fill_count++);
				}
				vm_page_lock_queues();
				VM_PAGE_QUEUES_REMOVE(m);

				m->page_ticket = vm_page_ticket;
				vm_page_ticket_roll++;
				if (vm_page_ticket_roll ==
				    VM_PAGE_TICKETS_IN_ROLL) {
					vm_page_ticket_roll = 0;
					if (vm_page_ticket ==
					    VM_PAGE_TICKET_ROLL_IDS)
						vm_page_ticket = 0;
					else
						vm_page_ticket++;
				}

				queue_enter(&vm_page_queue_inactive,
					    m, vm_page_t, pageq);
				m->inactive = TRUE;
				vm_page_inactive_count++;
				vm_page_unlock_queues();

				goto FastPmapEnter;
			}
			else {
				/*
				 *	On to the next level down the
				 *	shadow chain.
				 */

				cur_offset += cur_object->shadow_offset;
				new_object = cur_object->shadow;
				vm_object_lock(new_object);
				if (cur_object != object)
					vm_object_unlock(cur_object);
				cur_object = new_object;

				continue;
			}
		}
	}
	/*
	 *	Cleanup from fast fault failure.  Drop any object
	 *	lock other than the original, and drop the map lock.
	 */

	if (object != cur_object)
		vm_object_unlock(cur_object);
	}
	vm_map_unlock_read(map);
	if (pmap_map != map)
		vm_map_unlock(pmap_map);
	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_paging_begin(object);
	XPR(XPR_VM_FAULT, "vm_fault -> vm_fault_page\n", 0, 0, 0, 0, 0);
	kr = vm_fault_page(object, offset, fault_type,
			   (change_wiring && !wired),
			   interruptible,
			   lo_offset, hi_offset, behavior,
			   &prot, &result_page, &top_page,
			   &type_of_fault,
			   &error_code, map->no_zero_fill, FALSE, map, vaddr);
	/*
	 *	If we didn't succeed, lose the object reference immediately.
	 */

	if (kr != VM_FAULT_SUCCESS)
		vm_object_deallocate(object);
	/*
	 *	See why we failed, and take corrective action.
	 */

	switch (kr) {
	case VM_FAULT_SUCCESS:
		break;
	case VM_FAULT_MEMORY_SHORTAGE:
		if (vm_page_wait((change_wiring) ?
				 THREAD_UNINT : THREAD_ABORTSAFE))
			goto RetryFault;
		/* fall thru */
	case VM_FAULT_INTERRUPTED:
		kr = KERN_ABORTED;
		goto done;
	case VM_FAULT_RETRY:
		goto RetryFault;
	case VM_FAULT_FICTITIOUS_SHORTAGE:
		vm_page_more_fictitious();
		goto RetryFault;
	case VM_FAULT_MEMORY_ERROR:
		if (error_code)
			kr = error_code;
		else
			kr = KERN_MEMORY_ERROR;
		goto done;
	}

	m = result_page;
	if (m != VM_PAGE_NULL) {
		assert((change_wiring && !wired) ?
		       (top_page == VM_PAGE_NULL) :
		       ((top_page == VM_PAGE_NULL) == (m->object == object)));
	}
	/*
	 *	How to clean up the result of vm_fault_page.  This
	 *	happens whether the mapping is entered or not.
	 */

#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END
	/*
	 *	What to do with the resulting page from vm_fault_page
	 *	if it doesn't get entered into the physical map:
	 */

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END
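	/*
	 * Editorial note: MACRO_BEGIN and MACRO_END come from
	 * <kern/macro_help.h> and bracket the statements in a
	 * do { ... } while (0)-style block, so UNLOCK_AND_DEALLOCATE and
	 * RELEASE_PAGE expand to a single statement and can safely follow
	 * an unbraced if or else.
	 */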
	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (m != VM_PAGE_NULL) {
		old_copy_object = m->object->copy;
		vm_object_unlock(m->object);
	} else {
		old_copy_object = VM_OBJECT_NULL;
	}
	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		map = original_map;
		vm_map_lock_read(map);
		kr = vm_map_lookup_locked(&map, vaddr,
					  fault_type & ~VM_PROT_WRITE, &version,
					  &retry_object, &retry_offset, &retry_prot,
					  &wired, &behavior, &lo_offset, &hi_offset,
					  &pmap_map);
		pmap = pmap_map->pmap;
		if (kr != KERN_SUCCESS) {
			vm_map_unlock_read(map);
			if (m != VM_PAGE_NULL) {
				vm_object_lock(m->object);
				RELEASE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
			} else {
				vm_object_deallocate(object);
			}
			goto done;
		}
		vm_object_unlock(retry_object);
		if (m != VM_PAGE_NULL) {
			vm_object_lock(m->object);
		} else {
			vm_object_lock(object);
		}
		if ((retry_object != object) ||
		    (retry_offset != offset)) {
			vm_map_unlock_read(map);
			if (pmap_map != map)
				vm_map_unlock(pmap_map);
			if (m != VM_PAGE_NULL) {
				RELEASE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
			} else {
				vm_object_deallocate(object);
			}
			goto RetryFault;
		}
		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 */
		prot &= retry_prot;
		if (m != VM_PAGE_NULL) {
			vm_object_unlock(m->object);
		} else {
			vm_object_unlock(object);
		}
	}
	if (m != VM_PAGE_NULL) {
		vm_object_lock(m->object);
	} else {
		vm_object_lock(object);
	}
	/*
	 *	If the copy object changed while the top-level object
	 *	was unlocked, then we must take away write permission.
	 */

	if (m != VM_PAGE_NULL) {
		if (m->object->copy != old_copy_object)
			prot &= ~VM_PROT_WRITE;
	}
	/*
	 *	If we want to wire down this page, but no longer have
	 *	adequate permissions, we must start all over.
	 */

	if (wired && (fault_type != (prot | VM_PROT_WRITE))) {
		vm_map_verify_done(map, &version);
		if (pmap_map != map)
			vm_map_unlock(pmap_map);
		if (m != VM_PAGE_NULL) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
		} else {
			vm_object_deallocate(object);
		}
		goto RetryFault;
	}
	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.  The page may be on
	 *	the pageout queues.  If the pageout daemon comes
	 *	across the page, it will remove it from the queues.
	 */
	if (m != VM_PAGE_NULL) {
		if (m->no_isync == TRUE) {
			pmap_sync_caches_phys(m->phys_addr);

			m->no_isync = FALSE;
		}
		vm_object_unlock(m->object);

		PMAP_ENTER(pmap, vaddr, m, prot, wired);
		{
			tws_hash_line_t	line;
			task_t		task;

			task = current_task();
			if ((map != NULL) &&
			    (task->dynamic_working_set != 0)) {
				if (tws_lookup((tws_hash_t)
					task->dynamic_working_set,
					m->offset, m->object,
					&line) != KERN_SUCCESS) {
					/*
					 * Page not yet in the working set;
					 * insert it once, growing the hash
					 * if there is no space.
					 */
					if (tws_insert((tws_hash_t)
						task->dynamic_working_set,
						m->offset, m->object,
						vaddr, pmap_map)
						    == KERN_NO_SPACE) {
						tws_expand_working_set(
						    task->dynamic_working_set,
						    TWS_HASH_LINE_COUNT);
					}
				}
			}
		}
	} else {
		/* (if __ppc__) not working until we figure out phys copy on block maps */
#ifdef __ppc__
		int			memattr;
		struct phys_entry	*pp;

		/*
		 * Do a pmap block mapping from the physical address
		 * in the object.
		 */
		if ((pp = pmap_find_physentry(
			    (vm_offset_t)object->shadow_offset)) != 0) {
			memattr = ((pp->pte1 & 0x00000078) >> 3);
		} else {
			memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
		}

		pmap_map_block(pmap, vaddr,
			       (vm_offset_t)object->shadow_offset,
			       object->size, prot,
			       memattr, 0);	/* Set up a block mapped area */
#else
		vm_offset_t	off;

		for (off = 0; off < object->size; off += page_size) {
			pmap_enter(pmap, vaddr + off,
				   object->shadow_offset + off, prot, TRUE);
		}
#endif
	}
	/*
	 *	If the page is not wired down and isn't already
	 *	on a pageout queue, then put it where the
	 *	pageout daemon can find it.
	 */
	if (m != VM_PAGE_NULL) {
		vm_object_lock(m->object);
		vm_page_lock_queues();

		if (change_wiring) {
			if (wired)
				vm_page_wire(m);
			else
				vm_page_unwire(m);
		}
#if	VM_FAULT_STATIC_CONFIG
		else {
			if (!m->active && !m->inactive)
				vm_page_activate(m);
			m->reference = TRUE;
		}
#else
		else if (software_reference_bits) {
			if (!m->active && !m->inactive)
				vm_page_activate(m);
			m->reference = TRUE;
		} else {
			vm_page_activate(m);
		}
#endif
		vm_page_unlock_queues();
	}
	/*
	 *	Unlock everything, and return.
	 */

	vm_map_verify_done(map, &version);
	if (pmap_map != map)
		vm_map_unlock(pmap_map);
	if (m != VM_PAGE_NULL) {
		PAGE_WAKEUP_DONE(m);
		UNLOCK_AND_DEALLOCATE;
	} else {
		vm_fault_cleanup(object, top_page);
		vm_object_deallocate(object);
	}
	kr = KERN_SUCCESS;

#undef	UNLOCK_AND_DEALLOCATE
#undef	RELEASE_PAGE

    done:
	if (funnel_set) {
		thread_funnel_set(curflock, TRUE);
		funnel_set = FALSE;
	}
	cur_thread->interruptible = interruptible_state;

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
			      vaddr,
			      type_of_fault,
			      kr,
			      0,
			      0);

	return kr;
}
/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
kern_return_t
vm_fault_wire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	pmap_t		pmap)
{
	register vm_offset_t	va;
	register vm_offset_t	end_addr = entry->vme_end;
	register kern_return_t	rc;

	assert(entry->in_transition);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, entry->vme_start, end_addr, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		if ((rc = vm_fault_wire_fast(
			map, va, entry, pmap)) != KERN_SUCCESS) {
			rc = vm_fault(map, va, VM_PROT_NONE, TRUE,
				      (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE);
		}

		if (rc != KERN_SUCCESS) {
			struct vm_map_entry	tmp_entry = *entry;

			/* unwire wired pages */
			tmp_entry.vme_end = va;
			vm_fault_unwire(map, &tmp_entry, FALSE, pmap);

			return rc;
		}
	}
	return KERN_SUCCESS;
}
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(
	vm_map_t	map,
	vm_map_entry_t	entry,
	boolean_t	deallocate,
	pmap_t		pmap)
{
	register vm_offset_t	va;
	register vm_offset_t	end_addr = entry->vme_end;
	vm_object_t		object;

	object = (entry->is_sub_map)
			? VM_OBJECT_NULL : entry->object.vm_object;

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		pmap_change_wiring(pmap, va, FALSE);

		if (object == VM_OBJECT_NULL) {
			(void) vm_fault(map, va, VM_PROT_NONE, TRUE, THREAD_UNINT);
		} else {
			vm_prot_t		prot;
			vm_page_t		result_page;
			vm_page_t		top_page;
			vm_object_t		result_object;
			vm_fault_return_t	result;

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				XPR(XPR_VM_FAULT,
				    "vm_fault_unwire -> vm_fault_page\n",
				    0, 0, 0, 0, 0);
				result = vm_fault_page(object,
						entry->offset +
						    (va - entry->vme_start),
						VM_PROT_NONE, TRUE,
						THREAD_UNINT,
						entry->offset,
						entry->offset +
						    (entry->vme_end
						     - entry->vme_start),
						entry->behavior,
						&prot,
						&result_page,
						&top_page,
						(int *)0,
						0, map->no_zero_fill,
						FALSE, NULL, 0);
			} while (result == VM_FAULT_RETRY);

			if (result != VM_FAULT_SUCCESS)
				panic("vm_fault_unwire: failure");

			result_object = result_page->object;
			if (deallocate) {
				assert(!result_page->fictitious);
				pmap_page_protect(result_page->phys_addr,
						  VM_PROT_NONE);
				VM_PAGE_FREE(result_page);
			} else {
				vm_page_lock_queues();
				vm_page_unwire(result_page);
				vm_page_unlock_queues();
				PAGE_WAKEUP_DONE(result_page);
			}

			vm_fault_cleanup(result_object, top_page);
		}
	}

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, entry->vme_start, end_addr, TRUE);
}
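/*
 * Editorial note: the deallocate argument selects between two flavors of
 * unwiring.  vm_fault_wire()'s failure path above passes FALSE, so the
 * pages it backs out are simply unwired and left on the page queues; a
 * caller passing TRUE instead strips the physical mappings and frees the
 * pages outright.
 */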
/*
 *	vm_fault_wire_fast:
 *
 *	Handle the common case of a wire-down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
kern_return_t
vm_fault_wire_fast(
	vm_map_t	map,
	vm_offset_t	va,
	vm_map_entry_t	entry,
	pmap_t		pmap)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	register vm_page_t	m;
	vm_prot_t		prot;
	thread_act_t		thr_act;

	VM_STAT(faults++);

	if ((thr_act = current_act()) && (thr_act->task != TASK_NULL))
		thr_act->task->faults++;
/*
 *	Recovery actions
 */

#undef	RELEASE_PAGE
#define RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	vm_page_unwire(m);				\
	vm_page_unlock_queues();			\
}

#undef	UNLOCK_THINGS
#define UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
}

#undef	UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(object);			\
}

/*
 *	Give up and have the caller do things the hard way.
 */

#define GIVE_UP {					\
	UNLOCK_AND_DEALLOCATE;				\
	return(KERN_FAILURE);				\
}
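/*
 * Editorial note: unlike the MACRO_BEGIN/MACRO_END pairs in vm_fault()
 * above, these recovery macros expand to bare { ... } blocks.  That is
 * adequate for how this routine uses them, but bare-block macros do not
 * compose safely with an unbraced if/else followed by a semicolon, which
 * is why the do { ... } while (0) idiom is generally preferred for
 * statement macros.
 */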
	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map)
		return(KERN_FAILURE);

	/*
	 *	Find the backing store object and offset into it.
	 */

	object = entry->object.vm_object;
	offset = (va - entry->vme_start) + entry->offset;
	prot = entry->protection;
	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */

	vm_object_lock(object);
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	object->paging_in_progress++;
	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 */
	/*
	 *	Look for the page in the top-level object.  If it's not there
	 *	or there's something going on, give up.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->busy) ||
	    (m->unusual && (m->error || m->restart || m->absent ||
			    (prot & m->page_lock)))) {
		GIVE_UP;
	}
	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */

	vm_page_lock_queues();
	vm_page_wire(m);
	vm_page_unlock_queues();

	/*
	 *	Mark the page busy for other threads.
	 */
	assert(!m->busy);
	m->busy = TRUE;
	assert(!m->absent);

	/*
	 *	Give up if the page is being written and there's a copy object.
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}
	/*
	 *	Put this page into the physical map.
	 *	We have to unlock the object because pmap_enter
	 *	may cause other faults.
	 */
	if (m->no_isync == TRUE) {
		pmap_sync_caches_phys(m->phys_addr);

		m->no_isync = FALSE;
	}
	vm_object_unlock(object);

	PMAP_ENTER(pmap, va, m, prot, TRUE);

	/*
	 *	Must relock the object so that paging_in_progress can be cleared.
	 */
	vm_object_lock(object);

	/*
	 *	Unlock everything, and return.
	 */

	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);
}
/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */

void
vm_fault_copy_cleanup(
	vm_page_t	page,
	vm_page_t	top_page)
{
	vm_object_t	object = page->object;

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	vm_page_lock_queues();
	if (!page->active && !page->inactive)
		vm_page_activate(page);
	vm_page_unlock_queues();
	vm_fault_cleanup(object, top_page);
}
void
vm_fault_copy_dst_cleanup(
	vm_page_t	page)
{
	vm_object_t	object;

	if (page != VM_PAGE_NULL) {
		object = page->object;
		vm_object_lock(object);
		vm_page_lock_queues();
		vm_page_unwire(page);
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}
/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"copy_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		of data requested to be copied.
 */
kern_return_t
vm_fault_copy(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_size_t		*src_size,	/* INOUT */
	vm_object_t		dst_object,
	vm_object_offset_t	dst_offset,
	vm_map_t		dst_map,
	vm_map_version_t	*dst_version,
	int			interruptible)
{
	vm_page_t		result_page;

	vm_page_t		src_page;
	vm_page_t		src_top_page;
	vm_prot_t		src_prot;

	vm_page_t		dst_page;
	vm_page_t		dst_top_page;
	vm_prot_t		dst_prot;

	vm_size_t		amount_left;
	vm_object_t		old_copy_object;
	kern_return_t		error = 0;

	vm_size_t		part_size;
	/*
	 * In order not to confuse the clustered pageins, align
	 * the different offsets on a page boundary.
	 */
	vm_object_offset_t	src_lo_offset = trunc_page_64(src_offset);
	vm_object_offset_t	dst_lo_offset = trunc_page_64(dst_offset);
	vm_object_offset_t	src_hi_offset = round_page_64(src_offset + *src_size);
	vm_object_offset_t	dst_hi_offset = round_page_64(dst_offset + *src_size);

#define	RETURN(x)					\
	MACRO_BEGIN					\
	*src_size -= amount_left;			\
	MACRO_RETURN(x);				\
	MACRO_END

	amount_left = *src_size;
	do { /* while (amount_left > 0) */
		/*
		 * There may be a deadlock if both source and destination
		 * pages are the same.  To avoid this deadlock, the copy must
		 * start by getting the destination page, in order to apply
		 * COW semantics if any.
		 */

	RetryDestinationFault: ;

		dst_prot = VM_PROT_WRITE|VM_PROT_READ;
		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		XPR(XPR_VM_FAULT, "vm_fault_copy -> vm_fault_page\n", 0, 0, 0, 0, 0);
		switch (vm_fault_page(dst_object,
				      trunc_page_64(dst_offset),
				      VM_PROT_WRITE|VM_PROT_READ,
				      FALSE,
				      interruptible,
				      dst_lo_offset,
				      dst_hi_offset,
				      VM_BEHAVIOR_SEQUENTIAL,
				      &dst_prot,
				      &dst_page,
				      &dst_top_page,
				      (int *)0,
				      &error,
				      dst_map->no_zero_fill,
				      FALSE, NULL, 0)) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible))
				goto RetryDestinationFault;
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			RETURN(MACH_SEND_INTERRUPTED);
		case VM_FAULT_FICTITIOUS_SHORTAGE:
			vm_page_more_fictitious();
			goto RetryDestinationFault;
		case VM_FAULT_MEMORY_ERROR:
			if (error)
				return (error);
			else
				return (KERN_MEMORY_ERROR);
		}
		assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
		old_copy_object = dst_page->object->copy;

		/*
		 * There exists the possibility that the source and
		 * destination page are the same.  But we can't
		 * easily determine that now.  If they are the
		 * same, the call to vm_fault_page() for the
		 * destination page will deadlock.  To prevent this we
		 * wire the page so we can drop busy without having
		 * the page daemon steal the page.  We clean up the
		 * top page but keep the paging reference on the object
		 * holding the dest page so it doesn't go away.
		 */

		vm_page_lock_queues();
		vm_page_wire(dst_page);
		vm_page_unlock_queues();
		PAGE_WAKEUP_DONE(dst_page);
		vm_object_unlock(dst_page->object);

		if (dst_top_page != VM_PAGE_NULL) {
			vm_object_lock(dst_object);
			VM_PAGE_FREE(dst_top_page);
			vm_object_paging_end(dst_object);
			vm_object_unlock(dst_object);
		}
	RetrySourceFault: ;

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */
			src_page = VM_PAGE_NULL;
			result_page = VM_PAGE_NULL;
		} else {
			vm_object_lock(src_object);
			src_page = vm_page_lookup(src_object,
						  trunc_page_64(src_offset));
			if (src_page == dst_page) {
				src_prot = dst_prot;
				result_page = VM_PAGE_NULL;
			} else {
				src_prot = VM_PROT_READ;
				vm_object_paging_begin(src_object);
3397 "vm_fault_copy(2) -> vm_fault_page\n",
3399 switch (vm_fault_page(src_object
,
3400 trunc_page_64(src_offset
),
3406 VM_BEHAVIOR_SEQUENTIAL
,
3415 case VM_FAULT_SUCCESS
:
3417 case VM_FAULT_RETRY
:
3418 goto RetrySourceFault
;
3419 case VM_FAULT_MEMORY_SHORTAGE
:
3420 if (vm_page_wait(interruptible
))
3421 goto RetrySourceFault
;
3423 case VM_FAULT_INTERRUPTED
:
3424 vm_fault_copy_dst_cleanup(dst_page
);
3425 RETURN(MACH_SEND_INTERRUPTED
);
3426 case VM_FAULT_FICTITIOUS_SHORTAGE
:
3427 vm_page_more_fictitious();
3428 goto RetrySourceFault
;
3429 case VM_FAULT_MEMORY_ERROR
:
3430 vm_fault_copy_dst_cleanup(dst_page
);
3434 return(KERN_MEMORY_ERROR
);
3438 assert((src_top_page
== VM_PAGE_NULL
) ==
3439 (result_page
->object
== src_object
));
3441 assert ((src_prot
& VM_PROT_READ
) != VM_PROT_NONE
);
3442 vm_object_unlock(result_page
->object
);
		if (!vm_map_verify(dst_map, dst_version)) {
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_lock(dst_page->object);

		if (dst_page->object->copy != old_copy_object) {
			vm_object_unlock(dst_page->object);
			vm_map_verify_done(dst_map, dst_version);
			if (result_page != VM_PAGE_NULL && src_page != dst_page)
				vm_fault_copy_cleanup(result_page, src_top_page);
			vm_fault_copy_dst_cleanup(dst_page);
			break;
		}
		vm_object_unlock(dst_page->object);
		/*
		 *	Copy the page, and note that it is dirty
		 *	immediately.
		 */

		if (!page_aligned(src_offset) ||
		    !page_aligned(dst_offset) ||
		    !page_aligned(amount_left)) {

			vm_object_offset_t	src_po,
						dst_po;

			src_po = src_offset - trunc_page_64(src_offset);
			dst_po = dst_offset - trunc_page_64(dst_offset);

			if (dst_po > src_po) {
				part_size = PAGE_SIZE - dst_po;
			} else {
				part_size = PAGE_SIZE - src_po;
			}
			if (part_size > (amount_left)) {
				part_size = amount_left;
			}

			if (result_page == VM_PAGE_NULL) {
				vm_page_part_zero_fill(dst_page,
						       dst_po, part_size);
			} else {
				vm_page_part_copy(result_page, src_po,
						  dst_page, dst_po, part_size);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					dst_page->dirty = TRUE;
					vm_object_unlock(dst_page->object);
				}
			}
		}
		else {
			part_size = PAGE_SIZE;

			if (result_page == VM_PAGE_NULL)
				vm_page_zero_fill(dst_page);
			else {
				vm_page_copy(result_page, dst_page);
				if (!dst_page->dirty) {
					vm_object_lock(dst_object);
					dst_page->dirty = TRUE;
					vm_object_unlock(dst_page->object);
				}
			}
		}
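		/*
		 * Worked example (illustrative numbers, not from this
		 * file): with 4K pages, src_offset = 0x1200, dst_offset =
		 * 0x2800 and amount_left = 0x1000: src_po = 0x200 and
		 * dst_po = 0x800.  Since dst_po > src_po, part_size =
		 * PAGE_SIZE - dst_po = 0x800, which does not exceed
		 * amount_left, so 0x800 bytes are copied this iteration
		 * and both offsets advance by part_size below.
		 */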
		/*
		 *	Unlock everything, and return.
		 */

		vm_map_verify_done(dst_map, dst_version);

		if (result_page != VM_PAGE_NULL && src_page != dst_page)
			vm_fault_copy_cleanup(result_page, src_top_page);
		vm_fault_copy_dst_cleanup(dst_page);

		amount_left -= part_size;
		src_offset += part_size;
		dst_offset += part_size;
	} while (amount_left > 0);

	RETURN(KERN_SUCCESS);
#undef	RETURN

	/*NOTREACHED*/
}
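/*
 * Hypothetical usage sketch (illustrative only; the setup and names below
 * are assumptions, not taken from this file).  A caller holding references
 * to both objects and a version for the destination map might drive
 * vm_fault_copy() like this:
 */
#if 0	/* illustrative only, not compiled */
	vm_size_t	copy_size = 4 * PAGE_SIZE;	/* amount requested */
	kern_return_t	kr;

	kr = vm_fault_copy(src_object, src_offset,
			   &copy_size,		/* INOUT: amount actually copied */
			   dst_object, dst_offset,
			   dst_map, &dst_version,
			   THREAD_UNINT);
	if (kr == KERN_SUCCESS && copy_size < 4 * PAGE_SIZE) {
		/*
		 * Destination map verification failed part way through:
		 * re-lookup the map and retry the remaining bytes.
		 */
	}
#endif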
/*
 *	Routine:	vm_fault_page_overwrite
 *
 *	Description:
 *		A form of vm_fault_page that assumes that the
 *		resulting page will be overwritten in its entirety,
 *		making it unnecessary to obtain the correct *contents*
 *		of the page.
 *
 *	Implementation:
 *		XXX Untested.  Also unused.  Eventually, this technology
 *		could be used in vm_fault_copy() to advantage.
 */
vm_fault_return_t
vm_fault_page_overwrite(
	vm_object_t		dst_object,
	vm_object_offset_t	dst_offset,
	vm_page_t		*result_page)	/* OUT */
{
	vm_page_t		dst_page;
	kern_return_t		wait_result;

#define	interruptible	THREAD_UNINT	/* XXX */
	for (;;) {
		/*
		 *	Look for a page at this offset.
		 */

		while ((dst_page = vm_page_lookup(dst_object, dst_offset))
		       == VM_PAGE_NULL) {
			/*
			 *	No page, no problem... just allocate one.
			 */

			dst_page = vm_page_alloc(dst_object, dst_offset);
			if (dst_page == VM_PAGE_NULL) {
				vm_object_unlock(dst_object);
				VM_PAGE_WAIT();
				vm_object_lock(dst_object);
				continue;
			}
			/*
			 *	Pretend that the memory manager
			 *	write-protected the page.
			 *
			 *	Note that we will be asking for write
			 *	permission without asking for the data
			 *	first.
			 */

			dst_page->overwriting = TRUE;
			dst_page->page_lock = VM_PROT_WRITE;
			dst_page->absent = TRUE;
			dst_page->unusual = TRUE;
			dst_object->absent_count++;

			break;
			/*
			 *	When we bail out, we might have to throw
			 *	away the page created here.
			 */

#define	DISCARD_PAGE						\
	MACRO_BEGIN						\
	vm_object_lock(dst_object);				\
	dst_page = vm_page_lookup(dst_object, dst_offset);	\
	if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting)	\
		VM_PAGE_FREE(dst_page);				\
	vm_object_unlock(dst_object);				\
	MACRO_END
		}
		/*
		 *	If the page is write-protected...
		 */

		if (dst_page->page_lock & VM_PROT_WRITE) {
			/*
			 *	... and an unlock request hasn't been sent
			 *	to the memory manager yet...
			 */

			if (!(dst_page->unlock_request & VM_PROT_WRITE)) {
				vm_prot_t	u;
				kern_return_t	rc;

				/*
				 *	... then send one now.
				 */
				if (!dst_object->pager_ready) {
					vm_object_assert_wait(dst_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
					vm_object_unlock(dst_object);
					wait_result = thread_block((void (*)(void))0);
					if (wait_result != THREAD_AWAKENED) {
						DISCARD_PAGE;
						return (VM_FAULT_INTERRUPTED);
					}
					vm_object_lock(dst_object);
					continue;
				}
				u = dst_page->unlock_request |= VM_PROT_WRITE;
				vm_object_unlock(dst_object);

				if ((rc = memory_object_data_unlock(
					     dst_object->pager,
					     dst_offset + dst_object->paging_offset,
					     PAGE_SIZE,
					     u)) != KERN_SUCCESS) {
					if (vm_fault_debug)
						printf("vm_object_overwrite: memory_object_data_unlock failed\n");
					DISCARD_PAGE;
					return ((rc == MACH_SEND_INTERRUPTED) ?
						VM_FAULT_INTERRUPTED :
						VM_FAULT_MEMORY_ERROR);
				}
				vm_object_lock(dst_object);
			}

			/* ... fall through to wait below */
		} else {
			/*
			 *	If the page isn't being used for other
			 *	purposes, then we're done.
			 */
			if (!(dst_page->busy || dst_page->absent ||
			      dst_page->error || dst_page->restart))
				break;
		}

		/*
		 *	Wait for the page to become available.
		 */
		PAGE_ASSERT_WAIT(dst_page, interruptible);
		vm_object_unlock(dst_object);
		wait_result = thread_block((void (*)(void))0);
		if (wait_result != THREAD_AWAKENED) {
			DISCARD_PAGE;
			return (VM_FAULT_INTERRUPTED);
		}
		vm_object_lock(dst_object);
	}
	*result_page = dst_page;
	return (VM_FAULT_SUCCESS);

#undef	interruptible
}
#if	VM_FAULT_CLASSIFY
/*
 *	Temporary statistics gathering support.
 */

/*
 *	Statistics arrays:
 */
#define VM_FAULT_TYPES_MAX	5
#define	VM_FAULT_LEVEL_MAX	8

int	vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];

#define	VM_FAULT_TYPE_ZERO_FILL	0
#define	VM_FAULT_TYPE_MAP_IN	1
#define	VM_FAULT_TYPE_PAGER	2
#define	VM_FAULT_TYPE_COPY	3
#define	VM_FAULT_TYPE_OTHER	4
void
vm_fault_classify(vm_object_t		object,
		  vm_object_offset_t	offset,
		  vm_prot_t		fault_type)
{
	int		type, level = 0;
	vm_page_t	m;
	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			if (m->busy || m->error || m->restart || m->absent ||
			    fault_type & m->page_lock) {
				type = VM_FAULT_TYPE_OTHER;
				break;
			}
			if (((fault_type & VM_PROT_WRITE) == 0) ||
			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
				type = VM_FAULT_TYPE_MAP_IN;
				break;
			}
			type = VM_FAULT_TYPE_COPY;
			break;
		}
		else {
			if (object->pager_created) {
				type = VM_FAULT_TYPE_PAGER;
				break;
			}
			if (object->shadow == VM_OBJECT_NULL) {
				type = VM_FAULT_TYPE_ZERO_FILL;
				break;
			}

			offset += object->shadow_offset;
			object = object->shadow;
			level++;
			continue;
		}
	}

	if (level > VM_FAULT_LEVEL_MAX)
		level = VM_FAULT_LEVEL_MAX;

	vm_fault_stats[type][level] += 1;

	return;
}
/* cleanup routine to call from debugger */
void
vm_fault_classify_init(void)
{
	int	type, level;

	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
			vm_fault_stats[type][level] = 0;
		}
	}

	return;
}
#endif	/* VM_FAULT_CLASSIFY */