/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <kern/thread.h>
#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <mach/vm_behavior.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <i386/intel_read_fault.h>

#include <kern/macro_help.h>
/*
 *	intel_read_fault:
 *
 *	Expansion of vm_fault for read fault in kernel mode.
 *	Must enter the mapping as writable, since the i386
 *	(and i860 in i386 compatibility mode) ignores write
 *	protection in kernel mode.
 *
 *	Note that this routine can be called for pmap's other
 *	than the kernel_pmap, in which case it just enters
 *	a read-only mapping.  (See e.g. kernel_trap().)
 */
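
/*
 * Usage sketch (hypothetical, for illustration only; the argument
 * plumbing in the real kernel_trap() differs).  A trap handler that
 * has decoded a page fault as a kernel-mode read would resolve it as:
 *
 *	if (intel_read_fault(thread_map, trunc_page(fault_addr))
 *						== KERN_SUCCESS)
 *		return;		(fault resolved; retry the instruction)
 *
 * where thread_map and fault_addr stand in for whatever map and
 * faulting address the handler recovered from the trap state.
 */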
kern_return_t
intel_read_fault(
	vm_map_t	map,
	vm_map_offset_t	vaddr)
{
	vm_map_version_t	version;	/* Map version for verification */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_behavior_t		behavior;	/* Expected paging behavior */
	vm_map_offset_t		lo_offset, hi_offset;
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	boolean_t		wired;		/* Is map region wired? */
	kern_return_t		result;
	vm_page_t		m;
	vm_map_t		map_pmap;
	vm_map_t		original_map = map;
	thread_t		cur_thread;
	boolean_t		funnel_set = FALSE;
	funnel_t		*curflock = NULL;

	/*
	 * If the caller holds the kernel funnel, drop it before
	 * faulting; it is re-taken on every return path below.
	 */
	cur_thread = current_thread();
	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set( curflock, FALSE);
	}

    RetryFault:

	map = original_map;
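
	/*
	 * All retry paths (a changed map, a page or fictitious-page
	 * shortage, a pager asking for another try) come back to
	 * RetryFault and redo the lookup against the original map.
	 */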

	/*
	 *	Find the backing store object and offset into it
	 *	to begin the search.
	 */
	vm_map_lock_read(map);
	result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				&object, &offset, &prot, &wired,
				&behavior, &lo_offset,
				&hi_offset, &map_pmap);

	vm_map_unlock_read(map);

	if (result != KERN_SUCCESS) {
		if (funnel_set)
			thread_funnel_set( curflock, TRUE);
		return (result);
	}
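
	/*
	 * Every early return below mirrors the path above: drop
	 * whatever map and object references are still held, then
	 * re-take the caller's funnel (when funnel_set) so the
	 * caller's locking state is preserved.
	 */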

	if (map_pmap != map) {
		vm_map_reference(map_pmap);
		vm_map_unlock_read(map_pmap);
	}

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are playing with it.
	 */
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_paging_begin(object);

	result = vm_fault_page(object, offset, VM_PROT_READ, FALSE,
			       THREAD_ABORTSAFE,
			       lo_offset, hi_offset, behavior,
			       &prot, &result_page, &top_page, (int *)0,
			       0, map->no_zero_fill, FALSE, map, vaddr);
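
	/*
	 * On VM_FAULT_SUCCESS, result_page comes back busy (hence the
	 * PAGE_WAKEUP_DONE in RELEASE_PAGE below) with its object
	 * locked, and top_page, when non-NULL, is a placeholder page
	 * that vm_fault_cleanup must dispose of.
	 */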

	if (result != VM_FAULT_SUCCESS) {
		vm_object_deallocate(object);
		if (map_pmap != map) {
			vm_map_deallocate(map_pmap);
		}

		switch (result) {
		case VM_FAULT_RETRY:
			goto RetryFault;
		case VM_FAULT_INTERRUPTED:
			if (funnel_set)
				thread_funnel_set( curflock, TRUE);
			return (KERN_SUCCESS);
		case VM_FAULT_MEMORY_SHORTAGE:
			VM_PAGE_WAIT();
			goto RetryFault;
		case VM_FAULT_FICTITIOUS_SHORTAGE:
			vm_page_more_fictitious();
			goto RetryFault;
		case VM_FAULT_MEMORY_ERROR:
			if (funnel_set)
				thread_funnel_set( curflock, TRUE);
			return (KERN_MEMORY_ERROR);
		}
	}

	m = result_page;

	/*
	 *	How to clean up the result of vm_fault_page.  This
	 *	happens whether the mapping is entered or not.
	 */

#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END

	/*
	 *	What to do with the resulting page from vm_fault_page
	 *	if it doesn't get entered into the physical map:
	 */

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END
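
	/*
	 * MACRO_BEGIN/MACRO_END come from <kern/macro_help.h> and wrap
	 * a macro body in a do { ... } while-style block, so each macro
	 * expands to exactly one statement.  A minimal sketch of why
	 * that matters (entered and enter_mapping are stand-ins):
	 *
	 *	if (!entered)
	 *		RELEASE_PAGE(m);
	 *	else
	 *		enter_mapping();
	 *
	 * Without the wrapper, the multi-statement body would leave
	 * the else dangling and break the conditional.
	 */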

	/*
	 *	We must verify that the maps have not changed.
	 */
	vm_object_unlock(m->object);

	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		if (map != map_pmap) {
			vm_map_deallocate(map_pmap);
		}

		map = original_map;
		vm_map_lock_read(map);

		result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &behavior, &lo_offset,
				&hi_offset, &map_pmap);

		if (result != KERN_SUCCESS) {
			vm_map_unlock_read(map);
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			if (funnel_set)
				thread_funnel_set( curflock, TRUE);
			return (result);
		}

		if (map != map_pmap) {
			vm_map_reference(map_pmap);
		}

		vm_object_unlock(retry_object);

		/*
		 * If the translation no longer resolves to the same
		 * object and offset, toss the page and redo the fault
		 * from the top.
		 */
		if (retry_object != object || retry_offset != offset) {
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			vm_map_unlock_read(map);
			if (map_pmap != map) {
				vm_map_unlock_read(map_pmap);
				vm_map_deallocate(map_pmap);
			}
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
	}
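
	/*
	 * At this point the lookup is stable: the map verified, and
	 * the translation still names the same object and offset the
	 * page was faulted from.
	 */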

	/*
	 *	Put the page in the physical map.
	 */
	PMAP_ENTER(map_pmap->pmap, vaddr, m, VM_PROT_READ,
		   PMAP_DEFAULT_CACHE, wired);

	if (map_pmap != map) {
		vm_map_unlock_read(map_pmap);
		vm_map_deallocate(map_pmap);
	}

	vm_object_lock(m->object);
	vm_page_lock_queues();
	if (!m->active && !m->inactive)
		vm_page_activate(m);
	m->reference = TRUE;
	vm_page_unlock_queues();

	vm_map_verify_done(map, &version);
	PAGE_WAKEUP_DONE(m);

	UNLOCK_AND_DEALLOCATE;

#undef UNLOCK_AND_DEALLOCATE
#undef RELEASE_PAGE

	if (funnel_set)
		thread_funnel_set( curflock, TRUE);
	return (KERN_SUCCESS);
}