/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <kern/thread.h>
#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <mach/vm_behavior.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <i386/intel_read_fault.h>

#include <kern/macro_help.h>

/*
 * Expansion of vm_fault for read fault in kernel mode.
 * Must enter the mapping as writable, since the i386
 * (and i860 in i386 compatibility mode) ignores write
 * protection in kernel mode.
 *
 * Note that this routine can be called for pmaps other
 * than the kernel_pmap, in which case it just enters
 * a read-only mapping.  (See e.g. kernel_trap().)
 */
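/*
 * Illustrative call site (a hedged sketch, not the actual trap code):
 * a handler such as kernel_trap() would dispatch a fault on a read
 * access to this routine, roughly as
 *
 *	if ((err & T_PF_WRITE) == 0)
 *		result = intel_read_fault(thread->map, trunc_page(vaddr));
 *
 * where the surrounding names (err, T_PF_WRITE, vaddr) are assumptions
 * here, not taken from this file.
 */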
kern_return_t
intel_read_fault(
	vm_map_t	map,
	vm_offset_t	vaddr)
{
	vm_map_version_t	version;	/* Map version for
						   verification */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_behavior_t		behavior;	/* Expected paging behavior */
	vm_map_offset_t		lo_offset, hi_offset;
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	boolean_t		wired;		/* Is map region wired? */
	kern_return_t		result;
	register vm_page_t	m;
	vm_map_t		map_pmap;
	vm_map_t		original_map = map;
	thread_t		cur_thread;
	boolean_t		funnel_set;
	funnel_t		*curflock = NULL;

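	/*
	 * Drop any funnel held on entry so the fault is not handled
	 * funneled; every return path below re-takes it.  (The rationale
	 * is inferred from the restore sites, not stated in this file.)
	 */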
	cur_thread = current_thread();
	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set(curflock, FALSE);
	} else {
		funnel_set = FALSE;
	}

RetryFault:

	map = original_map;

	/*
	 * Find the backing store object and offset into it
	 * to begin search.
	 */
	vm_map_lock_read(map);
	result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				      &object, &offset, &prot, &wired,
				      &behavior, &lo_offset,
				      &hi_offset, &map_pmap);

	vm_map_unlock_read(map);

	if (result != KERN_SUCCESS) {
		if (funnel_set)
			thread_funnel_set(curflock, TRUE);
		return (result);
	}

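	/*
	 * If the lookup descended into a submap, map_pmap is the map
	 * whose physical map the page will be entered in, and differs
	 * from the map holding the object; take a reference so it
	 * survives once its lock is dropped.  (A hedged reading of the
	 * lookup contract, inferred from how map_pmap is used below.)
	 */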
	if (map_pmap != map) {
		vm_map_reference(map_pmap);
		vm_map_unlock_read(map_pmap);
	}

	/*
	 * Make a reference to this object to prevent its
	 * disposal while we are playing with it; paging_begin
	 * additionally keeps the object from being terminated
	 * while the fault is in progress.
	 */
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_paging_begin(object);

	result = vm_fault_page(object, offset, VM_PROT_READ, FALSE,
			       THREAD_ABORTSAFE,
			       lo_offset, hi_offset, behavior,
			       &prot, &result_page, &top_page, (int *)0,
			       0, map->no_zero_fill, FALSE, map, vaddr);

	if (result != VM_FAULT_SUCCESS) {
		vm_object_deallocate(object);
		if (map_pmap != map) {
			vm_map_deallocate(map_pmap);
		}

		switch (result) {
		    case VM_FAULT_RETRY:
			goto RetryFault;
		    case VM_FAULT_INTERRUPTED:
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (KERN_SUCCESS);
		    case VM_FAULT_MEMORY_SHORTAGE:
			VM_PAGE_WAIT();
			goto RetryFault;
		    case VM_FAULT_FICTITIOUS_SHORTAGE:
			vm_page_more_fictitious();
			goto RetryFault;
		    case VM_FAULT_MEMORY_ERROR:
			/* Re-take the funnel, as on the other return paths. */
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (KERN_MEMORY_ERROR);
		}
	}

	m = result_page;

	/*
	 * How to clean up the result of vm_fault_page.  This
	 * happens whether the mapping is entered or not.
	 */

#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END
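
/*
 * MACRO_BEGIN/MACRO_END (from <kern/macro_help.h>) are the usual
 * statement-macro wrappers, expanding to roughly
 *
 *	do { ... } while (FALSE)
 *
 * so a multi-statement macro behaves as a single statement after
 * an `if'.
 */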

/*
 * What to do with the resulting page from vm_fault_page
 * if it doesn't get entered into the physical map:
 */

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END
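
/*
 * PAGE_WAKEUP_DONE clears the page's busy bit and wakes any threads
 * waiting on it; re-activating the page keeps it on a paging queue
 * so the pageout daemon can still find it.
 */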

	/*
	 * We must verify that the maps have not changed: the map was
	 * unlocked, and vm_fault_page() may have blocked, while the
	 * fault was being handled, so the original lookup may be stale.
	 */
	vm_object_unlock(m->object);

	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		if (map != map_pmap) {
			vm_map_deallocate(map_pmap);
		}

		map = original_map;
		vm_map_lock_read(map);

		result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
					      &retry_object, &retry_offset, &retry_prot,
					      &wired, &behavior, &lo_offset,
					      &hi_offset, &map_pmap);

		if (result != KERN_SUCCESS) {
			vm_map_unlock_read(map);
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (result);
		}

		if (map != map_pmap) {
			vm_map_reference(map_pmap);
		}

		vm_object_unlock(retry_object);

		if (retry_object != object || retry_offset != offset) {
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			vm_map_unlock_read(map);
			if (map_pmap != map) {
				vm_map_unlock_read(map_pmap);
				vm_map_deallocate(map_pmap);
			}
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
	}

	/*
	 * Put the page in the physical map (PMAP_ENTER is the
	 * machine-independent wrapper around pmap_enter()).
	 */

	PMAP_ENTER(map_pmap->pmap, vaddr, m, VM_PROT_READ, PMAP_DEFAULT_CACHE, wired);

	if (map_pmap != map) {
		vm_map_unlock_read(map_pmap);
		vm_map_deallocate(map_pmap);
	}

	vm_object_lock(m->object);
	vm_page_lock_queues();
	if (!m->active && !m->inactive)
		vm_page_activate(m);
	m->reference = TRUE;
	vm_page_unlock_queues();

	vm_map_verify_done(map, &version);
	PAGE_WAKEUP_DONE(m);

	UNLOCK_AND_DEALLOCATE;

#undef UNLOCK_AND_DEALLOCATE
#undef RELEASE_PAGE

	if (funnel_set)
		thread_funnel_set(curflock, TRUE);
	return (KERN_SUCCESS);
}