osfmk/i386/read_fault.c (apple/xnu, xnu-792.12.6)
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <kern/thread.h>
#include <vm/vm_fault.h>
#include <mach/kern_return.h>
#include <mach/vm_behavior.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <i386/intel_read_fault.h>

#include <kern/macro_help.h>

/*
 * Expansion of vm_fault for read faults in kernel mode.
 * Must enter the mapping as writable, since the i386
 * (and the i860 in i386 compatibility mode) ignores write
 * protection in kernel mode.
 *
 * Note that this routine can be called for pmaps other
 * than the kernel_pmap, in which case it just enters
 * a read-only mapping.  (See e.g. kernel_trap().)
 */
kern_return_t
intel_read_fault(
	vm_map_t	map,
	vm_offset_t	vaddr)
{
	vm_map_version_t	version;	/* Map version for verification */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_behavior_t		behavior;	/* Expected paging behavior */
	vm_map_offset_t		lo_offset, hi_offset;
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	boolean_t		wired;		/* Is map region wired? */
	kern_return_t		result;
	register vm_page_t	m;
	vm_map_t		map_pmap;
	vm_map_t		original_map = map;
	thread_t		cur_thread;
	boolean_t		funnel_set;
	funnel_t		*curflock = NULL;

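	/*
	 * If this thread holds the kernel funnel, drop it across the
	 * fault: vm_fault_page() may block, and sleeping with the
	 * funnel held would stall every other funneled thread.  Each
	 * return path below reacquires it.
	 */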
	cur_thread = current_thread();
	if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
		funnel_set = TRUE;
		curflock = cur_thread->funnel_lock;
		thread_funnel_set(curflock, FALSE);
	} else {
		funnel_set = FALSE;
	}

RetryFault:

	map = original_map;

	/*
	 * Find the backing store object and the offset into it
	 * at which to begin the search.
	 */
	vm_map_lock_read(map);
	result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				      &object, &offset, &prot, &wired,
				      &behavior, &lo_offset,
				      &hi_offset, &map_pmap);

	vm_map_unlock_read(map);

	if (result != KERN_SUCCESS) {
		if (funnel_set)
			thread_funnel_set(curflock, TRUE);
		return (result);
	}

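	/*
	 * vm_map_lookup_locked() may have descended through a submap
	 * chain; map_pmap is the map whose pmap will receive the new
	 * mapping.  Take a reference on it so that it cannot be
	 * destroyed while the fault is in progress.
	 */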
	if (map_pmap != map) {
		vm_map_reference(map_pmap);
		vm_map_unlock_read(map_pmap);
	}

	/*
	 * Make a reference to this object to prevent its
	 * disposal while we are playing with it.
	 */
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_paging_begin(object);

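	/*
	 * Ask the VM system to find the page, paging it in if
	 * necessary.  On success vm_fault_page() returns with the
	 * page busy and its object locked; top_page, when not null,
	 * is the placeholder page in the top-level object that must
	 * be cleaned up afterwards.
	 */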
	result = vm_fault_page(object, offset, VM_PROT_READ, FALSE,
			       THREAD_ABORTSAFE,
			       lo_offset, hi_offset, behavior,
			       &prot, &result_page, &top_page, (int *)0,
			       0, map->no_zero_fill, FALSE, map, vaddr);

	if (result != VM_FAULT_SUCCESS) {
		vm_object_deallocate(object);
		if (map_pmap != map) {
			vm_map_deallocate(map_pmap);
		}

		switch (result) {
		    case VM_FAULT_RETRY:
			goto RetryFault;
		    case VM_FAULT_INTERRUPTED:
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (KERN_SUCCESS);
		    case VM_FAULT_MEMORY_SHORTAGE:
			VM_PAGE_WAIT();
			goto RetryFault;
		    case VM_FAULT_FICTITIOUS_SHORTAGE:
			vm_page_more_fictitious();
			goto RetryFault;
		    case VM_FAULT_MEMORY_ERROR:
			/*
			 * Reacquire the funnel before returning, as on
			 * every other return path.
			 */
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (KERN_MEMORY_ERROR);
		}
	}
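
	/*
	 * The fault succeeded: result_page is busy and its object
	 * is locked.
	 */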

	m = result_page;

	/*
	 * How to clean up the result of vm_fault_page.  This
	 * happens whether the mapping is entered or not.
	 */
#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END

	/*
	 * What to do with the resulting page from vm_fault_page
	 * if it doesn't get entered into the physical map:
	 */
#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END

	/*
	 * We must verify that the maps have not changed: the map was
	 * unlocked while vm_fault_page() ran, so another thread may
	 * have changed or deleted the mapping in the meantime.  Drop
	 * the object lock while the map is revalidated.
	 */
	vm_object_unlock(m->object);

	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		if (map != map_pmap) {
			vm_map_deallocate(map_pmap);
		}

		map = original_map;
		vm_map_lock_read(map);

		result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
					      &retry_object, &retry_offset, &retry_prot,
					      &wired, &behavior, &lo_offset,
					      &hi_offset, &map_pmap);

		if (result != KERN_SUCCESS) {
			vm_map_unlock_read(map);
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			if (funnel_set)
				thread_funnel_set(curflock, TRUE);
			return (result);
		}

		if (map != map_pmap) {
			vm_map_reference(map_pmap);
		}

		vm_object_unlock(retry_object);

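		/*
		 * If the lookup no longer yields the same object and
		 * offset, the page we faulted in is stale: release it
		 * and restart the fault from the top.
		 */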
		if (retry_object != object || retry_offset != offset) {
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			vm_map_unlock_read(map);
			if (map_pmap != map) {
				vm_map_unlock_read(map_pmap);
				vm_map_deallocate(map_pmap);
			}
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
	}
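
	/*
	 * The map lookup is now known to be consistent with the page
	 * we faulted in.  The mapping is entered with VM_PROT_READ;
	 * in the kernel pmap it is nonetheless effectively writable,
	 * since the i386 ignores write protection in kernel mode
	 * (see the comment at the top of this file).
	 */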

	/*
	 * Put the page in the physical map.
	 */

	PMAP_ENTER(map_pmap->pmap, vaddr, m, VM_PROT_READ, PMAP_DEFAULT_CACHE, wired);

	if (map_pmap != map) {
		vm_map_unlock_read(map_pmap);
		vm_map_deallocate(map_pmap);
	}

	vm_object_lock(m->object);
	vm_page_lock_queues();
	if (!m->active && !m->inactive)
		vm_page_activate(m);
	m->reference = TRUE;
	vm_page_unlock_queues();
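
	/*
	 * Release the map read lock taken by vm_map_verify() (or by
	 * the retry lookup above) now that the mapping is entered.
	 */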
	vm_map_verify_done(map, &version);
	PAGE_WAKEUP_DONE(m);

	UNLOCK_AND_DEALLOCATE;

#undef UNLOCK_AND_DEALLOCATE
#undef RELEASE_PAGE

	if (funnel_set)
		thread_funnel_set(curflock, TRUE);
	return (KERN_SUCCESS);
}