]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2009 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: vm_fault.c | |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
61 | * | |
62 | * Page fault handling module. | |
63 | */ | |
64 | ||
65 | #include <mach_cluster_stats.h> | |
66 | #include <mach_pagemap.h> | |
67 | #include <libkern/OSAtomic.h> | |
68 | ||
69 | #include <mach/mach_types.h> | |
70 | #include <mach/kern_return.h> | |
71 | #include <mach/message.h> /* for error codes */ | |
72 | #include <mach/vm_param.h> | |
73 | #include <mach/vm_behavior.h> | |
74 | #include <mach/memory_object.h> | |
75 | /* For memory_object_data_{request,unlock} */ | |
76 | #include <mach/sdt.h> | |
77 | ||
78 | #include <kern/kern_types.h> | |
79 | #include <kern/host_statistics.h> | |
80 | #include <kern/counters.h> | |
81 | #include <kern/task.h> | |
82 | #include <kern/thread.h> | |
83 | #include <kern/sched_prim.h> | |
84 | #include <kern/host.h> | |
85 | #include <kern/xpr.h> | |
86 | #include <kern/mach_param.h> | |
87 | #include <kern/macro_help.h> | |
88 | #include <kern/zalloc.h> | |
89 | #include <kern/misc_protos.h> | |
90 | ||
91 | #include <vm/vm_compressor.h> | |
92 | #include <vm/vm_compressor_pager.h> | |
93 | #include <vm/vm_fault.h> | |
94 | #include <vm/vm_map.h> | |
95 | #include <vm/vm_object.h> | |
96 | #include <vm/vm_page.h> | |
97 | #include <vm/vm_kern.h> | |
98 | #include <vm/pmap.h> | |
99 | #include <vm/vm_pageout.h> | |
100 | #include <vm/vm_protos.h> | |
101 | #include <vm/vm_external.h> | |
102 | #include <vm/memory_object.h> | |
103 | #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ | |
104 | #include <vm/vm_shared_region.h> | |
105 | ||
106 | #include <sys/codesign.h> | |
107 | ||
108 | #include <libsa/sys/timers.h> /* for struct timespec */ | |
109 | ||
110 | #define VM_FAULT_CLASSIFY 0 | |
111 | ||
112 | #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */ | |
113 | ||
114 | unsigned int vm_object_pagein_throttle = 16; | |
115 | ||
116 | /* | |
117 | * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which | |
118 | * kicks in when swap space runs out. 64-bit programs have massive address spaces and can leak enormous amounts | |
119 | * of memory if they're buggy and can run the system completely out of swap space. If this happens, we | |
120 | * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps | |
121 | * keep the UI active so that the user has a chance to kill the offending task before the system | |
122 | * completely hangs. | |
123 | * | |
124 | * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied | |
125 | * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold | |
126 | * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a | |
127 | * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again. | |
128 | */ | |
129 | ||
130 | extern void throttle_lowpri_io(int); | |
131 | ||
132 | uint64_t vm_hard_throttle_threshold; | |
133 | ||
134 | ||
135 | ||
136 | #define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \ | |
137 | (vm_page_free_count < vm_page_throttle_limit && \ | |
138 | proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED)) | |
139 | ||
140 | ||
141 | #define HARD_THROTTLE_DELAY 5000 /* 5000 us == 5 ms */ | |
142 | #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ | |
143 | ||
144 | #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 | |
145 | #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 | |
146 | ||
147 | ||
148 | boolean_t current_thread_aborted(void); | |
149 | ||
150 | /* Forward declarations of internal routines. */ | |
151 | static kern_return_t vm_fault_wire_fast( | |
152 | vm_map_t map, | |
153 | vm_map_offset_t va, | |
154 | vm_prot_t prot, | |
155 | vm_map_entry_t entry, | |
156 | pmap_t pmap, | |
157 | vm_map_offset_t pmap_addr, | |
158 | ppnum_t *physpage_p); | |
159 | ||
160 | static kern_return_t vm_fault_internal( | |
161 | vm_map_t map, | |
162 | vm_map_offset_t vaddr, | |
163 | vm_prot_t caller_prot, | |
164 | boolean_t change_wiring, | |
165 | int interruptible, | |
166 | pmap_t pmap, | |
167 | vm_map_offset_t pmap_addr, | |
168 | ppnum_t *physpage_p); | |
169 | ||
170 | static void vm_fault_copy_cleanup( | |
171 | vm_page_t page, | |
172 | vm_page_t top_page); | |
173 | ||
174 | static void vm_fault_copy_dst_cleanup( | |
175 | vm_page_t page); | |
176 | ||
177 | #if VM_FAULT_CLASSIFY | |
178 | extern void vm_fault_classify(vm_object_t object, | |
179 | vm_object_offset_t offset, | |
180 | vm_prot_t fault_type); | |
181 | ||
182 | extern void vm_fault_classify_init(void); | |
183 | #endif | |
184 | ||
185 | unsigned long vm_pmap_enter_blocked = 0; | |
186 | unsigned long vm_pmap_enter_retried = 0; | |
187 | ||
188 | unsigned long vm_cs_validates = 0; | |
189 | unsigned long vm_cs_revalidates = 0; | |
190 | unsigned long vm_cs_query_modified = 0; | |
191 | unsigned long vm_cs_validated_dirtied = 0; | |
192 | unsigned long vm_cs_bitmap_validated = 0; | |
193 | ||
194 | void vm_pre_fault(vm_map_offset_t); | |
195 | ||
196 | extern int not_in_kdp; | |
197 | extern char *kdp_compressor_decompressed_page; | |
198 | extern addr64_t kdp_compressor_decompressed_page_paddr; | |
199 | extern ppnum_t kdp_compressor_decompressed_page_ppnum; | |
200 | ||
201 | /* | |
202 | * Routine: vm_fault_init | |
203 | * Purpose: | |
204 | * Initialize our private data structures. | |
205 | */ | |
206 | void | |
207 | vm_fault_init(void) | |
208 | { | |
209 | int i, vm_compressor_temp; | |
210 | boolean_t need_default_val = TRUE; | |
211 | /* | |
212 | * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is | |
213 | * computed as a percentage of available memory, and the percentage used is scaled inversely with | |
214 | * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems | |
215 | * and reduce the value down to 10% for very large memory configurations. This helps give us a | |
216 | * definition of a memory hog that makes more sense relative to the amount of ram in the machine. | |
217 | * The formula here simply uses the number of gigabytes of ram to adjust the percentage. | |
218 | */ | |
219 | ||
220 | vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100; | |
221 | ||
222 | /* | |
223 | * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry. | |
224 | */ | |
225 | ||
226 | if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) { | |
227 | for ( i = 0; i < VM_PAGER_MAX_MODES; i++) { | |
228 | if (vm_compressor_temp > 0 && | |
229 | ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) { | |
230 | need_default_val = FALSE; | |
231 | vm_compressor_mode = vm_compressor_temp; | |
232 | break; | |
233 | } | |
234 | } | |
235 | if (need_default_val) | |
236 | printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp); | |
237 | } | |
238 | if (need_default_val) { | |
239 | /* If no boot arg or incorrect boot arg, try device tree. */ | |
240 | PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode)); | |
241 | } | |
242 | PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count)); | |
243 | ||
244 | if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof (vm_compressor_temp))) | |
245 | vm_compressor_immediate_preferred_override = TRUE; | |
246 | else { | |
247 | if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp))) | |
248 | vm_compressor_immediate_preferred_override = TRUE; | |
249 | } | |
250 | if (vm_compressor_immediate_preferred_override == TRUE) { | |
251 | if (vm_compressor_temp) | |
252 | vm_compressor_immediate_preferred = TRUE; | |
253 | else | |
254 | vm_compressor_immediate_preferred = FALSE; | |
255 | } | |
256 | printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode); | |
257 | } | |
258 | ||
259 | /* | |
260 | * Routine: vm_fault_cleanup | |
261 | * Purpose: | |
262 | * Clean up the result of vm_fault_page. | |
263 | * Results: | |
264 | * The paging reference for "object" is released. | |
265 | * "object" is unlocked. | |
266 | * If "top_page" is not null, "top_page" is | |
267 | * freed and the paging reference for the object | |
268 | * containing it is released. | |
269 | * | |
270 | * In/out conditions: | |
271 | * "object" must be locked. | |
272 | */ | |
273 | void | |
274 | vm_fault_cleanup( | |
275 | register vm_object_t object, | |
276 | register vm_page_t top_page) | |
277 | { | |
278 | vm_object_paging_end(object); | |
279 | vm_object_unlock(object); | |
280 | ||
281 | if (top_page != VM_PAGE_NULL) { | |
282 | object = top_page->object; | |
283 | ||
284 | vm_object_lock(object); | |
285 | VM_PAGE_FREE(top_page); | |
286 | vm_object_paging_end(object); | |
287 | vm_object_unlock(object); | |
288 | } | |
289 | } | |
290 | ||
291 | #if MACH_CLUSTER_STATS | |
292 | #define MAXCLUSTERPAGES 16 | |
293 | struct { | |
294 | unsigned long pages_in_cluster; | |
295 | unsigned long pages_at_higher_offsets; | |
296 | unsigned long pages_at_lower_offsets; | |
297 | } cluster_stats_in[MAXCLUSTERPAGES]; | |
298 | #define CLUSTER_STAT(clause) clause | |
299 | #define CLUSTER_STAT_HIGHER(x) \ | |
300 | ((cluster_stats_in[(x)].pages_at_higher_offsets)++) | |
301 | #define CLUSTER_STAT_LOWER(x) \ | |
302 | ((cluster_stats_in[(x)].pages_at_lower_offsets)++) | |
303 | #define CLUSTER_STAT_CLUSTER(x) \ | |
304 | ((cluster_stats_in[(x)].pages_in_cluster)++) | |
305 | #else /* MACH_CLUSTER_STATS */ | |
306 | #define CLUSTER_STAT(clause) | |
307 | #endif /* MACH_CLUSTER_STATS */ | |
308 | ||
309 | #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) | |
310 | ||
311 | ||
312 | boolean_t vm_page_deactivate_behind = TRUE; | |
313 | /* | |
314 | * default sizes given VM_BEHAVIOR_DEFAULT reference behavior | |
315 | */ | |
316 | #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 | |
317 | #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */ | |
318 | /* we use it to size an array on the stack */ | |
319 | ||
320 | int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; | |
321 | ||
322 | #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) | |
323 | ||
324 | /* | |
325 | * vm_page_is_sequential | |
326 | * | |
327 | * Determine if sequential access is in progress | |
328 | * in accordance with the behavior specified. | |
329 | * Update state to indicate current access pattern. | |
330 | * | |
331 | * object must have at least the shared lock held | |
332 | */ | |
333 | static | |
334 | void | |
335 | vm_fault_is_sequential( | |
336 | vm_object_t object, | |
337 | vm_object_offset_t offset, | |
338 | vm_behavior_t behavior) | |
339 | { | |
340 | vm_object_offset_t last_alloc; | |
341 | int sequential; | |
342 | int orig_sequential; | |
343 | ||
344 | last_alloc = object->last_alloc; | |
345 | sequential = object->sequential; | |
346 | orig_sequential = sequential; | |
347 | ||
348 | switch (behavior) { | |
349 | case VM_BEHAVIOR_RANDOM: | |
350 | /* | |
351 | * reset indicator of sequential behavior | |
352 | */ | |
353 | sequential = 0; | |
354 | break; | |
355 | ||
356 | case VM_BEHAVIOR_SEQUENTIAL: | |
357 | if (offset && last_alloc == offset - PAGE_SIZE_64) { | |
358 | /* | |
359 | * advance indicator of sequential behavior | |
360 | */ | |
361 | if (sequential < MAX_SEQUENTIAL_RUN) | |
362 | sequential += PAGE_SIZE; | |
363 | } else { | |
364 | /* | |
365 | * reset indicator of sequential behavior | |
366 | */ | |
367 | sequential = 0; | |
368 | } | |
369 | break; | |
370 | ||
371 | case VM_BEHAVIOR_RSEQNTL: | |
372 | if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { | |
373 | /* | |
374 | * advance indicator of sequential behavior | |
375 | */ | |
376 | if (sequential > -MAX_SEQUENTIAL_RUN) | |
377 | sequential -= PAGE_SIZE; | |
378 | } else { | |
379 | /* | |
380 | * reset indicator of sequential behavior | |
381 | */ | |
382 | sequential = 0; | |
383 | } | |
384 | break; | |
385 | ||
386 | case VM_BEHAVIOR_DEFAULT: | |
387 | default: | |
388 | if (offset && last_alloc == (offset - PAGE_SIZE_64)) { | |
389 | /* | |
390 | * advance indicator of sequential behavior | |
391 | */ | |
392 | if (sequential < 0) | |
393 | sequential = 0; | |
394 | if (sequential < MAX_SEQUENTIAL_RUN) | |
395 | sequential += PAGE_SIZE; | |
396 | ||
397 | } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) { | |
398 | /* | |
399 | * advance indicator of sequential behavior | |
400 | */ | |
401 | if (sequential > 0) | |
402 | sequential = 0; | |
403 | if (sequential > -MAX_SEQUENTIAL_RUN) | |
404 | sequential -= PAGE_SIZE; | |
405 | } else { | |
406 | /* | |
407 | * reset indicator of sequential behavior | |
408 | */ | |
409 | sequential = 0; | |
410 | } | |
411 | break; | |
412 | } | |
413 | if (sequential != orig_sequential) { | |
414 | if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { | |
415 | /* | |
416 | * if someone else has already updated object->sequential | |
417 | * don't bother trying to update it or object->last_alloc | |
418 | */ | |
419 | return; | |
420 | } | |
421 | } | |
422 | /* | |
423 | * I'd like to do this with a OSCompareAndSwap64, but that | |
424 | * doesn't exist for PPC... however, it shouldn't matter | |
425 | * that much... last_alloc is maintained so that we can determine | |
426 | * if a sequential access pattern is taking place... if only | |
427 | * one thread is banging on this object, no problem with the unprotected | |
428 | * update... if 2 or more threads are banging away, we run the risk of | |
429 | * someone seeing a mangled update... however, in the face of multiple | |
430 | * accesses, no sequential access pattern can develop anyway, so we | |
431 | * haven't lost any real info. | |
432 | */ | |
433 | object->last_alloc = offset; | |
434 | } | |
435 | ||
436 | ||
437 | int vm_page_deactivate_behind_count = 0; | |
438 | ||
439 | /* | |
440 | * vm_page_deactivate_behind | |
441 | * | |
442 | * Determine if sequential access is in progress | |
443 | * in accordance with the behavior specified. If | |
444 | * so, compute a potential page to deactivate and | |
445 | * deactivate it. | |
446 | * | |
447 | * object must be locked. | |
448 | * | |
449 | * return TRUE if we actually deactivate a page | |
450 | */ | |
451 | static | |
452 | boolean_t | |
453 | vm_fault_deactivate_behind( | |
454 | vm_object_t object, | |
455 | vm_object_offset_t offset, | |
456 | vm_behavior_t behavior) | |
457 | { | |
458 | int n; | |
459 | int pages_in_run = 0; | |
460 | int max_pages_in_run = 0; | |
461 | int sequential_run; | |
462 | int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; | |
463 | vm_object_offset_t run_offset = 0; | |
464 | vm_object_offset_t pg_offset = 0; | |
465 | vm_page_t m; | |
466 | vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; | |
467 | ||
468 | pages_in_run = 0; | |
469 | #if TRACEFAULTPAGE | |
470 | dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ | |
471 | #endif | |
472 | ||
473 | if (object == kernel_object || vm_page_deactivate_behind == FALSE) { | |
474 | /* | |
475 | * Do not deactivate pages from the kernel object: they | |
476 | * are not intended to become pageable. | |
477 | * or we've disabled the deactivate behind mechanism | |
478 | */ | |
479 | return FALSE; | |
480 | } | |
481 | if ((sequential_run = object->sequential)) { | |
482 | if (sequential_run < 0) { | |
483 | sequential_behavior = VM_BEHAVIOR_RSEQNTL; | |
484 | sequential_run = 0 - sequential_run; | |
485 | } else { | |
486 | sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; | |
487 | } | |
488 | } | |
489 | switch (behavior) { | |
490 | case VM_BEHAVIOR_RANDOM: | |
491 | break; | |
492 | case VM_BEHAVIOR_SEQUENTIAL: | |
493 | if (sequential_run >= (int)PAGE_SIZE) { | |
494 | run_offset = 0 - PAGE_SIZE_64; | |
495 | max_pages_in_run = 1; | |
496 | } | |
497 | break; | |
498 | case VM_BEHAVIOR_RSEQNTL: | |
499 | if (sequential_run >= (int)PAGE_SIZE) { | |
500 | run_offset = PAGE_SIZE_64; | |
501 | max_pages_in_run = 1; | |
502 | } | |
503 | break; | |
504 | case VM_BEHAVIOR_DEFAULT: | |
505 | default: | |
506 | { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; | |
507 | ||
508 | /* | |
509 | * determine if the run of sequential accesss has been | |
510 | * long enough on an object with default access behavior | |
511 | * to consider it for deactivation | |
512 | */ | |
513 | if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) { | |
514 | /* | |
515 | * the comparisons between offset and behind are done | |
516 | * in this kind of odd fashion in order to prevent wrap around | |
517 | * at the end points | |
518 | */ | |
519 | if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { | |
520 | if (offset >= behind) { | |
521 | run_offset = 0 - behind; | |
522 | pg_offset = PAGE_SIZE_64; | |
523 | max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; | |
524 | } | |
525 | } else { | |
526 | if (offset < -behind) { | |
527 | run_offset = behind; | |
528 | pg_offset = 0 - PAGE_SIZE_64; | |
529 | max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; | |
530 | } | |
531 | } | |
532 | } | |
533 | break; | |
534 | } | |
535 | } | |
536 | for (n = 0; n < max_pages_in_run; n++) { | |
537 | m = vm_page_lookup(object, offset + run_offset + (n * pg_offset)); | |
538 | ||
539 | if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) { | |
540 | page_run[pages_in_run++] = m; | |
541 | ||
542 | /* | |
543 | * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise... | |
544 | * | |
545 | * a TLB flush isn't really needed here since at worst we'll miss the reference bit being | |
546 | * updated in the PTE if a remote processor still has this mapping cached in its TLB when the | |
547 | * new reference happens. If no futher references happen on the page after that remote TLB flushes | |
548 | * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue | |
549 | * by pageout_scan, which is just fine since the last reference would have happened quite far | |
550 | * in the past (TLB caches don't hang around for very long), and of course could just as easily | |
551 | * have happened before we did the deactivate_behind. | |
552 | */ | |
553 | pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); | |
554 | } | |
555 | } | |
556 | if (pages_in_run) { | |
557 | vm_page_lockspin_queues(); | |
558 | ||
559 | for (n = 0; n < pages_in_run; n++) { | |
560 | ||
561 | m = page_run[n]; | |
562 | ||
563 | vm_page_deactivate_internal(m, FALSE); | |
564 | ||
565 | vm_page_deactivate_behind_count++; | |
566 | #if TRACEFAULTPAGE | |
567 | dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ | |
568 | #endif | |
569 | } | |
570 | vm_page_unlock_queues(); | |
571 | ||
572 | return TRUE; | |
573 | } | |
574 | return FALSE; | |
575 | } | |
576 | ||
577 | ||
578 | #if (DEVELOPMENT || DEBUG) | |
579 | uint32_t vm_page_creation_throttled_hard = 0; | |
580 | uint32_t vm_page_creation_throttled_soft = 0; | |
581 | uint64_t vm_page_creation_throttle_avoided = 0; | |
582 | #endif /* DEVELOPMENT || DEBUG */ | |
583 | ||
584 | static int | |
585 | vm_page_throttled(boolean_t page_kept) | |
586 | { | |
587 | clock_sec_t elapsed_sec; | |
588 | clock_sec_t tv_sec; | |
589 | clock_usec_t tv_usec; | |
590 | ||
591 | thread_t thread = current_thread(); | |
592 | ||
593 | if (thread->options & TH_OPT_VMPRIV) | |
594 | return (0); | |
595 | ||
596 | if (thread->t_page_creation_throttled) { | |
597 | thread->t_page_creation_throttled = 0; | |
598 | ||
599 | if (page_kept == FALSE) | |
600 | goto no_throttle; | |
601 | } | |
602 | if (NEED_TO_HARD_THROTTLE_THIS_TASK()) { | |
603 | #if (DEVELOPMENT || DEBUG) | |
604 | thread->t_page_creation_throttled_hard++; | |
605 | OSAddAtomic(1, &vm_page_creation_throttled_hard); | |
606 | #endif /* DEVELOPMENT || DEBUG */ | |
607 | return (HARD_THROTTLE_DELAY); | |
608 | } | |
609 | ||
610 | if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) && | |
611 | thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) { | |
612 | ||
613 | if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) { | |
614 | #if (DEVELOPMENT || DEBUG) | |
615 | OSAddAtomic64(1, &vm_page_creation_throttle_avoided); | |
616 | #endif | |
617 | goto no_throttle; | |
618 | } | |
619 | clock_get_system_microtime(&tv_sec, &tv_usec); | |
620 | ||
621 | elapsed_sec = tv_sec - thread->t_page_creation_time; | |
622 | ||
623 | if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS || | |
624 | (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) { | |
625 | ||
626 | if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) { | |
627 | /* | |
628 | * we'll reset our stats to give a well behaved app | |
629 | * that was unlucky enough to accumulate a bunch of pages | |
630 | * over a long period of time a chance to get out of | |
631 | * the throttled state... we reset the counter and timestamp | |
632 | * so that if it stays under the rate limit for the next second | |
633 | * it will be back in our good graces... if it exceeds it, it | |
634 | * will remain in the throttled state | |
635 | */ | |
636 | thread->t_page_creation_time = tv_sec; | |
637 | thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1); | |
638 | } | |
639 | ++vm_page_throttle_count; | |
640 | ||
641 | thread->t_page_creation_throttled = 1; | |
642 | ||
643 | if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED()) { | |
644 | #if (DEVELOPMENT || DEBUG) | |
645 | thread->t_page_creation_throttled_hard++; | |
646 | OSAddAtomic(1, &vm_page_creation_throttled_hard); | |
647 | #endif /* DEVELOPMENT || DEBUG */ | |
648 | return (HARD_THROTTLE_DELAY); | |
649 | } else { | |
650 | #if (DEVELOPMENT || DEBUG) | |
651 | thread->t_page_creation_throttled_soft++; | |
652 | OSAddAtomic(1, &vm_page_creation_throttled_soft); | |
653 | #endif /* DEVELOPMENT || DEBUG */ | |
654 | return (SOFT_THROTTLE_DELAY); | |
655 | } | |
656 | } | |
657 | thread->t_page_creation_time = tv_sec; | |
658 | thread->t_page_creation_count = 0; | |
659 | } | |
660 | no_throttle: | |
661 | thread->t_page_creation_count++; | |
662 | ||
663 | return (0); | |
664 | } | |
665 | ||
666 | ||
667 | /* | |
668 | * check for various conditions that would | |
669 | * prevent us from creating a ZF page... | |
670 | * cleanup is based on being called from vm_fault_page | |
671 | * | |
672 | * object must be locked | |
673 | * object == m->object | |
674 | */ | |
675 | static vm_fault_return_t | |
676 | vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle) | |
677 | { | |
678 | int throttle_delay; | |
679 | ||
680 | if (object->shadow_severed || | |
681 | VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) { | |
682 | /* | |
683 | * Either: | |
684 | * 1. the shadow chain was severed, | |
685 | * 2. the purgeable object is volatile or empty and is marked | |
686 | * to fault on access while volatile. | |
687 | * Just have to return an error at this point | |
688 | */ | |
689 | if (m != VM_PAGE_NULL) | |
690 | VM_PAGE_FREE(m); | |
691 | vm_fault_cleanup(object, first_m); | |
692 | ||
693 | thread_interrupt_level(interruptible_state); | |
694 | ||
695 | return (VM_FAULT_MEMORY_ERROR); | |
696 | } | |
697 | if (vm_backing_store_low) { | |
698 | /* | |
699 | * are we protecting the system from | |
700 | * backing store exhaustion. If so | |
701 | * sleep unless we are privileged. | |
702 | */ | |
703 | if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { | |
704 | ||
705 | if (m != VM_PAGE_NULL) | |
706 | VM_PAGE_FREE(m); | |
707 | vm_fault_cleanup(object, first_m); | |
708 | ||
709 | assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); | |
710 | ||
711 | thread_block(THREAD_CONTINUE_NULL); | |
712 | thread_interrupt_level(interruptible_state); | |
713 | ||
714 | return (VM_FAULT_RETRY); | |
715 | } | |
716 | } | |
717 | if (page_throttle == TRUE) { | |
718 | if ((throttle_delay = vm_page_throttled(FALSE))) { | |
719 | /* | |
720 | * we're throttling zero-fills... | |
721 | * treat this as if we couldn't grab a page | |
722 | */ | |
723 | if (m != VM_PAGE_NULL) | |
724 | VM_PAGE_FREE(m); | |
725 | vm_fault_cleanup(object, first_m); | |
726 | ||
727 | VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); | |
728 | ||
729 | delay(throttle_delay); | |
730 | ||
731 | if (current_thread_aborted()) { | |
732 | thread_interrupt_level(interruptible_state); | |
733 | return VM_FAULT_INTERRUPTED; | |
734 | } | |
735 | thread_interrupt_level(interruptible_state); | |
736 | ||
737 | return (VM_FAULT_MEMORY_SHORTAGE); | |
738 | } | |
739 | } | |
740 | return (VM_FAULT_SUCCESS); | |
741 | } | |
742 | ||
743 | ||
744 | /* | |
745 | * do the work to zero fill a page and | |
746 | * inject it into the correct paging queue | |
747 | * | |
748 | * m->object must be locked | |
749 | * page queue lock must NOT be held | |
750 | */ | |
751 | static int | |
752 | vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) | |
753 | { | |
754 | int my_fault = DBG_ZERO_FILL_FAULT; | |
755 | ||
756 | /* | |
757 | * This is is a zero-fill page fault... | |
758 | * | |
759 | * Checking the page lock is a waste of | |
760 | * time; this page was absent, so | |
761 | * it can't be page locked by a pager. | |
762 | * | |
763 | * we also consider it undefined | |
764 | * with respect to instruction | |
765 | * execution. i.e. it is the responsibility | |
766 | * of higher layers to call for an instruction | |
767 | * sync after changing the contents and before | |
768 | * sending a program into this area. We | |
769 | * choose this approach for performance | |
770 | */ | |
771 | m->pmapped = TRUE; | |
772 | ||
773 | m->cs_validated = FALSE; | |
774 | m->cs_tainted = FALSE; | |
775 | m->cs_nx = FALSE; | |
776 | ||
777 | if (no_zero_fill == TRUE) { | |
778 | my_fault = DBG_NZF_PAGE_FAULT; | |
779 | ||
780 | if (m->absent && m->busy) | |
781 | return (my_fault); | |
782 | } else { | |
783 | vm_page_zero_fill(m); | |
784 | ||
785 | VM_STAT_INCR(zero_fill_count); | |
786 | DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL); | |
787 | } | |
788 | assert(!m->laundry); | |
789 | assert(m->object != kernel_object); | |
790 | //assert(m->pageq.next == NULL && m->pageq.prev == NULL); | |
791 | ||
792 | if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && | |
793 | (m->object->purgable == VM_PURGABLE_DENY || | |
794 | m->object->purgable == VM_PURGABLE_NONVOLATILE || | |
795 | m->object->purgable == VM_PURGABLE_VOLATILE )) { | |
796 | ||
797 | vm_page_lockspin_queues(); | |
798 | ||
799 | if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) { | |
800 | assert(!VM_PAGE_WIRED(m)); | |
801 | ||
802 | /* | |
803 | * can't be on the pageout queue since we don't | |
804 | * have a pager to try and clean to | |
805 | */ | |
806 | assert(!m->pageout_queue); | |
807 | ||
808 | vm_page_queues_remove(m); | |
809 | vm_page_check_pageable_safe(m); | |
810 | queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq); | |
811 | m->throttled = TRUE; | |
812 | vm_page_throttled_count++; | |
813 | } | |
814 | vm_page_unlock_queues(); | |
815 | } | |
816 | return (my_fault); | |
817 | } | |
818 | ||
819 | ||
820 | /* | |
821 | * Routine: vm_fault_page | |
822 | * Purpose: | |
823 | * Find the resident page for the virtual memory | |
824 | * specified by the given virtual memory object | |
825 | * and offset. | |
826 | * Additional arguments: | |
827 | * The required permissions for the page is given | |
828 | * in "fault_type". Desired permissions are included | |
829 | * in "protection". | |
830 | * fault_info is passed along to determine pagein cluster | |
831 | * limits... it contains the expected reference pattern, | |
832 | * cluster size if available, etc... | |
833 | * | |
834 | * If the desired page is known to be resident (for | |
835 | * example, because it was previously wired down), asserting | |
836 | * the "unwiring" parameter will speed the search. | |
837 | * | |
838 | * If the operation can be interrupted (by thread_abort | |
839 | * or thread_terminate), then the "interruptible" | |
840 | * parameter should be asserted. | |
841 | * | |
842 | * Results: | |
843 | * The page containing the proper data is returned | |
844 | * in "result_page". | |
845 | * | |
846 | * In/out conditions: | |
847 | * The source object must be locked and referenced, | |
848 | * and must donate one paging reference. The reference | |
849 | * is not affected. The paging reference and lock are | |
850 | * consumed. | |
851 | * | |
852 | * If the call succeeds, the object in which "result_page" | |
853 | * resides is left locked and holding a paging reference. | |
854 | * If this is not the original object, a busy page in the | |
855 | * original object is returned in "top_page", to prevent other | |
856 | * callers from pursuing this same data, along with a paging | |
857 | * reference for the original object. The "top_page" should | |
858 | * be destroyed when this guarantee is no longer required. | |
859 | * The "result_page" is also left busy. It is not removed | |
860 | * from the pageout queues. | |
861 | * Special Case: | |
862 | * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the | |
863 | * fault succeeded but there's no VM page (i.e. the VM object | |
864 | * does not actually hold VM pages, but device memory or | |
865 | * large pages). The object is still locked and we still hold a | |
866 | * paging_in_progress reference. | |
867 | */ | |
868 | unsigned int vm_fault_page_blocked_access = 0; | |
869 | unsigned int vm_fault_page_forced_retry = 0; | |
870 | ||
871 | vm_fault_return_t | |
872 | vm_fault_page( | |
873 | /* Arguments: */ | |
874 | vm_object_t first_object, /* Object to begin search */ | |
875 | vm_object_offset_t first_offset, /* Offset into object */ | |
876 | vm_prot_t fault_type, /* What access is requested */ | |
877 | boolean_t must_be_resident,/* Must page be resident? */ | |
878 | boolean_t caller_lookup, /* caller looked up page */ | |
879 | /* Modifies in place: */ | |
880 | vm_prot_t *protection, /* Protection for mapping */ | |
881 | vm_page_t *result_page, /* Page found, if successful */ | |
882 | /* Returns: */ | |
883 | vm_page_t *top_page, /* Page in top object, if | |
884 | * not result_page. */ | |
885 | int *type_of_fault, /* if non-null, fill in with type of fault | |
886 | * COW, zero-fill, etc... returned in trace point */ | |
887 | /* More arguments: */ | |
888 | kern_return_t *error_code, /* code if page is in error */ | |
889 | boolean_t no_zero_fill, /* don't zero fill absent pages */ | |
890 | boolean_t data_supply, /* treat as data_supply if | |
891 | * it is a write fault and a full | |
892 | * page is provided */ | |
893 | vm_object_fault_info_t fault_info) | |
894 | { | |
895 | vm_page_t m; | |
896 | vm_object_t object; | |
897 | vm_object_offset_t offset; | |
898 | vm_page_t first_m; | |
899 | vm_object_t next_object; | |
900 | vm_object_t copy_object; | |
901 | boolean_t look_for_page; | |
902 | boolean_t force_fault_retry = FALSE; | |
903 | vm_prot_t access_required = fault_type; | |
904 | vm_prot_t wants_copy_flag; | |
905 | CLUSTER_STAT(int pages_at_higher_offsets;) | |
906 | CLUSTER_STAT(int pages_at_lower_offsets;) | |
907 | kern_return_t wait_result; | |
908 | boolean_t interruptible_state; | |
909 | boolean_t data_already_requested = FALSE; | |
910 | vm_behavior_t orig_behavior; | |
911 | vm_size_t orig_cluster_size; | |
912 | vm_fault_return_t error; | |
913 | int my_fault; | |
914 | uint32_t try_failed_count; | |
915 | int interruptible; /* how may fault be interrupted? */ | |
916 | int external_state = VM_EXTERNAL_STATE_UNKNOWN; | |
917 | memory_object_t pager; | |
918 | vm_fault_return_t retval; | |
919 | ||
920 | /* | |
921 | * MACH page map - an optional optimization where a bit map is maintained | |
922 | * by the VM subsystem for internal objects to indicate which pages of | |
923 | * the object currently reside on backing store. This existence map | |
924 | * duplicates information maintained by the vnode pager. It is | |
925 | * created at the time of the first pageout against the object, i.e. | |
926 | * at the same time pager for the object is created. The optimization | |
927 | * is designed to eliminate pager interaction overhead, if it is | |
928 | * 'known' that the page does not exist on backing store. | |
929 | * | |
930 | * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is | |
931 | * either marked as paged out in the existence map for the object or no | |
932 | * existence map exists for the object. MUST_ASK_PAGER() is one of the | |
933 | * criteria in the decision to invoke the pager. It is also used as one | |
934 | * of the criteria to terminate the scan for adjacent pages in a clustered | |
935 | * pagein operation. Note that MUST_ASK_PAGER() always evaluates to TRUE for | |
936 | * permanent objects. Note also that if the pager for an internal object | |
937 | * has not been created, the pager is not invoked regardless of the value | |
938 | * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object | |
939 | * for which a pager has been created. | |
940 | * | |
941 | * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset | |
942 | * is marked as paged out in the existence map for the object. PAGED_OUT() | |
943 | * PAGED_OUT() is used to determine if a page has already been pushed | |
944 | * into a copy object in order to avoid a redundant page out operation. | |
945 | */ | |
946 | #if MACH_PAGEMAP | |
947 | #define MUST_ASK_PAGER(o, f, s) \ | |
948 | ((vm_external_state_get((o)->existence_map, (f)) \ | |
949 | != VM_EXTERNAL_STATE_ABSENT) && \ | |
950 | (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)))) \ | |
951 | != VM_EXTERNAL_STATE_ABSENT) | |
952 | #define PAGED_OUT(o, f) \ | |
953 | ((vm_external_state_get((o)->existence_map, (f)) \ | |
954 | == VM_EXTERNAL_STATE_EXISTS) || \ | |
955 | (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) \ | |
956 | == VM_EXTERNAL_STATE_EXISTS)) | |
957 | #else /* MACH_PAGEMAP */ | |
958 | #define MUST_ASK_PAGER(o, f, s) \ | |
959 | ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT) | |
960 | #define PAGED_OUT(o, f) \ | |
961 | (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS) | |
962 | #endif /* MACH_PAGEMAP */ | |
963 | ||
964 | /* | |
965 | * Recovery actions | |
966 | */ | |
967 | #define RELEASE_PAGE(m) \ | |
968 | MACRO_BEGIN \ | |
969 | PAGE_WAKEUP_DONE(m); \ | |
970 | if (!m->active && !m->inactive && !m->throttled) { \ | |
971 | vm_page_lockspin_queues(); \ | |
972 | if (!m->active && !m->inactive && !m->throttled) { \ | |
973 | if (COMPRESSED_PAGER_IS_ACTIVE) \ | |
974 | vm_page_deactivate(m); \ | |
975 | else \ | |
976 | vm_page_activate(m); \ | |
977 | } \ | |
978 | vm_page_unlock_queues(); \ | |
979 | } \ | |
980 | MACRO_END | |
981 | ||
982 | #if TRACEFAULTPAGE | |
983 | dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ | |
984 | #endif | |
985 | ||
986 | interruptible = fault_info->interruptible; | |
987 | interruptible_state = thread_interrupt_level(interruptible); | |
988 | ||
989 | /* | |
990 | * INVARIANTS (through entire routine): | |
991 | * | |
992 | * 1) At all times, we must either have the object | |
993 | * lock or a busy page in some object to prevent | |
994 | * some other thread from trying to bring in | |
995 | * the same page. | |
996 | * | |
997 | * Note that we cannot hold any locks during the | |
998 | * pager access or when waiting for memory, so | |
999 | * we use a busy page then. | |
1000 | * | |
1001 | * 2) To prevent another thread from racing us down the | |
1002 | * shadow chain and entering a new page in the top | |
1003 | * object before we do, we must keep a busy page in | |
1004 | * the top object while following the shadow chain. | |
1005 | * | |
1006 | * 3) We must increment paging_in_progress on any object | |
1007 | * for which we have a busy page before dropping | |
1008 | * the object lock | |
1009 | * | |
1010 | * 4) We leave busy pages on the pageout queues. | |
1011 | * If the pageout daemon comes across a busy page, | |
1012 | * it will remove the page from the pageout queues. | |
1013 | */ | |
1014 | ||
1015 | object = first_object; | |
1016 | offset = first_offset; | |
1017 | first_m = VM_PAGE_NULL; | |
1018 | access_required = fault_type; | |
1019 | ||
1020 | ||
1021 | XPR(XPR_VM_FAULT, | |
1022 | "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", | |
1023 | object, offset, fault_type, *protection, 0); | |
1024 | ||
1025 | /* | |
1026 | * default type of fault | |
1027 | */ | |
1028 | my_fault = DBG_CACHE_HIT_FAULT; | |
1029 | ||
1030 | while (TRUE) { | |
1031 | #if TRACEFAULTPAGE | |
1032 | dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1033 | #endif | |
1034 | if (!object->alive) { | |
1035 | /* | |
1036 | * object is no longer valid | |
1037 | * clean up and return error | |
1038 | */ | |
1039 | vm_fault_cleanup(object, first_m); | |
1040 | thread_interrupt_level(interruptible_state); | |
1041 | ||
1042 | return (VM_FAULT_MEMORY_ERROR); | |
1043 | } | |
1044 | ||
1045 | if (!object->pager_created && object->phys_contiguous) { | |
1046 | /* | |
1047 | * A physically-contiguous object without a pager: | |
1048 | * must be a "large page" object. We do not deal | |
1049 | * with VM pages for this object. | |
1050 | */ | |
1051 | caller_lookup = FALSE; | |
1052 | m = VM_PAGE_NULL; | |
1053 | goto phys_contig_object; | |
1054 | } | |
1055 | ||
1056 | if (object->blocked_access) { | |
1057 | /* | |
1058 | * Access to this VM object has been blocked. | |
1059 | * Replace our "paging_in_progress" reference with | |
1060 | * a "activity_in_progress" reference and wait for | |
1061 | * access to be unblocked. | |
1062 | */ | |
1063 | caller_lookup = FALSE; /* no longer valid after sleep */ | |
1064 | vm_object_activity_begin(object); | |
1065 | vm_object_paging_end(object); | |
1066 | while (object->blocked_access) { | |
1067 | vm_object_sleep(object, | |
1068 | VM_OBJECT_EVENT_UNBLOCKED, | |
1069 | THREAD_UNINT); | |
1070 | } | |
1071 | vm_fault_page_blocked_access++; | |
1072 | vm_object_paging_begin(object); | |
1073 | vm_object_activity_end(object); | |
1074 | } | |
1075 | ||
1076 | /* | |
1077 | * See whether the page at 'offset' is resident | |
1078 | */ | |
1079 | if (caller_lookup == TRUE) { | |
1080 | /* | |
1081 | * The caller has already looked up the page | |
1082 | * and gave us the result in "result_page". | |
1083 | * We can use this for the first lookup but | |
1084 | * it loses its validity as soon as we unlock | |
1085 | * the object. | |
1086 | */ | |
1087 | m = *result_page; | |
1088 | caller_lookup = FALSE; /* no longer valid after that */ | |
1089 | } else { | |
1090 | m = vm_page_lookup(object, offset); | |
1091 | } | |
1092 | #if TRACEFAULTPAGE | |
1093 | dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ | |
1094 | #endif | |
1095 | if (m != VM_PAGE_NULL) { | |
1096 | ||
1097 | if (m->busy) { | |
1098 | /* | |
1099 | * The page is being brought in, | |
1100 | * wait for it and then retry. | |
1101 | */ | |
1102 | #if TRACEFAULTPAGE | |
1103 | dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1104 | #endif | |
1105 | wait_result = PAGE_SLEEP(object, m, interruptible); | |
1106 | ||
1107 | XPR(XPR_VM_FAULT, | |
1108 | "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n", | |
1109 | object, offset, | |
1110 | m, 0, 0); | |
1111 | counter(c_vm_fault_page_block_busy_kernel++); | |
1112 | ||
1113 | if (wait_result != THREAD_AWAKENED) { | |
1114 | vm_fault_cleanup(object, first_m); | |
1115 | thread_interrupt_level(interruptible_state); | |
1116 | ||
1117 | if (wait_result == THREAD_RESTART) | |
1118 | return (VM_FAULT_RETRY); | |
1119 | else | |
1120 | return (VM_FAULT_INTERRUPTED); | |
1121 | } | |
1122 | continue; | |
1123 | } | |
1124 | if (m->laundry) { | |
1125 | m->pageout = FALSE; | |
1126 | ||
1127 | if (!m->cleaning) | |
1128 | vm_pageout_steal_laundry(m, FALSE); | |
1129 | } | |
1130 | if (m->phys_page == vm_page_guard_addr) { | |
1131 | /* | |
1132 | * Guard page: off limits ! | |
1133 | */ | |
1134 | if (fault_type == VM_PROT_NONE) { | |
1135 | /* | |
1136 | * The fault is not requesting any | |
1137 | * access to the guard page, so it must | |
1138 | * be just to wire or unwire it. | |
1139 | * Let's pretend it succeeded... | |
1140 | */ | |
1141 | m->busy = TRUE; | |
1142 | *result_page = m; | |
1143 | assert(first_m == VM_PAGE_NULL); | |
1144 | *top_page = first_m; | |
1145 | if (type_of_fault) | |
1146 | *type_of_fault = DBG_GUARD_FAULT; | |
1147 | thread_interrupt_level(interruptible_state); | |
1148 | return VM_FAULT_SUCCESS; | |
1149 | } else { | |
1150 | /* | |
1151 | * The fault requests access to the | |
1152 | * guard page: let's deny that ! | |
1153 | */ | |
1154 | vm_fault_cleanup(object, first_m); | |
1155 | thread_interrupt_level(interruptible_state); | |
1156 | return VM_FAULT_MEMORY_ERROR; | |
1157 | } | |
1158 | } | |
1159 | ||
1160 | if (m->error) { | |
1161 | /* | |
1162 | * The page is in error, give up now. | |
1163 | */ | |
1164 | #if TRACEFAULTPAGE | |
1165 | dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ | |
1166 | #endif | |
1167 | if (error_code) | |
1168 | *error_code = KERN_MEMORY_ERROR; | |
1169 | VM_PAGE_FREE(m); | |
1170 | ||
1171 | vm_fault_cleanup(object, first_m); | |
1172 | thread_interrupt_level(interruptible_state); | |
1173 | ||
1174 | return (VM_FAULT_MEMORY_ERROR); | |
1175 | } | |
1176 | if (m->restart) { | |
1177 | /* | |
1178 | * The pager wants us to restart | |
1179 | * at the top of the chain, | |
1180 | * typically because it has moved the | |
1181 | * page to another pager, then do so. | |
1182 | */ | |
1183 | #if TRACEFAULTPAGE | |
1184 | dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1185 | #endif | |
1186 | VM_PAGE_FREE(m); | |
1187 | ||
1188 | vm_fault_cleanup(object, first_m); | |
1189 | thread_interrupt_level(interruptible_state); | |
1190 | ||
1191 | return (VM_FAULT_RETRY); | |
1192 | } | |
1193 | if (m->absent) { | |
1194 | /* | |
1195 | * The page isn't busy, but is absent, | |
1196 | * therefore it's deemed "unavailable". | |
1197 | * | |
1198 | * Remove the non-existent page (unless it's | |
1199 | * in the top object) and move on down to the | |
1200 | * next object (if there is one). | |
1201 | */ | |
1202 | #if TRACEFAULTPAGE | |
1203 | dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ | |
1204 | #endif | |
1205 | next_object = object->shadow; | |
1206 | ||
1207 | if (next_object == VM_OBJECT_NULL) { | |
1208 | /* | |
1209 | * Absent page at bottom of shadow | |
1210 | * chain; zero fill the page we left | |
1211 | * busy in the first object, and free | |
1212 | * the absent page. | |
1213 | */ | |
1214 | assert(!must_be_resident); | |
1215 | ||
1216 | /* | |
1217 | * check for any conditions that prevent | |
1218 | * us from creating a new zero-fill page | |
1219 | * vm_fault_check will do all of the | |
1220 | * fault cleanup in the case of an error condition | |
1221 | * including resetting the thread_interrupt_level | |
1222 | */ | |
1223 | error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); | |
1224 | ||
1225 | if (error != VM_FAULT_SUCCESS) | |
1226 | return (error); | |
1227 | ||
1228 | XPR(XPR_VM_FAULT, | |
1229 | "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", | |
1230 | object, offset, | |
1231 | m, | |
1232 | first_object, 0); | |
1233 | ||
1234 | if (object != first_object) { | |
1235 | /* | |
1236 | * free the absent page we just found | |
1237 | */ | |
1238 | VM_PAGE_FREE(m); | |
1239 | ||
1240 | /* | |
1241 | * drop reference and lock on current object | |
1242 | */ | |
1243 | vm_object_paging_end(object); | |
1244 | vm_object_unlock(object); | |
1245 | ||
1246 | /* | |
1247 | * grab the original page we | |
1248 | * 'soldered' in place and | |
1249 | * retake lock on 'first_object' | |
1250 | */ | |
1251 | m = first_m; | |
1252 | first_m = VM_PAGE_NULL; | |
1253 | ||
1254 | object = first_object; | |
1255 | offset = first_offset; | |
1256 | ||
1257 | vm_object_lock(object); | |
1258 | } else { | |
1259 | /* | |
1260 | * we're going to use the absent page we just found | |
1261 | * so convert it to a 'busy' page | |
1262 | */ | |
1263 | m->absent = FALSE; | |
1264 | m->busy = TRUE; | |
1265 | } | |
1266 | if (fault_info->mark_zf_absent && no_zero_fill == TRUE) | |
1267 | m->absent = TRUE; | |
1268 | /* | |
1269 | * zero-fill the page and put it on | |
1270 | * the correct paging queue | |
1271 | */ | |
1272 | my_fault = vm_fault_zero_page(m, no_zero_fill); | |
1273 | ||
1274 | break; | |
1275 | } else { | |
1276 | if (must_be_resident) | |
1277 | vm_object_paging_end(object); | |
1278 | else if (object != first_object) { | |
1279 | vm_object_paging_end(object); | |
1280 | VM_PAGE_FREE(m); | |
1281 | } else { | |
1282 | first_m = m; | |
1283 | m->absent = FALSE; | |
1284 | m->busy = TRUE; | |
1285 | ||
1286 | vm_page_lockspin_queues(); | |
1287 | ||
1288 | assert(!m->pageout_queue); | |
1289 | vm_page_queues_remove(m); | |
1290 | ||
1291 | vm_page_unlock_queues(); | |
1292 | } | |
1293 | XPR(XPR_VM_FAULT, | |
1294 | "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n", | |
1295 | object, offset, | |
1296 | next_object, | |
1297 | offset+object->vo_shadow_offset,0); | |
1298 | ||
1299 | offset += object->vo_shadow_offset; | |
1300 | fault_info->lo_offset += object->vo_shadow_offset; | |
1301 | fault_info->hi_offset += object->vo_shadow_offset; | |
1302 | access_required = VM_PROT_READ; | |
1303 | ||
1304 | vm_object_lock(next_object); | |
1305 | vm_object_unlock(object); | |
1306 | object = next_object; | |
1307 | vm_object_paging_begin(object); | |
1308 | ||
1309 | /* | |
1310 | * reset to default type of fault | |
1311 | */ | |
1312 | my_fault = DBG_CACHE_HIT_FAULT; | |
1313 | ||
1314 | continue; | |
1315 | } | |
1316 | } | |
1317 | if ((m->cleaning) | |
1318 | && ((object != first_object) || (object->copy != VM_OBJECT_NULL)) | |
1319 | && (fault_type & VM_PROT_WRITE)) { | |
1320 | /* | |
1321 | * This is a copy-on-write fault that will | |
1322 | * cause us to revoke access to this page, but | |
1323 | * this page is in the process of being cleaned | |
1324 | * in a clustered pageout. We must wait until | |
1325 | * the cleaning operation completes before | |
1326 | * revoking access to the original page, | |
1327 | * otherwise we might attempt to remove a | |
1328 | * wired mapping. | |
1329 | */ | |
1330 | #if TRACEFAULTPAGE | |
1331 | dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ | |
1332 | #endif | |
1333 | XPR(XPR_VM_FAULT, | |
1334 | "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n", | |
1335 | object, offset, | |
1336 | m, 0, 0); | |
1337 | /* | |
1338 | * take an extra ref so that object won't die | |
1339 | */ | |
1340 | vm_object_reference_locked(object); | |
1341 | ||
1342 | vm_fault_cleanup(object, first_m); | |
1343 | ||
1344 | counter(c_vm_fault_page_block_backoff_kernel++); | |
1345 | vm_object_lock(object); | |
1346 | assert(object->ref_count > 0); | |
1347 | ||
1348 | m = vm_page_lookup(object, offset); | |
1349 | ||
1350 | if (m != VM_PAGE_NULL && m->cleaning) { | |
1351 | PAGE_ASSERT_WAIT(m, interruptible); | |
1352 | ||
1353 | vm_object_unlock(object); | |
1354 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1355 | vm_object_deallocate(object); | |
1356 | ||
1357 | goto backoff; | |
1358 | } else { | |
1359 | vm_object_unlock(object); | |
1360 | ||
1361 | vm_object_deallocate(object); | |
1362 | thread_interrupt_level(interruptible_state); | |
1363 | ||
1364 | return (VM_FAULT_RETRY); | |
1365 | } | |
1366 | } | |
1367 | if (type_of_fault == NULL && m->speculative && | |
1368 | !(fault_info != NULL && fault_info->stealth)) { | |
1369 | /* | |
1370 | * If we were passed a non-NULL pointer for | |
1371 | * "type_of_fault", than we came from | |
1372 | * vm_fault... we'll let it deal with | |
1373 | * this condition, since it | |
1374 | * needs to see m->speculative to correctly | |
1375 | * account the pageins, otherwise... | |
1376 | * take it off the speculative queue, we'll | |
1377 | * let the caller of vm_fault_page deal | |
1378 | * with getting it onto the correct queue | |
1379 | * | |
1380 | * If the caller specified in fault_info that | |
1381 | * it wants a "stealth" fault, we also leave | |
1382 | * the page in the speculative queue. | |
1383 | */ | |
1384 | vm_page_lockspin_queues(); | |
1385 | if (m->speculative) | |
1386 | vm_page_queues_remove(m); | |
1387 | vm_page_unlock_queues(); | |
1388 | } | |
1389 | ||
1390 | if (m->encrypted) { | |
1391 | /* | |
1392 | * ENCRYPTED SWAP: | |
1393 | * the user needs access to a page that we | |
1394 | * encrypted before paging it out. | |
1395 | * Decrypt the page now. | |
1396 | * Keep it busy to prevent anyone from | |
1397 | * accessing it during the decryption. | |
1398 | */ | |
1399 | m->busy = TRUE; | |
1400 | vm_page_decrypt(m, 0); | |
1401 | assert(object == m->object); | |
1402 | assert(m->busy); | |
1403 | PAGE_WAKEUP_DONE(m); | |
1404 | ||
1405 | /* | |
1406 | * Retry from the top, in case | |
1407 | * something changed while we were | |
1408 | * decrypting. | |
1409 | */ | |
1410 | continue; | |
1411 | } | |
1412 | ASSERT_PAGE_DECRYPTED(m); | |
1413 | ||
1414 | if (m->object->code_signed) { | |
1415 | /* | |
1416 | * CODE SIGNING: | |
1417 | * We just paged in a page from a signed | |
1418 | * memory object but we don't need to | |
1419 | * validate it now. We'll validate it if | |
1420 | * when it gets mapped into a user address | |
1421 | * space for the first time or when the page | |
1422 | * gets copied to another object as a result | |
1423 | * of a copy-on-write. | |
1424 | */ | |
1425 | } | |
1426 | ||
1427 | /* | |
1428 | * We mark the page busy and leave it on | |
1429 | * the pageout queues. If the pageout | |
1430 | * deamon comes across it, then it will | |
1431 | * remove the page from the queue, but not the object | |
1432 | */ | |
1433 | #if TRACEFAULTPAGE | |
1434 | dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1435 | #endif | |
1436 | XPR(XPR_VM_FAULT, | |
1437 | "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n", | |
1438 | object, offset, m, 0, 0); | |
1439 | assert(!m->busy); | |
1440 | assert(!m->absent); | |
1441 | ||
1442 | m->busy = TRUE; | |
1443 | break; | |
1444 | } | |
1445 | ||
1446 | ||
1447 | /* | |
1448 | * we get here when there is no page present in the object at | |
1449 | * the offset we're interested in... we'll allocate a page | |
1450 | * at this point if the pager associated with | |
1451 | * this object can provide the data or we're the top object... | |
1452 | * object is locked; m == NULL | |
1453 | */ | |
1454 | if (must_be_resident) { | |
1455 | if (fault_type == VM_PROT_NONE && | |
1456 | object == kernel_object) { | |
1457 | /* | |
1458 | * We've been called from vm_fault_unwire() | |
1459 | * while removing a map entry that was allocated | |
1460 | * with KMA_KOBJECT and KMA_VAONLY. This page | |
1461 | * is not present and there's nothing more to | |
1462 | * do here (nothing to unwire). | |
1463 | */ | |
1464 | vm_fault_cleanup(object, first_m); | |
1465 | thread_interrupt_level(interruptible_state); | |
1466 | ||
1467 | return VM_FAULT_MEMORY_ERROR; | |
1468 | } | |
1469 | ||
1470 | goto dont_look_for_page; | |
1471 | } | |
1472 | ||
1473 | #if !MACH_PAGEMAP | |
1474 | data_supply = FALSE; | |
1475 | #endif /* !MACH_PAGEMAP */ | |
1476 | ||
1477 | look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); | |
1478 | ||
1479 | #if TRACEFAULTPAGE | |
1480 | dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ | |
1481 | #endif | |
1482 | if (!look_for_page && object == first_object && !object->phys_contiguous) { | |
1483 | /* | |
1484 | * Allocate a new page for this object/offset pair as a placeholder | |
1485 | */ | |
1486 | m = vm_page_grab(); | |
1487 | #if TRACEFAULTPAGE | |
1488 | dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ | |
1489 | #endif | |
1490 | if (m == VM_PAGE_NULL) { | |
1491 | ||
1492 | vm_fault_cleanup(object, first_m); | |
1493 | thread_interrupt_level(interruptible_state); | |
1494 | ||
1495 | return (VM_FAULT_MEMORY_SHORTAGE); | |
1496 | } | |
1497 | ||
1498 | if (fault_info && fault_info->batch_pmap_op == TRUE) { | |
1499 | vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); | |
1500 | } else { | |
1501 | vm_page_insert(m, object, offset); | |
1502 | } | |
1503 | } | |
1504 | if (look_for_page) { | |
1505 | kern_return_t rc; | |
1506 | int my_fault_type; | |
1507 | ||
1508 | /* | |
1509 | * If the memory manager is not ready, we | |
1510 | * cannot make requests. | |
1511 | */ | |
1512 | if (!object->pager_ready) { | |
1513 | #if TRACEFAULTPAGE | |
1514 | dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1515 | #endif | |
1516 | if (m != VM_PAGE_NULL) | |
1517 | VM_PAGE_FREE(m); | |
1518 | ||
1519 | XPR(XPR_VM_FAULT, | |
1520 | "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", | |
1521 | object, offset, 0, 0, 0); | |
1522 | ||
1523 | /* | |
1524 | * take an extra ref so object won't die | |
1525 | */ | |
1526 | vm_object_reference_locked(object); | |
1527 | vm_fault_cleanup(object, first_m); | |
1528 | counter(c_vm_fault_page_block_backoff_kernel++); | |
1529 | ||
1530 | vm_object_lock(object); | |
1531 | assert(object->ref_count > 0); | |
1532 | ||
1533 | if (!object->pager_ready) { | |
1534 | wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible); | |
1535 | ||
1536 | vm_object_unlock(object); | |
1537 | if (wait_result == THREAD_WAITING) | |
1538 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1539 | vm_object_deallocate(object); | |
1540 | ||
1541 | goto backoff; | |
1542 | } else { | |
1543 | vm_object_unlock(object); | |
1544 | vm_object_deallocate(object); | |
1545 | thread_interrupt_level(interruptible_state); | |
1546 | ||
1547 | return (VM_FAULT_RETRY); | |
1548 | } | |
1549 | } | |
1550 | if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) { | |
1551 | /* | |
1552 | * If there are too many outstanding page | |
1553 | * requests pending on this external object, we | |
1554 | * wait for them to be resolved now. | |
1555 | */ | |
1556 | #if TRACEFAULTPAGE | |
1557 | dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1558 | #endif | |
1559 | if (m != VM_PAGE_NULL) | |
1560 | VM_PAGE_FREE(m); | |
1561 | /* | |
1562 | * take an extra ref so object won't die | |
1563 | */ | |
1564 | vm_object_reference_locked(object); | |
1565 | ||
1566 | vm_fault_cleanup(object, first_m); | |
1567 | ||
1568 | counter(c_vm_fault_page_block_backoff_kernel++); | |
1569 | ||
1570 | vm_object_lock(object); | |
1571 | assert(object->ref_count > 0); | |
1572 | ||
1573 | if (object->paging_in_progress >= vm_object_pagein_throttle) { | |
1574 | vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); | |
1575 | ||
1576 | vm_object_unlock(object); | |
1577 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
1578 | vm_object_deallocate(object); | |
1579 | ||
1580 | goto backoff; | |
1581 | } else { | |
1582 | vm_object_unlock(object); | |
1583 | vm_object_deallocate(object); | |
1584 | thread_interrupt_level(interruptible_state); | |
1585 | ||
1586 | return (VM_FAULT_RETRY); | |
1587 | } | |
1588 | } | |
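| /* | |
| * Internal objects backed by the compressor: fetch the data | |
| * directly via vm_compressor_pager_get() instead of issuing a | |
| * memory_object_data_request() to an external pager. | |
| */ | |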
1589 | if (object->internal && | |
1590 | (COMPRESSED_PAGER_IS_ACTIVE | |
1591 | || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)) { | |
1592 | int compressed_count_delta; | |
1593 | ||
1594 | if (m == VM_PAGE_NULL) { | |
1595 | /* | |
1596 | * Allocate a new page for this object/offset pair as a placeholder | |
1597 | */ | |
1598 | m = vm_page_grab(); | |
1599 | #if TRACEFAULTPAGE | |
1600 | dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ | |
1601 | #endif | |
1602 | if (m == VM_PAGE_NULL) { | |
1603 | ||
1604 | vm_fault_cleanup(object, first_m); | |
1605 | thread_interrupt_level(interruptible_state); | |
1606 | ||
1607 | return (VM_FAULT_MEMORY_SHORTAGE); | |
1608 | } | |
1609 | ||
1610 | m->absent = TRUE; | |
1611 | if (fault_info && fault_info->batch_pmap_op == TRUE) { | |
1612 | vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); | |
1613 | } else { | |
1614 | vm_page_insert(m, object, offset); | |
1615 | } | |
1616 | } | |
1617 | assert(m->busy); | |
1618 | ||
1619 | m->absent = TRUE; | |
1620 | pager = object->pager; | |
1621 | ||
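| /* | |
| * Drop the object lock across the call into the compressor; the | |
| * paging_in_progress count (asserted below) keeps the object | |
| * alive while it is unlocked. The data is decompressed directly | |
| * into m's physical page. | |
| */ | |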
1622 | assert(object->paging_in_progress > 0); | |
1623 | vm_object_unlock(object); | |
1624 | ||
1625 | rc = vm_compressor_pager_get( | |
1626 | pager, | |
1627 | offset + object->paging_offset, | |
1628 | m->phys_page, | |
1629 | &my_fault_type, | |
1630 | 0, | |
1631 | &compressed_count_delta); | |
1632 | ||
1633 | if (type_of_fault == NULL) { | |
1634 | int throttle_delay; | |
1635 | ||
1636 | /* | |
1637 | * we weren't called from vm_fault, so we | |
1638 | * need to apply page creation throttling; | |
1639 | * do it before we re-acquire any locks | |
1640 | */ | |
1641 | if (my_fault_type == DBG_COMPRESSOR_FAULT) { | |
1642 | if ((throttle_delay = vm_page_throttled(TRUE))) { | |
1643 | VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0); | |
1644 | delay(throttle_delay); | |
1645 | } | |
1646 | } | |
1647 | } | |
1648 | vm_object_lock(object); | |
1649 | assert(object->paging_in_progress > 0); | |
1650 | ||
1651 | vm_compressor_pager_count( | |
1652 | pager, | |
1653 | compressed_count_delta, | |
1654 | FALSE, /* shared_lock */ | |
1655 | object); | |
1656 | ||
1657 | switch (rc) { | |
1658 | case KERN_SUCCESS: | |
1659 | m->absent = FALSE; | |
1660 | m->dirty = TRUE; | |
1661 | if ((m->object->wimg_bits & | |
1662 | VM_WIMG_MASK) != | |
1663 | VM_WIMG_USE_DEFAULT) { | |
1664 | /* | |
1665 | * If the page is not cacheable, | |
1666 | * we can't let its contents | |
1667 | * linger in the data cache | |
1668 | * after the decompression. | |
1669 | */ | |
1670 | pmap_sync_page_attributes_phys( | |
1671 | m->phys_page); | |
1672 | } else { | |
1673 | m->written_by_kernel = TRUE; | |
1674 | } | |
1675 | ||
1676 | /* | |
1677 | * If the object is purgeable, its | |
1678 | * owner's purgeable ledgers have been | |
1679 | * updated in vm_page_insert() but the | |
1680 | * page was also accounted for in a | |
1681 | * "compressed purgeable" ledger, so | |
1682 | * update that now. | |
1683 | */ | |
1684 | if ((object->purgable != | |
1685 | VM_PURGABLE_DENY) && | |
1686 | (object->vo_purgeable_owner != | |
1687 | NULL)) { | |
1688 | /* | |
1689 | * One less compressed | |
1690 | * purgeable page. | |
1691 | */ | |
1692 | vm_purgeable_compressed_update( | |
1693 | object, | |
1694 | -1); | |
1695 | } | |
1696 | ||
1697 | break; | |
1698 | case KERN_MEMORY_FAILURE: | |
1699 | m->unusual = TRUE; | |
1700 | m->error = TRUE; | |
1701 | m->absent = FALSE; | |
1702 | break; | |
1703 | case KERN_MEMORY_ERROR: | |
1704 | assert(m->absent); | |
1705 | break; | |
1706 | default: | |
1707 | panic("vm_fault_page(): unexpected " | |
1708 | "error %d from " | |
1709 | "vm_compressor_pager_get()\n", | |
1710 | rc); | |
1711 | } | |
1712 | PAGE_WAKEUP_DONE(m); | |
1713 | ||
1714 | rc = KERN_SUCCESS; | |
1715 | goto data_requested; | |
1716 | } | |
1717 | my_fault_type = DBG_PAGEIN_FAULT; | |
1718 | ||
1719 | if (m != VM_PAGE_NULL) { | |
1720 | VM_PAGE_FREE(m); | |
1721 | m = VM_PAGE_NULL; | |
1722 | } | |
1723 | ||
1724 | #if TRACEFAULTPAGE | |
1725 | dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ | |
1726 | #endif | |
1727 | ||
1728 | /* | |
1729 | * It's possible someone called vm_object_destroy while we weren't | |
1730 | * holding the object lock. If that has happened, then bail out | |
1731 | * here. | |
1732 | */ | |
1733 | ||
1734 | pager = object->pager; | |
1735 | ||
1736 | if (pager == MEMORY_OBJECT_NULL) { | |
1737 | vm_fault_cleanup(object, first_m); | |
1738 | thread_interrupt_level(interruptible_state); | |
1739 | return VM_FAULT_MEMORY_ERROR; | |
1740 | } | |
1741 | ||
1742 | /* | |
1743 | * We have an absent page in place for the faulting offset, | |
1744 | * so we can release the object lock. | |
1745 | */ | |
1746 | ||
1747 | vm_object_unlock(object); | |
1748 | ||
1749 | /* | |
1750 | * If this object uses a copy_call strategy, | |
1751 | * and we are interested in a copy of this object | |
1752 | * (having gotten here only by following a | |
1753 | * shadow chain), then tell the memory manager | |
1754 | * via a flag added to the desired_access | |
1755 | * parameter, so that it can detect a race | |
1756 | * between our walking down the shadow chain | |
1757 | * and its pushing pages up into a copy of | |
1758 | * the object that it manages. | |
1759 | */ | |
1760 | if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) | |
1761 | wants_copy_flag = VM_PROT_WANTS_COPY; | |
1762 | else | |
1763 | wants_copy_flag = VM_PROT_NONE; | |
1764 | ||
1765 | XPR(XPR_VM_FAULT, | |
1766 | "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n", | |
1767 | object, offset, m, | |
1768 | access_required | wants_copy_flag, 0); | |
1769 | ||
1770 | if (object->copy == first_object) { | |
1771 | /* | |
1772 | * if we issue the memory_object_data_request in | |
1773 | * this state, we are subject to a deadlock with | |
1774 | * the underlying filesystem if it is trying to | |
1775 | * shrink the file resulting in a push of pages | |
1776 | * into the copy object... that push will stall | |
1777 | * on the placeholder page, and if the pushing thread | |
1778 | * is holding a lock that is required on the pagein | |
1779 | * path (such as a truncate lock), we'll deadlock... | |
1780 | * to avoid this potential deadlock, we throw away | |
1781 | * our placeholder page before calling memory_object_data_request | |
1782 | * and force this thread to retry the vm_fault_page after | |
1783 | * we have issued the I/O. the second time through this path | |
1784 | * we will find the page already in the cache (presumably still | |
1785 | * busy waiting for the I/O to complete) and then complete | |
1786 | * the fault w/o having to go through memory_object_data_request again | |
1787 | */ | |
1788 | assert(first_m != VM_PAGE_NULL); | |
1789 | assert(first_m->object == first_object); | |
1790 | ||
1791 | vm_object_lock(first_object); | |
1792 | VM_PAGE_FREE(first_m); | |
1793 | vm_object_paging_end(first_object); | |
1794 | vm_object_unlock(first_object); | |
1795 | ||
1796 | first_m = VM_PAGE_NULL; | |
1797 | force_fault_retry = TRUE; | |
1798 | ||
1799 | vm_fault_page_forced_retry++; | |
1800 | } | |
1801 | ||
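| /* | |
| * If a data request was already issued on a previous pass through | |
| * this loop, limit this one to a single-page, random-access | |
| * request; the caller's settings are restored right after the call. | |
| */ | |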
1802 | if (data_already_requested == TRUE) { | |
1803 | orig_behavior = fault_info->behavior; | |
1804 | orig_cluster_size = fault_info->cluster_size; | |
1805 | ||
1806 | fault_info->behavior = VM_BEHAVIOR_RANDOM; | |
1807 | fault_info->cluster_size = PAGE_SIZE; | |
1808 | } | |
1809 | /* | |
1810 | * Call the memory manager to retrieve the data. | |
1811 | */ | |
1812 | rc = memory_object_data_request( | |
1813 | pager, | |
1814 | offset + object->paging_offset, | |
1815 | PAGE_SIZE, | |
1816 | access_required | wants_copy_flag, | |
1817 | (memory_object_fault_info_t)fault_info); | |
1818 | ||
1819 | if (data_already_requested == TRUE) { | |
1820 | fault_info->behavior = orig_behavior; | |
1821 | fault_info->cluster_size = orig_cluster_size; | |
1822 | } else | |
1823 | data_already_requested = TRUE; | |
1824 | ||
1825 | DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL); | |
1826 | #if TRACEFAULTPAGE | |
1827 | dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ | |
1828 | #endif | |
1829 | vm_object_lock(object); | |
1830 | ||
1831 | data_requested: | |
1832 | if (rc != KERN_SUCCESS) { | |
1833 | ||
1834 | vm_fault_cleanup(object, first_m); | |
1835 | thread_interrupt_level(interruptible_state); | |
1836 | ||
1837 | return ((rc == MACH_SEND_INTERRUPTED) ? | |
1838 | VM_FAULT_INTERRUPTED : | |
1839 | VM_FAULT_MEMORY_ERROR); | |
1840 | } else { | |
1841 | clock_sec_t tv_sec; | |
1842 | clock_usec_t tv_usec; | |
1843 | ||
1844 | if (my_fault_type == DBG_PAGEIN_FAULT) { | |
1845 | clock_get_system_microtime(&tv_sec, &tv_usec); | |
1846 | current_thread()->t_page_creation_time = tv_sec; | |
1847 | current_thread()->t_page_creation_count = 0; | |
1848 | } | |
1849 | } | |
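| /* | |
| * If the caller allows interruptions and this thread has been | |
| * asked to abort, give up now that the request has been issued. | |
| */ | |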
1850 | if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) { | |
1851 | ||
1852 | vm_fault_cleanup(object, first_m); | |
1853 | thread_interrupt_level(interruptible_state); | |
1854 | ||
1855 | return (VM_FAULT_INTERRUPTED); | |
1856 | } | |
1857 | if (force_fault_retry == TRUE) { | |
1858 | ||
1859 | vm_fault_cleanup(object, first_m); | |
1860 | thread_interrupt_level(interruptible_state); | |
1861 | ||
1862 | return (VM_FAULT_RETRY); | |
1863 | } | |
1864 | if (m == VM_PAGE_NULL && object->phys_contiguous) { | |
1865 | /* | |
1866 | * No page here means that the object we | |
1867 | * initially looked up was "physically | |
1868 | * contiguous" (i.e. device memory). However, | |
1869 | * with Virtual VRAM, the object might not | |
1870 | * be backed by that device memory anymore, | |
1871 | * so we're done here only if the object is | |
1872 | * still "phys_contiguous". | |
1873 | * Otherwise, if the object is no longer | |
1874 | * "phys_contiguous", we need to retry the | |
1875 | * page fault against the object's new backing | |
1876 | * store (different memory object). | |
1877 | */ | |
1878 | phys_contig_object: | |
1879 | goto done; | |
1880 | } | |
1881 | /* | |
1882 | * potentially a pagein fault | |
1883 | * if we make it through the state checks | |
1884 | * above, then we'll count it as such | |
1885 | */ | |
1886 | my_fault = my_fault_type; | |
1887 | ||
1888 | /* | |
1889 | * Retry with same object/offset, since new data may | |
1890 | * be in a different page (i.e., m is meaningless at | |
1891 | * this point). | |
1892 | */ | |
1893 | continue; | |
1894 | } | |
1895 | dont_look_for_page: | |
1896 | /* | |
1897 | * We get here if the object has no pager, or an existence map | |
1898 | * exists and indicates the page isn't present on the pager, | |
1899 | * or we're unwiring a page. If a pager exists, but there | |
1900 | * is no existence map, then the m->absent case above handles | |
1901 | * the ZF (zero-fill) case when the pager can't provide the page. | |
1902 | */ | |
1903 | #if TRACEFAULTPAGE | |
1904 | dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ | |
1905 | #endif | |
1906 | if (object == first_object) | |
1907 | first_m = m; | |
1908 | else | |
1909 | assert(m == VM_PAGE_NULL); | |
1910 | ||
1911 | XPR(XPR_VM_FAULT, | |
1912 | "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n", | |
1913 | object, offset, m, | |
1914 | object->shadow, 0); | |
1915 | ||
1916 | next_object = object->shadow; | |
1917 | ||
1918 | if (next_object == VM_OBJECT_NULL) { | |
1919 | /* | |
1920 | * we've hit the bottom of the shadow chain, | |
1921 | * fill the page in the top object with zeros. | |
1922 | */ | |
1923 | assert(!must_be_resident); | |
1924 | ||
1925 | if (object != first_object) { | |
1926 | vm_object_paging_end(object); | |
1927 | vm_object_unlock(object); | |
1928 | ||
1929 | object = first_object; | |
1930 | offset = first_offset; | |
1931 | vm_object_lock(object); | |
1932 | } | |
1933 | m = first_m; | |
1934 | assert(m->object == object); | |
1935 | first_m = VM_PAGE_NULL; | |
1936 | ||
1937 | /* | |
1938 | * check for any conditions that prevent | |
1939 | * us from creating a new zero-fill page... | |
1940 | * vm_fault_check will do all of the | |
1941 | * fault cleanup in the case of an error condition | |
1942 | * including resetting the thread_interrupt_level | |
1943 | */ | |
1944 | error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); | |
1945 | ||
1946 | if (error != VM_FAULT_SUCCESS) | |
1947 | return (error); | |
1948 | ||
1949 | if (m == VM_PAGE_NULL) { | |
1950 | m = vm_page_grab(); | |
1951 | ||
1952 | if (m == VM_PAGE_NULL) { | |
1953 | vm_fault_cleanup(object, VM_PAGE_NULL); | |
1954 | thread_interrupt_level(interruptible_state); | |
1955 | ||
1956 | return (VM_FAULT_MEMORY_SHORTAGE); | |
1957 | } | |
1958 | vm_page_insert(m, object, offset); | |
1959 | } | |
1960 | if (fault_info->mark_zf_absent && no_zero_fill == TRUE) | |
1961 | m->absent = TRUE; | |
1962 | ||
1963 | my_fault = vm_fault_zero_page(m, no_zero_fill); | |
1964 | ||
1965 | break; | |
1966 | ||
1967 | } else { | |
1968 | /* | |
1969 | * Move on to the next object. Lock the next | |
1970 | * object before unlocking the current one. | |
1971 | */ | |
1972 | if ((object != first_object) || must_be_resident) | |
1973 | vm_object_paging_end(object); | |
1974 | ||
1975 | offset += object->vo_shadow_offset; | |
1976 | fault_info->lo_offset += object->vo_shadow_offset; | |
1977 | fault_info->hi_offset += object->vo_shadow_offset; | |
1978 | access_required = VM_PROT_READ; | |
1979 | ||
1980 | vm_object_lock(next_object); | |
1981 | vm_object_unlock(object); | |
1982 | ||
1983 | object = next_object; | |
1984 | vm_object_paging_begin(object); | |
1985 | } | |
1986 | } | |
1987 | ||
1988 | /* | |
1989 | * PAGE HAS BEEN FOUND. | |
1990 | * | |
1991 | * This page (m) is: | |
1992 | * busy, so that we can play with it; | |
1993 | * not absent, so that nobody else will fill it; | |
1994 | * possibly eligible for pageout; | |
1995 | * | |
1996 | * The top-level page (first_m) is: | |
1997 | * VM_PAGE_NULL if the page was found in the | |
1998 | * top-level object; | |
1999 | * busy, not absent, and ineligible for pageout. | |
2000 | * | |
2001 | * The current object (object) is locked. A paging | |
2002 | * reference is held for the current and top-level | |
2003 | * objects. | |
2004 | */ | |
2005 | ||
2006 | #if TRACEFAULTPAGE | |
2007 | dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ | |
2008 | #endif | |
2009 | #if EXTRA_ASSERTIONS | |
2010 | assert(m->busy && !m->absent); | |
2011 | assert((first_m == VM_PAGE_NULL) || | |
2012 | (first_m->busy && !first_m->absent && | |
2013 | !first_m->active && !first_m->inactive)); | |
2014 | #endif /* EXTRA_ASSERTIONS */ | |
2015 | ||
2016 | /* | |
2017 | * ENCRYPTED SWAP: | |
2018 | * If we found a page, we must have decrypted it before we | |
2019 | * get here... | |
2020 | */ | |
2021 | ASSERT_PAGE_DECRYPTED(m); | |
2022 | ||
2023 | XPR(XPR_VM_FAULT, | |
2024 | "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", | |
2025 | object, offset, m, | |
2026 | first_object, first_m); | |
2027 | ||
2028 | /* | |
2029 | * If the page is being written, but isn't | |
2030 | * already owned by the top-level object, | |
2031 | * we have to copy it into a new page owned | |
2032 | * by the top-level object. | |
2033 | */ | |
2034 | if (object != first_object) { | |
2035 | ||
2036 | #if TRACEFAULTPAGE | |
2037 | dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ | |
2038 | #endif | |
2039 | if (fault_type & VM_PROT_WRITE) { | |
2040 | vm_page_t copy_m; | |
2041 | ||
2042 | /* | |
2043 | * We only really need to copy if we | |
2044 | * want to write it. | |
2045 | */ | |
2046 | assert(!must_be_resident); | |
2047 | ||
2048 | /* | |
2049 | * are we protecting the system from | |
2050 | * backing store exhaustion? If so, | |
2051 | * sleep unless we are privileged. | |
2052 | */ | |
2053 | if (vm_backing_store_low) { | |
2054 | if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { | |
2055 | ||
2056 | RELEASE_PAGE(m); | |
2057 | vm_fault_cleanup(object, first_m); | |
2058 | ||
2059 | assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); | |
2060 | ||
2061 | thread_block(THREAD_CONTINUE_NULL); | |
2062 | thread_interrupt_level(interruptible_state); | |
2063 | ||
2064 | return (VM_FAULT_RETRY); | |
2065 | } | |
2066 | } | |
2067 | /* | |
2068 | * If we try to collapse first_object at this | |
2069 | * point, we may deadlock when we try to get | |
2070 | * the lock on an intermediate object (since we | |
2071 | * have the bottom object locked). We can't | |
2072 | * unlock the bottom object, because the page | |
2073 | * we found may move (by collapse) if we do. | |
2074 | * | |
2075 | * Instead, we first copy the page. Then, when | |
2076 | * we have no more use for the bottom object, | |
2077 | * we unlock it and try to collapse. | |
2078 | * | |
2079 | * Note that we copy the page even if we didn't | |
2080 | * need to... that's the breaks. | |
2081 | */ | |
2082 | ||
2083 | /* | |
2084 | * Allocate a page for the copy | |
2085 | */ | |
2086 | copy_m = vm_page_grab(); | |
2087 | ||
2088 | if (copy_m == VM_PAGE_NULL) { | |
2089 | RELEASE_PAGE(m); | |
2090 | ||
2091 | vm_fault_cleanup(object, first_m); | |
2092 | thread_interrupt_level(interruptible_state); | |
2093 | ||
2094 | return (VM_FAULT_MEMORY_SHORTAGE); | |
2095 | } | |
2096 | XPR(XPR_VM_FAULT, | |
2097 | "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n", | |
2098 | object, offset, | |
2099 | m, copy_m, 0); | |
2100 | ||
2101 | vm_page_copy(m, copy_m); | |
2102 | ||
2103 | /* | |
2104 | * If another map is truly sharing this | |
2105 | * page with us, we have to flush all | |
2106 | * uses of the original page, since we | |
2107 | * can't distinguish those which want the | |
2108 | * original from those which need the | |
2109 | * new copy. | |
2110 | * | |
2111 | * XXXO If we know that only one map has | |
2112 | * access to this page, then we could | |
2113 | * avoid the pmap_disconnect() call. | |
2114 | */ | |
2115 | if (m->pmapped) | |
2116 | pmap_disconnect(m->phys_page); | |
2117 | ||
2118 | if (m->clustered) { | |
2119 | VM_PAGE_COUNT_AS_PAGEIN(m); | |
2120 | VM_PAGE_CONSUME_CLUSTERED(m); | |
2121 | } | |
2122 | assert(!m->cleaning); | |
2123 | ||
2124 | /* | |
2125 | * We no longer need the old page or object. | |
2126 | */ | |
2127 | RELEASE_PAGE(m); | |
2128 | ||
2129 | vm_object_paging_end(object); | |
2130 | vm_object_unlock(object); | |
2131 | ||
2132 | my_fault = DBG_COW_FAULT; | |
2133 | VM_STAT_INCR(cow_faults); | |
2134 | DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL); | |
2135 | current_task()->cow_faults++; | |
2136 | ||
2137 | object = first_object; | |
2138 | offset = first_offset; | |
2139 | ||
2140 | vm_object_lock(object); | |
2141 | /* | |
2142 | * get rid of the placeholder | |
2143 | * page that we soldered in earlier | |
2144 | */ | |
2145 | VM_PAGE_FREE(first_m); | |
2146 | first_m = VM_PAGE_NULL; | |
2147 | ||
2148 | /* | |
2149 | * and replace it with the | |
2150 | * page we just copied into | |
2151 | */ | |
2152 | assert(copy_m->busy); | |
2153 | vm_page_insert(copy_m, object, offset); | |
2154 | SET_PAGE_DIRTY(copy_m, TRUE); | |
2155 | ||
2156 | m = copy_m; | |
2157 | /* | |
2158 | * Now that we've gotten the copy out of the | |
2159 | * way, let's try to collapse the top object. | |
2160 | * But we have to play ugly games with | |
2161 | * paging_in_progress to do that... | |
2162 | */ | |
2163 | vm_object_paging_end(object); | |
2164 | vm_object_collapse(object, offset, TRUE); | |
2165 | vm_object_paging_begin(object); | |
2166 | ||
2167 | } else | |
2168 | *protection &= (~VM_PROT_WRITE); | |
2169 | } | |
2170 | /* | |
2171 | * Now check whether the page needs to be pushed into the | |
2172 | * copy object. The use of asymmetric copy on write for | |
2173 | * shared temporary objects means that we may do two copies to | |
2174 | * satisfy the fault; one above to get the page from a | |
2175 | * shadowed object, and one here to push it into the copy. | |
2176 | */ | |
2177 | try_failed_count = 0; | |
2178 | ||
2179 | while ((copy_object = first_object->copy) != VM_OBJECT_NULL) { | |
2180 | vm_object_offset_t copy_offset; | |
2181 | vm_page_t copy_m; | |
2182 | ||
2183 | #if TRACEFAULTPAGE | |
2184 | dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ | |
2185 | #endif | |
2186 | /* | |
2187 | * If the page is being written, but hasn't been | |
2188 | * copied to the copy-object, we have to copy it there. | |
2189 | */ | |
2190 | if ((fault_type & VM_PROT_WRITE) == 0) { | |
2191 | *protection &= ~VM_PROT_WRITE; | |
2192 | break; | |
2193 | } | |
2194 | ||
2195 | /* | |
2196 | * If the page was guaranteed to be resident, | |
2197 | * we must have already performed the copy. | |
2198 | */ | |
2199 | if (must_be_resident) | |
2200 | break; | |
2201 | ||
2202 | /* | |
2203 | * Try to get the lock on the copy_object. | |
2204 | */ | |
2205 | if (!vm_object_lock_try(copy_object)) { | |
2206 | ||
2207 | vm_object_unlock(object); | |
2208 | try_failed_count++; | |
2209 | ||
2210 | mutex_pause(try_failed_count); /* wait a bit */ | |
2211 | vm_object_lock(object); | |
2212 | ||
2213 | continue; | |
2214 | } | |
2215 | try_failed_count = 0; | |
2216 | ||
2217 | /* | |
2218 | * Make another reference to the copy-object, | |
2219 | * to keep it from disappearing during the | |
2220 | * copy. | |
2221 | */ | |
2222 | vm_object_reference_locked(copy_object); | |
2223 | ||
2224 | /* | |
2225 | * Does the page exist in the copy? | |
2226 | */ | |
2227 | copy_offset = first_offset - copy_object->vo_shadow_offset; | |
2228 | ||
2229 | if (copy_object->vo_size <= copy_offset) | |
2230 | /* | |
2231 | * Copy object doesn't cover this page -- do nothing. | |
2232 | */ | |
2233 | ; | |
2234 | else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { | |
2235 | /* | |
2236 | * Page currently exists in the copy object | |
2237 | */ | |
2238 | if (copy_m->busy) { | |
2239 | /* | |
2240 | * If the page is being brought | |
2241 | * in, wait for it and then retry. | |
2242 | */ | |
2243 | RELEASE_PAGE(m); | |
2244 | ||
2245 | /* | |
2246 | * take an extra ref so object won't die | |
2247 | */ | |
2248 | vm_object_reference_locked(copy_object); | |
2249 | vm_object_unlock(copy_object); | |
2250 | vm_fault_cleanup(object, first_m); | |
2251 | counter(c_vm_fault_page_block_backoff_kernel++); | |
2252 | ||
2253 | vm_object_lock(copy_object); | |
2254 | assert(copy_object->ref_count > 0); | |
2255 | VM_OBJ_RES_DECR(copy_object); | |
2256 | vm_object_lock_assert_exclusive(copy_object); | |
2257 | copy_object->ref_count--; | |
2258 | assert(copy_object->ref_count > 0); | |
2259 | copy_m = vm_page_lookup(copy_object, copy_offset); | |
2260 | /* | |
2261 | * ENCRYPTED SWAP: | |
2262 | * it's OK if the "copy_m" page is encrypted, | |
2263 | * because we're not moving it nor handling its | |
2264 | * contents. | |
2265 | */ | |
2266 | if (copy_m != VM_PAGE_NULL && copy_m->busy) { | |
2267 | PAGE_ASSERT_WAIT(copy_m, interruptible); | |
2268 | ||
2269 | vm_object_unlock(copy_object); | |
2270 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
2271 | vm_object_deallocate(copy_object); | |
2272 | ||
2273 | goto backoff; | |
2274 | } else { | |
2275 | vm_object_unlock(copy_object); | |
2276 | vm_object_deallocate(copy_object); | |
2277 | thread_interrupt_level(interruptible_state); | |
2278 | ||
2279 | return (VM_FAULT_RETRY); | |
2280 | } | |
2281 | } | |
2282 | } | |
2283 | else if (!PAGED_OUT(copy_object, copy_offset)) { | |
2284 | /* | |
2285 | * If PAGED_OUT is TRUE, then the page used to exist | |
2286 | * in the copy-object, and has already been paged out. | |
2287 | * We don't need to repeat this. If PAGED_OUT is | |
2288 | * FALSE, then either we don't know (!pager_created, | |
2289 | * for example) or it hasn't been paged out. | |
2290 | * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT) | |
2291 | * We must copy the page to the copy object. | |
2292 | */ | |
2293 | ||
2294 | if (vm_backing_store_low) { | |
2295 | /* | |
2296 | * we are protecting the system from | |
2297 | * backing store exhaustion; | |
2298 | * sleep unless we are privileged. | |
2299 | */ | |
2300 | if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { | |
2301 | assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); | |
2302 | ||
2303 | RELEASE_PAGE(m); | |
2304 | VM_OBJ_RES_DECR(copy_object); | |
2305 | vm_object_lock_assert_exclusive(copy_object); | |
2306 | copy_object->ref_count--; | |
2307 | assert(copy_object->ref_count > 0); | |
2308 | ||
2309 | vm_object_unlock(copy_object); | |
2310 | vm_fault_cleanup(object, first_m); | |
2311 | thread_block(THREAD_CONTINUE_NULL); | |
2312 | thread_interrupt_level(interruptible_state); | |
2313 | ||
2314 | return (VM_FAULT_RETRY); | |
2315 | } | |
2316 | } | |
2317 | /* | |
2318 | * Allocate a page for the copy | |
2319 | */ | |
2320 | copy_m = vm_page_alloc(copy_object, copy_offset); | |
2321 | ||
2322 | if (copy_m == VM_PAGE_NULL) { | |
2323 | RELEASE_PAGE(m); | |
2324 | ||
2325 | VM_OBJ_RES_DECR(copy_object); | |
2326 | vm_object_lock_assert_exclusive(copy_object); | |
2327 | copy_object->ref_count--; | |
2328 | assert(copy_object->ref_count > 0); | |
2329 | ||
2330 | vm_object_unlock(copy_object); | |
2331 | vm_fault_cleanup(object, first_m); | |
2332 | thread_interrupt_level(interruptible_state); | |
2333 | ||
2334 | return (VM_FAULT_MEMORY_SHORTAGE); | |
2335 | } | |
2336 | /* | |
2337 | * Must copy page into copy-object. | |
2338 | */ | |
2339 | vm_page_copy(m, copy_m); | |
2340 | ||
2341 | /* | |
2342 | * If the old page was in use by any users | |
2343 | * of the copy-object, it must be removed | |
2344 | * from all pmaps. (We can't know which | |
2345 | * pmaps use it.) | |
2346 | */ | |
2347 | if (m->pmapped) | |
2348 | pmap_disconnect(m->phys_page); | |
2349 | ||
2350 | if (m->clustered) { | |
2351 | VM_PAGE_COUNT_AS_PAGEIN(m); | |
2352 | VM_PAGE_CONSUME_CLUSTERED(m); | |
2353 | } | |
2354 | /* | |
2355 | * If there's a pager, then immediately | |
2356 | * page out this page, using the "initialize" | |
2357 | * option. Else, we use the copy. | |
2358 | */ | |
2359 | if ((!copy_object->pager_ready) | |
2360 | #if MACH_PAGEMAP | |
2361 | || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT | |
2362 | #endif | |
2363 | || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT | |
2364 | ) { | |
2365 | ||
2366 | vm_page_lockspin_queues(); | |
2367 | assert(!m->cleaning); | |
2368 | vm_page_activate(copy_m); | |
2369 | vm_page_unlock_queues(); | |
2370 | ||
2371 | SET_PAGE_DIRTY(copy_m, TRUE); | |
2372 | PAGE_WAKEUP_DONE(copy_m); | |
2373 | ||
2374 | } else if (copy_object->internal && | |
2375 | (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) { | |
2376 | /* | |
2377 | * For internal objects check with the pager to see | |
2378 | * if the page already exists in the backing store. | |
2379 | * If yes, then we can drop the copy page. If not, | |
2380 | * then we'll activate it, mark it dirty and keep it | |
2381 | * around. | |
2382 | */ | |
2383 | ||
2384 | kern_return_t kr = KERN_SUCCESS; | |
2385 | ||
2386 | memory_object_t copy_pager = copy_object->pager; | |
2387 | assert(copy_pager != MEMORY_OBJECT_NULL); | |
2388 | vm_object_paging_begin(copy_object); | |
2389 | ||
2390 | vm_object_unlock(copy_object); | |
2391 | ||
2392 | kr = memory_object_data_request( | |
2393 | copy_pager, | |
2394 | copy_offset + copy_object->paging_offset, | |
2395 | 0, /* Only query the pager. */ | |
2396 | VM_PROT_READ, | |
2397 | NULL); | |
2398 | ||
2399 | vm_object_lock(copy_object); | |
2400 | ||
2401 | vm_object_paging_end(copy_object); | |
2402 | ||
2403 | /* | |
2404 | * Since we dropped the copy_object's lock, | |
2405 | * check whether we'll have to deallocate | |
2406 | * the hard way. | |
2407 | */ | |
2408 | if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) { | |
2409 | vm_object_unlock(copy_object); | |
2410 | vm_object_deallocate(copy_object); | |
2411 | vm_object_lock(object); | |
2412 | ||
2413 | continue; | |
2414 | } | |
2415 | if (kr == KERN_SUCCESS) { | |
2416 | /* | |
2417 | * The pager has the page. We don't want to overwrite | |
2418 | * that page by sending this one out to the backing store. | |
2419 | * So we drop the copy page. | |
2420 | */ | |
2421 | VM_PAGE_FREE(copy_m); | |
2422 | ||
2423 | } else { | |
2424 | /* | |
2425 | * The pager doesn't have the page. We'll keep this one | |
2426 | * around in the copy object. It might get sent out to | |
2427 | * the backing store under memory pressure. | |
2428 | */ | |
2429 | vm_page_lockspin_queues(); | |
2430 | assert(!m->cleaning); | |
2431 | vm_page_activate(copy_m); | |
2432 | vm_page_unlock_queues(); | |
2433 | ||
2434 | SET_PAGE_DIRTY(copy_m, TRUE); | |
2435 | PAGE_WAKEUP_DONE(copy_m); | |
2436 | } | |
2437 | } else { | |
2438 | ||
2439 | assert(copy_m->busy == TRUE); | |
2440 | assert(!m->cleaning); | |
2441 | ||
2442 | /* | |
2443 | * dirty is protected by the object lock | |
2444 | */ | |
2445 | SET_PAGE_DIRTY(copy_m, TRUE); | |
2446 | ||
2447 | /* | |
2448 | * The page is already ready for pageout: | |
2449 | * not on pageout queues and busy. | |
2450 | * Unlock everything except the | |
2451 | * copy_object itself. | |
2452 | */ | |
2453 | vm_object_unlock(object); | |
2454 | ||
2455 | /* | |
2456 | * Write the page to the copy-object, | |
2457 | * flushing it from the kernel. | |
2458 | */ | |
2459 | vm_pageout_initialize_page(copy_m); | |
2460 | ||
2461 | /* | |
2462 | * Since the pageout may have | |
2463 | * temporarily dropped the | |
2464 | * copy_object's lock, we | |
2465 | * check whether we'll have | |
2466 | * to deallocate the hard way. | |
2467 | */ | |
2468 | if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) { | |
2469 | vm_object_unlock(copy_object); | |
2470 | vm_object_deallocate(copy_object); | |
2471 | vm_object_lock(object); | |
2472 | ||
2473 | continue; | |
2474 | } | |
2475 | /* | |
2476 | * Pick back up the old object's | |
2477 | * lock. [It is safe to do so, | |
2478 | * since it must be deeper in the | |
2479 | * object tree.] | |
2480 | */ | |
2481 | vm_object_lock(object); | |
2482 | } | |
2483 | ||
2484 | /* | |
2485 | * Because we're pushing a page upward | |
2486 | * in the object tree, we must restart | |
2487 | * any faults that are waiting here. | |
2488 | * [Note that this is an expansion of | |
2489 | * PAGE_WAKEUP that uses the THREAD_RESTART | |
2490 | * wait result]. Can't turn off the page's | |
2491 | * busy bit because we're not done with it. | |
2492 | */ | |
2493 | if (m->wanted) { | |
2494 | m->wanted = FALSE; | |
2495 | thread_wakeup_with_result((event_t) m, THREAD_RESTART); | |
2496 | } | |
2497 | } | |
2498 | /* | |
2499 | * The reference count on copy_object must be | |
2500 | * at least 2: one for our extra reference, | |
2501 | * and at least one from the outside world | |
2502 | * (we checked that when we last locked | |
2503 | * copy_object). | |
2504 | */ | |
2505 | vm_object_lock_assert_exclusive(copy_object); | |
2506 | copy_object->ref_count--; | |
2507 | assert(copy_object->ref_count > 0); | |
2508 | ||
2509 | VM_OBJ_RES_DECR(copy_object); | |
2510 | vm_object_unlock(copy_object); | |
2511 | ||
2512 | break; | |
2513 | } | |
2514 | ||
2515 | done: | |
2516 | *result_page = m; | |
2517 | *top_page = first_m; | |
2518 | ||
2519 | XPR(XPR_VM_FAULT, | |
2520 | "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", | |
2521 | object, offset, m, first_m, 0); | |
2522 | ||
2523 | if (m != VM_PAGE_NULL) { | |
2524 | retval = VM_FAULT_SUCCESS; | |
2525 | ||
2526 | if (my_fault == DBG_PAGEIN_FAULT) { | |
2527 | ||
2528 | VM_PAGE_COUNT_AS_PAGEIN(m); | |
2529 | ||
2530 | if (m->object->internal) | |
2531 | my_fault = DBG_PAGEIND_FAULT; | |
2532 | else | |
2533 | my_fault = DBG_PAGEINV_FAULT; | |
2534 | ||
2535 | /* | |
2536 | * evaluate access pattern and update state... | |
2537 | * vm_fault_deactivate_behind depends on the | |
2538 | * state being up to date | |
2539 | */ | |
2540 | vm_fault_is_sequential(object, offset, fault_info->behavior); | |
2541 | ||
2542 | vm_fault_deactivate_behind(object, offset, fault_info->behavior); | |
2543 | } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) { | |
2544 | ||
2545 | VM_STAT_INCR(decompressions); | |
2546 | } | |
2547 | if (type_of_fault) | |
2548 | *type_of_fault = my_fault; | |
2549 | } else { | |
2550 | retval = VM_FAULT_SUCCESS_NO_VM_PAGE; | |
2551 | assert(first_m == VM_PAGE_NULL); | |
2552 | assert(object == first_object); | |
2553 | } | |
2554 | ||
2555 | thread_interrupt_level(interruptible_state); | |
2556 | ||
2557 | #if TRACEFAULTPAGE | |
2558 | dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ | |
2559 | #endif | |
2560 | return retval; | |
2561 | ||
2562 | backoff: | |
2563 | thread_interrupt_level(interruptible_state); | |
2564 | ||
2565 | if (wait_result == THREAD_INTERRUPTED) | |
2566 | return (VM_FAULT_INTERRUPTED); | |
2567 | return (VM_FAULT_RETRY); | |
2568 | ||
2569 | #undef RELEASE_PAGE | |
2570 | } | |
2571 | ||
2572 | ||
2573 | ||
2574 | /* | |
2575 | * CODE SIGNING: | |
2576 | * When soft faulting a page, we have to validate the page if: | |
2577 | * 1. the page is being mapped in user space | |
2578 | * 2. the page hasn't already been found to be "tainted" | |
2579 | * 3. the page belongs to a code-signed object | |
2580 | * 4. the page has not been validated yet or has been mapped for write. | |
2581 | */ | |
2582 | #define VM_FAULT_NEED_CS_VALIDATION(pmap, page) \ | |
2583 | ((pmap) != kernel_pmap /*1*/ && \ | |
2584 | !(page)->cs_tainted /*2*/ && \ | |
2585 | (page)->object->code_signed /*3*/ && \ | |
2586 | (!(page)->cs_validated || (page)->wpmapped /*4*/)) | |
2587 | ||
2588 | ||
2589 | /* | |
2590 | * page queue lock must NOT be held | |
2591 | * m->object must be locked | |
2592 | * | |
2593 | * NOTE: m->object could be locked "shared" only if we are called | |
2594 | * from vm_fault() as part of a soft fault. If so, we must be | |
2595 | * careful not to modify the VM object in any way that is not | |
2596 | * legal under a shared lock... | |
2597 | */ | |
2598 | extern int proc_selfpid(void); | |
2599 | extern char *proc_name_address(void *p); | |
2600 | unsigned long cs_enter_tainted_rejected = 0; | |
2601 | unsigned long cs_enter_tainted_accepted = 0; | |
2602 | kern_return_t | |
2603 | vm_fault_enter(vm_page_t m, | |
2604 | pmap_t pmap, | |
2605 | vm_map_offset_t vaddr, | |
2606 | vm_prot_t prot, | |
2607 | vm_prot_t caller_prot, | |
2608 | boolean_t wired, | |
2609 | boolean_t change_wiring, | |
2610 | boolean_t no_cache, | |
2611 | boolean_t cs_bypass, | |
2612 | __unused int user_tag, | |
2613 | int pmap_options, | |
2614 | boolean_t *need_retry, | |
2615 | int *type_of_fault) | |
2616 | { | |
2617 | kern_return_t kr, pe_result; | |
2618 | boolean_t previously_pmapped = m->pmapped; | |
2619 | boolean_t must_disconnect = 0; | |
2620 | boolean_t map_is_switched, map_is_switch_protected; | |
2621 | int cs_enforcement_enabled; | |
2622 | vm_prot_t fault_type; | |
2623 | ||
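| /* | |
| * A wiring change doesn't actually access the page, so it needs | |
| * no protections; otherwise use what the caller faulted for. | |
| */ | |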
2624 | fault_type = change_wiring ? VM_PROT_NONE : caller_prot; | |
2625 | ||
2626 | vm_object_lock_assert_held(m->object); | |
2627 | #if DEBUG | |
2628 | lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); | |
2629 | #endif /* DEBUG */ | |
2630 | ||
2631 | if (m->phys_page == vm_page_guard_addr) { | |
2632 | assert(m->fictitious); | |
2633 | return KERN_SUCCESS; | |
2634 | } | |
2635 | ||
2636 | if (*type_of_fault == DBG_ZERO_FILL_FAULT) { | |
2637 | ||
2638 | vm_object_lock_assert_exclusive(m->object); | |
2639 | ||
2640 | } else if ((fault_type & VM_PROT_WRITE) == 0) { | |
2641 | /* | |
2642 | * This is not a "write" fault, so we | |
2643 | * might not have taken the object lock | |
2644 | * exclusively and we might not be able | |
2645 | * to update the "wpmapped" bit in | |
2646 | * vm_fault_enter(). | |
2647 | * Let's just grant read access to | |
2648 | * the page for now and we'll | |
2649 | * soft-fault again if we need write | |
2650 | * access later... | |
2651 | */ | |
2652 | prot &= ~VM_PROT_WRITE; | |
2653 | } | |
2654 | if (m->pmapped == FALSE) { | |
2655 | ||
2656 | if (m->clustered) { | |
2657 | if (*type_of_fault == DBG_CACHE_HIT_FAULT) { | |
2658 | /* | |
2659 | * found it in the cache, but this | |
2660 | * is the first fault-in of the page (m->pmapped == FALSE) | |
2661 | * so it must have come in as part of | |
2662 | * a cluster... account 1 pagein against it | |
2663 | */ | |
2664 | if (m->object->internal) | |
2665 | *type_of_fault = DBG_PAGEIND_FAULT; | |
2666 | else | |
2667 | *type_of_fault = DBG_PAGEINV_FAULT; | |
2668 | ||
2669 | VM_PAGE_COUNT_AS_PAGEIN(m); | |
2670 | } | |
2671 | VM_PAGE_CONSUME_CLUSTERED(m); | |
2672 | } | |
2673 | } | |
2674 | ||
2675 | if (*type_of_fault != DBG_COW_FAULT) { | |
2676 | DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); | |
2677 | ||
2678 | if (pmap == kernel_pmap) { | |
2679 | DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); | |
2680 | } | |
2681 | } | |
2682 | ||
2683 | /* Validate code signature if necessary. */ | |
2684 | if (VM_FAULT_NEED_CS_VALIDATION(pmap, m)) { | |
2685 | vm_object_lock_assert_exclusive(m->object); | |
2686 | ||
2687 | if (m->cs_validated) { | |
2688 | vm_cs_revalidates++; | |
2689 | } | |
2690 | ||
2691 | /* VM map is locked, so 1 ref will remain on VM object - | |
2692 | * no harm if vm_page_validate_cs drops the object lock */ | |
2693 | vm_page_validate_cs(m); | |
2694 | } | |
2695 | ||
2696 | #define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/) | |
2697 | #define page_nx(m) ((m)->cs_nx) | |
2698 | ||
2699 | map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && | |
2700 | (pmap == vm_map_pmap(current_thread()->map))); | |
2701 | map_is_switch_protected = current_thread()->map->switch_protect; | |
2702 | ||
2703 | /* If the map is switched, and is switch-protected, we must protect | |
2704 | * some pages from being write-faulted: immutable pages because by | |
2705 | * definition they may not be written, and executable pages because that | |
2706 | * would provide a way to inject unsigned code. | |
2707 | * If the page is immutable, we can simply return. However, we can't | |
2708 | * immediately determine whether a page is executable anywhere. But, | |
2709 | * we can disconnect it everywhere and remove the executable protection | |
2710 | * from the current map. We do that below right before we do the | |
2711 | * PMAP_ENTER. | |
2712 | */ | |
2713 | cs_enforcement_enabled = cs_enforcement(NULL); | |
2714 | ||
2715 | if(cs_enforcement_enabled && map_is_switched && | |
2716 | map_is_switch_protected && page_immutable(m, prot) && | |
2717 | (prot & VM_PROT_WRITE)) | |
2718 | { | |
2719 | return KERN_CODESIGN_ERROR; | |
2720 | } | |
2721 | ||
2722 | if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) { | |
2723 | if (cs_debug) | |
2724 | printf("page marked to be NX, not letting it be mapped EXEC\n"); | |
2725 | return KERN_CODESIGN_ERROR; | |
2726 | } | |
2727 | ||
2728 | /* A page could be tainted, or pose a risk of being tainted later. | |
2729 | * Check whether the receiving process wants it, and make it feel | |
2730 | * the consequences (that happens in cs_invalid_page()). | |
2731 | * For CS Enforcement, two other conditions will | |
2732 | * cause that page to be tainted as well: | |
2733 | * - pmapping an unsigned page executable - this means unsigned code; | |
2734 | * - writeable mapping of a validated page - the content of that page | |
2735 | * can be changed without the kernel noticing, therefore unsigned | |
2736 | * code can be created | |
2737 | */ | |
2738 | if (!cs_bypass && | |
2739 | (m->cs_tainted || | |
2740 | (cs_enforcement_enabled && | |
2741 | (/* The page is unsigned and wants to be executable */ | |
2742 | (!m->cs_validated && (prot & VM_PROT_EXECUTE)) || | |
2743 | /* The page should be immutable, but is in danger of being modified | |
2744 | * This is the case where we want policy from the code directory - | |
2745 | * is the page immutable or not? For now we have to assume that | |
2746 | * code pages will be immutable, data pages not. | |
2747 | * We'll assume a page is a code page if it has a code directory | |
2748 | * and we fault for execution. | |
2749 | * That is good enough since if we faulted the code page for | |
2750 | * writing in another map before, it is wpmapped; if we fault | |
2751 | * it for writing in this map later it will also be faulted for executing | |
2752 | * at the same time; and if we fault for writing in another map | |
2753 | * later, we will disconnect it from this pmap so we'll notice | |
2754 | * the change. | |
2755 | */ | |
2756 | (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped)) | |
2757 | )) | |
2758 | )) | |
2759 | { | |
2760 | /* We will have a tainted page. Have to handle the special case | |
2761 | * of a switched map now. If the map is not switched, standard | |
2762 | * procedure applies - call cs_invalid_page(). | |
2763 | * If the map is switched, the real owner is invalid already. | |
2764 | * There is no point in invalidating the switching process since | |
2765 | * it will not be executing from the map. So we don't call | |
2766 | * cs_invalid_page() in that case. */ | |
2767 | boolean_t reject_page; | |
2768 | if(map_is_switched) { | |
2769 | assert(pmap==vm_map_pmap(current_thread()->map)); | |
2770 | assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); | |
2771 | reject_page = FALSE; | |
2772 | } else { | |
2773 | if (cs_debug > 5) | |
2774 | printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n", | |
2775 | m->object->code_signed ? "yes" : "no", | |
2776 | m->cs_validated ? "yes" : "no", | |
2777 | m->cs_tainted ? "yes" : "no", | |
2778 | m->wpmapped ? "yes" : "no", | |
2779 | m->slid ? "yes" : "no", | |
2780 | (int)prot); | |
2781 | reject_page = cs_invalid_page((addr64_t) vaddr); | |
2782 | } | |
2783 | ||
2784 | if (reject_page) { | |
2785 | /* reject the invalid page: abort the page fault */ | |
2786 | int pid; | |
2787 | const char *procname; | |
2788 | task_t task; | |
2789 | vm_object_t file_object, shadow; | |
2790 | vm_object_offset_t file_offset; | |
2791 | char *pathname, *filename; | |
2792 | vm_size_t pathname_len, filename_len; | |
2793 | boolean_t truncated_path; | |
2794 | #define __PATH_MAX 1024 | |
2795 | struct timespec mtime, cs_mtime; | |
2796 | ||
2797 | kr = KERN_CODESIGN_ERROR; | |
2798 | cs_enter_tainted_rejected++; | |
2799 | ||
2800 | /* get process name and pid */ | |
2801 | procname = "?"; | |
2802 | task = current_task(); | |
2803 | pid = proc_selfpid(); | |
2804 | if (task->bsd_info != NULL) | |
2805 | procname = proc_name_address(task->bsd_info); | |
2806 | ||
2807 | /* get file's VM object */ | |
2808 | file_object = m->object; | |
2809 | file_offset = m->offset; | |
2810 | for (shadow = file_object->shadow; | |
2811 | shadow != VM_OBJECT_NULL; | |
2812 | shadow = file_object->shadow) { | |
2813 | vm_object_lock_shared(shadow); | |
2814 | if (file_object != m->object) { | |
2815 | vm_object_unlock(file_object); | |
2816 | } | |
2817 | file_offset += file_object->vo_shadow_offset; | |
2818 | file_object = shadow; | |
2819 | } | |
2820 | ||
2821 | mtime.tv_sec = 0; | |
2822 | mtime.tv_nsec = 0; | |
2823 | cs_mtime.tv_sec = 0; | |
2824 | cs_mtime.tv_nsec = 0; | |
2825 | ||
2826 | /* get file's pathname and/or filename */ | |
2827 | pathname = NULL; | |
2828 | filename = NULL; | |
2829 | pathname_len = 0; | |
2830 | filename_len = 0; | |
2831 | truncated_path = FALSE; | |
2832 | /* no pager -> no file -> no pathname, use "<nil>" in that case */ | |
2833 | if (file_object->pager != NULL) { | |
2834 | pathname = (char *)kalloc(__PATH_MAX * 2); | |
2835 | if (pathname) { | |
2836 | pathname[0] = '\0'; | |
2837 | pathname_len = __PATH_MAX; | |
2838 | filename = pathname + pathname_len; | |
2839 | filename_len = __PATH_MAX; | |
2840 | } | |
2841 | vnode_pager_get_object_name(file_object->pager, | |
2842 | pathname, | |
2843 | pathname_len, | |
2844 | filename, | |
2845 | filename_len, | |
2846 | &truncated_path); | |
2847 | if (pathname) { | |
2848 | /* safety first... */ | |
2849 | pathname[__PATH_MAX-1] = '\0'; | |
2850 | filename[__PATH_MAX-1] = '\0'; | |
2851 | } | |
2852 | vnode_pager_get_object_mtime(file_object->pager, | |
2853 | &mtime, | |
2854 | &cs_mtime); | |
2855 | } | |
2856 | printf("CODE SIGNING: process %d[%s]: " | |
2857 | "rejecting invalid page at address 0x%llx " | |
2858 | "from offset 0x%llx in file \"%s%s%s\" " | |
2859 | "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " | |
2860 | "(signed:%d validated:%d tainted:%d " | |
2861 | "wpmapped:%d slid:%d)\n", | |
2862 | pid, procname, (addr64_t) vaddr, | |
2863 | file_offset, | |
2864 | (pathname ? pathname : "<nil>"), | |
2865 | (truncated_path ? "/.../" : ""), | |
2866 | (truncated_path ? filename : ""), | |
2867 | cs_mtime.tv_sec, cs_mtime.tv_nsec, | |
2868 | ((cs_mtime.tv_sec == mtime.tv_sec && | |
2869 | cs_mtime.tv_nsec == mtime.tv_nsec) | |
2870 | ? "==" | |
2871 | : "!="), | |
2872 | mtime.tv_sec, mtime.tv_nsec, | |
2873 | m->object->code_signed, | |
2874 | m->cs_validated, | |
2875 | m->cs_tainted, | |
2876 | m->wpmapped, | |
2877 | m->slid); | |
2878 | if (file_object != m->object) { | |
2879 | vm_object_unlock(file_object); | |
2880 | } | |
2881 | if (pathname_len != 0) { | |
2882 | kfree(pathname, __PATH_MAX * 2); | |
2883 | pathname = NULL; | |
2884 | filename = NULL; | |
2885 | } | |
2886 | } else { | |
2887 | /* proceed with the invalid page */ | |
2888 | kr = KERN_SUCCESS; | |
2889 | if (!m->cs_validated) { | |
2890 | /* | |
2891 | * This page has not been validated, so it | |
2892 | * must not belong to a code-signed object | |
2893 | * and should not be forcefully considered | |
2894 | * as tainted. | |
2895 | * We're just concerned about it here because | |
2896 | * we've been asked to "execute" it but that | |
2897 | * does not mean that it should cause other | |
2898 | * accesses to fail. | |
2899 | * This happens when a debugger sets a | |
2900 | * breakpoint and we then execute code in | |
2901 | * that page. Marking the page as "tainted" | |
2902 | * would cause any inspection tool ("leaks", | |
2903 | * "vmmap", "CrashReporter", ...) to get killed | |
2904 | * due to code-signing violation on that page, | |
2905 | * even though they're just reading it and not | |
2906 | * executing from it. | |
2907 | */ | |
2908 | assert(!m->object->code_signed); | |
2909 | } else { | |
2910 | /* | |
2911 | * Page might have been tainted before or not; | |
2912 | * now it definitively is. If the page wasn't | |
2913 | * tainted, we must disconnect it from all | |
2914 | * pmaps later, to force existing mappings | |
2915 | * through that code path for re-consideration | |
2916 | * of the validity of that page. | |
2917 | */ | |
2918 | must_disconnect = !m->cs_tainted; | |
2919 | m->cs_tainted = TRUE; | |
2920 | } | |
2921 | cs_enter_tainted_accepted++; | |
2922 | } | |
2923 | if (kr != KERN_SUCCESS) { | |
2924 | if (cs_debug) { | |
2925 | printf("CODESIGNING: vm_fault_enter(0x%llx): " | |
2926 | "*** INVALID PAGE ***\n", | |
2927 | (long long)vaddr); | |
2928 | } | |
2929 | #if !SECURE_KERNEL | |
2930 | if (cs_enforcement_panic) { | |
2931 | panic("CODESIGNING: panicking on invalid page\n"); | |
2932 | } | |
2933 | #endif | |
2934 | } | |
2935 | ||
2936 | } else { | |
2937 | /* proceed with the valid page */ | |
2938 | kr = KERN_SUCCESS; | |
2939 | } | |
2940 | ||
2941 | boolean_t page_queues_locked = FALSE; | |
2942 | #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ | |
2943 | MACRO_BEGIN \ | |
2944 | if (! page_queues_locked) { \ | |
2945 | page_queues_locked = TRUE; \ | |
2946 | vm_page_lockspin_queues(); \ | |
2947 | } \ | |
2948 | MACRO_END | |
2949 | #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ | |
2950 | MACRO_BEGIN \ | |
2951 | if (page_queues_locked) { \ | |
2952 | page_queues_locked = FALSE; \ | |
2953 | vm_page_unlock_queues(); \ | |
2954 | } \ | |
2955 | MACRO_END | |
2956 | ||
2957 | /* | |
2958 | * Hold queues lock to manipulate | |
2959 | * the page queues. Change wiring | |
2960 | * case is obvious. | |
2961 | */ | |
2962 | assert(m->compressor || m->object != compressor_object); | |
2963 | if (m->compressor) { | |
2964 | /* | |
2965 | * Compressor pages are neither wired | |
2966 | * nor pageable and should never change. | |
2967 | */ | |
2968 | assert(m->object == compressor_object); | |
2969 | } else if (change_wiring) { | |
2970 | __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); | |
2971 | ||
2972 | if (wired) { | |
2973 | if (kr == KERN_SUCCESS) { | |
2974 | vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); | |
2975 | } | |
2976 | } else { | |
2977 | vm_page_unwire(m, TRUE); | |
2978 | } | |
2979 | /* we keep the page queues lock, if we need it later */ | |
2980 | ||
2981 | } else { | |
2982 | if (kr != KERN_SUCCESS) { | |
2983 | __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); | |
2984 | vm_page_deactivate(m); | |
2985 | /* we keep the page queues lock, if we need it later */ | |
2986 | } else if (((!m->active && !m->inactive) || | |
2987 | m->clean_queue || | |
2988 | no_cache) && | |
2989 | !VM_PAGE_WIRED(m) && !m->throttled) { | |
2990 | ||
2991 | if (vm_page_local_q && | |
2992 | !no_cache && | |
2993 | (*type_of_fault == DBG_COW_FAULT || | |
2994 | *type_of_fault == DBG_ZERO_FILL_FAULT) ) { | |
2995 | struct vpl *lq; | |
2996 | uint32_t lid; | |
2997 | ||
2998 | __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); | |
2999 | vm_object_lock_assert_exclusive(m->object); | |
3000 | ||
3001 | /* | |
3002 | * we got a local queue to stuff this | |
3003 | * new page on... | |
3004 | * it's safe to manipulate local and | |
3005 | * local_id at this point since we're | |
3006 | * behind an exclusive object lock and | |
3007 | * the page is not on any global queue. | |
3008 | * | |
3009 | * we'll use the current cpu number to | |
3010 | * select the queue... note that we don't | |
3011 | * need to disable preemption... we're | |
3012 | * going to be behind the local queue's | |
3013 | * lock to do the real work | |
3014 | */ | |
3015 | lid = cpu_number(); | |
3016 | ||
3017 | lq = &vm_page_local_q[lid].vpl_un.vpl; | |
3018 | ||
3019 | VPL_LOCK(&lq->vpl_lock); | |
3020 | ||
3021 | vm_page_check_pageable_safe(m); | |
3022 | queue_enter(&lq->vpl_queue, m, | |
3023 | vm_page_t, pageq); | |
3024 | m->local = TRUE; | |
3025 | m->local_id = lid; | |
3026 | lq->vpl_count++; | |
3027 | ||
3028 | if (m->object->internal) | |
3029 | lq->vpl_internal_count++; | |
3030 | else | |
3031 | lq->vpl_external_count++; | |
3032 | ||
3033 | VPL_UNLOCK(&lq->vpl_lock); | |
3034 | ||
3035 | if (lq->vpl_count > vm_page_local_q_soft_limit) | |
3036 | { | |
3037 | /* | |
3038 | * we're beyond the soft limit | |
3039 | * for the local queue... | |
3040 | * vm_page_reactivate_local will | |
3041 | * 'try' to take the global page | |
3042 | * queue lock... if it can't | |
3043 | * that's ok... we'll let the | |
3044 | * queue continue to grow up | |
3045 | * to the hard limit... at that | |
3046 | * point we'll wait for the | |
3047 | * lock... once we've got the | |
3048 | * lock, we'll transfer all of | |
3049 | * the pages from the local | |
3050 | * queue to the global active | |
3051 | * queue | |
3052 | */ | |
3053 | vm_page_reactivate_local(lid, FALSE, FALSE); | |
3054 | } | |
3055 | } else { | |
3056 | ||
3057 | __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); | |
3058 | ||
3059 | /* | |
3060 | * test again now that we hold the | |
3061 | * page queue lock | |
3062 | */ | |
3063 | if (!VM_PAGE_WIRED(m)) { | |
3064 | if (m->clean_queue) { | |
3065 | vm_page_queues_remove(m); | |
3066 | ||
3067 | vm_pageout_cleaned_reactivated++; | |
3068 | vm_pageout_cleaned_fault_reactivated++; | |
3069 | } | |
3070 | ||
3071 | if ((!m->active && | |
3072 | !m->inactive) || | |
3073 | no_cache) { | |
3074 | /* | |
3075 | * If this is a no_cache mapping | |
3076 | * and the page has never been | |
3077 | * mapped before or was | |
3078 | * previously a no_cache page, | |
3079 | * then we want to leave pages | |
3080 | * in the speculative state so | |
3081 | * that they can be readily | |
3082 | * recycled if free memory runs | |
3083 | * low. Otherwise the page is | |
3084 | * activated as normal. | |
3085 | */ | |
3086 | ||
3087 | if (no_cache && | |
3088 | (!previously_pmapped || | |
3089 | m->no_cache)) { | |
3090 | m->no_cache = TRUE; | |
3091 | ||
3092 | if (!m->speculative) | |
3093 | vm_page_speculate(m, FALSE); | |
3094 | ||
3095 | } else if (!m->active && | |
3096 | !m->inactive) { | |
3097 | ||
3098 | vm_page_activate(m); | |
3099 | } | |
3100 | } | |
3101 | } | |
3102 | /* we keep the page queues lock, if we need it later */ | |
3103 | } | |
3104 | } | |
3105 | } | |
3106 | /* we're done with the page queues lock, if we ever took it */ | |
3107 | __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); | |
3108 | ||
3109 | ||
3110 | /* If we have a KERN_SUCCESS from the previous checks, we either have | |
3111 | * a good page, or a tainted page that has been accepted by the process. | |
3112 | * In both cases the page will be entered into the pmap. | |
3113 | * If the page is writeable, we need to disconnect it from other pmaps | |
3114 | * now so those processes can take note. | |
3115 | */ | |
3116 | if (kr == KERN_SUCCESS) { | |
3117 | ||
3118 | /* | |
3119 | * NOTE: we may only hold the vm_object lock SHARED | |
3120 | * at this point, so we need the phys_page lock to | |
3121 | * properly serialize updating the pmapped and | |
3122 | * xpmapped bits | |
3123 | */ | |
3124 | if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) { | |
3125 | ||
3126 | pmap_lock_phys_page(m->phys_page); | |
3127 | /* | |
3128 | * go ahead and take the opportunity | |
3129 | * to set 'pmapped' here so that we don't | |
3130 | * need to grab this lock a 2nd time | |
3131 | * just below | |
3132 | */ | |
3133 | m->pmapped = TRUE; | |
3134 | ||
3135 | if (!m->xpmapped) { | |
3136 | ||
3137 | m->xpmapped = TRUE; | |
3138 | ||
3139 | pmap_unlock_phys_page(m->phys_page); | |
3140 | ||
3141 | if (!m->object->internal) | |
3142 | OSAddAtomic(1, &vm_page_xpmapped_external_count); | |
3143 | ||
3144 | if ((COMPRESSED_PAGER_IS_ACTIVE) && | |
3145 | m->object->internal && | |
3146 | m->object->pager != NULL) { | |
3147 | /* | |
3148 | * This page could have been | |
3149 | * uncompressed by the | |
3150 | * compressor pager and its | |
3151 | * contents might be only in | |
3152 | * the data cache. | |
3153 | * Since it's being mapped for | |
3154 | * "execute" for the fist time, | |
3155 | * make sure the icache is in | |
3156 | * sync. | |
3157 | */ | |
3158 | pmap_sync_page_data_phys(m->phys_page); | |
3159 | } | |
3160 | } else | |
3161 | pmap_unlock_phys_page(m->phys_page); | |
3162 | } else { | |
3163 | if (m->pmapped == FALSE) { | |
3164 | pmap_lock_phys_page(m->phys_page); | |
3165 | m->pmapped = TRUE; | |
3166 | pmap_unlock_phys_page(m->phys_page); | |
3167 | } | |
3168 | } | |
3169 | if (vm_page_is_slideable(m)) { | |
3170 | boolean_t was_busy = m->busy; | |
3171 | ||
3172 | vm_object_lock_assert_exclusive(m->object); | |
3173 | ||
3174 | m->busy = TRUE; | |
3175 | kr = vm_page_slide(m, 0); | |
3176 | assert(m->busy); | |
3177 | if (!was_busy) { | |
3178 | PAGE_WAKEUP_DONE(m); | |
3179 | } | |
3180 | if (kr != KERN_SUCCESS) { | |
3181 | /* | |
3182 | * This page has not been slid correctly, | |
3183 | * do not do the pmap_enter()! | |
3184 | * Let vm_fault_enter() return the error | |
3185 | * so the caller can fail the fault. | |
3186 | */ | |
3187 | goto after_the_pmap_enter; | |
3188 | } | |
3189 | } | |
3190 | ||
3191 | if (fault_type & VM_PROT_WRITE) { | |
3192 | ||
3193 | if (m->wpmapped == FALSE) { | |
3194 | vm_object_lock_assert_exclusive(m->object); | |
3195 | ||
3196 | m->wpmapped = TRUE; | |
3197 | } | |
3198 | if (must_disconnect) { | |
3199 | /* | |
3200 | * We can only get here | |
3201 | * because of the CSE logic | |
3202 | */ | |
3203 | assert(cs_enforcement_enabled); | |
3204 | pmap_disconnect(m->phys_page); | |
3205 | /* | |
3206 | * If we are faulting for a write, we can clear | |
3207 | * the execute bit - that will ensure the page is | |
3208 | * checked again before being executable, which | |
3209 | * protects against a map switch. | |
3210 | * This only happens the first time the page | |
3211 | * gets tainted, so we won't get stuck here | |
3212 | * to make an already writeable page executable. | |
3213 | */ | |
3214 | if (!cs_bypass){ | |
3215 | prot &= ~VM_PROT_EXECUTE; | |
3216 | } | |
3217 | } | |
3218 | } | |
3219 | ||
3220 | /* Prevent a deadlock by not | |
3221 | * holding the object lock if we need to wait for a page in | |
3222 | * pmap_enter() - <rdar://problem/7138958> */ | |
3223 | PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0, | |
3224 | wired, | |
3225 | pmap_options | PMAP_OPTIONS_NOWAIT, | |
3226 | pe_result); | |
3227 | ||
3228 | if (pe_result == KERN_RESOURCE_SHORTAGE) { | |
3229 | ||
3230 | if (need_retry) { | |
3231 | /* | |
3232 | * this will be non-null in the case where we hold the lock | |
3233 | * on the top-object in this chain... we can't just drop | |
3234 | * the lock on the object we're inserting the page into | |
3235 | * and recall the PMAP_ENTER since we can still cause | |
3236 | * a deadlock if one of the critical paths tries to | |
3237 | * acquire the lock on the top-object and we're blocked | |
3238 | * in PMAP_ENTER waiting for memory... our only recourse | |
3239 | * is to deal with it at a higher level where we can | |
3240 | * drop both locks. | |
3241 | */ | |
3242 | *need_retry = TRUE; | |
3243 | vm_pmap_enter_retried++; | |
3244 | goto after_the_pmap_enter; | |
3245 | } | |
3246 | /* The nonblocking version of pmap_enter did not succeed, | |
3247 | * and we don't need to drop other locks and retry | |
3248 | * at the level above us, so | |
3249 | * use the blocking version instead. Requires marking | |
3250 | * the page busy and unlocking the object */ | |
3251 | boolean_t was_busy = m->busy; | |
3252 | ||
3253 | vm_object_lock_assert_exclusive(m->object); | |
3254 | ||
3255 | m->busy = TRUE; | |
3256 | vm_object_unlock(m->object); | |
3257 | ||
3258 | PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, | |
3259 | 0, wired, | |
3260 | pmap_options, pe_result); | |
3261 | ||
3262 | /* Take the object lock again. */ | |
3263 | vm_object_lock(m->object); | |
3264 | ||
3265 | /* If the page was busy, someone else will wake it up. | |
3266 | * Otherwise, we have to do it now. */ | |
3267 | assert(m->busy); | |
3268 | if (!was_busy) { | |
3269 | PAGE_WAKEUP_DONE(m); | |
3270 | } | |
3271 | vm_pmap_enter_blocked++; | |
3272 | } | |
3273 | } | |
3274 | ||
3275 | after_the_pmap_enter: | |
3276 | return kr; | |
3277 | } | |
3278 | ||
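| /* | |
| * vm_pre_fault: | |
| * | |
| * If "vaddr" has no valid translation in the current map's pmap, | |
| * drive an ordinary read fault now so that the page is resident and | |
| * mapped before the caller touches it (for instance, a caller about | |
| * to copy data through that address; the scenario is illustrative, | |
| * not a statement about who actually calls this routine). | |
| */ | |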
3279 | void | |
3280 | vm_pre_fault(vm_map_offset_t vaddr) | |
3281 | { | |
3282 | if (pmap_find_phys(current_map()->pmap, vaddr) == 0) { | |
3283 | ||
3284 | vm_fault(current_map(), /* map */ | |
3285 | vaddr, /* vaddr */ | |
3286 | VM_PROT_READ, /* fault_type */ | |
3287 | FALSE, /* change_wiring */ | |
3288 | THREAD_UNINT, /* interruptible */ | |
3289 | NULL, /* caller_pmap */ | |
3290 | 0 /* caller_pmap_addr */); | |
3291 | } | |
3292 | } | |
3293 | ||
3294 | ||
3295 | /* | |
3296 | * Routine: vm_fault | |
3297 | * Purpose: | |
3298 | * Handle page faults, including pseudo-faults | |
3299 | * used to change the wiring status of pages. | |
3300 | * Returns: | |
3301 | * Explicit continuations have been removed. | |
3302 | * Implementation: | |
3303 | * vm_fault and vm_fault_page save mucho state | |
3304 | * in the moral equivalent of a closure. The state | |
3305 | * structure is allocated when first entering vm_fault | |
3306 | * and deallocated when leaving vm_fault. | |
3307 | */ | |
3308 | ||
3309 | extern int _map_enter_debug; | |
3310 | ||
3311 | unsigned long vm_fault_collapse_total = 0; | |
3312 | unsigned long vm_fault_collapse_skipped = 0; | |
3313 | ||
3314 | ||
3315 | kern_return_t | |
3316 | vm_fault( | |
3317 | vm_map_t map, | |
3318 | vm_map_offset_t vaddr, | |
3319 | vm_prot_t fault_type, | |
3320 | boolean_t change_wiring, | |
3321 | int interruptible, | |
3322 | pmap_t caller_pmap, | |
3323 | vm_map_offset_t caller_pmap_addr) | |
3324 | { | |
3325 | return vm_fault_internal(map, vaddr, fault_type, change_wiring, | |
3326 | interruptible, caller_pmap, caller_pmap_addr, | |
3327 | NULL); | |
3328 | } | |
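| /* | |
| * Illustrative use (hypothetical caller, not part of this file): | |
| * a trap handler that has decoded the faulting address and the | |
| * access type could resolve the fault roughly as | |
| * | |
| *     kr = vm_fault(current_map(),              (map to fault in) | |
| *                   fault_addr,                 (page-aligned fault address) | |
| *                   VM_PROT_READ | VM_PROT_WRITE, | |
| *                   FALSE,                      (not a wiring change) | |
| *                   THREAD_ABORTSAFE,           (allow aborts for a user fault) | |
| *                   NULL, 0);                   (no caller-supplied pmap) | |
| * | |
| * the interruptible policy and any address truncation are the | |
| * caller's choice; the values above are only an example. | |
| */ | |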
3329 | ||
3330 | ||
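| /* | |
| * vm_fault_internal: | |
| * | |
| * Same contract as vm_fault(), plus an optional "physpage_p" out | |
| * parameter (used by vm_map_wire_and_extract() to learn which | |
| * physical page was entered). Returns KERN_SUCCESS or the error | |
| * produced while resolving the fault; if called with preemption | |
| * disabled it fails immediately with KERN_FAILURE, since the fault | |
| * path may need to block. | |
| */ | |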
3331 | kern_return_t | |
3332 | vm_fault_internal( | |
3333 | vm_map_t map, | |
3334 | vm_map_offset_t vaddr, | |
3335 | vm_prot_t caller_prot, | |
3336 | boolean_t change_wiring, | |
3337 | int interruptible, | |
3338 | pmap_t caller_pmap, | |
3339 | vm_map_offset_t caller_pmap_addr, | |
3340 | ppnum_t *physpage_p) | |
3341 | { | |
3342 | vm_map_version_t version; /* Map version for verification */ | |
3343 | boolean_t wired; /* Should mapping be wired down? */ | |
3344 | vm_object_t object; /* Top-level object */ | |
3345 | vm_object_offset_t offset; /* Top-level offset */ | |
3346 | vm_prot_t prot; /* Protection for mapping */ | |
3347 | vm_object_t old_copy_object; /* Saved copy object */ | |
3348 | vm_page_t result_page; /* Result of vm_fault_page */ | |
3349 | vm_page_t top_page; /* Placeholder page */ | |
3350 | kern_return_t kr; | |
3351 | ||
3352 | vm_page_t m; /* Fast access to result_page */ | |
3353 | kern_return_t error_code; | |
3354 | vm_object_t cur_object; | |
3355 | vm_object_offset_t cur_offset; | |
3356 | vm_page_t cur_m; | |
3357 | vm_object_t new_object; | |
3358 | int type_of_fault; | |
3359 | pmap_t pmap; | |
3360 | boolean_t interruptible_state; | |
3361 | vm_map_t real_map = map; | |
3362 | vm_map_t original_map = map; | |
3363 | vm_prot_t fault_type; | |
3364 | vm_prot_t original_fault_type; | |
3365 | struct vm_object_fault_info fault_info; | |
3366 | boolean_t need_collapse = FALSE; | |
3367 | boolean_t need_retry = FALSE; | |
3368 | boolean_t *need_retry_ptr = NULL; | |
3369 | int object_lock_type = 0; | |
3370 | int cur_object_lock_type; | |
3371 | vm_object_t top_object = VM_OBJECT_NULL; | |
3372 | int throttle_delay; | |
3373 | int compressed_count_delta; | |
3374 | ||
3375 | ||
3376 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, | |
3377 | (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, | |
3378 | ((uint64_t)vaddr >> 32), | |
3379 | vaddr, | |
3380 | (map == kernel_map), | |
3381 | 0, | |
3382 | 0); | |
3383 | ||
3384 | if (get_preemption_level() != 0) { | |
3385 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, | |
3386 | (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, | |
3387 | ((uint64_t)vaddr >> 32), | |
3388 | vaddr, | |
3389 | KERN_FAILURE, | |
3390 | 0, | |
3391 | 0); | |
3392 | ||
3393 | return (KERN_FAILURE); | |
3394 | } | |
3395 | ||
3396 | interruptible_state = thread_interrupt_level(interruptible); | |
3397 | ||
3398 | fault_type = (change_wiring ? VM_PROT_NONE : caller_prot); | |
3399 | ||
3400 | VM_STAT_INCR(faults); | |
3401 | current_task()->faults++; | |
3402 | original_fault_type = fault_type; | |
3403 | ||
3404 | if (fault_type & VM_PROT_WRITE) | |
3405 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3406 | else | |
3407 | object_lock_type = OBJECT_LOCK_SHARED; | |
3408 | ||
3409 | cur_object_lock_type = OBJECT_LOCK_SHARED; | |
3410 | ||
3411 | RetryFault: | |
3412 | /* | |
3413 | * assume we will hit a page in the cache... | |
3414 | * otherwise, explicitly override with | |
3415 | * the real fault type once we determine it | |
3416 | */ | |
3417 | type_of_fault = DBG_CACHE_HIT_FAULT; | |
3418 | ||
3419 | /* | |
3420 | * Find the backing store object and offset into | |
3421 | * it to begin the search. | |
3422 | */ | |
3423 | fault_type = original_fault_type; | |
3424 | map = original_map; | |
3425 | vm_map_lock_read(map); | |
3426 | ||
3427 | kr = vm_map_lookup_locked(&map, vaddr, fault_type, | |
3428 | object_lock_type, &version, | |
3429 | &object, &offset, &prot, &wired, | |
3430 | &fault_info, | |
3431 | &real_map); | |
3432 | ||
3433 | ||
3434 | if (kr != KERN_SUCCESS) { | |
3435 | vm_map_unlock_read(map); | |
3436 | goto done; | |
3437 | } | |
3438 | pmap = real_map->pmap; | |
3439 | fault_info.interruptible = interruptible; | |
3440 | fault_info.stealth = FALSE; | |
3441 | fault_info.io_sync = FALSE; | |
3442 | fault_info.mark_zf_absent = FALSE; | |
3443 | fault_info.batch_pmap_op = FALSE; | |
3444 | ||
3445 | /* | |
3446 | * If the page is wired, we must fault for the current protection | |
3447 | * value, to avoid further faults. | |
3448 | */ | |
3449 | if (wired) { | |
3450 | fault_type = prot | VM_PROT_WRITE; | |
3451 | /* | |
3452 | * since we're treating this fault as a 'write' | |
3453 | * we must hold the top object lock exclusively | |
3454 | */ | |
3455 | if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3456 | ||
3457 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3458 | ||
3459 | if (vm_object_lock_upgrade(object) == FALSE) { | |
3460 | /* | |
3461 | * couldn't upgrade, so explicitly | |
3462 | * take the lock exclusively | |
3463 | */ | |
3464 | vm_object_lock(object); | |
3465 | } | |
3466 | } | |
3467 | } | |
3468 | ||
3469 | #if VM_FAULT_CLASSIFY | |
3470 | /* | |
3471 | * Temporary data gathering code | |
3472 | */ | |
3473 | vm_fault_classify(object, offset, fault_type); | |
3474 | #endif | |
3475 | /* | |
3476 | * Fast fault code. The basic idea is to do as much as | |
3477 | * possible while holding the map lock and object locks. | |
3478 | * Busy pages are not used until the object lock has to | |
3479 | * be dropped to do something (copy, zero fill, pmap enter). | |
3480 | * Similarly, paging references aren't acquired until that | |
3481 | * point, and object references aren't used. | |
3482 | * | |
3483 | * If we can figure out what to do | |
3484 | * (zero fill, copy on write, pmap enter) while holding | |
3485 | * the locks, then it gets done. Otherwise, we give up, | |
3486 | * and use the original fault path (which doesn't hold | |
3487 | * the map lock, and relies on busy pages). | |
3488 | * The give up cases include: | |
3489 | * - Have to talk to pager. | |
3490 | * - Page is busy, absent or in error. | |
3491 | * - Pager has locked out desired access. | |
3492 | * - Fault needs to be restarted. | |
3493 | * - Have to push page into copy object. | |
3494 | * | |
3495 | * The code is an infinite loop that moves one level down | |
3496 | * the shadow chain each time. cur_object and cur_offset | |
3497 | * refer to the current object being examined. object and offset | |
3498 | * are the original object from the map. The loop is at the | |
3499 | * top level if and only if object and cur_object are the same. | |
3500 | * | |
3501 | * Invariants: Map lock is held throughout. Lock is held on | |
3502 | * original object and cur_object (if different) when | |
3503 | * continuing or exiting loop. | |
3504 | * | |
3505 | */ | |
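| /* | |
| * Illustrative example (hypothetical objects "O" and "V"): for a | |
| * private copy-on-write mapping whose top object O shadows a | |
| * pager-backed object V, a read fault on a page resident only in V | |
| * descends the chain (object == O, cur_object == V), finds the page | |
| * and maps it via FastPmapEnter. A later write fault on that page | |
| * takes the COPY ON WRITE path below: vm_page_grab() a new page, | |
| * vm_page_copy() the contents from V's page, vm_page_insert() the | |
| * copy into O at the faulting offset, then continue to FastPmapEnter | |
| * with the new page. | |
| */ | |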
3506 | ||
3507 | ||
3508 | /* | |
3509 | * If this page is to be inserted in a copy delay object | |
3510 | * for writing, and if the object has a copy, then the | |
3511 | * copy delay strategy is handled by the slow path (vm_fault_page). | |
3512 | */ | |
3513 | if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY && | |
3514 | object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) | |
3515 | goto handle_copy_delay; | |
3516 | ||
3517 | cur_object = object; | |
3518 | cur_offset = offset; | |
3519 | ||
3520 | while (TRUE) { | |
3521 | if (!cur_object->pager_created && | |
3522 | cur_object->phys_contiguous) /* superpage */ | |
3523 | break; | |
3524 | ||
3525 | if (cur_object->blocked_access) { | |
3526 | /* | |
3527 | * Access to this VM object has been blocked. | |
3528 | * Let the slow path handle it. | |
3529 | */ | |
3530 | break; | |
3531 | } | |
3532 | ||
3533 | m = vm_page_lookup(cur_object, cur_offset); | |
3534 | ||
3535 | if (m != VM_PAGE_NULL) { | |
3536 | if (m->busy) { | |
3537 | wait_result_t result; | |
3538 | ||
3539 | /* | |
3540 | * in order to do the PAGE_ASSERT_WAIT, we must | |
3541 | * have the object that 'm' belongs to locked exclusively | |
3542 | */ | |
3543 | if (object != cur_object) { | |
3544 | ||
3545 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
3546 | ||
3547 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3548 | ||
3549 | if (vm_object_lock_upgrade(cur_object) == FALSE) { | |
3550 | /* | |
3551 | * couldn't upgrade so go do a full retry | |
3552 | * immediately since we can no longer be | |
3553 | * certain about cur_object (since we | |
3554 | * don't hold a reference on it)... | |
3555 | * first drop the top object lock | |
3556 | */ | |
3557 | vm_object_unlock(object); | |
3558 | ||
3559 | vm_map_unlock_read(map); | |
3560 | if (real_map != map) | |
3561 | vm_map_unlock(real_map); | |
3562 | ||
3563 | goto RetryFault; | |
3564 | } | |
3565 | } | |
3566 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3567 | ||
3568 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3569 | ||
3570 | if (vm_object_lock_upgrade(object) == FALSE) { | |
3571 | /* | |
3572 | * couldn't upgrade, so explicitly take the lock | |
3573 | * exclusively and go relookup the page since we | |
3574 | * will have dropped the object lock and | |
3575 | * a different thread could have inserted | |
3576 | * a page at this offset | |
3577 | * no need for a full retry since we're | |
3578 | * at the top level of the object chain | |
3579 | */ | |
3580 | vm_object_lock(object); | |
3581 | ||
3582 | continue; | |
3583 | } | |
3584 | } | |
3585 | if (m->pageout_queue && m->object->internal && COMPRESSED_PAGER_IS_ACTIVE) { | |
3586 | /* | |
3587 | * m->busy == TRUE and the object is locked exclusively | |
3588 | * if m->pageout_queue == TRUE after we acquire the | |
3589 | * queues lock, we are guaranteed that it is stable on | |
3590 | * the pageout queue and therefore reclaimable | |
3591 | * | |
3592 | * NOTE: this is only true for the internal pageout queue | |
3593 | * in the compressor world | |
3594 | */ | |
3595 | vm_page_lock_queues(); | |
3596 | ||
3597 | if (m->pageout_queue) { | |
3598 | vm_pageout_throttle_up(m); | |
3599 | vm_page_unlock_queues(); | |
3600 | ||
3601 | PAGE_WAKEUP_DONE(m); | |
3602 | goto reclaimed_from_pageout; | |
3603 | } | |
3604 | vm_page_unlock_queues(); | |
3605 | } | |
3606 | if (object != cur_object) | |
3607 | vm_object_unlock(object); | |
3608 | ||
3609 | vm_map_unlock_read(map); | |
3610 | if (real_map != map) | |
3611 | vm_map_unlock(real_map); | |
3612 | ||
3613 | result = PAGE_ASSERT_WAIT(m, interruptible); | |
3614 | ||
3615 | vm_object_unlock(cur_object); | |
3616 | ||
3617 | if (result == THREAD_WAITING) { | |
3618 | result = thread_block(THREAD_CONTINUE_NULL); | |
3619 | ||
3620 | counter(c_vm_fault_page_block_busy_kernel++); | |
3621 | } | |
3622 | if (result == THREAD_AWAKENED || result == THREAD_RESTART) | |
3623 | goto RetryFault; | |
3624 | ||
3625 | kr = KERN_ABORTED; | |
3626 | goto done; | |
3627 | } | |
3628 | reclaimed_from_pageout: | |
3629 | if (m->laundry) { | |
3630 | if (object != cur_object) { | |
3631 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
3632 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3633 | ||
3634 | vm_object_unlock(object); | |
3635 | vm_object_unlock(cur_object); | |
3636 | ||
3637 | vm_map_unlock_read(map); | |
3638 | if (real_map != map) | |
3639 | vm_map_unlock(real_map); | |
3640 | ||
3641 | goto RetryFault; | |
3642 | } | |
3643 | ||
3644 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3645 | ||
3646 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3647 | ||
3648 | if (vm_object_lock_upgrade(object) == FALSE) { | |
3649 | /* | |
3650 | * couldn't upgrade, so explicitly take the lock | |
3651 | * exclusively and go relookup the page since we | |
3652 | * will have dropped the object lock and | |
3653 | * a different thread could have inserted | |
3654 | * a page at this offset | |
3655 | * no need for a full retry since we're | |
3656 | * at the top level of the object chain | |
3657 | */ | |
3658 | vm_object_lock(object); | |
3659 | ||
3660 | continue; | |
3661 | } | |
3662 | } | |
3663 | m->pageout = FALSE; | |
3664 | ||
3665 | vm_pageout_steal_laundry(m, FALSE); | |
3666 | } | |
3667 | ||
3668 | if (m->phys_page == vm_page_guard_addr) { | |
3669 | /* | |
3670 | * Guard page: let the slow path deal with it | |
3671 | */ | |
3672 | break; | |
3673 | } | |
3674 | if (m->unusual && (m->error || m->restart || m->private || m->absent)) { | |
3675 | /* | |
3676 | * Unusual case... let the slow path deal with it | |
3677 | */ | |
3678 | break; | |
3679 | } | |
3680 | if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m->object)) { | |
3681 | if (object != cur_object) | |
3682 | vm_object_unlock(object); | |
3683 | vm_map_unlock_read(map); | |
3684 | if (real_map != map) | |
3685 | vm_map_unlock(real_map); | |
3686 | vm_object_unlock(cur_object); | |
3687 | kr = KERN_MEMORY_ERROR; | |
3688 | goto done; | |
3689 | } | |
3690 | ||
3691 | if (m->encrypted) { | |
3692 | /* | |
3693 | * ENCRYPTED SWAP: | |
3694 | * We've soft-faulted (because it's not in the page | |
3695 | * table) on an encrypted page. | |
3696 | * Keep the page "busy" so that no one messes with | |
3697 | * it during the decryption. | |
3698 | * Release the extra locks we're holding, keep only | |
3699 | * the page's VM object lock. | |
3700 | * | |
3701 | * in order to set 'busy' on 'm', we must | |
3702 | * have the object that 'm' belongs to locked exclusively | |
3703 | */ | |
3704 | if (object != cur_object) { | |
3705 | vm_object_unlock(object); | |
3706 | ||
3707 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
3708 | ||
3709 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3710 | ||
3711 | if (vm_object_lock_upgrade(cur_object) == FALSE) { | |
3712 | /* | |
3713 | * couldn't upgrade so go do a full retry | |
3714 | * immediately since we've already dropped | |
3715 | * the top object lock associated with this page | |
3716 | * and the current one got dropped due to the | |
3717 | * failed upgrade... the state is no longer valid | |
3718 | */ | |
3719 | vm_map_unlock_read(map); | |
3720 | if (real_map != map) | |
3721 | vm_map_unlock(real_map); | |
3722 | ||
3723 | goto RetryFault; | |
3724 | } | |
3725 | } | |
3726 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3727 | ||
3728 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3729 | ||
3730 | if (vm_object_lock_upgrade(object) == FALSE) { | |
3731 | /* | |
3732 | * couldn't upgrade, so explicitly take the lock | |
3733 | * exclusively and go relookup the page since we | |
3734 | * will have dropped the object lock and | |
3735 | * a different thread could have inserted | |
3736 | * a page at this offset | |
3737 | * no need for a full retry since we're | |
3738 | * at the top level of the object chain | |
3739 | */ | |
3740 | vm_object_lock(object); | |
3741 | ||
3742 | continue; | |
3743 | } | |
3744 | } | |
3745 | m->busy = TRUE; | |
3746 | ||
3747 | vm_map_unlock_read(map); | |
3748 | if (real_map != map) | |
3749 | vm_map_unlock(real_map); | |
3750 | ||
3751 | vm_page_decrypt(m, 0); | |
3752 | ||
3753 | assert(m->busy); | |
3754 | PAGE_WAKEUP_DONE(m); | |
3755 | ||
3756 | vm_object_unlock(cur_object); | |
3757 | /* | |
3758 | * Retry from the top, in case anything | |
3759 | * changed while we were decrypting... | |
3760 | */ | |
3761 | goto RetryFault; | |
3762 | } | |
3763 | ASSERT_PAGE_DECRYPTED(m); | |
3764 | ||
3765 | if (vm_page_is_slideable(m)) { | |
3766 | /* | |
3767 | * We might need to slide this page, and so, | |
3768 | * we want to hold the VM object exclusively. | |
3769 | */ | |
3770 | if (object != cur_object) { | |
3771 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
3772 | vm_object_unlock(object); | |
3773 | vm_object_unlock(cur_object); | |
3774 | ||
3775 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3776 | ||
3777 | vm_map_unlock_read(map); | |
3778 | if (real_map != map) | |
3779 | vm_map_unlock(real_map); | |
3780 | ||
3781 | goto RetryFault; | |
3782 | } | |
3783 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3784 | ||
3785 | vm_object_unlock(object); | |
3786 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3787 | vm_map_unlock_read(map); | |
3788 | goto RetryFault; | |
3789 | } | |
3790 | } | |
3791 | ||
3792 | if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m) || | |
3793 | (physpage_p != NULL && (prot & VM_PROT_WRITE))) { | |
3794 | upgrade_for_validation: | |
3795 | /* | |
3796 | * We might need to validate this page | |
3797 | * against its code signature, so we | |
3798 | * want to hold the VM object exclusively. | |
3799 | */ | |
3800 | if (object != cur_object) { | |
3801 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
3802 | vm_object_unlock(object); | |
3803 | vm_object_unlock(cur_object); | |
3804 | ||
3805 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3806 | ||
3807 | vm_map_unlock_read(map); | |
3808 | if (real_map != map) | |
3809 | vm_map_unlock(real_map); | |
3810 | ||
3811 | goto RetryFault; | |
3812 | } | |
3813 | ||
3814 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
3815 | ||
3816 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
3817 | ||
3818 | if (vm_object_lock_upgrade(object) == FALSE) { | |
3819 | /* | |
3820 | * couldn't upgrade, so explicitly take the lock | |
3821 | * exclusively and go relookup the page since we | |
3822 | * will have dropped the object lock and | |
3823 | * a different thread could have inserted | |
3824 | * a page at this offset | |
3825 | * no need for a full retry since we're | |
3826 | * at the top level of the object chain | |
3827 | */ | |
3828 | vm_object_lock(object); | |
3829 | ||
3830 | continue; | |
3831 | } | |
3832 | } | |
3833 | } | |
3834 | /* | |
3835 | * Two cases of map in faults: | |
3836 | * - At top level w/o copy object. | |
3837 | * - Read fault anywhere. | |
3838 | * --> must disallow write. | |
3839 | */ | |
3840 | ||
3841 | if (object == cur_object && object->copy == VM_OBJECT_NULL) { | |
3842 | ||
3843 | goto FastPmapEnter; | |
3844 | } | |
3845 | ||
3846 | if ((fault_type & VM_PROT_WRITE) == 0) { | |
3847 | ||
3848 | if (object != cur_object) { | |
3849 | /* | |
3850 | * We still need to hold the top object | |
3851 | * lock here to prevent a race between | |
3852 | * a read fault (taking only "shared" | |
3853 | * locks) and a write fault (taking | |
3854 | * an "exclusive" lock on the top | |
3855 | * object). | |
3856 | * Otherwise, as soon as we release the | |
3857 | * top lock, the write fault could | |
3858 | * proceed and actually complete before | |
3859 | * the read fault, and the copied page's | |
3860 | * translation could then be overwritten | |
3861 | * by the read fault's translation for | |
3862 | * the original page. | |
3863 | * | |
3864 | * Let's just record what the top object | |
3865 | * is and we'll release it later. | |
3866 | */ | |
3867 | top_object = object; | |
3868 | ||
3869 | /* | |
3870 | * switch to the object that has the new page | |
3871 | */ | |
3872 | object = cur_object; | |
3873 | object_lock_type = cur_object_lock_type; | |
3874 | } | |
3875 | FastPmapEnter: | |
3876 | /* | |
3877 | * prepare for the pmap_enter... | |
3878 | * object and map are both locked | |
3879 | * m contains valid data | |
3880 | * object == m->object | |
3881 | * cur_object == NULL or it's been unlocked | |
3882 | * no paging references on either object or cur_object | |
3883 | */ | |
3884 | if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) | |
3885 | need_retry_ptr = &need_retry; | |
3886 | else | |
3887 | need_retry_ptr = NULL; | |
3888 | ||
3889 | if (caller_pmap) { | |
3890 | kr = vm_fault_enter(m, | |
3891 | caller_pmap, | |
3892 | caller_pmap_addr, | |
3893 | prot, | |
3894 | caller_prot, | |
3895 | wired, | |
3896 | change_wiring, | |
3897 | fault_info.no_cache, | |
3898 | fault_info.cs_bypass, | |
3899 | fault_info.user_tag, | |
3900 | fault_info.pmap_options, | |
3901 | need_retry_ptr, | |
3902 | &type_of_fault); | |
3903 | } else { | |
3904 | kr = vm_fault_enter(m, | |
3905 | pmap, | |
3906 | vaddr, | |
3907 | prot, | |
3908 | caller_prot, | |
3909 | wired, | |
3910 | change_wiring, | |
3911 | fault_info.no_cache, | |
3912 | fault_info.cs_bypass, | |
3913 | fault_info.user_tag, | |
3914 | fault_info.pmap_options, | |
3915 | need_retry_ptr, | |
3916 | &type_of_fault); | |
3917 | } | |
3918 | ||
3919 | if (kr == KERN_SUCCESS && | |
3920 | physpage_p != NULL) { | |
3921 | /* for vm_map_wire_and_extract() */ | |
3922 | *physpage_p = m->phys_page; | |
3923 | if (prot & VM_PROT_WRITE) { | |
3924 | vm_object_lock_assert_exclusive( | |
3925 | m->object); | |
3926 | m->dirty = TRUE; | |
3927 | } | |
3928 | } | |
3929 | ||
3930 | if (top_object != VM_OBJECT_NULL) { | |
3931 | /* | |
3932 | * It's safe to drop the top object | |
3933 | * now that we've done our | |
3934 | * vm_fault_enter(). Any other fault | |
3935 | * in progress for that virtual | |
3936 | * address will either find our page | |
3937 | * and translation or put in a new page | |
3938 | * and translation. | |
3939 | */ | |
3940 | vm_object_unlock(top_object); | |
3941 | top_object = VM_OBJECT_NULL; | |
3942 | } | |
3943 | ||
3944 | if (need_collapse == TRUE) | |
3945 | vm_object_collapse(object, offset, TRUE); | |
3946 | ||
3947 | if (need_retry == FALSE && | |
3948 | (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { | |
3949 | /* | |
3950 | * evaluate access pattern and update state | |
3951 | * vm_fault_deactivate_behind depends on the | |
3952 | * state being up to date | |
3953 | */ | |
3954 | vm_fault_is_sequential(object, cur_offset, fault_info.behavior); | |
3955 | ||
3956 | vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior); | |
3957 | } | |
3958 | /* | |
3959 | * That's it, clean up and return. | |
3960 | */ | |
3961 | if (m->busy) | |
3962 | PAGE_WAKEUP_DONE(m); | |
3963 | ||
3964 | vm_object_unlock(object); | |
3965 | ||
3966 | vm_map_unlock_read(map); | |
3967 | if (real_map != map) | |
3968 | vm_map_unlock(real_map); | |
3969 | ||
3970 | if (need_retry == TRUE) { | |
3971 | /* | |
3972 | * vm_fault_enter couldn't complete the PMAP_ENTER... | |
3973 | * at this point we don't hold any locks so it's safe | |
3974 | * to ask the pmap layer to expand the page table to | |
3975 | * accommodate this mapping... once expanded, we'll | |
3976 | * re-drive the fault which should result in vm_fault_enter | |
3977 | * being able to successfully enter the mapping this time around | |
3978 | */ | |
3979 | (void)pmap_enter_options( | |
3980 | pmap, vaddr, 0, 0, 0, 0, 0, | |
3981 | PMAP_OPTIONS_NOENTER, NULL); | |
3982 | ||
3983 | need_retry = FALSE; | |
3984 | goto RetryFault; | |
3985 | } | |
3986 | goto done; | |
3987 | } | |
3988 | /* | |
3989 | * COPY ON WRITE FAULT | |
3990 | */ | |
3991 | assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE); | |
3992 | ||
3993 | /* | |
3994 | * If objects match, then | |
3995 | * object->copy must not be NULL (else control | |
3996 | * would be in the previous code block), and we | |
3997 | * have a potential push into the copy object | |
3998 | * with which we can't cope here. | |
3999 | */ | |
4000 | if (cur_object == object) { | |
4001 | /* | |
4002 | * must take the slow path to | |
4003 | * deal with the copy push | |
4004 | */ | |
4005 | break; | |
4006 | } | |
4007 | ||
4008 | /* | |
4009 | * This is now a shadow based copy on write | |
4010 | * fault -- it requires a copy up the shadow | |
4011 | * chain. | |
4012 | */ | |
4013 | ||
4014 | if ((cur_object_lock_type == OBJECT_LOCK_SHARED) && | |
4015 | VM_FAULT_NEED_CS_VALIDATION(NULL, m)) { | |
4016 | goto upgrade_for_validation; | |
4017 | } | |
4018 | ||
4019 | /* | |
4020 | * Allocate a page in the original top level | |
4021 | * object. Give up if allocate fails. Also | |
4022 | * need to remember current page, as it's the | |
4023 | * source of the copy. | |
4024 | * | |
4025 | * at this point we hold locks on both | |
4026 | * object and cur_object... no need to take | |
4027 | * paging refs or mark pages BUSY since | |
4028 | * we don't drop either object lock until | |
4029 | * the page has been copied and inserted | |
4030 | */ | |
4031 | cur_m = m; | |
4032 | m = vm_page_grab(); | |
4033 | ||
4034 | if (m == VM_PAGE_NULL) { | |
4035 | /* | |
4036 | * no free page currently available... | |
4037 | * must take the slow path | |
4038 | */ | |
4039 | break; | |
4040 | } | |
4041 | /* | |
4042 | * Now do the copy. Mark the source page busy... | |
4043 | * | |
4044 | * NOTE: This code holds the map lock across | |
4045 | * the page copy. | |
4046 | */ | |
4047 | vm_page_copy(cur_m, m); | |
4048 | vm_page_insert(m, object, offset); | |
4049 | SET_PAGE_DIRTY(m, FALSE); | |
4050 | ||
4051 | /* | |
4052 | * Now cope with the source page and object | |
4053 | */ | |
4054 | if (object->ref_count > 1 && cur_m->pmapped) | |
4055 | pmap_disconnect(cur_m->phys_page); | |
4056 | ||
4057 | if (cur_m->clustered) { | |
4058 | VM_PAGE_COUNT_AS_PAGEIN(cur_m); | |
4059 | VM_PAGE_CONSUME_CLUSTERED(cur_m); | |
4060 | } | |
4061 | need_collapse = TRUE; | |
4062 | ||
4063 | if (!cur_object->internal && | |
4064 | cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) { | |
4065 | /* | |
4066 | * The object from which we've just | |
4067 | * copied a page is most probably backed | |
4068 | * by a vnode. We don't want to waste too | |
4069 | * much time trying to collapse the VM objects | |
4070 | * and create a bottleneck when several tasks | |
4071 | * map the same file. | |
4072 | */ | |
4073 | if (cur_object->copy == object) { | |
4074 | /* | |
4075 | * Shared mapping or no COW yet. | |
4076 | * We can never collapse a copy | |
4077 | * object into its backing object. | |
4078 | */ | |
4079 | need_collapse = FALSE; | |
4080 | } else if (cur_object->copy == object->shadow && | |
4081 | object->shadow->resident_page_count == 0) { | |
4082 | /* | |
4083 | * Shared mapping after a COW occurred. | |
4084 | */ | |
4085 | need_collapse = FALSE; | |
4086 | } | |
4087 | } | |
4088 | vm_object_unlock(cur_object); | |
4089 | ||
4090 | if (need_collapse == FALSE) | |
4091 | vm_fault_collapse_skipped++; | |
4092 | vm_fault_collapse_total++; | |
4093 | ||
4094 | type_of_fault = DBG_COW_FAULT; | |
4095 | VM_STAT_INCR(cow_faults); | |
4096 | DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL); | |
4097 | current_task()->cow_faults++; | |
4098 | ||
4099 | goto FastPmapEnter; | |
4100 | ||
4101 | } else { | |
4102 | /* | |
4103 | * No page at cur_object, cur_offset... m == NULL | |
4104 | */ | |
4105 | if (cur_object->pager_created) { | |
4106 | int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; | |
4107 | ||
4108 | if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) { | |
4109 | int my_fault_type; | |
4110 | int c_flags = C_DONT_BLOCK; | |
4111 | boolean_t insert_cur_object = FALSE; | |
4112 | ||
4113 | /* | |
4114 | * May have to talk to a pager... | |
4115 | * if so, take the slow path by | |
4116 | * doing a 'break' from the while (TRUE) loop | |
4117 | * | |
4118 | * external_state will only be set to VM_EXTERNAL_STATE_EXISTS | |
4119 | * if the compressor is active and the page exists there | |
4120 | */ | |
4121 | if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) | |
4122 | break; | |
4123 | ||
4124 | if (map == kernel_map || real_map == kernel_map) { | |
4125 | /* | |
4126 | * can't call into the compressor with the kernel_map | |
4127 | * lock held, since the compressor may try to operate | |
4128 | * on the kernel map in order to return an empty c_segment | |
4129 | */ | |
4130 | break; | |
4131 | } | |
4132 | if (object != cur_object) { | |
4133 | if (fault_type & VM_PROT_WRITE) | |
4134 | c_flags |= C_KEEP; | |
4135 | else | |
4136 | insert_cur_object = TRUE; | |
4137 | } | |
4138 | if (insert_cur_object == TRUE) { | |
4139 | ||
4140 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) { | |
4141 | ||
4142 | cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4143 | ||
4144 | if (vm_object_lock_upgrade(cur_object) == FALSE) { | |
4145 | /* | |
4146 | * couldn't upgrade so go do a full retry | |
4147 | * immediately since we can no longer be | |
4148 | * certain about cur_object (since we | |
4149 | * don't hold a reference on it)... | |
4150 | * first drop the top object lock | |
4151 | */ | |
4152 | vm_object_unlock(object); | |
4153 | ||
4154 | vm_map_unlock_read(map); | |
4155 | if (real_map != map) | |
4156 | vm_map_unlock(real_map); | |
4157 | ||
4158 | goto RetryFault; | |
4159 | } | |
4160 | } | |
4161 | } else if (object_lock_type == OBJECT_LOCK_SHARED) { | |
4162 | ||
4163 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4164 | ||
4165 | if (object != cur_object) { | |
4166 | /* | |
4167 | * we can't go for the upgrade on the top | |
4168 | * lock since the upgrade may block waiting | |
4169 | * for readers to drain... since we hold | |
4170 | * cur_object locked at this point, waiting | |
4171 | * for the readers to drain would represent | |
4172 | * a lock order inversion since the lock order | |
4173 | * for objects is the reference order in the | |
4174 | * shadow chain | |
4175 | */ | |
4176 | vm_object_unlock(object); | |
4177 | vm_object_unlock(cur_object); | |
4178 | ||
4179 | vm_map_unlock_read(map); | |
4180 | if (real_map != map) | |
4181 | vm_map_unlock(real_map); | |
4182 | ||
4183 | goto RetryFault; | |
4184 | } | |
4185 | if (vm_object_lock_upgrade(object) == FALSE) { | |
4186 | /* | |
4187 | * couldn't upgrade, so explicitly take the lock | |
4188 | * exclusively and go relookup the page since we | |
4189 | * will have dropped the object lock and | |
4190 | * a different thread could have inserted | |
4191 | * a page at this offset | |
4192 | * no need for a full retry since we're | |
4193 | * at the top level of the object chain | |
4194 | */ | |
4195 | vm_object_lock(object); | |
4196 | ||
4197 | continue; | |
4198 | } | |
4199 | } | |
4200 | m = vm_page_grab(); | |
4201 | ||
4202 | if (m == VM_PAGE_NULL) { | |
4203 | /* | |
4204 | * no free page currently available... | |
4205 | * must take the slow path | |
4206 | */ | |
4207 | break; | |
4208 | } | |
4209 | ||
4210 | /* | |
4211 | * The object is and remains locked | |
4212 | * so no need to take a | |
4213 | * "paging_in_progress" reference. | |
4214 | */ | |
4215 | boolean_t shared_lock; | |
4216 | if ((object == cur_object && | |
4217 | object_lock_type == OBJECT_LOCK_EXCLUSIVE) || | |
4218 | (object != cur_object && | |
4219 | cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { | |
4220 | shared_lock = FALSE; | |
4221 | } else { | |
4222 | shared_lock = TRUE; | |
4223 | } | |
4224 | ||
4225 | kr = vm_compressor_pager_get( | |
4226 | cur_object->pager, | |
4227 | (cur_offset + | |
4228 | cur_object->paging_offset), | |
4229 | m->phys_page, | |
4230 | &my_fault_type, | |
4231 | c_flags, | |
4232 | &compressed_count_delta); | |
4233 | ||
4234 | vm_compressor_pager_count( | |
4235 | cur_object->pager, | |
4236 | compressed_count_delta, | |
4237 | shared_lock, | |
4238 | cur_object); | |
4239 | ||
4240 | if (kr != KERN_SUCCESS) { | |
4241 | vm_page_release(m); | |
4242 | break; | |
4243 | } | |
4244 | m->dirty = TRUE; | |
4245 | ||
4246 | /* | |
4247 | * If the object is purgeable, its | |
4248 | * owner's purgeable ledgers will be | |
4249 | * updated in vm_page_insert() but the | |
4250 | * page was also accounted for in a | |
4251 | * "compressed purgeable" ledger, so | |
4252 | * update that now. | |
4253 | */ | |
4254 | if (object != cur_object && | |
4255 | !insert_cur_object) { | |
4256 | /* | |
4257 | * We're not going to insert | |
4258 | * the decompressed page into | |
4259 | * the object it came from. | |
4260 | * | |
4261 | * We're dealing with a | |
4262 | * copy-on-write fault on | |
4263 | * "object". | |
4264 | * We're going to decompress | |
4265 | * the page directly into the | |
4266 | * target "object" while | |
4267 | * keeping the compressed | |
4268 | * page for "cur_object", so | |
4269 | * no ledger update in that | |
4270 | * case. | |
4271 | */ | |
4272 | } else if ((cur_object->purgable == | |
4273 | VM_PURGABLE_DENY) || | |
4274 | (cur_object->vo_purgeable_owner == | |
4275 | NULL)) { | |
4276 | /* | |
4277 | * "cur_object" is not purgeable | |
4278 | * or is not owned, so no | |
4279 | * purgeable ledgers to update. | |
4280 | */ | |
4281 | } else { | |
4282 | /* | |
4283 | * One less compressed | |
4284 | * purgeable page for | |
4285 | * cur_object's owner. | |
4286 | */ | |
4287 | vm_purgeable_compressed_update( | |
4288 | cur_object, | |
4289 | -1); | |
4290 | } | |
4291 | ||
4292 | if (insert_cur_object) { | |
4293 | vm_page_insert(m, cur_object, cur_offset); | |
4294 | } else { | |
4295 | vm_page_insert(m, object, offset); | |
4296 | } | |
4297 | ||
4298 | if ((m->object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { | |
4299 | /* | |
4300 | * If the page is not cacheable, | |
4301 | * we can't let its contents | |
4302 | * linger in the data cache | |
4303 | * after the decompression. | |
4304 | */ | |
4305 | pmap_sync_page_attributes_phys(m->phys_page); | |
4306 | } | |
4307 | ||
4308 | type_of_fault = my_fault_type; | |
4309 | ||
4310 | VM_STAT_INCR(decompressions); | |
4311 | ||
4312 | if (cur_object != object) { | |
4313 | if (insert_cur_object) { | |
4314 | top_object = object; | |
4315 | /* | |
4316 | * switch to the object that has the new page | |
4317 | */ | |
4318 | object = cur_object; | |
4319 | object_lock_type = cur_object_lock_type; | |
4320 | } else { | |
4321 | vm_object_unlock(cur_object); | |
4322 | cur_object = object; | |
4323 | } | |
4324 | } | |
4325 | goto FastPmapEnter; | |
4326 | } | |
4327 | /* | |
4328 | * existence map present and indicates | |
4329 | * that the pager doesn't have this page | |
4330 | */ | |
4331 | } | |
4332 | if (cur_object->shadow == VM_OBJECT_NULL) { | |
4333 | /* | |
4334 | * Zero fill fault. Page gets | |
4335 | * inserted into the original object. | |
4336 | */ | |
4337 | if (cur_object->shadow_severed || | |
4338 | VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) | |
4339 | { | |
4340 | if (object != cur_object) | |
4341 | vm_object_unlock(cur_object); | |
4342 | vm_object_unlock(object); | |
4343 | ||
4344 | vm_map_unlock_read(map); | |
4345 | if (real_map != map) | |
4346 | vm_map_unlock(real_map); | |
4347 | ||
4348 | kr = KERN_MEMORY_ERROR; | |
4349 | goto done; | |
4350 | } | |
4351 | if (vm_backing_store_low) { | |
4352 | /* | |
4353 | * we are protecting the system from | |
4354 | * backing store exhaustion... | |
4355 | * must take the slow path if we're | |
4356 | * not privileged | |
4357 | */ | |
4358 | if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) | |
4359 | break; | |
4360 | } | |
4361 | if (cur_object != object) { | |
4362 | vm_object_unlock(cur_object); | |
4363 | ||
4364 | cur_object = object; | |
4365 | } | |
4366 | if (object_lock_type == OBJECT_LOCK_SHARED) { | |
4367 | ||
4368 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4369 | ||
4370 | if (vm_object_lock_upgrade(object) == FALSE) { | |
4371 | /* | |
4372 | * couldn't upgrade so do a full retry on the fault | |
4373 | * since we dropped the object lock which | |
4374 | * could allow another thread to insert | |
4375 | * a page at this offset | |
4376 | */ | |
4377 | vm_map_unlock_read(map); | |
4378 | if (real_map != map) | |
4379 | vm_map_unlock(real_map); | |
4380 | ||
4381 | goto RetryFault; | |
4382 | } | |
4383 | } | |
4384 | m = vm_page_alloc(object, offset); | |
4385 | ||
4386 | if (m == VM_PAGE_NULL) { | |
4387 | /* | |
4388 | * no free page currently available... | |
4389 | * must take the slow path | |
4390 | */ | |
4391 | break; | |
4392 | } | |
4393 | ||
4394 | /* | |
4395 | * Now zero fill page... | |
4396 | * the page is probably going to | |
4397 | * be written soon, so don't bother | |
4398 | * to clear the modified bit | |
4399 | * | |
4400 | * NOTE: This code holds the map | |
4401 | * lock across the zero fill. | |
4402 | */ | |
4403 | type_of_fault = vm_fault_zero_page(m, map->no_zero_fill); | |
4404 | ||
4405 | goto FastPmapEnter; | |
4406 | } | |
4407 | /* | |
4408 | * On to the next level in the shadow chain | |
4409 | */ | |
4410 | cur_offset += cur_object->vo_shadow_offset; | |
4411 | new_object = cur_object->shadow; | |
4412 | ||
4413 | /* | |
4414 | * take the new_object's lock with the indicated state | |
4415 | */ | |
4416 | if (cur_object_lock_type == OBJECT_LOCK_SHARED) | |
4417 | vm_object_lock_shared(new_object); | |
4418 | else | |
4419 | vm_object_lock(new_object); | |
4420 | ||
4421 | if (cur_object != object) | |
4422 | vm_object_unlock(cur_object); | |
4423 | ||
4424 | cur_object = new_object; | |
4425 | ||
4426 | continue; | |
4427 | } | |
4428 | } | |
4429 | /* | |
4430 | * Cleanup from fast fault failure. Drop any object | |
4431 | * lock other than original and drop map lock. | |
4432 | */ | |
4433 | if (object != cur_object) | |
4434 | vm_object_unlock(cur_object); | |
4435 | ||
4436 | /* | |
4437 | * must own the object lock exclusively at this point | |
4438 | */ | |
4439 | if (object_lock_type == OBJECT_LOCK_SHARED) { | |
4440 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4441 | ||
4442 | if (vm_object_lock_upgrade(object) == FALSE) { | |
4443 | /* | |
4444 | * couldn't upgrade, so explicitly | |
4445 | * take the lock exclusively | |
4446 | * no need to retry the fault at this | |
4447 | * point since "vm_fault_page" will | |
4448 | * completely re-evaluate the state | |
4449 | */ | |
4450 | vm_object_lock(object); | |
4451 | } | |
4452 | } | |
4453 | ||
4454 | handle_copy_delay: | |
4455 | vm_map_unlock_read(map); | |
4456 | if (real_map != map) | |
4457 | vm_map_unlock(real_map); | |
4458 | ||
4459 | /* | |
4460 | * Make a reference to this object to | |
4461 | * prevent its disposal while we are messing with | |
4462 | * it. Once we have the reference, the map is free | |
4463 | * to be diddled. Since objects reference their | |
4464 | * shadows (and copies), they will stay around as well. | |
4465 | */ | |
4466 | vm_object_reference_locked(object); | |
4467 | vm_object_paging_begin(object); | |
4468 | ||
4469 | XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); | |
4470 | ||
4471 | error_code = 0; | |
4472 | ||
4473 | result_page = VM_PAGE_NULL; | |
4474 | kr = vm_fault_page(object, offset, fault_type, | |
4475 | (change_wiring && !wired), | |
4476 | FALSE, /* page not looked up */ | |
4477 | &prot, &result_page, &top_page, | |
4478 | &type_of_fault, | |
4479 | &error_code, map->no_zero_fill, | |
4480 | FALSE, &fault_info); | |
4481 | ||
4482 | /* | |
4483 | * if kr != VM_FAULT_SUCCESS, then the paging reference | |
4484 | * has been dropped and the object unlocked... the ref_count | |
4485 | * is still held | |
4486 | * | |
4487 | * if kr == VM_FAULT_SUCCESS, then the paging reference | |
4488 | * is still held along with the ref_count on the original object | |
4489 | * | |
4490 | * the object is returned locked with a paging reference | |
4491 | * | |
4492 | * if top_page != NULL, then it's BUSY and the | |
4493 | * object it belongs to has a paging reference | |
4494 | * but is returned unlocked | |
4495 | */ | |
4496 | if (kr != VM_FAULT_SUCCESS && | |
4497 | kr != VM_FAULT_SUCCESS_NO_VM_PAGE) { | |
4498 | /* | |
4499 | * we didn't succeed, lose the object reference immediately. | |
4500 | */ | |
4501 | vm_object_deallocate(object); | |
4502 | ||
4503 | /* | |
4504 | * See why we failed, and take corrective action. | |
4505 | */ | |
4506 | switch (kr) { | |
4507 | case VM_FAULT_MEMORY_SHORTAGE: | |
4508 | if (vm_page_wait((change_wiring) ? | |
4509 | THREAD_UNINT : | |
4510 | THREAD_ABORTSAFE)) | |
4511 | goto RetryFault; | |
4512 | /* | |
4513 | * fall thru | |
4514 | */ | |
4515 | case VM_FAULT_INTERRUPTED: | |
4516 | kr = KERN_ABORTED; | |
4517 | goto done; | |
4518 | case VM_FAULT_RETRY: | |
4519 | goto RetryFault; | |
4520 | case VM_FAULT_MEMORY_ERROR: | |
4521 | if (error_code) | |
4522 | kr = error_code; | |
4523 | else | |
4524 | kr = KERN_MEMORY_ERROR; | |
4525 | goto done; | |
4526 | default: | |
4527 | panic("vm_fault: unexpected error 0x%x from " | |
4528 | "vm_fault_page()\n", kr); | |
4529 | } | |
4530 | } | |
4531 | m = result_page; | |
4532 | ||
4533 | if (m != VM_PAGE_NULL) { | |
4534 | assert((change_wiring && !wired) ? | |
4535 | (top_page == VM_PAGE_NULL) : | |
4536 | ((top_page == VM_PAGE_NULL) == (m->object == object))); | |
4537 | } | |
4538 | ||
4539 | /* | |
4540 | * What to do with the resulting page from vm_fault_page | |
4541 | * if it doesn't get entered into the physical map: | |
4542 | */ | |
4543 | #define RELEASE_PAGE(m) \ | |
4544 | MACRO_BEGIN \ | |
4545 | PAGE_WAKEUP_DONE(m); \ | |
4546 | if (!m->active && !m->inactive && !m->throttled) { \ | |
4547 | vm_page_lockspin_queues(); \ | |
4548 | if (!m->active && !m->inactive && !m->throttled) \ | |
4549 | vm_page_activate(m); \ | |
4550 | vm_page_unlock_queues(); \ | |
4551 | } \ | |
4552 | MACRO_END | |
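| /* | |
| * i.e. wake any threads waiting on the busy page and, if the page is | |
| * not already on a paging queue, put it back on the active queue so | |
| * the pageout daemon can still find it. Used on the retry and error | |
| * paths below, ahead of vm_fault_cleanup(). | |
| */ | |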
4553 | ||
4554 | /* | |
4555 | * We must verify that the maps have not changed | |
4556 | * since our last lookup. | |
4557 | */ | |
4558 | if (m != VM_PAGE_NULL) { | |
4559 | old_copy_object = m->object->copy; | |
4560 | vm_object_unlock(m->object); | |
4561 | } else { | |
4562 | old_copy_object = VM_OBJECT_NULL; | |
4563 | vm_object_unlock(object); | |
4564 | } | |
4565 | ||
4566 | /* | |
4567 | * no object locks are held at this point | |
4568 | */ | |
4569 | if ((map != original_map) || !vm_map_verify(map, &version)) { | |
4570 | vm_object_t retry_object; | |
4571 | vm_object_offset_t retry_offset; | |
4572 | vm_prot_t retry_prot; | |
4573 | ||
4574 | /* | |
4575 | * To avoid trying to write_lock the map while another | |
4576 | * thread has it read_locked (in vm_map_pageable), we | |
4577 | * do not try for write permission. If the page is | |
4578 | * still writable, we will get write permission. If it | |
4579 | * is not, or has been marked needs_copy, we enter the | |
4580 | * mapping without write permission, and will merely | |
4581 | * take another fault. | |
4582 | */ | |
4583 | map = original_map; | |
4584 | vm_map_lock_read(map); | |
4585 | ||
4586 | kr = vm_map_lookup_locked(&map, vaddr, | |
4587 | fault_type & ~VM_PROT_WRITE, | |
4588 | OBJECT_LOCK_EXCLUSIVE, &version, | |
4589 | &retry_object, &retry_offset, &retry_prot, | |
4590 | &wired, | |
4591 | &fault_info, | |
4592 | &real_map); | |
4593 | pmap = real_map->pmap; | |
4594 | ||
4595 | if (kr != KERN_SUCCESS) { | |
4596 | vm_map_unlock_read(map); | |
4597 | ||
4598 | if (m != VM_PAGE_NULL) { | |
4599 | /* | |
4600 | * retake the lock so that | |
4601 | * we can drop the paging reference | |
4602 | * in vm_fault_cleanup and do the | |
4603 | * PAGE_WAKEUP_DONE in RELEASE_PAGE | |
4604 | */ | |
4605 | vm_object_lock(m->object); | |
4606 | ||
4607 | RELEASE_PAGE(m); | |
4608 | ||
4609 | vm_fault_cleanup(m->object, top_page); | |
4610 | } else { | |
4611 | /* | |
4612 | * retake the lock so that | |
4613 | * we can drop the paging reference | |
4614 | * in vm_fault_cleanup | |
4615 | */ | |
4616 | vm_object_lock(object); | |
4617 | ||
4618 | vm_fault_cleanup(object, top_page); | |
4619 | } | |
4620 | vm_object_deallocate(object); | |
4621 | ||
4622 | goto done; | |
4623 | } | |
4624 | vm_object_unlock(retry_object); | |
4625 | ||
4626 | if ((retry_object != object) || (retry_offset != offset)) { | |
4627 | ||
4628 | vm_map_unlock_read(map); | |
4629 | if (real_map != map) | |
4630 | vm_map_unlock(real_map); | |
4631 | ||
4632 | if (m != VM_PAGE_NULL) { | |
4633 | /* | |
4634 | * retake the lock so that | |
4635 | * we can drop the paging reference | |
4636 | * in vm_fault_cleanup and do the | |
4637 | * PAGE_WAKEUP_DONE in RELEASE_PAGE | |
4638 | */ | |
4639 | vm_object_lock(m->object); | |
4640 | ||
4641 | RELEASE_PAGE(m); | |
4642 | ||
4643 | vm_fault_cleanup(m->object, top_page); | |
4644 | } else { | |
4645 | /* | |
4646 | * retake the lock so that | |
4647 | * we can drop the paging reference | |
4648 | * in vm_fault_cleanup | |
4649 | */ | |
4650 | vm_object_lock(object); | |
4651 | ||
4652 | vm_fault_cleanup(object, top_page); | |
4653 | } | |
4654 | vm_object_deallocate(object); | |
4655 | ||
4656 | goto RetryFault; | |
4657 | } | |
4658 | /* | |
4659 | * Check whether the protection has changed or the object | |
4660 | * has been copied while we left the map unlocked. | |
4661 | */ | |
4662 | prot &= retry_prot; | |
4663 | } | |
4664 | if (m != VM_PAGE_NULL) { | |
4665 | vm_object_lock(m->object); | |
4666 | ||
4667 | if (m->object->copy != old_copy_object) { | |
4668 | /* | |
4669 | * The copy object changed while the top-level object | |
4670 | * was unlocked, so take away write permission. | |
4671 | */ | |
4672 | prot &= ~VM_PROT_WRITE; | |
4673 | } | |
4674 | } else | |
4675 | vm_object_lock(object); | |
4676 | ||
4677 | /* | |
4678 | * If we want to wire down this page, but no longer have | |
4679 | * adequate permissions, we must start all over. | |
4680 | */ | |
4681 | if (wired && (fault_type != (prot | VM_PROT_WRITE))) { | |
4682 | ||
4683 | vm_map_verify_done(map, &version); | |
4684 | if (real_map != map) | |
4685 | vm_map_unlock(real_map); | |
4686 | ||
4687 | if (m != VM_PAGE_NULL) { | |
4688 | RELEASE_PAGE(m); | |
4689 | ||
4690 | vm_fault_cleanup(m->object, top_page); | |
4691 | } else | |
4692 | vm_fault_cleanup(object, top_page); | |
4693 | ||
4694 | vm_object_deallocate(object); | |
4695 | ||
4696 | goto RetryFault; | |
4697 | } | |
4698 | if (m != VM_PAGE_NULL) { | |
4699 | /* | |
4700 | * Put this page into the physical map. | |
4701 | * We had to do the unlock above because pmap_enter | |
4702 | * may cause other faults. The page may be on | |
4703 | * the pageout queues. If the pageout daemon comes | |
4704 | * across the page, it will remove it from the queues. | |
4705 | */ | |
4706 | if (caller_pmap) { | |
4707 | kr = vm_fault_enter(m, | |
4708 | caller_pmap, | |
4709 | caller_pmap_addr, | |
4710 | prot, | |
4711 | caller_prot, | |
4712 | wired, | |
4713 | change_wiring, | |
4714 | fault_info.no_cache, | |
4715 | fault_info.cs_bypass, | |
4716 | fault_info.user_tag, | |
4717 | fault_info.pmap_options, | |
4718 | NULL, | |
4719 | &type_of_fault); | |
4720 | } else { | |
4721 | kr = vm_fault_enter(m, | |
4722 | pmap, | |
4723 | vaddr, | |
4724 | prot, | |
4725 | caller_prot, | |
4726 | wired, | |
4727 | change_wiring, | |
4728 | fault_info.no_cache, | |
4729 | fault_info.cs_bypass, | |
4730 | fault_info.user_tag, | |
4731 | fault_info.pmap_options, | |
4732 | NULL, | |
4733 | &type_of_fault); | |
4734 | } | |
4735 | if (kr != KERN_SUCCESS) { | |
4736 | /* abort this page fault */ | |
4737 | vm_map_verify_done(map, &version); | |
4738 | if (real_map != map) | |
4739 | vm_map_unlock(real_map); | |
4740 | PAGE_WAKEUP_DONE(m); | |
4741 | vm_fault_cleanup(m->object, top_page); | |
4742 | vm_object_deallocate(object); | |
4743 | goto done; | |
4744 | } | |
4745 | if (physpage_p != NULL) { | |
4746 | /* for vm_map_wire_and_extract() */ | |
4747 | *physpage_p = m->phys_page; | |
4748 | if (prot & VM_PROT_WRITE) { | |
4749 | vm_object_lock_assert_exclusive(m->object); | |
4750 | m->dirty = TRUE; | |
4751 | } | |
4752 | } | |
4753 | } else { | |
4754 | ||
4755 | vm_map_entry_t entry; | |
4756 | vm_map_offset_t laddr; | |
4757 | vm_map_offset_t ldelta, hdelta; | |
4758 | ||
4759 | /* | |
4760 | * do a pmap block mapping from the physical address | |
4761 | * in the object | |
4762 | */ | |
4763 | ||
4764 | #ifdef ppc | |
4765 | /* While we do not worry about execution protection in */ | |
4766 | /* general, certain pages may have instruction execution */ | |
4767 | /* disallowed. We will check here, and if not allowed */ | |
4768 | /* to execute, we return with a protection failure. */ | |
4769 | ||
4770 | if ((fault_type & VM_PROT_EXECUTE) && | |
4771 | (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) { | |
4772 | ||
4773 | vm_map_verify_done(map, &version); | |
4774 | ||
4775 | if (real_map != map) | |
4776 | vm_map_unlock(real_map); | |
4777 | ||
4778 | vm_fault_cleanup(object, top_page); | |
4779 | vm_object_deallocate(object); | |
4780 | ||
4781 | kr = KERN_PROTECTION_FAILURE; | |
4782 | goto done; | |
4783 | } | |
4784 | #endif /* ppc */ | |
4785 | ||
4786 | if (real_map != map) | |
4787 | vm_map_unlock(real_map); | |
4788 | ||
4789 | if (original_map != map) { | |
4790 | vm_map_unlock_read(map); | |
4791 | vm_map_lock_read(original_map); | |
4792 | map = original_map; | |
4793 | } | |
4794 | real_map = map; | |
4795 | ||
4796 | laddr = vaddr; | |
4797 | hdelta = 0xFFFFF000; | |
4798 | ldelta = 0xFFFFF000; | |
4799 | ||
4800 | while (vm_map_lookup_entry(map, laddr, &entry)) { | |
4801 | if (ldelta > (laddr - entry->vme_start)) | |
4802 | ldelta = laddr - entry->vme_start; | |
4803 | if (hdelta > (entry->vme_end - laddr)) | |
4804 | hdelta = entry->vme_end - laddr; | |
4805 | if (entry->is_sub_map) { | |
4806 | ||
4807 | laddr = ((laddr - entry->vme_start) | |
4808 | + VME_OFFSET(entry)); | |
4809 | vm_map_lock_read(VME_SUBMAP(entry)); | |
4810 | ||
4811 | if (map != real_map) | |
4812 | vm_map_unlock_read(map); | |
4813 | if (entry->use_pmap) { | |
4814 | vm_map_unlock_read(real_map); | |
4815 | real_map = VME_SUBMAP(entry); | |
4816 | } | |
4817 | map = VME_SUBMAP(entry); | |
4818 | ||
4819 | } else { | |
4820 | break; | |
4821 | } | |
4822 | } | |
4823 | ||
4824 | if (vm_map_lookup_entry(map, laddr, &entry) && | |
4825 | (VME_OBJECT(entry) != NULL) && | |
4826 | (VME_OBJECT(entry) == object)) { | |
4827 | int superpage; | |
4828 | ||
4829 | if (!object->pager_created && | |
4830 | object->phys_contiguous) { | |
4831 | superpage = VM_MEM_SUPERPAGE; | |
4832 | } else { | |
4833 | superpage = 0; | |
4834 | } | |
4835 | ||
4836 | if (superpage && physpage_p) { | |
4837 | /* for vm_map_wire_and_extract() */ | |
4838 | *physpage_p = (ppnum_t) | |
4839 | ((((vm_map_offset_t) | |
4840 | object->vo_shadow_offset) | |
4841 | + VME_OFFSET(entry) | |
4842 | + (laddr - entry->vme_start)) | |
4843 | >> PAGE_SHIFT); | |
4844 | } | |
4845 | ||
4846 | if (caller_pmap) { | |
4847 | /* | |
4848 | * Set up a block mapped area | |
4849 | */ | |
4850 | assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); | |
4851 | pmap_map_block(caller_pmap, | |
4852 | (addr64_t)(caller_pmap_addr - ldelta), | |
4853 | (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + | |
4854 | VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), | |
4855 | (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, | |
4856 | (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); | |
4857 | } else { | |
4858 | /* | |
4859 | * Set up a block mapped area | |
4860 | */ | |
4861 | assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); | |
4862 | pmap_map_block(real_map->pmap, | |
4863 | (addr64_t)(vaddr - ldelta), | |
4864 | (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + | |
4865 | VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), | |
4866 | (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, | |
4867 | (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); | |
4868 | } | |
4869 | } | |
4870 | } | |
4871 | ||
4872 | /* | |
4873 | * Unlock everything, and return | |
4874 | */ | |
4875 | vm_map_verify_done(map, &version); | |
4876 | if (real_map != map) | |
4877 | vm_map_unlock(real_map); | |
4878 | ||
4879 | if (m != VM_PAGE_NULL) { | |
4880 | PAGE_WAKEUP_DONE(m); | |
4881 | ||
4882 | vm_fault_cleanup(m->object, top_page); | |
4883 | } else | |
4884 | vm_fault_cleanup(object, top_page); | |
4885 | ||
4886 | vm_object_deallocate(object); | |
4887 | ||
4888 | #undef RELEASE_PAGE | |
4889 | ||
4890 | kr = KERN_SUCCESS; | |
4891 | done: | |
4892 | thread_interrupt_level(interruptible_state); | |
4893 | ||
4894 | /* | |
4895 | * Only I/O throttle on faults which cause a pagein/swapin. | |
4896 | */ | |
4897 | if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) { | |
4898 | throttle_lowpri_io(1); | |
4899 | } else { | |
4900 | if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) { | |
4901 | ||
4902 | if ((throttle_delay = vm_page_throttled(TRUE))) { | |
4903 | ||
4904 | if (vm_debug_events) { | |
4905 | if (type_of_fault == DBG_COMPRESSOR_FAULT) | |
4906 | VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); | |
4907 | else if (type_of_fault == DBG_COW_FAULT) | |
4908 | VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); | |
4909 | else | |
4910 | VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); | |
4911 | } | |
4912 | delay(throttle_delay); | |
4913 | } | |
4914 | } | |
4915 | } | |
4916 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, | |
4917 | (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, | |
4918 | ((uint64_t)vaddr >> 32), | |
4919 | vaddr, | |
4920 | kr, | |
4921 | type_of_fault, | |
4922 | 0); | |
4923 | ||
4924 | return (kr); | |
4925 | } | |
4926 | ||
4927 | /* | |
4928 | * vm_fault_wire: | |
4929 | * | |
4930 | * Wire down a range of virtual addresses in a map. | |
4931 | */ | |
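| /* | |
|  * Outline of the loop below: for each page in [vme_start, vme_end), | |
|  * first try the stripped-down fast path (vm_fault_wire_fast); if that | |
|  * cannot handle the page, fall back to a full vm_fault_internal() with | |
|  * wiring requested.  On a hard failure, everything wired so far is | |
|  * unwound via vm_fault_unwire() before returning the error. | |
|  */ | |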
4932 | kern_return_t | |
4933 | vm_fault_wire( | |
4934 | vm_map_t map, | |
4935 | vm_map_entry_t entry, | |
4936 | vm_prot_t prot, | |
4937 | pmap_t pmap, | |
4938 | vm_map_offset_t pmap_addr, | |
4939 | ppnum_t *physpage_p) | |
4940 | { | |
4941 | ||
4942 | register vm_map_offset_t va; | |
4943 | register vm_map_offset_t end_addr = entry->vme_end; | |
4944 | register kern_return_t rc; | |
4945 | ||
4946 | assert(entry->in_transition); | |
4947 | ||
4948 | if ((VME_OBJECT(entry) != NULL) && | |
4949 | !entry->is_sub_map && | |
4950 | VME_OBJECT(entry)->phys_contiguous) { | |
4951 | return KERN_SUCCESS; | |
4952 | } | |
4953 | ||
4954 | /* | |
4955 | * Inform the physical mapping system that the | |
4956 | * range of addresses may not fault, so that | |
4957 | * page tables and such can be locked down as well. | |
4958 | */ | |
4959 | ||
4960 | pmap_pageable(pmap, pmap_addr, | |
4961 | pmap_addr + (end_addr - entry->vme_start), FALSE); | |
4962 | ||
4963 | /* | |
4964 | * We simulate a fault to get the page and enter it | |
4965 | * in the physical map. | |
4966 | */ | |
4967 | ||
4968 | for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { | |
4969 | rc = vm_fault_wire_fast(map, va, prot, entry, pmap, | |
4970 | pmap_addr + (va - entry->vme_start), | |
4971 | physpage_p); | |
4972 | if (rc != KERN_SUCCESS) { | |
4973 | rc = vm_fault_internal(map, va, prot, TRUE, | |
4974 | ((pmap == kernel_pmap) | |
4975 | ? THREAD_UNINT | |
4976 | : THREAD_ABORTSAFE), | |
4977 | pmap, | |
4978 | (pmap_addr + | |
4979 | (va - entry->vme_start)), | |
4980 | physpage_p); | |
4981 | DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL); | |
4982 | } | |
4983 | ||
4984 | if (rc != KERN_SUCCESS) { | |
4985 | struct vm_map_entry tmp_entry = *entry; | |
4986 | ||
4987 | /* unwire wired pages */ | |
4988 | tmp_entry.vme_end = va; | |
4989 | vm_fault_unwire(map, | |
4990 | &tmp_entry, FALSE, pmap, pmap_addr); | |
4991 | ||
4992 | return rc; | |
4993 | } | |
4994 | } | |
4995 | return KERN_SUCCESS; | |
4996 | } | |
4997 | ||
4998 | /* | |
4999 | * vm_fault_unwire: | |
5000 | * | |
5001 | * Unwire a range of virtual addresses in a map. | |
5002 | */ | |
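| /* | |
|  * Outline: for each page in the entry's range, either clear the pmap | |
|  * wiring and re-fault the address (when there is no backing VM object), | |
|  * or look the page up with a "stealth" vm_fault_page() call and then | |
|  * either free it (deallocate case) or unwire it and drop the pmap | |
|  * wiring. | |
|  */ | |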
5003 | void | |
5004 | vm_fault_unwire( | |
5005 | vm_map_t map, | |
5006 | vm_map_entry_t entry, | |
5007 | boolean_t deallocate, | |
5008 | pmap_t pmap, | |
5009 | vm_map_offset_t pmap_addr) | |
5010 | { | |
5011 | register vm_map_offset_t va; | |
5012 | register vm_map_offset_t end_addr = entry->vme_end; | |
5013 | vm_object_t object; | |
5014 | struct vm_object_fault_info fault_info; | |
5015 | ||
5016 | object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry); | |
5017 | ||
5018 | /* | |
5019 | * If it's marked phys_contiguous, then vm_fault_wire() didn't actually | |
5020 | * do anything since such memory is wired by default. So we don't have | |
5021 | * anything to undo here. | |
5022 | */ | |
5023 | ||
5024 | if (object != VM_OBJECT_NULL && object->phys_contiguous) | |
5025 | return; | |
5026 | ||
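| 	/* | |
| 	 * Set up fault_info for the page lookups below.  stealth is set | |
| 	 * because this is an unwire rather than a demand fault; the | |
| 	 * lookup is not meant to disturb normal fault handling. | |
| 	 */ | |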
5027 | fault_info.interruptible = THREAD_UNINT; | |
5028 | fault_info.behavior = entry->behavior; | |
5029 | fault_info.user_tag = VME_ALIAS(entry); | |
5030 | fault_info.pmap_options = 0; | |
5031 | if (entry->iokit_acct || | |
5032 | (!entry->is_sub_map && !entry->use_pmap)) { | |
5033 | fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; | |
5034 | } | |
5035 | fault_info.lo_offset = VME_OFFSET(entry); | |
5036 | fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); | |
5037 | fault_info.no_cache = entry->no_cache; | |
5038 | fault_info.stealth = TRUE; | |
5039 | fault_info.io_sync = FALSE; | |
5040 | fault_info.cs_bypass = FALSE; | |
5041 | fault_info.mark_zf_absent = FALSE; | |
5042 | fault_info.batch_pmap_op = FALSE; | |
5043 | ||
5044 | /* | |
5045 | * Since the pages are wired down, we must be able to | |
5046 | * get their mappings from the physical map system. | |
5047 | */ | |
5048 | ||
5049 | for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { | |
5050 | ||
5051 | if (object == VM_OBJECT_NULL) { | |
5052 | if (pmap) { | |
5053 | pmap_change_wiring(pmap, | |
5054 | pmap_addr + (va - entry->vme_start), FALSE); | |
5055 | } | |
5056 | (void) vm_fault(map, va, VM_PROT_NONE, | |
5057 | TRUE, THREAD_UNINT, pmap, pmap_addr); | |
5058 | } else { | |
5059 | vm_prot_t prot; | |
5060 | vm_page_t result_page; | |
5061 | vm_page_t top_page; | |
5062 | vm_object_t result_object; | |
5063 | vm_fault_return_t result; | |
5064 | ||
5065 | if (end_addr - va > (vm_size_t) -1) { | |
5066 | /* 32-bit overflow */ | |
5067 | fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE); | |
5068 | } else { | |
5069 | fault_info.cluster_size = (vm_size_t) (end_addr - va); | |
5070 | assert(fault_info.cluster_size == end_addr - va); | |
5071 | } | |
5072 | ||
5073 | do { | |
5074 | prot = VM_PROT_NONE; | |
5075 | ||
5076 | vm_object_lock(object); | |
5077 | vm_object_paging_begin(object); | |
5078 | XPR(XPR_VM_FAULT, | |
5079 | "vm_fault_unwire -> vm_fault_page\n", | |
5080 | 0,0,0,0,0); | |
5081 | result_page = VM_PAGE_NULL; | |
5082 | result = vm_fault_page( | |
5083 | object, | |
5084 | (VME_OFFSET(entry) + | |
5085 | (va - entry->vme_start)), | |
5086 | VM_PROT_NONE, TRUE, | |
5087 | FALSE, /* page not looked up */ | |
5088 | &prot, &result_page, &top_page, | |
5089 | (int *)0, | |
5090 | NULL, map->no_zero_fill, | |
5091 | FALSE, &fault_info); | |
5092 | } while (result == VM_FAULT_RETRY); | |
5093 | ||
5094 | /* | |
5095 | * If this was a mapping to a file on a device that has been forcibly | |
5096 | * unmounted, then we won't get a page back from vm_fault_page(). Just | |
5097 | * move on to the next one in case the remaining pages are mapped from | |
5098 | * different objects. During a forced unmount, the object is terminated | |
5099 | * so the alive flag will be false if this happens. A forced unmount will | |
5100 | * so the alive flag will be false if this happens. A forced unmount | |
5101 | * will occur when an external disk is unplugged before the user does an | |
5102 | */ | |
5103 | ||
5104 | if (result == VM_FAULT_MEMORY_ERROR && !object->alive) | |
5105 | continue; | |
5106 | ||
5107 | if (result == VM_FAULT_MEMORY_ERROR && | |
5108 | object == kernel_object) { | |
5109 | /* | |
5110 | * This must have been allocated with | |
5111 | * KMA_KOBJECT and KMA_VAONLY and there's | |
5112 | * no physical page at this offset. | |
5113 | * We're done (no page to free). | |
5114 | */ | |
5115 | assert(deallocate); | |
5116 | continue; | |
5117 | } | |
5118 | ||
5119 | if (result != VM_FAULT_SUCCESS) | |
5120 | panic("vm_fault_unwire: failure"); | |
5121 | ||
5122 | result_object = result_page->object; | |
5123 | ||
5124 | if (deallocate) { | |
5125 | assert(result_page->phys_page != | |
5126 | vm_page_fictitious_addr); | |
5127 | pmap_disconnect(result_page->phys_page); | |
5128 | VM_PAGE_FREE(result_page); | |
5129 | } else { | |
5130 | if ((pmap) && (result_page->phys_page != vm_page_guard_addr)) | |
5131 | pmap_change_wiring(pmap, | |
5132 | pmap_addr + (va - entry->vme_start), FALSE); | |
5133 | ||
5134 | ||
5135 | if (VM_PAGE_WIRED(result_page)) { | |
5136 | vm_page_lockspin_queues(); | |
5137 | vm_page_unwire(result_page, TRUE); | |
5138 | vm_page_unlock_queues(); | |
5139 | } | |
5140 | if (entry->zero_wired_pages) { | |
5141 | pmap_zero_page(result_page->phys_page); | |
5142 | entry->zero_wired_pages = FALSE; | |
5143 | } | |
5144 | ||
5145 | PAGE_WAKEUP_DONE(result_page); | |
5146 | } | |
5147 | vm_fault_cleanup(result_object, top_page); | |
5148 | } | |
5149 | } | |
5150 | ||
5151 | /* | |
5152 | * Inform the physical mapping system that the range | |
5153 | * of addresses may fault, so that page tables and | |
5154 | * such may be unwired themselves. | |
5155 | */ | |
5156 | ||
5157 | pmap_pageable(pmap, pmap_addr, | |
5158 | pmap_addr + (end_addr - entry->vme_start), TRUE); | |
5159 | ||
5160 | } | |
5161 | ||
5162 | /* | |
5163 | * vm_fault_wire_fast: | |
5164 | * | |
5165 | * Handle common case of a wire down page fault at the given address. | |
5166 | * If successful, the page is inserted into the associated physical map. | |
5167 | * The map entry is passed in to avoid the overhead of a map lookup. | |
5168 | * | |
5169 | * NOTE: the given address should be truncated to the | |
5170 | * proper page address. | |
5171 | * | |
5172 | * KERN_SUCCESS is returned if the page fault is handled; otherwise, | |
5173 | * a standard error specifying why the fault is fatal is returned. | |
5174 | * | |
5175 | * The map in question must be referenced, and remains so. | |
5176 | * Caller has a read lock on the map. | |
5177 | * | |
5178 | * This is a stripped version of vm_fault() for wiring pages. Anything | |
5179 | * other than the common case will return KERN_FAILURE, and the caller | |
5180 | * is expected to call vm_fault(). | |
5181 | */ | |
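| /* | |
|  * Fast-path requirements (checked below): the entry must map a VM | |
|  * object directly (no submap), the page must already be resident and | |
|  * not busy/encrypted/in error, and a write wire must not hit a copy | |
|  * object.  Anything else returns KERN_FAILURE so the caller retries | |
|  * through the regular fault path. | |
|  */ | |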
5182 | static kern_return_t | |
5183 | vm_fault_wire_fast( | |
5184 | __unused vm_map_t map, | |
5185 | vm_map_offset_t va, | |
5186 | vm_prot_t caller_prot, | |
5187 | vm_map_entry_t entry, | |
5188 | pmap_t pmap, | |
5189 | vm_map_offset_t pmap_addr, | |
5190 | ppnum_t *physpage_p) | |
5191 | { | |
5192 | vm_object_t object; | |
5193 | vm_object_offset_t offset; | |
5194 | register vm_page_t m; | |
5195 | vm_prot_t prot; | |
5196 | thread_t thread = current_thread(); | |
5197 | int type_of_fault; | |
5198 | kern_return_t kr; | |
5199 | ||
5200 | VM_STAT_INCR(faults); | |
5201 | ||
5202 | if (thread != THREAD_NULL && thread->task != TASK_NULL) | |
5203 | thread->task->faults++; | |
5204 | ||
5205 | /* | |
5206 | * Recovery actions | |
5207 | */ | |
5208 | ||
5209 | #undef RELEASE_PAGE | |
5210 | #define RELEASE_PAGE(m) { \ | |
5211 | PAGE_WAKEUP_DONE(m); \ | |
5212 | vm_page_lockspin_queues(); \ | |
5213 | vm_page_unwire(m, TRUE); \ | |
5214 | vm_page_unlock_queues(); \ | |
5215 | } | |
5216 | ||
5217 | ||
5218 | #undef UNLOCK_THINGS | |
5219 | #define UNLOCK_THINGS { \ | |
5220 | vm_object_paging_end(object); \ | |
5221 | vm_object_unlock(object); \ | |
5222 | } | |
5223 | ||
5224 | #undef UNLOCK_AND_DEALLOCATE | |
5225 | #define UNLOCK_AND_DEALLOCATE { \ | |
5226 | UNLOCK_THINGS; \ | |
5227 | vm_object_deallocate(object); \ | |
5228 | } | |
5229 | /* | |
5230 | * Give up and have caller do things the hard way. | |
5231 | */ | |
5232 | ||
5233 | #define GIVE_UP { \ | |
5234 | UNLOCK_AND_DEALLOCATE; \ | |
5235 | return(KERN_FAILURE); \ | |
5236 | } | |
5237 | ||
5238 | ||
5239 | /* | |
5240 | * If this entry is not directly to a vm_object, bail out. | |
5241 | */ | |
5242 | if (entry->is_sub_map) { | |
5243 | assert(physpage_p == NULL); | |
5244 | return(KERN_FAILURE); | |
5245 | } | |
5246 | ||
5247 | /* | |
5248 | * Find the backing store object and offset into it. | |
5249 | */ | |
5250 | ||
5251 | object = VME_OBJECT(entry); | |
5252 | offset = (va - entry->vme_start) + VME_OFFSET(entry); | |
5253 | prot = entry->protection; | |
5254 | ||
5255 | /* | |
5256 | * Make a reference to this object to prevent its | |
5257 | * disposal while we are messing with it. | |
5258 | */ | |
5259 | ||
5260 | vm_object_lock(object); | |
5261 | vm_object_reference_locked(object); | |
5262 | vm_object_paging_begin(object); | |
5263 | ||
5264 | /* | |
5265 | * INVARIANTS (through entire routine): | |
5266 | * | |
5267 | * 1) At all times, we must either have the object | |
5268 | * lock or a busy page in some object to prevent | |
5269 | * some other thread from trying to bring in | |
5270 | * the same page. | |
5271 | * | |
5272 | * 2) Once we have a busy page, we must remove it from | |
5273 | * the pageout queues, so that the pageout daemon | |
5274 | * will not grab it away. | |
5275 | * | |
5276 | */ | |
5277 | ||
5278 | /* | |
5279 | * Look for page in top-level object. If it's not there or | |
5280 | * there's something going on, give up. | |
5281 | * ENCRYPTED SWAP: use the slow fault path, since we'll need to | |
5282 | * decrypt the page before wiring it down. | |
5283 | */ | |
5284 | m = vm_page_lookup(object, offset); | |
5285 | if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) || | |
5286 | (m->unusual && ( m->error || m->restart || m->absent))) { | |
5287 | ||
5288 | GIVE_UP; | |
5289 | } | |
5290 | ASSERT_PAGE_DECRYPTED(m); | |
5291 | ||
5292 | if (m->fictitious && | |
5293 | m->phys_page == vm_page_guard_addr) { | |
5294 | /* | |
5295 | * Guard pages are fictitious pages and are never | |
5296 | * entered into a pmap, so let's say it's been wired... | |
5297 | */ | |
5298 | kr = KERN_SUCCESS; | |
5299 | goto done; | |
5300 | } | |
5301 | ||
5302 | /* | |
5303 | * Wire the page down now. All bail outs beyond this | |
5304 | * point must unwire the page. | |
5305 | */ | |
5306 | ||
5307 | vm_page_lockspin_queues(); | |
5308 | vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); | |
5309 | vm_page_unlock_queues(); | |
5310 | ||
5311 | /* | |
5312 | * Mark page busy for other threads. | |
5313 | */ | |
5314 | assert(!m->busy); | |
5315 | m->busy = TRUE; | |
5316 | assert(!m->absent); | |
5317 | ||
5318 | /* | |
5319 | * Give up if the page is being written and there's a copy object | |
5320 | */ | |
5321 | if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) { | |
5322 | RELEASE_PAGE(m); | |
5323 | GIVE_UP; | |
5324 | } | |
5325 | ||
5326 | /* | |
5327 | * Put this page into the physical map. | |
5328 | */ | |
5329 | type_of_fault = DBG_CACHE_HIT_FAULT; | |
5330 | kr = vm_fault_enter(m, | |
5331 | pmap, | |
5332 | pmap_addr, | |
5333 | prot, | |
5334 | prot, | |
5335 | TRUE, | |
5336 | FALSE, | |
5337 | FALSE, | |
5338 | FALSE, | |
5339 | VME_ALIAS(entry), | |
5340 | ((entry->iokit_acct || | |
5341 | (!entry->is_sub_map && !entry->use_pmap)) | |
5342 | ? PMAP_OPTIONS_ALT_ACCT | |
5343 | : 0), | |
5344 | NULL, | |
5345 | &type_of_fault); | |
5346 | ||
5347 | done: | |
5348 | /* | |
5349 | * Unlock everything, and return | |
5350 | */ | |
5351 | ||
5352 | if (physpage_p) { | |
5353 | /* for vm_map_wire_and_extract() */ | |
5354 | if (kr == KERN_SUCCESS) { | |
5355 | *physpage_p = m->phys_page; | |
5356 | if (prot & VM_PROT_WRITE) { | |
5357 | vm_object_lock_assert_exclusive(m->object); | |
5358 | m->dirty = TRUE; | |
5359 | } | |
5360 | } else { | |
5361 | *physpage_p = 0; | |
5362 | } | |
5363 | } | |
5364 | ||
5365 | PAGE_WAKEUP_DONE(m); | |
5366 | UNLOCK_AND_DEALLOCATE; | |
5367 | ||
5368 | return kr; | |
5369 | ||
5370 | } | |
5371 | ||
5372 | /* | |
5373 | * Routine: vm_fault_copy_cleanup | |
5374 | * Purpose: | |
5375 | * Release a page used by vm_fault_copy. | |
5376 | */ | |
5377 | ||
5378 | static void | |
5379 | vm_fault_copy_cleanup( | |
5380 | vm_page_t page, | |
5381 | vm_page_t top_page) | |
5382 | { | |
5383 | vm_object_t object = page->object; | |
5384 | ||
5385 | vm_object_lock(object); | |
5386 | PAGE_WAKEUP_DONE(page); | |
5387 | if (!page->active && !page->inactive && !page->throttled) { | |
5388 | vm_page_lockspin_queues(); | |
5389 | if (!page->active && !page->inactive && !page->throttled) | |
5390 | vm_page_activate(page); | |
5391 | vm_page_unlock_queues(); | |
5392 | } | |
5393 | vm_fault_cleanup(object, top_page); | |
5394 | } | |
5395 | ||
5396 | static void | |
5397 | vm_fault_copy_dst_cleanup( | |
5398 | vm_page_t page) | |
5399 | { | |
5400 | vm_object_t object; | |
5401 | ||
5402 | if (page != VM_PAGE_NULL) { | |
5403 | object = page->object; | |
5404 | vm_object_lock(object); | |
5405 | vm_page_lockspin_queues(); | |
5406 | vm_page_unwire(page, TRUE); | |
5407 | vm_page_unlock_queues(); | |
5408 | vm_object_paging_end(object); | |
5409 | vm_object_unlock(object); | |
5410 | } | |
5411 | } | |
5412 | ||
5413 | /* | |
5414 | * Routine: vm_fault_copy | |
5415 | * | |
5416 | * Purpose: | |
5417 | * Copy pages from one virtual memory object to another -- | |
5418 | * neither the source nor destination pages need be resident. | |
5419 | * | |
5420 | * Before actually copying a page, the version associated with | |
5421 | * the destination address map will be verified. | |
5422 | * | |
5423 | * In/out conditions: | |
5424 | * The caller must hold a reference, but not a lock, to | |
5425 | * each of the source and destination objects and to the | |
5426 | * destination map. | |
5427 | * | |
5428 | * Results: | |
5429 | * Returns KERN_SUCCESS if no errors were encountered in | |
5430 | * reading or writing the data. Returns KERN_INTERRUPTED if | |
5431 | * the operation was interrupted (only possible if the | |
5432 | * "interruptible" argument is asserted). Other return values | |
5433 | * indicate a permanent error in copying the data. | |
5434 | * | |
5435 | * The actual amount of data copied will be returned in the | |
5436 | * "copy_size" argument. In the event that the destination map | |
5437 | * verification failed, this amount may be less than the amount | |
5438 | * requested. | |
5439 | */ | |
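| /* | |
|  * Outline of the copy loop: the destination page is faulted in first | |
|  * (applying any COW semantics) and wired so its "busy" bit can be | |
|  * dropped without losing the page; the source page is then faulted | |
|  * read-only.  Faulting the destination first avoids a deadlock when | |
|  * the source and destination pages turn out to be the same page. | |
|  * Unaligned offsets are handled by partial-page copies. | |
|  */ | |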
5440 | kern_return_t | |
5441 | vm_fault_copy( | |
5442 | vm_object_t src_object, | |
5443 | vm_object_offset_t src_offset, | |
5444 | vm_map_size_t *copy_size, /* INOUT */ | |
5445 | vm_object_t dst_object, | |
5446 | vm_object_offset_t dst_offset, | |
5447 | vm_map_t dst_map, | |
5448 | vm_map_version_t *dst_version, | |
5449 | int interruptible) | |
5450 | { | |
5451 | vm_page_t result_page; | |
5452 | ||
5453 | vm_page_t src_page; | |
5454 | vm_page_t src_top_page; | |
5455 | vm_prot_t src_prot; | |
5456 | ||
5457 | vm_page_t dst_page; | |
5458 | vm_page_t dst_top_page; | |
5459 | vm_prot_t dst_prot; | |
5460 | ||
5461 | vm_map_size_t amount_left; | |
5462 | vm_object_t old_copy_object; | |
5463 | kern_return_t error = 0; | |
5464 | vm_fault_return_t result; | |
5465 | ||
5466 | vm_map_size_t part_size; | |
5467 | struct vm_object_fault_info fault_info_src; | |
5468 | struct vm_object_fault_info fault_info_dst; | |
5469 | ||
5470 | /* | |
5471 | * In order not to confuse the clustered pageins, align | |
5472 | * the different offsets on a page boundary. | |
5473 | */ | |
5474 | ||
5475 | #define RETURN(x) \ | |
5476 | MACRO_BEGIN \ | |
5477 | *copy_size -= amount_left; \ | |
5478 | MACRO_RETURN(x); \ | |
5479 | MACRO_END | |
5480 | ||
5481 | amount_left = *copy_size; | |
5482 | ||
5483 | fault_info_src.interruptible = interruptible; | |
5484 | fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL; | |
5485 | fault_info_src.user_tag = 0; | |
5486 | fault_info_src.pmap_options = 0; | |
5487 | fault_info_src.lo_offset = vm_object_trunc_page(src_offset); | |
5488 | fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left; | |
5489 | fault_info_src.no_cache = FALSE; | |
5490 | fault_info_src.stealth = TRUE; | |
5491 | fault_info_src.io_sync = FALSE; | |
5492 | fault_info_src.cs_bypass = FALSE; | |
5493 | fault_info_src.mark_zf_absent = FALSE; | |
5494 | fault_info_src.batch_pmap_op = FALSE; | |
5495 | ||
5496 | fault_info_dst.interruptible = interruptible; | |
5497 | fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL; | |
5498 | fault_info_dst.user_tag = 0; | |
5499 | fault_info_dst.pmap_options = 0; | |
5500 | fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset); | |
5501 | fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left; | |
5502 | fault_info_dst.no_cache = FALSE; | |
5503 | fault_info_dst.stealth = TRUE; | |
5504 | fault_info_dst.io_sync = FALSE; | |
5505 | fault_info_dst.cs_bypass = FALSE; | |
5506 | fault_info_dst.mark_zf_absent = FALSE; | |
5507 | fault_info_dst.batch_pmap_op = FALSE; | |
5508 | ||
5509 | do { /* while (amount_left > 0) */ | |
5510 | /* | |
5511 | * There may be a deadlock if both source and destination | |
5512 | * pages are the same. To avoid this deadlock, the copy must | |
5513 | * start by getting the destination page in order to apply | |
5514 | * COW semantics if any. | |
5515 | */ | |
5516 | ||
5517 | RetryDestinationFault: ; | |
5518 | ||
5519 | dst_prot = VM_PROT_WRITE|VM_PROT_READ; | |
5520 | ||
5521 | vm_object_lock(dst_object); | |
5522 | vm_object_paging_begin(dst_object); | |
5523 | ||
5524 | if (amount_left > (vm_size_t) -1) { | |
5525 | /* 32-bit overflow */ | |
5526 | fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE); | |
5527 | } else { | |
5528 | fault_info_dst.cluster_size = (vm_size_t) amount_left; | |
5529 | assert(fault_info_dst.cluster_size == amount_left); | |
5530 | } | |
5531 | ||
5532 | XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0); | |
5533 | dst_page = VM_PAGE_NULL; | |
5534 | result = vm_fault_page(dst_object, | |
5535 | vm_object_trunc_page(dst_offset), | |
5536 | VM_PROT_WRITE|VM_PROT_READ, | |
5537 | FALSE, | |
5538 | FALSE, /* page not looked up */ | |
5539 | &dst_prot, &dst_page, &dst_top_page, | |
5540 | (int *)0, | |
5541 | &error, | |
5542 | dst_map->no_zero_fill, | |
5543 | FALSE, &fault_info_dst); | |
5544 | switch (result) { | |
5545 | case VM_FAULT_SUCCESS: | |
5546 | break; | |
5547 | case VM_FAULT_RETRY: | |
5548 | goto RetryDestinationFault; | |
5549 | case VM_FAULT_MEMORY_SHORTAGE: | |
5550 | if (vm_page_wait(interruptible)) | |
5551 | goto RetryDestinationFault; | |
5552 | /* fall thru */ | |
5553 | case VM_FAULT_INTERRUPTED: | |
5554 | RETURN(MACH_SEND_INTERRUPTED); | |
5555 | case VM_FAULT_SUCCESS_NO_VM_PAGE: | |
5556 | /* success but no VM page: fail the copy */ | |
5557 | vm_object_paging_end(dst_object); | |
5558 | vm_object_unlock(dst_object); | |
5559 | /*FALLTHROUGH*/ | |
5560 | case VM_FAULT_MEMORY_ERROR: | |
5561 | if (error) | |
5562 | return (error); | |
5563 | else | |
5564 | return(KERN_MEMORY_ERROR); | |
5565 | default: | |
5566 | panic("vm_fault_copy: unexpected error 0x%x from " | |
5567 | "vm_fault_page()\n", result); | |
5568 | } | |
5569 | assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); | |
5570 | ||
5571 | old_copy_object = dst_page->object->copy; | |
5572 | ||
5573 | /* | |
5574 | * There exists the possibility that the source and | |
5575 | * destination page are the same. But we can't | |
5576 | * easily determine that now. If they are the | |
5577 | * same, the call to vm_fault_page() for the | |
5578 | * destination page will deadlock. To prevent this we | |
5579 | * wire the page so we can drop busy without having | |
5580 | * the page daemon steal the page. We clean up the | |
5581 | * top page but keep the paging reference on the object | |
5582 | * holding the dest page so it doesn't go away. | |
5583 | */ | |
5584 | ||
5585 | vm_page_lockspin_queues(); | |
5586 | vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE); | |
5587 | vm_page_unlock_queues(); | |
5588 | PAGE_WAKEUP_DONE(dst_page); | |
5589 | vm_object_unlock(dst_page->object); | |
5590 | ||
5591 | if (dst_top_page != VM_PAGE_NULL) { | |
5592 | vm_object_lock(dst_object); | |
5593 | VM_PAGE_FREE(dst_top_page); | |
5594 | vm_object_paging_end(dst_object); | |
5595 | vm_object_unlock(dst_object); | |
5596 | } | |
5597 | ||
5598 | RetrySourceFault: ; | |
5599 | ||
5600 | if (src_object == VM_OBJECT_NULL) { | |
5601 | /* | |
5602 | * No source object. We will just | |
5603 | * zero-fill the page in dst_object. | |
5604 | */ | |
5605 | src_page = VM_PAGE_NULL; | |
5606 | result_page = VM_PAGE_NULL; | |
5607 | } else { | |
5608 | vm_object_lock(src_object); | |
5609 | src_page = vm_page_lookup(src_object, | |
5610 | vm_object_trunc_page(src_offset)); | |
5611 | if (src_page == dst_page) { | |
5612 | src_prot = dst_prot; | |
5613 | result_page = VM_PAGE_NULL; | |
5614 | } else { | |
5615 | src_prot = VM_PROT_READ; | |
5616 | vm_object_paging_begin(src_object); | |
5617 | ||
5618 | if (amount_left > (vm_size_t) -1) { | |
5619 | /* 32-bit overflow */ | |
5620 | fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE); | |
5621 | } else { | |
5622 | fault_info_src.cluster_size = (vm_size_t) amount_left; | |
5623 | assert(fault_info_src.cluster_size == amount_left); | |
5624 | } | |
5625 | ||
5626 | XPR(XPR_VM_FAULT, | |
5627 | "vm_fault_copy(2) -> vm_fault_page\n", | |
5628 | 0,0,0,0,0); | |
5629 | result_page = VM_PAGE_NULL; | |
5630 | result = vm_fault_page( | |
5631 | src_object, | |
5632 | vm_object_trunc_page(src_offset), | |
5633 | VM_PROT_READ, FALSE, | |
5634 | FALSE, /* page not looked up */ | |
5635 | &src_prot, | |
5636 | &result_page, &src_top_page, | |
5637 | (int *)0, &error, FALSE, | |
5638 | FALSE, &fault_info_src); | |
5639 | ||
5640 | switch (result) { | |
5641 | case VM_FAULT_SUCCESS: | |
5642 | break; | |
5643 | case VM_FAULT_RETRY: | |
5644 | goto RetrySourceFault; | |
5645 | case VM_FAULT_MEMORY_SHORTAGE: | |
5646 | if (vm_page_wait(interruptible)) | |
5647 | goto RetrySourceFault; | |
5648 | /* fall thru */ | |
5649 | case VM_FAULT_INTERRUPTED: | |
5650 | vm_fault_copy_dst_cleanup(dst_page); | |
5651 | RETURN(MACH_SEND_INTERRUPTED); | |
5652 | case VM_FAULT_SUCCESS_NO_VM_PAGE: | |
5653 | /* success but no VM page: fail */ | |
5654 | vm_object_paging_end(src_object); | |
5655 | vm_object_unlock(src_object); | |
5656 | /*FALLTHROUGH*/ | |
5657 | case VM_FAULT_MEMORY_ERROR: | |
5658 | vm_fault_copy_dst_cleanup(dst_page); | |
5659 | if (error) | |
5660 | return (error); | |
5661 | else | |
5662 | return(KERN_MEMORY_ERROR); | |
5663 | default: | |
5664 | panic("vm_fault_copy(2): unexpected " | |
5665 | "error 0x%x from " | |
5666 | "vm_fault_page()\n", result); | |
5667 | } | |
5668 | ||
5669 | ||
5670 | assert((src_top_page == VM_PAGE_NULL) == | |
5671 | (result_page->object == src_object)); | |
5672 | } | |
5673 | assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE); | |
5674 | vm_object_unlock(result_page->object); | |
5675 | } | |
5676 | ||
5677 | if (!vm_map_verify(dst_map, dst_version)) { | |
5678 | if (result_page != VM_PAGE_NULL && src_page != dst_page) | |
5679 | vm_fault_copy_cleanup(result_page, src_top_page); | |
5680 | vm_fault_copy_dst_cleanup(dst_page); | |
5681 | break; | |
5682 | } | |
5683 | ||
5684 | vm_object_lock(dst_page->object); | |
5685 | ||
5686 | if (dst_page->object->copy != old_copy_object) { | |
5687 | vm_object_unlock(dst_page->object); | |
5688 | vm_map_verify_done(dst_map, dst_version); | |
5689 | if (result_page != VM_PAGE_NULL && src_page != dst_page) | |
5690 | vm_fault_copy_cleanup(result_page, src_top_page); | |
5691 | vm_fault_copy_dst_cleanup(dst_page); | |
5692 | break; | |
5693 | } | |
5694 | vm_object_unlock(dst_page->object); | |
5695 | ||
5696 | /* | |
5697 | * Copy the page, and note that it is dirty | |
5698 | * immediately. | |
5699 | */ | |
5700 | ||
5701 | if (!page_aligned(src_offset) || | |
5702 | !page_aligned(dst_offset) || | |
5703 | !page_aligned(amount_left)) { | |
5704 | ||
5705 | vm_object_offset_t src_po, | |
5706 | dst_po; | |
5707 | ||
5708 | src_po = src_offset - vm_object_trunc_page(src_offset); | |
5709 | dst_po = dst_offset - vm_object_trunc_page(dst_offset); | |
5710 | ||
5711 | if (dst_po > src_po) { | |
5712 | part_size = PAGE_SIZE - dst_po; | |
5713 | } else { | |
5714 | part_size = PAGE_SIZE - src_po; | |
5715 | } | |
5716 | if (part_size > (amount_left)){ | |
5717 | part_size = amount_left; | |
5718 | } | |
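| 			/* | |
| 			 * Hypothetical example, assuming 4KB pages: if the | |
| 			 * page offset of src_offset is 0x200 and that of | |
| 			 * dst_offset is 0x600, then src_po = 0x200, | |
| 			 * dst_po = 0x600, and part_size = 0x1000 - 0x600 = | |
| 			 * 0xA00 bytes are copied this iteration (less if | |
| 			 * amount_left is smaller). | |
| 			 */ | |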
5719 | ||
5720 | if (result_page == VM_PAGE_NULL) { | |
5721 | assert((vm_offset_t) dst_po == dst_po); | |
5722 | assert((vm_size_t) part_size == part_size); | |
5723 | vm_page_part_zero_fill(dst_page, | |
5724 | (vm_offset_t) dst_po, | |
5725 | (vm_size_t) part_size); | |
5726 | } else { | |
5727 | assert((vm_offset_t) src_po == src_po); | |
5728 | assert((vm_offset_t) dst_po == dst_po); | |
5729 | assert((vm_size_t) part_size == part_size); | |
5730 | vm_page_part_copy(result_page, | |
5731 | (vm_offset_t) src_po, | |
5732 | dst_page, | |
5733 | (vm_offset_t) dst_po, | |
5734 | (vm_size_t)part_size); | |
5735 | if (!dst_page->dirty) { | |
5736 | vm_object_lock(dst_object); | |
5737 | SET_PAGE_DIRTY(dst_page, TRUE); | |
5738 | vm_object_unlock(dst_page->object); | |
5739 | } | |
5740 | ||
5741 | } | |
5742 | } else { | |
5743 | part_size = PAGE_SIZE; | |
5744 | ||
5745 | if (result_page == VM_PAGE_NULL) | |
5746 | vm_page_zero_fill(dst_page); | |
5747 | else { | |
5748 | vm_object_lock(result_page->object); | |
5749 | vm_page_copy(result_page, dst_page); | |
5750 | vm_object_unlock(result_page->object); | |
5751 | ||
5752 | if (!dst_page->dirty) { | |
5753 | vm_object_lock(dst_object); | |
5754 | SET_PAGE_DIRTY(dst_page, TRUE); | |
5755 | vm_object_unlock(dst_page->object); | |
5756 | } | |
5757 | } | |
5758 | ||
5759 | } | |
5760 | ||
5761 | /* | |
5762 | * Unlock everything, and return | |
5763 | */ | |
5764 | ||
5765 | vm_map_verify_done(dst_map, dst_version); | |
5766 | ||
5767 | if (result_page != VM_PAGE_NULL && src_page != dst_page) | |
5768 | vm_fault_copy_cleanup(result_page, src_top_page); | |
5769 | vm_fault_copy_dst_cleanup(dst_page); | |
5770 | ||
5771 | amount_left -= part_size; | |
5772 | src_offset += part_size; | |
5773 | dst_offset += part_size; | |
5774 | } while (amount_left > 0); | |
5775 | ||
5776 | RETURN(KERN_SUCCESS); | |
5777 | #undef RETURN | |
5778 | ||
5779 | /*NOTREACHED*/ | |
5780 | } | |
5781 | ||
5782 | #if VM_FAULT_CLASSIFY | |
5783 | /* | |
5784 | * Temporary statistics gathering support. | |
5785 | */ | |
5786 | ||
5787 | /* | |
5788 | * Statistics arrays: | |
5789 | */ | |
5790 | #define VM_FAULT_TYPES_MAX 5 | |
5791 | #define VM_FAULT_LEVEL_MAX 8 | |
5792 | ||
5793 | int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; | |
5794 | ||
5795 | #define VM_FAULT_TYPE_ZERO_FILL 0 | |
5796 | #define VM_FAULT_TYPE_MAP_IN 1 | |
5797 | #define VM_FAULT_TYPE_PAGER 2 | |
5798 | #define VM_FAULT_TYPE_COPY 3 | |
5799 | #define VM_FAULT_TYPE_OTHER 4 | |
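| /* | |
|  * vm_fault_stats[type][level]: "type" classifies how a fault at the | |
|  * given object/offset would be resolved (zero fill, map in an existing | |
|  * page, go to the pager, copy, or other), and "level" is the depth in | |
|  * the shadow chain at which that decision was made. | |
|  */ | |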
5800 | ||
5801 | ||
5802 | void | |
5803 | vm_fault_classify(vm_object_t object, | |
5804 | vm_object_offset_t offset, | |
5805 | vm_prot_t fault_type) | |
5806 | { | |
5807 | int type, level = 0; | |
5808 | vm_page_t m; | |
5809 | ||
5810 | while (TRUE) { | |
5811 | m = vm_page_lookup(object, offset); | |
5812 | if (m != VM_PAGE_NULL) { | |
5813 | if (m->busy || m->error || m->restart || m->absent) { | |
5814 | type = VM_FAULT_TYPE_OTHER; | |
5815 | break; | |
5816 | } | |
5817 | if (((fault_type & VM_PROT_WRITE) == 0) || | |
5818 | ((level == 0) && object->copy == VM_OBJECT_NULL)) { | |
5819 | type = VM_FAULT_TYPE_MAP_IN; | |
5820 | break; | |
5821 | } | |
5822 | type = VM_FAULT_TYPE_COPY; | |
5823 | break; | |
5824 | } | |
5825 | else { | |
5826 | if (object->pager_created) { | |
5827 | type = VM_FAULT_TYPE_PAGER; | |
5828 | break; | |
5829 | } | |
5830 | if (object->shadow == VM_OBJECT_NULL) { | |
5831 | type = VM_FAULT_TYPE_ZERO_FILL; | |
5832 | break; | |
5833 | } | |
5834 | ||
5835 | offset += object->vo_shadow_offset; | |
5836 | object = object->shadow; | |
5837 | level++; | |
5838 | continue; | |
5839 | } | |
5840 | } | |
5841 | ||
5842 | if (level > VM_FAULT_LEVEL_MAX) | |
5843 | level = VM_FAULT_LEVEL_MAX; | |
5844 | ||
5845 | vm_fault_stats[type][level] += 1; | |
5846 | ||
5847 | return; | |
5848 | } | |
5849 | ||
5850 | /* cleanup routine to call from debugger */ | |
5851 | ||
5852 | void | |
5853 | vm_fault_classify_init(void) | |
5854 | { | |
5855 | int type, level; | |
5856 | ||
5857 | for (type = 0; type < VM_FAULT_TYPES_MAX; type++) { | |
5858 | for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) { | |
5859 | vm_fault_stats[type][level] = 0; | |
5860 | } | |
5861 | } | |
5862 | ||
5863 | return; | |
5864 | } | |
5865 | #endif /* VM_FAULT_CLASSIFY */ | |
5866 | ||
5867 | vm_offset_t | |
5868 | kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault_results) | |
5869 | { | |
5870 | #pragma unused(map, cur_target_addr, fault_results) | |
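| 	/* | |
| 	 * In this configuration the lightweight fault path is compiled | |
| 	 * out and the routine simply reports "no translation" by | |
| 	 * returning 0.  The disabled code below shows the intended | |
| 	 * algorithm: walk the shadow chain without ever blocking, | |
| 	 * returning the physical address of a resident page or of a | |
| 	 * page decompressed from the compressor, and 0 on any condition | |
| 	 * that would require waiting. | |
| 	 */ | |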
5871 | ||
5872 | return 0; | |
5873 | #if 0 | |
5874 | vm_map_entry_t entry; | |
5875 | vm_object_t object; | |
5876 | vm_offset_t object_offset; | |
5877 | vm_page_t m; | |
5878 | int compressor_external_state, compressed_count_delta; | |
5879 | int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); | |
5880 | int my_fault_type = VM_PROT_READ; | |
5881 | kern_return_t kr; | |
5882 | ||
5883 | ||
5884 | if (not_in_kdp) { | |
5885 | panic("kdp_lightweight_fault called from outside of debugger context"); | |
5886 | } | |
5887 | ||
5888 | assert(map != VM_MAP_NULL); | |
5889 | ||
5890 | assert((cur_target_addr & PAGE_MASK) == 0); | |
5891 | if ((cur_target_addr & PAGE_MASK) != 0) { | |
5892 | return 0; | |
5893 | } | |
5894 | ||
5895 | if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) { | |
5896 | return 0; | |
5897 | } | |
5898 | ||
5899 | if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) { | |
5900 | return 0; | |
5901 | } | |
5902 | ||
5903 | if (entry->is_sub_map) { | |
5904 | return 0; | |
5905 | } | |
5906 | ||
5907 | object = VME_OBJECT(entry); | |
5908 | if (object == VM_OBJECT_NULL) { | |
5909 | return 0; | |
5910 | } | |
5911 | ||
5912 | object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry); | |
5913 | ||
5914 | while (TRUE) { | |
5915 | if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) { | |
5916 | return 0; | |
5917 | } | |
5918 | ||
5919 | if (object->pager_created && (object->paging_in_progress || | |
5920 | object->activity_in_progress)) { | |
5921 | return 0; | |
5922 | } | |
5923 | ||
5924 | m = kdp_vm_page_lookup(object, object_offset); | |
5925 | ||
5926 | if (m != VM_PAGE_NULL) { | |
5927 | ||
5928 | if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { | |
5929 | return 0; | |
5930 | } | |
5931 | ||
5932 | if (m->laundry || m->busy || m->pageout || m->absent || m->error || m->cleaning || | |
5933 | m->overwriting || m->restart || m->unusual) { | |
5934 | return 0; | |
5935 | } | |
5936 | ||
5937 | assert(!m->private); | |
5938 | if (m->private) { | |
5939 | return 0; | |
5940 | } | |
5941 | ||
5942 | assert(!m->fictitious); | |
5943 | if (m->fictitious) { | |
5944 | return 0; | |
5945 | } | |
5946 | ||
5947 | assert(!m->encrypted); | |
5948 | if (m->encrypted) { | |
5949 | return 0; | |
5950 | } | |
5951 | ||
5952 | assert(!m->encrypted_cleaning); | |
5953 | if (m->encrypted_cleaning) { | |
5954 | return 0; | |
5955 | } | |
5956 | ||
5957 | assert(!m->compressor); | |
5958 | if (m->compressor) { | |
5959 | return 0; | |
5960 | } | |
5961 | ||
5962 | if (fault_results) { | |
5963 | *fault_results |= kThreadFaultedBT; | |
5964 | } | |
5965 | return ptoa(m->phys_page); | |
5966 | } | |
5967 | ||
5968 | compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; | |
5969 | ||
5970 | if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) { | |
5971 | if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) { | |
5972 | kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset), | |
5973 | kdp_compressor_decompressed_page_ppnum, &my_fault_type, | |
5974 | compressor_flags, &compressed_count_delta); | |
5975 | if (kr == KERN_SUCCESS) { | |
5976 | if (fault_results) { | |
5977 | *fault_results |= kThreadDecompressedBT; | |
5978 | } | |
5979 | return kdp_compressor_decompressed_page_paddr; | |
5980 | } else { | |
5981 | return 0; | |
5982 | } | |
5983 | } | |
5984 | } | |
5985 | ||
5986 | if (object->shadow == VM_OBJECT_NULL) { | |
5987 | return 0; | |
5988 | } | |
5989 | ||
5990 | object_offset += object->vo_shadow_offset; | |
5991 | object = object->shadow; | |
5992 | } | |
5993 | #endif /* 0 */ | |
5994 | } | |
5995 | ||
5996 | ||
5997 | #define CODE_SIGNING_CHUNK_SIZE 4096 | |
5998 | void | |
5999 | vm_page_validate_cs_mapped( | |
6000 | vm_page_t page, | |
6001 | const void *kaddr) | |
6002 | { | |
6003 | vm_object_t object; | |
6004 | vm_object_offset_t offset, offset_in_page; | |
6005 | kern_return_t kr; | |
6006 | memory_object_t pager; | |
6007 | void *blobs; | |
6008 | boolean_t validated; | |
6009 | unsigned tainted; | |
6010 | int num_chunks, num_chunks_validated; | |
6011 | ||
6012 | assert(page->busy); | |
6013 | vm_object_lock_assert_exclusive(page->object); | |
6014 | ||
6015 | if (!cs_validation) { | |
6016 | return; | |
6017 | } | |
6018 | ||
6019 | if (page->wpmapped && !page->cs_tainted) { | |
6020 | /* | |
6021 | * This page was mapped for "write" access sometime in the | |
6022 | * past and could still be modifiable in the future. | |
6023 | * Consider it tainted. | |
6024 | * [ If the page was already found to be "tainted", no | |
6025 | * need to re-validate. ] | |
6026 | */ | |
6027 | page->cs_validated = TRUE; | |
6028 | page->cs_tainted = TRUE; | |
6029 | if (cs_debug) { | |
6030 | printf("CODESIGNING: vm_page_validate_cs: " | |
6031 | "page %p obj %p off 0x%llx " | |
6032 | "was modified\n", | |
6033 | page, page->object, page->offset); | |
6034 | } | |
6035 | vm_cs_validated_dirtied++; | |
6036 | } | |
6037 | ||
6038 | if (page->cs_validated || page->cs_tainted) { | |
6039 | return; | |
6040 | } | |
6041 | ||
6042 | vm_cs_validates++; | |
6043 | ||
6044 | object = page->object; | |
6045 | assert(object->code_signed); | |
6046 | offset = page->offset; | |
6047 | ||
6048 | if (!object->alive || object->terminating || object->pager == NULL) { | |
6049 | /* | |
6050 | * The object is terminating and we don't have its pager | |
6051 | * so we can't validate the data... | |
6052 | */ | |
6053 | return; | |
6054 | } | |
6055 | /* | |
6056 | * Since we get here to validate a page that was brought in by | |
6057 | * the pager, we know that this pager is all set up and ready | |
6058 | * by now. | |
6059 | */ | |
6060 | assert(!object->internal); | |
6061 | assert(object->pager != NULL); | |
6062 | assert(object->pager_ready); | |
6063 | ||
6064 | pager = object->pager; | |
6065 | assert(object->paging_in_progress); | |
6066 | kr = vnode_pager_get_object_cs_blobs(pager, &blobs); | |
6067 | if (kr != KERN_SUCCESS) { | |
6068 | blobs = NULL; | |
6069 | } | |
6070 | ||
6071 | /* verify the SHA1 hash for this page */ | |
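| 	/* | |
| 	 * The page is checked in CODE_SIGNING_CHUNK_SIZE (4KB) pieces: | |
| 	 * one chunk when PAGE_SIZE is 4KB, four chunks on a 16KB-page | |
| 	 * configuration.  The page is marked cs_validated only if every | |
| 	 * chunk validates; any tainted chunk taints the whole page. | |
| 	 */ | |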
6072 | num_chunks_validated = 0; | |
6073 | for (offset_in_page = 0, num_chunks = 0; | |
6074 | offset_in_page < PAGE_SIZE_64; | |
6075 | offset_in_page += CODE_SIGNING_CHUNK_SIZE, num_chunks++) { | |
6076 | tainted = 0; | |
6077 | validated = cs_validate_page(blobs, | |
6078 | pager, | |
6079 | (object->paging_offset + | |
6080 | offset + | |
6081 | offset_in_page), | |
6082 | (const void *)((const char *)kaddr | |
6083 | + offset_in_page), | |
6084 | &tainted); | |
6085 | if (validated) { | |
6086 | num_chunks_validated++; | |
6087 | } | |
6088 | if (tainted & CS_VALIDATE_TAINTED) { | |
6089 | page->cs_tainted = TRUE; | |
6090 | } | |
6091 | if (tainted & CS_VALIDATE_NX) { | |
6092 | page->cs_nx = TRUE; | |
6093 | } | |
6094 | } | |
6095 | /* page is validated only if all its chunks are */ | |
6096 | if (num_chunks_validated == num_chunks) { | |
6097 | page->cs_validated = TRUE; | |
6098 | } | |
6099 | } | |
6100 | ||
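| /* | |
|  * Routine:	vm_page_validate_cs | |
|  * Purpose: | |
|  *	Validate a page's code signature when the caller does not | |
|  *	already have it mapped into the kernel: busy the page if | |
|  *	needed, take a paging reference on the object, map the page | |
|  *	with vm_paging_map_object(), run vm_page_validate_cs_mapped() | |
|  *	on that mapping, then unmap and undo the temporary state. | |
|  */ | |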
6101 | void | |
6102 | vm_page_validate_cs( | |
6103 | vm_page_t page) | |
6104 | { | |
6105 | vm_object_t object; | |
6106 | vm_object_offset_t offset; | |
6107 | vm_map_offset_t koffset; | |
6108 | vm_map_size_t ksize; | |
6109 | vm_offset_t kaddr; | |
6110 | kern_return_t kr; | |
6111 | boolean_t busy_page; | |
6112 | boolean_t need_unmap; | |
6113 | ||
6114 | vm_object_lock_assert_held(page->object); | |
6115 | ||
6116 | if (!cs_validation) { | |
6117 | return; | |
6118 | } | |
6119 | ||
6120 | if (page->wpmapped && !page->cs_tainted) { | |
6121 | vm_object_lock_assert_exclusive(page->object); | |
6122 | ||
6123 | /* | |
6124 | * This page was mapped for "write" access sometime in the | |
6125 | * past and could still be modifiable in the future. | |
6126 | * Consider it tainted. | |
6127 | * [ If the page was already found to be "tainted", no | |
6128 | * need to re-validate. ] | |
6129 | */ | |
6130 | page->cs_validated = TRUE; | |
6131 | page->cs_tainted = TRUE; | |
6132 | if (cs_debug) { | |
6133 | printf("CODESIGNING: vm_page_validate_cs: " | |
6134 | "page %p obj %p off 0x%llx " | |
6135 | "was modified\n", | |
6136 | page, page->object, page->offset); | |
6137 | } | |
6138 | vm_cs_validated_dirtied++; | |
6139 | } | |
6140 | ||
6141 | if (page->cs_validated || page->cs_tainted) { | |
6142 | return; | |
6143 | } | |
6144 | ||
6145 | if (page->slid) { | |
6146 | panic("vm_page_validate_cs(%p): page is slid\n", page); | |
6147 | } | |
6148 | assert(!page->slid); | |
6149 | ||
6150 | #if CHECK_CS_VALIDATION_BITMAP | |
6151 | if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) { | |
6152 | page->cs_validated = TRUE; | |
6153 | page->cs_tainted = FALSE; | |
6154 | vm_cs_bitmap_validated++; | |
6155 | return; | |
6156 | } | |
6157 | #endif | |
6158 | vm_object_lock_assert_exclusive(page->object); | |
6159 | ||
6160 | object = page->object; | |
6161 | assert(object->code_signed); | |
6162 | offset = page->offset; | |
6163 | ||
6164 | busy_page = page->busy; | |
6165 | if (!busy_page) { | |
6166 | /* keep page busy while we map (and unlock) the VM object */ | |
6167 | page->busy = TRUE; | |
6168 | } | |
6169 | ||
6170 | /* | |
6171 | * Take a paging reference on the VM object | |
6172 | * to protect it from collapse or bypass, | |
6173 | * and keep it from disappearing too. | |
6174 | */ | |
6175 | vm_object_paging_begin(object); | |
6176 | ||
6177 | /* map the page in the kernel address space */ | |
6178 | ksize = PAGE_SIZE_64; | |
6179 | koffset = 0; | |
6180 | need_unmap = FALSE; | |
6181 | kr = vm_paging_map_object(page, | |
6182 | object, | |
6183 | offset, | |
6184 | VM_PROT_READ, | |
6185 | FALSE, /* can't unlock object ! */ | |
6186 | &ksize, | |
6187 | &koffset, | |
6188 | &need_unmap); | |
6189 | if (kr != KERN_SUCCESS) { | |
6190 | panic("vm_page_validate_cs: could not map page: 0x%x\n", kr); | |
6191 | } | |
6192 | kaddr = CAST_DOWN(vm_offset_t, koffset); | |
6193 | ||
6194 | /* validate the mapped page */ | |
6195 | vm_page_validate_cs_mapped(page, (const void *) kaddr); | |
6196 | ||
6197 | #if CHECK_CS_VALIDATION_BITMAP | |
6198 | if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) { | |
6199 | vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET ); | |
6200 | } | |
6201 | #endif | |
6202 | assert(page->busy); | |
6203 | assert(object == page->object); | |
6204 | vm_object_lock_assert_exclusive(object); | |
6205 | ||
6206 | if (!busy_page) { | |
6207 | PAGE_WAKEUP_DONE(page); | |
6208 | } | |
6209 | if (need_unmap) { | |
6210 | /* unmap the map from the kernel address space */ | |
6211 | vm_paging_unmap_object(object, koffset, koffset + ksize); | |
6212 | koffset = 0; | |
6213 | ksize = 0; | |
6214 | kaddr = 0; | |
6215 | } | |
6216 | vm_object_paging_end(object); | |
6217 | } | |
6218 | ||
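| /* | |
|  * Routine:	vm_page_validate_cs_mapped_chunk | |
|  * Purpose: | |
|  *	Validate a single CODE_SIGNING_CHUNK_SIZE-sized chunk of a page | |
|  *	that is already mapped at "kaddr".  The result is returned | |
|  *	through *validated_p and *tainted_p; unlike | |
|  *	vm_page_validate_cs_mapped(), the page's own cs_* bits are left | |
|  *	for the caller to update. | |
|  */ | |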
6219 | void | |
6220 | vm_page_validate_cs_mapped_chunk( | |
6221 | vm_page_t page, | |
6222 | const void *kaddr, | |
6223 | vm_offset_t chunk_offset, | |
6224 | boolean_t *validated_p, | |
6225 | unsigned *tainted_p) | |
6226 | { | |
6227 | vm_object_t object; | |
6228 | vm_object_offset_t offset, offset_in_page; | |
6229 | kern_return_t kr; | |
6230 | memory_object_t pager; | |
6231 | void *blobs; | |
6232 | boolean_t validated; | |
6233 | unsigned tainted; | |
6234 | ||
6235 | *validated_p = FALSE; | |
6236 | *tainted_p = 0; | |
6237 | ||
6238 | assert(page->busy); | |
6239 | vm_object_lock_assert_exclusive(page->object); | |
6240 | ||
6241 | if (!cs_validation) { | |
6242 | return; | |
6243 | } | |
6244 | ||
6245 | object = page->object; | |
6246 | assert(object->code_signed); | |
6247 | offset = page->offset; | |
6248 | ||
6249 | if (!object->alive || object->terminating || object->pager == NULL) { | |
6250 | /* | |
6251 | * The object is terminating and we don't have its pager | |
6252 | * so we can't validate the data... | |
6253 | */ | |
6254 | return; | |
6255 | } | |
6256 | /* | |
6257 | * Since we get here to validate a page that was brought in by | |
6258 | * the pager, we know that this pager is all set up and ready | |
6259 | * by now. | |
6260 | */ | |
6261 | assert(!object->internal); | |
6262 | assert(object->pager != NULL); | |
6263 | assert(object->pager_ready); | |
6264 | ||
6265 | pager = object->pager; | |
6266 | assert(object->paging_in_progress); | |
6267 | kr = vnode_pager_get_object_cs_blobs(pager, &blobs); | |
6268 | if (kr != KERN_SUCCESS) { | |
6269 | blobs = NULL; | |
6270 | } | |
6271 | ||
6272 | /* verify the signature for this chunk */ | |
6273 | offset_in_page = chunk_offset; | |
6274 | assert(offset_in_page < PAGE_SIZE); | |
6275 | assert((offset_in_page & (CODE_SIGNING_CHUNK_SIZE-1)) == 0); | |
6276 | ||
6277 | tainted = 0; | |
6278 | validated = cs_validate_page(blobs, | |
6279 | pager, | |
6280 | (object->paging_offset + | |
6281 | offset + | |
6282 | offset_in_page), | |
6283 | (const void *)((const char *)kaddr | |
6284 | + offset_in_page), | |
6285 | &tainted); | |
6286 | if (validated) { | |
6287 | *validated_p = TRUE; | |
6288 | } | |
6289 | if (tainted) { | |
6290 | *tainted_p = tainted; | |
6291 | } | |
6292 | } |