/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	The proverbial page-out daemon.
 */

#include <stdint.h>

#include <debug.h>
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/upl.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/pmap.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <san/kasan.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#if UPL_DEBUG
#include <libkern/OSDebug.h>
#endif

extern int cs_debug;

extern void mbuf_drain(boolean_t);

#if VM_PRESSURE_EVENTS

#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else /* CONFIG_JETSAM */
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif /* CONFIG_JETSAM */

extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;

extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
extern uint32_t memorystatus_jetsam_fg_band_waiters;

void vm_pressure_response(void);
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
#endif /* VM_PRESSURE_EVENTS */

thread_t  vm_pageout_scan_thread = THREAD_NULL;
boolean_t vps_dynamic_priority_enabled = FALSE;

#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#ifdef CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  4096
#endif
#endif

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF  100     /* number of pages to move to break deadlock */
#endif

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX         128UL   /* maximum pageouts on a given pageout queue */
#endif  /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT       1       /* milliseconds */
#endif  /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT       50      /* milliseconds */
#endif  /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT    100     /* milliseconds */
#endif  /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT        10      /* milliseconds */
#endif  /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT        10      /* milliseconds */
#endif  /* VM_PAGEOUT_SWAP_WAIT */


#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
#endif /* VM_PAGE_SPECULATIVE_TARGET */
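/*
 * Note: because the divisor is computed with integer division
 * (100 / vm_page_speculative_percentage), only percentages that divide
 * 100 evenly are exact.  For example, a speculative percentage of 5
 * yields (total) / 20, i.e. 5% of "total", while a percentage of 3
 * yields (total) / 33, which is closer to 3.03%.
 */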


/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 2)
#endif  /* VM_PAGE_INACTIVE_TARGET */

/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef VM_PAGE_FREE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
#endif
#endif  /* VM_PAGE_FREE_TARGET */

/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef VM_PAGE_FREE_MIN
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
#endif
#endif  /* VM_PAGE_FREE_MIN */
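/*
 * Worked example (non-embedded values): with 100000 pages available,
 * VM_PAGE_FREE_TARGET yields 15 + 100000/80 = 1265 pages and
 * VM_PAGE_FREE_MIN yields 10 + 100000/100 = 1010 pages.  When the
 * daemon computes its targets (outside this excerpt), the results are
 * capped by the VM_PAGE_FREE_*_LIMIT constants defined below.
 */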

#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_RESERVED_LIMIT     100
#define VM_PAGE_FREE_MIN_LIMIT          1500
#define VM_PAGE_FREE_TARGET_LIMIT       2000
#else
#define VM_PAGE_FREE_RESERVED_LIMIT     1700
#define VM_PAGE_FREE_MIN_LIMIT          3500
#define VM_PAGE_FREE_TARGET_LIMIT       4000
#endif

/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n)        \
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif  /* VM_PAGE_FREE_RESERVED */
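/*
 * With the default VM_PAGE_LAUNDRY_MAX of 128, this reserves
 * 6 * 128 = 768 pages plus the caller-supplied slack "n".
 */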

/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (i.e., put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000

#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif  /* VM_PAGE_REACTIVATE_LIMIT */
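/*
 * On non-embedded configurations the MAX() above makes
 * VM_PAGE_REACTIVATE_LIMIT_MAX a floor rather than a ceiling: the limit
 * is avail/20 or 20000 reactivations, whichever is larger, so it only
 * exceeds 20000 once "avail" passes 400000 pages.
 */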
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000

extern boolean_t hibernate_cleaning_in_progress;

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
	void                    *current_chead;
	char                    *scratch_buf;
	int                     id;
};

struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];


#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif

void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

void vm_tests(void); /* forward */

boolean_t vm_pageout_running = FALSE;

uint32_t vm_page_upl_tainted = 0;
uint32_t vm_page_iopl_tainted = 0;

#if !CONFIG_EMBEDDED
static boolean_t vm_pageout_waiter = FALSE;
#endif /* !CONFIG_EMBEDDED */


#if DEVELOPMENT || DEBUG
struct vm_pageout_debug vm_pageout_debug;
#endif
struct vm_pageout_vminfo vm_pageout_vminfo;
struct vm_pageout_state  vm_pageout_state;
struct vm_config         vm_config;

struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

int vm_upl_wait_for_pages = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t (*volatile consider_buffer_cache_collect)(int) = NULL;

int vm_debug_events = 0;

lck_grp_t vm_pageout_lck_grp;

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);

uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;

#endif

#if __AMP__
int vm_compressor_ebound = 1;
int vm_pgo_pbound = 0;
extern void thread_bind_cluster_type(char);
#endif /* __AMP__ */
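/*
 * On asymmetric multiprocessor (AMP) systems these flags are presumed to
 * control which core cluster the VM worker threads are bound to via
 * thread_bind_cluster_type(): vm_compressor_ebound for binding the
 * compressor threads to the efficiency cluster, and vm_pgo_pbound for
 * binding the pageout threads to the performance cluster.
 */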


/*
 * Routine:	vm_pageout_object_terminate
 * Purpose:
 *	Destroy the pageout_object, and perform all of the
 *	required cleanup actions.
 *
 * In/Out conditions:
 *	The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t object)
{
	vm_object_t shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!vm_page_queue_empty(&object->memq)) {
		vm_page_t          p, m;
		vm_object_offset_t offset;

		p = (vm_page_t) vm_page_queue_first(&object->memq);

		assert(p->vmp_private);
		assert(p->vmp_free_when_done);
		p->vmp_free_when_done = FALSE;
		assert(!p->vmp_cleaning);
		assert(!p->vmp_laundry);

		offset = p->vmp_offset;
		VM_PAGE_FREE(p);
		p = VM_PAGE_NULL;

		m = vm_page_lookup(shadow_object,
		    offset + object->vo_shadow_offset);

		if (m == VM_PAGE_NULL) {
			continue;
		}

		assert((m->vmp_dirty) || (m->vmp_precious) ||
		    (m->vmp_busy && m->vmp_cleaning));

		/*
		 * Handle the trusted pager throttle.
		 * Also decrement the burst throttle (if external).
		 */
		vm_page_lock_queues();
		if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
			vm_pageout_throttle_up(m);
		}

		/*
		 * Handle the "target" page(s). These pages are to be freed if
		 * successfully cleaned. Target pages are always busy, and are
		 * wired exactly once. The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		if (m->vmp_free_when_done) {
			assert(m->vmp_busy);
			assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
			assert(m->vmp_wire_count == 1);
			m->vmp_cleaning = FALSE;
			m->vmp_free_when_done = FALSE;
			/*
			 * Revoke all access to the page. Since the object is
			 * locked, and the page is busy, this prevents the page
			 * from being dirtied after the pmap_disconnect() call
			 * returns.
			 *
			 * Since the page is left "dirty" but "not modified", we
			 * can detect whether the page was redirtied during
			 * pageout by checking the modify state.
			 */
			if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			} else {
				m->vmp_dirty = FALSE;
			}

			if (m->vmp_dirty) {
				vm_page_unwire(m, TRUE);        /* reactivates */
				VM_STAT_INCR(reactivations);
				PAGE_WAKEUP_DONE(m);
			} else {
				vm_page_free(m);        /* clears busy, etc. */
			}
			vm_page_unlock_queues();
			continue;
		}
		/*
		 * Handle the "adjacent" pages. These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
			if (m->vmp_reference) {
				vm_page_activate(m);
			} else {
				vm_page_deactivate(m);
			}
		}
		if (m->vmp_overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 */
			if (m->vmp_busy) {
				/*
				 * We do not re-set m->vmp_dirty !
				 * The page was busy so no extraneous activity
				 * could have occurred. COPY_INTO is a read into the
				 * new pages. CLEAN_IN_PLACE does actually write
				 * out the pages but handling outside of this code
				 * will take care of resetting dirty. We clear the
				 * modify however for the Programmed I/O case.
				 */
				pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

				m->vmp_busy = FALSE;
				m->vmp_absent = FALSE;
			} else {
				/*
				 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
				 * Occurs when the original page was wired
				 * at the time of the list request
				 */
				assert(VM_PAGE_WIRED(m));
				vm_page_unwire(m, TRUE);        /* reactivates */
			}
			m->vmp_overwriting = FALSE;
		} else {
			m->vmp_dirty = FALSE;
		}
		m->vmp_cleaning = FALSE;

		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		PAGE_WAKEUP(m);
		vm_page_unlock_queues();
	}
	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_activity_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->resident_page_count == 0);
	return;
}

/*
 * Routine:	vm_pageclean_setup
 *
 * Purpose:	setup a page to be cleaned (made non-dirty), but not
 *		necessarily flushed from the VM page cache.
 *		This is accomplished by cleaning in place.
 *
 *		The page must not be busy, and new_object
 *		must be locked.
 *
 */
static void
vm_pageclean_setup(
	vm_page_t          m,
	vm_page_t          new_m,
	vm_object_t        new_object,
	vm_object_offset_t new_offset)
{
	assert(!m->vmp_busy);
#if 0
	assert(!m->vmp_cleaning);
#endif

	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

	/*
	 * Mark original page as cleaning in place.
	 */
	m->vmp_cleaning = TRUE;
	SET_PAGE_DIRTY(m, FALSE);
	m->vmp_precious = FALSE;

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->vmp_fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
	new_m->vmp_fictitious = FALSE;
	new_m->vmp_private = TRUE;
	new_m->vmp_free_when_done = TRUE;
	VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));

	vm_page_lockspin_queues();
	vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
	vm_page_unlock_queues();

	vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
	assert(!new_m->vmp_wanted);
	new_m->vmp_busy = FALSE;
}

/*
 * Routine:	vm_pageout_initialize_page
 * Purpose:
 *	Causes the specified page to be initialized in
 *	the appropriate memory object. This routine is used to push
 *	pages into a copy-object when they are modified in the
 *	permanent object.
 *
 *	The page is moved to a temporary object and paged out.
 *
 * In/out conditions:
 *	The page in question must not be on any pageout queues.
 *	The object to which it belongs must be locked.
 *	The page must be busy, but not hold a paging reference.
 *
 * Implementation:
 *	Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t m)
{
	vm_object_t        object;
	vm_object_offset_t paging_offset;
	memory_object_t    pager;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	object = VM_PAGE_OBJECT(m);

	assert(m->vmp_busy);
	assert(object->internal);

	/*
	 * Verify that we really want to clean this page
	 */
	assert(!m->vmp_absent);
	assert(!m->vmp_error);
	assert(m->vmp_dirty);

	/*
	 * Create a paging reference to let us play with the object.
	 */
	paging_offset = m->vmp_offset + object->paging_offset;

	if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
		panic("reservation without pageout?"); /* alan */

		VM_PAGE_FREE(m);
		vm_object_unlock(object);

		return;
	}

	/*
	 * If there's no pager, then we can't clean the page. This should
	 * never happen since this should be a copy object and therefore not
	 * an external object, so the pager should always be there.
	 */

	pager = object->pager;

	if (pager == MEMORY_OBJECT_NULL) {
		panic("missing pager for copy object");

		VM_PAGE_FREE(m);
		return;
	}

	/*
	 * set the page for future call to vm_fault_list_request
	 */
	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * keep the object from collapsing or terminating
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

	vm_object_lock(object);
	vm_object_paging_end(object);
}


/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;

int32_t  vmct_active = 0;
uint64_t vm_compressor_epoch_start = 0;
uint64_t vm_compressor_epoch_stop = 0;

typedef enum vmct_state_t {
	VMCT_IDLE,
	VMCT_AWAKENED,
	VMCT_ACTIVE,
} vmct_state_t;
vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
#endif


void
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t             object = VM_PAGE_OBJECT(m);
	struct vm_pageout_queue *q;

	VM_PAGE_CHECK(m);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(object);

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
	assert(!m->vmp_cleaning && !m->vmp_laundry);
	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);

	/*
	 * protect the object from collapse or termination
	 */
	vm_object_activity_begin(object);

	if (object->internal == TRUE) {
		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

		m->vmp_busy = TRUE;

		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	/*
	 * pgo_laundry count is tied to the laundry bit
	 */
	m->vmp_laundry = TRUE;
	q->pgo_laundry++;

	m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
	vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);

	if (q->pgo_idle == TRUE) {
		q->pgo_idle = FALSE;
		thread_wakeup((event_t) &q->pgo_pending);
	}
	VM_PAGE_CHECK(m);
}
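/*
 * Routing note: pages backed by internal (anonymous) objects are queued
 * on vm_pageout_queue_internal and drained by the compressor threads
 * (vm_pageout_iothread_internal); file-backed pages go to
 * vm_pageout_queue_external and are drained by
 * vm_pageout_iothread_external.
 */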


/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state. See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t m)
{
	struct vm_pageout_queue *q;
	vm_object_t             m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(m_object != VM_OBJECT_NULL);
	assert(m_object != kernel_object);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(m_object);

	if (m_object->internal == TRUE) {
		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
		vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
		m->vmp_q_state = VM_PAGE_NOT_ON_Q;

		VM_PAGE_ZERO_PAGEQ_ENTRY(m);

		vm_object_activity_end(m_object);

		VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
	}
	if (m->vmp_laundry == TRUE) {
		m->vmp_laundry = FALSE;
		q->pgo_laundry--;

		if (q->pgo_throttled == TRUE) {
			q->pgo_throttled = FALSE;
			thread_wakeup((event_t) &q->pgo_laundry);
		}
		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
			q->pgo_draining = FALSE;
			thread_wakeup((event_t) (&q->pgo_laundry + 1));
		}
		VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
	}
}


static void
vm_pageout_throttle_up_batch(
	struct vm_pageout_queue *q,
	int batch_cnt)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);

	q->pgo_laundry -= batch_cnt;

	if (q->pgo_throttled == TRUE) {
		q->pgo_throttled = FALSE;
		thread_wakeup((event_t) &q->pgo_laundry);
	}
	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
		q->pgo_draining = FALSE;
		thread_wakeup((event_t) (&q->pgo_laundry + 1));
	}
}


/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * record_memory_pressure() is called from compute_averages() and moves
 * "vm_pageout_stat_now" forward, to start accumulating the number of
 * reclaimed pages in a new vm_pageout_stat[] bucket; each bucket covers
 * one eighth of a second of history.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
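/*
 * Usage sketch (hypothetical caller, for illustration only): sample the
 * reclaim rate over the last 5 seconds without blocking:
 *
 *	unsigned int reclaimed, wanted;
 *
 *	if (mach_vm_pressure_monitor(FALSE, 5, &reclaimed, &wanted) ==
 *	    KERN_SUCCESS) {
 *		// "reclaimed" pages were freed over the last 5 seconds;
 *		// "wanted" pages still need to be reclaimed right now.
 *	}
 *
 * Passing wait_for_pressure == TRUE instead blocks the caller until
 * vm_page_free_count drops below vm_page_free_target.
 */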
#if DEVELOPMENT || DEBUG
#define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
#else
#define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
#endif
struct vm_pageout_stat {
	unsigned long vm_page_active_count;
	unsigned long vm_page_speculative_count;
	unsigned long vm_page_inactive_count;
	unsigned long vm_page_anonymous_count;

	unsigned long vm_page_free_count;
	unsigned long vm_page_wire_count;
	unsigned long vm_page_compressor_count;

	unsigned long vm_page_pages_compressed;
	unsigned long vm_page_pageable_internal_count;
	unsigned long vm_page_pageable_external_count;
	unsigned long vm_page_xpmapped_external_count;

	unsigned int pages_grabbed;
	unsigned int pages_freed;

	unsigned int pages_compressed;
	unsigned int pages_grabbed_by_compressor;
	unsigned int failed_compressions;

	unsigned int pages_evicted;
	unsigned int pages_purged;

	unsigned int considered;
	unsigned int considered_bq_internal;
	unsigned int considered_bq_external;

	unsigned int skipped_external;
	unsigned int filecache_min_reactivations;

	unsigned int freed_speculative;
	unsigned int freed_cleaned;
	unsigned int freed_internal;
	unsigned int freed_external;

	unsigned int cleaned_dirty_external;
	unsigned int cleaned_dirty_internal;

	unsigned int inactive_referenced;
	unsigned int inactive_nolock;
	unsigned int reactivation_limit_exceeded;
	unsigned int forced_inactive_reclaim;

	unsigned int throttled_internal_q;
	unsigned int throttled_external_q;

	unsigned int phantom_ghosts_found;
	unsigned int phantom_ghosts_added;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };

unsigned int vm_pageout_stat_now = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
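/*
 * These macros step backward/forward through vm_pageout_stats[] treated
 * as a circular buffer: with the DEVELOPMENT size of (30 * 8) + 1 = 241
 * buckets, VM_PAGEOUT_STAT_BEFORE(0) wraps to 240 and
 * VM_PAGEOUT_STAT_AFTER(240) wraps to 0.  At one bucket per eighth of a
 * second, 241 buckets retain just over 30 seconds of history.
 */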

#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 80; /* in eighths of a second */
#endif /* VM_PAGE_BUCKETS_CHECK */


void
record_memory_pressure(void);
void
record_memory_pressure(void)
{
	unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
	/* check the consistency of VM page buckets at regular interval */
	static int counter = 0;
	if ((++counter % vm_page_buckets_check_interval) == 0) {
		vm_page_buckets_check();
	}
#endif /* VM_PAGE_BUCKETS_CHECK */

	vm_pageout_state.vm_memory_pressure =
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;

	commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure);

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);

	bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));

	vm_pageout_stat_now = vm_pageout_next;
}


/*
 * IMPORTANT
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
 * it must be safe in the restricted stackshot context. Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}


/*
 * IMPORTANT:
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context. No blocking or locks are allowable
 * on that code path.
 */

kern_return_t
mach_vm_pressure_monitor(
	boolean_t    wait_for_pressure,
	unsigned int nsecs_monitored,
	unsigned int *pages_reclaimed_p,
	unsigned int *pages_wanted_p)
{
	wait_result_t wr;
	unsigned int  vm_pageout_then, vm_pageout_now;
	unsigned int  pages_reclaimed;
	unsigned int  units_of_monitor;

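	/*
	 * Each vm_pageout_stats[] bucket covers one eighth of a second of
	 * reclaim history, so a request for "nsecs_monitored" seconds of
	 * data walks back 8 * nsecs_monitored buckets.
	 */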
	units_of_monitor = 8 * nsecs_monitored;
	/*
	 * We don't take the vm_page_queue_lock here because we don't want
	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
	 * thread when it's trying to reclaim memory. We don't need fully
	 * accurate monitoring anyway...
	 */

	if (wait_for_pressure) {
		/* wait until there's memory pressure */
		while (vm_page_free_count >= vm_page_free_target) {
			wr = assert_wait((event_t) &vm_page_free_wanted,
			    THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}
			if (wr == THREAD_AWAKENED) {
				/*
				 * The memory pressure might have already
				 * been relieved but let's not block again
				 * and let's report that there was memory
				 * pressure at some point.
				 */
				break;
			}
		}
	}

	/* provide the number of pages the system wants to reclaim */
	if (pages_wanted_p != NULL) {
		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
	}

	if (pages_reclaimed_p == NULL) {
		return KERN_SUCCESS;
	}

	/* provide number of pages reclaimed in the last "nsecs_monitored" */
	vm_pageout_now = vm_pageout_stat_now;
	pages_reclaimed = 0;
	for (vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
	    vm_pageout_then != vm_pageout_now &&
	    units_of_monitor-- != 0;
	    vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
	}
	*pages_reclaimed_p = pages_reclaimed;

	return KERN_SUCCESS;
}


#if DEVELOPMENT || DEBUG

static void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;


void
vm_pageout_disconnect_all_pages()
{
	vm_page_lock_queues();

	if (vm_pageout_disconnect_all_pages_active == TRUE) {
		vm_page_unlock_queues();
		return;
	}
	vm_pageout_disconnect_all_pages_active = TRUE;
	vm_page_unlock_queues();

	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);

	vm_pageout_disconnect_all_pages_active = FALSE;
}


void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t   m;
	vm_object_t t_object = NULL;
	vm_object_t l_object = NULL;
	vm_object_t m_object = NULL;
	int         delayed_unlock = 0;
	int         try_failed_count = 0;
	int         disconnected_count = 0;
	int         paused_count = 0;
	int         object_locked_count = 0;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
	    q, qcount, 0, 0, 0);

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			/*
			 * the object associated with the candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();
				delayed_unlock = 0;

				paused_count++;

				t_object = m_object;
				continue;
			}
			object_locked_count++;

			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

			disconnected_count++;
		}
reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);

		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
	    q, disconnected_count, object_locked_count, paused_count, 0);
}

#endif


static void
vm_pageout_page_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_anonymous_pages_active = FALSE;


void
vm_pageout_anonymous_pages()
{
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_page_lock_queues();

		if (vm_pageout_anonymous_pages_active == TRUE) {
			vm_page_unlock_queues();
			return;
		}
		vm_pageout_anonymous_pages_active = TRUE;
		vm_page_unlock_queues();

		vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
		vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
		vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);

		if (VM_CONFIG_SWAP_IS_PRESENT) {
			vm_consider_swapping();
		}

		vm_page_lock_queues();
		vm_pageout_anonymous_pages_active = FALSE;
		vm_page_unlock_queues();
	}
}


void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t   m;
	vm_object_t t_object = NULL;
	vm_object_t l_object = NULL;
	vm_object_t m_object = NULL;
	int         delayed_unlock = 0;
	int         try_failed_count = 0;
	int         refmod_state;
	int         pmap_options;
	struct vm_pageout_queue *iq;
	ppnum_t     phys_page;


	iq = &vm_pageout_queue_internal;

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		if (VM_PAGE_Q_THROTTLED(iq)) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();

			thread_block(THREAD_CONTINUE_NULL);

			vm_page_lock_queues();
			delayed_unlock = 0;
			continue;
		}
		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			if (!m_object->internal) {
				goto reenter_pg_on_q;
			}

			/*
			 * the object associated with the candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();
				delayed_unlock = 0;

				t_object = m_object;
				continue;
			}
			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		phys_page = VM_PAGE_GET_PHYS_PAGE(m);

		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
			refmod_state = pmap_get_refmod(phys_page);

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}
		if (m->vmp_reference == TRUE) {
			m->vmp_reference = FALSE;
			pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			if (m->vmp_dirty || m->vmp_precious) {
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}
			refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		if (!m->vmp_dirty && !m->vmp_precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(m);
			vm_page_lock_queues();
			delayed_unlock = 0;

			goto next_pg;
		}
		if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
			if (!m_object->pager_initialized) {
				vm_page_unlock_queues();

				vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);

				if (!m_object->pager_initialized) {
					vm_object_compressor_pager_create(m_object);
				}

				vm_page_lock_queues();
				delayed_unlock = 0;
			}
			if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
				goto reenter_pg_on_q;
			}
			/*
			 * vm_object_compressor_pager_create will drop the object lock
			 * which means 'm' may no longer be valid to use
			 */
			continue;
		}
		/*
		 * we've already factored out pages in the laundry which
		 * means this page can't be on the pageout queue so it's
		 * safe to do the vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		vm_pageout_cluster(m);

		goto next_pg;

reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);
next_pg:
		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();
}


316670eb A |
1417 | /* |
1418 | * function in BSD to apply I/O throttle to the pageout thread | |
1419 | */ | |
1420 | extern void vm_pageout_io_throttle(void); | |
1421 | ||
0a7de745 A |
1422 | #define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \ |
1423 | MACRO_BEGIN \ | |
1424 | /* \ | |
1425 | * If a "reusable" page somehow made it back into \ | |
1426 | * the active queue, it's been re-used and is not \ | |
1427 | * quite re-usable. \ | |
1428 | * If the VM object was "all_reusable", consider it \ | |
1429 | * as "all re-used" instead of converting it to \ | |
1430 | * "partially re-used", which could be expensive. \ | |
1431 | */ \ | |
1432 | assert(VM_PAGE_OBJECT((m)) == (obj)); \ | |
1433 | if ((m)->vmp_reusable || \ | |
1434 | (obj)->all_reusable) { \ | |
1435 | vm_object_reuse_pages((obj), \ | |
1436 | (m)->vmp_offset, \ | |
1437 | (m)->vmp_offset + PAGE_SIZE_64, \ | |
1438 | FALSE); \ | |
1439 | } \ | |
1440 | MACRO_END | |
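/*
 * Illustrative usage sketch for the macro above -- an assumption about
 * how a caller would invoke it, not code from this file.  The sketch
 * assumes the caller already holds the lock on the page's object.
 */
#if 0	/* example only */
static inline void
example_normalize_reusable_page(vm_page_t m)	/* hypothetical helper */
{
	vm_object_t obj = VM_PAGE_OBJECT(m);	/* object lock held by caller */

	VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj);
}
#endif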
1441 | ||
1442 | ||
1443 | #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64 | |
1444 | #define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024 | |
1445 | ||
1446 | #define FCS_IDLE 0 | |
1447 | #define FCS_DELAYED 1 | |
1448 | #define FCS_DEADLOCK_DETECTED 2 | |
6d2010ae A |
1449 | |
1450 | struct flow_control { | |
0a7de745 A |
1451 | int state; |
1452 | mach_timespec_t ts; | |
6d2010ae A |
1453 | }; |
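/*
 * Summary of the flow-control state machine, derived from
 * vps_flow_control() and vps_flow_control_reset_deadlock_timer() below:
 *
 *	FCS_IDLE              -> FCS_DELAYED            internal pageout queue is
 *	                                                throttled; deadlock deadline
 *	                                                armed in flow_control.ts
 *	FCS_DELAYED           -> FCS_DEADLOCK_DETECTED  deadline passed with the
 *	                                                queue still throttled
 *	FCS_DEADLOCK_DETECTED -> FCS_DELAYED            deadlock-relief target has
 *	                                                drained; deadline re-armed
 *	(any state)           -> FCS_IDLE               clean pages become available
 *	                                                or the queue unthrottles
 */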
1454 | ||
d9a64523 | 1455 | |
39037602 | 1456 | #if CONFIG_BACKGROUND_QUEUE |
39037602 A |
1457 | uint64_t vm_pageout_rejected_bq_internal = 0; |
1458 | uint64_t vm_pageout_rejected_bq_external = 0; | |
d9a64523 | 1459 | uint64_t vm_pageout_skipped_bq_internal = 0; |
39037602 | 1460 | #endif |
5ba3f43e | 1461 | |
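/*
 * Note (derived from vps_choose_victim_page() below): at most this many
 * anonymous pages are reclaimed back-to-back before the scan considers
 * a file-backed page again.
 */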
0a7de745 | 1462 | #define ANONS_GRABBED_LIMIT 2 |
6d2010ae | 1463 | |
5ba3f43e | 1464 | |
d9a64523 | 1465 | #if 0 |
5ba3f43e | 1466 | static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *); |
d9a64523 | 1467 | #endif |
5ba3f43e A |
1468 | static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int); |
1469 | ||
0a7de745 A |
1470 | #define VM_PAGEOUT_PB_NO_ACTION 0 |
1471 | #define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1 | |
1472 | #define VM_PAGEOUT_PB_THREAD_YIELD 2 | |
5ba3f43e A |
1473 | |
1474 | ||
d9a64523 | 1475 | #if 0 |
5ba3f43e A |
1476 | static void |
1477 | vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq) | |
1478 | { | |
1479 | if (*local_freeq) { | |
1480 | vm_page_unlock_queues(); | |
1481 | ||
d9a64523 | 1482 | VM_DEBUG_CONSTANT_EVENT( |
5ba3f43e | 1483 | vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START, |
d9a64523 | 1484 | vm_page_free_count, 0, 0, 1); |
5ba3f43e A |
1485 | |
1486 | vm_page_free_list(*local_freeq, TRUE); | |
1487 | ||
0a7de745 A |
1488 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END, |
1489 | vm_page_free_count, *local_freed, 0, 1); | |
5ba3f43e A |
1490 | |
1491 | *local_freeq = NULL; | |
1492 | *local_freed = 0; | |
1493 | ||
1494 | vm_page_lock_queues(); | |
1495 | } else { | |
1496 | lck_mtx_yield(&vm_page_queue_lock); | |
1497 | } | |
1498 | *delayed_unlock = 1; | |
1499 | } | |
d9a64523 | 1500 | #endif |
5ba3f43e A |
1501 | |
1502 | ||
1503 | static void | |
1504 | vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock, | |
0a7de745 | 1505 | vm_page_t *local_freeq, int *local_freed, int action) |
5ba3f43e A |
1506 | { |
1507 | vm_page_unlock_queues(); | |
1508 | ||
1509 | if (*object != NULL) { | |
1510 | vm_object_unlock(*object); | |
1511 | *object = NULL; | |
1512 | } | |
5ba3f43e | 1513 | if (*local_freeq) { |
5ba3f43e | 1514 | vm_page_free_list(*local_freeq, TRUE); |
5ba3f43e A |
1515 | |
1516 | *local_freeq = NULL; | |
1517 | *local_freed = 0; | |
1518 | } | |
1519 | *delayed_unlock = 1; | |
1520 | ||
1521 | switch (action) { | |
5ba3f43e A |
1522 | case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER: |
1523 | vm_consider_waking_compactor_swapper(); | |
1524 | break; | |
1525 | case VM_PAGEOUT_PB_THREAD_YIELD: | |
1526 | thread_yield_internal(1); | |
1527 | break; | |
1528 | case VM_PAGEOUT_PB_NO_ACTION: | |
1529 | default: | |
1530 | break; | |
1531 | } | |
1532 | vm_page_lock_queues(); | |
1533 | } | |
1534 | ||
1535 | ||
d9a64523 A |
1536 | static struct vm_pageout_vminfo last; |
1537 | ||
1538 | uint64_t last_vm_page_pages_grabbed = 0; | |
1539 | ||
1540 | extern uint32_t c_segment_pages_compressed; | |
5c9f4661 | 1541 | |
d9a64523 A |
1542 | extern uint64_t shared_region_pager_reclaimed; |
1543 | extern struct memory_object_pager_ops shared_region_pager_ops; | |
5ba3f43e | 1544 | |
0a7de745 A |
1545 | void |
1546 | update_vm_info(void) | |
5ba3f43e | 1547 | { |
0a7de745 | 1548 | uint64_t tmp; |
5ba3f43e | 1549 | |
d9a64523 A |
1550 | vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count; |
1551 | vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count; | |
1552 | vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count; | |
1553 | vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count; | |
5ba3f43e | 1554 | |
d9a64523 A |
1555 | vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count; |
1556 | vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count; | |
1557 | vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT; | |
5ba3f43e | 1558 | |
d9a64523 A |
1559 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed; |
1560 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count; | |
1561 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count; | |
1562 | vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count; | |
1563 | ||
1564 | ||
1565 | tmp = vm_pageout_vminfo.vm_pageout_considered_page; | |
1566 | vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page); | |
1567 | last.vm_pageout_considered_page = tmp; | |
1568 | ||
1569 | tmp = vm_pageout_vminfo.vm_pageout_compressions; | |
1570 | vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions); | |
1571 | last.vm_pageout_compressions = tmp; | |
1572 | ||
1573 | tmp = vm_pageout_vminfo.vm_compressor_failed; | |
1574 | vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed); | |
1575 | last.vm_compressor_failed = tmp; | |
1576 | ||
1577 | tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed; | |
1578 | vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed); | |
1579 | last.vm_compressor_pages_grabbed = tmp; | |
1580 | ||
1581 | tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost; | |
1582 | vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost); | |
1583 | last.vm_phantom_cache_found_ghost = tmp; | |
1584 | ||
1585 | tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost; | |
1586 | vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost); | |
1587 | last.vm_phantom_cache_added_ghost = tmp; | |
1588 | ||
1589 | tmp = get_pages_grabbed_count(); | |
1590 | vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed); | |
1591 | last_vm_page_pages_grabbed = tmp; | |
1592 | ||
1593 | tmp = vm_pageout_vminfo.vm_page_pages_freed; | |
1594 | vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed); | |
1595 | last.vm_page_pages_freed = tmp; | |
1596 | ||
1597 | ||
1598 | if (vm_pageout_stats[vm_pageout_stat_now].considered) { | |
0a7de745 | 1599 | tmp = vm_pageout_vminfo.vm_pageout_pages_evicted; |
d9a64523 A |
1600 | vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted); |
1601 | last.vm_pageout_pages_evicted = tmp; | |
5ba3f43e | 1602 | |
0a7de745 | 1603 | tmp = vm_pageout_vminfo.vm_pageout_pages_purged; |
d9a64523 A |
1604 | vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged); |
1605 | last.vm_pageout_pages_purged = tmp; | |
5ba3f43e | 1606 | |
0a7de745 | 1607 | tmp = vm_pageout_vminfo.vm_pageout_freed_speculative; |
d9a64523 A |
1608 | vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative); |
1609 | last.vm_pageout_freed_speculative = tmp; | |
5ba3f43e | 1610 | |
d9a64523 A |
1611 | tmp = vm_pageout_vminfo.vm_pageout_freed_external; |
1612 | vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external); | |
1613 | last.vm_pageout_freed_external = tmp; | |
5ba3f43e | 1614 | |
d9a64523 A |
1615 | tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced; |
1616 | vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced); | |
1617 | last.vm_pageout_inactive_referenced = tmp; | |
5ba3f43e | 1618 | |
d9a64523 A |
1619 | tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external; |
1620 | vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external); | |
1621 | last.vm_pageout_scan_inactive_throttled_external = tmp; | |
5ba3f43e | 1622 | |
d9a64523 A |
1623 | tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external; |
1624 | vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external); | |
1625 | last.vm_pageout_inactive_dirty_external = tmp; | |
5ba3f43e | 1626 | |
d9a64523 A |
1627 | tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned; |
1628 | vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned); | |
1629 | last.vm_pageout_freed_cleaned = tmp; | |
5ba3f43e | 1630 | |
d9a64523 A |
1631 | tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock; |
1632 | vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock); | |
1633 | last.vm_pageout_inactive_nolock = tmp; | |
5ba3f43e | 1634 | |
d9a64523 A |
1635 | tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal; |
1636 | vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal); | |
1637 | last.vm_pageout_scan_inactive_throttled_internal = tmp; | |
5ba3f43e | 1638 | |
d9a64523 A |
1639 | tmp = vm_pageout_vminfo.vm_pageout_skipped_external; |
1640 | vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external); | |
1641 | last.vm_pageout_skipped_external = tmp; | |
5ba3f43e | 1642 | |
d9a64523 A |
1643 | tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded; |
1644 | vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded); | |
1645 | last.vm_pageout_reactivation_limit_exceeded = tmp; | |
5ba3f43e | 1646 | |
d9a64523 A |
1647 | tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim; |
1648 | vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim); | |
1649 | last.vm_pageout_inactive_force_reclaim = tmp; | |
1650 | ||
1651 | tmp = vm_pageout_vminfo.vm_pageout_freed_internal; | |
1652 | vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal); | |
1653 | last.vm_pageout_freed_internal = tmp; | |
1654 | ||
1655 | tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal; | |
1656 | vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal); | |
1657 | last.vm_pageout_considered_bq_internal = tmp; | |
1658 | ||
1659 | tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external; | |
1660 | vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external); | |
1661 | last.vm_pageout_considered_bq_external = tmp; | |
1662 | ||
1663 | tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated; | |
1664 | vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated); | |
1665 | last.vm_pageout_filecache_min_reactivated = tmp; | |
1666 | ||
1667 | tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal; | |
1668 | vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal); | |
1669 | last.vm_pageout_inactive_dirty_internal = tmp; | |
1670 | } | |
1671 | ||
1672 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE, | |
0a7de745 A |
1673 | vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count, |
1674 | vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count, | |
1675 | vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count, | |
1676 | vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count, | |
1677 | 0); | |
d9a64523 A |
1678 | |
1679 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE, | |
0a7de745 A |
1680 | vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count, |
1681 | vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count, | |
1682 | vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count, | |
1683 | 0, | |
1684 | 0); | |
d9a64523 A |
1685 | |
1686 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE, | |
0a7de745 A |
1687 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed, |
1688 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count, | |
1689 | vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count, | |
1690 | vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count, | |
1691 | 0); | |
d9a64523 A |
1692 | |
1693 | if (vm_pageout_stats[vm_pageout_stat_now].considered || | |
1694 | vm_pageout_stats[vm_pageout_stat_now].pages_compressed || | |
1695 | vm_pageout_stats[vm_pageout_stat_now].failed_compressions) { | |
d9a64523 | 1696 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE, |
0a7de745 A |
1697 | vm_pageout_stats[vm_pageout_stat_now].considered, |
1698 | vm_pageout_stats[vm_pageout_stat_now].freed_speculative, | |
1699 | vm_pageout_stats[vm_pageout_stat_now].freed_external, | |
1700 | vm_pageout_stats[vm_pageout_stat_now].inactive_referenced, | |
1701 | 0); | |
d9a64523 A |
1702 | |
1703 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE, | |
0a7de745 A |
1704 | vm_pageout_stats[vm_pageout_stat_now].throttled_external_q, |
1705 | vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external, | |
1706 | vm_pageout_stats[vm_pageout_stat_now].freed_cleaned, | |
1707 | vm_pageout_stats[vm_pageout_stat_now].inactive_nolock, | |
1708 | 0); | |
d9a64523 A |
1709 | |
1710 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE, | |
0a7de745 A |
1711 | vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q, |
1712 | vm_pageout_stats[vm_pageout_stat_now].pages_compressed, | |
1713 | vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor, | |
1714 | vm_pageout_stats[vm_pageout_stat_now].skipped_external, | |
1715 | 0); | |
d9a64523 A |
1716 | |
1717 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE, | |
0a7de745 A |
1718 | vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded, |
1719 | vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim, | |
1720 | vm_pageout_stats[vm_pageout_stat_now].failed_compressions, | |
1721 | vm_pageout_stats[vm_pageout_stat_now].freed_internal, | |
1722 | 0); | |
d9a64523 A |
1723 | |
1724 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE, | |
0a7de745 A |
1725 | vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal, |
1726 | vm_pageout_stats[vm_pageout_stat_now].considered_bq_external, | |
1727 | vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations, | |
1728 | vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal, | |
1729 | 0); | |
d9a64523 A |
1730 | } |
1731 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE, | |
0a7de745 A |
1732 | vm_pageout_stats[vm_pageout_stat_now].pages_grabbed, |
1733 | vm_pageout_stats[vm_pageout_stat_now].pages_freed, | |
1734 | vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found, | |
1735 | vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added, | |
1736 | 0); | |
5ba3f43e | 1737 | |
d9a64523 A |
1738 | record_memory_pressure(); |
1739 | } | |
1740 | ||
e8c3f781 | 1741 | extern boolean_t hibernation_vmqueues_inspection; |
d9a64523 | 1742 | |
cb323159 A |
1743 | /* |
1744 | * Return values for functions called by vm_pageout_scan | |
1745 | * that control its flow. | |
1746 | * | |
1747 | * PROCEED -- vm_pageout_scan will keep making forward progress. | |
1748 | * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns. | |
1749 | * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue. | |
1750 | */ | |
d9a64523 | 1751 | |
cb323159 A |
1752 | #define VM_PAGEOUT_SCAN_PROCEED (0) |
1753 | #define VM_PAGEOUT_SCAN_DONE_RETURN (1) | |
1754 | #define VM_PAGEOUT_SCAN_NEXT_ITERATION (2) | |
1755 | ||
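/*
 * Illustrative sketch of how these codes drive the caller -- an
 * assumption about the shape of vm_pageout_scan's main loop, not a
 * copy of it: each vps_* helper reports how the loop should continue.
 */
#if 0	/* example only */
	for (;;) {
		int ret;

		ret = vps_flow_control(&flow_control, &anons_grabbed,
		    &object, &delayed_unlock, &local_freeq, &local_freed,
		    &vm_pageout_deadlock_target, inactive_burst_count);

		if (ret == VM_PAGEOUT_SCAN_DONE_RETURN) {
			return;		/* page demand satisfied */
		}
		if (ret == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			continue;	/* restart the main loop */
		}
		/* VM_PAGEOUT_SCAN_PROCEED: keep making forward progress */
	}
#endif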
1756 | /* | |
1757 | * This function is called only from vm_pageout_scan and | |
1758 | * it moves overflow secluded pages (one at a time) to the |
1759 | * batched 'local' free Q or active Q. | |
1760 | */ | |
1761 | static void | |
1762 | vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed) | |
1763 | { | |
1764 | #if CONFIG_SECLUDED_MEMORY | |
1765 | /* | |
1766 | * Deal with secluded_q overflow. | |
1767 | */ | |
1768 | if (vm_page_secluded_count > vm_page_secluded_target) { | |
1769 | vm_page_t secluded_page; | |
d9a64523 | 1770 | |
e8c3f781 | 1771 | /* |
cb323159 A |
1772 | * SECLUDED_AGING_BEFORE_ACTIVE: |
1773 | * Excess secluded pages go to the active queue and | |
1774 | * will later go to the inactive queue. | |
e8c3f781 | 1775 | */ |
cb323159 A |
1776 | assert((vm_page_secluded_count_free + |
1777 | vm_page_secluded_count_inuse) == | |
1778 | vm_page_secluded_count); | |
1779 | secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded); | |
1780 | assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q); | |
1781 | ||
1782 | vm_page_queues_remove(secluded_page, FALSE); | |
1783 | assert(!secluded_page->vmp_fictitious); | |
1784 | assert(!VM_PAGE_WIRED(secluded_page)); | |
1785 | ||
1786 | if (secluded_page->vmp_object == 0) { | |
1787 | /* transfer to free queue */ | |
1788 | assert(secluded_page->vmp_busy); | |
1789 | secluded_page->vmp_snext = *local_freeq; | |
1790 | *local_freeq = secluded_page; | |
1791 | *local_freed += 1; | |
1792 | } else { | |
1793 | /* transfer to head of active queue */ | |
1794 | vm_page_enqueue_active(secluded_page, FALSE); | |
1795 | secluded_page = VM_PAGE_NULL; | |
1796 | } | |
e8c3f781 | 1797 | } |
cb323159 | 1798 | #else /* CONFIG_SECLUDED_MEMORY */ |
5ba3f43e | 1799 | |
cb323159 A |
1800 | #pragma unused(local_freeq) |
1801 | #pragma unused(local_freed) | |
d9a64523 | 1802 | |
cb323159 | 1803 | return; |
5ba3f43e | 1804 | |
cb323159 A |
1805 | #endif /* CONFIG_SECLUDED_MEMORY */ |
1806 | } | |
5ba3f43e | 1807 | |
cb323159 A |
1808 | /* |
1809 | * This function is called only from vm_pageout_scan and | |
1810 | * it initializes the loop targets for vm_pageout_scan(). | |
1811 | */ | |
1812 | static void | |
1813 | vps_init_page_targets(void) | |
1814 | { | |
1815 | /* | |
1816 | * LD TODO: Other page targets should be calculated here too. | |
1817 | */ | |
1818 | vm_page_anonymous_min = vm_page_inactive_target / 20; | |
5ba3f43e | 1819 | |
cb323159 A |
1820 | if (vm_pageout_state.vm_page_speculative_percentage > 50) { |
1821 | vm_pageout_state.vm_page_speculative_percentage = 50; | |
1822 | } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) { | |
1823 | vm_pageout_state.vm_page_speculative_percentage = 1; | |
d9a64523 | 1824 | } |
5ba3f43e | 1825 | |
cb323159 A |
1826 | vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count + |
1827 | vm_page_inactive_count); | |
1828 | } | |
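/*
 * Worked example for vps_init_page_targets() above, using illustrative
 * numbers and assuming VM_PAGE_SPECULATIVE_TARGET() evaluates to roughly
 * vm_page_speculative_percentage percent of its argument: with the
 * percentage clamped into [1, 50] above, a 5% setting against 400,000
 * active+inactive pages yields a speculative target of about 20,000 pages.
 */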
5ba3f43e | 1829 | |
6d2010ae | 1830 | /* |
cb323159 A |
1831 | * This function is called only from vm_pageout_scan and |
1832 | * it purges a single VM object at a time and will either |
1833 | * make vm_pageout_scan() restart the loop or keep moving forward. |
6d2010ae | 1834 | */ |
cb323159 A |
1835 | static int |
1836 | vps_purge_object() | |
1c79356b | 1837 | { |
cb323159 A |
1838 | int force_purge; |
1839 | ||
1840 | assert(available_for_purge >= 0); | |
1841 | force_purge = 0; /* no force-purging */ | |
fe8ab488 A |
1842 | |
1843 | #if VM_PRESSURE_EVENTS | |
39236c6e | 1844 | vm_pressure_level_t pressure_level; |
91447636 | 1845 | |
cb323159 | 1846 | pressure_level = memorystatus_vm_pressure_level; |
2d21ac55 | 1847 | |
cb323159 A |
1848 | if (pressure_level > kVMPressureNormal) { |
1849 | if (pressure_level >= kVMPressureCritical) { | |
1850 | force_purge = vm_pageout_state.memorystatus_purge_on_critical; | |
1851 | } else if (pressure_level >= kVMPressureUrgent) { | |
1852 | force_purge = vm_pageout_state.memorystatus_purge_on_urgent; | |
1853 | } else if (pressure_level >= kVMPressureWarning) { | |
1854 | force_purge = vm_pageout_state.memorystatus_purge_on_warning; | |
1855 | } | |
1856 | } | |
1857 | #endif /* VM_PRESSURE_EVENTS */ | |
1c79356b | 1858 | |
cb323159 A |
1859 | if (available_for_purge || force_purge) { |
1860 | memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START); | |
1c79356b | 1861 | |
cb323159 A |
1862 | VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0); |
1863 | if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) { | |
1864 | VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1); | |
1865 | VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0); | |
1866 | memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END); | |
5ba3f43e | 1867 | |
cb323159 A |
1868 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; |
1869 | } | |
1870 | VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1); | |
1871 | memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END); | |
1872 | } | |
d9a64523 | 1873 | |
cb323159 A |
1874 | return VM_PAGEOUT_SCAN_PROCEED; |
1875 | } | |
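/*
 * Note: memorystatus_purge_on_{warning,urgent,critical} act as the
 * force-purge budgets for the corresponding pressure levels; they are
 * maintained on the BSD side (assumption: tunable via the
 * kern.memorystatus_purge_on_* sysctls).
 */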
d9a64523 | 1876 | |
cb323159 A |
1877 | /* |
1878 | * This function is called only from vm_pageout_scan and | |
1879 | * it will try to age the next speculative Q if the oldest | |
1880 | * one is empty. | |
1881 | */ | |
1882 | static int | |
1883 | vps_age_speculative_queue(boolean_t force_speculative_aging) | |
1884 | { | |
1885 | #define DELAY_SPECULATIVE_AGE 1000 | |
2d21ac55 A |
1886 | |
1887 | /* | |
cb323159 A |
1888 | * try to pull pages from the aging bins... |
1889 | * see vm_page.h for an explanation of how | |
1890 | * this mechanism works | |
2d21ac55 | 1891 | */ |
cb323159 A |
1892 | boolean_t can_steal = FALSE; |
1893 | int num_scanned_queues; | |
1894 | static int delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop.*/ |
1895 | mach_timespec_t ts; | |
1896 | struct vm_speculative_age_q *aq; | |
1897 | struct vm_speculative_age_q *sq; | |
2d21ac55 | 1898 | |
cb323159 | 1899 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; |
2d21ac55 | 1900 | |
cb323159 | 1901 | aq = &vm_page_queue_speculative[speculative_steal_index]; |
91447636 | 1902 | |
cb323159 A |
1903 | num_scanned_queues = 0; |
1904 | while (vm_page_queue_empty(&aq->age_q) && | |
1905 | num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) { | |
1906 | speculative_steal_index++; | |
39037602 | 1907 | |
cb323159 A |
1908 | if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) { |
1909 | speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; | |
1910 | } | |
316670eb | 1911 | |
cb323159 A |
1912 | aq = &vm_page_queue_speculative[speculative_steal_index]; |
1913 | } | |
39236c6e | 1914 | |
cb323159 A |
1915 | if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) { |
1916 | /* | |
1917 | * XXX We've scanned all the speculative | |
1918 | * queues but still haven't found one | |
1919 | * that is not empty, even though | |
1920 | * vm_page_speculative_count is not 0. | |
1921 | */ | |
1922 | if (!vm_page_queue_empty(&sq->age_q)) { | |
1923 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; | |
1924 | } | |
1925 | #if DEVELOPMENT || DEBUG | |
1926 | panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count); | |
1927 | #endif | |
1928 | /* readjust... */ | |
1929 | vm_page_speculative_count = 0; | |
1930 | /* ... and continue */ | |
1931 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; | |
0a7de745 | 1932 | } |
316670eb | 1933 | |
cb323159 A |
1934 | if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) { |
1935 | can_steal = TRUE; | |
1936 | } else { | |
1937 | if (!delay_speculative_age) { | |
1938 | mach_timespec_t ts_fully_aged; | |
5ba3f43e | 1939 | |
cb323159 A |
1940 | ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000; |
1941 | ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000) | |
1942 | * 1000 * NSEC_PER_USEC; | |
1c79356b | 1943 | |
cb323159 | 1944 | ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts); |
1c79356b | 1945 | |
cb323159 A |
1946 | clock_sec_t sec; |
1947 | clock_nsec_t nsec; | |
1948 | clock_get_system_nanotime(&sec, &nsec); | |
1949 | ts.tv_sec = (unsigned int) sec; | |
1950 | ts.tv_nsec = nsec; | |
1951 | ||
1952 | if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) { | |
1953 | can_steal = TRUE; | |
1954 | } else { | |
1955 | delay_speculative_age++; | |
1956 | } | |
1957 | } else { | |
1958 | delay_speculative_age++; | |
1959 | if (delay_speculative_age == DELAY_SPECULATIVE_AGE) { | |
1960 | delay_speculative_age = 0; | |
1961 | } | |
0a7de745 | 1962 | } |
cb323159 A |
1963 | } |
1964 | if (can_steal == TRUE) { | |
1965 | vm_page_speculate_ageit(aq); | |
1966 | } | |
5ba3f43e | 1967 | |
cb323159 A |
1968 | return VM_PAGEOUT_SCAN_PROCEED; |
1969 | } | |
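/*
 * Worked example for the ts_fully_aged computation in
 * vps_age_speculative_queue() above, using hypothetical values: with
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q == 10 bins and
 * vm_page_speculative_q_age_ms == 500, a bin becomes stealable
 * (10 * 500) ms == 5 s after its age_ts (tv_sec += 5, tv_nsec += 0).
 */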
5ba3f43e | 1970 | |
cb323159 A |
1971 | /* |
1972 | * This function is called only from vm_pageout_scan and | |
1973 | * it evicts a single VM object from the cache. | |
1974 | */ | |
1975 | static int inline | |
1976 | vps_object_cache_evict(vm_object_t *object_to_unlock) | |
1977 | { | |
1978 | static int cache_evict_throttle = 0; | |
1979 | struct vm_speculative_age_q *sq; | |
1980 | ||
1981 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; | |
1982 | ||
1983 | if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) { | |
1984 | int pages_evicted; | |
1985 | ||
1986 | if (*object_to_unlock != NULL) { | |
1987 | vm_object_unlock(*object_to_unlock); | |
1988 | *object_to_unlock = NULL; | |
0a7de745 | 1989 | } |
cb323159 | 1990 | KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); |
39037602 | 1991 | |
cb323159 A |
1992 | pages_evicted = vm_object_cache_evict(100, 10); |
1993 | ||
1994 | KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0); | |
1995 | ||
1996 | if (pages_evicted) { | |
1997 | vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted; | |
1998 | ||
1999 | VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE, | |
2000 | vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0); | |
2001 | memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE); | |
39037602 | 2002 | |
39037602 | 2003 | /* |
cb323159 A |
2004 | * we just freed up to 100 pages, |
2005 | * so go back to the top of the main loop | |
2006 | * and re-evaluate the memory situation |
39037602 | 2007 | */ |
cb323159 A |
2008 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; |
2009 | } else { | |
2010 | cache_evict_throttle = 1000; | |
39037602 | 2011 | } |
cb323159 A |
2012 | } |
2013 | if (cache_evict_throttle) { | |
2014 | cache_evict_throttle--; | |
2015 | } | |
39037602 | 2016 | |
cb323159 A |
2017 | return VM_PAGEOUT_SCAN_PROCEED; |
2018 | } | |
2019 | ||
2020 | ||
2021 | /* | |
2022 | * This function is called only from vm_pageout_scan and | |
2023 | * it calculates the filecache min. that needs to be maintained | |
2024 | * as we start to steal pages. | |
2025 | */ | |
2026 | static void | |
2027 | vps_calculate_filecache_min(void) | |
2028 | { | |
2029 | int divisor = vm_pageout_state.vm_page_filecache_min_divisor; | |
3e170ce0 | 2030 | |
cb323159 A |
2031 | #if CONFIG_JETSAM |
2032 | /* | |
2033 | * don't let the filecache_min fall below 15% of available memory | |
2034 | * on systems with an active compressor that isn't nearing its | |
2035 | * limits w/r to accepting new data | |
2036 | * | |
2037 | * on systems w/o the compressor/swapper, the filecache is always | |
2038 | * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY | |
2039 | * since most (if not all) of the anonymous pages are in the | |
2040 | * throttled queue (which isn't counted as available) which | |
2041 | * effectively disables this filter | |
2042 | */ | |
2043 | if (vm_compressor_low_on_space() || divisor == 0) { | |
2044 | vm_pageout_state.vm_page_filecache_min = 0; | |
2045 | } else { | |
2046 | vm_pageout_state.vm_page_filecache_min = | |
2047 | ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; | |
2048 | } | |
2049 | #else | |
2050 | if (vm_compressor_out_of_space() || divisor == 0) { | |
2051 | vm_pageout_state.vm_page_filecache_min = 0; | |
2052 | } else { | |
1c79356b | 2053 | /* |
cb323159 | 2054 | * don't let the filecache_min fall below the specified critical level |
6d2010ae | 2055 | */ |
cb323159 A |
2056 | vm_pageout_state.vm_page_filecache_min = |
2057 | ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; | |
2058 | } | |
2059 | #endif | |
2060 | if (vm_page_free_count < (vm_page_free_reserved / 4)) { | |
2061 | vm_pageout_state.vm_page_filecache_min = 0; | |
2062 | } | |
2063 | } | |
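/*
 * Worked example for the computation in vps_calculate_filecache_min()
 * above (the divisor value is illustrative, not the shipped default):
 * with vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY * 10) / divisor,
 * a divisor of 66 keeps roughly 10/66 ~= 15% of available non-compressed
 * memory for the filecache, matching the 15% floor described in the
 * CONFIG_JETSAM comment.
 */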
55e303ae | 2064 | |
cb323159 A |
2065 | /* |
2066 | * This function is called only from vm_pageout_scan and | |
2067 | * it updates the flow control time to detect if vm_pageout_scan |
2068 | * isn't making progress. | |
2069 | */ | |
2070 | static void | |
2071 | vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control) | |
2072 | { | |
2073 | mach_timespec_t ts; | |
2074 | clock_sec_t sec; | |
2075 | clock_nsec_t nsec; | |
91447636 | 2076 | |
cb323159 A |
2077 | ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000; |
2078 | ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC; | |
2079 | clock_get_system_nanotime(&sec, &nsec); | |
2080 | flow_control->ts.tv_sec = (unsigned int) sec; | |
2081 | flow_control->ts.tv_nsec = nsec; | |
2082 | ADD_MACH_TIMESPEC(&flow_control->ts, &ts); | |
91447636 | 2083 | |
cb323159 | 2084 | flow_control->state = FCS_DELAYED; |
d9a64523 | 2085 | |
cb323159 A |
2086 | vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++; |
2087 | } | |
316670eb | 2088 | |
cb323159 A |
2089 | /* |
2090 | * This function is called only from vm_pageout_scan and | |
2091 | * it is the flow control logic of VM pageout scan which | |
2092 | * controls if it should block and for how long. | |
2093 | * Any blocking of vm_pageout_scan happens ONLY in this function. | |
2094 | */ | |
2095 | static int | |
2096 | vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock, | |
2097 | vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count) | |
2098 | { | |
2099 | boolean_t exceeded_burst_throttle = FALSE; | |
2100 | unsigned int msecs = 0; | |
2101 | uint32_t inactive_external_count; | |
2102 | mach_timespec_t ts; | |
2103 | struct vm_pageout_queue *iq; | |
2104 | struct vm_pageout_queue *eq; | |
2105 | struct vm_speculative_age_q *sq; | |
55e303ae | 2106 | |
cb323159 A |
2107 | iq = &vm_pageout_queue_internal; |
2108 | eq = &vm_pageout_queue_external; | |
2109 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; | |
2110 | ||
2111 | /* | |
2112 | * Sometimes we have to pause: | |
2113 | * 1) No inactive pages - nothing to do. | |
2114 | * 2) Loop control - no acceptable pages found on the inactive queue | |
2115 | * within the last vm_pageout_burst_inactive_throttle iterations | |
2116 | * 3) Flow control - default pageout queue is full | |
2117 | */ | |
2118 | if (vm_page_queue_empty(&vm_page_queue_inactive) && | |
2119 | vm_page_queue_empty(&vm_page_queue_anonymous) && | |
2120 | vm_page_queue_empty(&vm_page_queue_cleaned) && | |
2121 | vm_page_queue_empty(&sq->age_q)) { | |
2122 | VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1); | |
2123 | msecs = vm_pageout_state.vm_pageout_empty_wait; | |
2124 | } else if (inactive_burst_count >= | |
2125 | MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle, | |
2126 | (vm_page_inactive_count + | |
2127 | vm_page_speculative_count))) { | |
2128 | VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1); | |
2129 | msecs = vm_pageout_state.vm_pageout_burst_wait; | |
2130 | ||
2131 | exceeded_burst_throttle = TRUE; | |
2132 | } else if (VM_PAGE_Q_THROTTLED(iq) && | |
2133 | VM_DYNAMIC_PAGING_ENABLED()) { | |
2134 | clock_sec_t sec; | |
2135 | clock_nsec_t nsec; | |
2136 | ||
2137 | switch (flow_control->state) { | |
2138 | case FCS_IDLE: | |
2139 | if ((vm_page_free_count + *local_freed) < vm_page_free_target && | |
2140 | vm_pageout_state.vm_restricted_to_single_processor == FALSE) { | |
6d2010ae | 2141 | /* |
cb323159 A |
2142 | * since the compressor is running independently of vm_pageout_scan |
2143 | * let's not wait for it just yet... as long as we have a healthy supply | |
2144 | * of filecache pages to work with, let's keep stealing those. | |
6d2010ae | 2145 | */ |
cb323159 | 2146 | inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; |
6d2010ae | 2147 | |
cb323159 A |
2148 | if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min && |
2149 | (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) { | |
2150 | *anons_grabbed = ANONS_GRABBED_LIMIT; | |
2151 | VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1); | |
2152 | return VM_PAGEOUT_SCAN_PROCEED; | |
2153 | } | |
0b4e3aa0 | 2154 | } |
d9a64523 | 2155 | |
cb323159 A |
2156 | vps_flow_control_reset_deadlock_timer(flow_control); |
2157 | msecs = vm_pageout_state.vm_pageout_deadlock_wait; | |
d9a64523 | 2158 | |
cb323159 | 2159 | break; |
39236c6e | 2160 | |
cb323159 A |
2161 | case FCS_DELAYED: |
2162 | clock_get_system_nanotime(&sec, &nsec); | |
2163 | ts.tv_sec = (unsigned int) sec; | |
2164 | ts.tv_nsec = nsec; | |
6d2010ae | 2165 | |
cb323159 A |
2166 | if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) { |
2167 | /* | |
2168 | * the pageout thread for the default pager is potentially | |
2169 | * deadlocked since the | |
2170 | * default pager queue has been throttled for more than the | |
2171 | * allowable time... we need to move some clean pages or dirty | |
2172 | * pages belonging to the external pagers if they aren't throttled | |
2173 | * vm_page_free_wanted represents the number of threads currently | |
2174 | * blocked waiting for pages... we'll move one page for each of | |
2175 | * these plus a fixed amount to break the logjam... once we're done | |
2176 | * moving this number of pages, we'll re-enter the FCS_DELAYED state |
2177 | * with a new timeout target since we have no way of knowing | |
2178 | * whether we've broken the deadlock except through observation | |
2179 | * of the queue associated with the default pager... we need to | |
2180 | * stop moving pages and allow the system to run to see what | |
2181 | * state it settles into. | |
2182 | */ | |
2183 | ||
2184 | *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief + | |
2185 | vm_page_free_wanted + vm_page_free_wanted_privileged; | |
2186 | VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1); | |
2187 | flow_control->state = FCS_DEADLOCK_DETECTED; | |
2188 | thread_wakeup((event_t) &vm_pageout_garbage_collect); | |
2189 | return VM_PAGEOUT_SCAN_PROCEED; | |
39236c6e | 2190 | } |
cb323159 A |
2191 | /* |
2192 | * just resniff instead of trying | |
2193 | * to compute a new delay time... we're going to be | |
2194 | * awakened immediately upon a laundry completion, | |
2195 | * so we won't wait any longer than necessary | |
2196 | */ | |
2197 | msecs = vm_pageout_state.vm_pageout_idle_wait; | |
2198 | break; | |
fe8ab488 | 2199 | |
cb323159 A |
2200 | case FCS_DEADLOCK_DETECTED: |
2201 | if (*vm_pageout_deadlock_target) { | |
2202 | return VM_PAGEOUT_SCAN_PROCEED; | |
fe8ab488 A |
2203 | } |
2204 | ||
cb323159 A |
2205 | vps_flow_control_reset_deadlock_timer(flow_control); |
2206 | msecs = vm_pageout_state.vm_pageout_deadlock_wait; | |
fe8ab488 | 2207 | |
cb323159 | 2208 | break; |
2d21ac55 | 2209 | } |
cb323159 A |
2210 | } else { |
2211 | /* | |
2212 | * No need to pause... | |
2213 | */ | |
2214 | return VM_PAGEOUT_SCAN_PROCEED; | |
2215 | } | |
fe8ab488 | 2216 | |
cb323159 | 2217 | vm_pageout_scan_wants_object = VM_OBJECT_NULL; |
d9a64523 | 2218 | |
cb323159 A |
2219 | vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed, |
2220 | VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); | |
2d21ac55 | 2221 | |
cb323159 A |
2222 | if (vm_page_free_count >= vm_page_free_target) { |
2223 | /* | |
2224 | * we're here because | |
2225 | * 1) someone else freed up some pages while we had | |
2226 | * the queues unlocked above | |
2227 | * and we've hit one of the 3 conditions that | |
2228 | * cause us to pause the pageout scan thread | |
2229 | * | |
2230 | * since we already have enough free pages, | |
2231 | * let's avoid stalling and return normally | |
2232 | * | |
2233 | * before we return, make sure the pageout I/O threads | |
2234 | * are running throttled in case there are still requests | |
2235 | * in the laundry... since we have enough free pages | |
2236 | * we don't need the laundry to be cleaned in a timely | |
2237 | * fashion... so let's avoid interfering with foreground | |
2238 | * activity | |
2239 | * | |
2240 | * we don't want to hold vm_page_queue_free_lock when | |
2241 | * calling vm_pageout_adjust_eq_iothrottle (since it | |
2242 | * may cause other locks to be taken), we do the initial |
2243 | * check outside of the lock. Once we take the lock, | |
2244 | * we recheck the condition since it may have changed. | |
2245 | * if it has, no problem, we will make the threads | |
2246 | * non-throttled before actually blocking | |
2247 | */ | |
2248 | vm_pageout_adjust_eq_iothrottle(eq, TRUE); | |
2249 | } | |
2250 | lck_mtx_lock(&vm_page_queue_free_lock); | |
2d21ac55 | 2251 | |
cb323159 A |
2252 | if (vm_page_free_count >= vm_page_free_target && |
2253 | (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) { | |
2254 | return VM_PAGEOUT_SCAN_DONE_RETURN; | |
2255 | } | |
2256 | lck_mtx_unlock(&vm_page_queue_free_lock); | |
d9a64523 | 2257 | |
cb323159 A |
2258 | if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) { |
2259 | /* | |
2260 | * we're most likely about to block due to one of | |
2261 | * the 3 conditions that cause vm_pageout_scan to | |
2262 | * not be able to make forward progress w/r | |
2263 | * to providing new pages to the free queue, | |
2264 | * so unthrottle the I/O threads in case we | |
2265 | * have laundry to be cleaned... it needs | |
2266 | * to be completed ASAP. | |
2267 | * | |
2268 | * even if we don't block, we want the io threads | |
2269 | * running unthrottled since the sum of free + | |
2270 | * clean pages is still under our free target | |
2271 | */ | |
2272 | vm_pageout_adjust_eq_iothrottle(eq, FALSE); | |
2273 | } | |
2274 | if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) { | |
2275 | /* | |
2276 | * if we get here we're below our free target and | |
2277 | * we're stalling due to a full laundry queue or | |
2278 | * we don't have any inactive pages other than |
2279 | * those in the clean queue... | |
2280 | * however, we have pages on the clean queue that | |
2281 | * can be moved to the free queue, so let's not | |
2282 | * stall the pageout scan | |
2283 | */ | |
2284 | flow_control->state = FCS_IDLE; | |
2285 | return VM_PAGEOUT_SCAN_PROCEED; | |
2286 | } | |
2287 | if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) { | |
2288 | flow_control->state = FCS_IDLE; | |
2289 | return VM_PAGEOUT_SCAN_PROCEED; | |
2290 | } | |
b0d623f7 | 2291 | |
cb323159 | 2292 | VM_CHECK_MEMORYSTATUS; |
b0d623f7 | 2293 | |
cb323159 A |
2294 | if (flow_control->state != FCS_IDLE) { |
2295 | VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1); | |
2296 | } | |
2d21ac55 | 2297 | |
cb323159 A |
2298 | iq->pgo_throttled = TRUE; |
2299 | assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC); | |
55e303ae | 2300 | |
cb323159 | 2301 | counter(c_vm_pageout_scan_block++); |
3e170ce0 | 2302 | |
cb323159 | 2303 | vm_page_unlock_queues(); |
2d21ac55 | 2304 | |
cb323159 | 2305 | assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL); |
5ba3f43e | 2306 | |
cb323159 A |
2307 | VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START, |
2308 | iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); | |
2309 | memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START); | |
d9a64523 | 2310 | |
cb323159 | 2311 | thread_block(THREAD_CONTINUE_NULL); |
d9a64523 | 2312 | |
cb323159 A |
2313 | VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END, |
2314 | iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); | |
2315 | memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END); | |
6d2010ae | 2316 | |
cb323159 | 2317 | vm_page_lock_queues(); |
d9a64523 | 2318 | |
cb323159 | 2319 | iq->pgo_throttled = FALSE; |
6d2010ae | 2320 | |
cb323159 | 2321 | vps_init_page_targets(); |
6d2010ae | 2322 | |
cb323159 A |
2323 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; |
2324 | } | |
6d2010ae | 2325 | |
cb323159 A |
2326 | /* |
2327 | * This function is called only from vm_pageout_scan and | |
2328 | * it will find and return the most appropriate page to be | |
2329 | * reclaimed. | |
2330 | */ | |
2331 | static int | |
2332 | vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous, | |
2333 | boolean_t *is_page_from_bg_q, unsigned int reactivated_this_call) | |
2334 | { | |
2335 | vm_page_t m = NULL; | |
2336 | vm_object_t m_object = VM_OBJECT_NULL; | |
2337 | uint32_t inactive_external_count; | |
2338 | struct vm_speculative_age_q *sq; | |
2339 | struct vm_pageout_queue *iq; | |
2340 | int retval = VM_PAGEOUT_SCAN_PROCEED; | |
2341 | ||
2342 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; | |
2343 | iq = &vm_pageout_queue_internal; | |
2344 | ||
2345 | while (1) { | |
2346 | *is_page_from_bg_q = FALSE; | |
2347 | ||
2348 | m = NULL; | |
2349 | m_object = VM_OBJECT_NULL; | |
2350 | ||
2351 | if (VM_DYNAMIC_PAGING_ENABLED()) { | |
2352 | assert(vm_page_throttled_count == 0); | |
2353 | assert(vm_page_queue_empty(&vm_page_queue_throttled)); | |
2354 | } | |
d9a64523 | 2355 | |
04b8595b | 2356 | /* |
cb323159 A |
2357 | * Try for a clean-queue inactive page. |
2358 | * These are pages that vm_pageout_scan tried to steal earlier, but | |
2359 | * were dirty and had to be cleaned. Pick them up now that they are clean. | |
04b8595b | 2360 | */ |
cb323159 A |
2361 | if (!vm_page_queue_empty(&vm_page_queue_cleaned)) { |
2362 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); | |
2363 | ||
2364 | assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); | |
2365 | ||
2366 | break; | |
0a7de745 | 2367 | } |
91447636 | 2368 | |
1c79356b | 2369 | /* |
cb323159 A |
2370 | * The next most eligible pages are ones we paged in speculatively, |
2371 | * but which have not yet been touched and have been aged out. | |
1c79356b | 2372 | */ |
cb323159 A |
2373 | if (!vm_page_queue_empty(&sq->age_q)) { |
2374 | m = (vm_page_t) vm_page_queue_first(&sq->age_q); | |
91447636 | 2375 | |
cb323159 | 2376 | assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); |
39236c6e | 2377 | |
cb323159 | 2378 | if (!m->vmp_dirty || force_anonymous == FALSE) { |
91447636 | 2379 | break; |
cb323159 A |
2380 | } else { |
2381 | m = NULL; | |
2382 | } | |
2383 | } | |
d9a64523 | 2384 | |
cb323159 A |
2385 | #if CONFIG_BACKGROUND_QUEUE |
2386 | if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) { | |
2387 | vm_object_t bg_m_object = NULL; | |
91447636 | 2388 | |
cb323159 A |
2389 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background); |
2390 | ||
2391 | bg_m_object = VM_PAGE_OBJECT(m); | |
2392 | ||
2393 | if (!VM_PAGE_PAGEABLE(m)) { | |
91447636 | 2394 | /* |
cb323159 A |
2395 | * This page is on the background queue |
2396 | * but not on a pageable queue. This is | |
2397 | * likely a transient state and whoever | |
2398 | * took it out of its pageable queue | |
2399 | * will likely put it back on a pageable | |
2400 | * queue soon but we can't deal with it | |
2401 | * at this point, so let's ignore this | |
2402 | * page. | |
91447636 | 2403 | */ |
cb323159 A |
2404 | } else if (force_anonymous == FALSE || bg_m_object->internal) { |
2405 | if (bg_m_object->internal && | |
2406 | (VM_PAGE_Q_THROTTLED(iq) || | |
2407 | vm_compressor_out_of_space() == TRUE || | |
2408 | vm_page_free_count < (vm_page_free_reserved / 4))) { | |
2409 | vm_pageout_skipped_bq_internal++; | |
2410 | } else { | |
2411 | *is_page_from_bg_q = TRUE; | |
1c79356b | 2412 | |
cb323159 A |
2413 | if (bg_m_object->internal) { |
2414 | vm_pageout_vminfo.vm_pageout_considered_bq_internal++; | |
2415 | } else { | |
2416 | vm_pageout_vminfo.vm_pageout_considered_bq_external++; | |
2417 | } | |
2418 | break; | |
0a7de745 | 2419 | } |
91447636 | 2420 | } |
cb323159 A |
2421 | } |
2422 | #endif /* CONFIG_BACKGROUND_QUEUE */ | |
d9a64523 | 2423 | |
cb323159 | 2424 | inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; |
fe8ab488 | 2425 | |
cb323159 A |
2426 | if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) || |
2427 | (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) { | |
2428 | *grab_anonymous = TRUE; | |
2429 | *anons_grabbed = 0; | |
0b4e3aa0 | 2430 | |
cb323159 A |
2431 | vm_pageout_vminfo.vm_pageout_skipped_external++; |
2432 | goto want_anonymous; | |
2433 | } | |
2434 | *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min); | |
d9a64523 | 2435 | |
cb323159 A |
2436 | #if CONFIG_JETSAM |
2437 | /* If the file-backed pool has accumulated | |
2438 | * significantly more pages than the jetsam | |
2439 | * threshold, prefer to reclaim those | |
2440 | * inline to minimise compute overhead of reclaiming | |
2441 | * anonymous pages. | |
2442 | * This calculation does not account for the CPU local | |
2443 | * external page queues, as those are expected to be | |
2444 | * much smaller relative to the global pools. | |
2445 | */ | |
2446 | ||
2447 | struct vm_pageout_queue *eq = &vm_pageout_queue_external; | |
2448 | ||
2449 | if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) { | |
2450 | if (vm_page_pageable_external_count > | |
2451 | vm_pageout_state.vm_page_filecache_min) { | |
2452 | if ((vm_page_pageable_external_count * | |
2453 | vm_pageout_memorystatus_fb_factor_dr) > | |
2454 | (memorystatus_available_pages_critical * | |
2455 | vm_pageout_memorystatus_fb_factor_nr)) { | |
2456 | *grab_anonymous = FALSE; | |
2457 | ||
2458 | VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1); | |
2459 | } | |
316670eb | 2460 | } |
cb323159 A |
2461 | if (*grab_anonymous) { |
2462 | VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1); | |
d9a64523 | 2463 | } |
cb323159 A |
2464 | } |
2465 | #endif /* CONFIG_JETSAM */ | |
d9a64523 | 2466 | |
cb323159 A |
2467 | want_anonymous: |
2468 | if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) { | |
2469 | if (!vm_page_queue_empty(&vm_page_queue_inactive)) { | |
2470 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); | |
2471 | ||
2472 | assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); | |
2473 | *anons_grabbed = 0; | |
2474 | ||
2475 | if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) { | |
2476 | if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { | |
2477 | if ((++reactivated_this_call % 100)) { | |
2478 | vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++; | |
2479 | ||
2480 | vm_page_activate(m); | |
2481 | VM_STAT_INCR(reactivations); | |
2482 | #if CONFIG_BACKGROUND_QUEUE | |
2483 | #if DEVELOPMENT || DEBUG | |
2484 | if (*is_page_from_bg_q == TRUE) { | |
2485 | if (m_object->internal) { | |
2486 | vm_pageout_rejected_bq_internal++; | |
2487 | } else { | |
2488 | vm_pageout_rejected_bq_external++; | |
2489 | } | |
2490 | } | |
2491 | #endif /* DEVELOPMENT || DEBUG */ | |
2492 | #endif /* CONFIG_BACKGROUND_QUEUE */ | |
2493 | vm_pageout_state.vm_pageout_inactive_used++; | |
2494 | ||
2495 | m = NULL; | |
2496 | retval = VM_PAGEOUT_SCAN_NEXT_ITERATION; | |
2497 | ||
2498 | break; | |
2499 | } | |
6d2010ae | 2500 | |
cb323159 A |
2501 | /* |
2502 | * steal 1% of the file backed pages even if | |
2503 | * we are under the limit that has been set | |
2504 | * for a healthy filecache | |
2505 | */ | |
2506 | } | |
2507 | } | |
2508 | break; | |
0a7de745 | 2509 | } |
cb323159 A |
2510 | } |
2511 | if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { | |
2512 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); | |
2513 | ||
2514 | assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); | |
2515 | *anons_grabbed += 1; | |
2516 | ||
2517 | break; | |
2518 | } | |
316670eb | 2519 | |
cb323159 A |
2520 | m = NULL; |
2521 | } | |
d9a64523 | 2522 | |
cb323159 | 2523 | *victim_page = m; |
1c79356b | 2524 | |
cb323159 A |
2525 | return retval; |
2526 | } | |
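/*
 * Recap of the selection order implemented in vps_choose_victim_page()
 * above:
 *	1) cleaned queue -- pages laundered on an earlier pass
 *	2) aged-out speculative pages
 *	3) background queue, when vm_page_background_count exceeds its target
 *	4) file-backed inactive vs. anonymous pages, balanced by
 *	   vm_page_filecache_min and ANONS_GRABBED_LIMIT
 */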
2d21ac55 | 2527 | |
cb323159 A |
2528 | /* |
2529 | * This function is called only from vm_pageout_scan and | |
2530 | * it will put a page back on the active/inactive queue | |
2531 | * if we can't reclaim it for some reason. | |
2532 | */ | |
2533 | static void | |
2534 | vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q) | |
2535 | { | |
2536 | if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { | |
2537 | vm_page_enqueue_inactive(m, FALSE); | |
2538 | } else { | |
2539 | vm_page_activate(m); | |
2540 | } | |
b0d623f7 | 2541 | |
cb323159 A |
2542 | #if CONFIG_BACKGROUND_QUEUE |
2543 | #if DEVELOPMENT || DEBUG | |
2544 | vm_object_t m_object = VM_PAGE_OBJECT(m); | |
6d2010ae | 2545 | |
cb323159 A |
2546 | if (page_from_bg_q == TRUE) { |
2547 | if (m_object->internal) { | |
2548 | vm_pageout_rejected_bq_internal++; | |
2549 | } else { | |
2550 | vm_pageout_rejected_bq_external++; | |
2551 | } | |
2552 | } | |
2553 | #endif /* DEVELOPMENT || DEBUG */ | |
2554 | #endif /* CONFIG_BACKGROUND_QUEUE */ | |
2555 | } | |
91447636 | 2556 | |
cb323159 A |
2557 | /* |
2558 | * This function is called only from vm_pageout_scan and | |
2559 | * it will try to grab the victim page's VM object (m_object) | |
2560 | * which differs from the previous victim page's object (object). | |
2561 | */ | |
2562 | static int | |
2563 | vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q) | |
2564 | { | |
2565 | struct vm_speculative_age_q *sq; | |
6d2010ae | 2566 | |
cb323159 | 2567 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; |
91447636 | 2568 | |
cb323159 A |
2569 | /* |
2570 | * the object associated with the candidate page is |
2571 | * different from the one we were just working | |
2572 | * with... dump the lock if we still own it | |
2573 | */ | |
2574 | if (*object != NULL) { | |
2575 | vm_object_unlock(*object); | |
2576 | *object = NULL; | |
2577 | } | |
2578 | /* | |
2579 | * Try to lock object; since we've already got the |
2580 | * page queues lock, we can only 'try' for this one. | |
2581 | * if the 'try' fails, we need to do a mutex_pause | |
2582 | * to allow the owner of the object lock a chance to | |
2583 | * run... otherwise, we're likely to trip over this | |
2584 | * object in the same state as we work our way through | |
2585 | * the queue... clumps of pages associated with the same | |
2586 | * object are fairly typical on the inactive and active queues | |
2587 | */ | |
2588 | if (!vm_object_lock_try_scan(m_object)) { | |
2589 | vm_page_t m_want = NULL; | |
2590 | ||
2591 | vm_pageout_vminfo.vm_pageout_inactive_nolock++; | |
2592 | ||
2593 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { | |
2594 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1); | |
2595 | } | |
2596 | ||
2597 | pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m)); | |
2598 | ||
2599 | m->vmp_reference = FALSE; | |
2600 | ||
2601 | if (!m_object->object_is_shared_cache) { | |
2602 | /* | |
2603 | * don't apply this optimization if this is the shared cache | |
2604 | * object, it's too easy to get rid of very hot and important | |
2605 | * pages... | |
2606 | * m->vmp_object must be stable since we hold the page queues lock... | |
2607 | * we can update the scan_collisions field sans the object lock | |
2608 | * since it is a separate field and this is the only spot that does | |
2609 | * a read-modify-write operation and it is never executed concurrently... | |
2610 | * we can asynchronously set this field to 0 when creating a UPL, so it | |
2611 | * is possible for the value to be a bit non-deterministic, but that's ok |
2612 | * since it's only used as a hint | |
2613 | */ | |
2614 | m_object->scan_collisions = 1; | |
2615 | } | |
2616 | if (!vm_page_queue_empty(&vm_page_queue_cleaned)) { | |
2617 | m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); | |
2618 | } else if (!vm_page_queue_empty(&sq->age_q)) { | |
2619 | m_want = (vm_page_t) vm_page_queue_first(&sq->age_q); | |
2620 | } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) && | |
2621 | !vm_page_queue_empty(&vm_page_queue_inactive)) { | |
2622 | m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); | |
2623 | } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { | |
2624 | m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); | |
2625 | } | |
2626 | ||
2627 | /* | |
2628 | * this is the next object we're going to be interested in | |
2629 | * try to make sure its available after the mutex_pause | |
2630 | * returns control | |
2631 | */ | |
2632 | if (m_want) { | |
2633 | vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want); | |
2634 | } | |
2635 | ||
2636 | vps_requeue_page(m, page_prev_q_state, page_from_bg_q); | |
2637 | ||
2638 | return VM_PAGEOUT_SCAN_NEXT_ITERATION; | |
2639 | } else { | |
2640 | *object = m_object; | |
2641 | vm_pageout_scan_wants_object = VM_OBJECT_NULL; | |
2642 | } | |
2643 | ||
2644 | return VM_PAGEOUT_SCAN_PROCEED; | |
2645 | } | |
2646 | ||
2647 | /* | |
2648 | * This function is called only from vm_pageout_scan and | |
2649 | * it notices that pageout scan may be rendered ineffective | |
2650 | * due to an FS deadlock and will jetsam a process if possible. |
2651 | * If jetsam isn't supported, it'll move the page to the active | |
2652 | * queue to try and get some different pages pushed onwards so | |
2653 | * we can try to get out of this scenario. | |
2654 | */ | |
2655 | static void | |
2656 | vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit, | |
2657 | int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q) | |
2658 | { | |
2659 | struct vm_pageout_queue *eq; | |
2660 | vm_object_t cur_object = VM_OBJECT_NULL; | |
2661 | ||
2662 | cur_object = *object; | |
2663 | ||
2664 | eq = &vm_pageout_queue_external; | |
2665 | ||
2666 | if (cur_object->internal == FALSE) { | |
2667 | /* | |
2668 | * we need to break up the following potential deadlock case... | |
2669 | * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written. | |
2670 | * b) The thread doing the writing is waiting for pages while holding the truncate lock | |
2671 | * c) Most of the pages in the inactive queue belong to this file. | |
2672 | * | |
2673 | * we are potentially in this deadlock because... | |
2674 | * a) the external pageout queue is throttled | |
2675 | * b) we're done with the active queue and moved on to the inactive queue | |
2676 | * c) we've got a dirty external page | |
2677 | * | |
2678 | * since we don't know the reason for the external pageout queue being throttled we | |
2679 | * must suspect that we are deadlocked, so move the current page onto the active queue | |
2680 | * in an effort to cause a page from the active queue to 'age' to the inactive queue | |
2681 | * | |
2682 | * if we don't have jetsam configured (i.e. we have a dynamic pager), set | |
2683 | * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous | |
2684 | * pool the next time we select a victim page... if we can make enough new free pages, | |
2685 | * the deadlock will break, the external pageout queue will empty and it will no longer | |
2686 | * be throttled | |
2687 | * | |
2688 | * if we have jetsam configured, keep a count of the pages reactivated this way so | |
2689 | * that we can try to find clean pages in the active/inactive queues before | |
2690 | * deciding to jetsam a process | |
2691 | */ | |
2692 | vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++; | |
2693 | ||
2694 | vm_page_check_pageable_safe(m); | |
2695 | assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); | |
2696 | vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq); | |
2697 | m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; | |
2698 | vm_page_active_count++; | |
2699 | vm_page_pageable_external_count++; | |
2700 | ||
2701 | vm_pageout_adjust_eq_iothrottle(eq, FALSE); | |
2702 | ||
2703 | #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM | |
2704 | ||
2705 | #pragma unused(force_anonymous) | |
2706 | ||
2707 | *vm_pageout_inactive_external_forced_reactivate_limit -= 1; | |
2708 | ||
2709 | if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) { | |
2710 | *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count; | |
2711 | /* | |
2712 | * Possible deadlock scenario so request jetsam action | |
2713 | */ | |
2714 | ||
2715 | assert(cur_object); | |
2716 | vm_object_unlock(cur_object); | |
2717 | ||
2718 | cur_object = VM_OBJECT_NULL; | |
2719 | ||
2720 | /* | |
2721 | * VM pageout scan needs to know we have dropped this lock and so set the | |
2722 | * object variable we got passed in to NULL. | |
2723 | */ | |
2724 | *object = VM_OBJECT_NULL; | |
2725 | ||
2726 | vm_page_unlock_queues(); | |
2727 | ||
2728 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START, | |
2729 | vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); | |
2730 | ||
2731 | /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */ | |
2732 | if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) { | |
2733 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1); | |
2734 | } | |
2735 | ||
2736 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, | |
2737 | vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); | |
2738 | ||
2739 | vm_page_lock_queues(); | |
2740 | *delayed_unlock = 1; | |
2741 | } | |
2742 | #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */ | |
2743 | ||
2744 | #pragma unused(vm_pageout_inactive_external_forced_reactivate_limit) | |
2745 | #pragma unused(delayed_unlock) | |
2746 | ||
2747 | *force_anonymous = TRUE; | |
2748 | #endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */ | |
2749 | } else { | |
2750 | vm_page_activate(m); | |
2751 | VM_STAT_INCR(reactivations); | |
2752 | ||
2753 | #if CONFIG_BACKGROUND_QUEUE | |
2754 | #if DEVELOPMENT || DEBUG | |
2755 | if (is_page_from_bg_q == TRUE) { | |
2756 | if (cur_object->internal) { | |
2757 | vm_pageout_rejected_bq_internal++; | |
2758 | } else { | |
2759 | vm_pageout_rejected_bq_external++; | |
2760 | } | |
2761 | } | |
2762 | #endif /* DEVELOPMENT || DEBUG */ | |
2763 | #endif /* CONFIG_BACKGROUND_QUEUE */ | |
2764 | ||
2765 | vm_pageout_state.vm_pageout_inactive_used++; | |
2766 | } | |
2767 | } | |
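/*
 * To summarize the two arms above: a throttled external (file-backed)
 * page is force-reactivated and, once the reactivation budget runs
 * out, may trigger a jetsam kill (or set force_anonymous on configs
 * without jetsam); a throttled internal page is simply reactivated
 * and counted against vm_pageout_inactive_used.
 */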
2768 | ||
2769 | ||
2770 | void | |
2771 | vm_page_balance_inactive(int max_to_move) | |
2772 | { | |
2773 | vm_page_t m; | |
2774 | ||
2775 | LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); | |
2776 | ||
2777 | if (hibernation_vmqueues_inspection == TRUE) { | |
2778 | /* | |
2779 | * It is likely that the hibernation code path is | |
2780 | * dealing with these very queues as we are about | |
2781 | * to move pages around in/from them and completely | |
2782 | * change the linkage of the pages. | |
2783 | * | |
2784 | * And so we skip the rebalancing of these queues. | |
2785 | */ | |
2786 | return; | |
2787 | } | |
2788 | vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count + | |
2789 | vm_page_inactive_count + | |
2790 | vm_page_speculative_count); | |
2791 | ||
2792 | while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) { | |
2793 | VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1); | |
2794 | ||
2795 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); | |
2796 | ||
2797 | assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q); | |
2798 | assert(!m->vmp_laundry); | |
2799 | assert(VM_PAGE_OBJECT(m) != kernel_object); | |
2800 | assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr); | |
2801 | ||
2802 | DTRACE_VM2(scan, int, 1, (uint64_t *), NULL); | |
2803 | ||
2804 | /* | |
2805 | * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise... | |
2806 | * | |
2807 | * a TLB flush isn't really needed here since at worst we'll miss the reference bit being | |
2808 | * updated in the PTE if a remote processor still has this mapping cached in its TLB when the | |
2809 | * new reference happens. If no further references happen on the page after that remote TLB flushes | |
2810 | * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue | |
2811 | * by pageout_scan, which is just fine since the last reference would have happened quite far | |
2812 | * in the past (TLB caches don't hang around for very long), and of course could just as easily | |
2813 | * have happened before we moved the page | |
2814 | */ | |
2815 | if (m->vmp_pmapped == TRUE) { | |
2816 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); | |
2817 | } | |
2818 | ||
2819 | /* | |
2820 | * The page might be absent or busy, | |
2821 | * but vm_page_deactivate can handle that. | |
2822 | * FALSE indicates that we don't want a H/W clear of the reference bit | |
2823 | */ | |
2824 | vm_page_deactivate_internal(m, FALSE); | |
2825 | } | |
2826 | } | |
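/*
 * In steady state this is driven from the scan loop in
 * vm_pageout_scan() below, which calls vm_page_balance_inactive(1)
 * on every iteration to trickle pages from the head of the active
 * queue whenever (inactive + speculative) is below the target.
 */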
2827 | ||
2828 | ||
2829 | /* | |
2830 | * vm_pageout_scan does the dirty work for the pageout daemon. | |
2831 | * It returns with both vm_page_queue_free_lock and vm_page_queue_lock | |
2832 | * held and vm_page_free_wanted == 0. | |
2833 | */ | |
2834 | void | |
2835 | vm_pageout_scan(void) | |
2836 | { | |
2837 | unsigned int loop_count = 0; | |
2838 | unsigned int inactive_burst_count = 0; | |
2839 | unsigned int reactivated_this_call; | |
2840 | unsigned int reactivate_limit; | |
2841 | vm_page_t local_freeq = NULL; | |
2842 | int local_freed = 0; | |
2843 | int delayed_unlock; | |
2844 | int delayed_unlock_limit = 0; | |
2845 | int refmod_state = 0; | |
2846 | int vm_pageout_deadlock_target = 0; | |
2847 | struct vm_pageout_queue *iq; | |
2848 | struct vm_pageout_queue *eq; | |
2849 | struct vm_speculative_age_q *sq; | |
2850 | struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } }; | |
2851 | boolean_t inactive_throttled = FALSE; | |
2852 | vm_object_t object = NULL; | |
2853 | uint32_t inactive_reclaim_run; | |
2854 | boolean_t grab_anonymous = FALSE; | |
2855 | boolean_t force_anonymous = FALSE; | |
2856 | boolean_t force_speculative_aging = FALSE; | |
2857 | int anons_grabbed = 0; | |
2858 | int page_prev_q_state = 0; | |
2859 | boolean_t page_from_bg_q = FALSE; | |
2860 | uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0; | |
2861 | vm_object_t m_object = VM_OBJECT_NULL; | |
2862 | int retval = 0; | |
2863 | boolean_t lock_yield_check = FALSE; | |
2864 | ||
2865 | ||
2866 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START, | |
2867 | vm_pageout_vminfo.vm_pageout_freed_speculative, | |
2868 | vm_pageout_state.vm_pageout_inactive_clean, | |
2869 | vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, | |
2870 | vm_pageout_vminfo.vm_pageout_inactive_dirty_external); | |
2871 | ||
2872 | flow_control.state = FCS_IDLE; | |
2873 | iq = &vm_pageout_queue_internal; | |
2874 | eq = &vm_pageout_queue_external; | |
2875 | sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; | |
2876 | ||
2877 | /* Ask the pmap layer to return any pages it no longer needs. */ | |
2878 | uint64_t pmap_wired_pages_freed = pmap_release_pages_fast(); | |
2879 | ||
2880 | vm_page_lock_queues(); | |
2881 | ||
2882 | vm_page_wire_count -= pmap_wired_pages_freed; | |
2883 | ||
2884 | delayed_unlock = 1; | |
2885 | ||
2886 | /* | |
2887 | * Calculate the max number of referenced pages on the inactive | |
2888 | * queue that we will reactivate. | |
2889 | */ | |
2890 | reactivated_this_call = 0; | |
2891 | reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count + | |
2892 | vm_page_inactive_count); | |
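/*
 * For a sense of scale (assuming the commonly used definition of
 * the macro, roughly max(avail / 20, a fixed floor)): with ~1M
 * pageable pages we would tolerate reactivating on the order of
 * 50K referenced pages in this call before the limit-exceeded
 * path starts forcing reclamation of referenced pages.
 */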
2893 | inactive_reclaim_run = 0; | |
2894 | ||
2895 | vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count; | |
2896 | ||
2897 | /* | |
2898 | * We must limit the rate at which we send pages to the pagers | |
2899 | * so that we don't tie up too many pages in the I/O queues. | |
2900 | * We implement a throttling mechanism using the laundry count | |
2901 | * to limit the number of pages outstanding to the default | |
2902 | * and external pagers. We can bypass the throttles and look | |
2903 | * for clean pages if the pageout queues don't drain in a timely | |
2904 | * fashion since this may indicate that the pageout paths are | |
2905 | * stalled waiting for memory, which only we can provide. | |
2906 | */ | |
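/*
 * Concretely, each pageout queue carries a laundry count and a
 * maximum; the VM_PAGE_Q_THROTTLED(q) test used below effectively
 * reduces to (q->pgo_laundry >= q->pgo_maxlaundry).
 */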
2907 | ||
2908 | vps_init_page_targets(); | |
2909 | assert(object == NULL); | |
2910 | assert(delayed_unlock != 0); | |
2911 | ||
2912 | for (;;) { | |
2913 | vm_page_t m; | |
2914 | ||
2915 | DTRACE_VM2(rev, int, 1, (uint64_t *), NULL); | |
2916 | ||
2917 | if (lock_yield_check) { | |
2918 | lock_yield_check = FALSE; | |
2919 | ||
2920 | if (delayed_unlock++ > delayed_unlock_limit) { | |
2921 | int freed = local_freed; | |
2922 | ||
2923 | vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, | |
2924 | VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); | |
2925 | if (freed == 0) { | |
2926 | lck_mtx_yield(&vm_page_queue_lock); | |
2927 | } | |
2928 | } else if (vm_pageout_scan_wants_object) { | |
2929 | vm_page_unlock_queues(); | |
2930 | mutex_pause(0); | |
2931 | vm_page_lock_queues(); | |
2932 | } | |
2933 | } | |
2934 | ||
2935 | if (vm_upl_wait_for_pages < 0) { | |
2936 | vm_upl_wait_for_pages = 0; | |
2937 | } | |
2938 | ||
2939 | delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages; | |
2940 | ||
2941 | if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) { | |
2942 | delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX; | |
2943 | } | |
2944 | ||
2945 | vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed); | |
2946 | ||
2947 | assert(delayed_unlock); | |
2948 | ||
2949 | /* | |
2950 | * maintain our balance | |
2951 | */ | |
2952 | vm_page_balance_inactive(1); | |
2953 | ||
2954 | ||
2955 | /********************************************************************** | |
2956 | * above this point we're playing with the active and secluded queues | |
2957 | * below this point we're playing with the throttling mechanisms | |
2958 | * and the inactive queue | |
2959 | **********************************************************************/ | |
2960 | ||
2961 | if (vm_page_free_count + local_freed >= vm_page_free_target) { | |
2962 | vm_pageout_scan_wants_object = VM_OBJECT_NULL; | |
2963 | ||
2964 | vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, | |
2965 | VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); | |
2966 | /* | |
2967 | * make sure the pageout I/O threads are running | |
2968 | * throttled in case there are still requests | |
2969 | * in the laundry... since we have met our targets | |
2970 | * we don't need the laundry to be cleaned in a timely | |
2971 | * fashion... so let's avoid interfering with foreground | |
2972 | * activity | |
2973 | */ | |
2974 | vm_pageout_adjust_eq_iothrottle(eq, TRUE); | |
2975 | ||
2976 | lck_mtx_lock(&vm_page_queue_free_lock); | |
2977 | ||
2978 | if ((vm_page_free_count >= vm_page_free_target) && | |
2979 | (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) { | |
2980 | /* | |
2981 | * done - we have met our target *and* | |
2982 | * there is no one waiting for a page. | |
2983 | */ | |
2984 | return_from_scan: | |
2985 | assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL); | |
2986 | ||
2987 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE, | |
2988 | vm_pageout_state.vm_pageout_inactive, | |
2989 | vm_pageout_state.vm_pageout_inactive_used, 0, 0); | |
2990 | VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END, | |
2991 | vm_pageout_vminfo.vm_pageout_freed_speculative, | |
2992 | vm_pageout_state.vm_pageout_inactive_clean, | |
2993 | vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, | |
2994 | vm_pageout_vminfo.vm_pageout_inactive_dirty_external); | |
2995 | ||
2996 | return; | |
2997 | } | |
2998 | lck_mtx_unlock(&vm_page_queue_free_lock); | |
2999 | } | |
3000 | ||
3001 | /* | |
3002 | * Before anything, we check if we have any ripe volatile | |
3003 | * objects around. If so, try to purge the first object. | |
3004 | * If the purge fails, fall through to reclaim a page instead. | |
3005 | * If the purge succeeds, go back to the top and reevaluate | |
3006 | * the new memory situation. | |
3007 | */ | |
3008 | retval = vps_purge_object(); | |
3009 | ||
3010 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { | |
3011 | /* | |
3012 | * Success | |
3013 | */ | |
3014 | if (object != NULL) { | |
3015 | vm_object_unlock(object); | |
3016 | object = NULL; | |
3017 | } | |
3018 | ||
3019 | lock_yield_check = FALSE; | |
3020 | continue; | |
3021 | } | |
3022 | ||
3023 | /* | |
3024 | * If our 'aged' queue is empty and we have some speculative pages | |
3025 | * in the other queues, let's go through and see if we need to age | |
3026 | * them. | |
3027 | * | |
3028 | * If we succeeded in aging a speculative Q, or everything simply | |
3029 | * looks normal w.r.t. queue age and queue counts, we keep going onward. | |
3030 | * | |
3031 | * If, for some reason, we seem to have a mismatch between the spec. | |
3032 | * page count and the page queues, we reset those variables and | |
3033 | * restart the loop (LD TODO: Track this better?). | |
3034 | */ | |
3035 | if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) { | |
3036 | retval = vps_age_speculative_queue(force_speculative_aging); | |
3037 | ||
3038 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { | |
3039 | lock_yield_check = FALSE; | |
3040 | continue; | |
3041 | } | |
3042 | } | |
3043 | force_speculative_aging = FALSE; | |
3044 | ||
3045 | /* | |
3046 | * Check to see if we need to evict objects from the cache. | |
3047 | * | |
3048 | * Note: 'object' here doesn't have anything to do with | |
3049 | * the eviction part. We just need to make sure we have dropped | |
3050 | * any object lock we might be holding if we need to go down | |
3051 | * into the eviction logic. | |
3052 | */ | |
3053 | retval = vps_object_cache_evict(&object); | |
3054 | ||
3055 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { | |
3056 | lock_yield_check = FALSE; | |
3057 | continue; | |
3058 | } | |
3059 | ||
3060 | ||
3061 | /* | |
3062 | * Calculate our filecache_min that will affect the loop | |
3063 | * going forward. | |
3064 | */ | |
3065 | vps_calculate_filecache_min(); | |
3066 | ||
3067 | /* | |
3068 | * LD TODO: Use a structure to hold all state variables for a single | |
3069 | * vm_pageout_scan iteration and pass that structure to this function instead. | |
3070 | */ | |
3071 | retval = vps_flow_control(&flow_control, &anons_grabbed, &object, | |
3072 | &delayed_unlock, &local_freeq, &local_freed, | |
3073 | &vm_pageout_deadlock_target, inactive_burst_count); | |
0b4e3aa0 | 3074 | |
cb323159 | 3075 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { |
0a7de745 | 3076 | if (loop_count >= vm_page_inactive_count) { |
55e303ae | 3077 | loop_count = 0; |
0a7de745 | 3078 | } |
cb323159 | 3079 | |
91447636 A |
3080 | inactive_burst_count = 0; |
3081 | ||
cb323159 A |
3082 | assert(object == NULL); |
3083 | assert(delayed_unlock != 0); | |
1c79356b | 3084 | |
cb323159 A |
3085 | lock_yield_check = FALSE; |
3086 | continue; | |
3087 | } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) { | |
3088 | goto return_from_scan; | |
3089 | } | |
91447636 A |
3090 | |
3091 | flow_control.state = FCS_IDLE; | |
cb323159 | 3092 | |
d9a64523 | 3093 | vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count), |
0a7de745 | 3094 | vm_pageout_inactive_external_forced_reactivate_limit); |
91447636 A |
3095 | loop_count++; |
3096 | inactive_burst_count++; | |
d9a64523 | 3097 | vm_pageout_state.vm_pageout_inactive++; |
316670eb A |
3098 | |
3099 | /* | |
3100 | * Choose a victim. | |
3101 | */ | |
39037602 | 3102 | |
cb323159 A |
3103 | m = NULL; |
3104 | retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, reactivated_this_call); | |
5c9f4661 | 3105 | |
cb323159 A |
3106 | if (m == NULL) { |
3107 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { | |
3108 | reactivated_this_call++; | |
d9a64523 | 3109 | |
cb323159 | 3110 | inactive_burst_count = 0; |
6d2010ae | 3111 | |
cb323159 A |
3112 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3113 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); | |
2d21ac55 | 3114 | } |
39236c6e | 3115 | |
cb323159 A |
3116 | lock_yield_check = TRUE; |
3117 | continue; | |
39236c6e | 3118 | } |
316670eb | 3119 | |
2d21ac55 | 3120 | /* |
316670eb | 3121 | * if we've gotten here, we have no victim page. |
5ba3f43e | 3122 | * check to see if we've not yet finished balancing the queues, | |
d9a64523 | 3123 | * or we have a page on the aged speculative queue that we | |
5ba3f43e A |
3124 | * skipped due to force_anonymous == TRUE... or we have | |
3125 | * speculative pages that we can prematurely age... if we're in | |
3126 | * one of these cases we'll keep going, else panic | |
2d21ac55 | 3127 | */ |
fe8ab488 | 3128 | force_anonymous = FALSE; |
d9a64523 | 3129 | VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1); |
316670eb | 3130 | |
0a7de745 | 3131 | if (!vm_page_queue_empty(&sq->age_q)) { |
cb323159 A |
3132 | lock_yield_check = TRUE; |
3133 | continue; | |
0a7de745 | 3134 | } |
fe8ab488 | 3135 | |
5ba3f43e A |
3136 | if (vm_page_speculative_count) { |
3137 | force_speculative_aging = TRUE; | |
cb323159 A |
3138 | lock_yield_check = TRUE; |
3139 | continue; | |
5ba3f43e | 3140 | } |
316670eb | 3141 | panic("vm_pageout: no victim"); |
d9a64523 | 3142 | |
316670eb | 3143 | /* NOTREACHED */ |
9bccf70c | 3144 | } |
cb323159 | 3145 | |
d190cdc3 | 3146 | assert(VM_PAGE_PAGEABLE(m)); |
39037602 | 3147 | m_object = VM_PAGE_OBJECT(m); |
39236c6e | 3148 | force_anonymous = FALSE; |
d9a64523 A |
3149 | |
3150 | page_prev_q_state = m->vmp_q_state; | |
316670eb A |
3151 | /* |
3152 | * we just found this page on one of our queues... | |
3153 | * it can't also be on the pageout queue, so safe | |
3e170ce0 | 3154 | * to call vm_page_queues_remove |
316670eb | 3155 | */ |
39037602 | 3156 | vm_page_queues_remove(m, TRUE); |
2d21ac55 | 3157 | |
d9a64523 A |
3158 | assert(!m->vmp_laundry); |
3159 | assert(!m->vmp_private); | |
3160 | assert(!m->vmp_fictitious); | |
39037602 A |
3161 | assert(m_object != kernel_object); |
3162 | assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr); | |
2d21ac55 | 3163 | |
d9a64523 | 3164 | vm_pageout_vminfo.vm_pageout_considered_page++; |
b0d623f7 | 3165 | |
2d21ac55 | 3166 | DTRACE_VM2(scan, int, 1, (uint64_t *), NULL); |
1c79356b | 3167 | |
91447636 | 3168 | /* |
2d21ac55 A |
3169 | * check to see if we currently are working |
3170 | * with the same object... if so, we've | |
3171 | * already got the lock | |
91447636 | 3172 | */ |
39037602 | 3173 | if (m_object != object) { |
cb323159 A |
3174 | boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT); |
3175 | ||
0a7de745 | 3176 | /* |
cb323159 A |
3177 | * vps_switch_object() will always drop the 'object' lock first |
3178 | * and then try to acquire the 'm_object' lock. So 'object' has to point to | |
3179 | * either 'm_object' or NULL. | |
2d21ac55 | 3180 | */ |
cb323159 | 3181 | retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q); |
39236c6e | 3182 | |
cb323159 A |
3183 | if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { |
3184 | lock_yield_check = TRUE; | |
3185 | continue; | |
1c79356b | 3186 | } |
1c79356b | 3187 | } |
39037602 A |
3188 | assert(m_object == object); |
3189 | assert(VM_PAGE_OBJECT(m) == m_object); | |
3190 | ||
d9a64523 | 3191 | if (m->vmp_busy) { |
1c79356b A |
3192 | /* |
3193 | * Somebody is already playing with this page. | |
6d2010ae | 3194 | * Put it back on the appropriate queue |
2d21ac55 | 3195 | * |
1c79356b | 3196 | */ |
0a7de745 | 3197 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1); |
316670eb | 3198 | |
0a7de745 A |
3199 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3200 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1); | |
3201 | } | |
cb323159 A |
3202 | |
3203 | vps_requeue_page(m, page_prev_q_state, page_from_bg_q); | |
3204 | ||
3205 | lock_yield_check = TRUE; | |
3206 | continue; | |
d9a64523 A |
3207 | } |
3208 | ||
3209 | /* | |
3210 | * if (m->vmp_cleaning && !m->vmp_free_when_done) | |
3211 | * If already cleaning this page in place | |
3212 | * just leave if off the paging queues. | |
3213 | * We can leave the page mapped, and upl_commit_range | |
3214 | * will put it on the clean queue. | |
3215 | * | |
3216 | * if (m->vmp_free_when_done && !m->vmp_cleaning) | |
3217 | * an msync INVALIDATE is in progress... | |
3218 | * this page has been marked for destruction | |
0a7de745 A |
3219 | * after it has been cleaned, |
3220 | * but not yet gathered into a UPL | |
d9a64523 A |
3221 | * where 'cleaning' will be set... |
3222 | * just leave it off the paging queues | |
3223 | * | |
3224 | * if (m->vmp_free_when_done && m->vmp_cleaning) | |
3225 | * an msync INVALIDATE is in progress | |
3226 | * and the UPL has already gathered this page... | |
3227 | * just leave it off the paging queues | |
3228 | */ | |
3229 | if (m->vmp_free_when_done || m->vmp_cleaning) { | |
cb323159 A |
3230 | lock_yield_check = TRUE; |
3231 | continue; | |
1c79356b A |
3232 | } |
3233 | ||
6d2010ae | 3234 | |
1c79356b | 3235 | /* |
6d2010ae A |
3236 | * If it's absent, in error or the object is no longer alive, |
3237 | * we can reclaim the page... in the no longer alive case, | |
3238 | * there are 2 states the page can be in that preclude us | |
3239 | * from reclaiming it - busy or cleaning - that we've already | |
3240 | * dealt with | |
1c79356b | 3241 | */ |
d9a64523 | 3242 | if (m->vmp_absent || m->vmp_error || !object->alive) { |
0a7de745 A |
3243 | if (m->vmp_absent) { |
3244 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1); | |
3245 | } else if (!object->alive) { | |
3246 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1); | |
3247 | } else { | |
3248 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1); | |
3249 | } | |
d9a64523 | 3250 | reclaim_page: |
91447636 | 3251 | if (vm_pageout_deadlock_target) { |
0a7de745 A |
3252 | VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1); |
3253 | vm_pageout_deadlock_target--; | |
91447636 | 3254 | } |
2d21ac55 A |
3255 | |
3256 | DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL); | |
3257 | ||
b0d623f7 | 3258 | if (object->internal) { |
2d21ac55 A |
3259 | DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL); |
3260 | } else { | |
3261 | DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL); | |
3262 | } | |
d9a64523 A |
3263 | assert(!m->vmp_cleaning); |
3264 | assert(!m->vmp_laundry); | |
3265 | ||
3266 | if (!object->internal && | |
3267 | object->pager != NULL && | |
3268 | object->pager->mo_pager_ops == &shared_region_pager_ops) { | |
3269 | shared_region_pager_reclaimed++; | |
3270 | } | |
316670eb | 3271 | |
d9a64523 | 3272 | m->vmp_busy = TRUE; |
2d21ac55 | 3273 | |
b0d623f7 A |
3274 | /* |
3275 | * remove page from object here since we're already | |
3276 | * behind the object lock... defer the rest of the work | |
3277 | * we'd normally do in vm_page_free_prepare_object | |
3278 | * until 'vm_page_free_list' is called | |
3279 | */ | |
0a7de745 | 3280 | if (m->vmp_tabled) { |
b0d623f7 | 3281 | vm_page_remove(m, TRUE); |
0a7de745 | 3282 | } |
55e303ae | 3283 | |
d9a64523 A |
3284 | assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); |
3285 | m->vmp_snext = local_freeq; | |
55e303ae | 3286 | local_freeq = m; |
91447636 | 3287 | local_freed++; |
d9a64523 | 3288 | |
0a7de745 | 3289 | if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { |
d9a64523 | 3290 | vm_pageout_vminfo.vm_pageout_freed_speculative++; |
0a7de745 | 3291 | } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
d9a64523 | 3292 | vm_pageout_vminfo.vm_pageout_freed_cleaned++; |
0a7de745 | 3293 | } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) { |
d9a64523 | 3294 | vm_pageout_vminfo.vm_pageout_freed_internal++; |
0a7de745 | 3295 | } else { |
d9a64523 | 3296 | vm_pageout_vminfo.vm_pageout_freed_external++; |
0a7de745 | 3297 | } |
b0d623f7 | 3298 | |
fe8ab488 | 3299 | inactive_burst_count = 0; |
cb323159 A |
3300 | |
3301 | lock_yield_check = TRUE; | |
3302 | continue; | |
1c79356b | 3303 | } |
b0d623f7 | 3304 | if (object->copy == VM_OBJECT_NULL) { |
0a7de745 | 3305 | /* |
d9a64523 A |
3306 | * No one else can have any interest in this page. |
3307 | * If this is an empty purgable object, the page can be | |
3308 | * reclaimed even if dirty. | |
3309 | * If the page belongs to a volatile purgable object, we | |
3310 | * reactivate it if the compressor isn't active. | |
3311 | */ | |
b0d623f7 | 3312 | if (object->purgable == VM_PURGABLE_EMPTY) { |
d9a64523 | 3313 | if (m->vmp_pmapped == TRUE) { |
b0d623f7 | 3314 | /* unmap the page */ |
39037602 | 3315 | refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
b0d623f7 | 3316 | if (refmod_state & VM_MEM_MODIFIED) { |
316670eb | 3317 | SET_PAGE_DIRTY(m, FALSE); |
b0d623f7 A |
3318 | } |
3319 | } | |
d9a64523 | 3320 | if (m->vmp_dirty || m->vmp_precious) { |
b0d623f7 A |
3321 | /* we saved the cost of cleaning this page ! */ |
3322 | vm_page_purged_count++; | |
3323 | } | |
3324 | goto reclaim_page; | |
3325 | } | |
39236c6e | 3326 | |
39037602 | 3327 | if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) { |
39236c6e A |
3328 | /* |
3329 | * With the VM compressor, the cost of | |
3330 | * reclaiming a page is much lower (no I/O), | |
3331 | * so if we find a "volatile" page, it's better | |
3332 | * to let it get compressed rather than letting | |
3333 | * it occupy a full page until it gets purged. | |
3334 | * So no need to check for "volatile" here. | |
3335 | */ | |
3336 | } else if (object->purgable == VM_PURGABLE_VOLATILE) { | |
3337 | /* | |
3338 | * Avoid cleaning a "volatile" page which might | |
3339 | * be purged soon. | |
3340 | */ | |
3341 | ||
b0d623f7 A |
3342 | /* if it's wired, we can't put it on our queue */ |
3343 | assert(!VM_PAGE_WIRED(m)); | |
6d2010ae | 3344 | |
b0d623f7 | 3345 | /* just stick it back on! */ |
6d2010ae | 3346 | reactivated_this_call++; |
316670eb | 3347 | |
0a7de745 A |
3348 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3349 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1); | |
3350 | } | |
316670eb | 3351 | |
b0d623f7 A |
3352 | goto reactivate_page; |
3353 | } | |
3354 | } | |
1c79356b A |
3355 | /* |
3356 | * If it's being used, reactivate. | |
3357 | * (Fictitious pages are either busy or absent.) | |
2d21ac55 A |
3358 | * First, update the reference and dirty bits |
3359 | * to make sure the page is unreferenced. | |
1c79356b | 3360 | */ |
2d21ac55 A |
3361 | refmod_state = -1; |
3362 | ||
d9a64523 | 3363 | if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) { |
0a7de745 | 3364 | refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); |
d9a64523 | 3365 | |
0a7de745 A |
3366 | if (refmod_state & VM_MEM_REFERENCED) { |
3367 | m->vmp_reference = TRUE; | |
3368 | } | |
3369 | if (refmod_state & VM_MEM_MODIFIED) { | |
316670eb A |
3370 | SET_PAGE_DIRTY(m, FALSE); |
3371 | } | |
91447636 | 3372 | } |
6d2010ae | 3373 | |
0a7de745 A |
3374 | if (m->vmp_reference || m->vmp_dirty) { |
3375 | /* deal with a rogue "reusable" page */ | |
3376 | VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object); | |
3377 | } | |
b0d623f7 | 3378 | |
cb323159 | 3379 | if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) { |
0a7de745 A |
3380 | vm_pageout_state.vm_page_xpmapped_min = 0; |
3381 | } else { | |
cb323159 | 3382 | vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor; |
0a7de745 | 3383 | } |
d9a64523 A |
3384 | |
3385 | if (!m->vmp_no_cache && | |
5ba3f43e | 3386 | page_from_bg_q == FALSE && |
d9a64523 | 3387 | (m->vmp_reference || (m->vmp_xpmapped && !object->internal && |
0a7de745 | 3388 | (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) { |
2d21ac55 A |
3389 | /* |
3390 | * The page we pulled off the inactive list has | |
3391 | * been referenced. It is possible for other | |
3392 | * processors to be touching pages faster than we | |
3393 | * can clear the referenced bit and traverse the | |
3394 | * inactive queue, so we limit the number of | |
3395 | * reactivations. | |
3396 | */ | |
3397 | if (++reactivated_this_call >= reactivate_limit) { | |
d9a64523 | 3398 | vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++; |
2d21ac55 | 3399 | } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) { |
d9a64523 | 3400 | vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++; |
2d21ac55 | 3401 | } else { |
b0d623f7 | 3402 | uint32_t isinuse; |
316670eb | 3403 | |
0a7de745 A |
3404 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3405 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1); | |
3406 | } | |
d9a64523 A |
3407 | |
3408 | vm_pageout_vminfo.vm_pageout_inactive_referenced++; | |
2d21ac55 | 3409 | reactivate_page: |
0a7de745 A |
3410 | if (!object->internal && object->pager != MEMORY_OBJECT_NULL && |
3411 | vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) { | |
b0d623f7 A |
3412 | /* |
3413 | * no explicit mappings of this object exist | |
3414 | * and it's not open via the filesystem | |
3415 | */ | |
3416 | vm_page_deactivate(m); | |
d9a64523 | 3417 | VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1); |
b0d623f7 A |
3418 | } else { |
3419 | /* | |
3420 | * The page was/is being used, so put back on active list. | |
3421 | */ | |
3422 | vm_page_activate(m); | |
3423 | VM_STAT_INCR(reactivations); | |
fe8ab488 | 3424 | inactive_burst_count = 0; |
b0d623f7 | 3425 | } |
39037602 | 3426 | #if CONFIG_BACKGROUND_QUEUE |
d9a64523 | 3427 | #if DEVELOPMENT || DEBUG |
5ba3f43e | 3428 | if (page_from_bg_q == TRUE) { |
0a7de745 | 3429 | if (m_object->internal) { |
39037602 | 3430 | vm_pageout_rejected_bq_internal++; |
0a7de745 | 3431 | } else { |
39037602 | 3432 | vm_pageout_rejected_bq_external++; |
0a7de745 | 3433 | } |
39037602 | 3434 | } |
cb323159 A |
3435 | #endif /* DEVELOPMENT || DEBUG */ |
3436 | #endif /* CONFIG_BACKGROUND_QUEUE */ | |
3437 | ||
0a7de745 A |
3438 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3439 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); | |
3440 | } | |
d9a64523 | 3441 | vm_pageout_state.vm_pageout_inactive_used++; |
55e303ae | 3442 | |
cb323159 A |
3443 | lock_yield_check = TRUE; |
3444 | continue; | |
2d21ac55 | 3445 | } |
d9a64523 | 3446 | /* |
2d21ac55 A |
3447 | * Make sure we call pmap_get_refmod() if it |
3448 | * wasn't already called just above, to update | |
3449 | * the dirty bit. | |
3450 | */ | |
d9a64523 | 3451 | if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) { |
39037602 | 3452 | refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); |
316670eb A |
3453 | if (refmod_state & VM_MEM_MODIFIED) { |
3454 | SET_PAGE_DIRTY(m, FALSE); | |
3455 | } | |
2d21ac55 | 3456 | } |
1c79356b A |
3457 | } |
3458 | ||
91447636 A |
3459 | /* |
3460 | * we've got a candidate page to steal... | |
3461 | * | |
d9a64523 A |
3462 | * m->vmp_dirty is up to date courtesy of the |
3463 | * preceding check for m->vmp_reference... if | |
3464 | * we get here, then m->vmp_reference had to be | |
2d21ac55 | 3465 | * FALSE (or possibly "reactivate_limit" was |
0a7de745 A |
3466 | * exceeded), but in either case we called |
3467 | * pmap_get_refmod() and updated both | |
3468 | * m->vmp_reference and m->vmp_dirty | |
91447636 A |
3469 | * |
3470 | * if it's dirty or precious we need to | |
3471 | * see if the target queue is throttled | |
3472 | * if it is, we need to skip over it by moving it back | |
3473 | * to the end of the inactive queue | |
3474 | */ | |
b0d623f7 | 3475 | |
91447636 A |
3476 | inactive_throttled = FALSE; |
3477 | ||
d9a64523 | 3478 | if (m->vmp_dirty || m->vmp_precious) { |
0a7de745 A |
3479 | if (object->internal) { |
3480 | if (VM_PAGE_Q_THROTTLED(iq)) { | |
3481 | inactive_throttled = TRUE; | |
3482 | } | |
91447636 | 3483 | } else if (VM_PAGE_Q_THROTTLED(eq)) { |
2d21ac55 | 3484 | inactive_throttled = TRUE; |
1c79356b | 3485 | } |
91447636 | 3486 | } |
2d21ac55 | 3487 | throttle_inactive: |
39037602 | 3488 | if (!VM_DYNAMIC_PAGING_ENABLED() && |
d9a64523 | 3489 | object->internal && m->vmp_dirty && |
6d2010ae | 3490 | (object->purgable == VM_PURGABLE_DENY || |
0a7de745 A |
3491 | object->purgable == VM_PURGABLE_NONVOLATILE || |
3492 | object->purgable == VM_PURGABLE_VOLATILE)) { | |
3e170ce0 | 3493 | vm_page_check_pageable_safe(m); |
d9a64523 | 3494 | assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); |
0a7de745 | 3495 | vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); |
d9a64523 | 3496 | m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; |
6d2010ae A |
3497 | vm_page_throttled_count++; |
3498 | ||
d9a64523 | 3499 | VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1); |
6d2010ae | 3500 | |
fe8ab488 | 3501 | inactive_burst_count = 0; |
cb323159 A |
3502 | |
3503 | lock_yield_check = TRUE; | |
3504 | continue; | |
6d2010ae A |
3505 | } |
3506 | if (inactive_throttled == TRUE) { | |
cb323159 A |
3507 | vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit, |
3508 | &delayed_unlock, &force_anonymous, page_from_bg_q); | |
d9a64523 | 3509 | |
cb323159 | 3510 | inactive_burst_count = 0; |
6d2010ae | 3511 | |
cb323159 A |
3512 | if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { |
3513 | VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); | |
1c79356b | 3514 | } |
cb323159 A |
3515 | |
3516 | lock_yield_check = TRUE; | |
3517 | continue; | |
1c79356b | 3518 | } |
2d21ac55 | 3519 | |
1c79356b | 3520 | /* |
91447636 A |
3521 | * we've got a page that we can steal... |
3522 | * eliminate all mappings and make sure | |
3523 | * we have the up-to-date modified state | |
316670eb | 3524 | * |
91447636 | 3525 | * if we need to do a pmap_disconnect then we |
d9a64523 A |
3526 | * need to re-evaluate m->vmp_dirty since the pmap_disconnect |
3527 | * provides the true state atomically... the | |
91447636 A |
3528 | * page was still mapped up to the pmap_disconnect |
3529 | * and may have been dirtied at the last microsecond | |
3530 | * | |
2d21ac55 A |
3531 | * Note that if 'pmapped' is FALSE then the page is not |
3532 | * and has not been in any map, so there is no point calling | |
d9a64523 | 3533 | * pmap_disconnect(). m->vmp_dirty could have been set in anticipation |
39236c6e | 3534 | * of likely usage of the page. |
91447636 | 3535 | */ |
d9a64523 | 3536 | if (m->vmp_pmapped == TRUE) { |
3e170ce0 | 3537 | int pmap_options; |
0b4e3aa0 | 3538 | |
3e170ce0 A |
3539 | /* |
3540 | * Don't count this page as going into the compressor | |
3541 | * if any of these are true: | |
39037602 A |
3542 | * 1) compressed pager isn't enabled |
3543 | * 2) Freezer enabled device with compressed pager | |
3e170ce0 A |
3544 | * backend (exclusive use) i.e. most of the VM system |
3545 | * (including vm_pageout_scan) has no knowledge of | |
3546 | * the compressor | |
39037602 | 3547 | * 3) This page belongs to a file and hence will not be |
3e170ce0 A |
3548 | * sent into the compressor |
3549 | */ | |
0a7de745 | 3550 | if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE || |
3e170ce0 A |
3551 | object->internal == FALSE) { |
3552 | pmap_options = 0; | |
d9a64523 | 3553 | } else if (m->vmp_dirty || m->vmp_precious) { |
fe8ab488 | 3554 | /* |
3e170ce0 A |
3555 | * VM knows that this page is dirty (or |
3556 | * precious) and needs to be compressed | |
3557 | * rather than freed. | |
3558 | * Tell the pmap layer to count this page | |
3559 | * as "compressed". | |
fe8ab488 | 3560 | */ |
3e170ce0 | 3561 | pmap_options = PMAP_OPTIONS_COMPRESSOR; |
39236c6e | 3562 | } else { |
3e170ce0 A |
3563 | /* |
3564 | * VM does not know if the page needs to | |
3565 | * be preserved but the pmap layer might tell | |
3566 | * us if any mapping has "modified" it. | |
3567 | * Let the pmap layer count this page | |
3568 | * as compressed if and only if it has been | |
3569 | * modified. | |
3570 | */ | |
3571 | pmap_options = | |
0a7de745 | 3572 | PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
316670eb | 3573 | } |
39037602 | 3574 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), |
0a7de745 A |
3575 | pmap_options, |
3576 | NULL); | |
39236c6e A |
3577 | if (refmod_state & VM_MEM_MODIFIED) { |
3578 | SET_PAGE_DIRTY(m, FALSE); | |
91447636 A |
3579 | } |
3580 | } | |
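/*
 * Summary of the accounting choices above:
 *
 *   compressor inactive, or file-backed page -> pmap_options = 0
 *   internal + known dirty/precious          -> PMAP_OPTIONS_COMPRESSOR
 *   internal, cleanliness unknown            -> PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
 *
 * i.e. we only pre-count the page as "compressed" when we already
 * know it must be compressed; otherwise pmap_disconnect decides
 * based on the modified state it observes.
 */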
d9a64523 | 3581 | |
2d21ac55 | 3582 | /* |
d9a64523 | 3583 | * reset our count of pages that have been reclaimed |
2d21ac55 A |
3584 | * since the last page was 'stolen' |
3585 | */ | |
3586 | inactive_reclaim_run = 0; | |
3587 | ||
1c79356b A |
3588 | /* |
3589 | * If it's clean and not precious, we can free the page. | |
3590 | */ | |
d9a64523 | 3591 | if (!m->vmp_dirty && !m->vmp_precious) { |
d9a64523 | 3592 | vm_pageout_state.vm_pageout_inactive_clean++; |
316670eb | 3593 | |
316670eb A |
3594 | /* |
3595 | * OK, at this point we have found a page we are going to free. | |
3596 | */ | |
fe8ab488 | 3597 | #if CONFIG_PHANTOM_CACHE |
0a7de745 | 3598 | if (!object->internal) { |
fe8ab488 | 3599 | vm_phantom_cache_add_ghost(m); |
0a7de745 | 3600 | } |
fe8ab488 | 3601 | #endif |
1c79356b A |
3602 | goto reclaim_page; |
3603 | } | |
2d21ac55 A |
3604 | |
3605 | /* | |
3606 | * The page may have been dirtied since the last check | |
3607 | * for a throttled target queue (which may have been skipped | |
3608 | * if the page was clean then). With the dirty page | |
3609 | * disconnected here, we can make one final check. | |
3610 | */ | |
6d2010ae | 3611 | if (object->internal) { |
0a7de745 | 3612 | if (VM_PAGE_Q_THROTTLED(iq)) { |
6d2010ae | 3613 | inactive_throttled = TRUE; |
0a7de745 | 3614 | } |
6d2010ae A |
3615 | } else if (VM_PAGE_Q_THROTTLED(eq)) { |
3616 | inactive_throttled = TRUE; | |
3617 | } | |
2d21ac55 | 3618 | |
0a7de745 | 3619 | if (inactive_throttled == TRUE) { |
6d2010ae | 3620 | goto throttle_inactive; |
0a7de745 | 3621 | } |
d9a64523 | 3622 | |
fe8ab488 A |
3623 | #if VM_PRESSURE_EVENTS |
3624 | #if CONFIG_JETSAM | |
3625 | ||
3626 | /* | |
3627 | * If Jetsam is enabled, then the sending | |
3628 | * of memory pressure notifications is handled | |
3629 | * from the same thread that takes care of high-water | |
3630 | * and other jetsams i.e. the memorystatus_thread. | |
3631 | */ | |
3632 | ||
3633 | #else /* CONFIG_JETSAM */ | |
d9a64523 | 3634 | |
39236c6e | 3635 | vm_pressure_response(); |
fe8ab488 A |
3636 | |
3637 | #endif /* CONFIG_JETSAM */ | |
39236c6e | 3638 | #endif /* VM_PRESSURE_EVENTS */ |
d9a64523 | 3639 | |
0a7de745 A |
3640 | if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { |
3641 | VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1); | |
3642 | } | |
5ba3f43e | 3643 | |
0a7de745 | 3644 | if (object->internal) { |
d9a64523 | 3645 | vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++; |
0a7de745 | 3646 | } else { |
d9a64523 | 3647 | vm_pageout_vminfo.vm_pageout_inactive_dirty_external++; |
0a7de745 | 3648 | } |
39236c6e | 3649 | |
3e170ce0 | 3650 | /* |
d9a64523 A |
3651 | * internal pages will go to the compressor... |
3652 | * external pages will go to the appropriate pager to be cleaned | |
3653 | * and upon completion will end up on 'vm_page_queue_cleaned' which | |
3654 | * is a preferred queue to steal from | |
3e170ce0 | 3655 | */ |
5ba3f43e | 3656 | vm_pageout_cluster(m); |
d9a64523 | 3657 | inactive_burst_count = 0; |
1c79356b | 3658 | |
91447636 A |
3659 | /* |
3660 | * back to top of pageout scan loop | |
3661 | */ | |
1c79356b | 3662 | } |
1c79356b A |
3663 | } |
3664 | ||
1c79356b | 3665 | |
1c79356b A |
3666 | void |
3667 | vm_page_free_reserve( | |
3668 | int pages) | |
3669 | { | |
0a7de745 | 3670 | int free_after_reserve; |
1c79356b | 3671 | |
39037602 | 3672 | if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { |
0a7de745 | 3673 | if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) { |
39236c6e | 3674 | vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT; |
0a7de745 | 3675 | } else { |
39236c6e | 3676 | vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT); |
0a7de745 | 3677 | } |
39236c6e | 3678 | } else { |
0a7de745 | 3679 | if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) { |
39236c6e | 3680 | vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT; |
0a7de745 | 3681 | } else { |
39236c6e | 3682 | vm_page_free_reserved += pages; |
0a7de745 | 3683 | } |
39236c6e | 3684 | } |
d9a64523 | 3685 | free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved; |
1c79356b A |
3686 | |
3687 | vm_page_free_min = vm_page_free_reserved + | |
0a7de745 | 3688 | VM_PAGE_FREE_MIN(free_after_reserve); |
1c79356b | 3689 | |
0a7de745 A |
3690 | if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) { |
3691 | vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT; | |
3692 | } | |
2d21ac55 | 3693 | |
1c79356b | 3694 | vm_page_free_target = vm_page_free_reserved + |
0a7de745 | 3695 | VM_PAGE_FREE_TARGET(free_after_reserve); |
1c79356b | 3696 | |
0a7de745 A |
3697 | if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) { |
3698 | vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT; | |
3699 | } | |
39236c6e | 3700 | |
0a7de745 | 3701 | if (vm_page_free_target < vm_page_free_min + 5) { |
39037602 | 3702 | vm_page_free_target = vm_page_free_min + 5; |
0a7de745 | 3703 | } |
39236c6e | 3704 | |
39037602 A |
3705 | vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2); |
3706 | } | |
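/*
 * The intended ordering after this function runs is:
 *
 *   vm_page_free_reserved < vm_page_free_min < vm_page_free_target
 *
 * with the target kept at least 5 pages above the min, and
 * vm_page_throttle_limit pinned at half of the target.
 */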
39236c6e | 3707 | |
39037602 A |
3708 | /* |
3709 | * vm_pageout is the high level pageout daemon. | |
3710 | */ | |
39236c6e | 3711 | |
39037602 A |
3712 | void |
3713 | vm_pageout_continue(void) | |
3714 | { | |
3715 | DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL); | |
d9a64523 | 3716 | VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1); |
39037602 A |
3717 | |
3718 | lck_mtx_lock(&vm_page_queue_free_lock); | |
3719 | vm_pageout_running = TRUE; | |
3720 | lck_mtx_unlock(&vm_page_queue_free_lock); | |
3721 | ||
3722 | vm_pageout_scan(); | |
3723 | /* | |
3724 | * we hold both the vm_page_queue_free_lock | |
3725 | * and the vm_page_queues_lock at this point | |
3726 | */ | |
3727 | assert(vm_page_free_wanted == 0); | |
3728 | assert(vm_page_free_wanted_privileged == 0); | |
3729 | assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT); | |
3730 | ||
3731 | vm_pageout_running = FALSE; | |
cb323159 | 3732 | #if !CONFIG_EMBEDDED |
39037602 A |
3733 | if (vm_pageout_waiter) { |
3734 | vm_pageout_waiter = FALSE; | |
3735 | thread_wakeup((event_t)&vm_pageout_waiter); | |
39236c6e | 3736 | } |
5ba3f43e | 3737 | #endif /* !CONFIG_EMBEDDED */ |
39236c6e | 3738 | |
39037602 | 3739 | lck_mtx_unlock(&vm_page_queue_free_lock); |
39236c6e A |
3740 | vm_page_unlock_queues(); |
3741 | ||
39037602 A |
3742 | counter(c_vm_pageout_block++); |
3743 | thread_block((thread_continue_t)vm_pageout_continue); | |
39236c6e A |
3744 | /*NOTREACHED*/ |
3745 | } | |
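/*
 * Note the continuation-style structure: thread_block() is passed
 * vm_pageout_continue itself as the continuation, so the daemon's
 * stack is not preserved across the block -- each wakeup re-enters
 * at the top of this function instead of returning here (hence
 * NOTREACHED above).
 */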
3746 | ||
5ba3f43e | 3747 | #if !CONFIG_EMBEDDED |
39037602 A |
3748 | kern_return_t |
3749 | vm_pageout_wait(uint64_t deadline) | |
3750 | { | |
3751 | kern_return_t kr; | |
3752 | ||
3753 | lck_mtx_lock(&vm_page_queue_free_lock); | |
0a7de745 | 3754 | for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) { |
39037602 A |
3755 | vm_pageout_waiter = TRUE; |
3756 | if (THREAD_AWAKENED != lck_mtx_sleep_deadline( | |
0a7de745 A |
3757 | &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT, |
3758 | (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) { | |
39037602 A |
3759 | kr = KERN_OPERATION_TIMED_OUT; |
3760 | } | |
3761 | } | |
3762 | lck_mtx_unlock(&vm_page_queue_free_lock); | |
3763 | ||
0a7de745 | 3764 | return kr; |
39037602 | 3765 | } |
5ba3f43e | 3766 | #endif /* !CONFIG_EMBEDDED */ |
39037602 | 3767 | |
39236c6e A |
3768 | |
3769 | static void | |
3770 | vm_pageout_iothread_external_continue(struct vm_pageout_queue *q) | |
3771 | { | |
0a7de745 A |
3772 | vm_page_t m = NULL; |
3773 | vm_object_t object; | |
39236c6e | 3774 | vm_object_offset_t offset; |
0a7de745 | 3775 | memory_object_t pager; |
39236c6e | 3776 | |
d9a64523 | 3777 | /* On systems with a compressor, the external IO thread clears its |
5ba3f43e A |
3778 | * VM privileged bit to accommodate large allocations (e.g. bulk UPL |
3779 | * creation) | |
3780 | */ | |
0a7de745 | 3781 | if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) { |
39236c6e | 3782 | current_thread()->options &= ~TH_OPT_VMPRIV; |
0a7de745 | 3783 | } |
39236c6e A |
3784 | |
3785 | vm_page_lockspin_queues(); | |
3786 | ||
0a7de745 A |
3787 | while (!vm_page_queue_empty(&q->pgo_pending)) { |
3788 | q->pgo_busy = TRUE; | |
3789 | vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq); | |
3790 | ||
3791 | assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q); | |
3792 | VM_PAGE_CHECK(m); | |
3793 | /* | |
3794 | * grab a snapshot of the object and offset this | |
3795 | * page is tabled in so that we can relookup this | |
3796 | * page after we've taken the object lock - these | |
3797 | * fields are stable while we hold the page queues lock | |
3798 | * but as soon as we drop it, there is nothing to keep | |
3799 | * this page in this object... we hold an activity_in_progress | |
3800 | * on this object which will keep it from terminating | |
3801 | */ | |
3802 | object = VM_PAGE_OBJECT(m); | |
3803 | offset = m->vmp_offset; | |
3804 | ||
3805 | m->vmp_q_state = VM_PAGE_NOT_ON_Q; | |
3806 | VM_PAGE_ZERO_PAGEQ_ENTRY(m); | |
3807 | ||
3808 | vm_page_unlock_queues(); | |
3809 | ||
3810 | vm_object_lock(object); | |
3811 | ||
3812 | m = vm_page_lookup(object, offset); | |
3813 | ||
3814 | if (m == NULL || m->vmp_busy || m->vmp_cleaning || | |
3815 | !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) { | |
3816 | /* | |
3817 | * it's either the same page that someone else has | |
3818 | * started cleaning (or it's finished cleaning or | |
3819 | * been put back on the pageout queue), or | |
3820 | * the page has been freed or we have found a | |
3821 | * new page at this offset... in all of these cases | |
3822 | * we merely need to release the activity_in_progress | |
3823 | * we took when we put the page on the pageout queue | |
3824 | */ | |
3825 | vm_object_activity_end(object); | |
3826 | vm_object_unlock(object); | |
3827 | ||
3828 | vm_page_lockspin_queues(); | |
3829 | continue; | |
3830 | } | |
3831 | pager = object->pager; | |
3832 | ||
3833 | if (pager == MEMORY_OBJECT_NULL) { | |
3834 | /* | |
3835 | * This pager has been destroyed by either | |
3836 | * memory_object_destroy or vm_object_destroy, and | |
3837 | * so there is nowhere for the page to go. | |
3838 | */ | |
3839 | if (m->vmp_free_when_done) { | |
3840 | /* | |
3841 | * Just free the page... VM_PAGE_FREE takes | |
3842 | * care of cleaning up all the state... | |
3843 | * including doing the vm_pageout_throttle_up | |
3844 | */ | |
3845 | VM_PAGE_FREE(m); | |
3846 | } else { | |
3847 | vm_page_lockspin_queues(); | |
3848 | ||
3849 | vm_pageout_throttle_up(m); | |
3850 | vm_page_activate(m); | |
3851 | ||
3852 | vm_page_unlock_queues(); | |
3853 | ||
3854 | /* | |
3855 | * And we are done with it. | |
3856 | */ | |
3857 | } | |
3858 | vm_object_activity_end(object); | |
3859 | vm_object_unlock(object); | |
3860 | ||
3861 | vm_page_lockspin_queues(); | |
3862 | continue; | |
3863 | } | |
39236c6e | 3864 | #if 0 |
0a7de745 A |
3865 | /* |
3866 | * we don't hold the page queue lock | |
3867 | * so this check isn't safe to make | |
3868 | */ | |
3869 | VM_PAGE_CHECK(m); | |
39236c6e | 3870 | #endif |
0a7de745 A |
3871 | /* |
3872 | * give back the activity_in_progress reference we | |
3873 | * took when we queued up this page and replace it | |
3874 | * with a paging_in_progress reference that will | |
3875 | * also keep the paging offset from changing and | |
3876 | * prevent the object from terminating | |
3877 | */ | |
3878 | vm_object_activity_end(object); | |
3879 | vm_object_paging_begin(object); | |
3880 | vm_object_unlock(object); | |
3881 | ||
3882 | /* | |
3883 | * Send the data to the pager. | |
3884 | * any pageout clustering happens there | |
3885 | */ | |
3886 | memory_object_data_return(pager, | |
3887 | m->vmp_offset + object->paging_offset, | |
3888 | PAGE_SIZE, | |
3889 | NULL, | |
3890 | NULL, | |
3891 | FALSE, | |
3892 | FALSE, | |
3893 | 0); | |
3894 | ||
3895 | vm_object_lock(object); | |
3896 | vm_object_paging_end(object); | |
3897 | vm_object_unlock(object); | |
3898 | ||
3899 | vm_pageout_io_throttle(); | |
3900 | ||
3901 | vm_page_lockspin_queues(); | |
39236c6e A |
3902 | } |
3903 | q->pgo_busy = FALSE; | |
3904 | q->pgo_idle = TRUE; | |
3905 | ||
3906 | assert_wait((event_t) &q->pgo_pending, THREAD_UNINT); | |
3907 | vm_page_unlock_queues(); | |
3908 | ||
3909 | thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q); | |
3910 | /*NOTREACHED*/ | |
3911 | } | |
3912 | ||
3913 | ||
0a7de745 | 3914 | #define MAX_FREE_BATCH 32 |
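/*
 * Compressed pages are freed in batches of MAX_FREE_BATCH so that
 * each vm_page_free_list() call amortizes the free-list locking
 * across many pages rather than paying for it page by page.
 */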
39037602 | 3915 | uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by |
0a7de745 A |
3916 | * this thread. |
3917 | */ | |
3e170ce0 | 3918 | |
5ba3f43e | 3919 | |
5ba3f43e A |
3920 | void |
3921 | vm_pageout_iothread_internal_continue(struct cq *); | |
3922 | void | |
39236c6e A |
3923 | vm_pageout_iothread_internal_continue(struct cq *cq) |
3924 | { | |
3925 | struct vm_pageout_queue *q; | |
0a7de745 A |
3926 | vm_page_t m = NULL; |
3927 | boolean_t pgo_draining; | |
39236c6e | 3928 | vm_page_t local_q; |
0a7de745 | 3929 | int local_cnt; |
39236c6e A |
3930 | vm_page_t local_freeq = NULL; |
3931 | int local_freed = 0; | |
0a7de745 | 3932 | int local_batch_size; |
5ba3f43e | 3933 | #if DEVELOPMENT || DEBUG |
0a7de745 | 3934 | int ncomps = 0; |
5ba3f43e A |
3935 | boolean_t marked_active = FALSE; |
3936 | #endif | |
39236c6e A |
3937 | KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0); |
3938 | ||
3939 | q = cq->q; | |
c6bf4f31 A |
3940 | #if __AMP__ |
3941 | if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) { | |
3942 | local_batch_size = (q->pgo_maxlaundry >> 3); | |
3943 | local_batch_size = MAX(local_batch_size, 16); | |
3944 | } else { | |
3945 | local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2); | |
3946 | } | |
3947 | #else | |
d9a64523 | 3948 | local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2); |
c6bf4f31 | 3949 | #endif |
39236c6e | 3950 | |
3e170ce0 | 3951 | #if RECORD_THE_COMPRESSED_DATA |
0a7de745 | 3952 | if (q->pgo_laundry) { |
3e170ce0 | 3953 | c_compressed_record_init(); |
0a7de745 | 3954 | } |
3e170ce0 | 3955 | #endif |
39236c6e | 3956 | while (TRUE) { |
0a7de745 | 3957 | int pages_left_on_q = 0; |
39236c6e A |
3958 | |
3959 | local_cnt = 0; | |
3960 | local_q = NULL; | |
3961 | ||
3962 | KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0); | |
39236c6e | 3963 | |
5ba3f43e A |
3964 | vm_page_lock_queues(); |
3965 | #if DEVELOPMENT || DEBUG | |
3966 | if (marked_active == FALSE) { | |
3967 | vmct_active++; | |
3968 | vmct_state[cq->id] = VMCT_ACTIVE; | |
3969 | marked_active = TRUE; | |
3970 | if (vmct_active == 1) { | |
d9a64523 | 3971 | vm_compressor_epoch_start = mach_absolute_time(); |
5ba3f43e A |
3972 | } |
3973 | } | |
3974 | #endif | |
39236c6e A |
3975 | KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0); |
3976 | ||
3e170ce0 | 3977 | KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0); |
39236c6e | 3978 | |
0a7de745 A |
3979 | while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) { |
3980 | vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq); | |
d9a64523 | 3981 | assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q); |
39236c6e | 3982 | VM_PAGE_CHECK(m); |
d9a64523 A |
3983 | |
3984 | m->vmp_q_state = VM_PAGE_NOT_ON_Q; | |
39037602 | 3985 | VM_PAGE_ZERO_PAGEQ_ENTRY(m); |
d9a64523 | 3986 | m->vmp_laundry = FALSE; |
39236c6e | 3987 | |
d9a64523 | 3988 | m->vmp_snext = local_q; |
39236c6e A |
3989 | local_q = m; |
3990 | local_cnt++; | |
3991 | } | |
0a7de745 | 3992 | if (local_q == NULL) { |
39236c6e | 3993 | break; |
0a7de745 | 3994 | } |
39236c6e A |
3995 | |
3996 | q->pgo_busy = TRUE; | |
3997 | ||
3e170ce0 | 3998 | if ((pgo_draining = q->pgo_draining) == FALSE) { |
39236c6e | 3999 | vm_pageout_throttle_up_batch(q, local_cnt); |
3e170ce0 | 4000 | pages_left_on_q = q->pgo_laundry; |
0a7de745 | 4001 | } else { |
3e170ce0 | 4002 | pages_left_on_q = q->pgo_laundry - local_cnt; |
0a7de745 | 4003 | } |
39236c6e A |
4004 | |
4005 | vm_page_unlock_queues(); | |
4006 | ||
3e170ce0 | 4007 | #if !RECORD_THE_COMPRESSED_DATA |
d9a64523 | 4008 | if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) { |
3e170ce0 | 4009 | thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1)); |
5ba3f43e | 4010 | } |
3e170ce0 A |
4011 | #endif |
4012 | KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0); | |
39236c6e A |
4013 | |
4014 | while (local_q) { | |
3e170ce0 A |
4015 | KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0); |
4016 | ||
39236c6e | 4017 | m = local_q; |
d9a64523 A |
4018 | local_q = m->vmp_snext; |
4019 | m->vmp_snext = NULL; | |
39236c6e | 4020 | |
d9a64523 A |
4021 | if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) { |
4022 | #if DEVELOPMENT || DEBUG | |
5ba3f43e | 4023 | ncomps++; |
d9a64523 A |
4024 | #endif |
4025 | KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0); | |
4026 | ||
4027 | m->vmp_snext = local_freeq; | |
3e170ce0 A |
4028 | local_freeq = m; |
4029 | local_freed++; | |
39236c6e | 4030 | |
3e170ce0 | 4031 | if (local_freed >= MAX_FREE_BATCH) { |
0a7de745 | 4032 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
39236c6e | 4033 | |
3e170ce0 | 4034 | vm_page_free_list(local_freeq, TRUE); |
d9a64523 | 4035 | |
3e170ce0 A |
4036 | local_freeq = NULL; |
4037 | local_freed = 0; | |
39236c6e | 4038 | } |
39236c6e | 4039 | } |
3e170ce0 A |
4040 | #if !CONFIG_JETSAM |
4041 | while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) { | |
0a7de745 A |
4042 | kern_return_t wait_result; |
4043 | int need_wakeup = 0; | |
39236c6e A |
4044 | |
4045 | if (local_freeq) { | |
0a7de745 | 4046 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
39236c6e | 4047 | |
5ba3f43e | 4048 | vm_page_free_list(local_freeq, TRUE); |
39236c6e A |
4049 | local_freeq = NULL; |
4050 | local_freed = 0; | |
b0d623f7 | 4051 | |
39236c6e A |
4052 | continue; |
4053 | } | |
4054 | lck_mtx_lock_spin(&vm_page_queue_free_lock); | |
b0d623f7 | 4055 | |
3e170ce0 | 4056 | if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) { |
0a7de745 | 4057 | if (vm_page_free_wanted_privileged++ == 0) { |
39236c6e | 4058 | need_wakeup = 1; |
0a7de745 | 4059 | } |
39236c6e | 4060 | wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT); |
91447636 | 4061 | |
39236c6e | 4062 | lck_mtx_unlock(&vm_page_queue_free_lock); |
91447636 | 4063 | |
0a7de745 | 4064 | if (need_wakeup) { |
39236c6e | 4065 | thread_wakeup((event_t)&vm_page_free_wanted); |
0a7de745 | 4066 | } |
316670eb | 4067 | |
0a7de745 | 4068 | if (wait_result == THREAD_WAITING) { |
39236c6e | 4069 | thread_block(THREAD_CONTINUE_NULL); |
0a7de745 A |
4070 | } |
4071 | } else { | |
39236c6e | 4072 | lck_mtx_unlock(&vm_page_queue_free_lock); |
0a7de745 | 4073 | } |
39236c6e | 4074 | } |
3e170ce0 | 4075 | #endif |
39236c6e A |
4076 | } |
4077 | if (local_freeq) { | |
0a7de745 | 4078 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
5ba3f43e | 4079 | |
39236c6e | 4080 | vm_page_free_list(local_freeq, TRUE); |
39236c6e A |
4081 | local_freeq = NULL; |
4082 | local_freed = 0; | |
4083 | } | |
4084 | if (pgo_draining == TRUE) { | |
4085 | vm_page_lockspin_queues(); | |
4086 | vm_pageout_throttle_up_batch(q, local_cnt); | |
4087 | vm_page_unlock_queues(); | |
4088 | } | |
0b4c1975 | 4089 | } |
39236c6e A |
4090 | KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0); |
4091 | ||
4092 | /* | |
4093 | * queue lock is held and our q is empty | |
4094 | */ | |
91447636 A |
4095 | q->pgo_busy = FALSE; |
4096 | q->pgo_idle = TRUE; | |
316670eb | 4097 | |
3e170ce0 | 4098 | assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT); |
5ba3f43e A |
4099 | #if DEVELOPMENT || DEBUG |
4100 | if (marked_active == TRUE) { | |
4101 | vmct_active--; | |
4102 | vmct_state[cq->id] = VMCT_IDLE; | |
4103 | ||
4104 | if (vmct_active == 0) { | |
d9a64523 A |
4105 | vm_compressor_epoch_stop = mach_absolute_time(); |
4106 | assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start, | |
4107 | "Compressor epoch non-monotonic: 0x%llx -> 0x%llx", | |
4108 | vm_compressor_epoch_start, vm_compressor_epoch_stop); | |
5ba3f43e A |
4109 | /* This interval includes periods in which one or more |
4110 | * compressor threads were preempted. |
4111 | */ | |
d9a64523 | 4112 | vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start; |
5ba3f43e | 4113 | } |
5ba3f43e A |
4114 | } |
4115 | #endif | |
4116 | vm_page_unlock_queues(); | |
4117 | #if DEVELOPMENT || DEBUG | |
39037602 | 4118 | if (__improbable(vm_compressor_time_thread)) { |
5ba3f43e A |
4119 | vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self(); |
4120 | vmct_stats.vmct_pages[cq->id] += ncomps; | |
4121 | vmct_stats.vmct_iterations[cq->id]++; | |
4122 | if (ncomps > vmct_stats.vmct_maxpages[cq->id]) { | |
4123 | vmct_stats.vmct_maxpages[cq->id] = ncomps; | |
4124 | } | |
4125 | if (ncomps < vmct_stats.vmct_minpages[cq->id]) { | |
4126 | vmct_stats.vmct_minpages[cq->id] = ncomps; | |
4127 | } | |
39037602 | 4128 | } |
5ba3f43e | 4129 | #endif |
39037602 | 4130 | |
39236c6e A |
4131 | KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0); |
4132 | ||
4133 | thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq); | |
91447636 A |
4134 | /*NOTREACHED*/ |
4135 | } | |
4136 | ||
4137 | ||
3e170ce0 | 4138 | kern_return_t |
d9a64523 | 4139 | vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) |
3e170ce0 | 4140 | { |
0a7de745 A |
4141 | vm_object_t object; |
4142 | memory_object_t pager; | |
4143 | int compressed_count_delta; | |
4144 | kern_return_t retval; | |
3e170ce0 | 4145 | |
39037602 A |
4146 | object = VM_PAGE_OBJECT(m); |
4147 | ||
d9a64523 A |
4148 | assert(!m->vmp_free_when_done); |
4149 | assert(!m->vmp_laundry); | |
3e170ce0 | 4150 | |
3e170ce0 A |
4151 | pager = object->pager; |
4152 | ||
0a7de745 | 4153 | if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) { |
3e170ce0 A |
4154 | KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0); |
4155 | ||
4156 | vm_object_lock(object); | |
4157 | ||
4158 | /* | |
4159 | * If there is no memory object for the page, create | |
4160 | * one and hand it to the compression pager. | |
4161 | */ | |
4162 | ||
0a7de745 | 4163 | if (!object->pager_initialized) { |
3e170ce0 | 4164 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
0a7de745 A |
4165 | } |
4166 | if (!object->pager_initialized) { | |
3e170ce0 | 4167 | vm_object_compressor_pager_create(object); |
0a7de745 | 4168 | } |
3e170ce0 | 4169 | |
39037602 A |
4170 | pager = object->pager; |
4171 | ||
4172 | if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) { | |
3e170ce0 | 4173 | /* |
39037602 A |
4174 | * Still no pager for the object, |
4175 | * or the pager has been destroyed. | |
3e170ce0 A |
4176 | * Reactivate the page. |
4177 | * | |
4178 | * Should only happen if there is no | |
4179 | * compression pager. |
4180 | */ | |
3e170ce0 A |
4181 | PAGE_WAKEUP_DONE(m); |
4182 | ||
4183 | vm_page_lockspin_queues(); | |
4184 | vm_page_activate(m); | |
d9a64523 | 4185 | VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1); |
3e170ce0 | 4186 | vm_page_unlock_queues(); |
d9a64523 | 4187 | |
3e170ce0 A |
4188 | /* |
4189 | * And we are done with it. | |
4190 | */ | |
4191 | vm_object_activity_end(object); | |
4192 | vm_object_unlock(object); | |
4193 | ||
4194 | return KERN_FAILURE; | |
4195 | } | |
3e170ce0 | 4196 | vm_object_unlock(object); |
d9a64523 | 4197 | |
3e170ce0 A |
4198 | KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0); |
4199 | } | |
4200 | assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL); | |
d9a64523 | 4201 | assert(object->activity_in_progress > 0); |
3e170ce0 A |
4202 | |
4203 | retval = vm_compressor_pager_put( | |
4204 | pager, | |
d9a64523 | 4205 | m->vmp_offset + object->paging_offset, |
39037602 | 4206 | VM_PAGE_GET_PHYS_PAGE(m), |
3e170ce0 A |
4207 | current_chead, |
4208 | scratch_buf, | |
4209 | &compressed_count_delta); | |
4210 | ||
d9a64523 | 4211 | vm_object_lock(object); |
3e170ce0 | 4212 | |
d9a64523 A |
4213 | assert(object->activity_in_progress > 0); |
4214 | assert(VM_PAGE_OBJECT(m) == object); | |
4215 | assert( !VM_PAGE_WIRED(m)); | |
3e170ce0 A |
4216 | |
4217 | vm_compressor_pager_count(pager, | |
0a7de745 A |
4218 | compressed_count_delta, |
4219 | FALSE, /* shared_lock */ | |
4220 | object); | |
3e170ce0 | 4221 | |
3e170ce0 A |
4222 | if (retval == KERN_SUCCESS) { |
4223 | /* | |
4224 | * If the object is purgeable, its owner's | |
4225 | * purgeable ledgers will be updated in | |
4226 | * vm_page_remove() but the page still | |
4227 | * contributes to the owner's memory footprint, | |
4228 | * so account for it as such. | |
4229 | */ | |
d9a64523 | 4230 | if ((object->purgable != VM_PURGABLE_DENY || |
0a7de745 | 4231 | object->vo_ledger_tag) && |
d9a64523 A |
4232 | object->vo_owner != NULL) { |
4233 | /* one more compressed purgeable/tagged page */ | |
4234 | vm_object_owner_compressed_update(object, | |
0a7de745 | 4235 | +1); |
3e170ce0 A |
4236 | } |
4237 | VM_STAT_INCR(compressions); | |
d9a64523 | 4238 | |
0a7de745 | 4239 | if (m->vmp_tabled) { |
3e170ce0 | 4240 | vm_page_remove(m, TRUE); |
0a7de745 | 4241 | } |
3e170ce0 A |
4242 | } else { |
4243 | PAGE_WAKEUP_DONE(m); | |
4244 | ||
4245 | vm_page_lockspin_queues(); | |
4246 | ||
4247 | vm_page_activate(m); | |
d9a64523 | 4248 | vm_pageout_vminfo.vm_compressor_failed++; |
3e170ce0 A |
4249 | |
4250 | vm_page_unlock_queues(); | |
4251 | } | |
d9a64523 A |
4252 | vm_object_activity_end(object); |
4253 | vm_object_unlock(object); | |
4254 | ||
3e170ce0 A |
4255 | return retval; |
4256 | } | |
4257 | ||
4258 | ||
316670eb | 4259 | static void |
5ba3f43e | 4260 | vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority) |
316670eb | 4261 | { |
0a7de745 | 4262 | uint32_t policy; |
d9a64523 | 4263 | |
0a7de745 | 4264 | if (hibernate_cleaning_in_progress == TRUE) { |
316670eb | 4265 | req_lowpriority = FALSE; |
0a7de745 | 4266 | } |
316670eb | 4267 | |
5ba3f43e | 4268 | if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) { |
316670eb A |
4269 | vm_page_unlock_queues(); |
4270 | ||
4271 | if (req_lowpriority == TRUE) { | |
39236c6e | 4272 | policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED; |
316670eb A |
4273 | DTRACE_VM(laundrythrottle); |
4274 | } else { | |
39236c6e | 4275 | policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED; |
316670eb A |
4276 | DTRACE_VM(laundryunthrottle); |
4277 | } | |
5ba3f43e | 4278 | proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid, |
0a7de745 | 4279 | TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy); |
39236c6e | 4280 | |
5ba3f43e | 4281 | eq->pgo_lowpriority = req_lowpriority; |
39236c6e | 4282 | |
316670eb A |
4283 | vm_page_lock_queues(); |
4284 | } | |
4285 | } | |
4286 | ||
4287 | ||
91447636 A |
4288 | static void |
4289 | vm_pageout_iothread_external(void) | |
4290 | { | |
0a7de745 | 4291 | thread_t self = current_thread(); |
2d21ac55 A |
4292 | |
4293 | self->options |= TH_OPT_VMPRIV; | |
91447636 | 4294 | |
39037602 | 4295 | DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL); |
39236c6e | 4296 | |
39037602 | 4297 | proc_set_thread_policy(self, TASK_POLICY_EXTERNAL, |
0a7de745 | 4298 | TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED); |
316670eb A |
4299 | |
4300 | vm_page_lock_queues(); | |
4301 | ||
4302 | vm_pageout_queue_external.pgo_tid = self->thread_id; | |
4303 | vm_pageout_queue_external.pgo_lowpriority = TRUE; | |
4304 | vm_pageout_queue_external.pgo_inited = TRUE; | |
4305 | ||
4306 | vm_page_unlock_queues(); | |
4307 | ||
39037602 | 4308 | vm_pageout_iothread_external_continue(&vm_pageout_queue_external); |
316670eb | 4309 | |
91447636 A |
4310 | /*NOTREACHED*/ |
4311 | } | |
4312 | ||
39236c6e | 4313 | |
91447636 | 4314 | static void |
39236c6e | 4315 | vm_pageout_iothread_internal(struct cq *cq) |
91447636 | 4316 | { |
0a7de745 | 4317 | thread_t self = current_thread(); |
91447636 A |
4318 | |
4319 | self->options |= TH_OPT_VMPRIV; | |
4320 | ||
316670eb A |
4321 | vm_page_lock_queues(); |
4322 | ||
4323 | vm_pageout_queue_internal.pgo_tid = self->thread_id; | |
4324 | vm_pageout_queue_internal.pgo_lowpriority = TRUE; | |
4325 | vm_pageout_queue_internal.pgo_inited = TRUE; | |
4326 | ||
4327 | vm_page_unlock_queues(); | |
4328 | ||
0a7de745 | 4329 | if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { |
39037602 | 4330 | thread_vm_bind_group_add(); |
0a7de745 | 4331 | } |
39236c6e | 4332 | |
5ba3f43e | 4333 | |
c6bf4f31 A |
4334 | #if __AMP__ |
4335 | if (vm_compressor_ebound) { | |
4336 | thread_bind_cluster_type('E'); | |
4337 | } | |
4338 | #endif /* __AMP__ */ | |
cb323159 | 4339 | |
5ba3f43e A |
4340 | thread_set_thread_name(current_thread(), "VM_compressor"); |
4341 | #if DEVELOPMENT || DEBUG | |
4342 | vmct_stats.vmct_minpages[cq->id] = INT32_MAX; | |
4343 | #endif | |
39037602 | 4344 | vm_pageout_iothread_internal_continue(cq); |
316670eb | 4345 | |
91447636 A |
4346 | /*NOTREACHED*/ |
4347 | } | |
4348 | ||
b0d623f7 | 4349 | kern_return_t |
d9a64523 | 4350 | vm_set_buffer_cleanup_callout(boolean_t (*func)(int)) |
b0d623f7 A |
4351 | { |
4352 | if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) { | |
4353 | return KERN_SUCCESS; | |
4354 | } else { | |
4355 | return KERN_FAILURE; /* Already set */ | |
4356 | } | |
4357 | } | |
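/*
 * Illustrative sketch, not part of vm_pageout.c: how a buffer cache layer
 * might register its collect routine through the set-once callout above.
 * The names my_buffer_cache_collect and my_buffer_cache_init are
 * hypothetical; only vm_set_buffer_cleanup_callout() and its
 * KERN_SUCCESS/KERN_FAILURE contract come from the code above.
 */
static boolean_t
my_buffer_cache_collect(int all)
{
	/* return TRUE if a large zone-backed buffer was actually freed */
	(void) all;
	return FALSE;
}

static void
my_buffer_cache_init(void)
{
	/* first registration wins; any later attempt returns KERN_FAILURE */
	if (vm_set_buffer_cleanup_callout(my_buffer_cache_collect) != KERN_SUCCESS) {
		printf("buffer cleanup callout was already registered\n");
	}
}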
4358 | ||
0a7de745 A |
4359 | extern boolean_t memorystatus_manual_testing_on; |
4360 | extern unsigned int memorystatus_level; | |
39236c6e A |
4361 | |
4362 | ||
39236c6e A |
4363 | #if VM_PRESSURE_EVENTS |
4364 | ||
fe8ab488 A |
4365 | boolean_t vm_pressure_events_enabled = FALSE; |
4366 | ||
39236c6e A |
4367 | void |
4368 | vm_pressure_response(void) | |
4369 | { | |
0a7de745 A |
4370 | vm_pressure_level_t old_level = kVMPressureNormal; |
4371 | int new_level = -1; | |
4372 | unsigned int total_pages; | |
4373 | uint64_t available_memory = 0; | |
39236c6e | 4374 | |
0a7de745 | 4375 | if (vm_pressure_events_enabled == FALSE) { |
fe8ab488 | 4376 | return; |
0a7de745 | 4377 | } |
fe8ab488 | 4378 | |
5ba3f43e A |
4379 | #if CONFIG_EMBEDDED |
4380 | ||
4381 | available_memory = (uint64_t) memorystatus_available_pages; | |
4382 | ||
4383 | #else /* CONFIG_EMBEDDED */ | |
fe8ab488 | 4384 | |
39037602 | 4385 | available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY; |
5ba3f43e | 4386 | memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY; |
fe8ab488 | 4387 | |
5ba3f43e | 4388 | #endif /* CONFIG_EMBEDDED */ |
39236c6e | 4389 | |
39037602 A |
4390 | total_pages = (unsigned int) atop_64(max_mem); |
4391 | #if CONFIG_SECLUDED_MEMORY | |
4392 | total_pages -= vm_page_secluded_count; | |
4393 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
4394 | memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages); | |
39236c6e A |
4395 | |
4396 | if (memorystatus_manual_testing_on) { | |
4397 | return; | |
4398 | } | |
d9a64523 | 4399 | |
39236c6e A |
4400 | old_level = memorystatus_vm_pressure_level; |
4401 | ||
4402 | switch (memorystatus_vm_pressure_level) { | |
0a7de745 A |
4403 | case kVMPressureNormal: |
4404 | { | |
4405 | if (VM_PRESSURE_WARNING_TO_CRITICAL()) { | |
4406 | new_level = kVMPressureCritical; | |
4407 | } else if (VM_PRESSURE_NORMAL_TO_WARNING()) { | |
4408 | new_level = kVMPressureWarning; | |
39236c6e | 4409 | } |
0a7de745 A |
4410 | break; |
4411 | } | |
39236c6e | 4412 | |
0a7de745 A |
4413 | case kVMPressureWarning: |
4414 | case kVMPressureUrgent: | |
4415 | { | |
4416 | if (VM_PRESSURE_WARNING_TO_NORMAL()) { | |
4417 | new_level = kVMPressureNormal; | |
4418 | } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) { | |
4419 | new_level = kVMPressureCritical; | |
39236c6e | 4420 | } |
0a7de745 A |
4421 | break; |
4422 | } | |
39236c6e | 4423 | |
0a7de745 A |
4424 | case kVMPressureCritical: |
4425 | { | |
4426 | if (VM_PRESSURE_WARNING_TO_NORMAL()) { | |
4427 | new_level = kVMPressureNormal; | |
4428 | } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) { | |
4429 | new_level = kVMPressureWarning; | |
39236c6e | 4430 | } |
0a7de745 A |
4431 | break; |
4432 | } | |
39236c6e | 4433 | |
0a7de745 A |
4434 | default: |
4435 | return; | |
39236c6e | 4436 | } |
d9a64523 | 4437 | |
39236c6e A |
4438 | if (new_level != -1) { |
4439 | memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level; | |
4440 | ||
0a7de745 | 4441 | if (new_level != (int) old_level) { |
d9a64523 | 4442 | VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE, |
0a7de745 | 4443 | new_level, old_level, 0, 0); |
d9a64523 A |
4444 | } |
4445 | ||
4446 | if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) { | |
4447 | if (vm_pageout_state.vm_pressure_thread_running == FALSE) { | |
39236c6e A |
4448 | thread_wakeup(&vm_pressure_thread); |
4449 | } | |
fe8ab488 | 4450 | |
d9a64523 A |
4451 | if (old_level != memorystatus_vm_pressure_level) { |
4452 | thread_wakeup(&vm_pageout_state.vm_pressure_changed); | |
fe8ab488 | 4453 | } |
39236c6e A |
4454 | } |
4455 | } | |
39236c6e A |
4456 | } |
4457 | #endif /* VM_PRESSURE_EVENTS */ | |
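/*
 * Worked example of the memorystatus_level computation above, with
 * illustrative numbers (and ignoring the CONFIG_SECLUDED_MEMORY
 * adjustment): with max_mem = 4 GB and 4 KB pages, total_pages is
 * 1,048,576; if AVAILABLE_NON_COMPRESSED_MEMORY reports 262,144 pages,
 * memorystatus_level = (262144 * 100) / 1048576 = 25, i.e. roughly a
 * quarter of memory is still available without decompressing anything.
 */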
4458 | ||
cb323159 A |
4459 | /* |
4460 | * Function called by a kernel thread to either get the current pressure level or | |
4461 | * wait until memory pressure changes from a given level. | |
4462 | */ | |
39236c6e | 4463 | kern_return_t |
0a7de745 A |
4464 | mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) |
4465 | { | |
cb323159 | 4466 | #if !VM_PRESSURE_EVENTS |
d9a64523 | 4467 | |
39236c6e A |
4468 | return KERN_FAILURE; |
4469 | ||
4470 | #else /* VM_PRESSURE_EVENTS */ | |
4471 | ||
cb323159 A |
4472 | wait_result_t wr = 0; |
4473 | vm_pressure_level_t old_level = memorystatus_vm_pressure_level; | |
39236c6e | 4474 | |
cb323159 A |
4475 | if (pressure_level == NULL) { |
4476 | return KERN_INVALID_ARGUMENT; | |
4477 | } | |
39236c6e | 4478 | |
cb323159 A |
4479 | if (*pressure_level == kVMPressureJetsam) { |
4480 | if (!wait_for_pressure) { | |
4481 | return KERN_INVALID_ARGUMENT; | |
4482 | } | |
39236c6e | 4483 | |
cb323159 A |
4484 | lck_mtx_lock(&memorystatus_jetsam_fg_band_lock); |
4485 | wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters, | |
4486 | THREAD_INTERRUPTIBLE); | |
4487 | if (wr == THREAD_WAITING) { | |
4488 | ++memorystatus_jetsam_fg_band_waiters; | |
4489 | lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock); | |
4490 | wr = thread_block(THREAD_CONTINUE_NULL); | |
4491 | } else { | |
4492 | lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock); | |
4493 | } | |
4494 | if (wr != THREAD_AWAKENED) { | |
4495 | return KERN_ABORTED; | |
4496 | } | |
4497 | *pressure_level = kVMPressureJetsam; | |
4498 | return KERN_SUCCESS; | |
4499 | } | |
39236c6e | 4500 | |
cb323159 A |
4501 | if (wait_for_pressure == TRUE) { |
4502 | while (old_level == *pressure_level) { | |
4503 | wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed, | |
4504 | THREAD_INTERRUPTIBLE); | |
4505 | if (wr == THREAD_WAITING) { | |
4506 | wr = thread_block(THREAD_CONTINUE_NULL); | |
4507 | } | |
4508 | if (wr == THREAD_INTERRUPTED) { | |
4509 | return KERN_ABORTED; | |
39236c6e | 4510 | } |
39236c6e | 4511 | |
cb323159 A |
4512 | if (wr == THREAD_AWAKENED) { |
4513 | old_level = memorystatus_vm_pressure_level; | |
4514 | } | |
4515 | } | |
39236c6e A |
4516 | } |
4517 | ||
cb323159 A |
4518 | *pressure_level = old_level; |
4519 | return KERN_SUCCESS; | |
39236c6e A |
4520 | #endif /* VM_PRESSURE_EVENTS */ |
4521 | } | |
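/*
 * Minimal caller sketch for mach_vm_pressure_level_monitor(), not part of
 * this file: a kernel thread that repeatedly blocks until the pressure
 * level moves away from the one it last observed. The loop and the name
 * my_pressure_watcher are assumptions; the wait_for_pressure semantics,
 * the KERN_ABORTED case, and the kVMPressure* values come from the code
 * above.
 */
static void
my_pressure_watcher(void)
{
	unsigned int level = kVMPressureNormal;

	for (;;) {
		/* blocks while memorystatus_vm_pressure_level == level */
		if (mach_vm_pressure_level_monitor(TRUE, &level) != KERN_SUCCESS) {
			break;  /* KERN_ABORTED: the wait was interrupted */
		}
		printf("VM pressure level is now %u\n", level);
	}
}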
4522 | ||
4523 | #if VM_PRESSURE_EVENTS | |
4524 | void | |
0a7de745 A |
4525 | vm_pressure_thread(void) |
4526 | { | |
fe8ab488 | 4527 | static boolean_t thread_initialized = FALSE; |
316670eb | 4528 | |
fe8ab488 | 4529 | if (thread_initialized == TRUE) { |
d9a64523 | 4530 | vm_pageout_state.vm_pressure_thread_running = TRUE; |
316670eb | 4531 | consider_vm_pressure_events(); |
d9a64523 | 4532 | vm_pageout_state.vm_pressure_thread_running = FALSE; |
316670eb A |
4533 | } |
4534 | ||
d9a64523 | 4535 | thread_set_thread_name(current_thread(), "VM_pressure"); |
fe8ab488 | 4536 | thread_initialized = TRUE; |
316670eb A |
4537 | assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT); |
4538 | thread_block((thread_continue_t)vm_pressure_thread); | |
4539 | } | |
39236c6e A |
4540 | #endif /* VM_PRESSURE_EVENTS */ |
4541 | ||
316670eb | 4542 | |
316670eb A |
4543 | /* |
4544 | * called once per-second via "compute_averages" | |
4545 | */ | |
4546 | void | |
39037602 | 4547 | compute_pageout_gc_throttle(__unused void *arg) |
316670eb | 4548 | { |
d9a64523 | 4549 | if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) { |
d9a64523 | 4550 | vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page; |
316670eb A |
4551 | |
4552 | thread_wakeup((event_t) &vm_pageout_garbage_collect); | |
4553 | } | |
4554 | } | |
4555 | ||
5ba3f43e A |
4556 | /* |
4557 | * vm_pageout_garbage_collect can also be called when the zone allocator needs | |
4558 | * to call zone_gc on a different thread in order to trigger zone-map-exhaustion | |
4559 | * jetsams. We need to check if the zone map size is above its jetsam limit to | |
4560 | * decide if this was indeed the case. | |
4561 | * | |
4562 | * We need to do this on a different thread because of the following reasons: | |
4563 | * | |
4564 | * 1. In the case of synchronous jetsams, the leaking process can try to jetsam | |
4565 | * itself causing the system to hang. We perform synchronous jetsams if we're | |
4566 | * leaking in the VM map entries zone, so the leaking process could be doing a | |
4567 | * zalloc for a VM map entry while holding its vm_map lock, when it decides to | |
4568 | * jetsam itself. We also need the vm_map lock on the process termination path, | |
4569 | * which would now lead the dying process to deadlock against itself. | |
4570 | * | |
4571 | * 2. The jetsam path might need to allocate zone memory itself. We could try | |
4572 | * using the non-blocking variant of zalloc for this path, but we can still | |
4573 | * end up trying to do a kernel_memory_allocate when the zone_map is almost | |
4574 | * full. | |
4575 | */ | |
4576 | ||
4577 | extern boolean_t is_zone_map_nearing_exhaustion(void); | |
316670eb | 4578 | |
5ba3f43e | 4579 | void |
91447636 A |
4580 | vm_pageout_garbage_collect(int collect) |
4581 | { | |
4582 | if (collect) { | |
5ba3f43e A |
4583 | if (is_zone_map_nearing_exhaustion()) { |
4584 | /* | |
4585 | * Woken up by the zone allocator for zone-map-exhaustion jetsams. | |
4586 | * | |
4587 | * Bail out after calling zone_gc (which triggers the | |
4588 | * zone-map-exhaustion jetsams). If we fall through, the subsequent | |
4589 | * operations that clear out a bunch of caches might allocate zone | |
4590 | * memory themselves (for eg. vm_map operations would need VM map | |
4591 | * entries). Since the zone map is almost full at this point, we | |
4592 | * could end up with a panic. We just need to quickly jetsam a | |
4593 | * process and exit here. | |
4594 | * | |
4595 | * It could so happen that we were woken up to relieve memory | |
4596 | * pressure and the zone map also happened to be near its limit at | |
4597 | * the time, in which case we'll skip out early. But that should be | |
4598 | * ok; if memory pressure persists, the thread will simply be woken | |
4599 | * up again. | |
4600 | */ | |
4601 | consider_zone_gc(TRUE); | |
5ba3f43e A |
4602 | } else { |
4603 | /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */ | |
4604 | boolean_t buf_large_zfree = FALSE; | |
4605 | boolean_t first_try = TRUE; | |
91447636 | 4606 | |
5ba3f43e | 4607 | stack_collect(); |
316670eb | 4608 | |
5ba3f43e | 4609 | consider_machine_collect(); |
d9a64523 | 4610 | mbuf_drain(FALSE); |
5ba3f43e A |
4611 | |
4612 | do { | |
4613 | if (consider_buffer_cache_collect != NULL) { | |
4614 | buf_large_zfree = (*consider_buffer_cache_collect)(0); | |
4615 | } | |
4616 | if (first_try == TRUE || buf_large_zfree == TRUE) { | |
4617 | /* | |
4618 | * consider_zone_gc should be last, because the other operations | |
4619 | * might return memory to zones. | |
4620 | */ | |
4621 | consider_zone_gc(FALSE); | |
4622 | } | |
4623 | first_try = FALSE; | |
5ba3f43e | 4624 | } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target); |
91447636 | 4625 | |
5ba3f43e A |
4626 | consider_machine_adjust(); |
4627 | } | |
91447636 | 4628 | } |
5ba3f43e | 4629 | |
91447636 A |
4630 | assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT); |
4631 | ||
4632 | thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1); | |
4633 | /*NOTREACHED*/ | |
4634 | } | |
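/*
 * Illustrative sketch of the allocator-side trigger described in the block
 * comment above. The actual call site lives in the zone allocator, not in
 * this file, so the wrapper below is an assumption; both symbols it uses
 * appear in this file. Waking the thread is enough: on wakeup,
 * vm_pageout_garbage_collect() re-checks the zone map itself and takes the
 * zone-map-exhaustion branch.
 */
static void
my_zone_exhaustion_trigger(void)
{
	if (is_zone_map_nearing_exhaustion()) {
		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}
}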
4635 | ||
4636 | ||
15129b1c A |
4637 | #if VM_PAGE_BUCKETS_CHECK |
4638 | #if VM_PAGE_FAKE_BUCKETS | |
4639 | extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end; | |
4640 | #endif /* VM_PAGE_FAKE_BUCKETS */ | |
4641 | #endif /* VM_PAGE_BUCKETS_CHECK */ | |
91447636 | 4642 | |
39037602 | 4643 | |
3e170ce0 A |
4644 | |
4645 | void | |
4646 | vm_set_restrictions() | |
4647 | { | |
cb323159 A |
4648 | int vm_restricted_to_single_processor = 0; |
4649 | ||
4650 | if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) { | |
4651 | kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor); | |
4652 | vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE); | |
4653 | } else { | |
4654 | host_basic_info_data_t hinfo; | |
4655 | mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; | |
3e170ce0 A |
4656 | |
4657 | #define BSD_HOST 1 | |
cb323159 | 4658 | host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); |
3e170ce0 | 4659 | |
cb323159 | 4660 | assert(hinfo.max_cpus > 0); |
3e170ce0 | 4661 | |
cb323159 A |
4662 | if (hinfo.max_cpus <= 3) { |
4663 | /* | |
4664 | * on systems with a limited number of CPUs, bind the |
4665 | * 4 major threads that can free memory and that tend to use |
4666 | * a fair bit of CPU under pressured conditions to a single processor. |
4667 | * This ensures that these threads don't hog all of the available CPUs |
4668 | * (important for camera launch), while allowing them to run independently |
4669 | * with respect to locks... the 4 threads are |
4670 | * vm_pageout_scan, vm_pageout_iothread_internal (compressor), | |
4671 | * vm_compressor_swap_trigger_thread (minor and major compactions), | |
4672 | * memorystatus_thread (jetsams). | |
4673 | * | |
4674 | * the first time the thread is run, it is responsible for checking the | |
4675 | * state of vm_restricted_to_single_processor, and if TRUE it calls | |
4676 | * thread_bind_master... someday this should be replaced with a group | |
4677 | * scheduling mechanism and KPI. | |
4678 | */ | |
4679 | vm_pageout_state.vm_restricted_to_single_processor = TRUE; | |
4680 | } else { | |
4681 | vm_pageout_state.vm_restricted_to_single_processor = FALSE; | |
4682 | } | |
0a7de745 | 4683 | } |
3e170ce0 A |
4684 | } |
4685 | ||
91447636 A |
4686 | void |
4687 | vm_pageout(void) | |
4688 | { | |
0a7de745 A |
4689 | thread_t self = current_thread(); |
4690 | thread_t thread; | |
4691 | kern_return_t result; | |
4692 | spl_t s; | |
91447636 A |
4693 | |
4694 | /* | |
4695 | * Set thread privileges. | |
4696 | */ | |
4697 | s = splsched(); | |
3e170ce0 | 4698 | |
cb323159 A |
4699 | vm_pageout_scan_thread = self; |
4700 | ||
4701 | #if CONFIG_VPS_DYNAMIC_PRIO | |
4702 | ||
4703 | int vps_dynprio_bootarg = 0; | |
4704 | ||
4705 | if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) { | |
4706 | vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE); | |
4707 | kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled); | |
4708 | } else { | |
4709 | if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { | |
4710 | vps_dynamic_priority_enabled = TRUE; | |
4711 | } else { | |
4712 | vps_dynamic_priority_enabled = FALSE; | |
4713 | } | |
4714 | } | |
4715 | ||
4716 | if (vps_dynamic_priority_enabled) { | |
4717 | sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE); | |
4718 | thread_set_eager_preempt(self); | |
4719 | } else { | |
4720 | sched_set_kernel_thread_priority(self, BASEPRI_VM); | |
4721 | } | |
4722 | ||
4723 | #else /* CONFIG_VPS_DYNAMIC_PRIO */ | |
4724 | ||
4725 | vps_dynamic_priority_enabled = FALSE; | |
4726 | sched_set_kernel_thread_priority(self, BASEPRI_VM); | |
4727 | ||
4728 | #endif /* CONFIG_VPS_DYNAMIC_PRIO */ | |
4729 | ||
91447636 | 4730 | thread_lock(self); |
3e170ce0 | 4731 | self->options |= TH_OPT_VMPRIV; |
91447636 | 4732 | thread_unlock(self); |
2d21ac55 | 4733 | |
0a7de745 | 4734 | if (!self->reserved_stack) { |
2d21ac55 | 4735 | self->reserved_stack = self->kernel_stack; |
0a7de745 | 4736 | } |
2d21ac55 | 4737 | |
cb323159 A |
4738 | if (vm_pageout_state.vm_restricted_to_single_processor == TRUE && |
4739 | vps_dynamic_priority_enabled == FALSE) { | |
3e170ce0 | 4740 | thread_vm_bind_group_add(); |
0a7de745 | 4741 | } |
3e170ce0 | 4742 | |
cb323159 A |
4743 | |
4744 | ||
c6bf4f31 A |
4745 | #if __AMP__ |
4746 | PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound)); | |
4747 | if (vm_pgo_pbound) { | |
4748 | thread_bind_cluster_type('P'); | |
4749 | } | |
4750 | #endif /* __AMP__ */ | |
cb323159 | 4751 | |
91447636 A |
4752 | splx(s); |
4753 | ||
5ba3f43e A |
4754 | thread_set_thread_name(current_thread(), "VM_pageout_scan"); |
4755 | ||
91447636 A |
4756 | /* |
4757 | * Initialize some paging parameters. | |
4758 | */ | |
4759 | ||
d9a64523 A |
4760 | vm_pageout_state.vm_pressure_thread_running = FALSE; |
4761 | vm_pageout_state.vm_pressure_changed = FALSE; | |
4762 | vm_pageout_state.memorystatus_purge_on_warning = 2; | |
4763 | vm_pageout_state.memorystatus_purge_on_urgent = 5; | |
4764 | vm_pageout_state.memorystatus_purge_on_critical = 8; | |
4765 | vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS; | |
4766 | vm_pageout_state.vm_page_speculative_percentage = 5; | |
4767 | vm_pageout_state.vm_page_speculative_target = 0; | |
4768 | ||
4769 | vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL; | |
4770 | vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL; | |
4771 | ||
4772 | vm_pageout_state.vm_pageout_swap_wait = 0; | |
4773 | vm_pageout_state.vm_pageout_idle_wait = 0; | |
4774 | vm_pageout_state.vm_pageout_empty_wait = 0; | |
4775 | vm_pageout_state.vm_pageout_burst_wait = 0; | |
4776 | vm_pageout_state.vm_pageout_deadlock_wait = 0; | |
4777 | vm_pageout_state.vm_pageout_deadlock_relief = 0; | |
4778 | vm_pageout_state.vm_pageout_burst_inactive_throttle = 0; | |
4779 | ||
4780 | vm_pageout_state.vm_pageout_inactive = 0; | |
4781 | vm_pageout_state.vm_pageout_inactive_used = 0; | |
4782 | vm_pageout_state.vm_pageout_inactive_clean = 0; | |
4783 | ||
4784 | vm_pageout_state.vm_memory_pressure = 0; | |
0a7de745 | 4785 | vm_pageout_state.vm_page_filecache_min = 0; |
d9a64523 A |
4786 | #if CONFIG_JETSAM |
4787 | vm_pageout_state.vm_page_filecache_min_divisor = 70; | |
4788 | vm_pageout_state.vm_page_xpmapped_min_divisor = 40; | |
4789 | #else | |
4790 | vm_pageout_state.vm_page_filecache_min_divisor = 27; | |
4791 | vm_pageout_state.vm_page_xpmapped_min_divisor = 36; | |
4792 | #endif | |
4793 | vm_pageout_state.vm_page_free_count_init = vm_page_free_count; | |
91447636 | 4794 | |
0a7de745 | 4795 | vm_pageout_state.vm_pageout_considered_page_last = 0; |
91447636 | 4796 | |
0a7de745 | 4797 | if (vm_pageout_state.vm_pageout_swap_wait == 0) { |
d9a64523 | 4798 | vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT; |
0a7de745 | 4799 | } |
91447636 | 4800 | |
0a7de745 | 4801 | if (vm_pageout_state.vm_pageout_idle_wait == 0) { |
d9a64523 | 4802 | vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT; |
0a7de745 | 4803 | } |
91447636 | 4804 | |
0a7de745 | 4805 | if (vm_pageout_state.vm_pageout_burst_wait == 0) { |
d9a64523 | 4806 | vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT; |
0a7de745 | 4807 | } |
91447636 | 4808 | |
0a7de745 | 4809 | if (vm_pageout_state.vm_pageout_empty_wait == 0) { |
d9a64523 | 4810 | vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; |
0a7de745 | 4811 | } |
91447636 | 4812 | |
0a7de745 | 4813 | if (vm_pageout_state.vm_pageout_deadlock_wait == 0) { |
d9a64523 | 4814 | vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT; |
0a7de745 | 4815 | } |
55e303ae | 4816 | |
0a7de745 | 4817 | if (vm_pageout_state.vm_pageout_deadlock_relief == 0) { |
d9a64523 | 4818 | vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF; |
0a7de745 | 4819 | } |
2d21ac55 | 4820 | |
0a7de745 A |
4821 | if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) { |
4822 | vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE; | |
4823 | } | |
1c79356b A |
4824 | /* |
4825 | * even if we've already called vm_page_free_reserve | |
4826 | * call it again here to ensure that the targets are |
4827 | * accurately calculated (it uses vm_page_free_count_init); |
4828 | * calling it with an arg of 0 will not change the reserve |
4829 | * but will re-calculate free_min and free_target | |
4830 | */ | |
91447636 A |
4831 | if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) { |
4832 | vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved); | |
0a7de745 | 4833 | } else { |
1c79356b | 4834 | vm_page_free_reserve(0); |
0a7de745 | 4835 | } |
1c79356b | 4836 | |
55e303ae | 4837 | |
39037602 | 4838 | vm_page_queue_init(&vm_pageout_queue_external.pgo_pending); |
91447636 A |
4839 | vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX; |
4840 | vm_pageout_queue_external.pgo_laundry = 0; | |
4841 | vm_pageout_queue_external.pgo_idle = FALSE; | |
4842 | vm_pageout_queue_external.pgo_busy = FALSE; | |
4843 | vm_pageout_queue_external.pgo_throttled = FALSE; | |
0b4c1975 | 4844 | vm_pageout_queue_external.pgo_draining = FALSE; |
316670eb A |
4845 | vm_pageout_queue_external.pgo_lowpriority = FALSE; |
4846 | vm_pageout_queue_external.pgo_tid = -1; | |
4847 | vm_pageout_queue_external.pgo_inited = FALSE; | |
4848 | ||
39037602 | 4849 | vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending); |
2d21ac55 | 4850 | vm_pageout_queue_internal.pgo_maxlaundry = 0; |
91447636 A |
4851 | vm_pageout_queue_internal.pgo_laundry = 0; |
4852 | vm_pageout_queue_internal.pgo_idle = FALSE; | |
4853 | vm_pageout_queue_internal.pgo_busy = FALSE; | |
4854 | vm_pageout_queue_internal.pgo_throttled = FALSE; | |
0b4c1975 | 4855 | vm_pageout_queue_internal.pgo_draining = FALSE; |
316670eb A |
4856 | vm_pageout_queue_internal.pgo_lowpriority = FALSE; |
4857 | vm_pageout_queue_internal.pgo_tid = -1; | |
4858 | vm_pageout_queue_internal.pgo_inited = FALSE; | |
55e303ae | 4859 | |
2d21ac55 A |
4860 | /* internal pageout thread started when the default pager is registered for the first time */ |
4861 | /* external pageout and garbage collection threads started here */ | |
55e303ae | 4862 | |
d9a64523 | 4863 | result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL, |
0a7de745 A |
4864 | BASEPRI_VM, |
4865 | &vm_pageout_state.vm_pageout_external_iothread); | |
4866 | if (result != KERN_SUCCESS) { | |
91447636 | 4867 | panic("vm_pageout_iothread_external: create failed"); |
0a7de745 | 4868 | } |
cb323159 | 4869 | thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread"); |
d9a64523 | 4870 | thread_deallocate(vm_pageout_state.vm_pageout_external_iothread); |
9bccf70c | 4871 | |
2d21ac55 | 4872 | result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL, |
0a7de745 A |
4873 | BASEPRI_DEFAULT, |
4874 | &thread); | |
4875 | if (result != KERN_SUCCESS) { | |
91447636 | 4876 | panic("vm_pageout_garbage_collect: create failed"); |
0a7de745 | 4877 | } |
cb323159 | 4878 | thread_set_thread_name(thread, "VM_pageout_garbage_collect"); |
91447636 | 4879 | thread_deallocate(thread); |
55e303ae | 4880 | |
39236c6e | 4881 | #if VM_PRESSURE_EVENTS |
316670eb | 4882 | result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL, |
0a7de745 A |
4883 | BASEPRI_DEFAULT, |
4884 | &thread); | |
316670eb | 4885 | |
0a7de745 | 4886 | if (result != KERN_SUCCESS) { |
316670eb | 4887 | panic("vm_pressure_thread: create failed"); |
0a7de745 | 4888 | } |
316670eb A |
4889 | |
4890 | thread_deallocate(thread); | |
39236c6e | 4891 | #endif |
316670eb | 4892 | |
8f6c56a5 | 4893 | vm_object_reaper_init(); |
39037602 A |
4894 | |
4895 | ||
4896 | bzero(&vm_config, sizeof(vm_config)); | |
4897 | ||
0a7de745 | 4898 | switch (vm_compressor_mode) { |
39037602 A |
4899 | case VM_PAGER_DEFAULT: |
4900 | printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n"); | |
4901 | ||
4902 | case VM_PAGER_COMPRESSOR_WITH_SWAP: | |
4903 | vm_config.compressor_is_present = TRUE; | |
4904 | vm_config.swap_is_present = TRUE; | |
4905 | vm_config.compressor_is_active = TRUE; | |
4906 | vm_config.swap_is_active = TRUE; | |
4907 | break; | |
4908 | ||
4909 | case VM_PAGER_COMPRESSOR_NO_SWAP: | |
4910 | vm_config.compressor_is_present = TRUE; | |
4911 | vm_config.swap_is_present = TRUE; | |
4912 | vm_config.compressor_is_active = TRUE; | |
4913 | break; | |
4914 | ||
4915 | case VM_PAGER_FREEZER_DEFAULT: | |
4916 | printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n"); | |
4917 | ||
4918 | case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP: | |
4919 | vm_config.compressor_is_present = TRUE; | |
4920 | vm_config.swap_is_present = TRUE; | |
4921 | break; | |
4922 | ||
4923 | case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP: | |
4924 | vm_config.compressor_is_present = TRUE; | |
4925 | vm_config.swap_is_present = TRUE; | |
4926 | vm_config.compressor_is_active = TRUE; | |
4927 | vm_config.freezer_swap_is_active = TRUE; | |
4928 | break; | |
4929 | ||
4930 | case VM_PAGER_NOT_CONFIGURED: | |
4931 | break; | |
4932 | ||
4933 | default: | |
4934 | printf("unknown compressor mode - %x\n", vm_compressor_mode); | |
4935 | break; | |
4936 | } | |
0a7de745 | 4937 | if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { |
39236c6e | 4938 | vm_compressor_pager_init(); |
0a7de745 | 4939 | } |
2d21ac55 | 4940 | |
fe8ab488 A |
4941 | #if VM_PRESSURE_EVENTS |
4942 | vm_pressure_events_enabled = TRUE; | |
4943 | #endif /* VM_PRESSURE_EVENTS */ | |
4944 | ||
4945 | #if CONFIG_PHANTOM_CACHE | |
4946 | vm_phantom_cache_init(); | |
4947 | #endif | |
15129b1c A |
4948 | #if VM_PAGE_BUCKETS_CHECK |
4949 | #if VM_PAGE_FAKE_BUCKETS | |
4950 | printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n", | |
0a7de745 A |
4951 | (uint64_t) vm_page_fake_buckets_start, |
4952 | (uint64_t) vm_page_fake_buckets_end); | |
15129b1c | 4953 | pmap_protect(kernel_pmap, |
0a7de745 A |
4954 | vm_page_fake_buckets_start, |
4955 | vm_page_fake_buckets_end, | |
4956 | VM_PROT_READ); | |
15129b1c A |
4957 | // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */ |
4958 | #endif /* VM_PAGE_FAKE_BUCKETS */ | |
4959 | #endif /* VM_PAGE_BUCKETS_CHECK */ | |
4960 | ||
fe8ab488 A |
4961 | #if VM_OBJECT_TRACKING |
4962 | vm_object_tracking_init(); | |
4963 | #endif /* VM_OBJECT_TRACKING */ | |
4964 | ||
5ba3f43e | 4965 | vm_tests(); |
813fb2f6 | 4966 | |
91447636 | 4967 | vm_pageout_continue(); |
2d21ac55 A |
4968 | |
4969 | /* | |
4970 | * Unreached code! | |
4971 | * | |
4972 | * The vm_pageout_continue() call above never returns, so the code below is never | |
4973 | * executed. We take advantage of this to declare several DTrace VM related probe | |
4974 | * points that our kernel doesn't have an analog for. These are probe points that | |
4975 | * exist in Solaris and are in the DTrace documentation, so people may have written | |
4976 | * scripts that use them. Declaring the probe points here means their scripts will | |
4977 | * compile and execute, which we want for portability of the scripts, but since this |
4978 | * section of code is never reached, the probe points will simply never fire. Yes, |
4979 | * this is basically a hack. The problem is that the DTrace probe points were chosen with |
4980 | * Solaris-specific VM events in mind, not portability to different VM implementations. |
4981 | */ | |
4982 | ||
4983 | DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL); | |
4984 | DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL); | |
4985 | DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL); | |
4986 | DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL); | |
4987 | DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL); | |
4988 | DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL); | |
4989 | DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL); | |
91447636 | 4990 | /*NOTREACHED*/ |
9bccf70c A |
4991 | } |
4992 | ||
39236c6e A |
4993 | |
4994 | ||
2d21ac55 A |
4995 | kern_return_t |
4996 | vm_pageout_internal_start(void) | |
4997 | { | |
0a7de745 A |
4998 | kern_return_t result; |
4999 | int i; | |
39236c6e | 5000 | host_basic_info_data_t hinfo; |
3e170ce0 | 5001 | |
0a7de745 | 5002 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
39236c6e | 5003 | |
39037602 | 5004 | mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; |
39236c6e | 5005 | #define BSD_HOST 1 |
39037602 | 5006 | host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); |
39236c6e | 5007 | |
39037602 | 5008 | assert(hinfo.max_cpus > 0); |
3e170ce0 | 5009 | |
0a7de745 A |
5010 | lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL); |
5011 | ||
d9a64523 A |
5012 | #if CONFIG_EMBEDDED |
5013 | vm_pageout_state.vm_compressor_thread_count = 1; | |
5014 | #else | |
0a7de745 A |
5015 | if (hinfo.max_cpus > 4) { |
5016 | vm_pageout_state.vm_compressor_thread_count = 2; | |
5017 | } else { | |
5018 | vm_pageout_state.vm_compressor_thread_count = 1; | |
5019 | } | |
d9a64523 A |
5020 | #endif |
5021 | PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count, | |
0a7de745 | 5022 | sizeof(vm_pageout_state.vm_compressor_thread_count)); |
d9a64523 | 5023 | |
c6bf4f31 A |
5024 | #if __AMP__ |
5025 | PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound)); | |
5026 | if (vm_compressor_ebound) { | |
5027 | vm_pageout_state.vm_compressor_thread_count = 2; | |
5028 | } | |
5029 | #endif | |
0a7de745 | 5030 | if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) { |
d9a64523 | 5031 | vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1; |
0a7de745 A |
5032 | } |
5033 | if (vm_pageout_state.vm_compressor_thread_count <= 0) { | |
d9a64523 | 5034 | vm_pageout_state.vm_compressor_thread_count = 1; |
0a7de745 | 5035 | } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) { |
d9a64523 | 5036 | vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT; |
0a7de745 | 5037 | } |
3e170ce0 | 5038 | |
d9a64523 | 5039 | vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX; |
39037602 | 5040 | |
5ba3f43e A |
5041 | PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry)); |
5042 | ||
d9a64523 | 5043 | for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) { |
3e170ce0 A |
5044 | ciq[i].id = i; |
5045 | ciq[i].q = &vm_pageout_queue_internal; | |
5046 | ciq[i].current_chead = NULL; | |
5047 | ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE); | |
39037602 | 5048 | |
d9a64523 | 5049 | result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], |
0a7de745 | 5050 | BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread); |
3e170ce0 | 5051 | |
0a7de745 | 5052 | if (result == KERN_SUCCESS) { |
d9a64523 | 5053 | thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread); |
0a7de745 | 5054 | } else { |
39236c6e | 5055 | break; |
0a7de745 | 5056 | } |
39236c6e | 5057 | } |
2d21ac55 A |
5058 | return result; |
5059 | } | |
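/*
 * Worked example of the sizing above, with illustrative numbers: on a
 * non-embedded 8-CPU machine, vm_compressor_thread_count starts at 2
 * (max_cpus > 4), already satisfies both the max_cpus - 1 and
 * MAX_COMPRESSOR_THREAD_COUNT clamps, and pgo_maxlaundry becomes
 * 2 * 4 * VM_PAGE_LAUNDRY_MAX; the vmcomp_threads and vmpgoi_maxlaundry
 * boot-args can override either value.
 */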
5060 | ||
fe8ab488 A |
5061 | #if CONFIG_IOSCHED |
5062 | /* | |
5063 | * To support I/O Expedite for compressed files, we mark the upls with special flags. |
5064 | * The way decmpfs works is that we create a big upl which marks all the pages needed to |
5065 | * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs |
5066 | * then issues smaller I/Os for the compressed data, decompresses it and puts the data into the pages |
5067 | * being held in the big original UPL. We mark each of these smaller UPLs with the flag |
5068 | * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the |
5069 | * decmp_io_upl field (in the upl structure). This link is protected in the forward direction |
5070 | * by the req upl lock (the reverse link doesn't need synchronization, since we never inspect this link |
5071 | * unless the real I/O upl is being destroyed). |
5072 | */ | |
5073 | ||
5074 | ||
5075 | static void | |
5076 | upl_set_decmp_info(upl_t upl, upl_t src_upl) | |
5077 | { | |
0a7de745 A |
5078 | assert((src_upl->flags & UPL_DECMP_REQ) != 0); |
5079 | ||
5080 | upl_lock(src_upl); | |
5081 | if (src_upl->decmp_io_upl) { | |
5082 | /* | |
5083 | * If there is already an alive real I/O UPL, ignore this new UPL. | |
5084 | * This case should rarely happen and even if it does, it just means | |
5085 | * that we might issue a spurious expedite which the driver is expected | |
5086 | * to handle. | |
5087 | */ | |
5088 | upl_unlock(src_upl); | |
5089 | return; | |
5090 | } | |
5091 | src_upl->decmp_io_upl = (void *)upl; | |
5092 | src_upl->ref_count++; | |
5093 | ||
5094 | upl->flags |= UPL_DECMP_REAL_IO; | |
5095 | upl->decmp_io_upl = (void *)src_upl; | |
04b8595b | 5096 | upl_unlock(src_upl); |
fe8ab488 | 5097 | } |
d9a64523 | 5098 | #endif /* CONFIG_IOSCHED */ |
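/*
 * Illustrative lifecycle sketch for the decmpfs linkage above, pieced
 * together from this file (the exact decmpfs call sequence is an
 * assumption; upl_mark_decmp() and upl_unmark_decmp() are defined later
 * in this file):
 *
 *   upl_mark_decmp(big_upl);    // tags the request UPL with UPL_DECMP_REQ
 *                               // and remembers it on the creating thread
 *   ...
 *   // any internal, expedite-supported UPL created by this thread now
 *   // links itself back to big_upl via upl_set_decmp_info() in upl_create()
 *   ...
 *   upl_unmark_decmp(big_upl);  // clears the thread's decmp_upl reference
 */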
fe8ab488 A |
5099 | |
5100 | #if UPL_DEBUG | |
0a7de745 | 5101 | int upl_debug_enabled = 1; |
fe8ab488 | 5102 | #else |
0a7de745 | 5103 | int upl_debug_enabled = 0; |
fe8ab488 | 5104 | #endif |
1c79356b | 5105 | |
b0d623f7 A |
5106 | static upl_t |
5107 | upl_create(int type, int flags, upl_size_t size) | |
0b4e3aa0 | 5108 | { |
0a7de745 A |
5109 | upl_t upl; |
5110 | vm_size_t page_field_size = 0; | |
5111 | int upl_flags = 0; | |
5112 | vm_size_t upl_size = sizeof(struct upl); | |
0b4e3aa0 | 5113 | |
b0d623f7 A |
5114 | size = round_page_32(size); |
5115 | ||
2d21ac55 | 5116 | if (type & UPL_CREATE_LITE) { |
b0d623f7 | 5117 | page_field_size = (atop(size) + 7) >> 3; |
55e303ae | 5118 | page_field_size = (page_field_size + 3) & 0xFFFFFFFC; |
2d21ac55 A |
5119 | |
5120 | upl_flags |= UPL_LITE; | |
55e303ae | 5121 | } |
2d21ac55 | 5122 | if (type & UPL_CREATE_INTERNAL) { |
39236c6e | 5123 | upl_size += sizeof(struct upl_page_info) * atop(size); |
2d21ac55 A |
5124 | |
5125 | upl_flags |= UPL_INTERNAL; | |
0b4e3aa0 | 5126 | } |
2d21ac55 A |
5127 | upl = (upl_t)kalloc(upl_size + page_field_size); |
5128 | ||
0a7de745 A |
5129 | if (page_field_size) { |
5130 | bzero((char *)upl + upl_size, page_field_size); | |
5131 | } | |
2d21ac55 A |
5132 | |
5133 | upl->flags = upl_flags | flags; | |
0b4e3aa0 A |
5134 | upl->kaddr = (vm_offset_t)0; |
5135 | upl->size = 0; | |
5136 | upl->map_object = NULL; | |
5137 | upl->ref_count = 1; | |
6d2010ae | 5138 | upl->ext_ref_count = 0; |
0c530ab8 | 5139 | upl->highest_page = 0; |
0b4e3aa0 | 5140 | upl_lock_init(upl); |
b0d623f7 | 5141 | upl->vector_upl = NULL; |
3e170ce0 | 5142 | upl->associated_upl = NULL; |
d9a64523 | 5143 | upl->upl_iodone = NULL; |
fe8ab488 A |
5144 | #if CONFIG_IOSCHED |
5145 | if (type & UPL_CREATE_IO_TRACKING) { | |
5146 | upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); | |
5147 | } | |
d9a64523 | 5148 | |
fe8ab488 A |
5149 | upl->upl_reprio_info = 0; |
5150 | upl->decmp_io_upl = 0; | |
5151 | if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) { | |
5152 | /* Only support expedite on internal UPLs */ | |
5153 | thread_t curthread = current_thread(); | |
5154 | upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size)); | |
5155 | bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size))); | |
5156 | upl->flags |= UPL_EXPEDITE_SUPPORTED; | |
0a7de745 | 5157 | if (curthread->decmp_upl != NULL) { |
fe8ab488 | 5158 | upl_set_decmp_info(upl, curthread->decmp_upl); |
0a7de745 | 5159 | } |
fe8ab488 A |
5160 | } |
5161 | #endif | |
5162 | #if CONFIG_IOSCHED || UPL_DEBUG | |
5163 | if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) { | |
5164 | upl->upl_creator = current_thread(); | |
5165 | upl->uplq.next = 0; | |
5166 | upl->uplq.prev = 0; | |
5167 | upl->flags |= UPL_TRACKED_BY_OBJECT; | |
5168 | } | |
5169 | #endif | |
5170 | ||
b0d623f7 | 5171 | #if UPL_DEBUG |
0b4e3aa0 A |
5172 | upl->ubc_alias1 = 0; |
5173 | upl->ubc_alias2 = 0; | |
b0d623f7 | 5174 | |
b0d623f7 A |
5175 | upl->upl_state = 0; |
5176 | upl->upl_commit_index = 0; | |
5177 | bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records)); | |
5178 | ||
5179 | (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES); | |
91447636 | 5180 | #endif /* UPL_DEBUG */ |
b0d623f7 | 5181 | |
0a7de745 | 5182 | return upl; |
0b4e3aa0 A |
5183 | } |
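/*
 * Worked example of the lite-list sizing above, with illustrative numbers:
 * for a 1 MB UPL with 4 KB pages, atop(size) is 256, so page_field_size is
 * (256 + 7) >> 3 = 32 bytes (one bit per page), which is already 4-byte
 * aligned; an internal UPL additionally reserves 256 entries of
 * struct upl_page_info directly behind the upl structure.
 */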
5184 | ||
5185 | static void | |
2d21ac55 | 5186 | upl_destroy(upl_t upl) |
0b4e3aa0 | 5187 | { |
0a7de745 A |
5188 | int page_field_size; /* bit field in word size buf */ |
5189 | int size; | |
0b4e3aa0 | 5190 | |
6d2010ae A |
5191 | if (upl->ext_ref_count) { |
5192 | panic("upl(%p) ext_ref_count", upl); | |
5193 | } | |
5194 | ||
fe8ab488 | 5195 | #if CONFIG_IOSCHED |
0a7de745 A |
5196 | if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) { |
5197 | upl_t src_upl; | |
5198 | src_upl = upl->decmp_io_upl; | |
5199 | assert((src_upl->flags & UPL_DECMP_REQ) != 0); | |
5200 | upl_lock(src_upl); | |
5201 | src_upl->decmp_io_upl = NULL; | |
5202 | upl_unlock(src_upl); | |
5203 | upl_deallocate(src_upl); | |
5204 | } | |
fe8ab488 A |
5205 | #endif /* CONFIG_IOSCHED */ |
5206 | ||
5207 | #if CONFIG_IOSCHED || UPL_DEBUG | |
5208 | if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) { | |
0a7de745 | 5209 | vm_object_t object; |
2d21ac55 A |
5210 | |
5211 | if (upl->flags & UPL_SHADOWED) { | |
55e303ae A |
5212 | object = upl->map_object->shadow; |
5213 | } else { | |
5214 | object = upl->map_object; | |
5215 | } | |
fe8ab488 | 5216 | |
55e303ae | 5217 | vm_object_lock(object); |
2d21ac55 | 5218 | queue_remove(&object->uplq, upl, upl_t, uplq); |
316670eb A |
5219 | vm_object_activity_end(object); |
5220 | vm_object_collapse(object, 0, TRUE); | |
55e303ae | 5221 | vm_object_unlock(object); |
0b4e3aa0 | 5222 | } |
fe8ab488 | 5223 | #endif |
2d21ac55 A |
5224 | /* |
5225 | * drop a reference on the map_object whether or | |
5226 | * not a pageout object is inserted | |
5227 | */ | |
0a7de745 | 5228 | if (upl->flags & UPL_SHADOWED) { |
0b4e3aa0 | 5229 | vm_object_deallocate(upl->map_object); |
0a7de745 | 5230 | } |
55e303ae | 5231 | |
0a7de745 A |
5232 | if (upl->flags & UPL_DEVICE_MEMORY) { |
5233 | size = PAGE_SIZE; | |
5234 | } else { | |
5235 | size = upl->size; | |
5236 | } | |
55e303ae | 5237 | page_field_size = 0; |
2d21ac55 | 5238 | |
55e303ae | 5239 | if (upl->flags & UPL_LITE) { |
0a7de745 | 5240 | page_field_size = ((size / PAGE_SIZE) + 7) >> 3; |
55e303ae A |
5241 | page_field_size = (page_field_size + 3) & 0xFFFFFFFC; |
5242 | } | |
b0d623f7 A |
5243 | upl_lock_destroy(upl); |
5244 | upl->vector_upl = (vector_upl_t) 0xfeedbeef; | |
316670eb | 5245 | |
fe8ab488 | 5246 | #if CONFIG_IOSCHED |
0a7de745 A |
5247 | if (upl->flags & UPL_EXPEDITE_SUPPORTED) { |
5248 | kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE)); | |
5249 | } | |
fe8ab488 A |
5250 | #endif |
5251 | ||
2d21ac55 | 5252 | if (upl->flags & UPL_INTERNAL) { |
91447636 | 5253 | kfree(upl, |
0a7de745 A |
5254 | sizeof(struct upl) + |
5255 | (sizeof(struct upl_page_info) * (size / PAGE_SIZE)) | |
5256 | + page_field_size); | |
0b4e3aa0 | 5257 | } else { |
91447636 | 5258 | kfree(upl, sizeof(struct upl) + page_field_size); |
0b4e3aa0 A |
5259 | } |
5260 | } | |
5261 | ||
0b4e3aa0 | 5262 | void |
2d21ac55 | 5263 | upl_deallocate(upl_t upl) |
0b4e3aa0 | 5264 | { |
fe8ab488 | 5265 | upl_lock(upl); |
d9a64523 | 5266 | |
b0d623f7 | 5267 | if (--upl->ref_count == 0) { |
0a7de745 | 5268 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 5269 | vector_upl_deallocate(upl); |
0a7de745 | 5270 | } |
d9a64523 A |
5271 | upl_unlock(upl); |
5272 | ||
0a7de745 A |
5273 | if (upl->upl_iodone) { |
5274 | upl_callout_iodone(upl); | |
5275 | } | |
d9a64523 | 5276 | |
0b4e3aa0 | 5277 | upl_destroy(upl); |
0a7de745 | 5278 | } else { |
fe8ab488 | 5279 | upl_unlock(upl); |
0a7de745 | 5280 | } |
fe8ab488 A |
5281 | } |
5282 | ||
5283 | #if CONFIG_IOSCHED | |
5284 | void | |
5285 | upl_mark_decmp(upl_t upl) | |
5286 | { | |
5287 | if (upl->flags & UPL_TRACKED_BY_OBJECT) { | |
5288 | upl->flags |= UPL_DECMP_REQ; | |
5289 | upl->upl_creator->decmp_upl = (void *)upl; | |
d9a64523 | 5290 | } |
fe8ab488 A |
5291 | } |
5292 | ||
5293 | void | |
5294 | upl_unmark_decmp(upl_t upl) | |
5295 | { | |
0a7de745 | 5296 | if (upl && (upl->flags & UPL_DECMP_REQ)) { |
fe8ab488 A |
5297 | upl->upl_creator->decmp_upl = NULL; |
5298 | } | |
d9a64523 | 5299 | } |
fe8ab488 A |
5300 | |
5301 | #endif /* CONFIG_IOSCHED */ | |
5302 | ||
0a7de745 A |
5303 | #define VM_PAGE_Q_BACKING_UP(q) \ |
5304 | ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10)) | |
fe8ab488 A |
5305 | |
5306 | boolean_t must_throttle_writes(void); | |
5307 | ||
5308 | boolean_t | |
5309 | must_throttle_writes() | |
5310 | { | |
5311 | if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) && | |
0a7de745 A |
5312 | vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) { |
5313 | return TRUE; | |
5314 | } | |
fe8ab488 | 5315 | |
0a7de745 | 5316 | return FALSE; |
0b4e3aa0 | 5317 | } |
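/*
 * Worked example of the thresholds above, with illustrative numbers: with
 * pgo_maxlaundry = 128, VM_PAGE_Q_BACKING_UP fires once pgo_laundry reaches
 * (128 * 8) / 10 = 102 pages, and writes are throttled only if
 * vm_page_pageable_external_count also exceeds 6/10 of
 * AVAILABLE_NON_COMPRESSED_MEMORY.
 */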
1c79356b | 5318 | |
fe8ab488 | 5319 | |
d9a64523 A |
5320 | /* |
5321 | * Routine: vm_object_upl_request | |
5322 | * Purpose: | |
1c79356b A |
5323 | * Cause the population of a portion of a vm_object. |
5324 | * Depending on the nature of the request, the pages | |
5325 | * returned may contain valid data or be uninitialized. |
5326 | * A page list structure, listing the physical pages, |
5327 | * will be returned upon request. | |
5328 | * This function is called by the file system or any other | |
5329 | * supplier of backing store to a pager. | |
5330 | * IMPORTANT NOTE: The caller must still respect the relationship | |
5331 | * between the vm_object and its backing memory object. The | |
5332 | * caller MUST NOT substitute changes in the backing file | |
d9a64523 | 5333 | * without first doing a memory_object_lock_request on the |
1c79356b A |
5334 | * target range unless it is known that the pages are not |
5335 | * shared with another entity at the pager level. | |
5336 | * Copy_in_to: | |
5337 | * if a page list structure is present | |
5338 | * return the mapped physical pages, where a | |
5339 | * page is not present, return a non-initialized | |
5340 | * one. If the no_sync bit is turned on, don't | |
5341 | * call the pager unlock to synchronize with other | |
5342 | * possible copies of the page. Leave pages busy | |
5343 | * in the original object, if a page list structure | |
5344 | * was specified. When a commit of the page list | |
5345 | * pages is done, the dirty bit will be set for each one. | |
5346 | * Copy_out_from: | |
5347 | * If a page list structure is present, return | |
5348 | * all mapped pages. Where a page does not exist | |
5349 | * map a zero filled one. Leave pages busy in | |
5350 | * the original object. If a page list structure | |
d9a64523 | 5351 | * is not specified, this call is a no-op. |
1c79356b A |
5352 | * |
5353 | * Note: access of default pager objects has a rather interesting | |
5354 | * twist. The caller of this routine, presumably the file system | |
5355 | * page cache handling code, will never actually make a request | |
5356 | * against a default pager backed object. Only the default | |
5357 | * pager will make requests on backing store related vm_objects. |
5358 | * In this way the default pager can maintain the relationship | |
d9a64523 | 5359 | * between backing store files (abstract memory objects) and |
1c79356b A |
5360 | * the vm_objects (cache objects) they support. |
5361 | * | |
5362 | */ | |
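/*
 * Minimal caller sketch, not part of this file: populating one page of a
 * file-backed object with an internal lite UPL. The wrapper name, the
 * immediate abort, and the choice of flags are assumptions used for
 * illustration; upl_abort() and upl_deallocate() are the usual release
 * path, with upl_abort()/upl_commit() implemented elsewhere in this file.
 */
static kern_return_t
my_populate_one_page(vm_object_t object, vm_object_offset_t offset, vm_tag_t tag)
{
	upl_t           upl = NULL;
	unsigned int    page_list_count = 0;
	kern_return_t   kr;

	kr = vm_object_upl_request(object, offset, PAGE_SIZE,
	    &upl, NULL, &page_list_count,
	    UPL_SET_INTERNAL | UPL_SET_LITE, tag);

	if (kr == KERN_SUCCESS) {
		/*
		 * the pages named by the UPL are left busy in the object;
		 * a real caller would issue I/O here, then commit or abort
		 */
		upl_abort(upl, 0);
		upl_deallocate(upl);
	}
	return kr;
}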
91447636 | 5363 | |
0b4e3aa0 A |
5364 | __private_extern__ kern_return_t |
5365 | vm_object_upl_request( | |
0a7de745 A |
5366 | vm_object_t object, |
5367 | vm_object_offset_t offset, | |
5368 | upl_size_t size, | |
5369 | upl_t *upl_ptr, | |
5370 | upl_page_info_array_t user_page_list, | |
5371 | unsigned int *page_list_count, | |
5372 | upl_control_flags_t cntrl_flags, | |
5373 | vm_tag_t tag) | |
1c79356b | 5374 | { |
0a7de745 A |
5375 | vm_page_t dst_page = VM_PAGE_NULL; |
5376 | vm_object_offset_t dst_offset; | |
5377 | upl_size_t xfer_size; | |
5378 | unsigned int size_in_pages; | |
5379 | boolean_t dirty; | |
5380 | boolean_t hw_dirty; | |
5381 | upl_t upl = NULL; | |
5382 | unsigned int entry; | |
5383 | vm_page_t alias_page = NULL; | |
5384 | int refmod_state = 0; | |
5385 | wpl_array_t lite_list = NULL; | |
5386 | vm_object_t last_copy_object; | |
5387 | struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; | |
5388 | struct vm_page_delayed_work *dwp; | |
5389 | int dw_count; | |
5390 | int dw_limit; | |
5391 | int io_tracking_flag = 0; | |
5392 | int grab_options; | |
5393 | int page_grab_count = 0; | |
5394 | ppnum_t phys_page; | |
5395 | pmap_flush_context pmap_flush_context_storage; | |
d9a64523 | 5396 | boolean_t pmap_flushes_delayed = FALSE; |
0a7de745 A |
5397 | #if DEVELOPMENT || DEBUG |
5398 | task_t task = current_task(); | |
5399 | #endif /* DEVELOPMENT || DEBUG */ | |
91447636 A |
5400 | |
5401 | if (cntrl_flags & ~UPL_VALID_FLAGS) { | |
5402 | /* | |
5403 | * For forward compatibility's sake, | |
5404 | * reject any unknown flag. | |
5405 | */ | |
5406 | return KERN_INVALID_VALUE; | |
5407 | } | |
0a7de745 | 5408 | if ((!object->internal) && (object->paging_offset != 0)) { |
2d21ac55 | 5409 | panic("vm_object_upl_request: external object with non-zero paging offset\n"); |
0a7de745 A |
5410 | } |
5411 | if (object->phys_contiguous) { | |
5412 | panic("vm_object_upl_request: contiguous object specified\n"); | |
5413 | } | |
0b4e3aa0 | 5414 | |
d9a64523 | 5415 | VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0); |
0b4e3aa0 | 5416 | |
0a7de745 | 5417 | if (size > MAX_UPL_SIZE_BYTES) { |
fe8ab488 | 5418 | size = MAX_UPL_SIZE_BYTES; |
0a7de745 | 5419 | } |
1c79356b | 5420 | |
0a7de745 A |
5421 | if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) { |
5422 | *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT; | |
5423 | } | |
fe8ab488 A |
5424 | |
5425 | #if CONFIG_IOSCHED || UPL_DEBUG | |
0a7de745 | 5426 | if (object->io_tracking || upl_debug_enabled) { |
fe8ab488 | 5427 | io_tracking_flag |= UPL_CREATE_IO_TRACKING; |
0a7de745 | 5428 | } |
fe8ab488 A |
5429 | #endif |
5430 | #if CONFIG_IOSCHED | |
0a7de745 | 5431 | if (object->io_tracking) { |
fe8ab488 | 5432 | io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP; |
0a7de745 | 5433 | } |
fe8ab488 | 5434 | #endif |
1c79356b | 5435 | |
2d21ac55 | 5436 | if (cntrl_flags & UPL_SET_INTERNAL) { |
0a7de745 | 5437 | if (cntrl_flags & UPL_SET_LITE) { |
fe8ab488 | 5438 | upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size); |
91447636 | 5439 | |
2d21ac55 A |
5440 | user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); |
5441 | lite_list = (wpl_array_t) | |
0a7de745 A |
5442 | (((uintptr_t)user_page_list) + |
5443 | ((size / PAGE_SIZE) * sizeof(upl_page_info_t))); | |
b0d623f7 A |
5444 | if (size == 0) { |
5445 | user_page_list = NULL; | |
5446 | lite_list = NULL; | |
5447 | } | |
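/*
 * Editorial annotation (not original source): an INTERNAL + LITE upl
 * is a single allocation laid out as
 *
 *   [ struct upl ][ upl_page_info_t x (size/PAGE_SIZE) ][ lite bitmap ]
 *
 * which is why user_page_list points just past the upl header and
 * lite_list just past the page-info array in the arithmetic above.
 */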
1c79356b | 5448 | } else { |
0a7de745 | 5449 | upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size); |
55e303ae | 5450 | |
2d21ac55 | 5451 | user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); |
b0d623f7 A |
5452 | if (size == 0) { |
5453 | user_page_list = NULL; | |
5454 | } | |
55e303ae | 5455 | } |
2d21ac55 | 5456 | } else { |
0a7de745 | 5457 | if (cntrl_flags & UPL_SET_LITE) { |
fe8ab488 | 5458 | upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size); |
55e303ae | 5459 | |
2d21ac55 | 5460 | lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); |
b0d623f7 A |
5461 | if (size == 0) { |
5462 | lite_list = NULL; | |
5463 | } | |
55e303ae | 5464 | } else { |
0a7de745 | 5465 | upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size); |
0b4e3aa0 | 5466 | } |
55e303ae | 5467 | } |
2d21ac55 | 5468 | *upl_ptr = upl; |
d9a64523 | 5469 | |
0a7de745 A |
5470 | if (user_page_list) { |
5471 | user_page_list[0].device = FALSE; | |
5472 | } | |
91447636 | 5473 | |
2d21ac55 | 5474 | if (cntrl_flags & UPL_SET_LITE) { |
0a7de745 | 5475 | upl->map_object = object; |
2d21ac55 | 5476 | } else { |
0a7de745 | 5477 | upl->map_object = vm_object_allocate(size); |
2d21ac55 A |
5478 | /* |
5479 | * No need to lock the new object: nobody else knows
5480 | * about it yet, so it's all ours so far. | |
5481 | */ | |
5482 | upl->map_object->shadow = object; | |
5483 | upl->map_object->pageout = TRUE; | |
5484 | upl->map_object->can_persist = FALSE; | |
5485 | upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
6d2010ae | 5486 | upl->map_object->vo_shadow_offset = offset; |
2d21ac55 A |
5487 | upl->map_object->wimg_bits = object->wimg_bits; |
5488 | ||
5489 | VM_PAGE_GRAB_FICTITIOUS(alias_page); | |
5490 | ||
5491 | upl->flags |= UPL_SHADOWED; | |
5492 | } | |
0a7de745 | 5493 | if (cntrl_flags & UPL_FOR_PAGEOUT) { |
91447636 | 5494 | upl->flags |= UPL_PAGEOUT; |
0a7de745 | 5495 | } |
2d21ac55 | 5496 | |
55e303ae | 5497 | vm_object_lock(object); |
b0d623f7 | 5498 | vm_object_activity_begin(object); |
2d21ac55 | 5499 | |
39037602 A |
5500 | grab_options = 0; |
5501 | #if CONFIG_SECLUDED_MEMORY | |
5502 | if (object->can_grab_secluded) { | |
5503 | grab_options |= VM_PAGE_GRAB_SECLUDED; | |
5504 | } | |
5505 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
5506 | ||
2d21ac55 A |
5507 | /* |
5508 | * we can lock in the paging_offset once paging_in_progress is set | |
5509 | */ | |
5510 | upl->size = size; | |
5511 | upl->offset = offset + object->paging_offset; | |
55e303ae | 5512 | |
fe8ab488 A |
5513 | #if CONFIG_IOSCHED || UPL_DEBUG |
5514 | if (object->io_tracking || upl_debug_enabled) { | |
5515 | vm_object_activity_begin(object); | |
5516 | queue_enter(&object->uplq, upl, upl_t, uplq); | |
5517 | } | |
5518 | #endif | |
2d21ac55 | 5519 | if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) { |
91447636 | 5520 | /* |
2d21ac55 A |
5521 | * Honor copy-on-write obligations |
5522 | * | |
91447636 A |
5523 | * The caller is gathering these pages and |
5524 | * might modify their contents. We need to | |
5525 | * make sure that the copy object has its own | |
5526 | * private copies of these pages before we let | |
5527 | * the caller modify them. | |
5528 | */ | |
5529 | vm_object_update(object, | |
0a7de745 A |
5530 | offset, |
5531 | size, | |
5532 | NULL, | |
5533 | NULL, | |
5534 | FALSE, /* should_return */ | |
5535 | MEMORY_OBJECT_COPY_SYNC, | |
5536 | VM_PROT_NO_CHANGE); | |
d9a64523 A |
5537 | |
5538 | VM_PAGEOUT_DEBUG(upl_cow, 1); | |
5539 | VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT)); | |
55e303ae | 5540 | } |
2d21ac55 A |
5541 | /* |
5542 | * remember which copy object we synchronized with | |
5543 | */ | |
91447636 | 5544 | last_copy_object = object->copy; |
1c79356b | 5545 | entry = 0; |
55e303ae | 5546 | |
2d21ac55 A |
5547 | xfer_size = size; |
5548 | dst_offset = offset; | |
6d2010ae | 5549 | size_in_pages = size / PAGE_SIZE; |
2d21ac55 | 5550 | |
b0d623f7 A |
5551 | dwp = &dw_array[0]; |
5552 | dw_count = 0; | |
6d2010ae A |
5553 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
5554 | ||
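/*
 * Editorial annotation (not original source): dw_array batches deferred
 * per-page queue operations (activate, wire, set-reference, ...) so that
 * vm_page_do_delayed_work() can take the page-queues lock once per batch
 * instead of once per page; the loop below flushes the batch whenever
 * dw_count reaches dw_limit, and once more after the loop for any
 * remainder.
 */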
5555 | if (vm_page_free_count > (vm_page_free_target + size_in_pages) || | |
0a7de745 | 5556 | object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) { |
6d2010ae | 5557 | object->scan_collisions = 0; |
0a7de745 | 5558 | } |
b0d623f7 | 5559 | |
fe8ab488 | 5560 | if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) { |
0a7de745 | 5561 | boolean_t isSSD = FALSE; |
fe8ab488 | 5562 | |
5ba3f43e A |
5563 | #if CONFIG_EMBEDDED |
5564 | isSSD = TRUE; | |
5565 | #else | |
fe8ab488 | 5566 | vnode_pager_get_isSSD(object->pager, &isSSD); |
5ba3f43e | 5567 | #endif |
fe8ab488 | 5568 | vm_object_unlock(object); |
d9a64523 | 5569 | |
fe8ab488 A |
5570 | OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages); |
5571 | ||
0a7de745 | 5572 | if (isSSD == TRUE) { |
fe8ab488 | 5573 | delay(1000 * size_in_pages); |
0a7de745 | 5574 | } else { |
fe8ab488 | 5575 | delay(5000 * size_in_pages); |
0a7de745 | 5576 | } |
fe8ab488 A |
5577 | OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages); |
5578 | ||
5579 | vm_object_lock(object); | |
5580 | } | |
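/*
 * Editorial annotation (not original source): assuming delay() takes
 * microseconds, the stall above works out to roughly 1ms per page when
 * the backing store is an SSD and 5ms per page otherwise, with
 * vm_upl_wait_for_pages advertising how many pages the throttled
 * writer is currently waiting on.
 */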
5581 | ||
2d21ac55 | 5582 | while (xfer_size) { |
b0d623f7 A |
5583 | dwp->dw_mask = 0; |
5584 | ||
2d21ac55 | 5585 | if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) { |
2d21ac55 A |
5586 | vm_object_unlock(object); |
5587 | VM_PAGE_GRAB_FICTITIOUS(alias_page); | |
b0d623f7 | 5588 | vm_object_lock(object); |
4a3eedf9 | 5589 | } |
2d21ac55 | 5590 | if (cntrl_flags & UPL_COPYOUT_FROM) { |
0a7de745 A |
5591 | upl->flags |= UPL_PAGE_SYNC_DONE; |
5592 | ||
5593 | if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) || | |
5594 | dst_page->vmp_fictitious || | |
5595 | dst_page->vmp_absent || | |
5596 | dst_page->vmp_error || | |
5597 | dst_page->vmp_cleaning || | |
5598 | (VM_PAGE_WIRED(dst_page))) { | |
5599 | if (user_page_list) { | |
1c79356b | 5600 | user_page_list[entry].phys_addr = 0; |
0a7de745 | 5601 | } |
2d21ac55 | 5602 | |
b0d623f7 | 5603 | goto try_next_page; |
2d21ac55 | 5604 | } |
39037602 A |
5605 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); |
5606 | ||
2d21ac55 A |
5607 | /* |
5608 | * grab this up front... | |
5609 | * a high percentage of the time we're going to
5610 | * need the hardware modification state a bit later | |
5611 | * anyway... so we can eliminate an extra call into | |
5612 | * the pmap layer by grabbing it here and recording it | |
5613 | */ | |
0a7de745 A |
5614 | if (dst_page->vmp_pmapped) { |
5615 | refmod_state = pmap_get_refmod(phys_page); | |
5616 | } else { | |
5617 | refmod_state = 0; | |
5618 | } | |
2d21ac55 | 5619 | |
0a7de745 A |
5620 | if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) { |
5621 | /* | |
2d21ac55 A |
5622 | * page is on inactive list and referenced... |
5623 | * reactivate it now... this gets it out of the | |
5624 | * way of vm_pageout_scan which would have to | |
5625 | * reactivate it upon tripping over it | |
91447636 | 5626 | */ |
b0d623f7 | 5627 | dwp->dw_mask |= DW_vm_page_activate; |
2d21ac55 A |
5628 | } |
5629 | if (cntrl_flags & UPL_RET_ONLY_DIRTY) { | |
0a7de745 | 5630 | /* |
2d21ac55 A |
5631 | * we're only asking for DIRTY pages to be returned |
5632 | */ | |
0a7de745 A |
5633 | if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) { |
5634 | /* | |
2d21ac55 | 5635 | * if we were the page stolen by vm_pageout_scan to be |
d9a64523 | 5636 | * cleaned (as opposed to a buddy being clustered in),
2d21ac55 A |
5637 | * or if this request is not being driven by a PAGEOUT cluster,
5638 | * then we only need to check for the page being dirty or | |
5639 | * precious to decide whether to return it | |
91447636 | 5640 | */ |
0a7de745 A |
5641 | if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) { |
5642 | goto check_busy; | |
5643 | } | |
2d21ac55 | 5644 | goto dont_return; |
1c79356b | 5645 | } |
2d21ac55 A |
5646 | /* |
5647 | * this is a request for a PAGEOUT cluster and this page | |
5648 | * is merely along for the ride as a 'buddy'... not only | |
5649 | * does it have to be dirty to be returned, but it also | |
316670eb | 5650 | * can't have been referenced recently... |
2d21ac55 | 5651 | */ |
0a7de745 A |
5652 | if ((hibernate_cleaning_in_progress == TRUE || |
5653 | (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) || | |
5654 | (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) && | |
5655 | ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) { | |
5656 | goto check_busy; | |
1c79356b | 5657 | } |
2d21ac55 A |
5658 | dont_return: |
5659 | /* | |
5660 | * if we reach here, we're not to return | |
5661 | * the page... go on to the next one | |
5662 | */ | |
d9a64523 | 5663 | if (dst_page->vmp_laundry == TRUE) { |
316670eb A |
5664 | /* |
5665 | * if we get here, the page is not 'cleaning' (filtered out above). | |
5666 | * since it has been referenced, remove it from the laundry | |
5667 | * so we don't pay the cost of an I/O to clean a page | |
5668 | * we're just going to take back | |
5669 | */ | |
5670 | vm_page_lockspin_queues(); | |
5671 | ||
5672 | vm_pageout_steal_laundry(dst_page, TRUE); | |
5673 | vm_page_activate(dst_page); | |
d9a64523 | 5674 | |
316670eb A |
5675 | vm_page_unlock_queues(); |
5676 | } | |
0a7de745 A |
5677 | if (user_page_list) { |
5678 | user_page_list[entry].phys_addr = 0; | |
5679 | } | |
55e303ae | 5680 | |
b0d623f7 | 5681 | goto try_next_page; |
2d21ac55 | 5682 | } |
d9a64523 A |
5683 | check_busy: |
5684 | if (dst_page->vmp_busy) { | |
0a7de745 A |
5685 | if (cntrl_flags & UPL_NOBLOCK) { |
5686 | if (user_page_list) { | |
5687 | user_page_list[entry].phys_addr = 0; | |
5688 | } | |
39037602 | 5689 | dwp->dw_mask = 0; |
55e303ae | 5690 | |
b0d623f7 | 5691 | goto try_next_page; |
1c79356b | 5692 | } |
2d21ac55 A |
5693 | /* |
5694 | * someone else is playing with the | |
5695 | * page. We will have to wait. | |
5696 | */ | |
2d21ac55 | 5697 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
1c79356b | 5698 | |
316670eb | 5699 | continue; |
2d21ac55 | 5700 | } |
d9a64523 | 5701 | if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { |
b0d623f7 A |
5702 | vm_page_lockspin_queues(); |
5703 | ||
d9a64523 | 5704 | if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { |
b0d623f7 A |
5705 | /* |
5706 | * we've buddied up a page for a clustered pageout | |
5707 | * that has already been moved to the pageout | |
5708 | * queue by pageout_scan... we need to remove | |
5709 | * it from the queue and drop the laundry count | |
5710 | * on that queue | |
5711 | */ | |
5712 | vm_pageout_throttle_up(dst_page); | |
5713 | } | |
5714 | vm_page_unlock_queues(); | |
91447636 | 5715 | } |
2d21ac55 | 5716 | hw_dirty = refmod_state & VM_MEM_MODIFIED; |
d9a64523 | 5717 | dirty = hw_dirty ? TRUE : dst_page->vmp_dirty; |
2d21ac55 | 5718 | |
0a7de745 A |
5719 | if (phys_page > upl->highest_page) { |
5720 | upl->highest_page = phys_page; | |
5721 | } | |
2d21ac55 | 5722 | |
0a7de745 | 5723 | assert(!pmap_is_noencrypt(phys_page)); |
3e170ce0 | 5724 | |
2d21ac55 | 5725 | if (cntrl_flags & UPL_SET_LITE) { |
0a7de745 | 5726 | unsigned int pg_num; |
2d21ac55 | 5727 | |
0a7de745 A |
5728 | pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE); |
5729 | assert(pg_num == (dst_offset - offset) / PAGE_SIZE); | |
cb323159 | 5730 | lite_list[pg_num >> 5] |= 1U << (pg_num & 31); |
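/*
 * Editorial annotation (not original source): the lite list is an
 * array of 32-bit words used as a bitmap, so pg_num >> 5 selects the
 * word and pg_num & 31 the bit within it; e.g. page 70 of the UPL
 * sets bit 6 of word 2.
 */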
2d21ac55 | 5731 | |
d9a64523 | 5732 | if (hw_dirty) { |
0a7de745 A |
5733 | if (pmap_flushes_delayed == FALSE) { |
5734 | pmap_flush_context_init(&pmap_flush_context_storage); | |
d9a64523 A |
5735 | pmap_flushes_delayed = TRUE; |
5736 | } | |
0a7de745 A |
5737 | pmap_clear_refmod_options(phys_page, |
5738 | VM_MEM_MODIFIED, | |
5739 | PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE, | |
5740 | &pmap_flush_context_storage); | |
d9a64523 | 5741 | } |
2d21ac55 A |
5742 | |
5743 | /* | |
d9a64523 | 5744 | * Mark original page as cleaning |
2d21ac55 A |
5745 | * in place. |
5746 | */ | |
d9a64523 A |
5747 | dst_page->vmp_cleaning = TRUE; |
5748 | dst_page->vmp_precious = FALSE; | |
2d21ac55 | 5749 | } else { |
0a7de745 | 5750 | /* |
2d21ac55 A |
5751 | * use pageclean setup, it is more |
5752 | * convenient even for the pageout | |
5753 | * cases here | |
5754 | */ | |
0a7de745 | 5755 | vm_object_lock(upl->map_object); |
2d21ac55 A |
5756 | vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size); |
5757 | vm_object_unlock(upl->map_object); | |
5758 | ||
d9a64523 | 5759 | alias_page->vmp_absent = FALSE; |
2d21ac55 | 5760 | alias_page = NULL; |
1c79356b | 5761 | } |
316670eb A |
5762 | if (dirty) { |
5763 | SET_PAGE_DIRTY(dst_page, FALSE); | |
5764 | } else { | |
d9a64523 | 5765 | dst_page->vmp_dirty = FALSE; |
316670eb | 5766 | } |
55e303ae | 5767 | |
0a7de745 | 5768 | if (!dirty) { |
d9a64523 | 5769 | dst_page->vmp_precious = TRUE; |
0a7de745 | 5770 | } |
91447636 | 5771 | |
0a7de745 A |
5772 | if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) { |
5773 | if (!VM_PAGE_WIRED(dst_page)) { | |
d9a64523 | 5774 | dst_page->vmp_free_when_done = TRUE; |
0a7de745 | 5775 | } |
2d21ac55 A |
5776 | } |
5777 | } else { | |
5778 | if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) { | |
91447636 | 5779 | /* |
2d21ac55 A |
5780 | * Honor copy-on-write obligations |
5781 | * | |
91447636 A |
5782 | * The copy object has changed since we |
5783 | * last synchronized for copy-on-write. | |
5784 | * Another copy object might have been | |
5785 | * inserted while we released the object's | |
5786 | * lock. Since someone could have seen the | |
5787 | * original contents of the remaining pages | |
5788 | * through that new object, we have to | |
5789 | * synchronize with it again for the remaining | |
5790 | * pages only. The previous pages are "busy" | |
5791 | * so they can not be seen through the new | |
5792 | * mapping. The new mapping will see our | |
5793 | * upcoming changes for those previous pages, | |
5794 | * but that's OK since they couldn't see what | |
5795 | * was there before. It's just a race anyway | |
5796 | * and there's no guarantee of consistency or | |
5797 | * atomicity. We just don't want new mappings | |
5798 | * to see both the *before* and *after* pages. | |
5799 | */ | |
5800 | if (object->copy != VM_OBJECT_NULL) { | |
5801 | vm_object_update( | |
5802 | object, | |
5803 | dst_offset,/* current offset */ | |
5804 | xfer_size, /* remaining size */ | |
5805 | NULL, | |
5806 | NULL, | |
0a7de745 | 5807 | FALSE, /* should_return */ |
91447636 A |
5808 | MEMORY_OBJECT_COPY_SYNC, |
5809 | VM_PROT_NO_CHANGE); | |
2d21ac55 | 5810 | |
d9a64523 A |
5811 | VM_PAGEOUT_DEBUG(upl_cow_again, 1); |
5812 | VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT)); | |
91447636 | 5813 | } |
2d21ac55 A |
5814 | /* |
5815 | * remember the copy object we synced with | |
5816 | */ | |
91447636 A |
5817 | last_copy_object = object->copy; |
5818 | } | |
91447636 | 5819 | dst_page = vm_page_lookup(object, dst_offset); |
d9a64523 | 5820 | |
2d21ac55 | 5821 | if (dst_page != VM_PAGE_NULL) { |
b0d623f7 | 5822 | if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) { |
316670eb A |
5823 | /* |
5824 | * skip over pages already present in the cache | |
5825 | */ | |
0a7de745 | 5826 | if (user_page_list) { |
316670eb | 5827 | user_page_list[entry].phys_addr = 0; |
0a7de745 | 5828 | } |
b0d623f7 | 5829 | |
316670eb A |
5830 | goto try_next_page; |
5831 | } | |
d9a64523 | 5832 | if (dst_page->vmp_fictitious) { |
316670eb | 5833 | panic("need corner case for fictitious page"); |
b0d623f7 | 5834 | } |
2d21ac55 | 5835 | |
d9a64523 | 5836 | if (dst_page->vmp_busy || dst_page->vmp_cleaning) { |
316670eb A |
5837 | /* |
5838 | * someone else is playing with the | |
5839 | * page. We will have to wait. | |
5840 | */ | |
5841 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); | |
b0d623f7 | 5842 | |
316670eb A |
5843 | continue; |
5844 | } | |
0a7de745 | 5845 | if (dst_page->vmp_laundry) { |
316670eb | 5846 | vm_pageout_steal_laundry(dst_page, FALSE); |
0a7de745 | 5847 | } |
316670eb | 5848 | } else { |
2d21ac55 | 5849 | if (object->private) { |
d9a64523 A |
5850 | /* |
5851 | * This is a nasty wrinkle for users | |
5852 | * of upl who encounter device or | |
5853 | * private memory; however, it is
0b4e3aa0 | 5854 | * unavoidable: only a fault can
2d21ac55 | 5855 | * resolve the actual backing |
0b4e3aa0 A |
5856 | * physical page by asking the |
5857 | * backing device. | |
5858 | */ | |
0a7de745 | 5859 | if (user_page_list) { |
55e303ae | 5860 | user_page_list[entry].phys_addr = 0; |
0a7de745 | 5861 | } |
2d21ac55 | 5862 | |
b0d623f7 | 5863 | goto try_next_page; |
0b4e3aa0 | 5864 | } |
6d2010ae A |
5865 | if (object->scan_collisions) { |
5866 | /* | |
5867 | * the pageout_scan thread is trying to steal | |
5868 | * pages from this object, but has run into our | |
5869 | * lock... grab 2 pages from the head of the object... | |
5870 | * the first is freed on behalf of pageout_scan, the | |
5871 | * 2nd is for our own use... we use vm_object_page_grab | |
5872 | * in both cases to avoid taking pages from the free | |
5873 | * list since we are under memory pressure and our | |
5874 | * lock on this object is getting in the way of | |
5875 | * relieving it | |
5876 | */ | |
5877 | dst_page = vm_object_page_grab(object); | |
5878 | ||
0a7de745 | 5879 | if (dst_page != VM_PAGE_NULL) { |
39037602 | 5880 | vm_page_release(dst_page, |
0a7de745 A |
5881 | FALSE); |
5882 | } | |
2d21ac55 | 5883 | |
6d2010ae A |
5884 | dst_page = vm_object_page_grab(object); |
5885 | } | |
5886 | if (dst_page == VM_PAGE_NULL) { | |
5887 | /* | |
5888 | * need to allocate a page | |
5889 | */ | |
39037602 | 5890 | dst_page = vm_page_grab_options(grab_options); |
0a7de745 | 5891 | if (dst_page != VM_PAGE_NULL) { |
d9a64523 | 5892 | page_grab_count++; |
0a7de745 | 5893 | } |
6d2010ae | 5894 | } |
1c79356b | 5895 | if (dst_page == VM_PAGE_NULL) { |
0a7de745 A |
5896 | if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) { |
5897 | /* | |
5898 | * we don't want to stall waiting for pages to come onto the free list | |
5899 | * while we're already holding absent pages in this UPL | |
5900 | * the caller will deal with the empty slots | |
5901 | */ | |
5902 | if (user_page_list) { | |
5903 | user_page_list[entry].phys_addr = 0; | |
5904 | } | |
2d21ac55 A |
5905 | |
5906 | goto try_next_page; | |
5907 | } | |
0a7de745 | 5908 | /* |
2d21ac55 A |
5909 | * no pages available... wait |
5910 | * then try again for the same | |
5911 | * offset... | |
5912 | */ | |
0b4e3aa0 | 5913 | vm_object_unlock(object); |
d9a64523 | 5914 | |
6d2010ae A |
5915 | OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages); |
5916 | ||
5917 | VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); | |
5918 | ||
0b4e3aa0 | 5919 | VM_PAGE_WAIT(); |
6d2010ae A |
5920 | OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages); |
5921 | ||
5922 | VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); | |
5923 | ||
b0d623f7 | 5924 | vm_object_lock(object); |
2d21ac55 | 5925 | |
0b4e3aa0 | 5926 | continue; |
1c79356b | 5927 | } |
b0d623f7 | 5928 | vm_page_insert(dst_page, object, dst_offset); |
4a3eedf9 | 5929 | |
d9a64523 A |
5930 | dst_page->vmp_absent = TRUE; |
5931 | dst_page->vmp_busy = FALSE; | |
2d21ac55 A |
5932 | |
5933 | if (cntrl_flags & UPL_RET_ONLY_ABSENT) { | |
0a7de745 | 5934 | /* |
91447636 A |
5935 | * if UPL_RET_ONLY_ABSENT was specified, |
5936 | * then we're definitely setting up a
d9a64523 | 5937 | * upl for a clustered read/pagein |
91447636 | 5938 | * operation... mark the pages as clustered |
2d21ac55 A |
5939 | * so upl_commit_range can put them on the |
5940 | * speculative list | |
91447636 | 5941 | */ |
0a7de745 | 5942 | dst_page->vmp_clustered = TRUE; |
fe8ab488 | 5943 | |
0a7de745 | 5944 | if (!(cntrl_flags & UPL_FILE_IO)) { |
fe8ab488 | 5945 | VM_STAT_INCR(pageins); |
0a7de745 | 5946 | } |
91447636 | 5947 | } |
1c79356b | 5948 | } |
39037602 A |
5949 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); |
5950 | ||
d9a64523 | 5951 | dst_page->vmp_overwriting = TRUE; |
2d21ac55 | 5952 | |
d9a64523 | 5953 | if (dst_page->vmp_pmapped) { |
0a7de745 A |
5954 | if (!(cntrl_flags & UPL_FILE_IO)) { |
5955 | /* | |
2d21ac55 A |
5956 | * eliminate all mappings from the |
5957 | * original object and its progeny
55e303ae | 5958 | */ |
0a7de745 A |
5959 | refmod_state = pmap_disconnect(phys_page); |
5960 | } else { | |
5961 | refmod_state = pmap_get_refmod(phys_page); | |
5962 | } | |
5963 | } else { | |
5964 | refmod_state = 0; | |
5965 | } | |
55e303ae | 5966 | |
2d21ac55 | 5967 | hw_dirty = refmod_state & VM_MEM_MODIFIED; |
d9a64523 | 5968 | dirty = hw_dirty ? TRUE : dst_page->vmp_dirty; |
1c79356b | 5969 | |
2d21ac55 | 5970 | if (cntrl_flags & UPL_SET_LITE) { |
0a7de745 | 5971 | unsigned int pg_num; |
1c79356b | 5972 | |
0a7de745 A |
5973 | pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE); |
5974 | assert(pg_num == (dst_offset - offset) / PAGE_SIZE); | |
cb323159 | 5975 | lite_list[pg_num >> 5] |= 1U << (pg_num & 31); |
91447636 | 5976 | |
0a7de745 A |
5977 | if (hw_dirty) { |
5978 | pmap_clear_modify(phys_page); | |
5979 | } | |
0b4e3aa0 | 5980 | |
2d21ac55 | 5981 | /* |
d9a64523 | 5982 | * Mark original page as cleaning |
2d21ac55 A |
5983 | * in place. |
5984 | */ | |
d9a64523 A |
5985 | dst_page->vmp_cleaning = TRUE; |
5986 | dst_page->vmp_precious = FALSE; | |
2d21ac55 A |
5987 | } else { |
5988 | /* | |
5989 | * use pageclean setup, it is more | |
5990 | * convenient even for the pageout | |
5991 | * cases here | |
5992 | */ | |
0a7de745 | 5993 | vm_object_lock(upl->map_object); |
2d21ac55 | 5994 | vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size); |
0a7de745 | 5995 | vm_object_unlock(upl->map_object); |
0b4e3aa0 | 5996 | |
d9a64523 | 5997 | alias_page->vmp_absent = FALSE; |
2d21ac55 A |
5998 | alias_page = NULL; |
5999 | } | |
1c79356b | 6000 | |
6d2010ae A |
6001 | if (cntrl_flags & UPL_REQUEST_SET_DIRTY) { |
6002 | upl->flags &= ~UPL_CLEAR_DIRTY; | |
6003 | upl->flags |= UPL_SET_DIRTY; | |
6004 | dirty = TRUE; | |
cb323159 A |
6005 | /* |
6006 | * Page belonging to a code-signed object is about to | |
6007 | * be written. Mark it tainted and disconnect it from | |
6008 | * all pmaps so processes have to fault it back in and | |
6009 | * deal with the tainted bit. | |
6010 | */ | |
6011 | if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) { | |
6012 | dst_page->vmp_cs_tainted = TRUE; | |
6013 | vm_page_upl_tainted++; | |
6014 | if (dst_page->vmp_pmapped) { | |
6015 | refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); | |
6016 | if (refmod_state & VM_MEM_REFERENCED) { | |
6017 | dst_page->vmp_reference = TRUE; | |
6018 | } | |
6019 | } | |
6020 | } | |
6d2010ae | 6021 | } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) { |
2d21ac55 A |
6022 | /* |
6023 | * clean in place for read implies | |
6024 | * that a write will be done on all | |
6025 | * the pages that are dirty before | |
6026 | * a upl commit is done. The caller | |
6027 | * is obligated to preserve the | |
6028 | * contents of all pages marked dirty | |
6029 | */ | |
6030 | upl->flags |= UPL_CLEAR_DIRTY; | |
6031 | } | |
d9a64523 | 6032 | dst_page->vmp_dirty = dirty; |
91447636 | 6033 | |
0a7de745 | 6034 | if (!dirty) { |
d9a64523 | 6035 | dst_page->vmp_precious = TRUE; |
0a7de745 | 6036 | } |
2d21ac55 | 6037 | |
0a7de745 A |
6038 | if (!VM_PAGE_WIRED(dst_page)) { |
6039 | /* | |
2d21ac55 A |
6040 | * deny access to the target page while |
6041 | * it is being worked on | |
6042 | */ | |
d9a64523 | 6043 | dst_page->vmp_busy = TRUE; |
0a7de745 | 6044 | } else { |
b0d623f7 | 6045 | dwp->dw_mask |= DW_vm_page_wire; |
0a7de745 | 6046 | } |
2d21ac55 | 6047 | |
b0d623f7 A |
6048 | /* |
6049 | * We might be about to satisfy a fault which has been | |
6050 | * requested. So no need for the "restart" bit. | |
6051 | */ | |
d9a64523 A |
6052 | dst_page->vmp_restart = FALSE; |
6053 | if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) { | |
0a7de745 | 6054 | /* |
2d21ac55 A |
6055 | * expect the page to be used |
6056 | */ | |
b0d623f7 | 6057 | dwp->dw_mask |= DW_set_reference; |
2d21ac55 | 6058 | } |
6d2010ae | 6059 | if (cntrl_flags & UPL_PRECIOUS) { |
39037602 | 6060 | if (object->internal) { |
316670eb | 6061 | SET_PAGE_DIRTY(dst_page, FALSE); |
d9a64523 | 6062 | dst_page->vmp_precious = FALSE; |
6d2010ae | 6063 | } else { |
d9a64523 | 6064 | dst_page->vmp_precious = TRUE; |
6d2010ae A |
6065 | } |
6066 | } else { | |
d9a64523 | 6067 | dst_page->vmp_precious = FALSE; |
6d2010ae | 6068 | } |
2d21ac55 | 6069 | } |
0a7de745 | 6070 | if (dst_page->vmp_busy) { |
d41d1dae | 6071 | upl->flags |= UPL_HAS_BUSY; |
0a7de745 | 6072 | } |
d41d1dae | 6073 | |
0a7de745 A |
6074 | if (phys_page > upl->highest_page) { |
6075 | upl->highest_page = phys_page; | |
6076 | } | |
6077 | assert(!pmap_is_noencrypt(phys_page)); | |
2d21ac55 | 6078 | if (user_page_list) { |
39037602 | 6079 | user_page_list[entry].phys_addr = phys_page; |
0a7de745 A |
6080 | user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; |
6081 | user_page_list[entry].absent = dst_page->vmp_absent; | |
6082 | user_page_list[entry].dirty = dst_page->vmp_dirty; | |
6083 | user_page_list[entry].precious = dst_page->vmp_precious; | |
6084 | user_page_list[entry].device = FALSE; | |
316670eb | 6085 | user_page_list[entry].needed = FALSE; |
0a7de745 A |
6086 | if (dst_page->vmp_clustered == TRUE) { |
6087 | user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; | |
6088 | } else { | |
6089 | user_page_list[entry].speculative = FALSE; | |
6090 | } | |
d9a64523 A |
6091 | user_page_list[entry].cs_validated = dst_page->vmp_cs_validated; |
6092 | user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted; | |
6093 | user_page_list[entry].cs_nx = dst_page->vmp_cs_nx; | |
3e170ce0 | 6094 | user_page_list[entry].mark = FALSE; |
2d21ac55 | 6095 | } |
0a7de745 | 6096 | /* |
2d21ac55 A |
6097 | * if UPL_RET_ONLY_ABSENT is set, then |
6098 | * we are working with a fresh page and we've | |
6099 | * just set the clustered flag on it to | |
6100 | * indicate that it was dragged in as part of a
6101 | * speculative cluster... so leave it alone | |
6102 | */ | |
0a7de745 A |
6103 | if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) { |
6104 | /* | |
2d21ac55 A |
6105 | * someone is explicitly grabbing this page... |
6106 | * update clustered and speculative state | |
d9a64523 | 6107 | * |
2d21ac55 | 6108 | */ |
0a7de745 | 6109 | if (dst_page->vmp_clustered) { |
fe8ab488 | 6110 | VM_PAGE_CONSUME_CLUSTERED(dst_page); |
0a7de745 | 6111 | } |
2d21ac55 | 6112 | } |
b0d623f7 A |
6113 | try_next_page: |
6114 | if (dwp->dw_mask) { | |
0a7de745 | 6115 | if (dwp->dw_mask & DW_vm_page_activate) { |
b0d623f7 | 6116 | VM_STAT_INCR(reactivations); |
0a7de745 | 6117 | } |
4a3eedf9 | 6118 | |
6d2010ae | 6119 | VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count); |
b0d623f7 | 6120 | |
6d2010ae | 6121 | if (dw_count >= dw_limit) { |
5ba3f43e | 6122 | vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); |
b0d623f7 A |
6123 | |
6124 | dwp = &dw_array[0]; | |
6125 | dw_count = 0; | |
4a3eedf9 | 6126 | } |
2d21ac55 | 6127 | } |
2d21ac55 A |
6128 | entry++; |
6129 | dst_offset += PAGE_SIZE_64; | |
6130 | xfer_size -= PAGE_SIZE; | |
6131 | } | |
0a7de745 | 6132 | if (dw_count) { |
5ba3f43e | 6133 | vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); |
0a7de745 | 6134 | } |
b0d623f7 | 6135 | |
2d21ac55 | 6136 | if (alias_page != NULL) { |
b0d623f7 | 6137 | VM_PAGE_FREE(alias_page); |
1c79356b | 6138 | } |
0a7de745 A |
6139 | if (pmap_flushes_delayed == TRUE) { |
6140 | pmap_flush(&pmap_flush_context_storage); | |
6141 | } | |
91447636 | 6142 | |
2d21ac55 | 6143 | if (page_list_count != NULL) { |
0a7de745 | 6144 | if (upl->flags & UPL_INTERNAL) { |
2d21ac55 | 6145 | *page_list_count = 0; |
0a7de745 | 6146 | } else if (*page_list_count > entry) { |
2d21ac55 | 6147 | *page_list_count = entry; |
0a7de745 | 6148 | } |
2d21ac55 | 6149 | } |
b0d623f7 A |
6150 | #if UPL_DEBUG |
6151 | upl->upl_state = 1; | |
6152 | #endif | |
1c79356b | 6153 | vm_object_unlock(object); |
2d21ac55 | 6154 | |
d9a64523 | 6155 | VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); |
0a7de745 A |
6156 | #if DEVELOPMENT || DEBUG |
6157 | if (task != NULL) { | |
6158 | ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count); | |
6159 | } | |
6160 | #endif /* DEVELOPMENT || DEBUG */ | |
d9a64523 | 6161 | |
1c79356b A |
6162 | return KERN_SUCCESS; |
6163 | } | |
6164 | ||
d9a64523 | 6165 | /* |
0b4e3aa0 | 6166 | * Routine: vm_object_super_upl_request |
d9a64523 | 6167 | * Purpose: |
0b4e3aa0 A |
6168 | * Cause the population of a portion of a vm_object |
6169 | * in much the same way as memory_object_upl_request. | |
6170 | * Depending on the nature of the request, the pages | |
6171 | * returned may contain valid data or be uninitialized.
6172 | * However, the region may be expanded up to the super | |
6173 | * cluster size provided. | |
6174 | */ | |
6175 | ||
6176 | __private_extern__ kern_return_t | |
6177 | vm_object_super_upl_request( | |
6178 | vm_object_t object, | |
0a7de745 A |
6179 | vm_object_offset_t offset, |
6180 | upl_size_t size, | |
6181 | upl_size_t super_cluster, | |
6182 | upl_t *upl, | |
6183 | upl_page_info_t *user_page_list, | |
6184 | unsigned int *page_list_count, | |
6185 | upl_control_flags_t cntrl_flags, | |
6186 | vm_tag_t tag) | |
0b4e3aa0 | 6187 | { |
0a7de745 | 6188 | if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) { |
1c79356b | 6189 | return KERN_FAILURE; |
0a7de745 | 6190 | } |
0b4e3aa0 | 6191 | |
55e303ae | 6192 | assert(object->paging_in_progress); |
1c79356b | 6193 | offset = offset - object->paging_offset; |
91447636 | 6194 | |
91447636 | 6195 | if (super_cluster > size) { |
0a7de745 A |
6196 | vm_object_offset_t base_offset; |
6197 | upl_size_t super_size; | |
6198 | vm_object_size_t super_size_64; | |
1c79356b | 6199 | |
2d21ac55 | 6200 | base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1)); |
0a7de745 | 6201 | super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster; |
6d2010ae | 6202 | super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size; |
b0d623f7 A |
6203 | super_size = (upl_size_t) super_size_64; |
6204 | assert(super_size == super_size_64); | |
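/*
 * Editorial annotation (not original source): a worked example with a
 * hypothetical 64KB super_cluster. offset 0x13000 / size 0x2000 aligns
 * down to base_offset 0x10000; since 0x15000 <= 0x20000 the cluster is
 * not doubled, so the 8KB request expands to cover 0x10000..0x20000
 * (clipped to the object size just above, and doubled instead when the
 * original request straddles the cluster boundary).
 */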
2d21ac55 A |
6205 | |
6206 | if (offset > (base_offset + super_size)) { | |
0a7de745 A |
6207 | panic("vm_object_super_upl_request: Missed target pageout" |
6208 | " %#llx,%#llx, %#x, %#x, %#x, %#llx\n", | |
6209 | offset, base_offset, super_size, super_cluster, | |
6210 | size, object->paging_offset); | |
2d21ac55 | 6211 | } |
91447636 A |
6212 | /* |
6213 | * apparently there is a case where the vm requests a | |
6214 | * page to be written out whose offset is beyond the
6215 | * object size | |
6216 | */ | |
b0d623f7 | 6217 | if ((offset + size) > (base_offset + super_size)) { |
0a7de745 | 6218 | super_size_64 = (offset + size) - base_offset; |
b0d623f7 A |
6219 | super_size = (upl_size_t) super_size_64; |
6220 | assert(super_size == super_size_64); | |
6221 | } | |
1c79356b A |
6222 | |
6223 | offset = base_offset; | |
6224 | size = super_size; | |
6225 | } | |
5ba3f43e | 6226 | return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag); |
1c79356b A |
6227 | } |
6228 | ||
5ba3f43e A |
6229 | #if CONFIG_EMBEDDED |
6230 | int cs_executable_create_upl = 0; | |
6231 | extern int proc_selfpid(void); | |
6232 | extern char *proc_name_address(void *p); | |
6233 | #endif /* CONFIG_EMBEDDED */ | |
b0d623f7 | 6234 | |
91447636 A |
6235 | kern_return_t |
6236 | vm_map_create_upl( | |
0a7de745 A |
6237 | vm_map_t map, |
6238 | vm_map_address_t offset, | |
6239 | upl_size_t *upl_size, | |
6240 | upl_t *upl, | |
6241 | upl_page_info_array_t page_list, | |
6242 | unsigned int *count, | |
6243 | upl_control_flags_t *flags, | |
6244 | vm_tag_t tag) | |
91447636 | 6245 | { |
0a7de745 A |
6246 | vm_map_entry_t entry; |
6247 | upl_control_flags_t caller_flags; | |
6248 | int force_data_sync; | |
6249 | int sync_cow_data; | |
6250 | vm_object_t local_object; | |
6251 | vm_map_offset_t local_offset; | |
6252 | vm_map_offset_t local_start; | |
6253 | kern_return_t ret; | |
91447636 | 6254 | |
39037602 A |
6255 | assert(page_aligned(offset)); |
6256 | ||
91447636 A |
6257 | caller_flags = *flags; |
6258 | ||
6259 | if (caller_flags & ~UPL_VALID_FLAGS) { | |
6260 | /* | |
6261 | * For forward compatibility's sake, | |
6262 | * reject any unknown flag. | |
6263 | */ | |
6264 | return KERN_INVALID_VALUE; | |
6265 | } | |
91447636 A |
6266 | force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC); |
6267 | sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM); | |
6268 | ||
0a7de745 | 6269 | if (upl == NULL) { |
91447636 | 6270 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 6271 | } |
91447636 | 6272 | |
91447636 | 6273 | REDISCOVER_ENTRY: |
b0d623f7 | 6274 | vm_map_lock_read(map); |
2d21ac55 | 6275 | |
3e170ce0 A |
6276 | if (!vm_map_lookup_entry(map, offset, &entry)) { |
6277 | vm_map_unlock_read(map); | |
6278 | return KERN_FAILURE; | |
6279 | } | |
2d21ac55 | 6280 | |
3e170ce0 A |
6281 | if ((entry->vme_end - offset) < *upl_size) { |
6282 | *upl_size = (upl_size_t) (entry->vme_end - offset); | |
6283 | assert(*upl_size == entry->vme_end - offset); | |
6284 | } | |
6285 | ||
6286 | if (caller_flags & UPL_QUERY_OBJECT_TYPE) { | |
6287 | *flags = 0; | |
6288 | ||
6289 | if (!entry->is_sub_map && | |
6290 | VME_OBJECT(entry) != VM_OBJECT_NULL) { | |
0a7de745 | 6291 | if (VME_OBJECT(entry)->private) { |
3e170ce0 | 6292 | *flags = UPL_DEV_MEMORY; |
0a7de745 | 6293 | } |
3e170ce0 | 6294 | |
0a7de745 | 6295 | if (VME_OBJECT(entry)->phys_contiguous) { |
3e170ce0 | 6296 | *flags |= UPL_PHYS_CONTIG; |
0a7de745 | 6297 | } |
b0d623f7 | 6298 | } |
3e170ce0 A |
6299 | vm_map_unlock_read(map); |
6300 | return KERN_SUCCESS; | |
6301 | } | |
2d21ac55 | 6302 | |
3e170ce0 A |
6303 | if (VME_OBJECT(entry) == VM_OBJECT_NULL || |
6304 | !VME_OBJECT(entry)->phys_contiguous) { | |
0a7de745 | 6305 | if (*upl_size > MAX_UPL_SIZE_BYTES) { |
3e170ce0 | 6306 | *upl_size = MAX_UPL_SIZE_BYTES; |
0a7de745 | 6307 | } |
3e170ce0 | 6308 | } |
e2d2fc5c | 6309 | |
3e170ce0 A |
6310 | /* |
6311 | * Create an object if necessary. | |
6312 | */ | |
6313 | if (VME_OBJECT(entry) == VM_OBJECT_NULL) { | |
0a7de745 | 6314 | if (vm_map_lock_read_to_write(map)) { |
3e170ce0 | 6315 | goto REDISCOVER_ENTRY; |
0a7de745 | 6316 | } |
e2d2fc5c | 6317 | |
3e170ce0 | 6318 | VME_OBJECT_SET(entry, |
0a7de745 A |
6319 | vm_object_allocate((vm_size_t) |
6320 | (entry->vme_end - | |
6321 | entry->vme_start))); | |
3e170ce0 | 6322 | VME_OFFSET_SET(entry, 0); |
a39ff7e2 | 6323 | assert(entry->use_pmap); |
e2d2fc5c | 6324 | |
3e170ce0 A |
6325 | vm_map_lock_write_to_read(map); |
6326 | } | |
b0d623f7 | 6327 | |
3e170ce0 | 6328 | if (!(caller_flags & UPL_COPYOUT_FROM) && |
d9a64523 | 6329 | !entry->is_sub_map && |
3e170ce0 A |
6330 | !(entry->protection & VM_PROT_WRITE)) { |
6331 | vm_map_unlock_read(map); | |
6332 | return KERN_PROTECTION_FAILURE; | |
6333 | } | |
6334 | ||
5ba3f43e A |
6335 | #if CONFIG_EMBEDDED |
6336 | if (map->pmap != kernel_pmap && | |
6337 | (caller_flags & UPL_COPYOUT_FROM) && | |
6338 | (entry->protection & VM_PROT_EXECUTE) && | |
6339 | !(entry->protection & VM_PROT_WRITE)) { | |
0a7de745 A |
6340 | vm_offset_t kaddr; |
6341 | vm_size_t ksize; | |
5ba3f43e A |
6342 | |
6343 | /* | |
6344 | * We're about to create a read-only UPL backed by | |
6345 | * memory from an executable mapping. | |
6346 | * Wiring the pages would result in the pages being copied | |
6347 | * (due to the "MAP_PRIVATE" mapping) and no longer | |
6348 | * code-signed, so no longer eligible for execution. | |
6349 | * Instead, let's copy the data into a kernel buffer and | |
6350 | * create the UPL from this kernel buffer. | |
6351 | * The kernel buffer is then freed, leaving the UPL holding | |
6352 | * the last reference on the VM object, so the memory will | |
6353 | * be released when the UPL is committed. | |
6354 | */ | |
6355 | ||
6356 | vm_map_unlock_read(map); | |
6357 | /* allocate kernel buffer */ | |
6358 | ksize = round_page(*upl_size); | |
6359 | kaddr = 0; | |
6360 | ret = kmem_alloc_pageable(kernel_map, | |
0a7de745 A |
6361 | &kaddr, |
6362 | ksize, | |
6363 | tag); | |
5ba3f43e A |
6364 | if (ret == KERN_SUCCESS) { |
6365 | /* copyin the user data */ | |
6366 | assert(page_aligned(offset)); | |
6367 | ret = copyinmap(map, offset, (void *)kaddr, *upl_size); | |
6368 | } | |
6369 | if (ret == KERN_SUCCESS) { | |
6370 | if (ksize > *upl_size) { | |
6371 | /* zero out the extra space in kernel buffer */ | |
6372 | memset((void *)(kaddr + *upl_size), | |
0a7de745 A |
6373 | 0, |
6374 | ksize - *upl_size); | |
5ba3f43e A |
6375 | } |
6376 | /* create the UPL from the kernel buffer */ | |
6377 | ret = vm_map_create_upl(kernel_map, kaddr, upl_size, | |
0a7de745 | 6378 | upl, page_list, count, flags, tag); |
5ba3f43e A |
6379 | } |
6380 | if (kaddr != 0) { | |
6381 | /* free the kernel buffer */ | |
6382 | kmem_free(kernel_map, kaddr, ksize); | |
6383 | kaddr = 0; | |
6384 | ksize = 0; | |
6385 | } | |
6386 | #if DEVELOPMENT || DEBUG | |
6387 | DTRACE_VM4(create_upl_from_executable, | |
0a7de745 A |
6388 | vm_map_t, map, |
6389 | vm_map_address_t, offset, | |
6390 | upl_size_t, *upl_size, | |
6391 | kern_return_t, ret); | |
5ba3f43e A |
6392 | #endif /* DEVELOPMENT || DEBUG */ |
6393 | return ret; | |
6394 | } | |
6395 | #endif /* CONFIG_EMBEDDED */ | |
39037602 | 6396 | |
3e170ce0 A |
6397 | local_object = VME_OBJECT(entry); |
6398 | assert(local_object != VM_OBJECT_NULL); | |
6399 | ||
39037602 A |
6400 | if (!entry->is_sub_map && |
6401 | !entry->needs_copy && | |
6402 | *upl_size != 0 && | |
3e170ce0 A |
6403 | local_object->vo_size > *upl_size && /* partial UPL */ |
6404 | entry->wired_count == 0 && /* No COW for entries that are wired */ | |
6405 | (map->pmap != kernel_pmap) && /* alias checks */ | |
6406 | (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */ | |
0a7de745 A |
6407 | || |
6408 | ( /* case 2 */ | |
6409 | local_object->internal && | |
6410 | (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) && | |
6411 | local_object->ref_count > 1))) { | |
6412 | vm_prot_t prot; | |
b0d623f7 | 6413 | |
3e170ce0 A |
6414 | /* |
6415 | * Case 1: | |
6416 | * Set up the targeted range for copy-on-write to avoid | |
6417 | * applying true_share/copy_delay to the entire object. | |
6418 | * | |
6419 | * Case 2: | |
6420 | * This map entry covers only part of an internal | |
6421 | * object. There could be other map entries covering | |
6422 | * other areas of this object and some of these map | |
6423 | * entries could be marked as "needs_copy", which | |
6424 | * assumes that the object is COPY_SYMMETRIC. | |
6425 | * To avoid marking this object as COPY_DELAY and | |
6426 | * "true_share", let's shadow it and mark the new | |
6427 | * (smaller) object as "true_share" and COPY_DELAY. | |
6428 | */ | |
b0d623f7 | 6429 | |
3e170ce0 A |
6430 | if (vm_map_lock_read_to_write(map)) { |
6431 | goto REDISCOVER_ENTRY; | |
91447636 | 6432 | } |
3e170ce0 A |
6433 | vm_map_lock_assert_exclusive(map); |
6434 | assert(VME_OBJECT(entry) == local_object); | |
6435 | ||
6436 | vm_map_clip_start(map, | |
0a7de745 A |
6437 | entry, |
6438 | vm_map_trunc_page(offset, | |
6439 | VM_MAP_PAGE_MASK(map))); | |
3e170ce0 | 6440 | vm_map_clip_end(map, |
0a7de745 A |
6441 | entry, |
6442 | vm_map_round_page(offset + *upl_size, | |
6443 | VM_MAP_PAGE_MASK(map))); | |
3e170ce0 A |
6444 | if ((entry->vme_end - offset) < *upl_size) { |
6445 | *upl_size = (upl_size_t) (entry->vme_end - offset); | |
6446 | assert(*upl_size == entry->vme_end - offset); | |
fe8ab488 | 6447 | } |
e2d2fc5c | 6448 | |
3e170ce0 | 6449 | prot = entry->protection & ~VM_PROT_WRITE; |
0a7de745 | 6450 | if (override_nx(map, VME_ALIAS(entry)) && prot) { |
3e170ce0 | 6451 | prot |= VM_PROT_EXECUTE; |
0a7de745 | 6452 | } |
3e170ce0 | 6453 | vm_object_pmap_protect(local_object, |
0a7de745 A |
6454 | VME_OFFSET(entry), |
6455 | entry->vme_end - entry->vme_start, | |
6456 | ((entry->is_shared || | |
6457 | map->mapped_in_other_pmaps) | |
6458 | ? PMAP_NULL | |
6459 | : map->pmap), | |
6460 | entry->vme_start, | |
6461 | prot); | |
e2d2fc5c | 6462 | |
3e170ce0 | 6463 | assert(entry->wired_count == 0); |
e2d2fc5c | 6464 | |
3e170ce0 A |
6465 | /* |
6466 | * Lock the VM object and re-check its status: if it's mapped | |
6467 | * in another address space, we could still be racing with | |
6468 | * another thread holding that other VM map exclusively. | |
6469 | */ | |
6470 | vm_object_lock(local_object); | |
6471 | if (local_object->true_share) { | |
6472 | /* object is already in proper state: no COW needed */ | |
6473 | assert(local_object->copy_strategy != | |
0a7de745 | 6474 | MEMORY_OBJECT_COPY_SYMMETRIC); |
3e170ce0 A |
6475 | } else { |
6476 | /* not true_share: ask for copy-on-write below */ | |
6477 | assert(local_object->copy_strategy == | |
0a7de745 | 6478 | MEMORY_OBJECT_COPY_SYMMETRIC); |
fe8ab488 | 6479 | entry->needs_copy = TRUE; |
fe8ab488 | 6480 | } |
3e170ce0 | 6481 | vm_object_unlock(local_object); |
fe8ab488 | 6482 | |
3e170ce0 A |
6483 | vm_map_lock_write_to_read(map); |
6484 | } | |
6485 | ||
0a7de745 | 6486 | if (entry->needs_copy) { |
3e170ce0 A |
6487 | /* |
6488 | * Honor copy-on-write for COPY_SYMMETRIC | |
6489 | * strategy. | |
6490 | */ | |
0a7de745 A |
6491 | vm_map_t local_map; |
6492 | vm_object_t object; | |
6493 | vm_object_offset_t new_offset; | |
6494 | vm_prot_t prot; | |
6495 | boolean_t wired; | |
6496 | vm_map_version_t version; | |
6497 | vm_map_t real_map; | |
6498 | vm_prot_t fault_type; | |
3e170ce0 A |
6499 | |
6500 | local_map = map; | |
6501 | ||
6502 | if (caller_flags & UPL_COPYOUT_FROM) { | |
6503 | fault_type = VM_PROT_READ | VM_PROT_COPY; | |
6504 | vm_counters.create_upl_extra_cow++; | |
6505 | vm_counters.create_upl_extra_cow_pages += | |
0a7de745 | 6506 | (entry->vme_end - entry->vme_start) / PAGE_SIZE; |
3e170ce0 A |
6507 | } else { |
6508 | fault_type = VM_PROT_WRITE; | |
6509 | } | |
6510 | if (vm_map_lookup_locked(&local_map, | |
0a7de745 A |
6511 | offset, fault_type, |
6512 | OBJECT_LOCK_EXCLUSIVE, | |
6513 | &version, &object, | |
6514 | &new_offset, &prot, &wired, | |
6515 | NULL, | |
6516 | &real_map) != KERN_SUCCESS) { | |
3e170ce0 A |
6517 | if (fault_type == VM_PROT_WRITE) { |
6518 | vm_counters.create_upl_lookup_failure_write++; | |
fe8ab488 | 6519 | } else { |
3e170ce0 | 6520 | vm_counters.create_upl_lookup_failure_copy++; |
fe8ab488 | 6521 | } |
fe8ab488 | 6522 | vm_map_unlock_read(local_map); |
3e170ce0 | 6523 | return KERN_FAILURE; |
91447636 | 6524 | } |
0a7de745 | 6525 | if (real_map != map) { |
3e170ce0 | 6526 | vm_map_unlock(real_map); |
0a7de745 | 6527 | } |
3e170ce0 | 6528 | vm_map_unlock_read(local_map); |
fe8ab488 | 6529 | |
3e170ce0 | 6530 | vm_object_unlock(object); |
2d21ac55 | 6531 | |
3e170ce0 A |
6532 | goto REDISCOVER_ENTRY; |
6533 | } | |
2d21ac55 | 6534 | |
39037602 | 6535 | if (entry->is_sub_map) { |
0a7de745 | 6536 | vm_map_t submap; |
39037602 A |
6537 | |
6538 | submap = VME_SUBMAP(entry); | |
6539 | local_start = entry->vme_start; | |
6540 | local_offset = VME_OFFSET(entry); | |
6541 | ||
6542 | vm_map_reference(submap); | |
6543 | vm_map_unlock_read(map); | |
6544 | ||
d9a64523 | 6545 | ret = vm_map_create_upl(submap, |
0a7de745 A |
6546 | local_offset + (offset - local_start), |
6547 | upl_size, upl, page_list, count, flags, tag); | |
39037602 A |
6548 | vm_map_deallocate(submap); |
6549 | ||
6550 | return ret; | |
6551 | } | |
6552 | ||
3e170ce0 A |
6553 | if (sync_cow_data && |
6554 | (VME_OBJECT(entry)->shadow || | |
0a7de745 | 6555 | VME_OBJECT(entry)->copy)) { |
3e170ce0 A |
6556 | local_object = VME_OBJECT(entry); |
6557 | local_start = entry->vme_start; | |
6558 | local_offset = VME_OFFSET(entry); | |
6559 | ||
6560 | vm_object_reference(local_object); | |
6561 | vm_map_unlock_read(map); | |
91447636 | 6562 | |
3e170ce0 A |
6563 | if (local_object->shadow && local_object->copy) { |
6564 | vm_object_lock_request(local_object->shadow, | |
0a7de745 A |
6565 | ((vm_object_offset_t) |
6566 | ((offset - local_start) + | |
6567 | local_offset) + | |
6568 | local_object->vo_shadow_offset), | |
6569 | *upl_size, FALSE, | |
6570 | MEMORY_OBJECT_DATA_SYNC, | |
6571 | VM_PROT_NO_CHANGE); | |
91447636 | 6572 | } |
3e170ce0 A |
6573 | sync_cow_data = FALSE; |
6574 | vm_object_deallocate(local_object); | |
91447636 | 6575 | |
3e170ce0 A |
6576 | goto REDISCOVER_ENTRY; |
6577 | } | |
6578 | if (force_data_sync) { | |
6579 | local_object = VME_OBJECT(entry); | |
91447636 | 6580 | local_start = entry->vme_start; |
3e170ce0 | 6581 | local_offset = VME_OFFSET(entry); |
2d21ac55 | 6582 | |
91447636 | 6583 | vm_object_reference(local_object); |
b0d623f7 | 6584 | vm_map_unlock_read(map); |
2d21ac55 | 6585 | |
3e170ce0 | 6586 | vm_object_lock_request(local_object, |
0a7de745 A |
6587 | ((vm_object_offset_t) |
6588 | ((offset - local_start) + | |
6589 | local_offset)), | |
6590 | (vm_object_size_t)*upl_size, | |
6591 | FALSE, | |
6592 | MEMORY_OBJECT_DATA_SYNC, | |
6593 | VM_PROT_NO_CHANGE); | |
3e170ce0 A |
6594 | |
6595 | force_data_sync = FALSE; | |
91447636 | 6596 | vm_object_deallocate(local_object); |
2d21ac55 | 6597 | |
3e170ce0 A |
6598 | goto REDISCOVER_ENTRY; |
6599 | } | |
0a7de745 | 6600 | if (VME_OBJECT(entry)->private) { |
3e170ce0 | 6601 | *flags = UPL_DEV_MEMORY; |
0a7de745 | 6602 | } else { |
3e170ce0 | 6603 | *flags = 0; |
0a7de745 | 6604 | } |
3e170ce0 | 6605 | |
0a7de745 | 6606 | if (VME_OBJECT(entry)->phys_contiguous) { |
3e170ce0 | 6607 | *flags |= UPL_PHYS_CONTIG; |
0a7de745 | 6608 | } |
3e170ce0 A |
6609 | |
6610 | local_object = VME_OBJECT(entry); | |
6611 | local_offset = VME_OFFSET(entry); | |
6612 | local_start = entry->vme_start; | |
6613 | ||
5ba3f43e A |
6614 | #if CONFIG_EMBEDDED |
6615 | /* | |
6616 | * Wiring will copy the pages to the shadow object. | |
6617 | * The shadow object will not be code-signed so | |
6618 | * attempting to execute code from these copied pages | |
6619 | * would trigger a code-signing violation. | |
6620 | */ | |
6621 | if (entry->protection & VM_PROT_EXECUTE) { | |
6622 | #if MACH_ASSERT | |
6623 | printf("pid %d[%s] create_upl out of executable range from " | |
0a7de745 A |
6624 | "0x%llx to 0x%llx: side effects may include " |
6625 | "code-signing violations later on\n", | |
6626 | proc_selfpid(), | |
6627 | (current_task()->bsd_info | |
6628 | ? proc_name_address(current_task()->bsd_info) | |
6629 | : "?"), | |
6630 | (uint64_t) entry->vme_start, | |
6631 | (uint64_t) entry->vme_end); | |
5ba3f43e A |
6632 | #endif /* MACH_ASSERT */ |
6633 | DTRACE_VM2(cs_executable_create_upl, | |
0a7de745 A |
6634 | uint64_t, (uint64_t)entry->vme_start, |
6635 | uint64_t, (uint64_t)entry->vme_end); | |
5ba3f43e A |
6636 | cs_executable_create_upl++; |
6637 | } | |
6638 | #endif /* CONFIG_EMBEDDED */ | |
39037602 | 6639 | |
3e170ce0 A |
6640 | vm_object_lock(local_object); |
6641 | ||
6642 | /* | |
6643 | * Ensure that this object is "true_share" and "copy_delay" now, | |
6644 | * while we're still holding the VM map lock. After we unlock the map, | |
6645 | * anything could happen to that mapping, including some copy-on-write | |
6646 | * activity. We need to make sure that the IOPL will point at the | |
6647 | * same memory as the mapping. | |
6648 | */ | |
6649 | if (local_object->true_share) { | |
6650 | assert(local_object->copy_strategy != | |
0a7de745 | 6651 | MEMORY_OBJECT_COPY_SYMMETRIC); |
3e170ce0 | 6652 | } else if (local_object != kernel_object && |
0a7de745 A |
6653 | local_object != compressor_object && |
6654 | !local_object->phys_contiguous) { | |
3e170ce0 A |
6655 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
6656 | if (!local_object->true_share && | |
6657 | vm_object_tracking_inited) { | |
6658 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
6659 | int num = 0; | |
6660 | num = OSBacktrace(bt, | |
0a7de745 | 6661 | VM_OBJECT_TRACKING_BTDEPTH); |
3e170ce0 | 6662 | btlog_add_entry(vm_object_tracking_btlog, |
0a7de745 A |
6663 | local_object, |
6664 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
6665 | bt, | |
6666 | num); | |
3e170ce0 A |
6667 | } |
6668 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
6669 | local_object->true_share = TRUE; | |
6670 | if (local_object->copy_strategy == | |
6671 | MEMORY_OBJECT_COPY_SYMMETRIC) { | |
6672 | local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
6673 | } | |
6674 | } | |
6675 | ||
6676 | vm_object_reference_locked(local_object); | |
6677 | vm_object_unlock(local_object); | |
6678 | ||
b0d623f7 | 6679 | vm_map_unlock_read(map); |
1c79356b | 6680 | |
d9a64523 | 6681 | ret = vm_object_iopl_request(local_object, |
0a7de745 A |
6682 | ((vm_object_offset_t) |
6683 | ((offset - local_start) + local_offset)), | |
6684 | *upl_size, | |
6685 | upl, | |
6686 | page_list, | |
6687 | count, | |
6688 | caller_flags, | |
6689 | tag); | |
3e170ce0 A |
6690 | vm_object_deallocate(local_object); |
6691 | ||
6692 | return ret; | |
91447636 A |
6693 | } |
6694 | ||
6695 | /* | |
6696 | * Internal routine to enter a UPL into a VM map. | |
d9a64523 | 6697 | * |
91447636 A |
6698 | * JMM - This should just be doable through the standard |
6699 | * vm_map_enter() API. | |
6700 | */ | |
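/*
 * Editorial sketch (not part of the original source): the expected call
 * pattern for this routine, mapping an existing UPL into the kernel map
 * and tearing the mapping down again; vm_map_remove_upl() is assumed to
 * be the companion teardown routine.
 */
#if 0
static kern_return_t
example_map_upl(upl_t upl)
{
	vm_map_offset_t addr = 0;
	kern_return_t   kr;

	/* map the UPL's pages into the kernel map */
	kr = vm_map_enter_upl(kernel_map, upl, &addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... access the pages through 'addr' ... */
	return vm_map_remove_upl(kernel_map, upl);
}
#endif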
1c79356b | 6701 | kern_return_t |
91447636 | 6702 | vm_map_enter_upl( |
0a7de745 A |
6703 | vm_map_t map, |
6704 | upl_t upl, | |
6705 | vm_map_offset_t *dst_addr) | |
1c79356b | 6706 | { |
0a7de745 A |
6707 | vm_map_size_t size; |
6708 | vm_object_offset_t offset; | |
6709 | vm_map_offset_t addr; | |
6710 | vm_page_t m; | |
6711 | kern_return_t kr; | |
6712 | int isVectorUPL = 0, curr_upl = 0; | |
6713 | upl_t vector_upl = NULL; | |
6714 | vm_offset_t vector_upl_dst_addr = 0; | |
6715 | vm_map_t vector_upl_submap = NULL; | |
6716 | upl_offset_t subupl_offset = 0; | |
6717 | upl_size_t subupl_size = 0; | |
6718 | ||
6719 | if (upl == UPL_NULL) { | |
0b4e3aa0 | 6720 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 6721 | } |
0b4e3aa0 | 6722 | |
0a7de745 A |
6723 | if ((isVectorUPL = vector_upl_is_valid(upl))) { |
6724 | int mapped = 0, valid_upls = 0; | |
b0d623f7 A |
6725 | vector_upl = upl; |
6726 | ||
6727 | upl_lock(vector_upl); | |
0a7de745 | 6728 | for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { |
b0d623f7 | 6729 | upl = vector_upl_subupl_byindex(vector_upl, curr_upl ); |
0a7de745 | 6730 | if (upl == NULL) { |
b0d623f7 | 6731 | continue; |
0a7de745 | 6732 | } |
b0d623f7 | 6733 | valid_upls++; |
0a7de745 | 6734 | if (UPL_PAGE_LIST_MAPPED & upl->flags) { |
b0d623f7 | 6735 | mapped++; |
0a7de745 | 6736 | } |
b0d623f7 A |
6737 | } |
6738 | ||
0a7de745 A |
6739 | if (mapped) { |
6740 | if (mapped != valid_upls) { | |
b0d623f7 | 6741 | panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls); |
0a7de745 | 6742 | } else { |
b0d623f7 A |
6743 | upl_unlock(vector_upl); |
6744 | return KERN_FAILURE; | |
6745 | } | |
6746 | } | |
6747 | ||
5ba3f43e | 6748 | kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, |
0a7de745 A |
6749 | VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, |
6750 | &vector_upl_submap); | |
6751 | if (kr != KERN_SUCCESS) { | |
b0d623f7 | 6752 | panic("Vector UPL submap allocation failed\n"); |
0a7de745 | 6753 | } |
b0d623f7 A |
6754 | map = vector_upl_submap; |
6755 | vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr); | |
0a7de745 A |
6756 | curr_upl = 0; |
6757 | } else { | |
b0d623f7 | 6758 | upl_lock(upl); |
0a7de745 | 6759 | } |
b0d623f7 A |
6760 | |
6761 | process_upl_to_enter: | |
0a7de745 A |
6762 | if (isVectorUPL) { |
6763 | if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) { | |
b0d623f7 A |
6764 | *dst_addr = vector_upl_dst_addr; |
6765 | upl_unlock(vector_upl); | |
6766 | return KERN_SUCCESS; | |
6767 | } | |
6768 | upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ ); | |
0a7de745 | 6769 | if (upl == NULL) { |
b0d623f7 | 6770 | goto process_upl_to_enter; |
0a7de745 | 6771 | } |
6d2010ae | 6772 | |
b0d623f7 A |
6773 | vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size); |
6774 | *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset); | |
d41d1dae A |
6775 | } else { |
6776 | /* | |
6777 | * check to see if already mapped | |
6778 | */ | |
6779 | if (UPL_PAGE_LIST_MAPPED & upl->flags) { | |
6780 | upl_unlock(upl); | |
6781 | return KERN_FAILURE; | |
6782 | } | |
b0d623f7 | 6783 | } |
d41d1dae A |
6784 | if ((!(upl->flags & UPL_SHADOWED)) && |
6785 | ((upl->flags & UPL_HAS_BUSY) || | |
0a7de745 A |
6786 | !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) { |
6787 | vm_object_t object; | |
6788 | vm_page_t alias_page; | |
6789 | vm_object_offset_t new_offset; | |
6790 | unsigned int pg_num; | |
6791 | wpl_array_t lite_list; | |
55e303ae | 6792 | |
2d21ac55 | 6793 | if (upl->flags & UPL_INTERNAL) { |
d9a64523 | 6794 | lite_list = (wpl_array_t) |
0a7de745 A |
6795 | ((((uintptr_t)upl) + sizeof(struct upl)) |
6796 | + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); | |
55e303ae | 6797 | } else { |
0a7de745 | 6798 | lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl)); |
55e303ae A |
6799 | } |
6800 | object = upl->map_object; | |
6801 | upl->map_object = vm_object_allocate(upl->size); | |
2d21ac55 | 6802 | |
55e303ae | 6803 | vm_object_lock(upl->map_object); |
2d21ac55 | 6804 | |
55e303ae A |
6805 | upl->map_object->shadow = object; |
6806 | upl->map_object->pageout = TRUE; | |
6807 | upl->map_object->can_persist = FALSE; | |
2d21ac55 | 6808 | upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
6d2010ae | 6809 | upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset; |
55e303ae | 6810 | upl->map_object->wimg_bits = object->wimg_bits; |
6d2010ae | 6811 | offset = upl->map_object->vo_shadow_offset; |
55e303ae A |
6812 | new_offset = 0; |
6813 | size = upl->size; | |
91447636 | 6814 | |
2d21ac55 | 6815 | upl->flags |= UPL_SHADOWED; |
91447636 | 6816 | |
2d21ac55 | 6817 | while (size) { |
b0d623f7 A |
6818 | pg_num = (unsigned int) (new_offset / PAGE_SIZE); |
6819 | assert(pg_num == new_offset / PAGE_SIZE); | |
55e303ae | 6820 | |
cb323159 | 6821 | if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) { |
2d21ac55 | 6822 | VM_PAGE_GRAB_FICTITIOUS(alias_page); |
91447636 | 6823 | |
2d21ac55 | 6824 | vm_object_lock(object); |
91447636 | 6825 | |
2d21ac55 A |
6826 | m = vm_page_lookup(object, offset); |
6827 | if (m == VM_PAGE_NULL) { | |
0a7de745 | 6828 | panic("vm_upl_map: page missing\n"); |
2d21ac55 | 6829 | } |
55e303ae | 6830 | |
2d21ac55 | 6831 | /* |
d9a64523 | 6832 | * Convert the fictitious page to a private |
2d21ac55 A |
6833 | * shadow of the real page. |
6834 | */ | |
d9a64523 A |
6835 | assert(alias_page->vmp_fictitious); |
6836 | alias_page->vmp_fictitious = FALSE; | |
6837 | alias_page->vmp_private = TRUE; | |
6838 | alias_page->vmp_free_when_done = TRUE; | |
2d21ac55 A |
6839 | /* |
6840 | * since m is a page in the upl it must | |
6841 | * already be wired or BUSY, so it's | |
6842 | * safe to assign the underlying physical | |
6843 | * page to the alias | |
6844 | */ | |
39037602 | 6845 | VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m)); |
2d21ac55 | 6846 | |
0a7de745 | 6847 | vm_object_unlock(object); |
2d21ac55 A |
6848 | |
6849 | vm_page_lockspin_queues(); | |
3e170ce0 | 6850 | vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE); |
2d21ac55 | 6851 | vm_page_unlock_queues(); |
d9a64523 | 6852 | |
3e170ce0 | 6853 | vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE); |
2d21ac55 | 6854 | |
d9a64523 A |
6855 | assert(!alias_page->vmp_wanted); |
6856 | alias_page->vmp_busy = FALSE; | |
6857 | alias_page->vmp_absent = FALSE; | |
2d21ac55 A |
6858 | } |
6859 | size -= PAGE_SIZE; | |
6860 | offset += PAGE_SIZE_64; | |
6861 | new_offset += PAGE_SIZE_64; | |
55e303ae | 6862 | } |
91447636 | 6863 | vm_object_unlock(upl->map_object); |
55e303ae | 6864 | } |
0a7de745 A |
6865 | if (upl->flags & UPL_SHADOWED) { |
6866 | offset = 0; | |
6867 | } else { | |
6868 | offset = upl->offset - upl->map_object->paging_offset; | |
6869 | } | |
6d2010ae | 6870 | |
1c79356b | 6871 | size = upl->size; |
d9a64523 | 6872 | |
2d21ac55 | 6873 | vm_object_reference(upl->map_object); |
1c79356b | 6874 | |
0a7de745 | 6875 | if (!isVectorUPL) { |
b0d623f7 A |
6876 | *dst_addr = 0; |
6877 | /* | |
0a7de745 A |
6878 | * NEED A UPL_MAP ALIAS |
6879 | */ | |
b0d623f7 | 6880 | kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0, |
0a7de745 A |
6881 | VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, |
6882 | upl->map_object, offset, FALSE, | |
6883 | VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); | |
d41d1dae A |
6884 | |
6885 | if (kr != KERN_SUCCESS) { | |
39037602 | 6886 | vm_object_deallocate(upl->map_object); |
d41d1dae | 6887 | upl_unlock(upl); |
0a7de745 | 6888 | return kr; |
d41d1dae | 6889 | } |
0a7de745 | 6890 | } else { |
b0d623f7 | 6891 | kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0, |
0a7de745 A |
6892 | VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, |
6893 | upl->map_object, offset, FALSE, | |
6894 | VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); | |
6895 | if (kr) { | |
b0d623f7 | 6896 | panic("vm_map_enter failed for a Vector UPL\n"); |
0a7de745 | 6897 | } |
b0d623f7 | 6898 | } |
91447636 A |
6899 | vm_object_lock(upl->map_object); |
6900 | ||
2d21ac55 | 6901 | for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) { |
1c79356b | 6902 | m = vm_page_lookup(upl->map_object, offset); |
2d21ac55 A |
6903 | |
6904 | if (m) { | |
d9a64523 | 6905 | m->vmp_pmapped = TRUE; |
b0d623f7 | 6906 | |
d9a64523 | 6907 | /* CODE SIGNING ENFORCEMENT: page has been wpmapped, |
b0d623f7 A |
6908 | * but only in kernel space. If this was on a user map, |
6909 | * we'd have to set the wpmapped bit. */ | |
d9a64523 | 6910 | /* m->vmp_wpmapped = TRUE; */ |
fe8ab488 | 6911 | assert(map->pmap == kernel_pmap); |
d9a64523 | 6912 | |
5ba3f43e A |
6913 | PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE, kr); |
6914 | ||
6915 | assert(kr == KERN_SUCCESS); | |
6916 | #if KASAN | |
6917 | kasan_notify_address(addr, PAGE_SIZE_64); | |
6918 | #endif | |
1c79356b | 6919 | } |
2d21ac55 | 6920 | offset += PAGE_SIZE_64; |
1c79356b | 6921 | } |
91447636 A |
6922 | vm_object_unlock(upl->map_object); |
6923 | ||
2d21ac55 A |
6924 | /* |
6925 | * hold a reference for the mapping | |
6926 | */ | |
6927 | upl->ref_count++; | |
1c79356b | 6928 | upl->flags |= UPL_PAGE_LIST_MAPPED; |
b0d623f7 A |
6929 | upl->kaddr = (vm_offset_t) *dst_addr; |
6930 | assert(upl->kaddr == *dst_addr); | |
d9a64523 | 6931 | |
0a7de745 | 6932 | if (isVectorUPL) { |
b0d623f7 | 6933 | goto process_upl_to_enter; |
0a7de745 | 6934 | } |
2d21ac55 | 6935 | |
d41d1dae A |
6936 | upl_unlock(upl); |
6937 | ||
1c79356b A |
6938 | return KERN_SUCCESS; |
6939 | } | |
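/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * expected pairing of vm_upl_map() and vm_map_remove_upl() for a plain,
 * non-vector UPL.  It assumes the UPL was created earlier, e.g. by
 * vm_object_iopl_request(); the "example_" name is hypothetical.
 */
static kern_return_t
example_map_touch_unmap(upl_t upl)
{
	vm_map_offset_t kaddr = 0;
	kern_return_t   kr;

	/* takes a mapping reference on the UPL (upl->ref_count++) */
	kr = vm_upl_map(kernel_map, upl, &kaddr);
	if (kr != KERN_SUCCESS) {
		return kr;      /* KERN_FAILURE if the UPL was already mapped */
	}
	/* the UPL's pages are now wired and addressable through the kernel map */
	bzero((void *)(uintptr_t)kaddr, PAGE_SIZE);

	/* drops the mapping reference and tears down the kernel mapping */
	return vm_map_remove_upl(kernel_map, upl);
}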
d9a64523 | 6940 | |
91447636 A |
6941 | /* |
6942 | * Internal routine to remove a UPL mapping from a VM map. | |
6943 | * | |
6944 | * XXX - This should just be doable through a standard | |
6945 | * vm_map_remove() operation. Otherwise, implicit clean-up | |
6946 | * of the target map won't be able to correctly remove | |
6947 | * these (and release the reference on the UPL). Having | |
6948 | * to do this means we can't map these into user-space | |
6949 | * maps yet. | |
6950 | */ | |
1c79356b | 6951 | kern_return_t |
91447636 | 6952 | vm_map_remove_upl( |
0a7de745 A |
6953 | vm_map_t map, |
6954 | upl_t upl) | |
1c79356b | 6955 | { |
0a7de745 A |
6956 | vm_address_t addr; |
6957 | upl_size_t size; | |
6958 | int isVectorUPL = 0, curr_upl = 0; | |
6959 | upl_t vector_upl = NULL; | |
1c79356b | 6960 | |
0a7de745 | 6961 | if (upl == UPL_NULL) { |
0b4e3aa0 | 6962 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 6963 | } |
0b4e3aa0 | 6964 | |
0a7de745 A |
6965 | if ((isVectorUPL = vector_upl_is_valid(upl))) { |
6966 | int unmapped = 0, valid_upls = 0; | |
b0d623f7 A |
6967 | vector_upl = upl; |
6968 | upl_lock(vector_upl); | |
0a7de745 | 6969 | for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { |
b0d623f7 | 6970 | upl = vector_upl_subupl_byindex(vector_upl, curr_upl ); |
0a7de745 | 6971 | if (upl == NULL) { |
b0d623f7 | 6972 | continue; |
0a7de745 | 6973 | } |
b0d623f7 | 6974 | valid_upls++; |
0a7de745 | 6975 | if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) { |
b0d623f7 | 6976 | unmapped++; |
0a7de745 | 6977 | } |
b0d623f7 A |
6978 | } |
6979 | ||
0a7de745 A |
6980 | if (unmapped) { |
6981 | if (unmapped != valid_upls) { | |
b0d623f7 | 6982 | panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls); |
0a7de745 | 6983 | } else { |
b0d623f7 A |
6984 | upl_unlock(vector_upl); |
6985 | return KERN_FAILURE; | |
6986 | } | |
6987 | } | |
0a7de745 A |
6988 | curr_upl = 0; |
6989 | } else { | |
b0d623f7 | 6990 | upl_lock(upl); |
0a7de745 | 6991 | } |
b0d623f7 A |
6992 | |
6993 | process_upl_to_remove: | |
0a7de745 A |
6994 | if (isVectorUPL) { |
6995 | if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) { | |
b0d623f7 A |
6996 | vm_map_t v_upl_submap; |
6997 | vm_offset_t v_upl_submap_dst_addr; | |
6998 | vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr); | |
6999 | ||
d9a64523 | 7000 | vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_REMOVE_NO_FLAGS); |
b0d623f7 A |
7001 | vm_map_deallocate(v_upl_submap); |
7002 | upl_unlock(vector_upl); | |
7003 | return KERN_SUCCESS; | |
7004 | } | |
7005 | ||
7006 | upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ ); | |
0a7de745 | 7007 | if (upl == NULL) { |
d9a64523 | 7008 | goto process_upl_to_remove; |
0a7de745 | 7009 | } |
b0d623f7 | 7010 | } |
2d21ac55 A |
7011 | |
7012 | if (upl->flags & UPL_PAGE_LIST_MAPPED) { | |
0b4e3aa0 | 7013 | addr = upl->kaddr; |
1c79356b | 7014 | size = upl->size; |
2d21ac55 | 7015 | |
0b4e3aa0 | 7016 | assert(upl->ref_count > 1); |
0a7de745 | 7017 | upl->ref_count--; /* removing mapping ref */ |
2d21ac55 | 7018 | |
1c79356b A |
7019 | upl->flags &= ~UPL_PAGE_LIST_MAPPED; |
7020 | upl->kaddr = (vm_offset_t) 0; | |
d9a64523 | 7021 | |
0a7de745 | 7022 | if (!isVectorUPL) { |
b0d623f7 | 7023 | upl_unlock(upl); |
d9a64523 | 7024 | |
39236c6e A |
7025 | vm_map_remove( |
7026 | map, | |
7027 | vm_map_trunc_page(addr, | |
0a7de745 | 7028 | VM_MAP_PAGE_MASK(map)), |
39236c6e | 7029 | vm_map_round_page(addr + size, |
0a7de745 | 7030 | VM_MAP_PAGE_MASK(map)), |
d9a64523 | 7031 | VM_MAP_REMOVE_NO_FLAGS); |
b0d623f7 | 7032 | return KERN_SUCCESS; |
0a7de745 | 7033 | } else { |
b0d623f7 | 7034 | /* |
0a7de745 A |
7035 | * If it's a Vectored UPL, we'll be removing the entire |
7036 | * submap anyway, so no need to remove individual UPL |
7037 | * element mappings from within the submap | |
7038 | */ | |
b0d623f7 A |
7039 | goto process_upl_to_remove; |
7040 | } | |
1c79356b | 7041 | } |
0b4e3aa0 | 7042 | upl_unlock(upl); |
2d21ac55 | 7043 | |
0b4e3aa0 | 7044 | return KERN_FAILURE; |
1c79356b A |
7045 | } |
7046 | ||
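/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * lite-list bitmap arithmetic that the map, commit and abort paths in this
 * file all repeat.  A wpl_array_t holds one bit per page in 32-bit words:
 * page pg_num lives in word pg_num >> 5 at bit pg_num & 31.  The
 * "example_" name is hypothetical.
 */
static boolean_t
example_lite_list_test_and_clear(wpl_array_t lite_list, unsigned int pg_num)
{
	if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) {
		lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31));
		return TRUE;    /* the page belongs to this UPL */
	}
	return FALSE;
}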
39037602 | 7047 | |
1c79356b | 7048 | kern_return_t |
0b4e3aa0 | 7049 | upl_commit_range( |
0a7de745 A |
7050 | upl_t upl, |
7051 | upl_offset_t offset, | |
7052 | upl_size_t size, | |
7053 | int flags, | |
7054 | upl_page_info_t *page_list, | |
7055 | mach_msg_type_number_t count, | |
7056 | boolean_t *empty) | |
1c79356b | 7057 | { |
0a7de745 A |
7058 | upl_size_t xfer_size, subupl_size = size; |
7059 | vm_object_t shadow_object; | |
7060 | vm_object_t object; | |
7061 | vm_object_t m_object; | |
7062 | vm_object_offset_t target_offset; | |
7063 | upl_offset_t subupl_offset = offset; | |
7064 | int entry; | |
7065 | wpl_array_t lite_list; | |
7066 | int occupied; | |
7067 | int clear_refmod = 0; | |
7068 | int pgpgout_count = 0; | |
7069 | struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; | |
7070 | struct vm_page_delayed_work *dwp; | |
7071 | int dw_count; | |
7072 | int dw_limit; | |
7073 | int isVectorUPL = 0; | |
7074 | upl_t vector_upl = NULL; | |
7075 | boolean_t should_be_throttled = FALSE; | |
7076 | ||
7077 | vm_page_t nxt_page = VM_PAGE_NULL; | |
7078 | int fast_path_possible = 0; | |
7079 | int fast_path_full_commit = 0; | |
7080 | int throttle_page = 0; | |
7081 | int unwired_count = 0; | |
7082 | int local_queue_count = 0; | |
7083 | vm_page_t first_local, last_local; | |
fe8ab488 | 7084 | |
0b4e3aa0 A |
7085 | *empty = FALSE; |
7086 | ||
0a7de745 | 7087 | if (upl == UPL_NULL) { |
0b4e3aa0 | 7088 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 7089 | } |
0b4e3aa0 | 7090 | |
0a7de745 | 7091 | if (count == 0) { |
0b4e3aa0 | 7092 | page_list = NULL; |
0a7de745 | 7093 | } |
0b4e3aa0 | 7094 | |
0a7de745 | 7095 | if ((isVectorUPL = vector_upl_is_valid(upl))) { |
b0d623f7 A |
7096 | vector_upl = upl; |
7097 | upl_lock(vector_upl); | |
0a7de745 | 7098 | } else { |
b0d623f7 | 7099 | upl_lock(upl); |
0a7de745 | 7100 | } |
b0d623f7 A |
7101 | |
7102 | process_upl_to_commit: | |
7103 | ||
0a7de745 | 7104 | if (isVectorUPL) { |
b0d623f7 A |
7105 | size = subupl_size; |
7106 | offset = subupl_offset; | |
0a7de745 | 7107 | if (size == 0) { |
b0d623f7 A |
7108 | upl_unlock(vector_upl); |
7109 | return KERN_SUCCESS; | |
7110 | } | |
7111 | upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); | |
0a7de745 | 7112 | if (upl == NULL) { |
b0d623f7 A |
7113 | upl_unlock(vector_upl); |
7114 | return KERN_FAILURE; | |
7115 | } | |
7116 | page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl); | |
7117 | subupl_size -= size; | |
7118 | subupl_offset += size; | |
7119 | } | |
7120 | ||
7121 | #if UPL_DEBUG | |
7122 | if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) { | |
7123 | (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES); | |
d9a64523 | 7124 | |
b0d623f7 A |
7125 | upl->upl_commit_records[upl->upl_commit_index].c_beg = offset; |
7126 | upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size); | |
7127 | ||
7128 | upl->upl_commit_index++; | |
7129 | } | |
7130 | #endif | |
0a7de745 | 7131 | if (upl->flags & UPL_DEVICE_MEMORY) { |
2d21ac55 | 7132 | xfer_size = 0; |
0a7de745 A |
7133 | } else if ((offset + size) <= upl->size) { |
7134 | xfer_size = size; | |
7135 | } else { | |
7136 | if (!isVectorUPL) { | |
b0d623f7 | 7137 | upl_unlock(upl); |
0a7de745 | 7138 | } else { |
b0d623f7 A |
7139 | upl_unlock(vector_upl); |
7140 | } | |
2d21ac55 | 7141 | return KERN_FAILURE; |
91447636 | 7142 | } |
0a7de745 | 7143 | if (upl->flags & UPL_SET_DIRTY) { |
6d2010ae | 7144 | flags |= UPL_COMMIT_SET_DIRTY; |
0a7de745 A |
7145 | } |
7146 | if (upl->flags & UPL_CLEAR_DIRTY) { | |
7147 | flags |= UPL_COMMIT_CLEAR_DIRTY; | |
7148 | } | |
55e303ae | 7149 | |
0a7de745 | 7150 | if (upl->flags & UPL_INTERNAL) { |
2d21ac55 | 7151 | lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl)) |
0a7de745 A |
7152 | + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); |
7153 | } else { | |
2d21ac55 | 7154 | lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); |
0a7de745 | 7155 | } |
1c79356b | 7156 | |
2d21ac55 A |
7157 | object = upl->map_object; |
7158 | ||
7159 | if (upl->flags & UPL_SHADOWED) { | |
0a7de745 | 7160 | vm_object_lock(object); |
2d21ac55 | 7161 | shadow_object = object->shadow; |
55e303ae | 7162 | } else { |
2d21ac55 | 7163 | shadow_object = object; |
55e303ae | 7164 | } |
0a7de745 | 7165 | entry = offset / PAGE_SIZE; |
1c79356b | 7166 | target_offset = (vm_object_offset_t)offset; |
55e303ae | 7167 | |
3e170ce0 A |
7168 | assert(!(target_offset & PAGE_MASK)); |
7169 | assert(!(xfer_size & PAGE_MASK)); | |
7170 | ||
0a7de745 | 7171 | if (upl->flags & UPL_KERNEL_OBJECT) { |
b0d623f7 | 7172 | vm_object_lock_shared(shadow_object); |
0a7de745 | 7173 | } else { |
b0d623f7 | 7174 | vm_object_lock(shadow_object); |
0a7de745 | 7175 | } |
4a3eedf9 | 7176 | |
5ba3f43e A |
7177 | VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object); |
7178 | ||
b0d623f7 A |
7179 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
7180 | assert(shadow_object->blocked_access); | |
7181 | shadow_object->blocked_access = FALSE; | |
7182 | vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED); | |
4a3eedf9 | 7183 | } |
4a3eedf9 | 7184 | |
593a1d5f A |
7185 | if (shadow_object->code_signed) { |
7186 | /* | |
7187 | * CODE SIGNING: | |
7188 | * If the object is code-signed, do not let this UPL tell | |
7189 | * us if the pages are valid or not. Let the pages be | |
7190 | * validated by VM the normal way (when they get mapped or | |
7191 | * copied). | |
7192 | */ | |
7193 | flags &= ~UPL_COMMIT_CS_VALIDATED; | |
7194 | } | |
0a7de745 | 7195 | if (!page_list) { |
593a1d5f A |
7196 | /* |
7197 | * No page list to get the code-signing info from !? | |
7198 | */ | |
7199 | flags &= ~UPL_COMMIT_CS_VALIDATED; | |
7200 | } | |
0a7de745 | 7201 | if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) { |
6d2010ae | 7202 | should_be_throttled = TRUE; |
0a7de745 | 7203 | } |
593a1d5f | 7204 | |
b0d623f7 A |
7205 | dwp = &dw_array[0]; |
7206 | dw_count = 0; | |
6d2010ae | 7207 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
b0d623f7 | 7208 | |
fe8ab488 A |
7209 | if ((upl->flags & UPL_IO_WIRE) && |
7210 | !(flags & UPL_COMMIT_FREE_ABSENT) && | |
7211 | !isVectorUPL && | |
7212 | shadow_object->purgable != VM_PURGABLE_VOLATILE && | |
7213 | shadow_object->purgable != VM_PURGABLE_EMPTY) { | |
39037602 | 7214 | if (!vm_page_queue_empty(&shadow_object->memq)) { |
fe8ab488 | 7215 | if (size == shadow_object->vo_size) { |
39037602 | 7216 | nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq); |
fe8ab488 A |
7217 | fast_path_full_commit = 1; |
7218 | } | |
7219 | fast_path_possible = 1; | |
7220 | ||
39037602 | 7221 | if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal && |
fe8ab488 | 7222 | (shadow_object->purgable == VM_PURGABLE_DENY || |
0a7de745 A |
7223 | shadow_object->purgable == VM_PURGABLE_NONVOLATILE || |
7224 | shadow_object->purgable == VM_PURGABLE_VOLATILE)) { | |
fe8ab488 A |
7225 | throttle_page = 1; |
7226 | } | |
7227 | } | |
7228 | } | |
39037602 A |
7229 | first_local = VM_PAGE_NULL; |
7230 | last_local = VM_PAGE_NULL; | |
fe8ab488 | 7231 | |
91447636 | 7232 | while (xfer_size) { |
0a7de745 | 7233 | vm_page_t t, m; |
2d21ac55 | 7234 | |
b0d623f7 A |
7235 | dwp->dw_mask = 0; |
7236 | clear_refmod = 0; | |
7237 | ||
55e303ae | 7238 | m = VM_PAGE_NULL; |
d7e50217 | 7239 | |
55e303ae | 7240 | if (upl->flags & UPL_LITE) { |
0a7de745 | 7241 | unsigned int pg_num; |
55e303ae | 7242 | |
fe8ab488 A |
7243 | if (nxt_page != VM_PAGE_NULL) { |
7244 | m = nxt_page; | |
d9a64523 A |
7245 | nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq); |
7246 | target_offset = m->vmp_offset; | |
fe8ab488 | 7247 | } |
0a7de745 A |
7248 | pg_num = (unsigned int) (target_offset / PAGE_SIZE); |
7249 | assert(pg_num == target_offset / PAGE_SIZE); | |
55e303ae | 7250 | |
cb323159 A |
7251 | if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) { |
7252 | lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31)); | |
2d21ac55 | 7253 | |
0a7de745 | 7254 | if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) { |
b0d623f7 | 7255 | m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset)); |
0a7de745 A |
7256 | } |
7257 | } else { | |
fe8ab488 | 7258 | m = NULL; |
0a7de745 | 7259 | } |
55e303ae | 7260 | } |
2d21ac55 | 7261 | if (upl->flags & UPL_SHADOWED) { |
0a7de745 | 7262 | if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { |
d9a64523 | 7263 | t->vmp_free_when_done = FALSE; |
55e303ae | 7264 | |
b0d623f7 | 7265 | VM_PAGE_FREE(t); |
55e303ae | 7266 | |
0a7de745 | 7267 | if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) { |
6d2010ae | 7268 | m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset); |
0a7de745 | 7269 | } |
55e303ae A |
7270 | } |
7271 | } | |
0a7de745 | 7272 | if (m == VM_PAGE_NULL) { |
593a1d5f | 7273 | goto commit_next_page; |
0a7de745 | 7274 | } |
55e303ae | 7275 | |
39037602 A |
7276 | m_object = VM_PAGE_OBJECT(m); |
7277 | ||
d9a64523 A |
7278 | if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { |
7279 | assert(m->vmp_busy); | |
39236c6e A |
7280 | |
7281 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); | |
7282 | goto commit_next_page; | |
7283 | } | |
7284 | ||
593a1d5f A |
7285 | if (flags & UPL_COMMIT_CS_VALIDATED) { |
7286 | /* | |
7287 | * CODE SIGNING: | |
7288 | * Set the code signing bits according to | |
7289 | * what the UPL says they should be. | |
7290 | */ | |
d9a64523 A |
7291 | m->vmp_cs_validated = page_list[entry].cs_validated; |
7292 | m->vmp_cs_tainted = page_list[entry].cs_tainted; | |
7293 | m->vmp_cs_nx = page_list[entry].cs_nx; | |
593a1d5f | 7294 | } |
0a7de745 | 7295 | if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) { |
d9a64523 | 7296 | m->vmp_written_by_kernel = TRUE; |
0a7de745 | 7297 | } |
15129b1c | 7298 | |
593a1d5f | 7299 | if (upl->flags & UPL_IO_WIRE) { |
0a7de745 | 7300 | if (page_list) { |
593a1d5f | 7301 | page_list[entry].phys_addr = 0; |
0a7de745 | 7302 | } |
2d21ac55 | 7303 | |
6d2010ae | 7304 | if (flags & UPL_COMMIT_SET_DIRTY) { |
316670eb | 7305 | SET_PAGE_DIRTY(m, FALSE); |
6d2010ae | 7306 | } else if (flags & UPL_COMMIT_CLEAR_DIRTY) { |
d9a64523 | 7307 | m->vmp_dirty = FALSE; |
b0d623f7 | 7308 | |
0a7de745 | 7309 | if (!(flags & UPL_COMMIT_CS_VALIDATED) && |
d9a64523 | 7310 | m->vmp_cs_validated && !m->vmp_cs_tainted) { |
4a3eedf9 A |
7311 | /* |
7312 | * CODE SIGNING: | |
7313 | * This page is no longer dirty | |
7314 | * but could have been modified, | |
7315 | * so it will need to be | |
7316 | * re-validated. | |
7317 | */ | |
d9a64523 A |
7318 | m->vmp_cs_validated = FALSE; |
7319 | ||
7320 | VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1); | |
7321 | ||
39037602 | 7322 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
4a3eedf9 | 7323 | } |
91447636 | 7324 | clear_refmod |= VM_MEM_MODIFIED; |
55e303ae | 7325 | } |
b0d623f7 | 7326 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
593a1d5f A |
7327 | /* |
7328 | * We blocked access to the pages in this UPL. | |
7329 | * Clear the "busy" bit and wake up any waiter | |
7330 | * for this page. | |
7331 | */ | |
b0d623f7 | 7332 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); |
593a1d5f | 7333 | } |
fe8ab488 | 7334 | if (fast_path_possible) { |
39037602 A |
7335 | assert(m_object->purgable != VM_PURGABLE_EMPTY); |
7336 | assert(m_object->purgable != VM_PURGABLE_VOLATILE); | |
d9a64523 A |
7337 | if (m->vmp_absent) { |
7338 | assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); | |
7339 | assert(m->vmp_wire_count == 0); | |
7340 | assert(m->vmp_busy); | |
fe8ab488 | 7341 | |
d9a64523 | 7342 | m->vmp_absent = FALSE; |
d41d1dae | 7343 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); |
fe8ab488 | 7344 | } else { |
0a7de745 | 7345 | if (m->vmp_wire_count == 0) { |
fe8ab488 | 7346 | panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object); |
0a7de745 | 7347 | } |
d9a64523 | 7348 | assert(m->vmp_q_state == VM_PAGE_IS_WIRED); |
fe8ab488 A |
7349 | |
7350 | /* | |
7351 | * XXX FBDP need to update some other | |
7352 | * counters here (purgeable_wired_count) | |
7353 | * (ledgers), ... | |
7354 | */ | |
d9a64523 A |
7355 | assert(m->vmp_wire_count > 0); |
7356 | m->vmp_wire_count--; | |
7ddcb079 | 7357 | |
d9a64523 A |
7358 | if (m->vmp_wire_count == 0) { |
7359 | m->vmp_q_state = VM_PAGE_NOT_ON_Q; | |
fe8ab488 | 7360 | unwired_count++; |
39037602 | 7361 | } |
d41d1dae | 7362 | } |
d9a64523 A |
7363 | if (m->vmp_wire_count == 0) { |
7364 | assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); | |
39037602 A |
7365 | |
7366 | if (last_local == VM_PAGE_NULL) { | |
7367 | assert(first_local == VM_PAGE_NULL); | |
7368 | ||
7369 | last_local = m; | |
7370 | first_local = m; | |
7371 | } else { | |
7372 | assert(first_local != VM_PAGE_NULL); | |
7373 | ||
d9a64523 A |
7374 | m->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local); |
7375 | first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m); | |
39037602 A |
7376 | first_local = m; |
7377 | } | |
fe8ab488 | 7378 | local_queue_count++; |
d41d1dae | 7379 | |
fe8ab488 | 7380 | if (throttle_page) { |
d9a64523 | 7381 | m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; |
fe8ab488 | 7382 | } else { |
39037602 | 7383 | if (flags & UPL_COMMIT_INACTIVATE) { |
0a7de745 | 7384 | if (shadow_object->internal) { |
d9a64523 | 7385 | m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q; |
0a7de745 | 7386 | } else { |
d9a64523 | 7387 | m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q; |
0a7de745 A |
7388 | } |
7389 | } else { | |
d9a64523 | 7390 | m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; |
0a7de745 | 7391 | } |
fe8ab488 A |
7392 | } |
7393 | } | |
7394 | } else { | |
7395 | if (flags & UPL_COMMIT_INACTIVATE) { | |
7396 | dwp->dw_mask |= DW_vm_page_deactivate_internal; | |
7397 | clear_refmod |= VM_MEM_REFERENCED; | |
7398 | } | |
d9a64523 | 7399 | if (m->vmp_absent) { |
0a7de745 | 7400 | if (flags & UPL_COMMIT_FREE_ABSENT) { |
fe8ab488 | 7401 | dwp->dw_mask |= DW_vm_page_free; |
0a7de745 | 7402 | } else { |
d9a64523 | 7403 | m->vmp_absent = FALSE; |
fe8ab488 A |
7404 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); |
7405 | ||
0a7de745 | 7406 | if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) { |
fe8ab488 | 7407 | dwp->dw_mask |= DW_vm_page_activate; |
0a7de745 | 7408 | } |
fe8ab488 | 7409 | } |
0a7de745 | 7410 | } else { |
fe8ab488 | 7411 | dwp->dw_mask |= DW_vm_page_unwire; |
0a7de745 | 7412 | } |
fe8ab488 | 7413 | } |
593a1d5f A |
7414 | goto commit_next_page; |
7415 | } | |
d9a64523 | 7416 | assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR); |
39236c6e | 7417 | |
0a7de745 | 7418 | if (page_list) { |
316670eb | 7419 | page_list[entry].phys_addr = 0; |
0a7de745 | 7420 | } |
316670eb | 7421 | |
593a1d5f A |
7422 | /* |
7423 | * make sure to clear the hardware | |
7424 | * modify or reference bits before | |
7425 | * releasing the BUSY bit on this page | |
7426 | * otherwise we risk losing a legitimate | |
7427 | * change of state | |
7428 | */ | |
7429 | if (flags & UPL_COMMIT_CLEAR_DIRTY) { | |
d9a64523 | 7430 | m->vmp_dirty = FALSE; |
2d21ac55 | 7431 | |
593a1d5f A |
7432 | clear_refmod |= VM_MEM_MODIFIED; |
7433 | } | |
0a7de745 | 7434 | if (m->vmp_laundry) { |
316670eb | 7435 | dwp->dw_mask |= DW_vm_pageout_throttle_up; |
0a7de745 | 7436 | } |
b0d623f7 | 7437 | |
0a7de745 | 7438 | if (VM_PAGE_WIRED(m)) { |
d9a64523 | 7439 | m->vmp_free_when_done = FALSE; |
0a7de745 | 7440 | } |
d9a64523 | 7441 | |
0a7de745 | 7442 | if (!(flags & UPL_COMMIT_CS_VALIDATED) && |
d9a64523 | 7443 | m->vmp_cs_validated && !m->vmp_cs_tainted) { |
316670eb A |
7444 | /* |
7445 | * CODE SIGNING: | |
7446 | * This page is no longer dirty | |
7447 | * but could have been modified, | |
7448 | * so it will need to be | |
7449 | * re-validated. | |
7450 | */ | |
d9a64523 A |
7451 | m->vmp_cs_validated = FALSE; |
7452 | ||
7453 | VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1); | |
7454 | ||
39037602 | 7455 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
316670eb | 7456 | } |
d9a64523 | 7457 | if (m->vmp_overwriting) { |
316670eb A |
7458 | /* |
7459 | * the (COPY_OUT_FROM == FALSE) request_page_list case | |
7460 | */ | |
d9a64523 | 7461 | if (m->vmp_busy) { |
fe8ab488 | 7462 | #if CONFIG_PHANTOM_CACHE |
0a7de745 | 7463 | if (m->vmp_absent && !m_object->internal) { |
fe8ab488 | 7464 | dwp->dw_mask |= DW_vm_phantom_cache_update; |
0a7de745 | 7465 | } |
fe8ab488 | 7466 | #endif |
d9a64523 | 7467 | m->vmp_absent = FALSE; |
b0d623f7 | 7468 | |
316670eb A |
7469 | dwp->dw_mask |= DW_clear_busy; |
7470 | } else { | |
7471 | /* | |
7472 | * alternate (COPY_OUT_FROM == FALSE) page_list case | |
7473 | * Occurs when the original page was wired | |
7474 | * at the time of the list request | |
7475 | */ | |
7476 | assert(VM_PAGE_WIRED(m)); | |
7477 | ||
7478 | dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */ | |
593a1d5f | 7479 | } |
d9a64523 | 7480 | m->vmp_overwriting = FALSE; |
593a1d5f | 7481 | } |
d9a64523 | 7482 | m->vmp_cleaning = FALSE; |
91447636 | 7483 | |
d9a64523 A |
7484 | if (m->vmp_free_when_done) { |
7485 | /* | |
316670eb | 7486 | * With the clean queue enabled, UPL_PAGEOUT should |
cb323159 | 7487 | * no longer set the pageout bit. Its pages now go |
316670eb | 7488 | * to the clean queue. |
cb323159 A |
7489 | * |
7490 | * We don't use the cleaned Q anymore and so this | |
7491 | * assert isn't correct. The code for the clean Q | |
7492 | * still exists and might be used in the future. If we | |
7493 | * go back to the cleaned Q, we will re-enable this | |
7494 | * assert. | |
7495 | * | |
7496 | * assert(!(upl->flags & UPL_PAGEOUT)); | |
316670eb | 7497 | */ |
39037602 | 7498 | assert(!m_object->internal); |
316670eb | 7499 | |
d9a64523 A |
7500 | m->vmp_free_when_done = FALSE; |
7501 | ||
b0d623f7 | 7502 | if ((flags & UPL_COMMIT_SET_DIRTY) || |
d9a64523 | 7503 | (m->vmp_pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) { |
593a1d5f A |
7504 | /* |
7505 | * page was re-dirtied after we started | |
d9a64523 | 7506 | * the pageout... reactivate it since |
593a1d5f A |
7507 | * we don't know whether the on-disk |
7508 | * copy matches what is now in memory | |
2d21ac55 | 7509 | */ |
316670eb | 7510 | SET_PAGE_DIRTY(m, FALSE); |
d9a64523 | 7511 | |
316670eb | 7512 | dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP; |
b0d623f7 | 7513 | |
593a1d5f | 7514 | if (upl->flags & UPL_PAGEOUT) { |
593a1d5f A |
7515 | VM_STAT_INCR(reactivations); |
7516 | DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL); | |
7517 | } | |
593a1d5f A |
7518 | } else { |
7519 | /* | |
7520 | * page has been successfully cleaned | |
7521 | * go ahead and free it for other use | |
2d21ac55 | 7522 | */ |
39037602 | 7523 | if (m_object->internal) { |
593a1d5f A |
7524 | DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL); |
7525 | } else { | |
7526 | DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL); | |
7527 | } | |
d9a64523 A |
7528 | m->vmp_dirty = FALSE; |
7529 | m->vmp_busy = TRUE; | |
b0d623f7 | 7530 | |
316670eb | 7531 | dwp->dw_mask |= DW_vm_page_free; |
de355530 | 7532 | } |
593a1d5f A |
7533 | goto commit_next_page; |
7534 | } | |
593a1d5f A |
7535 | /* |
7536 | * It is a part of the semantic of COPYOUT_FROM | |
7537 | * UPLs that a commit implies cache sync | |
7538 | * between the vm page and the backing store | |
7539 | * this can be used to strip the precious bit | |
7540 | * as well as clean | |
7541 | */ | |
0a7de745 | 7542 | if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) { |
d9a64523 | 7543 | m->vmp_precious = FALSE; |
0a7de745 | 7544 | } |
b0d623f7 | 7545 | |
316670eb A |
7546 | if (flags & UPL_COMMIT_SET_DIRTY) { |
7547 | SET_PAGE_DIRTY(m, FALSE); | |
7548 | } else { | |
d9a64523 | 7549 | m->vmp_dirty = FALSE; |
316670eb A |
7550 | } |
7551 | ||
7552 | /* with the clean queue on, move *all* cleaned pages to the clean queue */ | |
d9a64523 | 7553 | if (hibernate_cleaning_in_progress == FALSE && !m->vmp_dirty && (upl->flags & UPL_PAGEOUT)) { |
316670eb A |
7554 | pgpgout_count++; |
7555 | ||
fe8ab488 A |
7556 | VM_STAT_INCR(pageouts); |
7557 | DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL); | |
b0d623f7 | 7558 | |
316670eb | 7559 | dwp->dw_mask |= DW_enqueue_cleaned; |
d9a64523 | 7560 | } else if (should_be_throttled == TRUE && (m->vmp_q_state == VM_PAGE_NOT_ON_Q)) { |
6d2010ae A |
7561 | /* |
7562 | * page coming back in from being 'frozen'... | |
7563 | * it was dirty before it was frozen, so keep it so | |
7564 | * the vm_page_activate will notice that it really belongs | |
7565 | * on the throttle queue and put it there | |
7566 | */ | |
316670eb | 7567 | SET_PAGE_DIRTY(m, FALSE); |
6d2010ae | 7568 | dwp->dw_mask |= DW_vm_page_activate; |
6d2010ae | 7569 | } else { |
d9a64523 | 7570 | if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) { |
b0d623f7 A |
7571 | dwp->dw_mask |= DW_vm_page_deactivate_internal; |
7572 | clear_refmod |= VM_MEM_REFERENCED; | |
0a7de745 A |
7573 | } else if (!VM_PAGE_PAGEABLE(m)) { |
7574 | if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) { | |
6d2010ae | 7575 | dwp->dw_mask |= DW_vm_page_speculate; |
0a7de745 | 7576 | } else if (m->vmp_reference) { |
6d2010ae | 7577 | dwp->dw_mask |= DW_vm_page_activate; |
0a7de745 | 7578 | } else { |
6d2010ae A |
7579 | dwp->dw_mask |= DW_vm_page_deactivate_internal; |
7580 | clear_refmod |= VM_MEM_REFERENCED; | |
7581 | } | |
b0d623f7 | 7582 | } |
593a1d5f | 7583 | } |
b0d623f7 | 7584 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
2d21ac55 | 7585 | /* |
593a1d5f A |
7586 | * We blocked access to the pages in this UPL. |
7587 | * Clear the "busy" bit on this page before we | |
7588 | * wake up any waiter. | |
2d21ac55 | 7589 | */ |
b0d623f7 | 7590 | dwp->dw_mask |= DW_clear_busy; |
1c79356b | 7591 | } |
593a1d5f A |
7592 | /* |
7593 | * Wake up any thread waiting for the page to finish cleaning. |
7594 | */ | |
b0d623f7 | 7595 | dwp->dw_mask |= DW_PAGE_WAKEUP; |
593a1d5f | 7596 | |
2d21ac55 | 7597 | commit_next_page: |
0a7de745 | 7598 | if (clear_refmod) { |
39037602 | 7599 | pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod); |
0a7de745 | 7600 | } |
b0d623f7 | 7601 | |
1c79356b A |
7602 | target_offset += PAGE_SIZE_64; |
7603 | xfer_size -= PAGE_SIZE; | |
7604 | entry++; | |
2d21ac55 | 7605 | |
b0d623f7 A |
7606 | if (dwp->dw_mask) { |
7607 | if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) { | |
6d2010ae | 7608 | VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); |
4a3eedf9 | 7609 | |
6d2010ae | 7610 | if (dw_count >= dw_limit) { |
3e170ce0 | 7611 | vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
d9a64523 | 7612 | |
b0d623f7 A |
7613 | dwp = &dw_array[0]; |
7614 | dw_count = 0; | |
7615 | } | |
7616 | } else { | |
0a7de745 | 7617 | if (dwp->dw_mask & DW_clear_busy) { |
d9a64523 | 7618 | m->vmp_busy = FALSE; |
0a7de745 | 7619 | } |
b0d623f7 | 7620 | |
0a7de745 | 7621 | if (dwp->dw_mask & DW_PAGE_WAKEUP) { |
b0d623f7 | 7622 | PAGE_WAKEUP(m); |
0a7de745 | 7623 | } |
4a3eedf9 | 7624 | } |
2d21ac55 | 7625 | } |
1c79356b | 7626 | } |
0a7de745 | 7627 | if (dw_count) { |
3e170ce0 | 7628 | vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
0a7de745 | 7629 | } |
55e303ae | 7630 | |
fe8ab488 | 7631 | if (fast_path_possible) { |
fe8ab488 A |
7632 | assert(shadow_object->purgable != VM_PURGABLE_VOLATILE); |
7633 | assert(shadow_object->purgable != VM_PURGABLE_EMPTY); | |
7634 | ||
7635 | if (local_queue_count || unwired_count) { | |
fe8ab488 | 7636 | if (local_queue_count) { |
0a7de745 A |
7637 | vm_page_t first_target; |
7638 | vm_page_queue_head_t *target_queue; | |
fe8ab488 | 7639 | |
0a7de745 | 7640 | if (throttle_page) { |
fe8ab488 | 7641 | target_queue = &vm_page_queue_throttled; |
0a7de745 | 7642 | } else { |
fe8ab488 | 7643 | if (flags & UPL_COMMIT_INACTIVATE) { |
0a7de745 | 7644 | if (shadow_object->internal) { |
fe8ab488 | 7645 | target_queue = &vm_page_queue_anonymous; |
0a7de745 | 7646 | } else { |
fe8ab488 | 7647 | target_queue = &vm_page_queue_inactive; |
0a7de745 A |
7648 | } |
7649 | } else { | |
fe8ab488 | 7650 | target_queue = &vm_page_queue_active; |
0a7de745 | 7651 | } |
fe8ab488 A |
7652 | } |
7653 | /* | |
7654 | * Transfer the entire local queue to the regular LRU page queues. |
7655 | */ | |
fe8ab488 A |
7656 | vm_page_lockspin_queues(); |
7657 | ||
39037602 | 7658 | first_target = (vm_page_t) vm_page_queue_first(target_queue); |
fe8ab488 | 7659 | |
0a7de745 | 7660 | if (vm_page_queue_empty(target_queue)) { |
39037602 | 7661 | target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local); |
0a7de745 | 7662 | } else { |
d9a64523 | 7663 | first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local); |
0a7de745 | 7664 | } |
fe8ab488 | 7665 | |
39037602 | 7666 | target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local); |
d9a64523 A |
7667 | first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue); |
7668 | last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target); | |
fe8ab488 A |
7669 | |
7670 | /* | |
7671 | * Adjust the global page counts. | |
7672 | */ | |
7673 | if (throttle_page) { | |
7674 | vm_page_throttled_count += local_queue_count; | |
7675 | } else { | |
7676 | if (flags & UPL_COMMIT_INACTIVATE) { | |
0a7de745 | 7677 | if (shadow_object->internal) { |
fe8ab488 | 7678 | vm_page_anonymous_count += local_queue_count; |
0a7de745 | 7679 | } |
fe8ab488 A |
7680 | vm_page_inactive_count += local_queue_count; |
7681 | ||
7682 | token_new_pagecount += local_queue_count; | |
0a7de745 | 7683 | } else { |
fe8ab488 | 7684 | vm_page_active_count += local_queue_count; |
0a7de745 | 7685 | } |
fe8ab488 | 7686 | |
0a7de745 | 7687 | if (shadow_object->internal) { |
fe8ab488 | 7688 | vm_page_pageable_internal_count += local_queue_count; |
0a7de745 | 7689 | } else { |
fe8ab488 | 7690 | vm_page_pageable_external_count += local_queue_count; |
0a7de745 | 7691 | } |
fe8ab488 A |
7692 | } |
7693 | } else { | |
7694 | vm_page_lockspin_queues(); | |
7695 | } | |
d9a64523 | 7696 | if (unwired_count) { |
fe8ab488 A |
7697 | vm_page_wire_count -= unwired_count; |
7698 | VM_CHECK_MEMORYSTATUS; | |
7699 | } | |
7700 | vm_page_unlock_queues(); | |
7701 | ||
5ba3f43e | 7702 | VM_OBJECT_WIRED_PAGE_COUNT(shadow_object, -unwired_count); |
fe8ab488 A |
7703 | } |
7704 | } | |
55e303ae A |
7705 | occupied = 1; |
7706 | ||
0a7de745 | 7707 | if (upl->flags & UPL_DEVICE_MEMORY) { |
55e303ae A |
7708 | occupied = 0; |
7709 | } else if (upl->flags & UPL_LITE) { | |
0a7de745 A |
7710 | int pg_num; |
7711 | int i; | |
2d21ac55 | 7712 | |
55e303ae | 7713 | occupied = 0; |
2d21ac55 | 7714 | |
fe8ab488 | 7715 | if (!fast_path_full_commit) { |
0a7de745 | 7716 | pg_num = upl->size / PAGE_SIZE; |
fe8ab488 A |
7717 | pg_num = (pg_num + 31) >> 5; |
7718 | ||
7719 | for (i = 0; i < pg_num; i++) { | |
7720 | if (lite_list[i] != 0) { | |
7721 | occupied = 1; | |
7722 | break; | |
7723 | } | |
55e303ae A |
7724 | } |
7725 | } | |
7726 | } else { | |
0a7de745 | 7727 | if (vm_page_queue_empty(&upl->map_object->memq)) { |
55e303ae | 7728 | occupied = 0; |
0a7de745 | 7729 | } |
55e303ae | 7730 | } |
2d21ac55 | 7731 | if (occupied == 0) { |
b0d623f7 A |
7732 | /* |
7733 | * If this UPL element belongs to a Vector UPL and is | |
7734 | * empty, then this is the right function to deallocate | |
7735 | * it. So go ahead and set the *empty variable. The flag |
7736 | * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view | |
7737 | * should be considered relevant for the Vector UPL and not | |
7738 | * the internal UPLs. | |
7739 | */ | |
0a7de745 | 7740 | if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) { |
0b4e3aa0 | 7741 | *empty = TRUE; |
0a7de745 | 7742 | } |
2d21ac55 | 7743 | |
b0d623f7 | 7744 | if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) { |
0a7de745 | 7745 | /* |
2d21ac55 A |
7746 | * this is not a paging object |
7747 | * so we need to drop the paging reference | |
7748 | * that was taken when we created the UPL | |
7749 | * against this object | |
7750 | */ | |
b0d623f7 | 7751 | vm_object_activity_end(shadow_object); |
316670eb | 7752 | vm_object_collapse(shadow_object, 0, TRUE); |
2d21ac55 | 7753 | } else { |
0a7de745 A |
7754 | /* |
7755 | * we donated the paging reference to |
7756 | * the map object... vm_pageout_object_terminate | |
7757 | * will drop this reference | |
7758 | */ | |
2d21ac55 | 7759 | } |
1c79356b | 7760 | } |
5ba3f43e | 7761 | VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag); |
55e303ae | 7762 | vm_object_unlock(shadow_object); |
0a7de745 A |
7763 | if (object != shadow_object) { |
7764 | vm_object_unlock(object); | |
7765 | } | |
d9a64523 | 7766 | |
0a7de745 | 7767 | if (!isVectorUPL) { |
b0d623f7 | 7768 | upl_unlock(upl); |
0a7de745 | 7769 | } else { |
d9a64523 | 7770 | /* |
b0d623f7 A |
7771 | * If we completed our operations on an UPL that is |
7772 | * part of a Vectored UPL and if empty is TRUE, then | |
d9a64523 | 7773 | * we should go ahead and deallocate this UPL element. |
b0d623f7 A |
7774 | * Then we check if this was the last of the UPL elements |
7775 | * within that Vectored UPL. If so, set empty to TRUE | |
7776 | * so that in ubc_upl_commit_range or ubc_upl_commit, we | |
7777 | * can go ahead and deallocate the Vector UPL too. | |
7778 | */ | |
0a7de745 | 7779 | if (*empty == TRUE) { |
b0d623f7 A |
7780 | *empty = vector_upl_set_subupl(vector_upl, upl, 0); |
7781 | upl_deallocate(upl); | |
7782 | } | |
7783 | goto process_upl_to_commit; | |
7784 | } | |
2d21ac55 A |
7785 | if (pgpgout_count) { |
7786 | DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL); | |
7787 | } | |
7788 | ||
1c79356b A |
7789 | return KERN_SUCCESS; |
7790 | } | |
7791 | ||
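/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * chunked commit loop a caller might run against upl_commit_range(),
 * deallocating the UPL once it reports empty, much as ubc_upl_commit_range()
 * does under UPL_COMMIT_FREE_ON_EMPTY.  The "example_" name is hypothetical.
 */
static void
example_commit_in_chunks(upl_t upl, upl_size_t upl_size)
{
	upl_offset_t offset;
	boolean_t    empty = FALSE;

	for (offset = 0; offset < upl_size; offset += PAGE_SIZE) {
		if (upl_commit_range(upl, offset, PAGE_SIZE,
		    UPL_COMMIT_NOTIFY_EMPTY, NULL, 0, &empty) != KERN_SUCCESS) {
			break;
		}
	}
	if (empty) {
		/* the last committed chunk emptied the UPL */
		upl_deallocate(upl);
	}
}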
0b4e3aa0 A |
7792 | kern_return_t |
7793 | upl_abort_range( | |
0a7de745 A |
7794 | upl_t upl, |
7795 | upl_offset_t offset, | |
7796 | upl_size_t size, | |
7797 | int error, | |
7798 | boolean_t *empty) | |
1c79356b | 7799 | { |
0a7de745 A |
7800 | upl_page_info_t *user_page_list = NULL; |
7801 | upl_size_t xfer_size, subupl_size = size; | |
7802 | vm_object_t shadow_object; | |
7803 | vm_object_t object; | |
7804 | vm_object_offset_t target_offset; | |
7805 | upl_offset_t subupl_offset = offset; | |
7806 | int entry; | |
7807 | wpl_array_t lite_list; | |
7808 | int occupied; | |
7809 | struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; | |
7810 | struct vm_page_delayed_work *dwp; | |
7811 | int dw_count; | |
7812 | int dw_limit; | |
7813 | int isVectorUPL = 0; | |
7814 | upl_t vector_upl = NULL; | |
1c79356b | 7815 | |
0b4e3aa0 A |
7816 | *empty = FALSE; |
7817 | ||
0a7de745 | 7818 | if (upl == UPL_NULL) { |
0b4e3aa0 | 7819 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 7820 | } |
0b4e3aa0 | 7821 | |
0a7de745 | 7822 | if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) { |
0b4c1975 | 7823 | return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty); |
0a7de745 | 7824 | } |
55e303ae | 7825 | |
0a7de745 | 7826 | if ((isVectorUPL = vector_upl_is_valid(upl))) { |
b0d623f7 A |
7827 | vector_upl = upl; |
7828 | upl_lock(vector_upl); | |
0a7de745 | 7829 | } else { |
b0d623f7 | 7830 | upl_lock(upl); |
0a7de745 | 7831 | } |
b0d623f7 A |
7832 | |
7833 | process_upl_to_abort: | |
0a7de745 | 7834 | if (isVectorUPL) { |
b0d623f7 A |
7835 | size = subupl_size; |
7836 | offset = subupl_offset; | |
0a7de745 | 7837 | if (size == 0) { |
b0d623f7 A |
7838 | upl_unlock(vector_upl); |
7839 | return KERN_SUCCESS; | |
7840 | } | |
7841 | upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); | |
0a7de745 | 7842 | if (upl == NULL) { |
b0d623f7 A |
7843 | upl_unlock(vector_upl); |
7844 | return KERN_FAILURE; | |
7845 | } | |
7846 | subupl_size -= size; | |
7847 | subupl_offset += size; | |
7848 | } | |
7849 | ||
7850 | *empty = FALSE; | |
7851 | ||
7852 | #if UPL_DEBUG | |
7853 | if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) { | |
7854 | (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES); | |
d9a64523 | 7855 | |
b0d623f7 A |
7856 | upl->upl_commit_records[upl->upl_commit_index].c_beg = offset; |
7857 | upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size); | |
7858 | upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1; | |
7859 | ||
7860 | upl->upl_commit_index++; | |
7861 | } | |
7862 | #endif | |
0a7de745 | 7863 | if (upl->flags & UPL_DEVICE_MEMORY) { |
1c79356b | 7864 | xfer_size = 0; |
0a7de745 A |
7865 | } else if ((offset + size) <= upl->size) { |
7866 | xfer_size = size; | |
7867 | } else { | |
7868 | if (!isVectorUPL) { | |
b0d623f7 | 7869 | upl_unlock(upl); |
0a7de745 | 7870 | } else { |
b0d623f7 A |
7871 | upl_unlock(vector_upl); |
7872 | } | |
55e303ae | 7873 | |
b0d623f7 A |
7874 | return KERN_FAILURE; |
7875 | } | |
2d21ac55 | 7876 | if (upl->flags & UPL_INTERNAL) { |
d9a64523 | 7877 | lite_list = (wpl_array_t) |
0a7de745 A |
7878 | ((((uintptr_t)upl) + sizeof(struct upl)) |
7879 | + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); | |
316670eb A |
7880 | |
7881 | user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); | |
55e303ae | 7882 | } else { |
d9a64523 | 7883 | lite_list = (wpl_array_t) |
0a7de745 | 7884 | (((uintptr_t)upl) + sizeof(struct upl)); |
55e303ae | 7885 | } |
2d21ac55 A |
7886 | object = upl->map_object; |
7887 | ||
7888 | if (upl->flags & UPL_SHADOWED) { | |
0a7de745 | 7889 | vm_object_lock(object); |
2d21ac55 | 7890 | shadow_object = object->shadow; |
0a7de745 | 7891 | } else { |
2d21ac55 | 7892 | shadow_object = object; |
0a7de745 | 7893 | } |
2d21ac55 | 7894 | |
0a7de745 | 7895 | entry = offset / PAGE_SIZE; |
1c79356b | 7896 | target_offset = (vm_object_offset_t)offset; |
2d21ac55 | 7897 | |
3e170ce0 A |
7898 | assert(!(target_offset & PAGE_MASK)); |
7899 | assert(!(xfer_size & PAGE_MASK)); | |
7900 | ||
0a7de745 | 7901 | if (upl->flags & UPL_KERNEL_OBJECT) { |
b0d623f7 | 7902 | vm_object_lock_shared(shadow_object); |
0a7de745 | 7903 | } else { |
b0d623f7 | 7904 | vm_object_lock(shadow_object); |
0a7de745 | 7905 | } |
4a3eedf9 | 7906 | |
b0d623f7 A |
7907 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
7908 | assert(shadow_object->blocked_access); | |
7909 | shadow_object->blocked_access = FALSE; | |
7910 | vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED); | |
4a3eedf9 | 7911 | } |
b0d623f7 A |
7912 | |
7913 | dwp = &dw_array[0]; | |
7914 | dw_count = 0; | |
6d2010ae | 7915 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
b0d623f7 | 7916 | |
0a7de745 | 7917 | if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) { |
b0d623f7 | 7918 | panic("upl_abort_range: kernel_object being DUMPED"); |
0a7de745 | 7919 | } |
4a3eedf9 | 7920 | |
2d21ac55 | 7921 | while (xfer_size) { |
0a7de745 A |
7922 | vm_page_t t, m; |
7923 | unsigned int pg_num; | |
7924 | boolean_t needed; | |
2d21ac55 | 7925 | |
0a7de745 A |
7926 | pg_num = (unsigned int) (target_offset / PAGE_SIZE); |
7927 | assert(pg_num == target_offset / PAGE_SIZE); | |
316670eb A |
7928 | |
7929 | needed = FALSE; | |
b0d623f7 | 7930 | |
0a7de745 | 7931 | if (user_page_list) { |
316670eb | 7932 | needed = user_page_list[pg_num].needed; |
0a7de745 | 7933 | } |
316670eb A |
7934 | |
7935 | dwp->dw_mask = 0; | |
55e303ae | 7936 | m = VM_PAGE_NULL; |
2d21ac55 A |
7937 | |
7938 | if (upl->flags & UPL_LITE) { | |
cb323159 A |
7939 | if (lite_list[pg_num >> 5] & (1U << (pg_num & 31))) { |
7940 | lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31)); | |
2d21ac55 | 7941 | |
0a7de745 | 7942 | if (!(upl->flags & UPL_KERNEL_OBJECT)) { |
b0d623f7 | 7943 | m = vm_page_lookup(shadow_object, target_offset + |
0a7de745 A |
7944 | (upl->offset - shadow_object->paging_offset)); |
7945 | } | |
55e303ae A |
7946 | } |
7947 | } | |
2d21ac55 | 7948 | if (upl->flags & UPL_SHADOWED) { |
0a7de745 A |
7949 | if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { |
7950 | t->vmp_free_when_done = FALSE; | |
2d21ac55 | 7951 | |
b0d623f7 | 7952 | VM_PAGE_FREE(t); |
2d21ac55 | 7953 | |
0a7de745 | 7954 | if (m == VM_PAGE_NULL) { |
6d2010ae | 7955 | m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset); |
0a7de745 | 7956 | } |
55e303ae A |
7957 | } |
7958 | } | |
0a7de745 | 7959 | if ((upl->flags & UPL_KERNEL_OBJECT)) { |
b0d623f7 | 7960 | goto abort_next_page; |
0a7de745 | 7961 | } |
b0d623f7 | 7962 | |
2d21ac55 | 7963 | if (m != VM_PAGE_NULL) { |
d9a64523 | 7964 | assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR); |
39236c6e | 7965 | |
d9a64523 | 7966 | if (m->vmp_absent) { |
0a7de745 | 7967 | boolean_t must_free = TRUE; |
91447636 | 7968 | |
2d21ac55 A |
7969 | /* |
7970 | * COPYOUT = FALSE case | |
7971 | * check for error conditions which must | |
7972 | * be passed back to the page's customer |
7973 | */ | |
7974 | if (error & UPL_ABORT_RESTART) { | |
d9a64523 A |
7975 | m->vmp_restart = TRUE; |
7976 | m->vmp_absent = FALSE; | |
7977 | m->vmp_unusual = TRUE; | |
91447636 | 7978 | must_free = FALSE; |
2d21ac55 | 7979 | } else if (error & UPL_ABORT_UNAVAILABLE) { |
d9a64523 A |
7980 | m->vmp_restart = FALSE; |
7981 | m->vmp_unusual = TRUE; | |
91447636 | 7982 | must_free = FALSE; |
2d21ac55 | 7983 | } else if (error & UPL_ABORT_ERROR) { |
d9a64523 A |
7984 | m->vmp_restart = FALSE; |
7985 | m->vmp_absent = FALSE; | |
7986 | m->vmp_error = TRUE; | |
7987 | m->vmp_unusual = TRUE; | |
91447636 | 7988 | must_free = FALSE; |
1c79356b | 7989 | } |
d9a64523 | 7990 | if (m->vmp_clustered && needed == FALSE) { |
6d2010ae A |
7991 | /* |
7992 | * This page was a part of a speculative | |
7993 | * read-ahead initiated by the kernel | |
7994 | * itself. No one is expecting this | |
7995 | * page and no one will clean up its | |
7996 | * error state if it ever becomes valid | |
7997 | * in the future. | |
7998 | * We have to free it here. | |
7999 | */ | |
8000 | must_free = TRUE; | |
8001 | } | |
d9a64523 | 8002 | m->vmp_cleaning = FALSE; |
6d2010ae | 8003 | |
d9a64523 | 8004 | if (m->vmp_overwriting && !m->vmp_busy) { |
6d2010ae A |
8005 | /* |
8006 | * this shouldn't happen since | |
8007 | * this is an 'absent' page, but | |
8008 | * it doesn't hurt to check for | |
d9a64523 | 8009 | * the 'alternate' method of |
6d2010ae A |
8010 | * stabilizing the page... |
8011 | * we will mark 'busy' to be cleared | |
8012 | * in the following code which will | |
8013 | * take care of the primary stabilization |
8014 | * method (i.e. setting 'busy' to TRUE) | |
8015 | */ | |
8016 | dwp->dw_mask |= DW_vm_page_unwire; | |
8017 | } | |
d9a64523 | 8018 | m->vmp_overwriting = FALSE; |
b0d623f7 A |
8019 | |
8020 | dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); | |
91447636 | 8021 | |
0a7de745 | 8022 | if (must_free == TRUE) { |
b0d623f7 | 8023 | dwp->dw_mask |= DW_vm_page_free; |
0a7de745 | 8024 | } else { |
b0d623f7 | 8025 | dwp->dw_mask |= DW_vm_page_activate; |
0a7de745 | 8026 | } |
2d21ac55 | 8027 | } else { |
0a7de745 | 8028 | /* |
2d21ac55 | 8029 | * Handle the trusted pager throttle. |
d9a64523 | 8030 | */ |
0a7de745 | 8031 | if (m->vmp_laundry) { |
b0d623f7 | 8032 | dwp->dw_mask |= DW_vm_pageout_throttle_up; |
0a7de745 | 8033 | } |
2d21ac55 | 8034 | |
6d2010ae A |
8035 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
8036 | /* | |
8037 | * We blocked access to the pages in this UPL. | |
8038 | * Clear the "busy" bit and wake up any waiter | |
8039 | * for this page. | |
8040 | */ | |
8041 | dwp->dw_mask |= DW_clear_busy; | |
8042 | } | |
d9a64523 | 8043 | if (m->vmp_overwriting) { |
0a7de745 | 8044 | if (m->vmp_busy) { |
6d2010ae | 8045 | dwp->dw_mask |= DW_clear_busy; |
0a7de745 | 8046 | } else { |
6d2010ae A |
8047 | /* |
8048 | * deal with the 'alternate' method | |
8049 | * of stabilizing the page... | |
8050 | * we will either free the page | |
8051 | * or mark 'busy' to be cleared | |
8052 | * in the following code which will | |
8053 | * take care of the primary stabilization |
8054 | * method (i.e. setting 'busy' to TRUE) | |
8055 | */ | |
8056 | dwp->dw_mask |= DW_vm_page_unwire; | |
8057 | } | |
d9a64523 | 8058 | m->vmp_overwriting = FALSE; |
6d2010ae | 8059 | } |
d9a64523 A |
8060 | m->vmp_free_when_done = FALSE; |
8061 | m->vmp_cleaning = FALSE; | |
39037602 | 8062 | |
2d21ac55 | 8063 | if (error & UPL_ABORT_DUMP_PAGES) { |
39037602 | 8064 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
b0d623f7 A |
8065 | |
8066 | dwp->dw_mask |= DW_vm_page_free; | |
2d21ac55 | 8067 | } else { |
316670eb A |
8068 | if (!(dwp->dw_mask & DW_vm_page_unwire)) { |
8069 | if (error & UPL_ABORT_REFERENCE) { | |
8070 | /* | |
8071 | * we've been told to explicitly |
d9a64523 | 8072 | * reference this page... for |
316670eb A |
8073 | * file I/O, this is done by |
8074 | * implementing an LRU on the inactive q | |
8075 | */ | |
8076 | dwp->dw_mask |= DW_vm_page_lru; | |
0a7de745 | 8077 | } else if (!VM_PAGE_PAGEABLE(m)) { |
316670eb | 8078 | dwp->dw_mask |= DW_vm_page_deactivate_internal; |
0a7de745 | 8079 | } |
2d21ac55 | 8080 | } |
6d2010ae | 8081 | dwp->dw_mask |= DW_PAGE_WAKEUP; |
2d21ac55 | 8082 | } |
1c79356b | 8083 | } |
2d21ac55 | 8084 | } |
b0d623f7 | 8085 | abort_next_page: |
55e303ae A |
8086 | target_offset += PAGE_SIZE_64; |
8087 | xfer_size -= PAGE_SIZE; | |
8088 | entry++; | |
b0d623f7 A |
8089 | |
8090 | if (dwp->dw_mask) { | |
8091 | if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) { | |
6d2010ae | 8092 | VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); |
b0d623f7 | 8093 | |
6d2010ae | 8094 | if (dw_count >= dw_limit) { |
3e170ce0 | 8095 | vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
d9a64523 | 8096 | |
b0d623f7 A |
8097 | dwp = &dw_array[0]; |
8098 | dw_count = 0; | |
8099 | } | |
8100 | } else { | |
0a7de745 | 8101 | if (dwp->dw_mask & DW_clear_busy) { |
d9a64523 | 8102 | m->vmp_busy = FALSE; |
0a7de745 | 8103 | } |
b0d623f7 | 8104 | |
0a7de745 | 8105 | if (dwp->dw_mask & DW_PAGE_WAKEUP) { |
b0d623f7 | 8106 | PAGE_WAKEUP(m); |
0a7de745 | 8107 | } |
b0d623f7 A |
8108 | } |
8109 | } | |
d7e50217 | 8110 | } |
0a7de745 | 8111 | if (dw_count) { |
3e170ce0 | 8112 | vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); |
0a7de745 | 8113 | } |
2d21ac55 | 8114 | |
55e303ae | 8115 | occupied = 1; |
2d21ac55 | 8116 | |
0a7de745 | 8117 | if (upl->flags & UPL_DEVICE_MEMORY) { |
55e303ae A |
8118 | occupied = 0; |
8119 | } else if (upl->flags & UPL_LITE) { | |
0a7de745 A |
8120 | int pg_num; |
8121 | int i; | |
2d21ac55 | 8122 | |
0a7de745 | 8123 | pg_num = upl->size / PAGE_SIZE; |
55e303ae A |
8124 | pg_num = (pg_num + 31) >> 5; |
8125 | occupied = 0; | |
2d21ac55 A |
8126 | |
8127 | for (i = 0; i < pg_num; i++) { | |
8128 | if (lite_list[i] != 0) { | |
55e303ae A |
8129 | occupied = 1; |
8130 | break; | |
8131 | } | |
8132 | } | |
8133 | } else { | |
0a7de745 | 8134 | if (vm_page_queue_empty(&upl->map_object->memq)) { |
55e303ae | 8135 | occupied = 0; |
0a7de745 | 8136 | } |
55e303ae | 8137 | } |
2d21ac55 | 8138 | if (occupied == 0) { |
b0d623f7 A |
8139 | /* |
8140 | * If this UPL element belongs to a Vector UPL and is | |
8141 | * empty, then this is the right function to deallocate | |
8142 | * it. So go ahead and set the *empty variable. The flag |
8143 | * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view | |
8144 | * should be considered relevant for the Vector UPL and | |
8145 | * not the internal UPLs. | |
8146 | */ | |
0a7de745 | 8147 | if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) { |
0b4e3aa0 | 8148 | *empty = TRUE; |
0a7de745 | 8149 | } |
2d21ac55 | 8150 | |
b0d623f7 | 8151 | if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) { |
0a7de745 | 8152 | /* |
2d21ac55 A |
8153 | * this is not a paging object |
8154 | * so we need to drop the paging reference | |
8155 | * that was taken when we created the UPL | |
8156 | * against this object | |
8157 | */ | |
b0d623f7 | 8158 | vm_object_activity_end(shadow_object); |
316670eb | 8159 | vm_object_collapse(shadow_object, 0, TRUE); |
2d21ac55 | 8160 | } else { |
0a7de745 A |
8161 | /* |
8162 | * we donated the paging reference to |
8163 | * the map object... vm_pageout_object_terminate | |
8164 | * will drop this reference | |
8165 | */ | |
2d21ac55 | 8166 | } |
1c79356b | 8167 | } |
55e303ae | 8168 | vm_object_unlock(shadow_object); |
0a7de745 A |
8169 | if (object != shadow_object) { |
8170 | vm_object_unlock(object); | |
8171 | } | |
d9a64523 | 8172 | |
0a7de745 | 8173 | if (!isVectorUPL) { |
b0d623f7 | 8174 | upl_unlock(upl); |
0a7de745 | 8175 | } else { |
d9a64523 | 8176 | /* |
0a7de745 A |
8177 | * If we completed our operations on an UPL that is |
8178 | * part of a Vectored UPL and if empty is TRUE, then | |
8179 | * we should go ahead and deallocate this UPL element. | |
8180 | * Then we check if this was the last of the UPL elements | |
8181 | * within that Vectored UPL. If so, set empty to TRUE | |
8182 | * so that in ubc_upl_abort_range or ubc_upl_abort, we | |
8183 | * can go ahead and deallocate the Vector UPL too. | |
8184 | */ | |
8185 | if (*empty == TRUE) { | |
8186 | *empty = vector_upl_set_subupl(vector_upl, upl, 0); | |
b0d623f7 A |
8187 | upl_deallocate(upl); |
8188 | } | |
8189 | goto process_upl_to_abort; | |
8190 | } | |
55e303ae | 8191 | |
1c79356b A |
8192 | return KERN_SUCCESS; |
8193 | } | |
8194 | ||
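/*
 * Editor's note (illustrative sketch, not part of the original source): a
 * partial-I/O error path.  A pager that got only done_bytes of a UPL to
 * disk can commit that prefix and abort the remainder with UPL_ABORT_ERROR,
 * so absent pages are marked in error and any waiters are woken.  The
 * "example_" name and the split point are hypothetical.
 */
static void
example_finish_partial_io(upl_t upl, upl_size_t upl_size, upl_size_t done_bytes)
{
	boolean_t empty = FALSE;

	if (done_bytes != 0) {
		(void) upl_commit_range(upl, 0, done_bytes, 0, NULL, 0, &empty);
	}
	(void) upl_abort_range(upl, done_bytes, upl_size - done_bytes,
	    UPL_ABORT_ERROR, &empty);

	if (empty) {
		upl_deallocate(upl);
	}
}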
2d21ac55 | 8195 | |
1c79356b | 8196 | kern_return_t |
0b4e3aa0 | 8197 | upl_abort( |
0a7de745 A |
8198 | upl_t upl, |
8199 | int error) | |
2d21ac55 | 8200 | { |
0a7de745 | 8201 | boolean_t empty; |
2d21ac55 | 8202 | |
0a7de745 | 8203 | if (upl == UPL_NULL) { |
7e41aa88 | 8204 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 8205 | } |
7e41aa88 | 8206 | |
2d21ac55 | 8207 | return upl_abort_range(upl, 0, upl->size, error, &empty); |
1c79356b A |
8208 | } |
8209 | ||
55e303ae | 8210 | |
2d21ac55 A |
8211 | /* an option on commit should be wire */ |
8212 | kern_return_t | |
8213 | upl_commit( | |
0a7de745 A |
8214 | upl_t upl, |
8215 | upl_page_info_t *page_list, | |
8216 | mach_msg_type_number_t count) | |
2d21ac55 | 8217 | { |
0a7de745 | 8218 | boolean_t empty; |
2d21ac55 | 8219 | |
0a7de745 | 8220 | if (upl == UPL_NULL) { |
7e41aa88 | 8221 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 8222 | } |
7e41aa88 | 8223 | |
2d21ac55 A |
8224 | return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty); |
8225 | } | |
8226 | ||
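/*
 * Editor's note (illustrative sketch, not part of the original source):
 * upl_abort() and upl_commit() above are thin whole-UPL wrappers over the
 * _range routines.  A sketch of the usual call: page_list may be NULL,
 * since a count of 0 makes upl_commit_range() ignore it.  The "example_"
 * name is hypothetical.
 */
static kern_return_t
example_release_whole(upl_t upl, int io_error)
{
	if (io_error) {
		/* wake any waiters and mark absent pages unusable */
		return upl_abort(upl, UPL_ABORT_ERROR);
	}
	/* commit every page; NULL/0 means no per-page info to merge back */
	return upl_commit(upl, NULL, 0);
}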
fe8ab488 A |
8227 | |
8228 | void | |
8229 | iopl_valid_data( | |
0a7de745 | 8230 | upl_t upl, |
5ba3f43e | 8231 | vm_tag_t tag) |
fe8ab488 | 8232 | { |
0a7de745 A |
8233 | vm_object_t object; |
8234 | vm_offset_t offset; | |
8235 | vm_page_t m, nxt_page = VM_PAGE_NULL; | |
8236 | upl_size_t size; | |
8237 | int wired_count = 0; | |
fe8ab488 | 8238 | |
0a7de745 | 8239 | if (upl == NULL) { |
fe8ab488 | 8240 | panic("iopl_valid_data: NULL upl"); |
0a7de745 A |
8241 | } |
8242 | if (vector_upl_is_valid(upl)) { | |
fe8ab488 | 8243 | panic("iopl_valid_data: vector upl"); |
0a7de745 A |
8244 | } |
8245 | if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) { | |
fe8ab488 | 8246 | panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags); |
0a7de745 | 8247 | } |
fe8ab488 A |
8248 | |
8249 | object = upl->map_object; | |
8250 | ||
0a7de745 | 8251 | if (object == kernel_object || object == compressor_object) { |
fe8ab488 | 8252 | panic("iopl_valid_data: object == kernel or compressor"); |
0a7de745 | 8253 | } |
fe8ab488 | 8254 | |
39037602 | 8255 | if (object->purgable == VM_PURGABLE_VOLATILE || |
0a7de745 | 8256 | object->purgable == VM_PURGABLE_EMPTY) { |
39037602 | 8257 | panic("iopl_valid_data: object %p purgable %d", |
0a7de745 A |
8258 | object, object->purgable); |
8259 | } | |
fe8ab488 A |
8260 | |
8261 | size = upl->size; | |
8262 | ||
8263 | vm_object_lock(object); | |
5ba3f43e | 8264 | VM_OBJECT_WIRED_PAGE_UPDATE_START(object); |
fe8ab488 | 8265 | |
0a7de745 | 8266 | if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) { |
39037602 | 8267 | nxt_page = (vm_page_t)vm_page_queue_first(&object->memq); |
0a7de745 | 8268 | } else { |
fe8ab488 | 8269 | offset = 0 + upl->offset - object->paging_offset; |
0a7de745 | 8270 | } |
fe8ab488 A |
8271 | |
8272 | while (size) { | |
fe8ab488 A |
8273 | if (nxt_page != VM_PAGE_NULL) { |
8274 | m = nxt_page; | |
d9a64523 | 8275 | nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq); |
fe8ab488 A |
8276 | } else { |
8277 | m = vm_page_lookup(object, offset); | |
8278 | offset += PAGE_SIZE; | |
8279 | ||
0a7de745 | 8280 | if (m == VM_PAGE_NULL) { |
fe8ab488 | 8281 | panic("iopl_valid_data: missing expected page at offset %lx", (long)offset); |
0a7de745 | 8282 | } |
fe8ab488 | 8283 | } |
d9a64523 | 8284 | if (m->vmp_busy) { |
0a7de745 | 8285 | if (!m->vmp_absent) { |
fe8ab488 | 8286 | panic("iopl_valid_data: busy page w/o absent"); |
0a7de745 | 8287 | } |
fe8ab488 | 8288 | |
0a7de745 | 8289 | if (m->vmp_pageq.next || m->vmp_pageq.prev) { |
fe8ab488 | 8290 | panic("iopl_valid_data: busy+absent page on page queue"); |
0a7de745 | 8291 | } |
d9a64523 | 8292 | if (m->vmp_reusable) { |
39037602 A |
8293 | panic("iopl_valid_data: %p is reusable", m); |
8294 | } | |
fe8ab488 | 8295 | |
d9a64523 A |
8296 | m->vmp_absent = FALSE; |
8297 | m->vmp_dirty = TRUE; | |
8298 | assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); | |
8299 | assert(m->vmp_wire_count == 0); | |
8300 | m->vmp_wire_count++; | |
8301 | assert(m->vmp_wire_count); | |
8302 | if (m->vmp_wire_count == 1) { | |
8303 | m->vmp_q_state = VM_PAGE_IS_WIRED; | |
39037602 A |
8304 | wired_count++; |
8305 | } else { | |
8306 | panic("iopl_valid_data: %p already wired\n", m); | |
8307 | } | |
d9a64523 | 8308 | |
fe8ab488 A |
8309 | PAGE_WAKEUP_DONE(m); |
8310 | } | |
8311 | size -= PAGE_SIZE; | |
8312 | } | |
8313 | if (wired_count) { | |
5ba3f43e | 8314 | VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count); |
39037602 A |
8315 | assert(object->resident_page_count >= object->wired_page_count); |
8316 | ||
8317 | /* no need to adjust purgeable accounting for this object: */ | |
8318 | assert(object->purgable != VM_PURGABLE_VOLATILE); | |
8319 | assert(object->purgable != VM_PURGABLE_EMPTY); | |
fe8ab488 A |
8320 | |
8321 | vm_page_lockspin_queues(); | |
8322 | vm_page_wire_count += wired_count; | |
8323 | vm_page_unlock_queues(); | |
8324 | } | |
5ba3f43e | 8325 | VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag); |
fe8ab488 A |
8326 | vm_object_unlock(object); |
8327 | } | |
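A detail worth pulling out of iopl_valid_data(): each page's first wire (vmp_wire_count going 0 -> 1) is only tallied in the local wired_count, and the global vm_page_wire_count is bumped once, under the page-queues lock, after the whole scan. Batching the update keeps the hot global lock out of the per-page loop. A user-space sketch of the same pattern (all names hypothetical):

#include <pthread.h>

/* hypothetical global, guarded by g_lock -- plays vm_page_wire_count */
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static long            g_wired;

struct page { int wire_count; };

/* Wire every page in 'pages', but touch the contended global only once. */
static void
wire_all(struct page *pages, int n)
{
	int newly_wired = 0;

	for (int i = 0; i < n; i++) {
		if (pages[i].wire_count++ == 0) {
			newly_wired++;	/* first wire: page joins the wired set */
		}
	}
	if (newly_wired) {
		pthread_mutex_lock(&g_lock);
		g_wired += newly_wired;	/* one global update per batch */
		pthread_mutex_unlock(&g_lock);
	}
}

int
main(void)
{
	struct page pages[8] = { { 0 } };

	wire_all(pages, 8);
	return g_wired == 8 ? 0 : 1;
}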
8328 | ||
39037602 | 8329 | |
316670eb A |
8330 | void |
8331 | vm_object_set_pmap_cache_attr( | |
0a7de745 A |
8332 | vm_object_t object, |
8333 | upl_page_info_array_t user_page_list, | |
8334 | unsigned int num_pages, | |
8335 | boolean_t batch_pmap_op) | |
316670eb A |
8336 | { |
8337 | unsigned int cache_attr = 0; | |
8338 | ||
8339 | cache_attr = object->wimg_bits & VM_WIMG_MASK; | |
8340 | assert(user_page_list); | |
8341 | if (cache_attr != VM_WIMG_USE_DEFAULT) { | |
8342 | PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op); | |
8343 | } | |
8344 | } | |
55e303ae | 8345 | |
3e170ce0 | 8346 | |
0a7de745 A |
8347 | boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t); |
8348 | kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*); | |
3e170ce0 A |
8349 | |
8350 | ||
8351 | ||
8352 | boolean_t | |
8353 | vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list, | |
0a7de745 | 8354 | wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag) |
3e170ce0 | 8355 | { |
0a7de745 A |
8356 | vm_page_t dst_page; |
8357 | unsigned int entry; | |
8358 | int page_count; | |
8359 | int delayed_unlock = 0; | |
8360 | boolean_t retval = TRUE; | |
8361 | ppnum_t phys_page; | |
3e170ce0 A |
8362 | |
8363 | vm_object_lock_assert_exclusive(object); | |
8364 | assert(object->purgable != VM_PURGABLE_VOLATILE); | |
8365 | assert(object->purgable != VM_PURGABLE_EMPTY); | |
8366 | assert(object->pager == NULL); | |
8367 | assert(object->copy == NULL); | |
8368 | assert(object->shadow == NULL); | |
8369 | ||
3e170ce0 | 8370 | page_count = object->resident_page_count; |
39037602 | 8371 | dst_page = (vm_page_t)vm_page_queue_first(&object->memq); |
3e170ce0 A |
8372 | |
8373 | vm_page_lock_queues(); | |
8374 | ||
8375 | while (page_count--) { | |
d9a64523 A |
8376 | if (dst_page->vmp_busy || |
8377 | dst_page->vmp_fictitious || | |
8378 | dst_page->vmp_absent || | |
8379 | dst_page->vmp_error || | |
8380 | dst_page->vmp_cleaning || | |
8381 | dst_page->vmp_restart || | |
8382 | dst_page->vmp_laundry) { | |
3e170ce0 A |
8383 | retval = FALSE; |
8384 | goto done; | |
8385 | } | |
d9a64523 | 8386 | if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) { |
3e170ce0 A |
8387 | retval = FALSE; |
8388 | goto done; | |
8389 | } | |
d9a64523 | 8390 | dst_page->vmp_reference = TRUE; |
3e170ce0 A |
8391 | |
8392 | vm_page_wire(dst_page, tag, FALSE); | |
8393 | ||
8394 | if (!(cntrl_flags & UPL_COPYOUT_FROM)) { | |
8395 | SET_PAGE_DIRTY(dst_page, FALSE); | |
8396 | } | |
d9a64523 | 8397 | entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE); |
3e170ce0 | 8398 | assert(entry >= 0 && entry < object->resident_page_count); |
cb323159 | 8399 | lite_list[entry >> 5] |= 1U << (entry & 31); |
d9a64523 | 8400 | |
39037602 A |
8401 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); |
8402 | ||
0a7de745 | 8403 | if (phys_page > upl->highest_page) { |
39037602 | 8404 | upl->highest_page = phys_page; |
0a7de745 | 8405 | } |
3e170ce0 A |
8406 | |
8407 | if (user_page_list) { | |
39037602 | 8408 | user_page_list[entry].phys_addr = phys_page; |
d9a64523 A |
8409 | user_page_list[entry].absent = dst_page->vmp_absent; |
8410 | user_page_list[entry].dirty = dst_page->vmp_dirty; | |
8411 | user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; | |
8412 | user_page_list[entry].precious = dst_page->vmp_precious; | |
3e170ce0 A |
8413 | user_page_list[entry].device = FALSE; |
8414 | user_page_list[entry].speculative = FALSE; | |
8415 | user_page_list[entry].cs_validated = FALSE; | |
8416 | user_page_list[entry].cs_tainted = FALSE; | |
0a7de745 | 8417 | user_page_list[entry].cs_nx = FALSE; |
3e170ce0 A |
8418 | user_page_list[entry].needed = FALSE; |
8419 | user_page_list[entry].mark = FALSE; | |
8420 | } | |
8421 | if (delayed_unlock++ > 256) { | |
8422 | delayed_unlock = 0; | |
8423 | lck_mtx_yield(&vm_page_queue_lock); | |
8424 | ||
8425 | VM_CHECK_MEMORYSTATUS; | |
8426 | } | |
d9a64523 | 8427 | dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq); |
3e170ce0 A |
8428 | } |
8429 | done: | |
8430 | vm_page_unlock_queues(); | |
8431 | ||
8432 | VM_CHECK_MEMORYSTATUS; | |
8433 | ||
0a7de745 | 8434 | return retval; |
3e170ce0 A |
8435 | } |
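The `lite_list[entry >> 5] |= 1U << (entry & 31)` lines above maintain a one-bit-per-page bitmap: wpl_array_t is an array of 32-bit words, so `entry >> 5` selects the word and `entry & 31` the bit within it. A stand-alone sketch of exactly that indexing (helper names are hypothetical):

#include <assert.h>
#include <stdint.h>

/* one bit per page, 32 pages per word -- same layout as the wpl bitmap */
static void
bitmap_set(uint32_t *bits, unsigned int entry)
{
	bits[entry >> 5] |= 1U << (entry & 31);
}

static int
bitmap_test(const uint32_t *bits, unsigned int entry)
{
	return (bits[entry >> 5] >> (entry & 31)) & 1;
}

int
main(void)
{
	uint32_t bits[4] = { 0 };	/* covers 128 pages */

	bitmap_set(bits, 0);
	bitmap_set(bits, 37);		/* word 1, bit 5 */
	assert(bitmap_test(bits, 37));
	assert(!bitmap_test(bits, 38));
	return 0;
}

Note also the `delayed_unlock++ > 256` check in the loop above: during a long resident-page scan, the page-queue lock is periodically yielded (lck_mtx_yield) so other lockers are not starved for the duration of the walk.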
8436 | ||
8437 | ||
8438 | kern_return_t | |
8439 | vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list, | |
0a7de745 A |
8440 | wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset, |
8441 | int page_count, int* page_grab_count) | |
3e170ce0 | 8442 | { |
0a7de745 A |
8443 | vm_page_t dst_page; |
8444 | boolean_t no_zero_fill = FALSE; | |
8445 | int interruptible; | |
8446 | int pages_wired = 0; | |
8447 | int pages_inserted = 0; | |
8448 | int entry = 0; | |
8449 | uint64_t delayed_ledger_update = 0; | |
8450 | kern_return_t ret = KERN_SUCCESS; | |
8451 | int grab_options; | |
8452 | ppnum_t phys_page; | |
3e170ce0 A |
8453 | |
8454 | vm_object_lock_assert_exclusive(object); | |
8455 | assert(object->purgable != VM_PURGABLE_VOLATILE); | |
8456 | assert(object->purgable != VM_PURGABLE_EMPTY); | |
8457 | assert(object->pager == NULL); | |
8458 | assert(object->copy == NULL); | |
8459 | assert(object->shadow == NULL); | |
8460 | ||
0a7de745 | 8461 | if (cntrl_flags & UPL_SET_INTERRUPTIBLE) { |
3e170ce0 | 8462 | interruptible = THREAD_ABORTSAFE; |
0a7de745 | 8463 | } else { |
3e170ce0 | 8464 | interruptible = THREAD_UNINT; |
0a7de745 | 8465 | } |
3e170ce0 | 8466 | |
0a7de745 A |
8467 | if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) { |
8468 | no_zero_fill = TRUE; | |
8469 | } | |
3e170ce0 | 8470 | |
39037602 A |
8471 | grab_options = 0; |
8472 | #if CONFIG_SECLUDED_MEMORY | |
8473 | if (object->can_grab_secluded) { | |
8474 | grab_options |= VM_PAGE_GRAB_SECLUDED; | |
8475 | } | |
8476 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
8477 | ||
3e170ce0 | 8478 | while (page_count--) { |
39037602 | 8479 | while ((dst_page = vm_page_grab_options(grab_options)) |
0a7de745 | 8480 | == VM_PAGE_NULL) { |
3e170ce0 A |
8481 | OSAddAtomic(page_count, &vm_upl_wait_for_pages); |
8482 | ||
8483 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); | |
8484 | ||
8485 | if (vm_page_wait(interruptible) == FALSE) { | |
8486 | /* | |
8487 | * interrupted case | |
8488 | */ | |
8489 | OSAddAtomic(-page_count, &vm_upl_wait_for_pages); | |
8490 | ||
8491 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1); | |
d9a64523 | 8492 | |
3e170ce0 A |
8493 | ret = MACH_SEND_INTERRUPTED; |
8494 | goto done; | |
8495 | } | |
8496 | OSAddAtomic(-page_count, &vm_upl_wait_for_pages); | |
8497 | ||
8498 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); | |
8499 | } | |
0a7de745 | 8500 | if (no_zero_fill == FALSE) { |
3e170ce0 | 8501 | vm_page_zero_fill(dst_page); |
0a7de745 | 8502 | } else { |
d9a64523 | 8503 | dst_page->vmp_absent = TRUE; |
0a7de745 | 8504 | } |
3e170ce0 | 8505 | |
d9a64523 | 8506 | dst_page->vmp_reference = TRUE; |
3e170ce0 A |
8507 | |
8508 | if (!(cntrl_flags & UPL_COPYOUT_FROM)) { | |
d9a64523 A |
8509 | SET_PAGE_DIRTY(dst_page, FALSE); |
8510 | } | |
8511 | if (dst_page->vmp_absent == FALSE) { | |
8512 | assert(dst_page->vmp_q_state == VM_PAGE_NOT_ON_Q); | |
8513 | assert(dst_page->vmp_wire_count == 0); | |
8514 | dst_page->vmp_wire_count++; | |
8515 | dst_page->vmp_q_state = VM_PAGE_IS_WIRED; | |
8516 | assert(dst_page->vmp_wire_count); | |
3e170ce0 A |
8517 | pages_wired++; |
8518 | PAGE_WAKEUP_DONE(dst_page); | |
8519 | } | |
8520 | pages_inserted++; | |
8521 | ||
8522 | vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update); | |
8523 | ||
cb323159 | 8524 | lite_list[entry >> 5] |= 1U << (entry & 31); |
d9a64523 | 8525 | |
39037602 A |
8526 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); |
8527 | ||
0a7de745 | 8528 | if (phys_page > upl->highest_page) { |
39037602 | 8529 | upl->highest_page = phys_page; |
0a7de745 | 8530 | } |
3e170ce0 A |
8531 | |
8532 | if (user_page_list) { | |
0a7de745 A |
8533 | user_page_list[entry].phys_addr = phys_page; |
8534 | user_page_list[entry].absent = dst_page->vmp_absent; | |
8535 | user_page_list[entry].dirty = dst_page->vmp_dirty; | |
8536 | user_page_list[entry].free_when_done = FALSE; | |
8537 | user_page_list[entry].precious = FALSE; | |
8538 | user_page_list[entry].device = FALSE; | |
3e170ce0 A |
8539 | user_page_list[entry].speculative = FALSE; |
8540 | user_page_list[entry].cs_validated = FALSE; | |
8541 | user_page_list[entry].cs_tainted = FALSE; | |
8542 | user_page_list[entry].cs_nx = FALSE; | |
8543 | user_page_list[entry].needed = FALSE; | |
8544 | user_page_list[entry].mark = FALSE; | |
8545 | } | |
8546 | entry++; | |
8547 | *dst_offset += PAGE_SIZE_64; | |
8548 | } | |
8549 | done: | |
8550 | if (pages_wired) { | |
8551 | vm_page_lockspin_queues(); | |
8552 | vm_page_wire_count += pages_wired; | |
8553 | vm_page_unlock_queues(); | |
8554 | } | |
8555 | if (pages_inserted) { | |
8556 | if (object->internal) { | |
8557 | OSAddAtomic(pages_inserted, &vm_page_internal_count); | |
8558 | } else { | |
8559 | OSAddAtomic(pages_inserted, &vm_page_external_count); | |
8560 | } | |
8561 | } | |
8562 | if (delayed_ledger_update) { | |
0a7de745 A |
8563 | task_t owner; |
8564 | int ledger_idx_volatile; | |
8565 | int ledger_idx_nonvolatile; | |
8566 | int ledger_idx_volatile_compressed; | |
8567 | int ledger_idx_nonvolatile_compressed; | |
8568 | boolean_t do_footprint; | |
3e170ce0 | 8569 | |
d9a64523 | 8570 | owner = VM_OBJECT_OWNER(object); |
3e170ce0 A |
8571 | assert(owner); |
8572 | ||
d9a64523 | 8573 | vm_object_ledger_tag_ledgers(object, |
0a7de745 A |
8574 | &ledger_idx_volatile, |
8575 | &ledger_idx_nonvolatile, | |
8576 | &ledger_idx_volatile_compressed, | |
8577 | &ledger_idx_nonvolatile_compressed, | |
8578 | &do_footprint); | |
d9a64523 | 8579 | |
3e170ce0 A |
8580 | /* more non-volatile bytes */ |
8581 | ledger_credit(owner->ledger, | |
0a7de745 A |
8582 | ledger_idx_nonvolatile, |
8583 | delayed_ledger_update); | |
d9a64523 A |
8584 | if (do_footprint) { |
8585 | /* more footprint */ | |
8586 | ledger_credit(owner->ledger, | |
0a7de745 A |
8587 | task_ledgers.phys_footprint, |
8588 | delayed_ledger_update); | |
d9a64523 | 8589 | } |
3e170ce0 | 8590 | } |
d9a64523 A |
8591 | |
8592 | assert(page_grab_count); | |
8593 | *page_grab_count = pages_inserted; | |
8594 | ||
0a7de745 | 8595 | return ret; |
3e170ce0 A |
8596 | } |
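The inner grab loop in vm_object_iopl_wire_empty() is the classic allocate-or-wait shape: advertise how many pages are still needed (vm_upl_wait_for_pages), sleep in vm_page_wait() until the free pool is replenished, and unwind with MACH_SEND_INTERRUPTED if an interruptible wait was aborted. A user-space analogue with a condition variable (pool and pool_grab_or_wait are hypothetical, not xnu API):

#include <pthread.h>
#include <stdbool.h>

/* hypothetical free pool guarded by a mutex/condvar -- not xnu API */
struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  replenished;
	int             free_count;
	bool            shutting_down;	/* plays the "interrupted" role */
};

/* Returns true and takes one item, or false if interrupted while waiting. */
static bool
pool_grab_or_wait(struct pool *p)
{
	bool got = false;

	pthread_mutex_lock(&p->lock);
	while (p->free_count == 0 && !p->shutting_down) {
		/* equivalent of vm_page_wait(): sleep until items appear */
		pthread_cond_wait(&p->replenished, &p->lock);
	}
	if (p->free_count > 0) {
		p->free_count--;
		got = true;
	}
	pthread_mutex_unlock(&p->lock);
	return got;		/* false == the MACH_SEND_INTERRUPTED path */
}

int
main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.replenished = PTHREAD_COND_INITIALIZER,
		.free_count = 1,
	};
	return pool_grab_or_wait(&p) ? 0 : 1;
}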
8597 | ||
8598 | ||
3e170ce0 | 8599 | |
55e303ae A |
8600 | kern_return_t |
8601 | vm_object_iopl_request( | |
0a7de745 A |
8602 | vm_object_t object, |
8603 | vm_object_offset_t offset, | |
8604 | upl_size_t size, | |
8605 | upl_t *upl_ptr, | |
8606 | upl_page_info_array_t user_page_list, | |
8607 | unsigned int *page_list_count, | |
8608 | upl_control_flags_t cntrl_flags, | |
8609 | vm_tag_t tag) | |
55e303ae | 8610 | { |
0a7de745 A |
8611 | vm_page_t dst_page; |
8612 | vm_object_offset_t dst_offset; | |
8613 | upl_size_t xfer_size; | |
8614 | upl_t upl = NULL; | |
8615 | unsigned int entry; | |
8616 | wpl_array_t lite_list = NULL; | |
8617 | int no_zero_fill = FALSE; | |
8618 | unsigned int size_in_pages; | |
8619 | int page_grab_count = 0; | |
8620 | u_int32_t psize; | |
8621 | kern_return_t ret; | |
8622 | vm_prot_t prot; | |
d9a64523 | 8623 | struct vm_object_fault_info fault_info = {}; |
0a7de745 A |
8624 | struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; |
8625 | struct vm_page_delayed_work *dwp; | |
8626 | int dw_count; | |
8627 | int dw_limit; | |
8628 | int dw_index; | |
8629 | boolean_t caller_lookup; | |
8630 | int io_tracking_flag = 0; | |
8631 | int interruptible; | |
8632 | ppnum_t phys_page; | |
8633 | ||
8634 | boolean_t set_cache_attr_needed = FALSE; | |
8635 | boolean_t free_wired_pages = FALSE; | |
8636 | boolean_t fast_path_empty_req = FALSE; | |
8637 | boolean_t fast_path_full_req = FALSE; | |
8638 | ||
8639 | #if DEVELOPMENT || DEBUG | |
8640 | task_t task = current_task(); | |
8641 | #endif /* DEVELOPMENT || DEBUG */ | |
55e303ae | 8642 | |
91447636 A |
8643 | if (cntrl_flags & ~UPL_VALID_FLAGS) { |
8644 | /* | |
8645 | * For forward compatibility's sake, | |
8646 | * reject any unknown flag. | |
8647 | */ | |
8648 | return KERN_INVALID_VALUE; | |
8649 | } | |
0a7de745 A |
8650 | if (vm_lopage_needed == FALSE) { |
8651 | cntrl_flags &= ~UPL_NEED_32BIT_ADDR; | |
8652 | } | |
0c530ab8 A |
8653 | |
8654 | if (cntrl_flags & UPL_NEED_32BIT_ADDR) { | |
0a7de745 A |
8655 | if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) { |
8656 | return KERN_INVALID_VALUE; | |
8657 | } | |
0c530ab8 A |
8658 | |
8659 | if (object->phys_contiguous) { | |
0a7de745 A |
8660 | if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) { |
8661 | return KERN_INVALID_ADDRESS; | |
8662 | } | |
d9a64523 | 8663 | |
0a7de745 A |
8664 | if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) { |
8665 | return KERN_INVALID_ADDRESS; | |
8666 | } | |
0c530ab8 A |
8667 | } |
8668 | } | |
0a7de745 A |
8669 | if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) { |
8670 | no_zero_fill = TRUE; | |
8671 | } | |
91447636 | 8672 | |
0a7de745 | 8673 | if (cntrl_flags & UPL_COPYOUT_FROM) { |
55e303ae | 8674 | prot = VM_PROT_READ; |
0a7de745 | 8675 | } else { |
55e303ae | 8676 | prot = VM_PROT_READ | VM_PROT_WRITE; |
0a7de745 | 8677 | } |
55e303ae | 8678 | |
0a7de745 | 8679 | if ((!object->internal) && (object->paging_offset != 0)) { |
2d21ac55 | 8680 | panic("vm_object_iopl_request: external object with non-zero paging offset\n"); |
0a7de745 | 8681 | } |
2d21ac55 | 8682 | |
d9a64523 A |
8683 | VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0); |
8684 | ||
fe8ab488 | 8685 | #if CONFIG_IOSCHED || UPL_DEBUG |
0a7de745 | 8686 | if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) { |
fe8ab488 | 8687 | io_tracking_flag |= UPL_CREATE_IO_TRACKING; |
0a7de745 | 8688 | } |
fe8ab488 A |
8689 | #endif |
8690 | ||
8691 | #if CONFIG_IOSCHED | |
8692 | if (object->io_tracking) { | |
8693 | /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */ | |
0a7de745 | 8694 | if (object != kernel_object) { |
fe8ab488 | 8695 | io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP; |
0a7de745 | 8696 | } |
fe8ab488 A |
8697 | } |
8698 | #endif | |
2d21ac55 | 8699 | |
0a7de745 A |
8700 | if (object->phys_contiguous) { |
8701 | psize = PAGE_SIZE; | |
8702 | } else { | |
8703 | psize = size; | |
8704 | } | |
2d21ac55 A |
8705 | |
8706 | if (cntrl_flags & UPL_SET_INTERNAL) { | |
0a7de745 | 8707 | upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); |
2d21ac55 A |
8708 | |
8709 | user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); | |
8710 | lite_list = (wpl_array_t) (((uintptr_t)user_page_list) + | |
0a7de745 | 8711 | ((psize / PAGE_SIZE) * sizeof(upl_page_info_t))); |
b0d623f7 A |
8712 | if (size == 0) { |
8713 | user_page_list = NULL; | |
8714 | lite_list = NULL; | |
8715 | } | |
2d21ac55 | 8716 | } else { |
0a7de745 | 8717 | upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); |
55e303ae | 8718 | |
2d21ac55 | 8719 | lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); |
b0d623f7 A |
8720 | if (size == 0) { |
8721 | lite_list = NULL; | |
8722 | } | |
55e303ae | 8723 | } |
0a7de745 A |
8724 | if (user_page_list) { |
8725 | user_page_list[0].device = FALSE; | |
8726 | } | |
2d21ac55 | 8727 | *upl_ptr = upl; |
55e303ae | 8728 | |
d9a64523 A |
8729 | if (cntrl_flags & UPL_NOZEROFILLIO) { |
8730 | DTRACE_VM4(upl_nozerofillio, | |
0a7de745 A |
8731 | vm_object_t, object, |
8732 | vm_object_offset_t, offset, | |
8733 | upl_size_t, size, | |
8734 | upl_t, upl); | |
d9a64523 A |
8735 | } |
8736 | ||
2d21ac55 A |
8737 | upl->map_object = object; |
8738 | upl->size = size; | |
8739 | ||
6d2010ae A |
8740 | size_in_pages = size / PAGE_SIZE; |
8741 | ||
b0d623f7 A |
8742 | if (object == kernel_object && |
8743 | !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) { | |
8744 | upl->flags |= UPL_KERNEL_OBJECT; | |
8745 | #if UPL_DEBUG | |
8746 | vm_object_lock(object); | |
8747 | #else | |
8748 | vm_object_lock_shared(object); | |
8749 | #endif | |
8750 | } else { | |
8751 | vm_object_lock(object); | |
8752 | vm_object_activity_begin(object); | |
8753 | } | |
2d21ac55 A |
8754 | /* |
8755 | * paging in progress also protects the paging_offset | |
8756 | */ | |
8757 | upl->offset = offset + object->paging_offset; | |
55e303ae | 8758 | |
b0d623f7 A |
8759 | if (cntrl_flags & UPL_BLOCK_ACCESS) { |
8760 | /* | |
316670eb | 8761 | * The user requested that access to the pages in this UPL |
b0d623f7 A |
8762 | * be blocked until the UPL is committed or aborted. |
8763 | */ | |
8764 | upl->flags |= UPL_ACCESS_BLOCKED; | |
8765 | } | |
8766 | ||
fe8ab488 A |
8767 | #if CONFIG_IOSCHED || UPL_DEBUG |
8768 | if (upl->flags & UPL_TRACKED_BY_OBJECT) { | |
316670eb | 8769 | vm_object_activity_begin(object); |
2d21ac55 | 8770 | queue_enter(&object->uplq, upl, upl_t, uplq); |
fe8ab488 A |
8771 | } |
8772 | #endif | |
8773 | ||
8774 | if (object->phys_contiguous) { | |
b0d623f7 A |
8775 | if (upl->flags & UPL_ACCESS_BLOCKED) { |
8776 | assert(!object->blocked_access); | |
8777 | object->blocked_access = TRUE; | |
8778 | } | |
8779 | ||
2d21ac55 | 8780 | vm_object_unlock(object); |
55e303ae | 8781 | |
2d21ac55 A |
8782 | /* |
8783 | * don't need any shadow mappings for this one | |
8784 | * since it is already I/O memory | |
8785 | */ | |
8786 | upl->flags |= UPL_DEVICE_MEMORY; | |
55e303ae | 8787 | |
0a7de745 | 8788 | upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT); |
2d21ac55 A |
8789 | |
8790 | if (user_page_list) { | |
0a7de745 | 8791 | user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT); |
2d21ac55 | 8792 | user_page_list[0].device = TRUE; |
55e303ae | 8793 | } |
2d21ac55 | 8794 | if (page_list_count != NULL) { |
0a7de745 A |
8795 | if (upl->flags & UPL_INTERNAL) { |
8796 | *page_list_count = 0; | |
8797 | } else { | |
8798 | *page_list_count = 1; | |
8799 | } | |
55e303ae | 8800 | } |
d9a64523 A |
8801 | |
8802 | VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0); | |
0a7de745 A |
8803 | #if DEVELOPMENT || DEBUG |
8804 | if (task != NULL) { | |
8805 | ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); | |
8806 | } | |
8807 | #endif /* DEVELOPMENT || DEBUG */ | |
2d21ac55 | 8808 | return KERN_SUCCESS; |
55e303ae | 8809 | } |
39236c6e | 8810 | if (object != kernel_object && object != compressor_object) { |
b0d623f7 A |
8811 | /* |
8812 | * Protect user space from future COW operations | |
8813 | */ | |
fe8ab488 A |
8814 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
8815 | if (!object->true_share && | |
8816 | vm_object_tracking_inited) { | |
8817 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
8818 | int num = 0; | |
8819 | ||
8820 | num = OSBacktrace(bt, | |
0a7de745 | 8821 | VM_OBJECT_TRACKING_BTDEPTH); |
fe8ab488 | 8822 | btlog_add_entry(vm_object_tracking_btlog, |
0a7de745 A |
8823 | object, |
8824 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
8825 | bt, | |
8826 | num); | |
fe8ab488 A |
8827 | } |
8828 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
8829 | ||
39037602 | 8830 | vm_object_lock_assert_exclusive(object); |
b0d623f7 | 8831 | object->true_share = TRUE; |
55e303ae | 8832 | |
0a7de745 | 8833 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { |
b0d623f7 | 8834 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
0a7de745 | 8835 | } |
b0d623f7 | 8836 | } |
91447636 | 8837 | |
b0d623f7 A |
8838 | if (!(cntrl_flags & UPL_COPYOUT_FROM) && |
8839 | object->copy != VM_OBJECT_NULL) { | |
91447636 | 8840 | /* |
b0d623f7 A |
8841 | * Honor copy-on-write obligations |
8842 | * | |
8843 | * The caller is gathering these pages and | |
8844 | * might modify their contents. We need to | |
8845 | * make sure that the copy object has its own | |
8846 | * private copies of these pages before we let | |
8847 | * the caller modify them. | |
8848 | * | |
8849 | * NOTE: someone else could map the original object | |
8850 | * after we've done this copy-on-write here, and they | |
8851 | * could then see an inconsistent picture of the memory | |
8852 | * while it's being modified via the UPL. To prevent this, | |
8853 | * we would have to block access to these pages until the | |
8854 | * UPL is released. We could use the UPL_BLOCK_ACCESS | |
8855 | * code path for that... | |
91447636 | 8856 | */ |
b0d623f7 | 8857 | vm_object_update(object, |
0a7de745 A |
8858 | offset, |
8859 | size, | |
8860 | NULL, | |
8861 | NULL, | |
8862 | FALSE, /* should_return */ | |
8863 | MEMORY_OBJECT_COPY_SYNC, | |
8864 | VM_PROT_NO_CHANGE); | |
d9a64523 A |
8865 | VM_PAGEOUT_DEBUG(iopl_cow, 1); |
8866 | VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT)); | |
55e303ae | 8867 | } |
3e170ce0 A |
8868 | if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) && |
8869 | object->purgable != VM_PURGABLE_VOLATILE && | |
8870 | object->purgable != VM_PURGABLE_EMPTY && | |
8871 | object->copy == NULL && | |
8872 | size == object->vo_size && | |
8873 | offset == 0 && | |
8874 | object->shadow == NULL && | |
0a7de745 A |
8875 | object->pager == NULL) { |
8876 | if (object->resident_page_count == size_in_pages) { | |
3e170ce0 A |
8877 | assert(object != compressor_object); |
8878 | assert(object != kernel_object); | |
8879 | fast_path_full_req = TRUE; | |
0a7de745 | 8880 | } else if (object->resident_page_count == 0) { |
3e170ce0 A |
8881 | assert(object != compressor_object); |
8882 | assert(object != kernel_object); | |
8883 | fast_path_empty_req = TRUE; | |
8884 | set_cache_attr_needed = TRUE; | |
8885 | } | |
8886 | } | |
8887 | ||
0a7de745 | 8888 | if (cntrl_flags & UPL_SET_INTERRUPTIBLE) { |
fe8ab488 | 8889 | interruptible = THREAD_ABORTSAFE; |
0a7de745 | 8890 | } else { |
fe8ab488 | 8891 | interruptible = THREAD_UNINT; |
0a7de745 | 8892 | } |
b0d623f7 | 8893 | |
55e303ae | 8894 | entry = 0; |
2d21ac55 A |
8895 | |
8896 | xfer_size = size; | |
8897 | dst_offset = offset; | |
fe8ab488 A |
8898 | dw_count = 0; |
8899 | ||
3e170ce0 | 8900 | if (fast_path_full_req) { |
0a7de745 | 8901 | if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) { |
3e170ce0 | 8902 | goto finish; |
0a7de745 | 8903 | } |
3e170ce0 A |
8904 | /* |
8905 | * we couldn't complete the processing of this request on the fast path | |
8906 | * so fall through to the slow path and finish up | |
8907 | */ | |
3e170ce0 | 8908 | } else if (fast_path_empty_req) { |
3e170ce0 A |
8909 | if (cntrl_flags & UPL_REQUEST_NO_FAULT) { |
8910 | ret = KERN_MEMORY_ERROR; | |
8911 | goto return_err; | |
fe8ab488 | 8912 | } |
d9a64523 A |
8913 | ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, tag, &dst_offset, size_in_pages, &page_grab_count); |
8914 | ||
3e170ce0 A |
8915 | if (ret) { |
8916 | free_wired_pages = TRUE; | |
8917 | goto return_err; | |
fe8ab488 A |
8918 | } |
8919 | goto finish; | |
8920 | } | |
2d21ac55 A |
8921 | |
8922 | fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; | |
2d21ac55 A |
8923 | fault_info.lo_offset = offset; |
8924 | fault_info.hi_offset = offset + xfer_size; | |
fe8ab488 A |
8925 | fault_info.mark_zf_absent = TRUE; |
8926 | fault_info.interruptible = interruptible; | |
8927 | fault_info.batch_pmap_op = TRUE; | |
b0d623f7 A |
8928 | |
8929 | dwp = &dw_array[0]; | |
6d2010ae | 8930 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
2d21ac55 | 8931 | |
55e303ae | 8932 | while (xfer_size) { |
0a7de745 | 8933 | vm_fault_return_t result; |
b0d623f7 A |
8934 | |
8935 | dwp->dw_mask = 0; | |
2d21ac55 | 8936 | |
3e170ce0 A |
8937 | if (fast_path_full_req) { |
8938 | /* | |
8939 | * if we get here, it means that we ran into a page | |
8940 | * state we couldn't handle in the fast path and | |
8941 | * bailed out to the slow path... since the order | |
8942 | * we look at pages is different between the 2 paths, | |
8943 | * the following check is needed to determine whether | |
8944 | * this page was already processed in the fast path | |
8945 | */ | |
0a7de745 | 8946 | if (lite_list[entry >> 5] & (1 << (entry & 31))) { |
3e170ce0 | 8947 | goto skip_page; |
0a7de745 | 8948 | } |
3e170ce0 | 8949 | } |
55e303ae A |
8950 | dst_page = vm_page_lookup(object, dst_offset); |
8951 | ||
b0d623f7 | 8952 | if (dst_page == VM_PAGE_NULL || |
d9a64523 A |
8953 | dst_page->vmp_busy || |
8954 | dst_page->vmp_error || | |
8955 | dst_page->vmp_restart || | |
8956 | dst_page->vmp_absent || | |
8957 | dst_page->vmp_fictitious) { | |
0a7de745 A |
8958 | if (object == kernel_object) { |
8959 | panic("vm_object_iopl_request: missing/bad page in kernel object\n"); | |
8960 | } | |
8961 | if (object == compressor_object) { | |
8962 | panic("vm_object_iopl_request: missing/bad page in compressor object\n"); | |
8963 | } | |
b0d623f7 | 8964 | |
0a7de745 A |
8965 | if (cntrl_flags & UPL_REQUEST_NO_FAULT) { |
8966 | ret = KERN_MEMORY_ERROR; | |
8967 | goto return_err; | |
8968 | } | |
8969 | set_cache_attr_needed = TRUE; | |
2d21ac55 | 8970 | |
0a7de745 A |
8971 | /* |
8972 | * We just looked up the page and the result remains valid | |
8973 | * until the object lock is release, so send it to | |
8974 | * vm_fault_page() (as "dst_page"), to avoid having to | |
8975 | * look it up again there. | |
8976 | */ | |
8977 | caller_lookup = TRUE; | |
2d21ac55 | 8978 | |
0a7de745 A |
8979 | do { |
8980 | vm_page_t top_page; | |
8981 | kern_return_t error_code; | |
55e303ae | 8982 | |
0a7de745 | 8983 | fault_info.cluster_size = xfer_size; |
b0d623f7 | 8984 | |
0a7de745 | 8985 | vm_object_paging_begin(object); |
2d21ac55 | 8986 | |
0a7de745 A |
8987 | result = vm_fault_page(object, dst_offset, |
8988 | prot | VM_PROT_WRITE, FALSE, | |
8989 | caller_lookup, | |
8990 | &prot, &dst_page, &top_page, | |
8991 | (int *)0, | |
8992 | &error_code, no_zero_fill, | |
8993 | FALSE, &fault_info); | |
39236c6e | 8994 | |
0a7de745 A |
8995 | /* our lookup is no longer valid at this point */ |
8996 | caller_lookup = FALSE; | |
2d21ac55 | 8997 | |
0a7de745 A |
8998 | switch (result) { |
8999 | case VM_FAULT_SUCCESS: | |
9000 | page_grab_count++; | |
55e303ae | 9001 | |
0a7de745 A |
9002 | if (!dst_page->vmp_absent) { |
9003 | PAGE_WAKEUP_DONE(dst_page); | |
9004 | } else { | |
9005 | /* | |
9006 | * we only get back an absent page if we | |
9007 | * requested that it not be zero-filled | |
9008 | * because we are about to fill it via I/O | |
9009 | * | |
9010 | * absent pages should be left BUSY | |
9011 | * to prevent them from being faulted | |
9012 | * into an address space before we've | |
9013 | * had a chance to complete the I/O on | |
9014 | * them since they may contain info that | |
9015 | * shouldn't be seen by the faulting task | |
9016 | */ | |
9017 | } | |
d41d1dae | 9018 | /* |
0a7de745 A |
9019 | * Release paging references and |
9020 | * top-level placeholder page, if any. | |
d41d1dae | 9021 | */ |
0a7de745 A |
9022 | if (top_page != VM_PAGE_NULL) { |
9023 | vm_object_t local_object; | |
2d21ac55 | 9024 | |
0a7de745 | 9025 | local_object = VM_PAGE_OBJECT(top_page); |
d9a64523 | 9026 | |
0a7de745 A |
9027 | /* |
9028 | * comparing 2 packed pointers | |
9029 | */ | |
9030 | if (top_page->vmp_object != dst_page->vmp_object) { | |
9031 | vm_object_lock(local_object); | |
9032 | VM_PAGE_FREE(top_page); | |
9033 | vm_object_paging_end(local_object); | |
9034 | vm_object_unlock(local_object); | |
9035 | } else { | |
9036 | VM_PAGE_FREE(top_page); | |
9037 | vm_object_paging_end(local_object); | |
9038 | } | |
55e303ae | 9039 | } |
0a7de745 A |
9040 | vm_object_paging_end(object); |
9041 | break; | |
d9a64523 | 9042 | |
0a7de745 A |
9043 | case VM_FAULT_RETRY: |
9044 | vm_object_lock(object); | |
9045 | break; | |
55e303ae | 9046 | |
0a7de745 A |
9047 | case VM_FAULT_MEMORY_SHORTAGE: |
9048 | OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages); | |
2d21ac55 | 9049 | |
0a7de745 | 9050 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); |
55e303ae | 9051 | |
0a7de745 A |
9052 | if (vm_page_wait(interruptible)) { |
9053 | OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); | |
6d2010ae | 9054 | |
0a7de745 A |
9055 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); |
9056 | vm_object_lock(object); | |
6d2010ae | 9057 | |
0a7de745 A |
9058 | break; |
9059 | } | |
9060 | OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); | |
6d2010ae | 9061 | |
0a7de745 | 9062 | VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1); |
6d2010ae | 9063 | |
55e303ae A |
9064 | /* fall thru */ |
9065 | ||
0a7de745 A |
9066 | case VM_FAULT_INTERRUPTED: |
9067 | error_code = MACH_SEND_INTERRUPTED; | |
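/* fall thru */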
9068 | case VM_FAULT_MEMORY_ERROR: | |
9069 | memory_error: | |
9070 | ret = (error_code ? error_code : KERN_MEMORY_ERROR); |
0c530ab8 | 9071 | |
0a7de745 A |
9072 | vm_object_lock(object); |
9073 | goto return_err; | |
b0d623f7 | 9074 | |
0a7de745 A |
9075 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
9076 | /* success but no page: fail */ | |
9077 | vm_object_paging_end(object); | |
9078 | vm_object_unlock(object); | |
9079 | goto memory_error; | |
b0d623f7 | 9080 | |
0a7de745 A |
9081 | default: |
9082 | panic("vm_object_iopl_request: unexpected error" | |
9083 | " 0x%x from vm_fault_page()\n", result); | |
9084 | } | |
9085 | } while (result != VM_FAULT_SUCCESS); | |
55e303ae | 9086 | } |
39037602 A |
9087 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); |
9088 | ||
0a7de745 | 9089 | if (upl->flags & UPL_KERNEL_OBJECT) { |
b0d623f7 | 9090 | goto record_phys_addr; |
0a7de745 | 9091 | } |
b0d623f7 | 9092 | |
d9a64523 A |
9093 | if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { |
9094 | dst_page->vmp_busy = TRUE; | |
39236c6e A |
9095 | goto record_phys_addr; |
9096 | } | |
9097 | ||
d9a64523 | 9098 | if (dst_page->vmp_cleaning) { |
b0d623f7 | 9099 | /* |
316670eb | 9100 | * Someone else is cleaning this page in place. |
b0d623f7 A |
9101 | * In theory, we should be able to proceed and use this |
9102 | * page but they'll probably end up clearing the "busy" | |
9103 | * bit on it in upl_commit_range() but they didn't set | |
9104 | * it, so they would clear our "busy" bit and open | |
9105 | * us to race conditions. | |
9106 | * We'd better wait for the cleaning to complete and | |
9107 | * then try again. | |
9108 | */ | |
0a7de745 | 9109 | VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1); |
b0d623f7 A |
9110 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
9111 | continue; | |
9112 | } | |
0a7de745 | 9113 | if (dst_page->vmp_laundry) { |
316670eb | 9114 | vm_pageout_steal_laundry(dst_page, FALSE); |
0a7de745 | 9115 | } |
39037602 | 9116 | |
0a7de745 A |
9117 | if ((cntrl_flags & UPL_NEED_32BIT_ADDR) && |
9118 | phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) { | |
9119 | vm_page_t low_page; | |
9120 | int refmod; | |
0c530ab8 A |
9121 | |
9122 | /* | |
9123 | * support devices that can't DMA above 32 bits |
9124 | * by substituting pages from a pool of low address |
9125 | * memory for any pages we find above the 4G mark. |
9126 | * We can't substitute if the page is already wired, because |
9127 | * we don't know whether that physical address has been |
9128 | * handed out to some other 64-bit-capable DMA device to use. |
9129 | */ | |
b0d623f7 | 9130 | if (VM_PAGE_WIRED(dst_page)) { |
0a7de745 | 9131 | ret = KERN_PROTECTION_FAILURE; |
0c530ab8 A |
9132 | goto return_err; |
9133 | } | |
0c530ab8 A |
9134 | low_page = vm_page_grablo(); |
9135 | ||
9136 | if (low_page == VM_PAGE_NULL) { | |
0a7de745 | 9137 | ret = KERN_RESOURCE_SHORTAGE; |
0c530ab8 A |
9138 | goto return_err; |
9139 | } | |
9140 | /* | |
9141 | * from here until the vm_page_replace completes | |
9142 | * we mustn't drop the object lock... we don't |
9143 | * want anyone refaulting this page in and using | |
9144 | * it after we disconnect it... we want the fault | |
9145 | * to find the new page being substituted. | |
9146 | */ | |
0a7de745 A |
9147 | if (dst_page->vmp_pmapped) { |
9148 | refmod = pmap_disconnect(phys_page); | |
9149 | } else { | |
9150 | refmod = 0; | |
9151 | } | |
d41d1dae | 9152 | |
0a7de745 | 9153 | if (!dst_page->vmp_absent) { |
d41d1dae | 9154 | vm_page_copy(dst_page, low_page); |
0a7de745 | 9155 | } |
d9a64523 A |
9156 | |
9157 | low_page->vmp_reference = dst_page->vmp_reference; | |
9158 | low_page->vmp_dirty = dst_page->vmp_dirty; | |
9159 | low_page->vmp_absent = dst_page->vmp_absent; | |
0c530ab8 | 9160 | |
0a7de745 A |
9161 | if (refmod & VM_MEM_REFERENCED) { |
9162 | low_page->vmp_reference = TRUE; | |
9163 | } | |
316670eb | 9164 | if (refmod & VM_MEM_MODIFIED) { |
0a7de745 | 9165 | SET_PAGE_DIRTY(low_page, FALSE); |
316670eb | 9166 | } |
0c530ab8 | 9167 | |
0c530ab8 | 9168 | vm_page_replace(low_page, object, dst_offset); |
0c530ab8 A |
9169 | |
9170 | dst_page = low_page; | |
9171 | /* | |
9172 | * vm_page_grablo returned the page marked | |
9173 | * BUSY... we don't need a PAGE_WAKEUP_DONE | |
9174 | * here, because we've never dropped the object lock | |
9175 | */ | |
0a7de745 | 9176 | if (!dst_page->vmp_absent) { |
d9a64523 | 9177 | dst_page->vmp_busy = FALSE; |
0a7de745 | 9178 | } |
39037602 A |
9179 | |
9180 | phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); | |
0c530ab8 | 9181 | } |
0a7de745 | 9182 | if (!dst_page->vmp_busy) { |
d41d1dae | 9183 | dwp->dw_mask |= DW_vm_page_wire; |
0a7de745 | 9184 | } |
55e303ae | 9185 | |
91447636 A |
9186 | if (cntrl_flags & UPL_BLOCK_ACCESS) { |
9187 | /* | |
9188 | * Mark the page "busy" to block any future page fault | |
6d2010ae A |
9189 | * on this page in addition to wiring it. |
9190 | * We'll also remove the mapping | |
91447636 A |
9191 | * of all these pages before leaving this routine. |
9192 | */ | |
d9a64523 A |
9193 | assert(!dst_page->vmp_fictitious); |
9194 | dst_page->vmp_busy = TRUE; | |
91447636 | 9195 | } |
2d21ac55 A |
9196 | /* |
9197 | * expect the page to be used | |
9198 | * page queues lock must be held to set 'reference' | |
9199 | */ | |
b0d623f7 | 9200 | dwp->dw_mask |= DW_set_reference; |
55e303ae | 9201 | |
0a7de745 | 9202 | if (!(cntrl_flags & UPL_COPYOUT_FROM)) { |
d9a64523 | 9203 | SET_PAGE_DIRTY(dst_page, TRUE); |
cb323159 A |
9204 | /* |
9205 | * Page belonging to a code-signed object is about to | |
9206 | * be written. Mark it tainted and disconnect it from | |
9207 | * all pmaps so processes have to fault it back in and | |
9208 | * deal with the tainted bit. | |
9209 | */ | |
9210 | if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) { | |
9211 | dst_page->vmp_cs_tainted = TRUE; | |
9212 | vm_page_iopl_tainted++; | |
9213 | if (dst_page->vmp_pmapped) { | |
9214 | int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); | |
9215 | if (refmod & VM_MEM_REFERENCED) { | |
9216 | dst_page->vmp_reference = TRUE; | |
9217 | } | |
9218 | } | |
9219 | } | |
316670eb | 9220 | } |
d9a64523 | 9221 | if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) { |
39037602 | 9222 | pmap_sync_page_attributes_phys(phys_page); |
d9a64523 | 9223 | dst_page->vmp_written_by_kernel = FALSE; |
15129b1c A |
9224 | } |
9225 | ||
b0d623f7 | 9226 | record_phys_addr: |
0a7de745 | 9227 | if (dst_page->vmp_busy) { |
d41d1dae | 9228 | upl->flags |= UPL_HAS_BUSY; |
0a7de745 | 9229 | } |
d41d1dae | 9230 | |
cb323159 | 9231 | lite_list[entry >> 5] |= 1U << (entry & 31); |
55e303ae | 9232 | |
0a7de745 A |
9233 | if (phys_page > upl->highest_page) { |
9234 | upl->highest_page = phys_page; | |
9235 | } | |
55e303ae | 9236 | |
2d21ac55 | 9237 | if (user_page_list) { |
0a7de745 A |
9238 | user_page_list[entry].phys_addr = phys_page; |
9239 | user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; | |
9240 | user_page_list[entry].absent = dst_page->vmp_absent; | |
9241 | user_page_list[entry].dirty = dst_page->vmp_dirty; | |
9242 | user_page_list[entry].precious = dst_page->vmp_precious; | |
9243 | user_page_list[entry].device = FALSE; | |
316670eb | 9244 | user_page_list[entry].needed = FALSE; |
0a7de745 A |
9245 | if (dst_page->vmp_clustered == TRUE) { |
9246 | user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; | |
9247 | } else { | |
9248 | user_page_list[entry].speculative = FALSE; | |
9249 | } | |
d9a64523 A |
9250 | user_page_list[entry].cs_validated = dst_page->vmp_cs_validated; |
9251 | user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted; | |
9252 | user_page_list[entry].cs_nx = dst_page->vmp_cs_nx; | |
3e170ce0 | 9253 | user_page_list[entry].mark = FALSE; |
55e303ae | 9254 | } |
39236c6e | 9255 | if (object != kernel_object && object != compressor_object) { |
b0d623f7 A |
9256 | /* |
9257 | * someone is explicitly grabbing this page... | |
9258 | * update clustered and speculative state | |
d9a64523 | 9259 | * |
b0d623f7 | 9260 | */ |
0a7de745 | 9261 | if (dst_page->vmp_clustered) { |
fe8ab488 | 9262 | VM_PAGE_CONSUME_CLUSTERED(dst_page); |
0a7de745 | 9263 | } |
55e303ae | 9264 | } |
3e170ce0 | 9265 | skip_page: |
55e303ae A |
9266 | entry++; |
9267 | dst_offset += PAGE_SIZE_64; | |
9268 | xfer_size -= PAGE_SIZE; | |
b0d623f7 A |
9269 | |
9270 | if (dwp->dw_mask) { | |
6d2010ae | 9271 | VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count); |
b0d623f7 | 9272 | |
6d2010ae | 9273 | if (dw_count >= dw_limit) { |
5ba3f43e | 9274 | vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); |
d9a64523 | 9275 | |
b0d623f7 A |
9276 | dwp = &dw_array[0]; |
9277 | dw_count = 0; | |
9278 | } | |
9279 | } | |
55e303ae | 9280 | } |
3e170ce0 | 9281 | assert(entry == size_in_pages); |
55e303ae | 9282 | |
0a7de745 | 9283 | if (dw_count) { |
5ba3f43e | 9284 | vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); |
0a7de745 | 9285 | } |
fe8ab488 | 9286 | finish: |
0a7de745 | 9287 | if (user_page_list && set_cache_attr_needed == TRUE) { |
3e170ce0 | 9288 | vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE); |
0a7de745 | 9289 | } |
316670eb | 9290 | |
2d21ac55 | 9291 | if (page_list_count != NULL) { |
0a7de745 | 9292 | if (upl->flags & UPL_INTERNAL) { |
55e303ae | 9293 | *page_list_count = 0; |
0a7de745 | 9294 | } else if (*page_list_count > size_in_pages) { |
3e170ce0 | 9295 | *page_list_count = size_in_pages; |
0a7de745 | 9296 | } |
55e303ae | 9297 | } |
55e303ae | 9298 | vm_object_unlock(object); |
55e303ae | 9299 | |
91447636 A |
9300 | if (cntrl_flags & UPL_BLOCK_ACCESS) { |
9301 | /* | |
9302 | * We've marked all the pages "busy" so that future | |
9303 | * page faults will block. | |
9304 | * Now remove the mapping for these pages, so that they | |
9305 | * can't be accessed without causing a page fault. | |
9306 | */ | |
9307 | vm_object_pmap_protect(object, offset, (vm_object_size_t)size, | |
0a7de745 | 9308 | PMAP_NULL, 0, VM_PROT_NONE); |
b0d623f7 A |
9309 | assert(!object->blocked_access); |
9310 | object->blocked_access = TRUE; | |
91447636 | 9311 | } |
3e170ce0 | 9312 | |
d9a64523 | 9313 | VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0); |
0a7de745 A |
9314 | #if DEVELOPMENT || DEBUG |
9315 | if (task != NULL) { | |
9316 | ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); | |
9317 | } | |
9318 | #endif /* DEVELOPMENT || DEBUG */ | |
91447636 | 9319 | return KERN_SUCCESS; |
0c530ab8 | 9320 | |
0c530ab8 | 9321 | return_err: |
b0d623f7 | 9322 | dw_index = 0; |
0c530ab8 A |
9323 | |
9324 | for (; offset < dst_offset; offset += PAGE_SIZE) { | |
0b4c1975 A |
9325 | boolean_t need_unwire; |
9326 | ||
0a7de745 | 9327 | dst_page = vm_page_lookup(object, offset); |
0c530ab8 | 9328 | |
0a7de745 A |
9329 | if (dst_page == VM_PAGE_NULL) { |
9330 | panic("vm_object_iopl_request: Wired page missing. \n"); | |
9331 | } | |
2d21ac55 | 9332 | |
0b4c1975 | 9333 | /* |
d9a64523 | 9334 | * if we've already processed this page in an earlier |
0b4c1975 A |
9335 | * vm_page_do_delayed_work, we need to undo the wiring... we will |
9336 | * leave the dirty and reference bits on if they | |
9337 | * were set, since we don't have a good way of knowing | |
9338 | * what the previous state was and we won't get here | |
9339 | * under any normal circumstances... we will always | |
9340 | * clear BUSY and wakeup any waiters via vm_page_free | |
9341 | * or PAGE_WAKEUP_DONE | |
9342 | */ | |
9343 | need_unwire = TRUE; | |
9344 | ||
b0d623f7 A |
9345 | if (dw_count) { |
9346 | if (dw_array[dw_index].dw_m == dst_page) { | |
0b4c1975 A |
9347 | /* |
9348 | * still in the deferred work list | |
9349 | * which means we haven't yet called | |
9350 | * vm_page_wire on this page | |
9351 | */ | |
9352 | need_unwire = FALSE; | |
d41d1dae A |
9353 | |
9354 | dw_index++; | |
9355 | dw_count--; | |
b0d623f7 A |
9356 | } |
9357 | } | |
0b4c1975 A |
9358 | vm_page_lock_queues(); |
9359 | ||
d9a64523 | 9360 | if (dst_page->vmp_absent || free_wired_pages == TRUE) { |
d41d1dae | 9361 | vm_page_free(dst_page); |
0b4c1975 | 9362 | |
d41d1dae A |
9363 | need_unwire = FALSE; |
9364 | } else { | |
0a7de745 | 9365 | if (need_unwire == TRUE) { |
d41d1dae | 9366 | vm_page_unwire(dst_page, TRUE); |
0a7de745 | 9367 | } |
0b4c1975 | 9368 | |
0b4c1975 | 9369 | PAGE_WAKEUP_DONE(dst_page); |
6d2010ae | 9370 | } |
0c530ab8 | 9371 | vm_page_unlock_queues(); |
2d21ac55 | 9372 | |
0a7de745 | 9373 | if (need_unwire == TRUE) { |
0b4c1975 | 9374 | VM_STAT_INCR(reactivations); |
0a7de745 | 9375 | } |
0c530ab8 | 9376 | } |
b0d623f7 A |
9377 | #if UPL_DEBUG |
9378 | upl->upl_state = 2; | |
9379 | #endif | |
0a7de745 | 9380 | if (!(upl->flags & UPL_KERNEL_OBJECT)) { |
b0d623f7 | 9381 | vm_object_activity_end(object); |
316670eb | 9382 | vm_object_collapse(object, 0, TRUE); |
b0d623f7 | 9383 | } |
0c530ab8 A |
9384 | vm_object_unlock(object); |
9385 | upl_destroy(upl); | |
9386 | ||
d9a64523 | 9387 | VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0); |
0a7de745 A |
9388 | #if DEVELOPMENT || DEBUG |
9389 | if (task != NULL) { | |
9390 | ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); | |
9391 | } | |
9392 | #endif /* DEVELOPMENT || DEBUG */ | |
0c530ab8 | 9393 | return ret; |
1c79356b A |
9394 | } |
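The dw_array/dw_count/dw_limit machinery threaded through the slow path above is a batched-deferral idiom: per-page operations that need the page-queues lock are queued into a small on-stack array and applied in bulk by vm_page_do_delayed_work(), with a flush whenever the array fills and a final flush for the tail. A compact, runnable sketch of that flow (names hypothetical):

#include <stdio.h>

#define DW_LIMIT 8

struct work { int page_id; unsigned mask; };

/* Apply a batch of deferred per-page operations in one go, the way
 * vm_page_do_delayed_work() consumes dw_array[]. */
static void
do_delayed_work(struct work *batch, int count)
{
	for (int i = 0; i < count; i++) {
		printf("page %d: apply mask 0x%x\n", batch[i].page_id, batch[i].mask);
	}
}

int
main(void)
{
	struct work dw_array[DW_LIMIT];
	int dw_count = 0;

	for (int page = 0; page < 20; page++) {
		/* queue one unit of deferred work for this page */
		dw_array[dw_count].page_id = page;
		dw_array[dw_count].mask = 1u << (page % 3);
		if (++dw_count >= DW_LIMIT) {	/* batch full: flush */
			do_delayed_work(dw_array, dw_count);
			dw_count = 0;
		}
	}
	if (dw_count) {				/* flush the tail */
		do_delayed_work(dw_array, dw_count);
	}
	return 0;
}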
9395 | ||
91447636 A |
9396 | kern_return_t |
9397 | upl_transpose( | |
0a7de745 A |
9398 | upl_t upl1, |
9399 | upl_t upl2) | |
1c79356b | 9400 | { |
0a7de745 A |
9401 | kern_return_t retval; |
9402 | boolean_t upls_locked; | |
9403 | vm_object_t object1, object2; | |
1c79356b | 9404 | |
0a7de745 | 9405 | if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) { |
91447636 A |
9406 | return KERN_INVALID_ARGUMENT; |
9407 | } | |
d9a64523 | 9408 | |
91447636 | 9409 | upls_locked = FALSE; |
1c79356b | 9410 | |
91447636 A |
9411 | /* |
9412 | * Since we need to lock both UPLs at the same time, | |
9413 | * avoid deadlocks by always taking locks in the same order. | |
9414 | */ | |
9415 | if (upl1 < upl2) { | |
9416 | upl_lock(upl1); | |
9417 | upl_lock(upl2); | |
9418 | } else { | |
9419 | upl_lock(upl2); | |
9420 | upl_lock(upl1); | |
9421 | } | |
0a7de745 | 9422 | upls_locked = TRUE; /* the UPLs will need to be unlocked */ |
91447636 A |
9423 | |
9424 | object1 = upl1->map_object; | |
9425 | object2 = upl2->map_object; | |
9426 | ||
9427 | if (upl1->offset != 0 || upl2->offset != 0 || | |
9428 | upl1->size != upl2->size) { | |
9429 | /* | |
9430 | * We deal only with full objects, not subsets. | |
9431 | * That's because we exchange the entire backing store info | |
9432 | * for the objects: pager, resident pages, etc... We can't do | |
9433 | * only part of it. | |
9434 | */ | |
9435 | retval = KERN_INVALID_VALUE; | |
9436 | goto done; | |
9437 | } | |
9438 | ||
9439 | /* | |
9440 | * Transpose the VM objects' backing store. |
9441 | */ | |
9442 | retval = vm_object_transpose(object1, object2, | |
0a7de745 | 9443 | (vm_object_size_t) upl1->size); |
91447636 A |
9444 | |
9445 | if (retval == KERN_SUCCESS) { | |
9446 | /* | |
9447 | * Make each UPL point to the correct VM object, i.e. the | |
9448 | * object holding the pages that the UPL refers to... | |
9449 | */ | |
fe8ab488 A |
9450 | #if CONFIG_IOSCHED || UPL_DEBUG |
9451 | if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) { | |
9452 | vm_object_lock(object1); | |
9453 | vm_object_lock(object2); | |
9454 | } | |
0a7de745 | 9455 | if (upl1->flags & UPL_TRACKED_BY_OBJECT) { |
fe8ab488 | 9456 | queue_remove(&object1->uplq, upl1, upl_t, uplq); |
0a7de745 A |
9457 | } |
9458 | if (upl2->flags & UPL_TRACKED_BY_OBJECT) { | |
fe8ab488 | 9459 | queue_remove(&object2->uplq, upl2, upl_t, uplq); |
0a7de745 | 9460 | } |
2d21ac55 | 9461 | #endif |
91447636 A |
9462 | upl1->map_object = object2; |
9463 | upl2->map_object = object1; | |
fe8ab488 A |
9464 | |
9465 | #if CONFIG_IOSCHED || UPL_DEBUG | |
0a7de745 | 9466 | if (upl1->flags & UPL_TRACKED_BY_OBJECT) { |
fe8ab488 | 9467 | queue_enter(&object2->uplq, upl1, upl_t, uplq); |
0a7de745 A |
9468 | } |
9469 | if (upl2->flags & UPL_TRACKED_BY_OBJECT) { | |
fe8ab488 | 9470 | queue_enter(&object1->uplq, upl2, upl_t, uplq); |
0a7de745 | 9471 | } |
fe8ab488 A |
9472 | if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) { |
9473 | vm_object_unlock(object2); | |
9474 | vm_object_unlock(object1); | |
9475 | } | |
2d21ac55 | 9476 | #endif |
91447636 A |
9477 | } |
9478 | ||
9479 | done: | |
9480 | /* | |
9481 | * Cleanup. | |
9482 | */ | |
9483 | if (upls_locked) { | |
9484 | upl_unlock(upl1); | |
9485 | upl_unlock(upl2); | |
9486 | upls_locked = FALSE; | |
9487 | } | |
9488 | ||
9489 | return retval; | |
9490 | } | |
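upl_transpose() dodges the classic two-lock deadlock by imposing a total order: whichever UPL has the lower address is always locked first, so two threads transposing the same pair can never each hold one lock while waiting for the other. The same idiom in stand-alone form (note that ordering unrelated pointers with `<` is technically unspecified in ISO C, though it is the conventional kernel trick):

#include <pthread.h>

struct obj {
	pthread_mutex_t lock;
};

/* Always acquire the lower-addressed lock first (as "upl1 < upl2" above). */
static void
lock_pair(struct obj *a, struct obj *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void
unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int
main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&x, &y);	/* same order no matter how callers pass them */
	unlock_pair(&x, &y);
	return 0;
}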
9491 | ||
316670eb A |
9492 | void |
9493 | upl_range_needed( | |
0a7de745 A |
9494 | upl_t upl, |
9495 | int index, | |
9496 | int count) | |
316670eb | 9497 | { |
0a7de745 A |
9498 | upl_page_info_t *user_page_list; |
9499 | int size_in_pages; | |
316670eb | 9500 | |
0a7de745 | 9501 | if (!(upl->flags & UPL_INTERNAL) || count <= 0) { |
316670eb | 9502 | return; |
0a7de745 | 9503 | } |
316670eb A |
9504 | |
9505 | size_in_pages = upl->size / PAGE_SIZE; | |
9506 | ||
9507 | user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); | |
9508 | ||
0a7de745 | 9509 | while (count-- && index < size_in_pages) { |
316670eb | 9510 | user_page_list[index++].needed = TRUE; |
0a7de745 | 9511 | } |
316670eb A |
9512 | } |
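upl_range_needed() trusts the caller's index/count only so far: the `while (count-- && index < size_in_pages)` guard clamps the walk to the page-list bounds, so an oversized count simply stops at the end of the list instead of writing past it. The guard in isolation (hypothetical names; like the xnu routine, it assumes index >= 0):

#include <assert.h>
#include <stdbool.h>

/* Mark entries [index, index+count) as needed, clamped to the table size. */
static void
range_mark_needed(bool *needed, int size, int index, int count)
{
	if (count <= 0) {
		return;
	}
	while (count-- && index < size) {
		needed[index++] = true;
	}
}

int
main(void)
{
	bool needed[4] = { false };

	range_mark_needed(needed, 4, 2, 100);	/* oversized count is clamped */
	assert(needed[2] && needed[3]);
	assert(!needed[0] && !needed[1]);
	return 0;
}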
9513 | ||
9514 | ||
91447636 | 9515 | /* |
91447636 A |
9516 | * Reserve of virtual addresses in the kernel address space. |
9517 | * We need to map the physical pages in the kernel, so that we | |
5ba3f43e | 9518 | * can call the code-signing or slide routines with a kernel |
91447636 A |
9519 | * virtual address. We keep this pool of pre-allocated kernel |
9520 | * virtual addresses so that we don't have to scan the kernel's | |
5ba3f43e | 9521 | * virtual address space each time we need to work with |
91447636 | 9522 | * a physical page. |
91447636 | 9523 | */ |
cb323159 | 9524 | decl_simple_lock_data(, vm_paging_lock); |
0a7de745 | 9525 | #define VM_PAGING_NUM_PAGES 64 |
91447636 | 9526 | vm_map_offset_t vm_paging_base_address = 0; |
0a7de745 A |
9527 | boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, }; |
9528 | int vm_paging_max_index = 0; | |
9529 | int vm_paging_page_waiter = 0; | |
9530 | int vm_paging_page_waiter_total = 0; | |
d9a64523 | 9531 | |
0a7de745 A |
9532 | unsigned long vm_paging_no_kernel_page = 0; |
9533 | unsigned long vm_paging_objects_mapped = 0; | |
9534 | unsigned long vm_paging_pages_mapped = 0; | |
9535 | unsigned long vm_paging_objects_mapped_slow = 0; | |
9536 | unsigned long vm_paging_pages_mapped_slow = 0; | |
91447636 | 9537 | |
2d21ac55 A |
9538 | void |
9539 | vm_paging_map_init(void) | |
9540 | { | |
0a7de745 A |
9541 | kern_return_t kr; |
9542 | vm_map_offset_t page_map_offset; | |
9543 | vm_map_entry_t map_entry; | |
2d21ac55 A |
9544 | |
9545 | assert(vm_paging_base_address == 0); | |
9546 | ||
9547 | /* | |
9548 | * Initialize our pool of pre-allocated kernel | |
9549 | * virtual addresses. | |
9550 | */ | |
9551 | page_map_offset = 0; | |
9552 | kr = vm_map_find_space(kernel_map, | |
0a7de745 A |
9553 | &page_map_offset, |
9554 | VM_PAGING_NUM_PAGES * PAGE_SIZE, | |
9555 | 0, | |
9556 | 0, | |
9557 | VM_MAP_KERNEL_FLAGS_NONE, | |
9558 | VM_KERN_MEMORY_NONE, | |
9559 | &map_entry); | |
2d21ac55 A |
9560 | if (kr != KERN_SUCCESS) { |
9561 | panic("vm_paging_map_init: kernel_map full\n"); | |
9562 | } | |
3e170ce0 A |
9563 | VME_OBJECT_SET(map_entry, kernel_object); |
9564 | VME_OFFSET_SET(map_entry, page_map_offset); | |
6d2010ae A |
9565 | map_entry->protection = VM_PROT_NONE; |
9566 | map_entry->max_protection = VM_PROT_NONE; | |
9567 | map_entry->permanent = TRUE; | |
2d21ac55 A |
9568 | vm_object_reference(kernel_object); |
9569 | vm_map_unlock(kernel_map); | |
9570 | ||
9571 | assert(vm_paging_base_address == 0); | |
9572 | vm_paging_base_address = page_map_offset; | |
9573 | } | |
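vm_paging_map_init() reserves VM_PAGING_NUM_PAGES consecutive page slots exactly once; vm_paging_map_object() below then claims a slot by scanning vm_paging_page_inuse[] under vm_paging_lock and computes the slot's address as base + i * PAGE_SIZE. The slot arithmetic on its own, in user space (all names hypothetical; a real version would wrap the scan in the lock, as xnu does):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SLOT_SIZE 4096u
#define NUM_SLOTS 64

static uintptr_t slot_base = 0x100000;	/* stand-in for vm_paging_base_address */
static bool      slot_inuse[NUM_SLOTS];

/* Claim a free slot; 0 means the pool is exhausted (the slow path). */
static uintptr_t
slot_alloc(void)
{
	for (int i = 0; i < NUM_SLOTS; i++) {
		if (!slot_inuse[i]) {
			slot_inuse[i] = true;
			return slot_base + (uintptr_t)i * SLOT_SIZE;
		}
	}
	return 0;
}

static void
slot_free(uintptr_t addr)
{
	slot_inuse[(addr - slot_base) / SLOT_SIZE] = false;
}

int
main(void)
{
	uintptr_t a = slot_alloc();
	uintptr_t b = slot_alloc();

	assert(b == a + SLOT_SIZE);	/* slots are consecutive pages */
	slot_free(a);
	assert(slot_alloc() == a);	/* a freed slot is reused first */
	return 0;
}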
9574 | ||
91447636 | 9575 | /* |
91447636 A |
9576 | * vm_paging_map_object: |
9577 | * Maps part of a VM object's pages in the kernel | |
0a7de745 | 9578 | * virtual address space, using the pre-allocated |
91447636 A |
9579 | * kernel virtual addresses, if possible. |
9580 | * Context: | |
0a7de745 A |
9581 | * The VM object is locked. This lock will get |
9582 | * dropped and re-acquired though, so the caller | |
9583 | * must make sure the VM object is kept alive | |
2d21ac55 | 9584 | * (by holding a VM map that has a reference |
0a7de745 A |
9585 | * on it, for example, or taking an extra reference). |
9586 | * The page should also be kept busy to prevent | |
2d21ac55 | 9587 | * it from being reclaimed. |
91447636 A |
9588 | */ |
9589 | kern_return_t | |
9590 | vm_paging_map_object( | |
0a7de745 A |
9591 | vm_page_t page, |
9592 | vm_object_t object, | |
9593 | vm_object_offset_t offset, | |
9594 | vm_prot_t protection, | |
9595 | boolean_t can_unlock_object, | |
9596 | vm_map_size_t *size, /* IN/OUT */ | |
9597 | vm_map_offset_t *address, /* OUT */ | |
9598 | boolean_t *need_unmap) /* OUT */ | |
91447636 | 9599 | { |
0a7de745 A |
9600 | kern_return_t kr; |
9601 | vm_map_offset_t page_map_offset; | |
9602 | vm_map_size_t map_size; | |
9603 | vm_object_offset_t object_offset; | |
9604 | int i; | |
91447636 | 9605 | |
91447636 | 9606 | if (page != VM_PAGE_NULL && *size == PAGE_SIZE) { |
39236c6e | 9607 | /* use permanent 1-to-1 kernel mapping of physical memory ? */ |
5ba3f43e | 9608 | *address = (vm_map_offset_t) |
0a7de745 | 9609 | phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT); |
5ba3f43e A |
9610 | *need_unmap = FALSE; |
9611 | return KERN_SUCCESS; | |
39236c6e | 9612 | |
d9a64523 | 9613 | assert(page->vmp_busy); |
91447636 | 9614 | /* |
91447636 A |
9615 | * Use one of the pre-allocated kernel virtual addresses |
9616 | * and just enter the VM page in the kernel address space | |
9617 | * at that virtual address. | |
9618 | */ | |
0a7de745 | 9619 | simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); |
91447636 | 9620 | |
91447636 A |
9621 | /* |
9622 | * Try and find an available kernel virtual address | |
9623 | * from our pre-allocated pool. | |
9624 | */ | |
9625 | page_map_offset = 0; | |
2d21ac55 A |
9626 | for (;;) { |
9627 | for (i = 0; i < VM_PAGING_NUM_PAGES; i++) { | |
9628 | if (vm_paging_page_inuse[i] == FALSE) { | |
9629 | page_map_offset = | |
0a7de745 A |
9630 | vm_paging_base_address + |
9631 | (i * PAGE_SIZE); | |
2d21ac55 A |
9632 | break; |
9633 | } | |
9634 | } | |
9635 | if (page_map_offset != 0) { | |
9636 | /* found a space to map our page ! */ | |
9637 | break; | |
9638 | } | |
9639 | ||
9640 | if (can_unlock_object) { | |
9641 | /* | |
9642 | * If we can afford to unlock the VM object, | |
9643 | * let's take the slow path now... | |
9644 | */ | |
91447636 A |
9645 | break; |
9646 | } | |
2d21ac55 A |
9647 | /* |
9648 | * We can't afford to unlock the VM object, so | |
9649 | * let's wait for a space to become available... | |
9650 | */ | |
9651 | vm_paging_page_waiter_total++; | |
9652 | vm_paging_page_waiter++; | |
fe8ab488 A |
9653 | kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT); |
9654 | if (kr == THREAD_WAITING) { | |
9655 | simple_unlock(&vm_paging_lock); | |
9656 | kr = thread_block(THREAD_CONTINUE_NULL); | |
0a7de745 | 9657 | simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); |
fe8ab488 | 9658 | } |
2d21ac55 A |
9659 | vm_paging_page_waiter--; |
9660 | /* ... and try again */ | |
91447636 A |
9661 | } |
9662 | ||
9663 | if (page_map_offset != 0) { | |
9664 | /* | |
9665 | * We found a kernel virtual address; | |
9666 | * map the physical page to that virtual address. | |
9667 | */ | |
9668 | if (i > vm_paging_max_index) { | |
9669 | vm_paging_max_index = i; | |
9670 | } | |
9671 | vm_paging_page_inuse[i] = TRUE; | |
9672 | simple_unlock(&vm_paging_lock); | |
2d21ac55 | 9673 | |
d9a64523 | 9674 | page->vmp_pmapped = TRUE; |
2d21ac55 A |
9675 | |
9676 | /* | |
9677 | * Keep the VM object locked over the PMAP_ENTER | |
9678 | * and the actual use of the page by the kernel, | |
d9a64523 | 9679 | * or this pmap mapping might get undone by a |
2d21ac55 A |
9680 | * vm_object_pmap_protect() call... |
9681 | */ | |
0c530ab8 | 9682 | PMAP_ENTER(kernel_pmap, |
0a7de745 A |
9683 | page_map_offset, |
9684 | page, | |
9685 | protection, | |
9686 | VM_PROT_NONE, | |
9687 | 0, | |
9688 | TRUE, | |
9689 | kr); | |
5ba3f43e | 9690 | assert(kr == KERN_SUCCESS); |
91447636 | 9691 | vm_paging_objects_mapped++; |
d9a64523 | 9692 | vm_paging_pages_mapped++; |
91447636 | 9693 | *address = page_map_offset; |
39236c6e | 9694 | *need_unmap = TRUE; |
91447636 | 9695 | |
5ba3f43e A |
9696 | #if KASAN |
9697 | kasan_notify_address(page_map_offset, PAGE_SIZE); | |
9698 | #endif | |
9699 | ||
91447636 A |
9700 | /* all done and mapped, ready to use! */
9701 | return KERN_SUCCESS; | |
9702 | } | |
9703 | ||
9704 | /* | |
9705 | * We ran out of pre-allocated kernel virtual | |
9706 | * addresses. Just map the page into the kernel
9707 | * the slow, regular way.
9708 | */ | |
9709 | vm_paging_no_kernel_page++; | |
9710 | simple_unlock(&vm_paging_lock); | |
2d21ac55 A |
9711 | } |
9712 | ||
0a7de745 | 9713 | if (!can_unlock_object) { |
39236c6e A |
9714 | *address = 0; |
9715 | *size = 0; | |
9716 | *need_unmap = FALSE; | |
2d21ac55 | 9717 | return KERN_NOT_SUPPORTED; |
91447636 | 9718 | } |
91447636 A |
9719 | |
9720 | object_offset = vm_object_trunc_page(offset); | |
39236c6e | 9721 | map_size = vm_map_round_page(*size, |
0a7de745 | 9722 | VM_MAP_PAGE_MASK(kernel_map)); |
91447636 A |
9723 | |
9724 | /* | |
9725 | * Try and map the required range of the object | |
9726 | * in the kernel_map | |
9727 | */ | |
9728 | ||
0a7de745 | 9729 | vm_object_reference_locked(object); /* for the map entry */ |
91447636 A |
9730 | vm_object_unlock(object); |
9731 | ||
9732 | kr = vm_map_enter(kernel_map, | |
0a7de745 A |
9733 | address, |
9734 | map_size, | |
9735 | 0, | |
9736 | VM_FLAGS_ANYWHERE, | |
9737 | VM_MAP_KERNEL_FLAGS_NONE, | |
9738 | VM_KERN_MEMORY_NONE, | |
9739 | object, | |
9740 | object_offset, | |
9741 | FALSE, | |
9742 | protection, | |
9743 | VM_PROT_ALL, | |
9744 | VM_INHERIT_NONE); | |
91447636 A |
9745 | if (kr != KERN_SUCCESS) { |
9746 | *address = 0; | |
9747 | *size = 0; | |
39236c6e | 9748 | *need_unmap = FALSE; |
0a7de745 | 9749 | vm_object_deallocate(object); /* for the map entry */ |
2d21ac55 | 9750 | vm_object_lock(object); |
91447636 A |
9751 | return kr; |
9752 | } | |
9753 | ||
9754 | *size = map_size; | |
9755 | ||
9756 | /* | |
9757 | * Enter the mapped pages in the page table now. | |
9758 | */ | |
9759 | vm_object_lock(object); | |
2d21ac55 A |
9760 | /* |
9761 | * VM object must be kept locked from before PMAP_ENTER() | |
9762 | * until after the kernel is done accessing the page(s). | |
9763 | * Otherwise, the pmap mappings in the kernel could be | |
9764 | * undone by a call to vm_object_pmap_protect(). | |
9765 | */ | |
9766 | ||
91447636 | 9767 | for (page_map_offset = 0; |
0a7de745 A |
9768 | map_size != 0; |
9769 | map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) { | |
91447636 A |
9770 | page = vm_page_lookup(object, offset + page_map_offset); |
9771 | if (page == VM_PAGE_NULL) { | |
2d21ac55 A |
9772 | printf("vm_paging_map_object: no page!?\n");
9773 | vm_object_unlock(object); | |
9774 | kr = vm_map_remove(kernel_map, *address, *size, | |
0a7de745 | 9775 | VM_MAP_REMOVE_NO_FLAGS); |
2d21ac55 A |
9776 | assert(kr == KERN_SUCCESS); |
9777 | *address = 0; | |
9778 | *size = 0; | |
39236c6e | 9779 | *need_unmap = FALSE; |
2d21ac55 A |
9780 | vm_object_lock(object); |
9781 | return KERN_MEMORY_ERROR; | |
91447636 | 9782 | } |
d9a64523 | 9783 | page->vmp_pmapped = TRUE; |
91447636 | 9784 | |
39037602 | 9785 | //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page))); |
91447636 | 9786 | PMAP_ENTER(kernel_pmap, |
0a7de745 A |
9787 | *address + page_map_offset, |
9788 | page, | |
9789 | protection, | |
9790 | VM_PROT_NONE, | |
9791 | 0, | |
9792 | TRUE, | |
9793 | kr); | |
5ba3f43e A |
9794 | assert(kr == KERN_SUCCESS); |
9795 | #if KASAN | |
9796 | kasan_notify_address(*address + page_map_offset, PAGE_SIZE); | |
9797 | #endif | |
91447636 | 9798 | } |
d9a64523 | 9799 | |
91447636 | 9800 | vm_paging_objects_mapped_slow++; |
b0d623f7 | 9801 | vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64); |
91447636 | 9802 | |
39236c6e A |
9803 | *need_unmap = TRUE; |
9804 | ||
91447636 A |
9805 | return KERN_SUCCESS; |
9806 | } | |
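
As a concrete illustration of the contract above, here is a minimal caller sketch; it is not part of this file, and the helper name, buffer, and bcopy() destination are hypothetical. The object stays locked throughout, the page is busy, and the mapping is undone only when need_unmap says so (the single-page fast path returns a physical-aperture address that must not be unmapped):

    /*
     * Illustrative caller sketch -- not in the source.  Assumes the
     * object is locked and the page is busy, per the rules above.
     */
    static kern_return_t
    copy_page_to_buffer(               /* hypothetical helper */
        vm_object_t        object,
        vm_page_t          page,
        vm_object_offset_t offset,
        void               *buffer)
    {
        vm_map_size_t   size = PAGE_SIZE;
        vm_map_offset_t addr = 0;
        boolean_t       need_unmap = FALSE;
        kern_return_t   kr;

        kr = vm_paging_map_object(page, object, offset,
            VM_PROT_READ,
            FALSE,          /* can_unlock_object */
            &size, &addr, &need_unmap);
        if (kr != KERN_SUCCESS) {
            return kr;
        }
        /* single-page fast path: addr may be the 1-to-1 physical mapping */
        bcopy((const void *)addr, buffer, PAGE_SIZE);
        if (need_unmap) {
            vm_paging_unmap_object(object, addr, addr + size);
        }
        return KERN_SUCCESS;
    }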
9807 | ||
9808 | /* | |
91447636 A |
9809 | * vm_paging_unmap_object: |
9810 | * Unmaps part of a VM object's pages from the kernel | |
0a7de745 | 9811 | * virtual address space. |
91447636 | 9812 | * Context: |
0a7de745 A |
9813 | * The VM object is locked. This lock will get |
9814 | * dropped and re-acquired though. | |
91447636 A |
9815 | */ |
9816 | void | |
9817 | vm_paging_unmap_object( | |
0a7de745 A |
9818 | vm_object_t object, |
9819 | vm_map_offset_t start, | |
9820 | vm_map_offset_t end) | |
91447636 | 9821 | { |
0a7de745 A |
9822 | kern_return_t kr; |
9823 | int i; | |
91447636 | 9824 | |
0c530ab8 | 9825 | if ((vm_paging_base_address == 0) || |
8f6c56a5 A |
9826 | (start < vm_paging_base_address) || |
9827 | (end > (vm_paging_base_address | |
0a7de745 | 9828 | + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) { |
91447636 A |
9829 | /* |
9830 | * We didn't use our pre-allocated pool of | |
9831 | * kernel virtual addresses. Deallocate the
9832 | * virtual memory. | |
9833 | */ | |
9834 | if (object != VM_OBJECT_NULL) { | |
9835 | vm_object_unlock(object); | |
9836 | } | |
d9a64523 | 9837 | kr = vm_map_remove(kernel_map, start, end, |
0a7de745 | 9838 | VM_MAP_REMOVE_NO_FLAGS); |
91447636 A |
9839 | if (object != VM_OBJECT_NULL) { |
9840 | vm_object_lock(object); | |
9841 | } | |
9842 | assert(kr == KERN_SUCCESS); | |
9843 | } else { | |
9844 | /* | |
9845 | * We used a kernel virtual address from our | |
9846 | * pre-allocated pool. Put it back in the pool | |
9847 | * for next time. | |
9848 | */ | |
91447636 | 9849 | assert(end - start == PAGE_SIZE); |
b0d623f7 A |
9850 | i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT); |
9851 | assert(i >= 0 && i < VM_PAGING_NUM_PAGES); | |
91447636 A |
9852 | |
9853 | /* undo the pmap mapping */ | |
0c530ab8 | 9854 | pmap_remove(kernel_pmap, start, end); |
91447636 | 9855 | |
0a7de745 | 9856 | simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); |
91447636 | 9857 | vm_paging_page_inuse[i] = FALSE; |
2d21ac55 A |
9858 | if (vm_paging_page_waiter) { |
9859 | thread_wakeup(&vm_paging_page_waiter); | |
9860 | } | |
91447636 | 9861 | simple_unlock(&vm_paging_lock); |
91447636 A |
9862 | } |
9863 | } | |
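
The pool-versus-kernel_map disambiguation above hinges purely on the address range, and the slot index is recovered as (start - vm_paging_base_address) >> PAGE_SHIFT, exactly as in the code. A one-line predicate makes the rule explicit; this helper is a sketch and does not exist in the source:

    /* hypothetical helper: TRUE iff addr came from the pre-allocated pool */
    static boolean_t
    vm_paging_addr_from_pool(vm_map_offset_t addr)
    {
        return (addr >= vm_paging_base_address) &&
               (addr < vm_paging_base_address +
                (VM_PAGING_NUM_PAGES * PAGE_SIZE));
    }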
9864 | ||
91447636 | 9865 | |
91447636 | 9866 | /* |
d9a64523 | 9867 | * page->vmp_object must be locked |
91447636 | 9868 | */ |
91447636 | 9869 | void |
5ba3f43e | 9870 | vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked) |
91447636 | 9871 | { |
5ba3f43e A |
9872 | if (!queues_locked) { |
9873 | vm_page_lockspin_queues(); | |
9874 | } | |
91447636 | 9875 | |
d9a64523 | 9876 | page->vmp_free_when_done = FALSE; |
91447636 | 9877 | /* |
5ba3f43e A |
9878 | * We need to drop the laundry count and may
9879 | * also need to remove the page from the
9880 | * I/O paging queue;
9881 | * vm_pageout_throttle_up() handles both cases.
9882 | *
9883 | * The laundry and pageout_queue flags are cleared.
91447636 | 9884 | */ |
5ba3f43e | 9885 | vm_pageout_throttle_up(page); |
91447636 | 9886 | |
5ba3f43e A |
9887 | if (!queues_locked) { |
9888 | vm_page_unlock_queues(); | |
91447636 | 9889 | } |
5ba3f43e A |
9890 | } |
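
vm_pageout_steal_laundry() supports two locking conventions, selected by queues_locked. A sketch of both call sites follows (it assumes the page's object lock is held, as the header comment requires):

    /* caller does not hold the page queues lock: the function takes it */
    vm_pageout_steal_laundry(page, FALSE);

    /* caller already holds the page queues lock */
    vm_page_lockspin_queues();
    vm_pageout_steal_laundry(page, TRUE);
    vm_page_unlock_queues();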
9891 | ||
9892 | upl_t | |
9893 | vector_upl_create(vm_offset_t upl_offset) | |
9894 | { | |
0a7de745 A |
9895 | int vector_upl_size = sizeof(struct _vector_upl); |
9896 | int i = 0; | |
9897 | upl_t upl; | |
5ba3f43e | 9898 | vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size); |
91447636 | 9899 | |
0a7de745 | 9900 | upl = upl_create(0, UPL_VECTOR, 0); |
5ba3f43e A |
9901 | upl->vector_upl = vector_upl; |
9902 | upl->offset = upl_offset; | |
9903 | vector_upl->size = 0; | |
9904 | vector_upl->offset = upl_offset; | |
0a7de745 A |
9905 | vector_upl->invalid_upls = 0; |
9906 | vector_upl->num_upls = 0; | |
5ba3f43e | 9907 | vector_upl->pagelist = NULL; |
d9a64523 | 9908 | |
0a7de745 | 9909 | for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) { |
5ba3f43e A |
9910 | vector_upl->upl_iostates[i].size = 0; |
9911 | vector_upl->upl_iostates[i].offset = 0; | |
91447636 | 9912 | } |
5ba3f43e A |
9913 | return upl; |
9914 | } | |
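
A freshly created vector UPL is an empty container; sub-UPLs are attached with vector_upl_set_subupl() and their I/O windows recorded with vector_upl_set_iostate(), both defined below. A hedged assembly sketch, where upl_a, upl_b, and the sizes are hypothetical:

    upl_t vupl = vector_upl_create(0);

    vector_upl_set_subupl(vupl, upl_a, 4 * PAGE_SIZE);
    vector_upl_set_iostate(vupl, upl_a, 0, 4 * PAGE_SIZE);

    vector_upl_set_subupl(vupl, upl_b, 2 * PAGE_SIZE);
    vector_upl_set_iostate(vupl, upl_b, 4 * PAGE_SIZE, 2 * PAGE_SIZE);

    /* flatten the sub-UPLs' page lists into one contiguous array */
    vector_upl_set_pagelist(vupl);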
91447636 | 9915 | |
5ba3f43e A |
9916 | void |
9917 | vector_upl_deallocate(upl_t upl) | |
9918 | { | |
0a7de745 | 9919 | if (upl) { |
5ba3f43e | 9920 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 A |
9921 | if (vector_upl) { |
9922 | if (vector_upl->invalid_upls != vector_upl->num_upls) { | |
5ba3f43e | 9923 | panic("Deallocating non-empty Vectored UPL\n"); |
0a7de745 A |
9924 | } |
9925 | kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE))); | |
9926 | vector_upl->invalid_upls = 0; | |
5ba3f43e A |
9927 | vector_upl->num_upls = 0; |
9928 | vector_upl->pagelist = NULL; | |
9929 | vector_upl->size = 0; | |
9930 | vector_upl->offset = 0; | |
9931 | kfree(vector_upl, sizeof(struct _vector_upl)); | |
9932 | vector_upl = (vector_upl_t)0xfeedfeed; | |
0a7de745 | 9933 | } else { |
5ba3f43e | 9934 | panic("vector_upl_deallocate was passed a non-vectored upl\n"); |
0a7de745 A |
9935 | } |
9936 | } else { | |
5ba3f43e | 9937 | panic("vector_upl_deallocate was passed a NULL upl\n"); |
0a7de745 | 9938 | } |
5ba3f43e | 9939 | } |
91447636 | 9940 | |
5ba3f43e A |
9941 | boolean_t |
9942 | vector_upl_is_valid(upl_t upl) | |
9943 | { | |
0a7de745 | 9944 | if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) { |
5ba3f43e | 9945 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 | 9946 | if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) { |
5ba3f43e | 9947 | return FALSE; |
0a7de745 | 9948 | } else { |
5ba3f43e | 9949 | return TRUE; |
0a7de745 | 9950 | } |
91447636 | 9951 | } |
5ba3f43e | 9952 | return FALSE; |
91447636 A |
9953 | } |
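
vector_upl_is_valid() treats two magic pointer values as poison. The 0xfeedfeed store appears in vector_upl_deallocate() above (note that, as written, it poisons only the local pointer, not upl->vector_upl); 0xfeedbeef is stored at a destroy site outside this excerpt. Named constants make the protocol easier to read; the names below are hypothetical, only the values come from the code:

    #define VECTOR_UPL_POISON_DEALLOCATED ((vector_upl_t)0xfeedfeed)
    #define VECTOR_UPL_POISON_DESTROYED   ((vector_upl_t)0xfeedbeef)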
9954 | ||
5ba3f43e | 9955 | boolean_t |
0a7de745 | 9956 | vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size) |
b0d623f7 | 9957 | { |
0a7de745 | 9958 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 9959 | vector_upl_t vector_upl = upl->vector_upl; |
d9a64523 | 9960 | |
0a7de745 A |
9961 | if (vector_upl) { |
9962 | if (subupl) { | |
9963 | if (io_size) { | |
9964 | if (io_size < PAGE_SIZE) { | |
b0d623f7 | 9965 | io_size = PAGE_SIZE; |
0a7de745 | 9966 | } |
b0d623f7 A |
9967 | subupl->vector_upl = (void*)vector_upl; |
9968 | vector_upl->upl_elems[vector_upl->num_upls++] = subupl; | |
9969 | vector_upl->size += io_size; | |
9970 | upl->size += io_size; | |
0a7de745 A |
9971 | } else { |
9972 | uint32_t i = 0, invalid_upls = 0; | |
9973 | for (i = 0; i < vector_upl->num_upls; i++) { | |
9974 | if (vector_upl->upl_elems[i] == subupl) { | |
b0d623f7 | 9975 | break; |
0a7de745 | 9976 | } |
b0d623f7 | 9977 | } |
0a7de745 | 9978 | if (i == vector_upl->num_upls) { |
b0d623f7 | 9979 | panic("Trying to remove sub-upl when none exists"); |
0a7de745 | 9980 | } |
d9a64523 | 9981 | |
b0d623f7 | 9982 | vector_upl->upl_elems[i] = NULL; |
cb323159 A |
9983 | invalid_upls = os_atomic_inc(&(vector_upl)->invalid_upls, |
9984 | relaxed); | |
0a7de745 | 9985 | if (invalid_upls == vector_upl->num_upls) { |
b0d623f7 | 9986 | return TRUE; |
0a7de745 | 9987 | } else { |
b0d623f7 | 9988 | return FALSE; |
0a7de745 | 9989 | } |
b0d623f7 | 9990 | } |
0a7de745 | 9991 | } else { |
b0d623f7 | 9992 | panic("vector_upl_set_subupl was passed a NULL upl element\n"); |
0a7de745 A |
9993 | } |
9994 | } else { | |
b0d623f7 | 9995 | panic("vector_upl_set_subupl was passed a non-vectored upl\n"); |
0a7de745 A |
9996 | } |
9997 | } else { | |
b0d623f7 | 9998 | panic("vector_upl_set_subupl was passed a NULL upl\n"); |
0a7de745 | 9999 | } |
b0d623f7 A |
10000 | |
10001 | return FALSE; | |
d9a64523 | 10002 | } |
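
Passing io_size == 0 flips vector_upl_set_subupl() into removal mode: the matching slot is NULLed, the invalid-UPL count is bumped atomically, and the function returns TRUE once every sub-UPL has been removed. A teardown sketch (hedged; real callers reach this from the commit/abort path):

    if (vector_upl_set_subupl(vupl, upl_a, 0)) {
        /* that was the last live sub-UPL: safe to free the container */
        vector_upl_deallocate(vupl);
    }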
b0d623f7 A |
10003 | |
10004 | void | |
10005 | vector_upl_set_pagelist(upl_t upl) | |
10006 | { | |
0a7de745 A |
10007 | if (vector_upl_is_valid(upl)) { |
10008 | uint32_t i = 0; | |
b0d623f7 A |
10009 | vector_upl_t vector_upl = upl->vector_upl; |
10010 | ||
0a7de745 A |
10011 | if (vector_upl) { |
10012 | vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0; | |
b0d623f7 | 10013 | |
0a7de745 | 10014 | vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)); |
d9a64523 | 10015 | |
0a7de745 A |
10016 | for (i = 0; i < vector_upl->num_upls; i++) { |
10017 | cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size / PAGE_SIZE; | |
b0d623f7 A |
10018 | bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size); |
10019 | pagelist_size += cur_upl_pagelist_size; | |
0a7de745 | 10020 | if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) { |
b0d623f7 | 10021 | upl->highest_page = vector_upl->upl_elems[i]->highest_page; |
0a7de745 | 10022 | } |
b0d623f7 | 10023 | } |
0a7de745 A |
10024 | assert(pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
10025 | } else { | |
b0d623f7 | 10026 | panic("vector_upl_set_pagelist was passed a non-vectored upl\n"); |
0a7de745 A |
10027 | } |
10028 | } else { | |
b0d623f7 | 10029 | panic("vector_upl_set_pagelist was passed a NULL upl\n"); |
0a7de745 | 10030 | } |
b0d623f7 A |
10031 | } |
10032 | ||
10033 | upl_t | |
10034 | vector_upl_subupl_byindex(upl_t upl, uint32_t index) | |
10035 | { | |
0a7de745 | 10036 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 10037 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 A |
10038 | if (vector_upl) { |
10039 | if (index < vector_upl->num_upls) { | |
b0d623f7 | 10040 | return vector_upl->upl_elems[index]; |
0a7de745 A |
10041 | } |
10042 | } else { | |
b0d623f7 | 10043 | panic("vector_upl_subupl_byindex was passed a non-vectored upl\n"); |
0a7de745 | 10044 | } |
b0d623f7 A |
10045 | } |
10046 | return NULL; | |
10047 | } | |
10048 | ||
10049 | upl_t | |
10050 | vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size) | |
10051 | { | |
0a7de745 A |
10052 | if (vector_upl_is_valid(upl)) { |
10053 | uint32_t i = 0; | |
b0d623f7 A |
10054 | vector_upl_t vector_upl = upl->vector_upl; |
10055 | ||
0a7de745 | 10056 | if (vector_upl) { |
b0d623f7 A |
10057 | upl_t subupl = NULL; |
10058 | vector_upl_iostates_t subupl_state; | |
10059 | ||
0a7de745 | 10060 | for (i = 0; i < vector_upl->num_upls; i++) { |
b0d623f7 A |
10061 | subupl = vector_upl->upl_elems[i]; |
10062 | subupl_state = vector_upl->upl_iostates[i]; | |
0a7de745 | 10063 | if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) { |
b0d623f7 A |
10064 | /* We could have been passed an offset/size pair that belongs |
10065 | * to a UPL element that has already been committed/aborted.
10066 | * If so, return NULL. | |
10067 | */ | |
0a7de745 | 10068 | if (subupl == NULL) { |
b0d623f7 | 10069 | return NULL; |
0a7de745 A |
10070 | } |
10071 | if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) { | |
b0d623f7 | 10072 | *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset; |
0a7de745 | 10073 | if (*upl_size > subupl_state.size) { |
b0d623f7 | 10074 | *upl_size = subupl_state.size; |
0a7de745 | 10075 | } |
b0d623f7 | 10076 | } |
0a7de745 | 10077 | if (*upl_offset >= subupl_state.offset) { |
b0d623f7 | 10078 | *upl_offset -= subupl_state.offset; |
0a7de745 | 10079 | } else if (i) { |
b0d623f7 | 10080 | panic("Vector UPL offset miscalculation\n"); |
0a7de745 | 10081 | } |
b0d623f7 | 10082 | return subupl; |
d9a64523 | 10083 | } |
b0d623f7 | 10084 | } |
0a7de745 | 10085 | } else { |
b0d623f7 | 10086 | panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n"); |
0a7de745 | 10087 | } |
b0d623f7 A |
10088 | } |
10089 | return NULL; | |
10090 | } | |
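
vector_upl_subupl_byoffset() both locates the owning sub-UPL and rewrites the offset/size pair in place to be sub-UPL-relative, clipping the size to that sub-UPL's recorded I/O window. A completion-routing sketch; the variable names are hypothetical:

    upl_offset_t off = io_offset;   /* vector-relative on entry */
    upl_size_t   len = io_length;

    upl_t sub = vector_upl_subupl_byoffset(vupl, &off, &len);
    if (sub != NULL) {
        /* off/len are now relative to 'sub'; commit or abort there */
    }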
10091 | ||
10092 | void | |
10093 | vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr) | |
10094 | { | |
10095 | *v_upl_submap = NULL; | |
10096 | ||
0a7de745 | 10097 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 10098 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 | 10099 | if (vector_upl) { |
b0d623f7 A |
10100 | *v_upl_submap = vector_upl->submap; |
10101 | *submap_dst_addr = vector_upl->submap_dst_addr; | |
0a7de745 | 10102 | } else { |
b0d623f7 | 10103 | panic("vector_upl_get_submap was passed a non-vectored UPL\n"); |
0a7de745 A |
10104 | } |
10105 | } else { | |
b0d623f7 | 10106 | panic("vector_upl_get_submap was passed a null UPL\n"); |
0a7de745 | 10107 | } |
b0d623f7 A |
10108 | } |
10109 | ||
10110 | void | |
10111 | vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr) | |
10112 | { | |
0a7de745 | 10113 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 10114 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 | 10115 | if (vector_upl) { |
b0d623f7 A |
10116 | vector_upl->submap = submap; |
10117 | vector_upl->submap_dst_addr = submap_dst_addr; | |
0a7de745 | 10118 | } else { |
b0d623f7 | 10119 | panic("vector_upl_set_submap was passed a non-vectored UPL\n");
0a7de745 A |
10120 | } |
10121 | } else { | |
b0d623f7 | 10122 | panic("vector_upl_set_submap was passed a NULL UPL\n");
0a7de745 | 10123 | } |
b0d623f7 A |
10124 | } |
10125 | ||
10126 | void | |
10127 | vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size) | |
10128 | { | |
0a7de745 | 10129 | if (vector_upl_is_valid(upl)) { |
b0d623f7 A |
10130 | uint32_t i = 0; |
10131 | vector_upl_t vector_upl = upl->vector_upl; | |
10132 | ||
0a7de745 A |
10133 | if (vector_upl) { |
10134 | for (i = 0; i < vector_upl->num_upls; i++) { | |
10135 | if (vector_upl->upl_elems[i] == subupl) { | |
b0d623f7 | 10136 | break; |
0a7de745 | 10137 | } |
b0d623f7 | 10138 | } |
d9a64523 | 10139 | |
0a7de745 | 10140 | if (i == vector_upl->num_upls) { |
b0d623f7 | 10141 | panic("setting sub-upl iostate when none exists"); |
0a7de745 | 10142 | } |
b0d623f7 A |
10143 | |
10144 | vector_upl->upl_iostates[i].offset = offset; | |
0a7de745 | 10145 | if (size < PAGE_SIZE) { |
b0d623f7 | 10146 | size = PAGE_SIZE; |
0a7de745 | 10147 | } |
b0d623f7 | 10148 | vector_upl->upl_iostates[i].size = size; |
0a7de745 | 10149 | } else { |
b0d623f7 | 10150 | panic("vector_upl_set_iostate was passed a non-vectored UPL\n"); |
0a7de745 A |
10151 | } |
10152 | } else { | |
b0d623f7 | 10153 | panic("vector_upl_set_iostate was passed a NULL UPL\n"); |
0a7de745 | 10154 | } |
b0d623f7 A |
10155 | } |
10156 | ||
10157 | void | |
10158 | vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size) | |
10159 | { | |
0a7de745 | 10160 | if (vector_upl_is_valid(upl)) { |
b0d623f7 A |
10161 | uint32_t i = 0; |
10162 | vector_upl_t vector_upl = upl->vector_upl; | |
10163 | ||
0a7de745 A |
10164 | if (vector_upl) { |
10165 | for (i = 0; i < vector_upl->num_upls; i++) { | |
10166 | if (vector_upl->upl_elems[i] == subupl) { | |
b0d623f7 | 10167 | break; |
0a7de745 | 10168 | } |
b0d623f7 | 10169 | } |
d9a64523 | 10170 | |
0a7de745 | 10171 | if (i == vector_upl->num_upls) { |
b0d623f7 | 10172 | panic("getting sub-upl iostate when none exists"); |
0a7de745 | 10173 | } |
b0d623f7 A |
10174 | |
10175 | *offset = vector_upl->upl_iostates[i].offset; | |
10176 | *size = vector_upl->upl_iostates[i].size; | |
0a7de745 | 10177 | } else { |
b0d623f7 | 10178 | panic("vector_upl_get_iostate was passed a non-vectored UPL\n"); |
0a7de745 A |
10179 | } |
10180 | } else { | |
b0d623f7 | 10181 | panic("vector_upl_get_iostate was passed a NULL UPL\n"); |
0a7de745 | 10182 | } |
b0d623f7 A |
10183 | } |
10184 | ||
10185 | void | |
10186 | vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size) | |
10187 | { | |
0a7de745 | 10188 | if (vector_upl_is_valid(upl)) { |
b0d623f7 | 10189 | vector_upl_t vector_upl = upl->vector_upl; |
0a7de745 A |
10190 | if (vector_upl) { |
10191 | if (index < vector_upl->num_upls) { | |
b0d623f7 A |
10192 | *offset = vector_upl->upl_iostates[index].offset; |
10193 | *size = vector_upl->upl_iostates[index].size; | |
0a7de745 | 10194 | } else { |
b0d623f7 | 10195 | *offset = *size = 0; |
0a7de745 A |
10196 | } |
10197 | } else { | |
b0d623f7 | 10198 | panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n"); |
0a7de745 A |
10199 | } |
10200 | } else { | |
b0d623f7 | 10201 | panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n"); |
0a7de745 | 10202 | } |
b0d623f7 A |
10203 | } |
10204 | ||
10205 | upl_page_info_t * | |
10206 | upl_get_internal_vectorupl_pagelist(upl_t upl) | |
10207 | { | |
10208 | return ((vector_upl_t)(upl->vector_upl))->pagelist; | |
10209 | } | |
10210 | ||
10211 | void * | |
10212 | upl_get_internal_vectorupl(upl_t upl) | |
10213 | { | |
10214 | return upl->vector_upl; | |
10215 | } | |
10216 | ||
91447636 A |
10217 | vm_size_t |
10218 | upl_get_internal_pagelist_offset(void) | |
10219 | { | |
10220 | return sizeof(struct upl); | |
10221 | } | |
10222 | ||
91447636 A |
10223 | void |
10224 | upl_clear_dirty( | |
0a7de745 A |
10225 | upl_t upl, |
10226 | boolean_t value) | |
91447636 | 10227 | { |
0c530ab8 A |
10228 | if (value) { |
10229 | upl->flags |= UPL_CLEAR_DIRTY; | |
10230 | } else { | |
10231 | upl->flags &= ~UPL_CLEAR_DIRTY; | |
10232 | } | |
91447636 A |
10233 | } |
10234 | ||
6d2010ae A |
10235 | void |
10236 | upl_set_referenced( | |
0a7de745 A |
10237 | upl_t upl, |
10238 | boolean_t value) | |
6d2010ae A |
10239 | { |
10240 | upl_lock(upl); | |
10241 | if (value) { | |
10242 | upl->ext_ref_count++; | |
10243 | } else { | |
10244 | if (!upl->ext_ref_count) { | |
10245 | panic("upl_set_referenced: no external reference to drop on %p\n", upl);
10246 | } | |
10247 | upl->ext_ref_count--; | |
10248 | } | |
10249 | upl_unlock(upl); | |
10250 | } | |
10251 | ||
fe8ab488 A |
10252 | #if CONFIG_IOSCHED |
10253 | void | |
10254 | upl_set_blkno( | |
0a7de745 A |
10255 | upl_t upl, |
10256 | vm_offset_t upl_offset, | |
10257 | int io_size, | |
10258 | int64_t blkno) | |
fe8ab488 | 10259 | { |
0a7de745 A |
10260 | int i, j; |
10261 | if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) { | |
10262 | return; | |
10263 | } | |
d9a64523 | 10264 | |
0a7de745 A |
10265 | assert(upl->upl_reprio_info != 0); |
10266 | for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) { | |
10267 | UPL_SET_REPRIO_INFO(upl, i, blkno, io_size); | |
10268 | } | |
fe8ab488 A |
10269 | } |
10270 | #endif | |
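
upl_set_blkno() stamps every page covered by the I/O with the same block number for the expedite path. Assuming 4 KB pages (an assumption, not stated here), a 16 KB I/O starting at UPL offset 0x2000 touches pages 2 through 5; upl and blkno are hypothetical in this sketch:

    /* hypothetical call: 16 KB of I/O starting at UPL offset 0x2000 */
    upl_set_blkno(upl, 0x2000, 16 * 1024, blkno);
    /* i runs 2,3,4,5 -- UPL_SET_REPRIO_INFO() is applied to each page */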
10271 | ||
0a7de745 A |
10272 | void inline |
10273 | memoryshot(unsigned int event, unsigned int control) | |
39236c6e A |
10274 | { |
10275 | if (vm_debug_events) { | |
10276 | KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control, | |
0a7de745 A |
10277 | vm_page_active_count, vm_page_inactive_count, |
10278 | vm_page_free_count, vm_page_speculative_count, | |
10279 | vm_page_throttled_count); | |
39236c6e A |
10280 | } else { |
10281 | (void) event; | |
10282 | (void) control; | |
10283 | } | |
39236c6e | 10284 | } |
91447636 A |
10285 | |
10286 | #ifdef MACH_BSD | |
1c79356b | 10287 | |
0a7de745 A |
10288 | boolean_t |
10289 | upl_device_page(upl_page_info_t *upl) | |
2d21ac55 | 10290 | { |
0a7de745 | 10291 | return UPL_DEVICE_PAGE(upl); |
2d21ac55 | 10292 | } |
0a7de745 A |
10293 | boolean_t |
10294 | upl_page_present(upl_page_info_t *upl, int index) | |
1c79356b | 10295 | { |
0a7de745 | 10296 | return UPL_PAGE_PRESENT(upl, index); |
1c79356b | 10297 | } |
0a7de745 A |
10298 | boolean_t |
10299 | upl_speculative_page(upl_page_info_t *upl, int index) | |
2d21ac55 | 10300 | { |
0a7de745 | 10301 | return UPL_SPECULATIVE_PAGE(upl, index); |
2d21ac55 | 10302 | } |
0a7de745 A |
10303 | boolean_t |
10304 | upl_dirty_page(upl_page_info_t *upl, int index) | |
1c79356b | 10305 | { |
0a7de745 | 10306 | return UPL_DIRTY_PAGE(upl, index); |
1c79356b | 10307 | } |
0a7de745 A |
10308 | boolean_t |
10309 | upl_valid_page(upl_page_info_t *upl, int index) | |
1c79356b | 10310 | { |
0a7de745 | 10311 | return UPL_VALID_PAGE(upl, index); |
1c79356b | 10312 | } |
0a7de745 A |
10313 | ppnum_t |
10314 | upl_phys_page(upl_page_info_t *upl, int index) | |
1c79356b | 10315 | { |
0a7de745 | 10316 | return UPL_PHYS_PAGE(upl, index); |
1c79356b A |
10317 | } |
10318 | ||
0a7de745 A |
10319 | void |
10320 | upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v) | |
3e170ce0 A |
10321 | { |
10322 | upl[index].mark = v; | |
10323 | } | |
10324 | ||
0a7de745 A |
10325 | boolean_t |
10326 | upl_page_get_mark(upl_page_info_t *upl, int index) | |
3e170ce0 A |
10327 | { |
10328 | return upl[index].mark; | |
10329 | } | |
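
The boolean accessors above are thin wrappers over a upl_page_info array, and a typical consumer walks that array by index. A sketch follows; pl comes from the same macro this file uses for sub-UPLs, and page_count is assumed known to the UPL's creator:

    upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
    int i;

    for (i = 0; i < page_count; i++) {
        if (!upl_page_present(pl, i) || !upl_valid_page(pl, i)) {
            continue;
        }
        if (upl_dirty_page(pl, i)) {
            printf("page %d -> ppnum 0x%x dirty\n",
                i, upl_phys_page(pl, i));
        }
    }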
10330 | ||
0b4e3aa0 A |
10331 | void |
10332 | vm_countdirtypages(void) | |
1c79356b A |
10333 | { |
10334 | vm_page_t m; | |
10335 | int dpages; | |
10336 | int pgopages; | |
10337 | int precpages; | |
10338 | ||
10339 | ||
0a7de745 A |
10340 | dpages = 0; |
10341 | pgopages = 0; | |
10342 | precpages = 0; | |
1c79356b A |
10343 | |
10344 | vm_page_lock_queues(); | |
39037602 | 10345 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); |
1c79356b | 10346 | do { |
0a7de745 A |
10347 | if (m == (vm_page_t)0) { |
10348 | break; | |
10349 | } | |
1c79356b | 10350 | |
0a7de745 A |
10351 | if (m->vmp_dirty) { |
10352 | dpages++; | |
10353 | } | |
10354 | if (m->vmp_free_when_done) { | |
10355 | pgopages++; | |
10356 | } | |
10357 | if (m->vmp_precious) { | |
10358 | precpages++; | |
10359 | } | |
1c79356b | 10360 | |
39037602 | 10361 | assert(VM_PAGE_OBJECT(m) != kernel_object); |
d9a64523 | 10362 | m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); |
0a7de745 A |
10363 | if (m == (vm_page_t)0) { |
10364 | break; | |
10365 | } | |
39037602 | 10366 | } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m)); |
1c79356b | 10367 | vm_page_unlock_queues(); |
9bccf70c | 10368 | |
2d21ac55 | 10369 | vm_page_lock_queues(); |
39037602 | 10370 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled); |
2d21ac55 | 10371 | do { |
0a7de745 A |
10372 | if (m == (vm_page_t)0) { |
10373 | break; | |
10374 | } | |
2d21ac55 A |
10375 | |
10376 | dpages++; | |
d9a64523 A |
10377 | assert(m->vmp_dirty); |
10378 | assert(!m->vmp_free_when_done); | |
39037602 | 10379 | assert(VM_PAGE_OBJECT(m) != kernel_object); |
d9a64523 | 10380 | m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); |
0a7de745 A |
10381 | if (m == (vm_page_t)0) { |
10382 | break; | |
10383 | } | |
39037602 | 10384 | } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m)); |
2d21ac55 A |
10385 | vm_page_unlock_queues(); |
10386 | ||
9bccf70c | 10387 | vm_page_lock_queues(); |
39037602 | 10388 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); |
9bccf70c | 10389 | do { |
0a7de745 A |
10390 | if (m == (vm_page_t)0) { |
10391 | break; | |
10392 | } | |
9bccf70c | 10393 | |
0a7de745 A |
10394 | if (m->vmp_dirty) { |
10395 | dpages++; | |
10396 | } | |
10397 | if (m->vmp_free_when_done) { | |
10398 | pgopages++; | |
10399 | } | |
10400 | if (m->vmp_precious) { | |
10401 | precpages++; | |
10402 | } | |
9bccf70c | 10403 | |
39037602 | 10404 | assert(VM_PAGE_OBJECT(m) != kernel_object); |
d9a64523 | 10405 | m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); |
0a7de745 A |
10406 | if (m == (vm_page_t)0) { |
10407 | break; | |
10408 | } | |
39037602 | 10409 | } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m)); |
9bccf70c | 10410 | vm_page_unlock_queues(); |
1c79356b A |
10411 | |
10412 | printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages); | |
10413 | ||
0a7de745 A |
10414 | dpages = 0; |
10415 | pgopages = 0; | |
10416 | precpages = 0; | |
1c79356b A |
10417 | |
10418 | vm_page_lock_queues(); | |
39037602 | 10419 | m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); |
1c79356b A |
10420 | |
10421 | do { | |
0a7de745 A |
10422 | if (m == (vm_page_t)0) { |
10423 | break; | |
10424 | } | |
10425 | if (m->vmp_dirty) { | |
10426 | dpages++; | |
10427 | } | |
10428 | if (m->vmp_free_when_done) { | |
10429 | pgopages++; | |
10430 | } | |
10431 | if (m->vmp_precious) { | |
10432 | precpages++; | |
10433 | } | |
1c79356b | 10434 | |
39037602 | 10435 | assert(VM_PAGE_OBJECT(m) != kernel_object); |
d9a64523 | 10436 | m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); |
0a7de745 A |
10437 | if (m == (vm_page_t)0) { |
10438 | break; | |
10439 | } | |
39037602 | 10440 | } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m)); |
1c79356b A |
10441 | vm_page_unlock_queues(); |
10442 | ||
10443 | printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages); | |
1c79356b A |
10444 | } |
10445 | #endif /* MACH_BSD */ | |
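
vm_countdirtypages() repeats the same walk over four queues; the loop factors naturally into a helper. This is a sketch, not in the source (and it checks for the queue end before counting, which also avoids touching the head sentinel on an empty queue); the queue-head type matches the vm_page_queue_* accessors used above:

    /* hypothetical helper: tally dirty/pageout/precious pages on one queue */
    static void
    vm_count_queue_pages(
        vm_page_queue_head_t *q,
        int *dpages, int *pgopages, int *precpages)
    {
        vm_page_t m;

        vm_page_lock_queues();
        m = (vm_page_t) vm_page_queue_first(q);
        while (m != (vm_page_t)0 &&
            !vm_page_queue_end(q, (vm_page_queue_entry_t) m)) {
            if (m->vmp_dirty) {
                (*dpages)++;
            }
            if (m->vmp_free_when_done) {
                (*pgopages)++;
            }
            if (m->vmp_precious) {
                (*precpages)++;
            }
            m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
        }
        vm_page_unlock_queues();
    }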
10446 | ||
5ba3f43e A |
10447 | |
10448 | #if CONFIG_IOSCHED | |
0a7de745 A |
10449 | int |
10450 | upl_get_cached_tier(upl_t upl) | |
5ba3f43e | 10451 | { |
0a7de745 A |
10452 | assert(upl); |
10453 | if (upl->flags & UPL_TRACKED_BY_OBJECT) { | |
10454 | return upl->upl_priority; | |
10455 | } | |
10456 | return -1; | |
5ba3f43e | 10457 | } |
d9a64523 A |
10458 | #endif /* CONFIG_IOSCHED */ |
10459 | ||
10460 | ||
0a7de745 A |
10461 | void |
10462 | upl_callout_iodone(upl_t upl) | |
d9a64523 | 10463 | { |
0a7de745 | 10464 | struct upl_io_completion *upl_ctx = upl->upl_iodone; |
d9a64523 A |
10465 | |
10466 | if (upl_ctx) { | |
0a7de745 | 10467 | void (*iodone_func)(void *, int) = upl_ctx->io_done; |
d9a64523 A |
10468 | |
10469 | assert(upl_ctx->io_done); | |
10470 | ||
10471 | (*iodone_func)(upl_ctx->io_context, upl_ctx->io_error); | |
10472 | } | |
10473 | } | |
10474 | ||
0a7de745 A |
10475 | void |
10476 | upl_set_iodone(upl_t upl, void *upl_iodone) | |
d9a64523 | 10477 | { |
0a7de745 | 10478 | upl->upl_iodone = (struct upl_io_completion *)upl_iodone; |
d9a64523 A |
10479 | } |
10480 | ||
0a7de745 A |
10481 | void |
10482 | upl_set_iodone_error(upl_t upl, int error) | |
d9a64523 | 10483 | { |
0a7de745 | 10484 | struct upl_io_completion *upl_ctx = upl->upl_iodone; |
d9a64523 | 10485 | |
0a7de745 A |
10486 | if (upl_ctx) { |
10487 | upl_ctx->io_error = error; | |
10488 | } | |
d9a64523 A |
10489 | } |
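
The completion context is attached with upl_set_iodone(), flagged with upl_set_iodone_error() on failure, and fired by upl_callout_iodone(). A wiring sketch; the field names follow the accesses above, while my_done, my_cookie, and the error value are hypothetical:

    struct upl_io_completion ctx = {
        .io_done    = my_done,      /* void (*)(void *context, int error) */
        .io_context = my_cookie,
        .io_error   = 0,
    };

    upl_set_iodone(upl, &ctx);
    /* ... if the I/O fails: */
    upl_set_iodone_error(upl, 5 /* EIO, illustrative */);
    /* ... when the I/O completes: */
    upl_callout_iodone(upl);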
10490 | ||
5ba3f43e | 10491 | |
0a7de745 A |
10492 | ppnum_t |
10493 | upl_get_highest_page( | |
10494 | upl_t upl) | |
0c530ab8 | 10495 | { |
0a7de745 | 10496 | return upl->highest_page; |
0c530ab8 A |
10497 | } |
10498 | ||
0a7de745 A |
10499 | upl_size_t |
10500 | upl_get_size( | |
10501 | upl_t upl) | |
b0d623f7 | 10502 | { |
0a7de745 | 10503 | return upl->size; |
b0d623f7 A |
10504 | } |
10505 | ||
0a7de745 A |
10506 | upl_t |
10507 | upl_associated_upl(upl_t upl) | |
3e170ce0 A |
10508 | { |
10509 | return upl->associated_upl; | |
10510 | } | |
10511 | ||
0a7de745 A |
10512 | void |
10513 | upl_set_associated_upl(upl_t upl, upl_t associated_upl) | |
3e170ce0 A |
10514 | { |
10515 | upl->associated_upl = associated_upl; | |
10516 | } | |
10517 | ||
0a7de745 A |
10518 | struct vnode * |
10519 | upl_lookup_vnode(upl_t upl) | |
39037602 | 10520 | { |
0a7de745 | 10521 | if (!upl->map_object->internal) { |
39037602 | 10522 | return vnode_pager_lookup_vnode(upl->map_object->pager); |
0a7de745 | 10523 | } else { |
39037602 | 10524 | return NULL; |
0a7de745 | 10525 | } |
d9a64523 | 10526 | } |
39037602 | 10527 | |
b0d623f7 | 10528 | #if UPL_DEBUG |
0a7de745 A |
10529 | kern_return_t |
10530 | upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2) | |
1c79356b A |
10531 | { |
10532 | upl->ubc_alias1 = alias1; | |
10533 | upl->ubc_alias2 = alias2; | |
10534 | return KERN_SUCCESS; | |
10535 | } | |
0a7de745 A |
10536 | int |
10537 | upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2) | |
1c79356b | 10538 | { |
0a7de745 | 10539 | if (al) { |
1c79356b | 10540 | *al = upl->ubc_alias1; |
0a7de745 A |
10541 | } |
10542 | if (al2) { | |
1c79356b | 10543 | *al2 = upl->ubc_alias2; |
0a7de745 | 10544 | } |
1c79356b A |
10545 | return KERN_SUCCESS; |
10546 | } | |
91447636 | 10547 | #endif /* UPL_DEBUG */ |
fe8ab488 A |
10548 | |
10549 | #if VM_PRESSURE_EVENTS | |
10550 | /* | |
10551 | * Upward trajectory. | |
10552 | */ | |
10553 | extern boolean_t vm_compressor_low_on_space(void); | |
10554 | ||
10555 | boolean_t | |
0a7de745 A |
10556 | VM_PRESSURE_NORMAL_TO_WARNING(void) |
10557 | { | |
10558 | if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { | |
fe8ab488 A |
10559 | /* Available pages below our threshold */ |
10560 | if (memorystatus_available_pages < memorystatus_available_pages_pressure) { | |
10561 | /* No frozen processes to kill */ | |
10562 | if (memorystatus_frozen_count == 0) { | |
10563 | /* Not enough suspended processes available. */ | |
10564 | if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) { | |
10565 | return TRUE; | |
10566 | } | |
10567 | } | |
10568 | } | |
10569 | return FALSE; | |
fe8ab488 | 10570 | } else { |
0a7de745 | 10571 | return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0; |
fe8ab488 A |
10572 | } |
10573 | } | |
10574 | ||
10575 | boolean_t | |
0a7de745 A |
10576 | VM_PRESSURE_WARNING_TO_CRITICAL(void) |
10577 | { | |
10578 | if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { | |
fe8ab488 A |
10579 | /* Available pages below our threshold */ |
10580 | if (memorystatus_available_pages < memorystatus_available_pages_critical) { | |
10581 | return TRUE; | |
10582 | } | |
10583 | return FALSE; | |
10584 | } else { | |
0a7de745 | 10585 | return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0; |
fe8ab488 A |
10586 | } |
10587 | } | |
10588 | ||
10589 | /* | |
10590 | * Downward trajectory. | |
10591 | */ | |
10592 | boolean_t | |
0a7de745 A |
10593 | VM_PRESSURE_WARNING_TO_NORMAL(void) |
10594 | { | |
10595 | if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { | |
fe8ab488 | 10596 | /* Available pages above our threshold */ |
5ba3f43e | 10597 | unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100)); |
fe8ab488 A |
10598 | if (memorystatus_available_pages > target_threshold) { |
10599 | return TRUE; | |
10600 | } | |
10601 | return FALSE; | |
10602 | } else { | |
0a7de745 | 10603 | return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0; |
fe8ab488 A |
10604 | } |
10605 | } | |
10606 | ||
10607 | boolean_t | |
0a7de745 A |
10608 | VM_PRESSURE_CRITICAL_TO_WARNING(void) |
10609 | { | |
10610 | if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { | |
fe8ab488 | 10611 | /* Available pages above our threshold */ |
5ba3f43e | 10612 | unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100)); |
fe8ab488 A |
10613 | if (memorystatus_available_pages > target_threshold) { |
10614 | return TRUE; | |
10615 | } | |
10616 | return FALSE; | |
10617 | } else { | |
0a7de745 | 10618 | return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0; |
fe8ab488 A |
10619 | } |
10620 | } | |
10621 | #endif /* VM_PRESSURE_EVENTS */ | |
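
Note the 15% hysteresis between the upward and downward edges: dropping back from warning to normal requires available pages to exceed the pressure threshold by 15%, so the state cannot oscillate right at the edge. A worked computation with illustrative numbers:

    /* illustrative: memorystatus_available_pages_pressure == 4000 pages */
    unsigned int target_threshold =
        4000 + ((15 * 4000) / 100);       /* == 4600 pages */
    /* normal->warning fires below 4000; warning->normal only above 4600 */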
10622 | ||
5ba3f43e A |
10623 | |
10624 | ||
0a7de745 A |
10625 | #define VM_TEST_COLLAPSE_COMPRESSOR 0 |
10626 | #define VM_TEST_WIRE_AND_EXTRACT 0 | |
10627 | #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0 | |
5ba3f43e | 10628 | #if __arm64__ |
0a7de745 | 10629 | #define VM_TEST_KERNEL_OBJECT_FAULT 0 |
5ba3f43e | 10630 | #endif /* __arm64__ */ |
0a7de745 | 10631 | #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG) |
5ba3f43e A |
10632 | |
10633 | #if VM_TEST_COLLAPSE_COMPRESSOR | |
10634 | extern boolean_t vm_object_collapse_compressor_allowed; | |
10635 | #include <IOKit/IOLib.h> | |
10636 | static void | |
10637 | vm_test_collapse_compressor(void) | |
10638 | { | |
0a7de745 A |
10639 | vm_object_size_t backing_size, top_size; |
10640 | vm_object_t backing_object, top_object; | |
10641 | vm_map_offset_t backing_offset, top_offset; | |
10642 | unsigned char *backing_address, *top_address; | |
10643 | kern_return_t kr; | |
5ba3f43e A |
10644 | |
10645 | printf("VM_TEST_COLLAPSE_COMPRESSOR:\n"); | |
10646 | ||
10647 | /* create backing object */ | |
10648 | backing_size = 15 * PAGE_SIZE; | |
10649 | backing_object = vm_object_allocate(backing_size); | |
10650 | assert(backing_object != VM_OBJECT_NULL); | |
10651 | printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n", | |
0a7de745 | 10652 | backing_object); |
5ba3f43e A |
10653 | /* map backing object */ |
10654 | backing_offset = 0; | |
10655 | kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0, | |
0a7de745 A |
10656 | VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, |
10657 | backing_object, 0, FALSE, | |
10658 | VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); | |
5ba3f43e A |
10659 | assert(kr == KERN_SUCCESS); |
10660 | backing_address = (unsigned char *) backing_offset; | |
10661 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " | |
0a7de745 A |
10662 | "mapped backing object %p at 0x%llx\n", |
10663 | backing_object, (uint64_t) backing_offset); | |
5ba3f43e | 10664 | /* populate with pages to be compressed in backing object */ |
0a7de745 A |
10665 | backing_address[0x1 * PAGE_SIZE] = 0xB1; |
10666 | backing_address[0x4 * PAGE_SIZE] = 0xB4; | |
10667 | backing_address[0x7 * PAGE_SIZE] = 0xB7; | |
10668 | backing_address[0xa * PAGE_SIZE] = 0xBA; | |
10669 | backing_address[0xd * PAGE_SIZE] = 0xBD; | |
5ba3f43e | 10670 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " |
0a7de745 A |
10671 | "populated pages to be compressed in " |
10672 | "backing_object %p\n", backing_object); | |
5ba3f43e A |
10673 | /* compress backing object */ |
10674 | vm_object_pageout(backing_object); | |
10675 | printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n", | |
0a7de745 | 10676 | backing_object); |
5ba3f43e | 10677 | /* wait for all the pages to be gone */ |
0a7de745 | 10678 | while (*(volatile int *)&backing_object->resident_page_count != 0) { |
5ba3f43e | 10679 | IODelay(10); |
0a7de745 | 10680 | } |
5ba3f43e | 10681 | printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n", |
0a7de745 | 10682 | backing_object); |
5ba3f43e | 10683 | /* populate with pages to be resident in backing object */ |
0a7de745 A |
10684 | backing_address[0x0 * PAGE_SIZE] = 0xB0; |
10685 | backing_address[0x3 * PAGE_SIZE] = 0xB3; | |
10686 | backing_address[0x6 * PAGE_SIZE] = 0xB6; | |
10687 | backing_address[0x9 * PAGE_SIZE] = 0xB9; | |
10688 | backing_address[0xc * PAGE_SIZE] = 0xBC; | |
5ba3f43e | 10689 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " |
0a7de745 A |
10690 | "populated pages to be resident in " |
10691 | "backing_object %p\n", backing_object); | |
5ba3f43e A |
10692 | /* leave the other pages absent */ |
10693 | /* mess with the paging_offset of the backing_object */ | |
10694 | assert(backing_object->paging_offset == 0); | |
10695 | backing_object->paging_offset = 0x3000; | |
10696 | ||
10697 | /* create top object */ | |
10698 | top_size = 9 * PAGE_SIZE; | |
10699 | top_object = vm_object_allocate(top_size); | |
10700 | assert(top_object != VM_OBJECT_NULL); | |
10701 | printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n", | |
0a7de745 | 10702 | top_object); |
5ba3f43e A |
10703 | /* map top object */ |
10704 | top_offset = 0; | |
10705 | kr = vm_map_enter(kernel_map, &top_offset, top_size, 0, | |
0a7de745 A |
10706 | VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, |
10707 | top_object, 0, FALSE, | |
10708 | VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); | |
5ba3f43e A |
10709 | assert(kr == KERN_SUCCESS); |
10710 | top_address = (unsigned char *) top_offset; | |
10711 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " | |
0a7de745 A |
10712 | "mapped top object %p at 0x%llx\n", |
10713 | top_object, (uint64_t) top_offset); | |
5ba3f43e | 10714 | /* populate with pages to be compressed in top object */ |
0a7de745 A |
10715 | top_address[0x3 * PAGE_SIZE] = 0xA3; |
10716 | top_address[0x4 * PAGE_SIZE] = 0xA4; | |
10717 | top_address[0x5 * PAGE_SIZE] = 0xA5; | |
5ba3f43e | 10718 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " |
0a7de745 A |
10719 | "populated pages to be compressed in " |
10720 | "top_object %p\n", top_object); | |
5ba3f43e A |
10721 | /* compress top object */ |
10722 | vm_object_pageout(top_object); | |
10723 | printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n", | |
0a7de745 | 10724 | top_object); |
5ba3f43e | 10725 | /* wait for all the pages to be gone */ |
0a7de745 | 10726 | while (top_object->resident_page_count != 0) { |
5ba3f43e | 10727 | IODelay(10); |
0a7de745 | 10728 | } |
5ba3f43e | 10729 | printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n", |
0a7de745 | 10730 | top_object); |
5ba3f43e | 10731 | /* populate with pages to be resident in top object */ |
0a7de745 A |
10732 | top_address[0x0 * PAGE_SIZE] = 0xA0; |
10733 | top_address[0x1 * PAGE_SIZE] = 0xA1; | |
10734 | top_address[0x2 * PAGE_SIZE] = 0xA2; | |
5ba3f43e | 10735 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " |
0a7de745 A |
10736 | "populated pages to be resident in " |
10737 | "top_object %p\n", top_object); | |
5ba3f43e A |
10738 | /* leave the other pages absent */ |
10739 | ||
10740 | /* link the 2 objects */ | |
10741 | vm_object_reference(backing_object); | |
10742 | top_object->shadow = backing_object; | |
10743 | top_object->vo_shadow_offset = 0x3000; | |
10744 | printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n", | |
0a7de745 | 10745 | top_object, backing_object); |
5ba3f43e A |
10746 | |
10747 | /* unmap backing object */ | |
10748 | vm_map_remove(kernel_map, | |
0a7de745 A |
10749 | backing_offset, |
10750 | backing_offset + backing_size, | |
10751 | VM_MAP_REMOVE_NO_FLAGS); | |
5ba3f43e | 10752 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " |
0a7de745 A |
10753 | "unmapped backing_object %p [0x%llx:0x%llx]\n", |
10754 | backing_object, | |
10755 | (uint64_t) backing_offset, | |
10756 | (uint64_t) (backing_offset + backing_size)); | |
5ba3f43e A |
10757 | |
10758 | /* collapse */ | |
10759 | printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object); | |
10760 | vm_object_lock(top_object); | |
10761 | vm_object_collapse(top_object, 0, FALSE); | |
10762 | vm_object_unlock(top_object); | |
10763 | printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object); | |
10764 | ||
10765 | /* did it work? */ | |
10766 | if (top_object->shadow != VM_OBJECT_NULL) { | |
10767 | printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n"); | |
10768 | printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); | |
10769 | if (vm_object_collapse_compressor_allowed) { | |
10770 | panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); | |
10771 | } | |
10772 | } else { | |
10773 | /* check the contents of the mapping */ | |
10774 | unsigned char expect[9] = | |
0a7de745 A |
10775 | { 0xA0, 0xA1, 0xA2, /* resident in top */ |
10776 | 0xA3, 0xA4, 0xA5, /* compressed in top */ | |
10777 | 0xB9, /* resident in backing + shadow_offset */ | |
10778 | 0xBD, /* compressed in backing + shadow_offset + paging_offset */ | |
10779 | 0x00 }; /* absent in both */ | |
5ba3f43e A |
10780 | unsigned char actual[9]; |
10781 | unsigned int i, errors; | |
10782 | ||
10783 | errors = 0; | |
0a7de745 A |
10784 | for (i = 0; i < sizeof(actual); i++) { |
10785 | actual[i] = (unsigned char) top_address[i * PAGE_SIZE]; | |
5ba3f43e A |
10786 | if (actual[i] != expect[i]) { |
10787 | errors++; | |
10788 | } | |
10789 | } | |
10790 | printf("VM_TEST_COLLAPSE_COMPRESSOR: " | |
0a7de745 A |
10791 | "actual [%x %x %x %x %x %x %x %x %x] " |
10792 | "expect [%x %x %x %x %x %x %x %x %x] " | |
10793 | "%d errors\n", | |
10794 | actual[0], actual[1], actual[2], actual[3], | |
10795 | actual[4], actual[5], actual[6], actual[7], | |
10796 | actual[8], | |
10797 | expect[0], expect[1], expect[2], expect[3], | |
10798 | expect[4], expect[5], expect[6], expect[7], | |
10799 | expect[8], | |
10800 | errors); | |
5ba3f43e A |
10801 | if (errors) { |
10802 | panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); | |
10803 | } else { | |
10804 | printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n"); | |
10805 | } | |
10806 | } | |
10807 | } | |
10808 | #else /* VM_TEST_COLLAPSE_COMPRESSOR */ | |
10809 | #define vm_test_collapse_compressor() | |
10810 | #endif /* VM_TEST_COLLAPSE_COMPRESSOR */ | |
10811 | ||
10812 | #if VM_TEST_WIRE_AND_EXTRACT | |
0a7de745 | 10813 | extern ledger_template_t task_ledger_template; |
5ba3f43e A |
10814 | #include <mach/mach_vm.h> |
10815 | extern ppnum_t vm_map_get_phys_page(vm_map_t map, | |
0a7de745 | 10816 | vm_offset_t offset); |
5ba3f43e A |
10817 | static void |
10818 | vm_test_wire_and_extract(void) | |
10819 | { | |
0a7de745 A |
10820 | ledger_t ledger; |
10821 | vm_map_t user_map, wire_map; | |
10822 | mach_vm_address_t user_addr, wire_addr; | |
10823 | mach_vm_size_t user_size, wire_size; | |
10824 | mach_vm_offset_t cur_offset; | |
10825 | vm_prot_t cur_prot, max_prot; | |
10826 | ppnum_t user_ppnum, wire_ppnum; | |
10827 | kern_return_t kr; | |
5ba3f43e A |
10828 | |
10829 | ledger = ledger_instantiate(task_ledger_template, | |
0a7de745 | 10830 | LEDGER_CREATE_ACTIVE_ENTRIES); |
cb323159 | 10831 | user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT), |
0a7de745 A |
10832 | 0x100000000ULL, |
10833 | 0x200000000ULL, | |
10834 | TRUE); | |
5ba3f43e | 10835 | wire_map = vm_map_create(NULL, |
0a7de745 A |
10836 | 0x100000000ULL, |
10837 | 0x200000000ULL, | |
10838 | TRUE); | |
5ba3f43e A |
10839 | user_addr = 0; |
10840 | user_size = 0x10000; | |
10841 | kr = mach_vm_allocate(user_map, | |
0a7de745 A |
10842 | &user_addr, |
10843 | user_size, | |
10844 | VM_FLAGS_ANYWHERE); | |
5ba3f43e A |
10845 | assert(kr == KERN_SUCCESS); |
10846 | wire_addr = 0; | |
10847 | wire_size = user_size; | |
10848 | kr = mach_vm_remap(wire_map, | |
0a7de745 A |
10849 | &wire_addr, |
10850 | wire_size, | |
10851 | 0, | |
10852 | VM_FLAGS_ANYWHERE, | |
10853 | user_map, | |
10854 | user_addr, | |
10855 | FALSE, | |
10856 | &cur_prot, | |
10857 | &max_prot, | |
10858 | VM_INHERIT_NONE); | |
5ba3f43e A |
10859 | assert(kr == KERN_SUCCESS); |
10860 | for (cur_offset = 0; | |
0a7de745 A |
10861 | cur_offset < wire_size; |
10862 | cur_offset += PAGE_SIZE) { | |
5ba3f43e | 10863 | kr = vm_map_wire_and_extract(wire_map, |
0a7de745 A |
10864 | wire_addr + cur_offset, |
10865 | VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), | |
10866 | TRUE, | |
10867 | &wire_ppnum); | |
5ba3f43e A |
10868 | assert(kr == KERN_SUCCESS); |
10869 | user_ppnum = vm_map_get_phys_page(user_map, | |
0a7de745 | 10870 | user_addr + cur_offset); |
5ba3f43e | 10871 | printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x " |
0a7de745 A |
10872 | "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", |
10873 | kr, | |
10874 | user_map, user_addr + cur_offset, user_ppnum, | |
10875 | wire_map, wire_addr + cur_offset, wire_ppnum); | |
5ba3f43e A |
10876 | if (kr != KERN_SUCCESS || |
10877 | wire_ppnum == 0 || | |
10878 | wire_ppnum != user_ppnum) { | |
10879 | panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); | |
10880 | } | |
10881 | } | |
10882 | cur_offset -= PAGE_SIZE; | |
10883 | kr = vm_map_wire_and_extract(wire_map, | |
0a7de745 A |
10884 | wire_addr + cur_offset, |
10885 | VM_PROT_DEFAULT, | |
10886 | TRUE, | |
10887 | &wire_ppnum); | |
5ba3f43e A |
10888 | assert(kr == KERN_SUCCESS); |
10889 | printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x " | |
0a7de745 A |
10890 | "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", |
10891 | kr, | |
10892 | user_map, user_addr + cur_offset, user_ppnum, | |
10893 | wire_map, wire_addr + cur_offset, wire_ppnum); | |
5ba3f43e A |
10894 | if (kr != KERN_SUCCESS || |
10895 | wire_ppnum == 0 || | |
10896 | wire_ppnum != user_ppnum) { | |
10897 | panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); | |
10898 | } | |
10899 | ||
10900 | printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n"); | |
10901 | } | |
10902 | #else /* VM_TEST_WIRE_AND_EXTRACT */ | |
10903 | #define vm_test_wire_and_extract() | |
10904 | #endif /* VM_TEST_WIRE_AND_EXTRACT */ | |
10905 | ||
10906 | #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC | |
10907 | static void | |
10908 | vm_test_page_wire_overflow_panic(void) | |
10909 | { | |
10910 | vm_object_t object; | |
10911 | vm_page_t page; | |
10912 | ||
10913 | printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n"); | |
10914 | ||
10915 | object = vm_object_allocate(PAGE_SIZE); | |
10916 | vm_object_lock(object); | |
10917 | page = vm_page_alloc(object, 0x0); | |
10918 | vm_page_lock_queues(); | |
10919 | do { | |
10920 | vm_page_wire(page, 1, FALSE); | |
10921 | } while (page->wire_count != 0); | |
10922 | vm_page_unlock_queues(); | |
10923 | vm_object_unlock(object); | |
10924 | panic("FBDP(%p,%p): wire_count overflow not detected\n", | |
0a7de745 | 10925 | object, page); |
5ba3f43e A |
10926 | } |
10927 | #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ | |
10928 | #define vm_test_page_wire_overflow_panic() | |
10929 | #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ | |
10930 | ||
10931 | #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT | |
10932 | extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit); | |
10933 | static void | |
10934 | vm_test_kernel_object_fault(void) | |
10935 | { | |
10936 | kern_return_t kr; | |
10937 | vm_offset_t stack; | |
10938 | uintptr_t frameb[2]; | |
10939 | int ret; | |
10940 | ||
10941 | kr = kernel_memory_allocate(kernel_map, &stack, | |
0a7de745 A |
10942 | kernel_stack_size + (2 * PAGE_SIZE), |
10943 | 0, | |
10944 | (KMA_KSTACK | KMA_KOBJECT | | |
10945 | KMA_GUARD_FIRST | KMA_GUARD_LAST), | |
10946 | VM_KERN_MEMORY_STACK); | |
5ba3f43e A |
10947 | if (kr != KERN_SUCCESS) { |
10948 | panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr); | |
10949 | } | |
10950 | ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE); | |
10951 | if (ret != 0) { | |
10952 | printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n"); | |
10953 | } else { | |
10954 | printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n"); | |
10955 | } | |
10956 | vm_map_remove(kernel_map, | |
0a7de745 A |
10957 | stack, |
10958 | stack + kernel_stack_size + (2 * PAGE_SIZE), | |
10959 | VM_MAP_REMOVE_KUNWIRE); | |
5ba3f43e A |
10960 | stack = 0; |
10961 | } | |
10962 | #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ | |
10963 | #define vm_test_kernel_object_fault() | |
10964 | #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ | |
10965 | ||
10966 | #if VM_TEST_DEVICE_PAGER_TRANSPOSE | |
10967 | static void | |
10968 | vm_test_device_pager_transpose(void) | |
10969 | { | |
0a7de745 A |
10970 | memory_object_t device_pager; |
10971 | vm_object_t anon_object, device_object; | |
10972 | vm_size_t size; | |
10973 | vm_map_offset_t device_mapping; | |
10974 | kern_return_t kr; | |
5ba3f43e A |
10975 | |
10976 | size = 3 * PAGE_SIZE; | |
10977 | anon_object = vm_object_allocate(size); | |
10978 | assert(anon_object != VM_OBJECT_NULL); | |
10979 | device_pager = device_pager_setup(NULL, 0, size, 0); | |
10980 | assert(device_pager != NULL); | |
10981 | device_object = memory_object_to_vm_object(device_pager); | |
10982 | assert(device_object != VM_OBJECT_NULL); | |
0a7de745 A |
10983 | #if 0 |
10984 | /* | |
10985 | * Can't actually map this, since another thread might do a | |
10986 | * vm_map_enter() that gets coalesced into this object, which | |
10987 | * would cause the test to fail. | |
10988 | */ | |
10989 | vm_map_offset_t anon_mapping = 0; | |
5ba3f43e | 10990 | kr = vm_map_enter(kernel_map, &anon_mapping, size, 0, |
0a7de745 A |
10991 | VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, |
10992 | anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, | |
10993 | VM_INHERIT_DEFAULT); | |
5ba3f43e | 10994 | assert(kr == KERN_SUCCESS); |
0a7de745 | 10995 | #endif |
5ba3f43e A |
10996 | device_mapping = 0; |
10997 | kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0, | |
0a7de745 A |
10998 | VM_FLAGS_ANYWHERE, |
10999 | VM_MAP_KERNEL_FLAGS_NONE, | |
11000 | VM_KERN_MEMORY_NONE, | |
11001 | (void *)device_pager, 0, FALSE, | |
11002 | VM_PROT_DEFAULT, VM_PROT_ALL, | |
11003 | VM_INHERIT_DEFAULT); | |
5ba3f43e A |
11004 | assert(kr == KERN_SUCCESS); |
11005 | memory_object_deallocate(device_pager); | |
11006 | ||
11007 | vm_object_lock(anon_object); | |
11008 | vm_object_activity_begin(anon_object); | |
11009 | anon_object->blocked_access = TRUE; | |
11010 | vm_object_unlock(anon_object); | |
11011 | vm_object_lock(device_object); | |
11012 | vm_object_activity_begin(device_object); | |
11013 | device_object->blocked_access = TRUE; | |
11014 | vm_object_unlock(device_object); | |
11015 | ||
11016 | assert(anon_object->ref_count == 1); | |
11017 | assert(!anon_object->named); | |
11018 | assert(device_object->ref_count == 2); | |
11019 | assert(device_object->named); | |
11020 | ||
11021 | kr = vm_object_transpose(device_object, anon_object, size); | |
11022 | assert(kr == KERN_SUCCESS); | |
11023 | ||
11024 | vm_object_lock(anon_object); | |
11025 | vm_object_activity_end(anon_object); | |
11026 | anon_object->blocked_access = FALSE; | |
11027 | vm_object_unlock(anon_object); | |
11028 | vm_object_lock(device_object); | |
11029 | vm_object_activity_end(device_object); | |
11030 | device_object->blocked_access = FALSE; | |
11031 | vm_object_unlock(device_object); | |
11032 | ||
11033 | assert(anon_object->ref_count == 2); | |
11034 | assert(anon_object->named); | |
0a7de745 | 11035 | #if 0 |
5ba3f43e A |
11036 | kr = vm_deallocate(kernel_map, anon_mapping, size); |
11037 | assert(kr == KERN_SUCCESS); | |
0a7de745 | 11038 | #endif |
5ba3f43e A |
11039 | assert(device_object->ref_count == 1); |
11040 | assert(!device_object->named); | |
11041 | kr = vm_deallocate(kernel_map, device_mapping, size); | |
11042 | assert(kr == KERN_SUCCESS); | |
11043 | ||
11044 | printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n"); | |
11045 | } | |
11046 | #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ | |
11047 | #define vm_test_device_pager_transpose() | |
11048 | #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ | |
11049 | ||
11050 | void | |
11051 | vm_tests(void) | |
11052 | { | |
11053 | vm_test_collapse_compressor(); | |
11054 | vm_test_wire_and_extract(); | |
11055 | vm_test_page_wire_overflow_panic(); | |
11056 | vm_test_kernel_object_fault(); | |
11057 | vm_test_device_pager_transpose(); | |
11058 | } |
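
Each vm_test_*() above collapses to an empty macro when its VM_TEST_* switch is 0, so vm_tests() costs nothing in normal builds. The pattern in miniature, with a hypothetical test name:

    #define VM_TEST_EXAMPLE 0

    #if VM_TEST_EXAMPLE
    static void
    vm_test_example(void)
    {
        printf("VM_TEST_EXAMPLE: PASS\n");
    }
    #else /* VM_TEST_EXAMPLE */
    #define vm_test_example()
    #endif /* VM_TEST_EXAMPLE */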