Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
f427ee49 | 2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
0a7de745 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0a7de745 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
0a7de745 | 17 | * |
2d21ac55 A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0a7de745 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
0a7de745 | 31 | /* |
1c79356b A |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
0a7de745 | 35 | * |
1c79356b A |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
0a7de745 | 41 | * |
1c79356b A |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
0a7de745 | 45 | * |
1c79356b | 46 | * Carnegie Mellon requests users of this software to return to |
0a7de745 | 47 | * |
1c79356b A |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
0a7de745 | 52 | * |
1c79356b A |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: vm/vm_object.c | |
60 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
61 | * | |
62 | * Virtual memory object module. | |
63 | */ | |
64 | ||
2d21ac55 | 65 | #include <debug.h> |
1c79356b | 66 | #include <mach_pagemap.h> |
1c79356b | 67 | |
0b4e3aa0 | 68 | #include <mach/mach_types.h> |
1c79356b A |
69 | #include <mach/memory_object.h> |
70 | #include <mach/memory_object_default.h> | |
71 | #include <mach/memory_object_control_server.h> | |
72 | #include <mach/vm_param.h> | |
91447636 | 73 | |
316670eb A |
74 | #include <mach/sdt.h> |
75 | ||
91447636 | 76 | #include <ipc/ipc_types.h> |
1c79356b | 77 | #include <ipc/ipc_port.h> |
91447636 A |
78 | |
79 | #include <kern/kern_types.h> | |
1c79356b | 80 | #include <kern/assert.h> |
1c79356b | 81 | #include <kern/queue.h> |
6d2010ae | 82 | #include <kern/kalloc.h> |
1c79356b A |
83 | #include <kern/zalloc.h> |
84 | #include <kern/host.h> | |
85 | #include <kern/host_statistics.h> | |
86 | #include <kern/processor.h> | |
91447636 | 87 | #include <kern/misc_protos.h> |
39037602 | 88 | #include <kern/policy_internal.h> |
91447636 | 89 | |
1c79356b | 90 | #include <vm/memory_object.h> |
39236c6e | 91 | #include <vm/vm_compressor_pager.h> |
1c79356b A |
92 | #include <vm/vm_fault.h> |
93 | #include <vm/vm_map.h> | |
94 | #include <vm/vm_object.h> | |
95 | #include <vm/vm_page.h> | |
96 | #include <vm/vm_pageout.h> | |
91447636 | 97 | #include <vm/vm_protos.h> |
2d21ac55 | 98 | #include <vm/vm_purgeable_internal.h> |
1c79356b | 99 | |
39236c6e A |
100 | #include <vm/vm_compressor.h> |
101 | ||
fe8ab488 A |
102 | #if CONFIG_PHANTOM_CACHE |
103 | #include <vm/vm_phantom_cache.h> | |
104 | #endif | |
105 | ||
d9a64523 A |
106 | #if VM_OBJECT_ACCESS_TRACKING |
107 | uint64_t vm_object_access_tracking_reads = 0; | |
108 | uint64_t vm_object_access_tracking_writes = 0; | |
109 | #endif /* VM_OBJECT_ACCESS_TRACKING */ | |
110 | ||
fe8ab488 A |
111 | boolean_t vm_object_collapse_compressor_allowed = TRUE; |
112 | ||
113 | struct vm_counters vm_counters; | |
114 | ||
f427ee49 A |
115 | #if DEVELOPMENT || DEBUG |
116 | extern struct memory_object_pager_ops shared_region_pager_ops; | |
117 | extern unsigned int shared_region_pagers_resident_count; | |
118 | extern unsigned int shared_region_pagers_resident_peak; | |
119 | #endif /* DEVELOPMENT || DEBUG */ | |
120 | ||
fe8ab488 A |
121 | #if VM_OBJECT_TRACKING |
122 | boolean_t vm_object_tracking_inited = FALSE; | |
fe8ab488 | 123 | btlog_t *vm_object_tracking_btlog; |
39037602 | 124 | |
fe8ab488 A |
125 | void |
126 | vm_object_tracking_init(void) | |
127 | { | |
128 | int vm_object_tracking; | |
129 | ||
130 | vm_object_tracking = 1; | |
0a7de745 A |
131 | PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, |
132 | sizeof(vm_object_tracking)); | |
fe8ab488 A |
133 | |
134 | if (vm_object_tracking) { | |
fe8ab488 | 135 | vm_object_tracking_btlog = btlog_create( |
39037602 | 136 | VM_OBJECT_TRACKING_NUM_RECORDS, |
fe8ab488 | 137 | VM_OBJECT_TRACKING_BTDEPTH, |
39037602 | 138 | TRUE /* caller_will_remove_entries_for_element? */); |
fe8ab488 A |
139 | assert(vm_object_tracking_btlog); |
140 | vm_object_tracking_inited = TRUE; | |
141 | } | |
142 | } | |
143 | #endif /* VM_OBJECT_TRACKING */ | |
144 | ||
1c79356b A |
145 | /* |
146 | * Virtual memory objects maintain the actual data | |
147 | * associated with allocated virtual memory. A given | |
148 | * page of memory exists within exactly one object. | |
149 | * | |
150 | * An object is only deallocated when all "references" | |
0b4e3aa0 | 151 | * are given up. |
1c79356b A |
152 | * |
153 | * Associated with each object is a list of all resident | |
154 | * memory pages belonging to that object; this list is | |
155 | * maintained by the "vm_page" module, but locked by the object's | |
156 | * lock. | |
157 | * | |
0b4e3aa0 | 158 | * Each object also records the memory object reference |
1c79356b | 159 | * that is used by the kernel to request and write |
0b4e3aa0 | 160 | * back data (the memory object, field "pager"), etc... |
1c79356b A |
161 | * |
162 | * Virtual memory objects are allocated to provide | |
163 | * zero-filled memory (vm_allocate) or map a user-defined | |
164 | * memory object into a virtual address space (vm_map). | |
165 | * | |
166 | * Virtual memory objects that refer to a user-defined | |
167 | * memory object are called "permanent", because all changes | |
168 | * made in virtual memory are reflected back to the | |
169 | * memory manager, which may then store them permanently. | |
170 | * Other virtual memory objects are called "temporary", | |
171 | * meaning that changes need be written back only when | |
172 | * necessary to reclaim pages, and that storage associated | |
173 | * with the object can be discarded once it is no longer | |
174 | * mapped. | |
175 | * | |
176 | * A permanent memory object may be mapped into more | |
177 | * than one virtual address space. Moreover, two threads | |
178 | * may attempt to make the first mapping of a memory | |
179 | * object concurrently. Only one thread is allowed to | |
180 | * complete this mapping; all others wait until the | |
181 | * "pager_initialized" field is asserted, indicating | |
182 | * that the first thread has initialized all of the | |
183 | * necessary fields in the virtual memory object structure. | |
184 | * | |
185 | * The kernel relies on a *default memory manager* to | |
186 | * provide backing storage for the zero-filled virtual | |
0b4e3aa0 | 187 | * memory objects. The pager memory objects associated |
1c79356b | 188 | * with these temporary virtual memory objects are only |
0b4e3aa0 A |
189 | * requested from the default memory manager when it |
190 | * becomes necessary. Virtual memory objects | |
1c79356b A |
191 | * that depend on the default memory manager are called |
192 | * "internal". The "pager_created" field is provided to | |
193 | * indicate whether these ports have ever been allocated. | |
0a7de745 | 194 | * |
1c79356b A |
195 | * The kernel may also create virtual memory objects to |
196 | * hold changed pages after a copy-on-write operation. | |
197 | * In this case, the virtual memory object (and its | |
198 | * backing storage -- its memory object) only contain | |
199 | * those pages that have been changed. The "shadow" | |
200 | * field refers to the virtual memory object that contains | |
201 | * the remainder of the contents. The "shadow_offset" | |
202 | * field indicates where in the "shadow" these contents begin. | |
203 | * The "copy" field refers to a virtual memory object | |
204 | * to which changed pages must be copied before changing | |
205 | * this object, in order to implement another form | |
206 | * of copy-on-write optimization. | |
207 | * | |
208 | * The virtual memory object structure also records | |
209 | * the attributes associated with its memory object. | |
210 | * The "pager_ready", "can_persist" and "copy_strategy" | |
211 | * fields represent those attributes. The "cached_list" | |
212 | * field is used in the implementation of the persistence | |
213 | * attribute. | |
214 | * | |
215 | * ZZZ Continue this comment. | |
216 | */ | |
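/*
 * Illustrative sketch (added, not part of the original file): the "shadow"
 * and "vo_shadow_offset" fields described above form a chain that a fault
 * handler conceptually walks until it finds a resident page or reaches the
 * bottom object.  This helper only demonstrates the walk for a page-aligned
 * offset; the real lookup (see vm_fault_page()) also takes the object locks
 * and handles busy pages, copy objects and pagers.  The function name is
 * hypothetical.
 */
__unused static vm_object_t
vm_object_shadow_walk_sketch(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_object_t cur = object;

	for (;;) {
		if (vm_page_lookup(cur, offset) != VM_PAGE_NULL) {
			return cur;     /* page is resident in this object */
		}
		if (cur->shadow == VM_OBJECT_NULL) {
			return cur;     /* bottom of the shadow chain */
		}
		offset += cur->vo_shadow_offset;
		cur = cur->shadow;
	}
}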
217 | ||
218 | /* Forward declarations for internal functions. */ | |
0a7de745 A |
219 | static kern_return_t vm_object_terminate( |
220 | vm_object_t object); | |
1c79356b | 221 | |
0a7de745 A |
222 | static kern_return_t vm_object_copy_call( |
223 | vm_object_t src_object, | |
224 | vm_object_offset_t src_offset, | |
225 | vm_object_size_t size, | |
226 | vm_object_t *_result_object); | |
1c79356b | 227 | |
0a7de745 A |
228 | static void vm_object_do_collapse( |
229 | vm_object_t object, | |
230 | vm_object_t backing_object); | |
1c79356b | 231 | |
0a7de745 A |
232 | static void vm_object_do_bypass( |
233 | vm_object_t object, | |
234 | vm_object_t backing_object); | |
1c79356b | 235 | |
0a7de745 A |
236 | static void vm_object_release_pager( |
237 | memory_object_t pager); | |
1c79356b | 238 | |
f427ee49 | 239 | SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */ |
1c79356b A |
240 | |
241 | /* | |
242 | * All wired-down kernel memory belongs to a single virtual | |
243 | * memory object (kernel_object) to avoid wasting data structures. | |
244 | */ | |
f427ee49 | 245 | static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED; |
c3c9b80d | 246 | SECURITY_READ_ONLY_LATE(vm_object_t) kernel_object = &kernel_object_store; |
1c79356b | 247 | |
f427ee49 | 248 | static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED; |
c3c9b80d A |
249 | SECURITY_READ_ONLY_LATE(vm_object_t) compressor_object = &compressor_object_store; |
250 | ||
251 | /* | |
252 | * This object holds all pages that have been retired due to errors like ECC. | |
253 | * The system should never use the page or look at its contents. The offset | |
254 | * in this object is the same as the page's physical address. | |
255 | */ | |
256 | static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED; | |
257 | SECURITY_READ_ONLY_LATE(vm_object_t) retired_pages_object = &retired_pages_object_store; | |
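/*
 * Illustrative sketch (added, not part of the original file): since the
 * offset of a page in retired_pages_object mirrors its physical address,
 * the offset used when retiring a page would conceptually be derived as
 * below.  The helper name is hypothetical; the real retirement path also
 * handles locking and page queue state.
 */
__unused static vm_object_offset_t
vm_page_retired_offset_sketch(vm_page_t p)
{
	/* physical page number scaled up to a byte offset */
	return (vm_object_offset_t)ptoa_64(VM_PAGE_GET_PHYS_PAGE(p));
}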
2d21ac55 | 258 | |
1c79356b A |
259 | /* |
260 | * The submap object is used as a placeholder for vm_map_submap | |
261 | * operations. The object is declared in vm_map.c because it | |
262 | * is exported by the vm_map module. The storage is declared | |
263 | * here because it must be initialized here. | |
264 | */ | |
f427ee49 | 265 | static struct vm_object vm_submap_object_store VM_PAGE_PACKED_ALIGNED; |
c3c9b80d A |
266 | SECURITY_READ_ONLY_LATE(vm_object_t) vm_submap_object = &vm_submap_object_store; |
267 | ||
1c79356b A |
268 | |
269 | /* | |
270 | * Virtual memory objects are initialized from | |
271 | * a template (see vm_object_allocate). | |
272 | * | |
273 | * When adding a new field to the virtual memory | |
274 | * object structure, be sure to add initialization | |
0b4e3aa0 | 275 | * (see _vm_object_allocate()). |
1c79356b | 276 | */ |
f427ee49 A |
277 | static const struct vm_object vm_object_template = { |
278 | .memq.prev = 0, | |
279 | .memq.next = 0, | |
280 | /* | |
281 | * The lock will be initialized for each allocated object in | |
282 | * _vm_object_allocate(), so we don't need to initialize it in | |
283 | * the vm_object_template. | |
284 | */ | |
285 | #if DEVELOPMENT || DEBUG | |
286 | .Lock_owner = 0, | |
287 | #endif | |
288 | .vo_size = 0, | |
289 | .memq_hint = VM_PAGE_NULL, | |
290 | .ref_count = 1, | |
f427ee49 A |
291 | .resident_page_count = 0, |
292 | .wired_page_count = 0, | |
293 | .reusable_page_count = 0, | |
294 | .copy = VM_OBJECT_NULL, | |
295 | .shadow = VM_OBJECT_NULL, | |
296 | .vo_shadow_offset = (vm_object_offset_t) 0, | |
297 | .pager = MEMORY_OBJECT_NULL, | |
298 | .paging_offset = 0, | |
299 | .pager_control = MEMORY_OBJECT_CONTROL_NULL, | |
300 | .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC, | |
301 | .paging_in_progress = 0, | |
302 | #if __LP64__ | |
303 | .__object1_unused_bits = 0, | |
304 | #endif /* __LP64__ */ | |
305 | .activity_in_progress = 0, | |
306 | ||
307 | /* Begin bitfields */ | |
308 | .all_wanted = 0, /* all bits FALSE */ | |
309 | .pager_created = FALSE, | |
310 | .pager_initialized = FALSE, | |
311 | .pager_ready = FALSE, | |
312 | .pager_trusted = FALSE, | |
313 | .can_persist = FALSE, | |
314 | .internal = TRUE, | |
315 | .private = FALSE, | |
316 | .pageout = FALSE, | |
317 | .alive = TRUE, | |
318 | .purgable = VM_PURGABLE_DENY, | |
319 | .purgeable_when_ripe = FALSE, | |
320 | .purgeable_only_by_kernel = FALSE, | |
321 | .shadowed = FALSE, | |
322 | .true_share = FALSE, | |
323 | .terminating = FALSE, | |
324 | .named = FALSE, | |
325 | .shadow_severed = FALSE, | |
326 | .phys_contiguous = FALSE, | |
327 | .nophyscache = FALSE, | |
328 | /* End bitfields */ | |
329 | ||
330 | .cached_list.prev = NULL, | |
331 | .cached_list.next = NULL, | |
332 | ||
333 | .last_alloc = (vm_object_offset_t) 0, | |
334 | .sequential = (vm_object_offset_t) 0, | |
335 | .pages_created = 0, | |
336 | .pages_used = 0, | |
337 | .scan_collisions = 0, | |
338 | #if CONFIG_PHANTOM_CACHE | |
339 | .phantom_object_id = 0, | |
340 | #endif | |
341 | .cow_hint = ~(vm_offset_t)0, | |
342 | ||
343 | /* cache bitfields */ | |
344 | .wimg_bits = VM_WIMG_USE_DEFAULT, | |
345 | .set_cache_attr = FALSE, | |
346 | .object_is_shared_cache = FALSE, | |
347 | .code_signed = FALSE, | |
348 | .transposed = FALSE, | |
349 | .mapping_in_progress = FALSE, | |
350 | .phantom_isssd = FALSE, | |
351 | .volatile_empty = FALSE, | |
352 | .volatile_fault = FALSE, | |
353 | .all_reusable = FALSE, | |
354 | .blocked_access = FALSE, | |
355 | .vo_ledger_tag = VM_LEDGER_TAG_NONE, | |
356 | .vo_no_footprint = FALSE, | |
357 | #if CONFIG_IOSCHED || UPL_DEBUG | |
358 | .uplq.prev = NULL, | |
359 | .uplq.next = NULL, | |
360 | #endif /* UPL_DEBUG */ | |
361 | #ifdef VM_PIP_DEBUG | |
362 | .pip_holders = {0}, | |
363 | #endif /* VM_PIP_DEBUG */ | |
364 | ||
365 | .objq.next = NULL, | |
366 | .objq.prev = NULL, | |
367 | .task_objq.next = NULL, | |
368 | .task_objq.prev = NULL, | |
369 | ||
370 | .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX, | |
371 | .purgeable_queue_group = 0, | |
372 | ||
373 | .wire_tag = VM_KERN_MEMORY_NONE, | |
374 | #if !VM_TAG_ACTIVE_UPDATE | |
375 | .wired_objq.next = NULL, | |
376 | .wired_objq.prev = NULL, | |
377 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ | |
378 | ||
379 | .io_tracking = FALSE, | |
380 | ||
381 | #if CONFIG_SECLUDED_MEMORY | |
382 | .eligible_for_secluded = FALSE, | |
383 | .can_grab_secluded = FALSE, | |
384 | #else /* CONFIG_SECLUDED_MEMORY */ | |
385 | .__object3_unused_bits = 0, | |
386 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
387 | ||
388 | #if VM_OBJECT_ACCESS_TRACKING | |
389 | .access_tracking = FALSE, | |
390 | .access_tracking_reads = 0, | |
391 | .access_tracking_writes = 0, | |
392 | #endif /* VM_OBJECT_ACCESS_TRACKING */ | |
393 | ||
394 | #if DEBUG | |
395 | .purgeable_owner_bt = {0}, | |
396 | .vo_purgeable_volatilizer = NULL, | |
397 | .purgeable_volatilizer_bt = {0}, | |
398 | #endif /* DEBUG */ | |
399 | }; | |
400 | ||
401 | LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object"); | |
402 | LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache"); | |
403 | LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0); | |
404 | LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG); | |
405 | LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG); | |
1c79356b | 406 | |
b0d623f7 A |
407 | unsigned int vm_page_purged_wired = 0; |
408 | unsigned int vm_page_purged_busy = 0; | |
409 | unsigned int vm_page_purged_others = 0; | |
410 | ||
0a7de745 A |
411 | static queue_head_t vm_object_cached_list; |
412 | static uint32_t vm_object_cache_pages_freed = 0; | |
413 | static uint32_t vm_object_cache_pages_moved = 0; | |
414 | static uint32_t vm_object_cache_pages_skipped = 0; | |
415 | static uint32_t vm_object_cache_adds = 0; | |
416 | static uint32_t vm_object_cached_count = 0; | |
f427ee49 A |
417 | static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_cached_lock_data, |
418 | &vm_object_cache_lck_grp, &vm_object_lck_attr); | |
6d2010ae | 419 | |
0a7de745 A |
420 | static uint32_t vm_object_page_grab_failed = 0; |
421 | static uint32_t vm_object_page_grab_skipped = 0; | |
422 | static uint32_t vm_object_page_grab_returned = 0; | |
423 | static uint32_t vm_object_page_grab_pmapped = 0; | |
424 | static uint32_t vm_object_page_grab_reactivations = 0; | |
6d2010ae | 425 | |
0a7de745 A |
426 | #define vm_object_cache_lock_spin() \ |
427 | lck_mtx_lock_spin(&vm_object_cached_lock_data) | |
428 | #define vm_object_cache_unlock() \ | |
429 | lck_mtx_unlock(&vm_object_cached_lock_data) | |
b0d623f7 | 430 | |
0a7de745 | 431 | static void vm_object_cache_remove_locked(vm_object_t); |
b0d623f7 | 432 | |
1c79356b | 433 | |
8f6c56a5 A |
434 | static void vm_object_reap(vm_object_t object); |
435 | static void vm_object_reap_async(vm_object_t object); | |
436 | static void vm_object_reaper_thread(void); | |
b0d623f7 | 437 | |
f427ee49 A |
438 | static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_reaper_lock_data, |
439 | &vm_object_lck_grp, &vm_object_lck_attr); | |
b0d623f7 A |
440 | |
441 | static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */ | |
8f6c56a5 A |
442 | unsigned int vm_object_reap_count = 0; |
443 | unsigned int vm_object_reap_count_async = 0; | |
444 | ||
0a7de745 A |
445 | #define vm_object_reaper_lock() \ |
446 | lck_mtx_lock(&vm_object_reaper_lock_data) | |
447 | #define vm_object_reaper_lock_spin() \ | |
448 | lck_mtx_lock_spin(&vm_object_reaper_lock_data) | |
449 | #define vm_object_reaper_unlock() \ | |
450 | lck_mtx_unlock(&vm_object_reaper_lock_data) | |
b0d623f7 | 451 | |
fe8ab488 A |
452 | #if CONFIG_IOSCHED |
453 | /* I/O Re-prioritization request list */ | |
f427ee49 A |
454 | queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list); |
455 | ||
456 | LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock, | |
457 | &vm_object_lck_grp, &vm_object_lck_attr); | |
fe8ab488 | 458 | |
0a7de745 A |
459 | #define IO_REPRIORITIZE_LIST_LOCK() \ |
460 | lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp) | |
461 | #define IO_REPRIORITIZE_LIST_UNLOCK() \ | |
462 | lck_spin_unlock(&io_reprioritize_list_lock) | |
fe8ab488 | 463 | |
0a7de745 | 464 | #define MAX_IO_REPRIORITIZE_REQS 8192 |
f427ee49 A |
465 | ZONE_DECLARE(io_reprioritize_req_zone, "io_reprioritize_req", |
466 | sizeof(struct io_reprioritize_req), ZC_NOGC); | |
fe8ab488 A |
467 | |
468 | /* I/O Re-prioritization thread */ | |
469 | int io_reprioritize_wakeup = 0; | |
470 | static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused); | |
471 | ||
0a7de745 A |
472 | #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) |
473 | #define IO_REPRIO_THREAD_CONTINUATION() \ | |
474 | { \ | |
475 | assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ | |
476 | thread_block(io_reprioritize_thread); \ | |
fe8ab488 A |
477 | } |
478 | ||
479 | void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int); | |
480 | void vm_page_handle_prio_inversion(vm_object_t, vm_page_t); | |
481 | void vm_decmp_upl_reprioritize(upl_t, int); | |
482 | #endif | |
483 | ||
6d2010ae A |
484 | #if 0 |
485 | #undef KERNEL_DEBUG | |
486 | #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT | |
487 | #endif | |
b0d623f7 A |
488 | |
489 | ||
1c79356b A |
490 | /* |
491 | * vm_object_allocate: | |
492 | * | |
493 | * Returns a new object with the given size. | |
494 | */ | |
495 | ||
91447636 | 496 | __private_extern__ void |
1c79356b | 497 | _vm_object_allocate( |
0a7de745 A |
498 | vm_object_size_t size, |
499 | vm_object_t object) | |
1c79356b | 500 | { |
1c79356b | 501 | *object = vm_object_template; |
39037602 | 502 | vm_page_queue_init(&object->memq); |
fe8ab488 | 503 | #if UPL_DEBUG || CONFIG_IOSCHED |
1c79356b | 504 | queue_init(&object->uplq); |
fe8ab488 | 505 | #endif |
1c79356b | 506 | vm_object_lock_init(object); |
f427ee49 | 507 | object->vo_size = vm_object_round_page(size); |
fe8ab488 A |
508 | |
509 | #if VM_OBJECT_TRACKING_OP_CREATED | |
510 | if (vm_object_tracking_inited) { | |
0a7de745 A |
511 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; |
512 | int numsaved = 0; | |
fe8ab488 A |
513 | |
514 | numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); | |
515 | btlog_add_entry(vm_object_tracking_btlog, | |
0a7de745 A |
516 | object, |
517 | VM_OBJECT_TRACKING_OP_CREATED, | |
518 | bt, | |
519 | numsaved); | |
fe8ab488 A |
520 | } |
521 | #endif /* VM_OBJECT_TRACKING_OP_CREATED */ | |
1c79356b A |
522 | } |
523 | ||
0b4e3aa0 | 524 | __private_extern__ vm_object_t |
1c79356b | 525 | vm_object_allocate( |
0a7de745 | 526 | vm_object_size_t size) |
1c79356b | 527 | { |
39037602 | 528 | vm_object_t object; |
1c79356b A |
529 | |
530 | object = (vm_object_t) zalloc(vm_object_zone); | |
0a7de745 | 531 | |
0b4e3aa0 A |
532 | // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */ |
533 | ||
0a7de745 | 534 | if (object != VM_OBJECT_NULL) { |
0b4e3aa0 | 535 | _vm_object_allocate(size, object); |
0a7de745 | 536 | } |
1c79356b A |
537 | |
538 | return object; | |
539 | } | |
540 | ||
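/*
 * Illustrative usage sketch (added, not part of the original file): a caller
 * that needs an anonymous, zero-fill backing object allocates it with
 * vm_object_allocate() and later drops its reference with
 * vm_object_deallocate().  The function name and the 4-page size are
 * arbitrary examples.
 */
__unused static void
vm_object_allocate_usage_sketch(void)
{
	vm_object_t object;

	/* returns an object holding a single reference, or VM_OBJECT_NULL */
	object = vm_object_allocate((vm_object_size_t)(4 * PAGE_SIZE));
	if (object == VM_OBJECT_NULL) {
		return;
	}

	/* ... enter the object into a map, fault pages into it, etc ... */

	/* drop the allocation reference; the object is reaped at zero refs */
	vm_object_deallocate(object);
}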
f427ee49 | 541 | TUNABLE(bool, workaround_41447923, "workaround_41447923", false); |
d9a64523 | 542 | |
1c79356b A |
543 | /* |
544 | * vm_object_bootstrap: | |
545 | * | |
546 | * Initialize the VM objects module. | |
547 | */ | |
f427ee49 A |
548 | __startup_func |
549 | void | |
1c79356b A |
550 | vm_object_bootstrap(void) |
551 | { | |
0a7de745 | 552 | vm_size_t vm_object_size; |
39037602 | 553 | |
0a7de745 | 554 | assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t)); |
5ba3f43e | 555 | |
f427ee49 A |
556 | vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) & |
557 | ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1); | |
1c79356b | 558 | |
f427ee49 A |
559 | vm_object_zone = zone_create_ext("vm objects", vm_object_size, |
560 | ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED, | |
561 | ZONE_ID_ANY, ^(zone_t z){ | |
562 | #if defined(__LP64__) | |
c3c9b80d | 563 | zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED); |
f427ee49 A |
564 | #else |
565 | (void)z; | |
566 | #endif | |
567 | }); | |
b0d623f7 | 568 | |
1c79356b | 569 | queue_init(&vm_object_cached_list); |
b0d623f7 | 570 | |
b0d623f7 A |
571 | queue_init(&vm_object_reaper_queue); |
572 | ||
1c79356b A |
573 | /* |
574 | * Initialize the "kernel object" | |
575 | */ | |
576 | ||
c3c9b80d A |
577 | /* |
578 | * Note that in the following size specifications, we need to add 1 because | |
579 | * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. | |
580 | */ | |
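	/*
	 * Added illustration (not in the original file): with a hypothetical
	 * VM_MAX_KERNEL_ADDRESS of 0xFFFFFFFF, the objects below would be
	 * created with a size of 0x100000000 bytes, i.e. covering the
	 * inclusive address range [0, VM_MAX_KERNEL_ADDRESS].
	 */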
581 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object); | |
582 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object); | |
55e303ae | 583 | kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
39236c6e | 584 | compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
5ba3f43e | 585 | kernel_object->no_tag_update = TRUE; |
1c79356b | 586 | |
c3c9b80d A |
587 | /* |
588 | * The object to hold retired VM pages. | |
589 | */ | |
590 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object); | |
591 | retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
592 | ||
1c79356b A |
593 | /* |
594 | * Initialize the "submap object". Make it as large as the | |
595 | * kernel object so that no limit is imposed on submap sizes. | |
596 | */ | |
597 | ||
c3c9b80d | 598 | _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object); |
55e303ae A |
599 | vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; |
600 | ||
1c79356b A |
601 | /* |
602 | * Create an "extra" reference to this object so that we never | |
603 | * try to deallocate it; zfree doesn't like to be called with | |
604 | * non-zone memory. | |
605 | */ | |
606 | vm_object_reference(vm_submap_object); | |
1c79356b A |
607 | } |
608 | ||
fe8ab488 A |
609 | #if CONFIG_IOSCHED |
610 | void | |
611 | vm_io_reprioritize_init(void) | |
612 | { | |
0a7de745 A |
613 | kern_return_t result; |
614 | thread_t thread = THREAD_NULL; | |
fe8ab488 | 615 | |
fe8ab488 | 616 | result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); |
0a7de745 | 617 | if (result == KERN_SUCCESS) { |
cb323159 | 618 | thread_set_thread_name(thread, "VM_io_reprioritize_thread"); |
0a7de745 A |
619 | thread_deallocate(thread); |
620 | } else { | |
621 | panic("Could not create io_reprioritize_thread"); | |
622 | } | |
fe8ab488 A |
623 | } |
624 | #endif | |
625 | ||
8f6c56a5 A |
626 | void |
627 | vm_object_reaper_init(void) | |
628 | { | |
0a7de745 A |
629 | kern_return_t kr; |
630 | thread_t thread; | |
8f6c56a5 | 631 | |
8f6c56a5 A |
632 | kr = kernel_thread_start_priority( |
633 | (thread_continue_t) vm_object_reaper_thread, | |
634 | NULL, | |
5ba3f43e | 635 | BASEPRI_VM, |
8f6c56a5 A |
636 | &thread); |
637 | if (kr != KERN_SUCCESS) { | |
2d21ac55 | 638 | panic("failed to launch vm_object_reaper_thread kr=0x%x", kr); |
8f6c56a5 | 639 | } |
cb323159 | 640 | thread_set_thread_name(thread, "VM_object_reaper_thread"); |
8f6c56a5 A |
641 | thread_deallocate(thread); |
642 | } | |
643 | ||
1c79356b A |
644 | |
645 | /* | |
646 | * vm_object_deallocate: | |
647 | * | |
648 | * Release a reference to the specified object, | |
649 | * gained either through a vm_object_allocate | |
650 | * or a vm_object_reference call. When all references | |
651 | * are gone, storage associated with this object | |
652 | * may be relinquished. | |
653 | * | |
654 | * No object may be locked. | |
655 | */ | |
2d21ac55 A |
656 | unsigned long vm_object_deallocate_shared_successes = 0; |
657 | unsigned long vm_object_deallocate_shared_failures = 0; | |
658 | unsigned long vm_object_deallocate_shared_swap_failures = 0; | |
3e170ce0 | 659 | |
0b4e3aa0 | 660 | __private_extern__ void |
1c79356b | 661 | vm_object_deallocate( |
0a7de745 | 662 | vm_object_t object) |
1c79356b | 663 | { |
0a7de745 A |
664 | vm_object_t shadow = VM_OBJECT_NULL; |
665 | ||
1c79356b A |
666 | // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */ |
667 | // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */ | |
668 | ||
0a7de745 A |
669 | if (object == VM_OBJECT_NULL) { |
670 | return; | |
671 | } | |
2d21ac55 | 672 | |
c3c9b80d | 673 | if (object == kernel_object || object == compressor_object || object == retired_pages_object) { |
b0d623f7 A |
674 | vm_object_lock_shared(object); |
675 | ||
676 | OSAddAtomic(-1, &object->ref_count); | |
677 | ||
678 | if (object->ref_count == 0) { | |
0a7de745 | 679 | if (object == kernel_object) { |
39236c6e | 680 | panic("vm_object_deallocate: losing kernel_object\n"); |
c3c9b80d A |
681 | } else if (object == retired_pages_object) { |
682 | panic("vm_object_deallocate: losing retired_pages_object\n"); | |
0a7de745 | 683 | } else { |
39236c6e | 684 | panic("vm_object_deallocate: losing compressor_object\n"); |
0a7de745 | 685 | } |
2d21ac55 | 686 | } |
b0d623f7 | 687 | vm_object_unlock(object); |
2d21ac55 A |
688 | return; |
689 | } | |
690 | ||
fe8ab488 A |
691 | if (object->ref_count == 2 && |
692 | object->named) { | |
693 | /* | |
694 | * This "named" object's reference count is about to | |
695 | * drop from 2 to 1: | |
696 | * we'll need to call memory_object_last_unmap(). | |
697 | */ | |
698 | } else if (object->ref_count == 2 && | |
0a7de745 A |
699 | object->internal && |
700 | object->shadow != VM_OBJECT_NULL) { | |
fe8ab488 A |
701 | /* |
702 | * This internal object's reference count is about to | |
703 | * drop from 2 to 1 and it has a shadow object: | |
704 | * we'll want to try and collapse this object with its | |
705 | * shadow. | |
706 | */ | |
0a7de745 A |
707 | } else if (object->ref_count >= 2) { |
708 | UInt32 original_ref_count; | |
709 | volatile UInt32 *ref_count_p; | |
710 | Boolean atomic_swap; | |
2d21ac55 A |
711 | |
712 | /* | |
713 | * The object currently looks like it is not being | |
714 | * kept alive solely by the reference we're about to release. | |
715 | * Let's try and release our reference without taking | |
716 | * all the locks we would need if we had to terminate the | |
717 | * object (cache lock + exclusive object lock). | |
718 | * Lock the object "shared" to make sure we don't race with | |
719 | * anyone holding it "exclusive". | |
720 | */ | |
0a7de745 | 721 | vm_object_lock_shared(object); |
2d21ac55 A |
722 | ref_count_p = (volatile UInt32 *) &object->ref_count; |
723 | original_ref_count = object->ref_count; | |
724 | /* | |
725 | * Test again as "ref_count" could have changed. | |
726 | * "named" shouldn't change. | |
727 | */ | |
fe8ab488 A |
728 | if (original_ref_count == 2 && |
729 | object->named) { | |
730 | /* need to take slow path for m_o_last_unmap() */ | |
731 | atomic_swap = FALSE; | |
732 | } else if (original_ref_count == 2 && | |
0a7de745 A |
733 | object->internal && |
734 | object->shadow != VM_OBJECT_NULL) { | |
fe8ab488 A |
735 | /* need to take slow path for vm_object_collapse() */ |
736 | atomic_swap = FALSE; | |
0a7de745 | 737 | } else if (original_ref_count < 2) { |
fe8ab488 A |
738 | /* need to take slow path for vm_object_terminate() */ |
739 | atomic_swap = FALSE; | |
740 | } else { | |
741 | /* try an atomic update with the shared lock */ | |
2d21ac55 A |
742 | atomic_swap = OSCompareAndSwap( |
743 | original_ref_count, | |
744 | original_ref_count - 1, | |
745 | (UInt32 *) &object->ref_count); | |
746 | if (atomic_swap == FALSE) { | |
747 | vm_object_deallocate_shared_swap_failures++; | |
fe8ab488 | 748 | /* fall back to the slow path... */ |
2d21ac55 | 749 | } |
2d21ac55 | 750 | } |
0a7de745 | 751 | |
2d21ac55 A |
752 | vm_object_unlock(object); |
753 | ||
754 | if (atomic_swap) { | |
b0d623f7 A |
755 | /* |
756 | * ref_count was updated atomically ! | |
757 | */ | |
2d21ac55 A |
758 | vm_object_deallocate_shared_successes++; |
759 | return; | |
760 | } | |
761 | ||
762 | /* | |
763 | * Someone else updated the ref_count at the same | |
764 | * time and we lost the race. Fall back to the usual | |
765 | * slow but safe path... | |
766 | */ | |
767 | vm_object_deallocate_shared_failures++; | |
768 | } | |
1c79356b A |
769 | |
770 | while (object != VM_OBJECT_NULL) { | |
b0d623f7 | 771 | vm_object_lock(object); |
2d21ac55 | 772 | |
0b4e3aa0 A |
773 | assert(object->ref_count > 0); |
774 | ||
775 | /* | |
776 | * If the object has a named reference, and only | |
777 | * that reference would remain, inform the pager | |
778 | * about the last "mapping" reference going away. | |
779 | */ | |
0a7de745 A |
780 | if ((object->ref_count == 2) && (object->named)) { |
781 | memory_object_t pager = object->pager; | |
0b4e3aa0 A |
782 | |
783 | /* Notify the Pager that there are no */ | |
784 | /* more mappers for this object */ | |
785 | ||
786 | if (pager != MEMORY_OBJECT_NULL) { | |
593a1d5f A |
787 | vm_object_mapping_wait(object, THREAD_UNINT); |
788 | vm_object_mapping_begin(object); | |
0b4e3aa0 | 789 | vm_object_unlock(object); |
2d21ac55 | 790 | |
b0d623f7 | 791 | memory_object_last_unmap(pager); |
593a1d5f | 792 | |
b0d623f7 | 793 | vm_object_lock(object); |
593a1d5f | 794 | vm_object_mapping_end(object); |
0b4e3aa0 | 795 | } |
b0d623f7 | 796 | assert(object->ref_count > 0); |
0b4e3aa0 | 797 | } |
1c79356b A |
798 | |
799 | /* | |
800 | * Lose the reference. If other references | |
801 | * remain, then we are done, unless we need | |
802 | * to retry a cache trim. | |
803 | * If it is the last reference, then keep it | |
804 | * until any pending initialization is completed. | |
805 | */ | |
806 | ||
0b4e3aa0 A |
807 | /* if the object is terminating, it cannot go into */ |
808 | /* the cache and we obviously should not call */ | |
809 | /* terminate again. */ | |
810 | ||
811 | if ((object->ref_count > 1) || object->terminating) { | |
2d21ac55 | 812 | vm_object_lock_assert_exclusive(object); |
1c79356b | 813 | object->ref_count--; |
91447636 A |
814 | |
815 | if (object->ref_count == 1 && | |
816 | object->shadow != VM_OBJECT_NULL) { | |
817 | /* | |
0c530ab8 A |
818 | * There's only one reference left on this |
819 | * VM object. We can't tell if it's a valid | |
820 | * one (from a mapping for example) or if this | |
821 | * object is just part of a possibly stale and | |
822 | * useless shadow chain. | |
823 | * We would like to try and collapse it into | |
824 | * its parent, but we don't have any pointers | |
825 | * back to this parent object. | |
91447636 A |
826 | * But we can try and collapse this object with |
827 | * its own shadows, in case these are useless | |
828 | * too... | |
0c530ab8 A |
829 | * We can't bypass this object though, since we |
830 | * don't know if this last reference on it is | |
831 | * meaningful or not. | |
91447636 | 832 | */ |
0c530ab8 | 833 | vm_object_collapse(object, 0, FALSE); |
91447636 | 834 | } |
0a7de745 | 835 | vm_object_unlock(object); |
1c79356b A |
836 | return; |
837 | } | |
838 | ||
839 | /* | |
840 | * We have to wait for initialization | |
841 | * before destroying or caching the object. | |
842 | */ | |
0a7de745 A |
843 | |
844 | if (object->pager_created && !object->pager_initialized) { | |
845 | assert(!object->can_persist); | |
1c79356b | 846 | vm_object_assert_wait(object, |
0a7de745 A |
847 | VM_OBJECT_EVENT_INITIALIZED, |
848 | THREAD_UNINT); | |
1c79356b | 849 | vm_object_unlock(object); |
b0d623f7 | 850 | |
9bccf70c | 851 | thread_block(THREAD_CONTINUE_NULL); |
1c79356b A |
852 | continue; |
853 | } | |
854 | ||
855 | /* | |
5ba3f43e A |
856 | * Terminate this object. If it had a shadow, |
857 | * then deallocate it; otherwise, if we need | |
858 | * to retry a cache trim, do so now; otherwise, | |
859 | * we are done. "pageout" objects have a shadow, | |
860 | * but maintain a "paging reference" rather than | |
861 | * a normal reference. | |
1c79356b | 862 | */ |
5ba3f43e | 863 | shadow = object->pageout ? VM_OBJECT_NULL : object->shadow; |
1c79356b | 864 | |
5ba3f43e | 865 | if (vm_object_terminate(object) != KERN_SUCCESS) { |
0a7de745 | 866 | return; |
5ba3f43e A |
867 | } |
868 | if (shadow != VM_OBJECT_NULL) { | |
0a7de745 A |
869 | object = shadow; |
870 | continue; | |
1c79356b | 871 | } |
5ba3f43e | 872 | return; |
1c79356b | 873 | } |
1c79356b A |
874 | } |
875 | ||
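/*
 * Illustrative sketch (added, not part of the original file): the fast path
 * in vm_object_deallocate() above drops a reference without the exclusive
 * object lock by using a compare-and-swap under the shared lock.  The helper
 * below restates that pattern in isolation; the real code first screens out
 * the "named", internal-with-shadow and last-reference cases, which must
 * take the slow path.  The function name is hypothetical.
 */
__unused static boolean_t
vm_object_ref_release_fast_sketch(vm_object_t object)
{
	UInt32          original_ref_count;
	boolean_t       swapped;

	vm_object_lock_shared(object);
	original_ref_count = object->ref_count;
	if (original_ref_count < 2) {
		/* last reference: needs the slow, exclusive-lock path */
		vm_object_unlock(object);
		return FALSE;
	}
	swapped = OSCompareAndSwap(
		original_ref_count,
		original_ref_count - 1,
		(UInt32 *) &object->ref_count);
	vm_object_unlock(object);

	return swapped;
}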
b0d623f7 | 876 | |
6d2010ae A |
877 | |
878 | vm_page_t | |
879 | vm_object_page_grab( | |
0a7de745 | 880 | vm_object_t object) |
6d2010ae | 881 | { |
0a7de745 A |
882 | vm_page_t p, next_p; |
883 | int p_limit = 0; | |
884 | int p_skipped = 0; | |
6d2010ae A |
885 | |
886 | vm_object_lock_assert_exclusive(object); | |
887 | ||
39037602 | 888 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
6d2010ae A |
889 | p_limit = MIN(50, object->resident_page_count); |
890 | ||
39037602 | 891 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) { |
6d2010ae | 892 | p = next_p; |
d9a64523 | 893 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
6d2010ae | 894 | |
0a7de745 | 895 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) { |
6d2010ae | 896 | goto move_page_in_obj; |
0a7de745 | 897 | } |
6d2010ae | 898 | |
d9a64523 | 899 | if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) { |
6d2010ae A |
900 | vm_page_lockspin_queues(); |
901 | ||
d9a64523 | 902 | if (p->vmp_pmapped) { |
6d2010ae A |
903 | int refmod_state; |
904 | ||
905 | vm_object_page_grab_pmapped++; | |
906 | ||
d9a64523 | 907 | if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) { |
39037602 | 908 | refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p)); |
6d2010ae | 909 | |
0a7de745 | 910 | if (refmod_state & VM_MEM_REFERENCED) { |
d9a64523 | 911 | p->vmp_reference = TRUE; |
0a7de745 | 912 | } |
316670eb A |
913 | if (refmod_state & VM_MEM_MODIFIED) { |
914 | SET_PAGE_DIRTY(p, FALSE); | |
915 | } | |
6d2010ae | 916 | } |
d9a64523 | 917 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
39037602 | 918 | refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
6d2010ae | 919 | |
0a7de745 | 920 | if (refmod_state & VM_MEM_REFERENCED) { |
d9a64523 | 921 | p->vmp_reference = TRUE; |
0a7de745 | 922 | } |
316670eb A |
923 | if (refmod_state & VM_MEM_MODIFIED) { |
924 | SET_PAGE_DIRTY(p, FALSE); | |
925 | } | |
6d2010ae | 926 | |
0a7de745 | 927 | if (p->vmp_dirty == FALSE) { |
6d2010ae | 928 | goto take_page; |
0a7de745 | 929 | } |
6d2010ae A |
930 | } |
931 | } | |
d9a64523 | 932 | if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) { |
6d2010ae A |
933 | vm_page_activate(p); |
934 | ||
c3c9b80d | 935 | counter_inc(&vm_statistics_reactivations); |
6d2010ae A |
936 | vm_object_page_grab_reactivations++; |
937 | } | |
938 | vm_page_unlock_queues(); | |
939 | move_page_in_obj: | |
0a7de745 A |
940 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
941 | vm_page_queue_enter(&object->memq, p, vmp_listq); | |
6d2010ae A |
942 | |
943 | p_skipped++; | |
944 | continue; | |
945 | } | |
946 | vm_page_lockspin_queues(); | |
947 | take_page: | |
948 | vm_page_free_prepare_queues(p); | |
949 | vm_object_page_grab_returned++; | |
950 | vm_object_page_grab_skipped += p_skipped; | |
951 | ||
952 | vm_page_unlock_queues(); | |
953 | ||
954 | vm_page_free_prepare_object(p, TRUE); | |
0a7de745 A |
955 | |
956 | return p; | |
6d2010ae A |
957 | } |
958 | vm_object_page_grab_skipped += p_skipped; | |
959 | vm_object_page_grab_failed++; | |
960 | ||
0a7de745 | 961 | return NULL; |
6d2010ae A |
962 | } |
963 | ||
964 | ||
965 | ||
0a7de745 A |
966 | #define EVICT_PREPARE_LIMIT 64 |
967 | #define EVICT_AGE 10 | |
6d2010ae | 968 | |
0a7de745 | 969 | static clock_sec_t vm_object_cache_aging_ts = 0; |
6d2010ae A |
970 | |
971 | static void | |
972 | vm_object_cache_remove_locked( | |
0a7de745 | 973 | vm_object_t object) |
6d2010ae | 974 | { |
39037602 | 975 | assert(object->purgable == VM_PURGABLE_DENY); |
39037602 | 976 | |
a39ff7e2 A |
977 | queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list); |
978 | object->cached_list.next = NULL; | |
979 | object->cached_list.prev = NULL; | |
6d2010ae A |
980 | |
981 | vm_object_cached_count--; | |
982 | } | |
983 | ||
984 | void | |
985 | vm_object_cache_remove( | |
0a7de745 | 986 | vm_object_t object) |
6d2010ae A |
987 | { |
988 | vm_object_cache_lock_spin(); | |
989 | ||
a39ff7e2 | 990 | if (object->cached_list.next && |
0a7de745 | 991 | object->cached_list.prev) { |
6d2010ae | 992 | vm_object_cache_remove_locked(object); |
0a7de745 | 993 | } |
6d2010ae A |
994 | |
995 | vm_object_cache_unlock(); | |
996 | } | |
997 | ||
998 | void | |
999 | vm_object_cache_add( | |
0a7de745 | 1000 | vm_object_t object) |
6d2010ae A |
1001 | { |
1002 | clock_sec_t sec; | |
1003 | clock_nsec_t nsec; | |
1004 | ||
39037602 | 1005 | assert(object->purgable == VM_PURGABLE_DENY); |
39037602 | 1006 | |
0a7de745 | 1007 | if (object->resident_page_count == 0) { |
6d2010ae | 1008 | return; |
0a7de745 | 1009 | } |
6d2010ae A |
1010 | clock_get_system_nanotime(&sec, &nsec); |
1011 | ||
1012 | vm_object_cache_lock_spin(); | |
1013 | ||
a39ff7e2 A |
1014 | if (object->cached_list.next == NULL && |
1015 | object->cached_list.prev == NULL) { | |
1016 | queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list); | |
6d2010ae A |
1017 | object->vo_cache_ts = sec + EVICT_AGE; |
1018 | object->vo_cache_pages_to_scan = object->resident_page_count; | |
1019 | ||
1020 | vm_object_cached_count++; | |
1021 | vm_object_cache_adds++; | |
1022 | } | |
1023 | vm_object_cache_unlock(); | |
1024 | } | |
1025 | ||
1026 | int | |
1027 | vm_object_cache_evict( | |
0a7de745 A |
1028 | int num_to_evict, |
1029 | int max_objects_to_examine) | |
6d2010ae | 1030 | { |
0a7de745 A |
1031 | vm_object_t object = VM_OBJECT_NULL; |
1032 | vm_object_t next_obj = VM_OBJECT_NULL; | |
1033 | vm_page_t local_free_q = VM_PAGE_NULL; | |
1034 | vm_page_t p; | |
1035 | vm_page_t next_p; | |
1036 | int object_cnt = 0; | |
1037 | vm_page_t ep_array[EVICT_PREPARE_LIMIT]; | |
1038 | int ep_count; | |
1039 | int ep_limit; | |
1040 | int ep_index; | |
1041 | int ep_freed = 0; | |
1042 | int ep_moved = 0; | |
1043 | uint32_t ep_skipped = 0; | |
1044 | clock_sec_t sec; | |
1045 | clock_nsec_t nsec; | |
6d2010ae A |
1046 | |
1047 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); | |
1048 | /* | |
0a7de745 | 1049 | * do a couple of quick checks to see if it's |
6d2010ae A |
1050 | * worthwhile grabbing the lock |
1051 | */ | |
1052 | if (queue_empty(&vm_object_cached_list)) { | |
1053 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); | |
0a7de745 | 1054 | return 0; |
6d2010ae A |
1055 | } |
1056 | clock_get_system_nanotime(&sec, &nsec); | |
1057 | ||
1058 | /* | |
1059 | * the object on the head of the queue has not | |
1060 | * yet sufficiently aged | |
1061 | */ | |
1062 | if (sec < vm_object_cache_aging_ts) { | |
1063 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); | |
0a7de745 | 1064 | return 0; |
6d2010ae A |
1065 | } |
1066 | /* | |
0a7de745 | 1067 | * don't need the queue lock to find |
6d2010ae A |
1068 | * and lock an object on the cached list |
1069 | */ | |
1070 | vm_page_unlock_queues(); | |
1071 | ||
1072 | vm_object_cache_lock_spin(); | |
1073 | ||
1074 | for (;;) { | |
1075 | next_obj = (vm_object_t)queue_first(&vm_object_cached_list); | |
1076 | ||
1077 | while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) { | |
6d2010ae | 1078 | object = next_obj; |
a39ff7e2 | 1079 | next_obj = (vm_object_t)queue_next(&next_obj->cached_list); |
39037602 A |
1080 | |
1081 | assert(object->purgable == VM_PURGABLE_DENY); | |
0a7de745 | 1082 | |
6d2010ae A |
1083 | if (sec < object->vo_cache_ts) { |
1084 | KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0); | |
1085 | ||
1086 | vm_object_cache_aging_ts = object->vo_cache_ts; | |
1087 | object = VM_OBJECT_NULL; | |
1088 | break; | |
1089 | } | |
1090 | if (!vm_object_lock_try_scan(object)) { | |
1091 | /* | |
1092 | * just skip over this guy for now... if we find | |
1093 | * an object to steal pages from, we'll revisit in a bit... |
1094 | * hopefully, the lock will have cleared | |
1095 | */ | |
1096 | KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0); | |
1097 | ||
1098 | object = VM_OBJECT_NULL; | |
1099 | continue; | |
1100 | } | |
39037602 | 1101 | if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) { |
6d2010ae A |
1102 | /* |
1103 | * this case really shouldn't happen, but it's not fatal | |
1104 | * so deal with it... if we don't remove the object from | |
1105 | * the list, we'll never move past it. | |
1106 | */ | |
1107 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); | |
0a7de745 | 1108 | |
6d2010ae A |
1109 | vm_object_cache_remove_locked(object); |
1110 | vm_object_unlock(object); | |
1111 | object = VM_OBJECT_NULL; | |
1112 | continue; | |
1113 | } | |
1114 | /* | |
1115 | * we have a locked object with pages... | |
1116 | * time to start harvesting | |
1117 | */ | |
1118 | break; | |
1119 | } | |
1120 | vm_object_cache_unlock(); | |
1121 | ||
0a7de745 | 1122 | if (object == VM_OBJECT_NULL) { |
6d2010ae | 1123 | break; |
0a7de745 | 1124 | } |
6d2010ae A |
1125 | |
1126 | /* | |
1127 | * object is locked at this point and | |
1128 | * has resident pages | |
1129 | */ | |
39037602 | 1130 | next_p = (vm_page_t)vm_page_queue_first(&object->memq); |
6d2010ae A |
1131 | |
1132 | /* | |
1133 | * break the page scan into 2 pieces to minimize the time spent | |
1134 | * behind the page queue lock... | |
1135 | * the list of pages on these unused objects is likely to be cold | |
1136 | * w/r to the cpu cache which increases the time to scan the list | |
1137 | * tenfold... and we may have a 'run' of pages we can't utilize that | |
1138 | * needs to be skipped over... | |
1139 | */ | |
0a7de745 | 1140 | if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) { |
6d2010ae | 1141 | ep_limit = EVICT_PREPARE_LIMIT; |
0a7de745 | 1142 | } |
6d2010ae A |
1143 | ep_count = 0; |
1144 | ||
39037602 | 1145 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) { |
6d2010ae | 1146 | p = next_p; |
d9a64523 | 1147 | next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); |
6d2010ae A |
1148 | |
1149 | object->vo_cache_pages_to_scan--; | |
1150 | ||
d9a64523 | 1151 | if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) { |
0a7de745 A |
1152 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
1153 | vm_page_queue_enter(&object->memq, p, vmp_listq); | |
6d2010ae A |
1154 | |
1155 | ep_skipped++; | |
1156 | continue; | |
1157 | } | |
d9a64523 | 1158 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
0a7de745 A |
1159 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
1160 | vm_page_queue_enter(&object->memq, p, vmp_listq); | |
6d2010ae | 1161 | |
39037602 | 1162 | pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p)); |
6d2010ae A |
1163 | } |
1164 | ep_array[ep_count++] = p; | |
1165 | } | |
1166 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0); | |
1167 | ||
1168 | vm_page_lockspin_queues(); | |
1169 | ||
1170 | for (ep_index = 0; ep_index < ep_count; ep_index++) { | |
6d2010ae A |
1171 | p = ep_array[ep_index]; |
1172 | ||
d9a64523 A |
1173 | if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { |
1174 | p->vmp_reference = FALSE; | |
1175 | p->vmp_no_cache = FALSE; | |
6d2010ae | 1176 | |
316670eb A |
1177 | /* |
1178 | * we've already filtered out pages that are in the laundry | |
1179 | * so if we get here, this page can't be on the pageout queue | |
1180 | */ | |
39037602 | 1181 | vm_page_queues_remove(p, FALSE); |
3e170ce0 | 1182 | vm_page_enqueue_inactive(p, TRUE); |
6d2010ae A |
1183 | |
1184 | ep_moved++; | |
1185 | } else { | |
fe8ab488 A |
1186 | #if CONFIG_PHANTOM_CACHE |
1187 | vm_phantom_cache_add_ghost(p); | |
1188 | #endif | |
6d2010ae A |
1189 | vm_page_free_prepare_queues(p); |
1190 | ||
d9a64523 | 1191 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
6d2010ae A |
1192 | /* |
1193 | * Add this page to our list of reclaimed pages, | |
1194 | * to be freed later. | |
1195 | */ | |
d9a64523 | 1196 | p->vmp_snext = local_free_q; |
6d2010ae A |
1197 | local_free_q = p; |
1198 | ||
1199 | ep_freed++; | |
1200 | } | |
1201 | } | |
1202 | vm_page_unlock_queues(); | |
1203 | ||
1204 | KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0); | |
1205 | ||
1206 | if (local_free_q) { | |
1207 | vm_page_free_list(local_free_q, TRUE); | |
1208 | local_free_q = VM_PAGE_NULL; | |
1209 | } | |
1210 | if (object->vo_cache_pages_to_scan == 0) { | |
1211 | KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0); | |
1212 | ||
1213 | vm_object_cache_remove(object); | |
1214 | ||
1215 | KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); | |
1216 | } | |
1217 | /* | |
1218 | * done with this object | |
1219 | */ | |
1220 | vm_object_unlock(object); | |
1221 | object = VM_OBJECT_NULL; | |
1222 | ||
1223 | /* | |
1224 | * at this point, we are not holding any locks | |
1225 | */ | |
1226 | if ((ep_freed + ep_moved) >= num_to_evict) { | |
1227 | /* | |
1228 | * we've reached our target for the | |
1229 | * number of pages to evict | |
1230 | */ | |
1231 | break; | |
1232 | } | |
1233 | vm_object_cache_lock_spin(); | |
1234 | } | |
1235 | /* | |
1236 | * put the page queues lock back to the caller's | |
0a7de745 | 1237 | * idea of it |
6d2010ae A |
1238 | */ |
1239 | vm_page_lock_queues(); | |
1240 | ||
1241 | vm_object_cache_pages_freed += ep_freed; | |
1242 | vm_object_cache_pages_moved += ep_moved; | |
1243 | vm_object_cache_pages_skipped += ep_skipped; | |
1244 | ||
1245 | KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0); | |
0a7de745 | 1246 | return ep_freed; |
6d2010ae A |
1247 | } |
1248 | ||
1c79356b A |
1249 | /* |
1250 | * Routine: vm_object_terminate | |
1251 | * Purpose: | |
1252 | * Free all resources associated with a vm_object. | |
1253 | * In/out conditions: | |
0b4e3aa0 | 1254 | * Upon entry, the object must be locked, |
1c79356b A |
1255 | * and the object must have exactly one reference. |
1256 | * | |
1257 | * The shadow object reference is left alone. | |
1258 | * | |
1259 | * The object must be unlocked if it's found that pages |
1260 | * must be flushed to a backing object. If someone |
1261 | * manages to map the object while it is being flushed, |
1262 | * the object is returned unlocked and unchanged. Otherwise, |
1263 | * upon exit, the cache will be unlocked, and the | |
1264 | * object will cease to exist. | |
1265 | */ | |
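/*
 * Illustrative sketch (added, not part of the original file): the caller
 * contract described above, roughly as vm_object_deallocate() exercises it.
 * On KERN_SUCCESS the object lock has been consumed and only the shadow
 * reference (if any) remains to be dropped; on KERN_FAILURE the object was
 * unlocked and still has other references or pending work.  The function
 * name is hypothetical and the shadow handling is simplified.
 */
__unused static void
vm_object_last_reference_sketch(vm_object_t object)
{
	vm_object_t shadow;

	vm_object_lock(object);
	assert(object->ref_count == 1);

	/* "pageout" objects hold a paging reference on their shadow instead */
	shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

	if (vm_object_terminate(object) == KERN_SUCCESS &&
	    shadow != VM_OBJECT_NULL) {
		/* the terminated object's reference on its shadow is ours to drop */
		vm_object_deallocate(shadow);
	}
}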
0b4e3aa0 | 1266 | static kern_return_t |
1c79356b | 1267 | vm_object_terminate( |
0a7de745 | 1268 | vm_object_t object) |
1c79356b | 1269 | { |
0a7de745 | 1270 | vm_object_t shadow_object; |
1c79356b | 1271 | |
39037602 A |
1272 | vm_object_lock_assert_exclusive(object); |
1273 | ||
5ba3f43e | 1274 | if (!object->pageout && (!object->internal && object->can_persist) && |
b0d623f7 | 1275 | (object->pager != NULL || object->shadow_severed)) { |
1c79356b A |
1276 | /* |
1277 | * Clear pager_trusted bit so that the pages get yanked | |
1278 | * out of the object instead of cleaned in place. This | |
1279 | * prevents a deadlock in XMM and makes more sense anyway. | |
1280 | */ | |
1281 | object->pager_trusted = FALSE; | |
1282 | ||
b0d623f7 | 1283 | vm_object_reap_pages(object, REAP_TERMINATE); |
1c79356b | 1284 | } |
0b4e3aa0 A |
1285 | /* |
1286 | * Make sure the object isn't already being terminated | |
1287 | */ | |
b0d623f7 | 1288 | if (object->terminating) { |
2d21ac55 A |
1289 | vm_object_lock_assert_exclusive(object); |
1290 | object->ref_count--; | |
0b4e3aa0 | 1291 | assert(object->ref_count > 0); |
0b4e3aa0 A |
1292 | vm_object_unlock(object); |
1293 | return KERN_FAILURE; | |
1294 | } | |
1295 | ||
1296 | /* | |
1297 | * Did somebody get a reference to the object while we were | |
1298 | * cleaning it? | |
1299 | */ | |
b0d623f7 | 1300 | if (object->ref_count != 1) { |
2d21ac55 A |
1301 | vm_object_lock_assert_exclusive(object); |
1302 | object->ref_count--; | |
0b4e3aa0 | 1303 | assert(object->ref_count > 0); |
1c79356b A |
1304 | vm_object_unlock(object); |
1305 | return KERN_FAILURE; | |
1306 | } | |
1307 | ||
1c79356b A |
1308 | /* |
1309 | * Make sure no one can look us up now. | |
1310 | */ | |
1311 | ||
0b4e3aa0 A |
1312 | object->terminating = TRUE; |
1313 | object->alive = FALSE; | |
1c79356b | 1314 | |
a39ff7e2 A |
1315 | if (!object->internal && |
1316 | object->cached_list.next && | |
0a7de745 | 1317 | object->cached_list.prev) { |
6d2010ae | 1318 | vm_object_cache_remove(object); |
0a7de745 | 1319 | } |
6d2010ae | 1320 | |
1c79356b A |
1321 | /* |
1322 | * Detach the object from its shadow if we are the shadow's | |
55e303ae A |
1323 | * copy. The reference we hold on the shadow must be dropped |
1324 | * by our caller. | |
1c79356b A |
1325 | */ |
1326 | if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && | |
1327 | !(object->pageout)) { | |
1328 | vm_object_lock(shadow_object); | |
0a7de745 | 1329 | if (shadow_object->copy == object) { |
55e303ae | 1330 | shadow_object->copy = VM_OBJECT_NULL; |
0a7de745 | 1331 | } |
1c79356b A |
1332 | vm_object_unlock(shadow_object); |
1333 | } | |
1334 | ||
b0d623f7 A |
1335 | if (object->paging_in_progress != 0 || |
1336 | object->activity_in_progress != 0) { | |
8f6c56a5 A |
1337 | /* |
1338 | * There are still some paging_in_progress references | |
1339 | * on this object, meaning that there are some paging | |
1340 | * or other I/O operations in progress for this VM object. | |
1341 | * Such operations take some paging_in_progress references | |
1342 | * up front to ensure that the object doesn't go away, but | |
1343 | * they may also need to acquire a reference on the VM object, | |
1344 | * to map it in kernel space, for example. That means that | |
1345 | * they may end up releasing the last reference on the VM | |
1346 | * object, triggering its termination, while still holding | |
1347 | * paging_in_progress references. Waiting for these | |
1348 | * pending paging_in_progress references to go away here would | |
1349 | * deadlock. | |
1350 | * | |
1351 | * To avoid deadlocking, we'll let the vm_object_reaper_thread | |
1352 | * complete the VM object termination if it still holds | |
1353 | * paging_in_progress references at this point. | |
1354 | * | |
1355 | * No new paging_in_progress should appear now that the | |
1356 | * VM object is "terminating" and not "alive". | |
1357 | */ | |
1358 | vm_object_reap_async(object); | |
8f6c56a5 | 1359 | vm_object_unlock(object); |
6601e61a A |
1360 | /* |
1361 | * Return KERN_FAILURE to let the caller know that we | |
1362 | * haven't completed the termination and it can't drop this | |
1363 | * object's reference on its shadow object yet. | |
1364 | * The reaper thread will take care of that once it has | |
1365 | * completed this object's termination. | |
1366 | */ | |
1367 | return KERN_FAILURE; | |
8f6c56a5 | 1368 | } |
b0d623f7 A |
1369 | /* |
1370 | * complete the VM object termination | |
1371 | */ | |
8f6c56a5 A |
1372 | vm_object_reap(object); |
1373 | object = VM_OBJECT_NULL; | |
8f6c56a5 | 1374 | |
2d21ac55 | 1375 | /* |
b0d623f7 A |
1376 | * the object lock was released by vm_object_reap() |
1377 | * | |
2d21ac55 A |
1378 | * KERN_SUCCESS means that this object has been terminated |
1379 | * and no longer needs its shadow object but still holds a | |
1380 | * reference on it. | |
1381 | * The caller is responsible for dropping that reference. | |
1382 | * We can't call vm_object_deallocate() here because that | |
1383 | * would create a recursion. | |
1384 | */ | |
8f6c56a5 A |
1385 | return KERN_SUCCESS; |
1386 | } | |
1387 | ||
b0d623f7 | 1388 | |
8f6c56a5 A |
1389 | /* |
1390 | * vm_object_reap(): | |
1391 | * | |
1392 | * Complete the termination of a VM object after it's been marked | |
1393 | * as "terminating" and "!alive" by vm_object_terminate(). | |
1394 | * | |
b0d623f7 A |
1395 | * The VM object must be locked by caller. |
1396 | * The lock will be released on return and the VM object is no longer valid. | |
8f6c56a5 | 1397 | */ |
3e170ce0 | 1398 | |
8f6c56a5 A |
1399 | void |
1400 | vm_object_reap( | |
1401 | vm_object_t object) | |
1402 | { | |
0a7de745 | 1403 | memory_object_t pager; |
8f6c56a5 | 1404 | |
2d21ac55 A |
1405 | vm_object_lock_assert_exclusive(object); |
1406 | assert(object->paging_in_progress == 0); | |
b0d623f7 | 1407 | assert(object->activity_in_progress == 0); |
8f6c56a5 A |
1408 | |
1409 | vm_object_reap_count++; | |
1410 | ||
fe8ab488 A |
1411 | /* |
1412 | * Disown this purgeable object to cleanup its owner's purgeable | |
1413 | * ledgers. We need to do this before disconnecting the object | |
1414 | * from its pager, to properly account for compressed pages. | |
1415 | */ | |
1416 | if (object->internal && | |
d9a64523 | 1417 | (object->purgable != VM_PURGABLE_DENY || |
0a7de745 | 1418 | object->vo_ledger_tag)) { |
cb323159 A |
1419 | int ledger_flags; |
1420 | kern_return_t kr; | |
1421 | ||
1422 | ledger_flags = 0; | |
1423 | if (object->vo_no_footprint) { | |
1424 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; | |
1425 | } | |
d9a64523 A |
1426 | assert(!object->alive); |
1427 | assert(object->terminating); | |
cb323159 A |
1428 | kr = vm_object_ownership_change(object, |
1429 | object->vo_ledger_tag, /* unchanged */ | |
1430 | NULL, /* no owner */ | |
1431 | ledger_flags, | |
1432 | FALSE); /* task_objq not locked */ | |
1433 | assert(kr == KERN_SUCCESS); | |
d9a64523 | 1434 | assert(object->vo_owner == NULL); |
fe8ab488 A |
1435 | } |
1436 | ||
f427ee49 A |
1437 | #if DEVELOPMENT || DEBUG |
1438 | if (object->object_is_shared_cache && | |
1439 | object->pager != NULL && | |
1440 | object->pager->mo_pager_ops == &shared_region_pager_ops) { | |
1441 | OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); | |
1442 | } | |
1443 | #endif /* DEVELOPMENT || DEBUG */ | |
1444 | ||
0b4e3aa0 A |
1445 | pager = object->pager; |
1446 | object->pager = MEMORY_OBJECT_NULL; | |
1447 | ||
0a7de745 | 1448 | if (pager != MEMORY_OBJECT_NULL) { |
c3c9b80d | 1449 | memory_object_control_disable(&object->pager_control); |
0a7de745 | 1450 | } |
0b4e3aa0 | 1451 | |
1c79356b | 1452 | object->ref_count--; |
0a7de745 | 1453 | assert(object->ref_count == 0); |
1c79356b | 1454 | |
b0d623f7 A |
1455 | /* |
1456 | * remove from purgeable queue if it's on | |
1457 | */ | |
fe8ab488 | 1458 | if (object->internal) { |
d9a64523 | 1459 | assert(VM_OBJECT_OWNER(object) == TASK_NULL); |
fe8ab488 | 1460 | |
3e170ce0 A |
1461 | VM_OBJECT_UNWIRED(object); |
1462 | ||
fe8ab488 A |
1463 | if (object->purgable == VM_PURGABLE_DENY) { |
1464 | /* not purgeable: nothing to do */ | |
1465 | } else if (object->purgable == VM_PURGABLE_VOLATILE) { | |
1466 | purgeable_q_t queue; | |
1467 | ||
fe8ab488 A |
1468 | queue = vm_purgeable_object_remove(object); |
1469 | assert(queue); | |
1470 | ||
1471 | if (object->purgeable_when_ripe) { | |
1472 | /* | |
1473 | * Must take page lock for this - | |
1474 | * using it to protect token queue | |
1475 | */ | |
1476 | vm_page_lock_queues(); | |
1477 | vm_purgeable_token_delete_first(queue); | |
0a7de745 A |
1478 | |
1479 | assert(queue->debug_count_objects >= 0); | |
fe8ab488 A |
1480 | vm_page_unlock_queues(); |
1481 | } | |
2d21ac55 | 1482 | |
39236c6e | 1483 | /* |
fe8ab488 | 1484 | * Update "vm_page_purgeable_count" in bulk and mark |
0a7de745 | 1485 | * object as VM_PURGABLE_EMPTY to avoid updating |
fe8ab488 A |
1486 | * "vm_page_purgeable_count" again in vm_page_remove() |
1487 | * when reaping the pages. | |
39236c6e | 1488 | */ |
fe8ab488 A |
1489 | unsigned int delta; |
1490 | assert(object->resident_page_count >= | |
0a7de745 | 1491 | object->wired_page_count); |
fe8ab488 | 1492 | delta = (object->resident_page_count - |
0a7de745 | 1493 | object->wired_page_count); |
fe8ab488 A |
1494 | if (delta != 0) { |
1495 | assert(vm_page_purgeable_count >= delta); | |
1496 | OSAddAtomic(-delta, | |
0a7de745 | 1497 | (SInt32 *)&vm_page_purgeable_count); |
fe8ab488 A |
1498 | } |
1499 | if (object->wired_page_count != 0) { | |
1500 | assert(vm_page_purgeable_wired_count >= | |
0a7de745 | 1501 | object->wired_page_count); |
fe8ab488 | 1502 | OSAddAtomic(-object->wired_page_count, |
0a7de745 | 1503 | (SInt32 *)&vm_page_purgeable_wired_count); |
fe8ab488 A |
1504 | } |
1505 | object->purgable = VM_PURGABLE_EMPTY; | |
0a7de745 A |
1506 | } else if (object->purgable == VM_PURGABLE_NONVOLATILE || |
1507 | object->purgable == VM_PURGABLE_EMPTY) { | |
fe8ab488 | 1508 | /* remove from nonvolatile queue */ |
fe8ab488 A |
1509 | vm_purgeable_nonvolatile_dequeue(object); |
1510 | } else { | |
1511 | panic("object %p in unexpected purgeable state 0x%x\n", | |
0a7de745 | 1512 | object, object->purgable); |
39236c6e | 1513 | } |
a39ff7e2 A |
1514 | if (object->transposed && |
1515 | object->cached_list.next != NULL && | |
1516 | object->cached_list.prev == NULL) { | |
1517 | /* | |
1518 | * object->cached_list.next "points" to the | |
1519 | * object that was transposed with this object. | |
1520 | */ | |
1521 | } else { | |
1522 | assert(object->cached_list.next == NULL); | |
1523 | } | |
1524 | assert(object->cached_list.prev == NULL); | |
2d21ac55 | 1525 | } |
0a7de745 | 1526 | |
1c79356b | 1527 | if (object->pageout) { |
5ba3f43e A |
1528 | /* |
1529 | * free all remaining pages tabled on | |
1530 | * this object | |
1531 | * clean up its shadow |
1532 | */ | |
8f6c56a5 | 1533 | assert(object->shadow != VM_OBJECT_NULL); |
1c79356b A |
1534 | |
1535 | vm_pageout_object_terminate(object); | |
5ba3f43e A |
1536 | } else if (object->resident_page_count) { |
1537 | /* | |
0a7de745 | 1538 | * free all remaining pages tabled on |
5ba3f43e A |
1539 | * this object |
1540 | */ | |
b0d623f7 | 1541 | vm_object_reap_pages(object, REAP_REAP); |
1c79356b | 1542 | } |
39037602 | 1543 | assert(vm_page_queue_empty(&object->memq)); |
1c79356b | 1544 | assert(object->paging_in_progress == 0); |
b0d623f7 | 1545 | assert(object->activity_in_progress == 0); |
1c79356b A |
1546 | assert(object->ref_count == 0); |
1547 | ||
1c79356b | 1548 | /* |
0b4e3aa0 A |
1549 | * If the pager has not already been released by |
1550 | * vm_object_destroy, we need to terminate it and | |
1551 | * release our reference to it here. | |
1c79356b | 1552 | */ |
0b4e3aa0 A |
1553 | if (pager != MEMORY_OBJECT_NULL) { |
1554 | vm_object_unlock(object); | |
5ba3f43e | 1555 | vm_object_release_pager(pager); |
0b4e3aa0 | 1556 | vm_object_lock(object); |
1c79356b | 1557 | } |
0b4e3aa0 | 1558 | |
1c79356b | 1559 | /* kick off anyone waiting on terminating */ |
0b4e3aa0 | 1560 | object->terminating = FALSE; |
1c79356b A |
1561 | vm_object_paging_begin(object); |
1562 | vm_object_paging_end(object); | |
1563 | vm_object_unlock(object); | |
1564 | ||
6601e61a A |
1565 | object->shadow = VM_OBJECT_NULL; |
1566 | ||
fe8ab488 A |
1567 | #if VM_OBJECT_TRACKING |
1568 | if (vm_object_tracking_inited) { | |
1569 | btlog_remove_entries_for_element(vm_object_tracking_btlog, | |
0a7de745 | 1570 | object); |
fe8ab488 A |
1571 | } |
1572 | #endif /* VM_OBJECT_TRACKING */ | |
1573 | ||
2d21ac55 | 1574 | vm_object_lock_destroy(object); |
1c79356b A |
1575 | /* |
1576 | * Free the space for the object. | |
1577 | */ | |
91447636 | 1578 | zfree(vm_object_zone, object); |
8f6c56a5 A |
1579 | object = VM_OBJECT_NULL; |
1580 | } | |
1581 | ||
8f6c56a5 | 1582 | |
6d2010ae | 1583 | unsigned int vm_max_batch = 256; |
8f6c56a5 | 1584 | |
b0d623f7 A |
1585 | #define V_O_R_MAX_BATCH 128 |
1586 | ||
0a7de745 A |
1587 | #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch) |
1588 | ||
1589 | ||
1590 | #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \ | |
1591 | MACRO_BEGIN \ | |
1592 | if (_local_free_q) { \ | |
1593 | if (do_disconnect) { \ | |
1594 | vm_page_t m; \ | |
1595 | for (m = _local_free_q; \ | |
1596 | m != VM_PAGE_NULL; \ | |
1597 | m = m->vmp_snext) { \ | |
1598 | if (m->vmp_pmapped) { \ | |
1599 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \ | |
1600 | } \ | |
1601 | } \ | |
1602 | } \ | |
1603 | vm_page_free_list(_local_free_q, TRUE); \ | |
1604 | _local_free_q = VM_PAGE_NULL; \ | |
1605 | } \ | |
b0d623f7 A |
1606 | MACRO_END |
1607 | ||
8f6c56a5 A |
1608 | |
1609 | void | |
b0d623f7 | 1610 | vm_object_reap_pages( |
0a7de745 A |
1611 | vm_object_t object, |
1612 | int reap_type) | |
8f6c56a5 | 1613 | { |
0a7de745 A |
1614 | vm_page_t p; |
1615 | vm_page_t next; | |
1616 | vm_page_t local_free_q = VM_PAGE_NULL; | |
1617 | int loop_count; | |
1618 | boolean_t disconnect_on_release; | |
1619 | pmap_flush_context pmap_flush_context_storage; | |
8f6c56a5 | 1620 | |
b0d623f7 | 1621 | if (reap_type == REAP_DATA_FLUSH) { |
2d21ac55 | 1622 | /* |
b0d623f7 A |
1623 | * We need to disconnect pages from all pmaps before |
1624 | * releasing them to the free list | |
2d21ac55 | 1625 | */ |
b0d623f7 A |
1626 | disconnect_on_release = TRUE; |
1627 | } else { | |
1628 | /* | |
1629 | * Either the caller has already disconnected the pages | |
1630 | * from all pmaps, or we disconnect them here as we add | |
1631 | * them to our local list of pages to be released. |
1632 | * No need to re-disconnect them when we release the pages | |
1633 | * to the free list. | |
1634 | */ | |
1635 | disconnect_on_release = FALSE; | |
1636 | } | |
0a7de745 | 1637 | |
b0d623f7 | 1638 | restart_after_sleep: |
0a7de745 | 1639 | if (vm_page_queue_empty(&object->memq)) { |
b0d623f7 | 1640 | return; |
0a7de745 | 1641 | } |
316670eb | 1642 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
b0d623f7 | 1643 | |
0a7de745 | 1644 | if (reap_type == REAP_PURGEABLE) { |
39236c6e | 1645 | pmap_flush_context_init(&pmap_flush_context_storage); |
0a7de745 | 1646 | } |
39236c6e | 1647 | |
c3c9b80d | 1648 | vm_page_lock_queues(); |
b0d623f7 | 1649 | |
39037602 | 1650 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
b0d623f7 | 1651 | |
39037602 | 1652 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
b0d623f7 | 1653 | p = next; |
d9a64523 | 1654 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
b0d623f7 A |
1655 | |
1656 | if (--loop_count == 0) { | |
b0d623f7 A |
1657 | vm_page_unlock_queues(); |
1658 | ||
1659 | if (local_free_q) { | |
39236c6e A |
1660 | if (reap_type == REAP_PURGEABLE) { |
1661 | pmap_flush(&pmap_flush_context_storage); | |
1662 | pmap_flush_context_init(&pmap_flush_context_storage); | |
1663 | } | |
b0d623f7 A |
1664 | /* |
1665 | * Free the pages we reclaimed so far | |
1666 | * and take a little break to avoid | |
1667 | * hogging the page queue lock too long | |
1668 | */ | |
1669 | VM_OBJ_REAP_FREELIST(local_free_q, | |
0a7de745 A |
1670 | disconnect_on_release); |
1671 | } else { | |
b0d623f7 | 1672 | mutex_pause(0); |
0a7de745 | 1673 | } |
b0d623f7 | 1674 | |
316670eb | 1675 | loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); |
b0d623f7 | 1676 | |
c3c9b80d | 1677 | vm_page_lock_queues(); |
b0d623f7 A |
1678 | } |
1679 | if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) { | |
d9a64523 | 1680 | if (p->vmp_busy || p->vmp_cleaning) { |
b0d623f7 A |
1681 | vm_page_unlock_queues(); |
1682 | /* | |
1683 | * free the pages reclaimed so far | |
1684 | */ | |
1685 | VM_OBJ_REAP_FREELIST(local_free_q, | |
0a7de745 | 1686 | disconnect_on_release); |
b0d623f7 A |
1687 | |
1688 | PAGE_SLEEP(object, p, THREAD_UNINT); | |
1689 | ||
1690 | goto restart_after_sleep; | |
1691 | } | |
0a7de745 | 1692 | if (p->vmp_laundry) { |
316670eb | 1693 | vm_pageout_steal_laundry(p, TRUE); |
0a7de745 | 1694 | } |
b0d623f7 A |
1695 | } |
1696 | switch (reap_type) { | |
b0d623f7 A |
1697 | case REAP_DATA_FLUSH: |
1698 | if (VM_PAGE_WIRED(p)) { | |
1699 | /* | |
1700 | * this is an odd case... perhaps we should | |
1701 | * zero-fill this page since we're conceptually | |
1702 | * tossing its data at this point, but leaving | |
1703 | * it on the object to honor the 'wire' contract | |
1704 | */ | |
1705 | continue; | |
1706 | } | |
1707 | break; | |
0a7de745 | 1708 | |
b0d623f7 A |
1709 | case REAP_PURGEABLE: |
1710 | if (VM_PAGE_WIRED(p)) { | |
316670eb A |
1711 | /* |
1712 | * can't purge a wired page | |
1713 | */ | |
b0d623f7 A |
1714 | vm_page_purged_wired++; |
1715 | continue; | |
1716 | } | |
0a7de745 | 1717 | if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) { |
316670eb | 1718 | vm_pageout_steal_laundry(p, TRUE); |
0a7de745 | 1719 | } |
39037602 | 1720 | |
d9a64523 | 1721 | if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) { |
316670eb A |
1722 | /* |
1723 | * page is being acted upon, | |
1724 | * so don't mess with it | |
1725 | */ | |
1726 | vm_page_purged_others++; | |
1727 | continue; | |
1728 | } | |
d9a64523 | 1729 | if (p->vmp_busy) { |
b0d623f7 A |
1730 | /* |
1731 | * We can't reclaim a busy page but we can | |
316670eb | 1732 | * deactivate it (it's not wired) to make |
b0d623f7 A |
1733 | * sure that it gets considered by |
1734 | * vm_pageout_scan() later. | |
1735 | */ | |
0a7de745 | 1736 | if (VM_PAGE_PAGEABLE(p)) { |
39037602 | 1737 | vm_page_deactivate(p); |
0a7de745 | 1738 | } |
b0d623f7 A |
1739 | vm_page_purged_busy++; |
1740 | continue; | |
1741 | } | |
1742 | ||
39037602 | 1743 | assert(VM_PAGE_OBJECT(p) != kernel_object); |
b0d623f7 A |
1744 | |
1745 | /* | |
1746 | * we can discard this page... | |
1747 | */ | |
d9a64523 | 1748 | if (p->vmp_pmapped == TRUE) { |
b0d623f7 A |
1749 | /* |
1750 | * unmap the page | |
1751 | */ | |
39037602 | 1752 | pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage); |
b0d623f7 | 1753 | } |
39236c6e | 1754 | vm_page_purged_count++; |
b0d623f7 A |
1755 | |
1756 | break; | |
1757 | ||
1758 | case REAP_TERMINATE: | |
d9a64523 | 1759 | if (p->vmp_absent || p->vmp_private) { |
b0d623f7 A |
1760 | /* |
1761 | * For private pages, VM_PAGE_FREE just | |
1762 | * leaves the page structure around for | |
1763 | * its owner to clean up. For absent | |
1764 | * pages, the structure is returned to | |
1765 | * the appropriate pool. | |
1766 | */ | |
1767 | break; | |
1768 | } | |
d9a64523 | 1769 | if (p->vmp_fictitious) { |
0a7de745 | 1770 | assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr); |
b0d623f7 A |
1771 | break; |
1772 | } | |
0a7de745 | 1773 | if (!p->vmp_dirty && p->vmp_wpmapped) { |
d9a64523 | 1774 | p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)); |
0a7de745 | 1775 | } |
b0d623f7 | 1776 | |
d9a64523 | 1777 | if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) { |
3e170ce0 | 1778 | assert(!object->internal); |
0a7de745 | 1779 | |
d9a64523 | 1780 | p->vmp_free_when_done = TRUE; |
39037602 | 1781 | |
d9a64523 | 1782 | if (!p->vmp_laundry) { |
39037602 | 1783 | vm_page_queues_remove(p, TRUE); |
316670eb A |
1784 | /* |
1785 | * flush page... page will be freed | |
1786 | * upon completion of I/O | |
1787 | */ | |
5ba3f43e | 1788 | vm_pageout_cluster(p); |
316670eb | 1789 | } |
b0d623f7 A |
1790 | vm_page_unlock_queues(); |
1791 | /* | |
1792 | * free the pages reclaimed so far | |
1793 | */ | |
1794 | VM_OBJ_REAP_FREELIST(local_free_q, | |
0a7de745 | 1795 | disconnect_on_release); |
b0d623f7 | 1796 | |
b0d623f7 A |
1797 | vm_object_paging_wait(object, THREAD_UNINT); |
1798 | ||
1799 | goto restart_after_sleep; | |
1800 | } | |
1801 | break; | |
1802 | ||
1803 | case REAP_REAP: | |
1804 | break; | |
1805 | } | |
1806 | vm_page_free_prepare_queues(p); | |
d9a64523 | 1807 | assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0); |
b0d623f7 A |
1808 | /* |
1809 | * Add this page to our list of reclaimed pages, | |
1810 | * to be freed later. | |
1811 | */ | |
d9a64523 | 1812 | p->vmp_snext = local_free_q; |
b0d623f7 A |
1813 | local_free_q = p; |
1814 | } | |
1815 | vm_page_unlock_queues(); | |
1816 | ||
1817 | /* | |
1818 | * Free the remaining reclaimed pages | |
1819 | */ | |
0a7de745 | 1820 | if (reap_type == REAP_PURGEABLE) { |
39236c6e | 1821 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 1822 | } |
39236c6e | 1823 | |
b0d623f7 | 1824 | VM_OBJ_REAP_FREELIST(local_free_q, |
0a7de745 | 1825 | disconnect_on_release); |
b0d623f7 A |
1826 | } |
1827 | ||
1828 | ||
1829 | void | |
1830 | vm_object_reap_async( | |
0a7de745 | 1831 | vm_object_t object) |
b0d623f7 A |
1832 | { |
1833 | vm_object_lock_assert_exclusive(object); | |
1834 | ||
1835 | vm_object_reaper_lock_spin(); | |
1836 | ||
1837 | vm_object_reap_count_async++; | |
1838 | ||
1839 | /* enqueue the VM object... */ | |
1840 | queue_enter(&vm_object_reaper_queue, object, | |
0a7de745 | 1841 | vm_object_t, cached_list); |
b0d623f7 A |
1842 | |
1843 | vm_object_reaper_unlock(); | |
1844 | ||
1845 | /* ... and wake up the reaper thread */ | |
1846 | thread_wakeup((event_t) &vm_object_reaper_queue); | |
1847 | } | |
1848 | ||
1849 | ||
1850 | void | |
1851 | vm_object_reaper_thread(void) | |
1852 | { | |
0a7de745 | 1853 | vm_object_t object, shadow_object; |
b0d623f7 A |
1854 | |
1855 | vm_object_reaper_lock_spin(); | |
1856 | ||
1857 | while (!queue_empty(&vm_object_reaper_queue)) { | |
1858 | queue_remove_first(&vm_object_reaper_queue, | |
0a7de745 A |
1859 | object, |
1860 | vm_object_t, | |
1861 | cached_list); | |
b0d623f7 A |
1862 | |
1863 | vm_object_reaper_unlock(); | |
1864 | vm_object_lock(object); | |
1865 | ||
1866 | assert(object->terminating); | |
1867 | assert(!object->alive); | |
0a7de745 | 1868 | |
b0d623f7 A |
1869 | /* |
1870 | * The pageout daemon might be playing with our pages. | |
1871 | * Now that the object is dead, it won't touch any more | |
1872 | * pages, but some pages might already be on their way out. | |
1873 | * Hence, we wait until the active paging activities have | |
1874 | * ceased before we break the association with the pager | |
1875 | * itself. | |
1876 | */ | |
1877 | while (object->paging_in_progress != 0 || | |
0a7de745 | 1878 | object->activity_in_progress != 0) { |
b0d623f7 | 1879 | vm_object_wait(object, |
0a7de745 A |
1880 | VM_OBJECT_EVENT_PAGING_IN_PROGRESS, |
1881 | THREAD_UNINT); | |
b0d623f7 A |
1882 | vm_object_lock(object); |
1883 | } | |
1884 | ||
1885 | shadow_object = | |
0a7de745 | 1886 | object->pageout ? VM_OBJECT_NULL : object->shadow; |
6601e61a | 1887 | |
8f6c56a5 A |
1888 | vm_object_reap(object); |
1889 | /* cache is unlocked and object is no longer valid */ | |
1890 | object = VM_OBJECT_NULL; | |
1891 | ||
6601e61a A |
1892 | if (shadow_object != VM_OBJECT_NULL) { |
1893 | /* | |
1894 | * Drop the reference "object" was holding on | |
1895 | * its shadow object. | |
1896 | */ | |
1897 | vm_object_deallocate(shadow_object); | |
1898 | shadow_object = VM_OBJECT_NULL; | |
1899 | } | |
b0d623f7 | 1900 | vm_object_reaper_lock_spin(); |
8f6c56a5 A |
1901 | } |
1902 | ||
1903 | /* wait for more work... */ | |
1904 | assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT); | |
b0d623f7 A |
1905 | |
1906 | vm_object_reaper_unlock(); | |
1907 | ||
8f6c56a5 A |
1908 | thread_block((thread_continue_t) vm_object_reaper_thread); |
1909 | /*NOTREACHED*/ | |
1c79356b A |
1910 | } |
1911 | ||
1c79356b | 1912 | /* |
0b4e3aa0 A |
1913 | * Routine: vm_object_release_pager |
1914 | * Purpose: Terminate the pager and, upon completion, | |
1915 | * release our last reference to it. | |
1c79356b | 1916 | */ |
0b4e3aa0 A |
1917 | static void |
1918 | vm_object_release_pager( | |
0a7de745 | 1919 | memory_object_t pager) |
1c79356b | 1920 | { |
0b4e3aa0 A |
1921 | /* |
1922 | * Terminate the pager. | |
1923 | */ | |
1c79356b | 1924 | |
0b4e3aa0 | 1925 | (void) memory_object_terminate(pager); |
1c79356b | 1926 | |
0b4e3aa0 A |
1927 | /* |
1928 | * Release reference to pager. | |
1929 | */ | |
1930 | memory_object_deallocate(pager); | |
1931 | } | |
1c79356b | 1932 | |
1c79356b | 1933 | /* |
0b4e3aa0 | 1934 | * Routine: vm_object_destroy |
1c79356b | 1935 | * Purpose: |
0b4e3aa0 | 1936 | * Shut down a VM object, despite the |
1c79356b A |
1937 | * presence of address map (or other) references |
1938 | * to the vm_object. | |
1939 | */ | |
1940 | kern_return_t | |
0b4e3aa0 | 1941 | vm_object_destroy( |
0a7de745 A |
1942 | vm_object_t object, |
1943 | __unused kern_return_t reason) | |
1c79356b | 1944 | { |
0a7de745 | 1945 | memory_object_t old_pager; |
1c79356b | 1946 | |
0a7de745 A |
1947 | if (object == VM_OBJECT_NULL) { |
1948 | return KERN_SUCCESS; | |
1949 | } | |
1c79356b A |
1950 | |
1951 | /* | |
0b4e3aa0 | 1952 | * Remove the pager association immediately. |
1c79356b A |
1953 | * |
1954 | * This will prevent the memory manager from further | |
1955 | * meddling. [If it wanted to flush data or make | |
1956 | * other changes, it should have done so before performing | |
1957 | * the destroy call.] | |
1958 | */ | |
1959 | ||
1c79356b | 1960 | vm_object_lock(object); |
1c79356b A |
1961 | object->can_persist = FALSE; |
1962 | object->named = FALSE; | |
0b4e3aa0 | 1963 | object->alive = FALSE; |
1c79356b | 1964 | |
f427ee49 A |
1965 | #if DEVELOPMENT || DEBUG |
1966 | if (object->object_is_shared_cache && | |
1967 | object->pager != NULL && | |
1968 | object->pager->mo_pager_ops == &shared_region_pager_ops) { | |
1969 | OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); | |
1970 | } | |
1971 | #endif /* DEVELOPMENT || DEBUG */ | |
1972 | ||
0b4e3aa0 A |
1973 | old_pager = object->pager; |
1974 | object->pager = MEMORY_OBJECT_NULL; | |
0a7de745 | 1975 | if (old_pager != MEMORY_OBJECT_NULL) { |
c3c9b80d | 1976 | memory_object_control_disable(&object->pager_control); |
0a7de745 | 1977 | } |
1c79356b A |
1978 | |
1979 | /* | |
b0d623f7 A |
1980 | * Wait for the existing paging activity (that got |
1981 | * through before we nulled out the pager) to subside. | |
1982 | */ | |
1983 | ||
1984 | vm_object_paging_wait(object, THREAD_UNINT); | |
1985 | vm_object_unlock(object); | |
1986 | ||
1987 | /* | |
1988 | * Terminate the object now. | |
1989 | */ | |
1990 | if (old_pager != MEMORY_OBJECT_NULL) { | |
5ba3f43e | 1991 | vm_object_release_pager(old_pager); |
b0d623f7 | 1992 | |
0a7de745 | 1993 | /* |
b0d623f7 A |
1994 | * JMM - Release the caller's reference. This assumes the |
1995 | * caller had a reference to release, which is a big (but | |
1996 | * currently valid) assumption if this is driven from the | |
1997 | * vnode pager (it is holding a named reference when making | |
1998 | * this call). |
1999 | */ | |
2000 | vm_object_deallocate(object); | |
b0d623f7 | 2001 | } |
0a7de745 | 2002 | return KERN_SUCCESS; |
b0d623f7 A |
2003 | } |
2004 | ||
b0d623f7 A |
2005 | /* |
2006 | * The "chunk" macros are used by routines below when looking for pages to deactivate. These | |
2007 | * exist because of the need to handle shadow chains. When deactivating pages, we only | |
2008 | * want to deactivate the ones at the topmost level in the object chain. In order to do |
2009 | * this efficiently, the specified address range is divided up into "chunks" and we use | |
2010 | * a bit map to keep track of which pages have already been processed as we descend down | |
2011 | * the shadow chain. These chunk macros hide the details of the bit map implementation | |
2012 | * as much as we can. | |
2013 | * | |
2014 | * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is | |
2015 | * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest | |
2016 | * order bit represents page 0 in the current range and highest order bit represents | |
2017 | * page 63. | |
2018 | * | |
2019 | * For further convenience, we also use negative logic for the page state in the bit map. | |
2020 | * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has | |
2021 | * been processed. This way we can simply test the 64-bit long word to see if it's zero | |
2022 | * to easily tell if the whole range has been processed. Therefore, the bit map starts | |
2023 | * out with all the bits set. The macros below hide all these details from the caller. | |
2024 | */ | |
2025 | ||
0a7de745 A |
2026 | #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */ |
2027 | /* be the same as the number of bits in */ | |
2028 | /* the chunk_state_t type. We use 64 */ | |
2029 | /* just for convenience. */ | |
b0d623f7 | 2030 | |
0a7de745 | 2031 | #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */ |
b0d623f7 | 2032 | |
0a7de745 | 2033 | typedef uint64_t chunk_state_t; |
b0d623f7 A |
2034 | |
2035 | /* | |
2036 | * The bit map uses negative logic, so we start out with all 64 bits set to indicate | |
2037 | * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE, | |
2038 | * then we mark pages beyond the len as having been "processed" so that we don't waste time | |
0a7de745 | 2039 | * looking at pages in that range. This can save us from unnecessarily chasing down the |
b0d623f7 A |
2040 | * shadow chain. |
2041 | */ | |
2042 | ||
0a7de745 A |
2043 | #define CHUNK_INIT(c, len) \ |
2044 | MACRO_BEGIN \ | |
2045 | uint64_t p; \ | |
2046 | \ | |
2047 | (c) = 0xffffffffffffffffLL; \ | |
2048 | \ | |
2049 | for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \ | |
2050 | MARK_PAGE_HANDLED(c, p); \ | |
b0d623f7 A |
2051 | MACRO_END |
2052 | ||
6d2010ae | 2053 | |
b0d623f7 A |
2054 | /* |
2055 | * Return true if all pages in the chunk have not yet been processed. | |
2056 | */ | |
2057 | ||
0a7de745 | 2058 | #define CHUNK_NOT_COMPLETE(c) ((c) != 0) |
b0d623f7 A |
2059 | |
2060 | /* | |
2061 | * Return true if the page at offset 'p' in the bit map has already been handled | |
2062 | * while processing a higher level object in the shadow chain. | |
2063 | */ | |
2064 | ||
cb323159 | 2065 | #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1ULL << (p))) == 0) |
b0d623f7 A |
2066 | |
2067 | /* | |
2068 | * Mark the page at offset 'p' in the bit map as having been processed. | |
2069 | */ | |
2070 | ||
2071 | #define MARK_PAGE_HANDLED(c, p) \ | |
2072 | MACRO_BEGIN \ | |
cb323159 | 2073 | (c) = (c) & ~(1ULL << (p)); \ |
b0d623f7 A |
2074 | MACRO_END |
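/*
 * A minimal, compiled-out sketch of how the chunk bit map macros above fit
 * together, using the negative-logic convention described earlier: a set bit
 * means "not yet processed".  The function name is hypothetical; the block is
 * not part of the original source and only shows the intended usage pattern.
 */
#if 0 /* illustrative example only */
static void
chunk_state_example(vm_object_size_t len)
{
	chunk_state_t	chunk_state;
	uint64_t	page_num;

	/* every page covered by "len" starts out "not yet processed" */
	CHUNK_INIT(chunk_state, len);

	for (page_num = 0;
	    page_num < PAGES_IN_A_CHUNK && CHUNK_NOT_COMPLETE(chunk_state);
	    page_num++) {
		if (PAGE_ALREADY_HANDLED(chunk_state, page_num)) {
			/* a higher-level object already covered this page */
			continue;
		}
		/* ... look up and process the page at this offset ... */
		MARK_PAGE_HANDLED(chunk_state, page_num);
	}
	/* all bits are now clear, so CHUNK_NOT_COMPLETE() returns FALSE */
}
#endif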
2075 | ||
2076 | ||
2077 | /* | |
2078 | * Return true if the page at the given offset has been paged out. Object is | |
2079 | * locked upon entry and returned locked. | |
2080 | */ | |
2081 | ||
2082 | static boolean_t | |
2083 | page_is_paged_out( | |
0a7de745 A |
2084 | vm_object_t object, |
2085 | vm_object_offset_t offset) | |
b0d623f7 | 2086 | { |
39236c6e | 2087 | if (object->internal && |
0a7de745 A |
2088 | object->alive && |
2089 | !object->terminating && | |
2090 | object->pager_ready) { | |
2091 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) | |
39037602 | 2092 | == VM_EXTERNAL_STATE_EXISTS) { |
b0d623f7 A |
2093 | return TRUE; |
2094 | } | |
2095 | } | |
b0d623f7 A |
2096 | return FALSE; |
2097 | } | |
2098 | ||
2099 | ||
6d2010ae | 2100 | |
39236c6e A |
2101 | /* |
2102 | * madvise_free_debug | |
2103 | * | |
2104 | * To help debug madvise(MADV_FREE*) mis-usage, this triggers a | |
2105 | * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to | |
2106 | * simulate the loss of the page's contents as if the page had been | |
2107 | * reclaimed and then re-faulted. | |
2108 | */ | |
2109 | #if DEVELOPMENT || DEBUG | |
2110 | int madvise_free_debug = 1; | |
2111 | #else /* DEBUG */ | |
2112 | int madvise_free_debug = 0; | |
2113 | #endif /* DEBUG */ | |
2114 | ||
f427ee49 A |
2115 | __options_decl(deactivate_flags_t, uint32_t, { |
2116 | DEACTIVATE_KILL = 0x1, | |
2117 | DEACTIVATE_REUSABLE = 0x2, | |
2118 | DEACTIVATE_ALL_REUSABLE = 0x4, | |
2119 | DEACTIVATE_CLEAR_REFMOD = 0x8 | |
2120 | }); | |
2121 | ||
b0d623f7 A |
2122 | /* |
2123 | * Deactivate the pages in the specified object and range. If kill_page is set, also discard any | |
2124 | * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify | |
2125 | * a size that is less than or equal to the CHUNK_SIZE. | |
2126 | */ | |
2127 | ||
2128 | static void | |
2129 | deactivate_pages_in_object( | |
0a7de745 A |
2130 | vm_object_t object, |
2131 | vm_object_offset_t offset, | |
2132 | vm_object_size_t size, | |
f427ee49 | 2133 | deactivate_flags_t flags, |
0a7de745 | 2134 | chunk_state_t *chunk_state, |
3e170ce0 | 2135 | pmap_flush_context *pfc, |
0a7de745 A |
2136 | struct pmap *pmap, |
2137 | vm_map_offset_t pmap_offset) | |
b0d623f7 | 2138 | { |
0a7de745 A |
2139 | vm_page_t m; |
2140 | int p; | |
f427ee49 A |
2141 | struct vm_page_delayed_work dw_array; |
2142 | struct vm_page_delayed_work *dwp, *dwp_start; | |
2143 | bool dwp_finish_ctx = TRUE; | |
0a7de745 A |
2144 | int dw_count; |
2145 | int dw_limit; | |
2146 | unsigned int reusable = 0; | |
b0d623f7 | 2147 | |
b0d623f7 A |
2148 | /* |
2149 | * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the | |
2150 | * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may | |
2151 | * have pages marked as having been processed already. We stop the loop early if we find we've handled | |
2152 | * all the pages in the chunk. | |
2153 | */ | |
2154 | ||
f427ee49 | 2155 | dwp_start = dwp = NULL; |
b0d623f7 | 2156 | dw_count = 0; |
6d2010ae | 2157 | dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); |
f427ee49 A |
2158 | dwp_start = vm_page_delayed_work_get_ctx(); |
2159 | if (dwp_start == NULL) { | |
2160 | dwp_start = &dw_array; | |
2161 | dw_limit = 1; | |
2162 | dwp_finish_ctx = FALSE; | |
2163 | } | |
2164 | ||
2165 | dwp = dwp_start; | |
b0d623f7 | 2166 | |
0a7de745 | 2167 | for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { |
b0d623f7 A |
2168 | /* |
2169 | * If this offset has already been found and handled in a higher level object, then don't | |
2170 | * do anything with it in the current shadow object. | |
2171 | */ | |
2172 | ||
0a7de745 | 2173 | if (PAGE_ALREADY_HANDLED(*chunk_state, p)) { |
b0d623f7 | 2174 | continue; |
0a7de745 A |
2175 | } |
2176 | ||
b0d623f7 A |
2177 | /* |
2178 | * See if the page at this offset is around. First check to see if the page is resident, | |
2179 | * then if not, check the existence map or with the pager. | |
2180 | */ | |
2181 | ||
0a7de745 | 2182 | if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { |
b0d623f7 A |
2183 | /* |
2184 | * We found a page we were looking for. Mark it as "handled" now in the chunk_state | |
2185 | * so that we won't bother looking for a page at this offset again if there are more | |
2186 | * shadow objects. Then deactivate the page. | |
2187 | */ | |
2188 | ||
2189 | MARK_PAGE_HANDLED(*chunk_state, p); | |
0a7de745 A |
2190 | |
2191 | if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && | |
d9a64523 | 2192 | (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) { |
f427ee49 | 2193 | int clear_refmod_mask; |
0a7de745 | 2194 | int pmap_options; |
39236c6e A |
2195 | dwp->dw_mask = 0; |
2196 | ||
fe8ab488 | 2197 | pmap_options = 0; |
f427ee49 | 2198 | clear_refmod_mask = VM_MEM_REFERENCED; |
39236c6e | 2199 | dwp->dw_mask |= DW_clear_reference; |
b0d623f7 | 2200 | |
f427ee49 | 2201 | if ((flags & DEACTIVATE_KILL) && (object->internal)) { |
39236c6e A |
2202 | if (madvise_free_debug) { |
2203 | /* | |
2204 | * zero-fill the page now | |
2205 | * to simulate it being | |
2206 | * reclaimed and re-faulted. | |
2207 | */ | |
39037602 | 2208 | pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m)); |
39236c6e | 2209 | } |
0a7de745 A |
2210 | m->vmp_precious = FALSE; |
2211 | m->vmp_dirty = FALSE; | |
b0d623f7 | 2212 | |
f427ee49 | 2213 | clear_refmod_mask |= VM_MEM_MODIFIED; |
d9a64523 | 2214 | if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { |
d1ecb069 A |
2215 | /* |
2216 | * This page is now clean and | |
2217 | * reclaimable. Move it out | |
2218 | * of the throttled queue, so | |
2219 | * that vm_pageout_scan() can | |
2220 | * find it. | |
2221 | */ | |
2222 | dwp->dw_mask |= DW_move_page; | |
2223 | } | |
39037602 A |
2224 | |
2225 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); | |
b0d623f7 | 2226 | |
f427ee49 A |
2227 | if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) { |
2228 | assert(!(flags & DEACTIVATE_ALL_REUSABLE)); | |
b0d623f7 | 2229 | assert(!object->all_reusable); |
d9a64523 | 2230 | m->vmp_reusable = TRUE; |
b0d623f7 A |
2231 | object->reusable_page_count++; |
2232 | assert(object->resident_page_count >= object->reusable_page_count); | |
2233 | reusable++; | |
fe8ab488 A |
2234 | /* |
2235 | * Tell pmap this page is now | |
2236 | * "reusable" (to update pmap | |
2237 | * stats for all mappings). | |
2238 | */ | |
0a7de745 | 2239 | pmap_options |= PMAP_OPTIONS_SET_REUSABLE; |
b0d623f7 A |
2240 | } |
2241 | } | |
f427ee49 A |
2242 | if (flags & DEACTIVATE_CLEAR_REFMOD) { |
2243 | /* | |
2244 | * The caller didn't clear the refmod bits in advance. | |
2245 | * Clear them for this page now. | |
2246 | */ | |
2247 | pmap_options |= PMAP_OPTIONS_NOFLUSH; | |
2248 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), | |
2249 | clear_refmod_mask, | |
2250 | pmap_options, | |
2251 | (void *)pfc); | |
2252 | } | |
b0d623f7 | 2253 | |
f427ee49 A |
2254 | if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && |
2255 | !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) { | |
b0d623f7 | 2256 | dwp->dw_mask |= DW_move_page; |
0a7de745 A |
2257 | } |
2258 | ||
2259 | if (dwp->dw_mask) { | |
39236c6e | 2260 | VM_PAGE_ADD_DELAYED_WORK(dwp, m, |
0a7de745 A |
2261 | dw_count); |
2262 | } | |
b0d623f7 | 2263 | |
6d2010ae | 2264 | if (dw_count >= dw_limit) { |
b0d623f7 A |
2265 | if (reusable) { |
2266 | OSAddAtomic(reusable, | |
0a7de745 | 2267 | &vm_page_stats_reusable.reusable_count); |
b0d623f7 A |
2268 | vm_page_stats_reusable.reusable += reusable; |
2269 | reusable = 0; | |
2270 | } | |
f427ee49 | 2271 | vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); |
b0d623f7 | 2272 | |
f427ee49 | 2273 | dwp = dwp_start; |
b0d623f7 A |
2274 | dw_count = 0; |
2275 | } | |
2276 | } | |
b0d623f7 | 2277 | } else { |
b0d623f7 A |
2278 | /* |
2279 | * The page at this offset isn't memory resident, check to see if it's | |
2280 | * been paged out. If so, mark it as handled so we don't bother looking | |
2281 | * for it in the shadow chain. | |
2282 | */ | |
2283 | ||
2284 | if (page_is_paged_out(object, offset)) { | |
2285 | MARK_PAGE_HANDLED(*chunk_state, p); | |
2286 | ||
2287 | /* | |
0a7de745 | 2288 | * If we're killing a non-resident page, then clear the page in the existence |
b0d623f7 A |
2289 | * map so we don't bother paging it back in if it's touched again in the future. |
2290 | */ | |
2291 | ||
f427ee49 | 2292 | if ((flags & DEACTIVATE_KILL) && (object->internal)) { |
39037602 A |
2293 | VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); |
2294 | ||
2295 | if (pmap != PMAP_NULL) { | |
3e170ce0 A |
2296 | /* |
2297 | * Tell pmap that this page | |
2298 | * is no longer mapped, to | |
2299 | * adjust the footprint ledger | |
2300 | * because this page is no | |
2301 | * longer compressed. | |
2302 | */ | |
2303 | pmap_remove_options( | |
2304 | pmap, | |
2305 | pmap_offset, | |
2306 | (pmap_offset + | |
0a7de745 | 2307 | PAGE_SIZE), |
3e170ce0 A |
2308 | PMAP_OPTIONS_REMOVE); |
2309 | } | |
b0d623f7 A |
2310 | } |
2311 | } | |
2312 | } | |
2313 | } | |
2314 | ||
2315 | if (reusable) { | |
2316 | OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count); | |
0a7de745 | 2317 | vm_page_stats_reusable.reusable += reusable; |
b0d623f7 A |
2318 | reusable = 0; |
2319 | } | |
0a7de745 A |
2320 | |
2321 | if (dw_count) { | |
f427ee49 A |
2322 | vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); |
2323 | dwp = dwp_start; | |
2324 | dw_count = 0; | |
2325 | } | |
2326 | ||
2327 | if (dwp_start && dwp_finish_ctx) { | |
2328 | vm_page_delayed_work_finish_ctx(dwp_start); | |
2329 | dwp_start = dwp = NULL; | |
0a7de745 | 2330 | } |
b0d623f7 A |
2331 | } |
2332 | ||
2333 | ||
2334 | /* | |
2335 | * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk" |
2336 | * will always be less than or equal to the given size. The total range is divided up | |
2337 | * into chunks for efficiency and performance related to the locks and handling the shadow | |
2338 | * chain. This routine returns how much of the given "size" it actually processed. It's | |
2339 | * up to the caller to loop and keep calling this routine until the entire range they want |
2340 | * to process has been done. | |
f427ee49 | 2341 | * Iff the DEACTIVATE_CLEAR_REFMOD flag is set, pmap_clear_refmod_options is called for each physical page in this range. |
b0d623f7 A |
2342 | */ |
2343 | ||
2344 | static vm_object_size_t | |
2345 | deactivate_a_chunk( | |
0a7de745 A |
2346 | vm_object_t orig_object, |
2347 | vm_object_offset_t offset, | |
2348 | vm_object_size_t size, | |
f427ee49 | 2349 | deactivate_flags_t flags, |
3e170ce0 | 2350 | pmap_flush_context *pfc, |
0a7de745 A |
2351 | struct pmap *pmap, |
2352 | vm_map_offset_t pmap_offset) | |
b0d623f7 | 2353 | { |
0a7de745 A |
2354 | vm_object_t object; |
2355 | vm_object_t tmp_object; | |
2356 | vm_object_size_t length; | |
2357 | chunk_state_t chunk_state; | |
b0d623f7 A |
2358 | |
2359 | ||
2360 | /* | |
2361 | * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the | |
2362 | * remaining size the caller asked for. | |
2363 | */ | |
2364 | ||
2365 | length = MIN(size, CHUNK_SIZE); | |
2366 | ||
2367 | /* | |
2368 | * The chunk_state keeps track of which pages we've already processed if there's | |
2369 | * a shadow chain on this object. At this point, we haven't done anything with this | |
2370 | * range of pages yet, so initialize the state to indicate no pages processed yet. | |
1c79356b A |
2371 | */ |
2372 | ||
b0d623f7 A |
2373 | CHUNK_INIT(chunk_state, length); |
2374 | object = orig_object; | |
1c79356b A |
2375 | |
2376 | /* | |
b0d623f7 A |
2377 | * Start at the top level object and iterate around the loop once for each object |
2378 | * in the shadow chain. We stop processing early if we've already found all the pages | |
2379 | * in the range. Otherwise we stop when we run out of shadow objects. | |
1c79356b | 2380 | */ |
0b4e3aa0 | 2381 | |
b0d623f7 A |
2382 | while (object && CHUNK_NOT_COMPLETE(chunk_state)) { |
2383 | vm_object_paging_begin(object); | |
2384 | ||
f427ee49 | 2385 | deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset); |
b0d623f7 A |
2386 | |
2387 | vm_object_paging_end(object); | |
2388 | ||
2389 | /* | |
2390 | * We've finished with this object, see if there's a shadow object. If | |
2391 | * there is, update the offset and lock the new object. We also turn off | |
2392 | * kill_page at this point since we only kill pages in the top most object. | |
0b4e3aa0 | 2393 | */ |
1c79356b | 2394 | |
b0d623f7 A |
2395 | tmp_object = object->shadow; |
2396 | ||
2397 | if (tmp_object) { | |
f427ee49 A |
2398 | assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD)); |
2399 | flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE); | |
0a7de745 A |
2400 | offset += object->vo_shadow_offset; |
2401 | vm_object_lock(tmp_object); | |
b0d623f7 A |
2402 | } |
2403 | ||
0a7de745 A |
2404 | if (object != orig_object) { |
2405 | vm_object_unlock(object); | |
2406 | } | |
b0d623f7 A |
2407 | |
2408 | object = tmp_object; | |
1c79356b | 2409 | } |
b0d623f7 | 2410 | |
0a7de745 A |
2411 | if (object && object != orig_object) { |
2412 | vm_object_unlock(object); | |
2413 | } | |
b0d623f7 A |
2414 | |
2415 | return length; | |
1c79356b A |
2416 | } |
2417 | ||
b0d623f7 A |
2418 | |
2419 | ||
1c79356b | 2420 | /* |
b0d623f7 A |
2421 | * Move any resident pages in the specified range to the inactive queue. If kill_page is set, |
2422 | * we also clear the modified status of the page and "forget" any changes that have been made | |
2423 | * to the page. | |
1c79356b | 2424 | */ |
1c79356b | 2425 | |
0b4e3aa0 A |
2426 | __private_extern__ void |
2427 | vm_object_deactivate_pages( | |
0a7de745 A |
2428 | vm_object_t object, |
2429 | vm_object_offset_t offset, | |
2430 | vm_object_size_t size, | |
b0d623f7 | 2431 | boolean_t kill_page, |
0a7de745 A |
2432 | boolean_t reusable_page, |
2433 | struct pmap *pmap, | |
2434 | vm_map_offset_t pmap_offset) | |
0b4e3aa0 | 2435 | { |
0a7de745 A |
2436 | vm_object_size_t length; |
2437 | boolean_t all_reusable; | |
2438 | pmap_flush_context pmap_flush_context_storage; | |
f427ee49 A |
2439 | unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED; |
2440 | unsigned int pmap_clear_refmod_options = 0; | |
2441 | deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD; | |
2442 | bool refmod_cleared = false; | |
2443 | if (kill_page) { | |
2444 | flags |= DEACTIVATE_KILL; | |
2445 | } | |
2446 | if (reusable_page) { | |
2447 | flags |= DEACTIVATE_REUSABLE; | |
2448 | } | |
0b4e3aa0 A |
2449 | |
2450 | /* | |
b0d623f7 | 2451 | * We break the range up into chunks and do one chunk at a time. This is for |
0a7de745 | 2452 | * efficiency and performance while handling the shadow chains and the locks. |
b0d623f7 A |
2453 | * The deactivate_a_chunk() function returns how much of the range it processed. |
2454 | * We keep calling this routine until the given size is exhausted. | |
0b4e3aa0 | 2455 | */ |
0b4e3aa0 | 2456 | |
0b4e3aa0 | 2457 | |
b0d623f7 | 2458 | all_reusable = FALSE; |
fe8ab488 A |
2459 | #if 11 |
2460 | /* | |
0a7de745 | 2461 | * For the sake of accurate "reusable" pmap stats, we need |
fe8ab488 A |
2462 | * to tell pmap about each page that is no longer "reusable", |
2463 | * so we can't do the "all_reusable" optimization. | |
f427ee49 A |
2464 | * |
2465 | * If we do go with the all_reusable optimization, we can't | |
2466 | * return if size is 0 since we could have "all_reusable == TRUE" | |
2467 | * In this case, we save the overhead of doing the pmap_flush_context | |
2468 | * work. | |
fe8ab488 | 2469 | */ |
f427ee49 A |
2470 | if (size == 0) { |
2471 | return; | |
2472 | } | |
fe8ab488 | 2473 | #else |
b0d623f7 | 2474 | if (reusable_page && |
6d2010ae A |
2475 | object->internal && |
2476 | object->vo_size != 0 && | |
2477 | object->vo_size == size && | |
b0d623f7 A |
2478 | object->reusable_page_count == 0) { |
2479 | all_reusable = TRUE; | |
2480 | reusable_page = FALSE; | |
f427ee49 | 2481 | flags |= DEACTIVATE_ALL_REUSABLE; |
b0d623f7 | 2482 | } |
fe8ab488 | 2483 | #endif |
0b4e3aa0 | 2484 | |
d1ecb069 | 2485 | if ((reusable_page || all_reusable) && object->all_reusable) { |
0a7de745 | 2486 | /* This means MADV_FREE_REUSABLE has been called twice, which |
d1ecb069 A |
2487 | * is probably illegal. */ |
2488 | return; | |
2489 | } | |
d1ecb069 | 2490 | |
f427ee49 | 2491 | |
39236c6e A |
2492 | pmap_flush_context_init(&pmap_flush_context_storage); |
2493 | ||
f427ee49 A |
2494 | /* |
2495 | * If we're deactivating multiple pages, try to perform one bulk pmap operation. | |
2496 | * We can't do this if we're killing pages and there's a shadow chain as | |
2497 | * we don't yet know which pages are in the top object (pages in shadow copies aren't | |
2498 | * safe to kill). | |
2499 | * And we can only do this on hardware that supports it. | |
2500 | */ | |
2501 | if (size > PAGE_SIZE && (!kill_page || !object->shadow)) { | |
2502 | if (kill_page && object->internal) { | |
2503 | pmap_clear_refmod_mask |= VM_MEM_MODIFIED; | |
2504 | } | |
2505 | if (reusable_page) { | |
2506 | pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE; | |
2507 | } | |
2508 | ||
2509 | refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options); | |
2510 | if (refmod_cleared) { | |
2511 | // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it. | |
2512 | flags &= ~DEACTIVATE_CLEAR_REFMOD; | |
2513 | } | |
2514 | } | |
2515 | ||
b0d623f7 | 2516 | while (size) { |
f427ee49 A |
2517 | length = deactivate_a_chunk(object, offset, size, flags, |
2518 | &pmap_flush_context_storage, pmap, pmap_offset); | |
0b4e3aa0 | 2519 | |
b0d623f7 A |
2520 | size -= length; |
2521 | offset += length; | |
3e170ce0 | 2522 | pmap_offset += length; |
b0d623f7 | 2523 | } |
39236c6e | 2524 | pmap_flush(&pmap_flush_context_storage); |
91447636 | 2525 | |
b0d623f7 A |
2526 | if (all_reusable) { |
2527 | if (!object->all_reusable) { | |
2528 | unsigned int reusable; | |
2529 | ||
2530 | object->all_reusable = TRUE; | |
2531 | assert(object->reusable_page_count == 0); | |
2532 | /* update global stats */ | |
2533 | reusable = object->resident_page_count; | |
2534 | OSAddAtomic(reusable, | |
0a7de745 | 2535 | &vm_page_stats_reusable.reusable_count); |
b0d623f7 A |
2536 | vm_page_stats_reusable.reusable += reusable; |
2537 | vm_page_stats_reusable.all_reusable_calls++; | |
2538 | } | |
2539 | } else if (reusable_page) { | |
2540 | vm_page_stats_reusable.partial_reusable_calls++; | |
2541 | } | |
2542 | } | |
0b4e3aa0 | 2543 | |
b0d623f7 A |
2544 | void |
2545 | vm_object_reuse_pages( | |
0a7de745 A |
2546 | vm_object_t object, |
2547 | vm_object_offset_t start_offset, | |
2548 | vm_object_offset_t end_offset, | |
2549 | boolean_t allow_partial_reuse) | |
b0d623f7 | 2550 | { |
0a7de745 A |
2551 | vm_object_offset_t cur_offset; |
2552 | vm_page_t m; | |
2553 | unsigned int reused, reusable; | |
2554 | ||
2555 | #define VM_OBJECT_REUSE_PAGE(object, m, reused) \ | |
2556 | MACRO_BEGIN \ | |
2557 | if ((m) != VM_PAGE_NULL && \ | |
2558 | (m)->vmp_reusable) { \ | |
2559 | assert((object)->reusable_page_count <= \ | |
2560 | (object)->resident_page_count); \ | |
2561 | assert((object)->reusable_page_count > 0); \ | |
2562 | (object)->reusable_page_count--; \ | |
2563 | (m)->vmp_reusable = FALSE; \ | |
2564 | (reused)++; \ | |
2565 | /* \ | |
2566 | * Tell pmap that this page is no longer \ | |
2567 | * "reusable", to update the "reusable" stats \ | |
2568 | * for all the pmaps that have mapped this \ | |
2569 | * page. \ | |
2570 | */ \ | |
2571 | pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \ | |
2572 | 0, /* refmod */ \ | |
2573 | (PMAP_OPTIONS_CLEAR_REUSABLE \ | |
2574 | | PMAP_OPTIONS_NOFLUSH), \ | |
2575 | NULL); \ | |
2576 | } \ | |
b0d623f7 | 2577 | MACRO_END |
2d21ac55 | 2578 | |
b0d623f7 A |
2579 | reused = 0; |
2580 | reusable = 0; | |
0b4e3aa0 | 2581 | |
b0d623f7 | 2582 | vm_object_lock_assert_exclusive(object); |
0b4e3aa0 | 2583 | |
b0d623f7 | 2584 | if (object->all_reusable) { |
fe8ab488 | 2585 | panic("object %p all_reusable: can't update pmap stats\n", |
0a7de745 | 2586 | object); |
b0d623f7 A |
2587 | assert(object->reusable_page_count == 0); |
2588 | object->all_reusable = FALSE; | |
6d2010ae | 2589 | if (end_offset - start_offset == object->vo_size || |
b0d623f7 A |
2590 | !allow_partial_reuse) { |
2591 | vm_page_stats_reusable.all_reuse_calls++; | |
2592 | reused = object->resident_page_count; | |
2593 | } else { | |
2594 | vm_page_stats_reusable.partial_reuse_calls++; | |
0a7de745 | 2595 | vm_page_queue_iterate(&object->memq, m, vmp_listq) { |
d9a64523 A |
2596 | if (m->vmp_offset < start_offset || |
2597 | m->vmp_offset >= end_offset) { | |
2598 | m->vmp_reusable = TRUE; | |
b0d623f7 A |
2599 | object->reusable_page_count++; |
2600 | assert(object->resident_page_count >= object->reusable_page_count); | |
2601 | continue; | |
2602 | } else { | |
d9a64523 | 2603 | assert(!m->vmp_reusable); |
b0d623f7 | 2604 | reused++; |
0b4e3aa0 A |
2605 | } |
2606 | } | |
2607 | } | |
b0d623f7 | 2608 | } else if (object->resident_page_count > |
0a7de745 | 2609 | ((end_offset - start_offset) >> PAGE_SHIFT)) { |
b0d623f7 A |
2610 | vm_page_stats_reusable.partial_reuse_calls++; |
2611 | for (cur_offset = start_offset; | |
0a7de745 A |
2612 | cur_offset < end_offset; |
2613 | cur_offset += PAGE_SIZE_64) { | |
b0d623f7 A |
2614 | if (object->reusable_page_count == 0) { |
2615 | break; | |
2616 | } | |
2617 | m = vm_page_lookup(object, cur_offset); | |
2618 | VM_OBJECT_REUSE_PAGE(object, m, reused); | |
2619 | } | |
2620 | } else { | |
2621 | vm_page_stats_reusable.partial_reuse_calls++; | |
0a7de745 | 2622 | vm_page_queue_iterate(&object->memq, m, vmp_listq) { |
b0d623f7 A |
2623 | if (object->reusable_page_count == 0) { |
2624 | break; | |
2625 | } | |
d9a64523 A |
2626 | if (m->vmp_offset < start_offset || |
2627 | m->vmp_offset >= end_offset) { | |
b0d623f7 A |
2628 | continue; |
2629 | } | |
2630 | VM_OBJECT_REUSE_PAGE(object, m, reused); | |
2631 | } | |
0b4e3aa0 | 2632 | } |
b0d623f7 A |
2633 | |
2634 | /* update global stats */ | |
0a7de745 | 2635 | OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count); |
b0d623f7 A |
2636 | vm_page_stats_reusable.reused += reused; |
2637 | vm_page_stats_reusable.reusable += reusable; | |
0b4e3aa0 | 2638 | } |
1c79356b A |
2639 | |
2640 | /* | |
2641 | * Routine: vm_object_pmap_protect | |
2642 | * | |
2643 | * Purpose: | |
2644 | * Reduces the permission for all physical | |
2645 | * pages in the specified object range. | |
2646 | * | |
2647 | * If removing write permission only, it is | |
2648 | * sufficient to protect only the pages in | |
2649 | * the top-level object; only those pages may | |
2650 | * have write permission. | |
2651 | * | |
2652 | * If removing all access, we must follow the | |
2653 | * shadow chain from the top-level object to | |
2654 | * remove access to all pages in shadowed objects. | |
2655 | * | |
2656 | * The object must *not* be locked. The object must | |
0a7de745 | 2657 | * be internal. |
1c79356b A |
2658 | * |
2659 | * If pmap is not NULL, this routine assumes that | |
2660 | * the only mappings for the pages are in that | |
2661 | * pmap. | |
2662 | */ | |
2663 | ||
0b4e3aa0 | 2664 | __private_extern__ void |
1c79356b | 2665 | vm_object_pmap_protect( |
0a7de745 A |
2666 | vm_object_t object, |
2667 | vm_object_offset_t offset, | |
2668 | vm_object_size_t size, | |
2669 | pmap_t pmap, | |
f427ee49 | 2670 | vm_map_size_t pmap_page_size, |
0a7de745 A |
2671 | vm_map_offset_t pmap_start, |
2672 | vm_prot_t prot) | |
1c79356b | 2673 | { |
f427ee49 A |
2674 | vm_object_pmap_protect_options(object, offset, size, pmap, |
2675 | pmap_page_size, | |
2676 | pmap_start, prot, 0); | |
39236c6e A |
2677 | } |
2678 | ||
2679 | __private_extern__ void | |
2680 | vm_object_pmap_protect_options( | |
0a7de745 A |
2681 | vm_object_t object, |
2682 | vm_object_offset_t offset, | |
2683 | vm_object_size_t size, | |
2684 | pmap_t pmap, | |
f427ee49 | 2685 | vm_map_size_t pmap_page_size, |
0a7de745 A |
2686 | vm_map_offset_t pmap_start, |
2687 | vm_prot_t prot, | |
2688 | int options) | |
39236c6e | 2689 | { |
0a7de745 A |
2690 | pmap_flush_context pmap_flush_context_storage; |
2691 | boolean_t delayed_pmap_flush = FALSE; | |
f427ee49 A |
2692 | vm_object_offset_t offset_in_object; |
2693 | vm_object_size_t size_in_object; | |
39236c6e | 2694 | |
0a7de745 | 2695 | if (object == VM_OBJECT_NULL) { |
39236c6e | 2696 | return; |
0a7de745 | 2697 | } |
f427ee49 A |
2698 | if (pmap_page_size > PAGE_SIZE) { |
2699 | /* for 16K map on 4K device... */ | |
2700 | pmap_page_size = PAGE_SIZE; | |
2701 | } | |
2702 | /* | |
2703 | * If we decide to work on the object itself, extend the range to | |
2704 | * cover a full number of native pages. | |
2705 | */ | |
2706 | size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); | |
2707 | offset_in_object = vm_object_trunc_page(offset); | |
2708 | /* | |
2709 | * If we decide to work on the pmap, use the exact range specified, | |
2710 | * so no rounding/truncating offset and size. They should already | |
2711 | * be aligned to pmap_page_size. | |
2712 | */ | |
2713 | assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)), | |
2714 | "offset 0x%llx size 0x%llx pmap_page_size 0x%llx", | |
2715 | offset, size, (uint64_t)pmap_page_size); | |
1c79356b A |
2716 | |
2717 | vm_object_lock(object); | |
2718 | ||
2d21ac55 A |
2719 | if (object->phys_contiguous) { |
2720 | if (pmap != NULL) { | |
2721 | vm_object_unlock(object); | |
39236c6e | 2722 | pmap_protect_options(pmap, |
0a7de745 A |
2723 | pmap_start, |
2724 | pmap_start + size, | |
2725 | prot, | |
2726 | options & ~PMAP_OPTIONS_NOFLUSH, | |
2727 | NULL); | |
2d21ac55 A |
2728 | } else { |
2729 | vm_object_offset_t phys_start, phys_end, phys_addr; | |
2730 | ||
f427ee49 A |
2731 | phys_start = object->vo_shadow_offset + offset_in_object; |
2732 | phys_end = phys_start + size_in_object; | |
2d21ac55 | 2733 | assert(phys_start <= phys_end); |
6d2010ae | 2734 | assert(phys_end <= object->vo_shadow_offset + object->vo_size); |
2d21ac55 A |
2735 | vm_object_unlock(object); |
2736 | ||
39236c6e A |
2737 | pmap_flush_context_init(&pmap_flush_context_storage); |
2738 | delayed_pmap_flush = FALSE; | |
2739 | ||
2d21ac55 | 2740 | for (phys_addr = phys_start; |
0a7de745 A |
2741 | phys_addr < phys_end; |
2742 | phys_addr += PAGE_SIZE_64) { | |
39236c6e A |
2743 | pmap_page_protect_options( |
2744 | (ppnum_t) (phys_addr >> PAGE_SHIFT), | |
2745 | prot, | |
2746 | options | PMAP_OPTIONS_NOFLUSH, | |
2747 | (void *)&pmap_flush_context_storage); | |
2748 | delayed_pmap_flush = TRUE; | |
2d21ac55 | 2749 | } |
0a7de745 | 2750 | if (delayed_pmap_flush == TRUE) { |
39236c6e | 2751 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 2752 | } |
2d21ac55 A |
2753 | } |
2754 | return; | |
2755 | } | |
2756 | ||
55e303ae | 2757 | assert(object->internal); |
de355530 | 2758 | |
1c79356b | 2759 | while (TRUE) { |
f427ee49 | 2760 | if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) { |
0a7de745 | 2761 | vm_object_unlock(object); |
f427ee49 A |
2762 | if (pmap_page_size < PAGE_SIZE) { |
2763 | DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot); | |
2764 | } | |
0a7de745 A |
2765 | pmap_protect_options(pmap, pmap_start, pmap_start + size, prot, |
2766 | options & ~PMAP_OPTIONS_NOFLUSH, NULL); | |
2767 | return; | |
2768 | } | |
2769 | ||
f427ee49 A |
2770 | if (pmap_page_size < PAGE_SIZE) { |
2771 | DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object); | |
2772 | } | |
2773 | ||
0a7de745 A |
2774 | pmap_flush_context_init(&pmap_flush_context_storage); |
2775 | delayed_pmap_flush = FALSE; | |
2776 | ||
2777 | /* | |
2778 | * if we are doing large ranges with respect to resident | |
2779 | * page count then we should iterate over pages, otherwise |
2780 | * inverse page look-up will be faster | |
2781 | */ | |
f427ee49 | 2782 | if (ptoa_64(object->resident_page_count / 4) < size_in_object) { |
0a7de745 A |
2783 | vm_page_t p; |
2784 | vm_object_offset_t end; | |
2785 | ||
f427ee49 | 2786 | end = offset_in_object + size_in_object; |
0a7de745 A |
2787 | |
2788 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { | |
f427ee49 | 2789 | if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) { |
0a7de745 A |
2790 | vm_map_offset_t start; |
2791 | ||
f427ee49 A |
2792 | /* |
2793 | * XXX FBDP 4K: intentionally using "offset" here instead | |
2794 | * of "offset_in_object", since "start" is a pmap address. | |
2795 | */ | |
0a7de745 A |
2796 | start = pmap_start + p->vmp_offset - offset; |
2797 | ||
2798 | if (pmap != PMAP_NULL) { | |
f427ee49 A |
2799 | vm_map_offset_t curr; |
2800 | for (curr = start; | |
2801 | curr < start + PAGE_SIZE_64; | |
2802 | curr += pmap_page_size) { | |
2803 | if (curr < pmap_start) { | |
2804 | continue; | |
2805 | } | |
2806 | if (curr >= pmap_start + size) { | |
2807 | break; | |
2808 | } | |
2809 | pmap_protect_options( | |
2810 | pmap, | |
2811 | curr, | |
2812 | curr + pmap_page_size, | |
2813 | prot, | |
2814 | options | PMAP_OPTIONS_NOFLUSH, | |
2815 | &pmap_flush_context_storage); | |
2816 | } | |
0a7de745 A |
2817 | } else { |
2818 | pmap_page_protect_options( | |
2819 | VM_PAGE_GET_PHYS_PAGE(p), | |
2820 | prot, | |
2821 | options | PMAP_OPTIONS_NOFLUSH, | |
2822 | &pmap_flush_context_storage); | |
2823 | } | |
39236c6e | 2824 | delayed_pmap_flush = TRUE; |
0a7de745 | 2825 | } |
39236c6e | 2826 | } |
0a7de745 A |
2827 | } else { |
2828 | vm_page_t p; | |
2829 | vm_object_offset_t end; | |
2830 | vm_object_offset_t target_off; | |
39236c6e | 2831 | |
f427ee49 | 2832 | end = offset_in_object + size_in_object; |
9bccf70c | 2833 | |
f427ee49 | 2834 | for (target_off = offset_in_object; |
0a7de745 A |
2835 | target_off < end; target_off += PAGE_SIZE) { |
2836 | p = vm_page_lookup(object, target_off); | |
9bccf70c | 2837 | |
0a7de745 A |
2838 | if (p != VM_PAGE_NULL) { |
2839 | vm_object_offset_t start; | |
39236c6e | 2840 | |
f427ee49 A |
2841 | /* |
2842 | * XXX FBDP 4K: intentionally using "offset" here instead | |
2843 | * of "offset_in_object", since "start" is a pmap address. | |
2844 | */ | |
0a7de745 | 2845 | start = pmap_start + (p->vmp_offset - offset); |
39236c6e | 2846 | |
0a7de745 | 2847 | if (pmap != PMAP_NULL) { |
f427ee49 A |
2848 | vm_map_offset_t curr; |
2849 | for (curr = start; | |
2850 | curr < start + PAGE_SIZE; | |
2851 | curr += pmap_page_size) { | |
2852 | if (curr < pmap_start) { | |
2853 | continue; | |
2854 | } | |
2855 | if (curr >= pmap_start + size) { | |
2856 | break; | |
2857 | } | |
2858 | pmap_protect_options( | |
2859 | pmap, | |
2860 | curr, | |
2861 | curr + pmap_page_size, | |
2862 | prot, | |
2863 | options | PMAP_OPTIONS_NOFLUSH, | |
2864 | &pmap_flush_context_storage); | |
2865 | } | |
0a7de745 A |
2866 | } else { |
2867 | pmap_page_protect_options( | |
2868 | VM_PAGE_GET_PHYS_PAGE(p), | |
2869 | prot, | |
2870 | options | PMAP_OPTIONS_NOFLUSH, | |
2871 | &pmap_flush_context_storage); | |
2872 | } | |
39236c6e | 2873 | delayed_pmap_flush = TRUE; |
0a7de745 A |
2874 | } |
2875 | } | |
2876 | } | |
2877 | if (delayed_pmap_flush == TRUE) { | |
2878 | pmap_flush(&pmap_flush_context_storage); | |
9bccf70c | 2879 | } |
1c79356b | 2880 | |
0a7de745 A |
2881 | if (prot == VM_PROT_NONE) { |
2882 | /* | |
2883 | * Must follow shadow chain to remove access | |
2884 | * to pages in shadowed objects. | |
2885 | */ | |
2886 | vm_object_t next_object; | |
2887 | ||
2888 | next_object = object->shadow; | |
2889 | if (next_object != VM_OBJECT_NULL) { | |
f427ee49 | 2890 | offset_in_object += object->vo_shadow_offset; |
0a7de745 A |
2891 | offset += object->vo_shadow_offset; |
2892 | vm_object_lock(next_object); | |
2893 | vm_object_unlock(object); | |
2894 | object = next_object; | |
2895 | } else { | |
2896 | /* | |
2897 | * End of chain - we are done. | |
2898 | */ | |
2899 | break; | |
2900 | } | |
2901 | } else { | |
2902 | /* | |
2903 | * Pages in shadowed objects may never have | |
2904 | * write permission - we may stop here. | |
2905 | */ | |
2906 | break; | |
2907 | } | |
1c79356b A |
2908 | } |
2909 | ||
2910 | vm_object_unlock(object); | |
2911 | } | |
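/*
 * Illustrative sketch (not kernel code) of the strategy choice made in the
 * routine above: when the protected range is large relative to the number of
 * resident pages, walking the object's resident-page list is cheaper than
 * probing every offset with vm_page_lookup().  The function name and
 * parameters below are invented for the example.
 */
#if 0	/* example only; mirrors: ptoa_64(resident_page_count / 4) < size_in_object */
static boolean_t
toy_should_walk_resident_list(uint64_t resident_page_count,
    uint64_t range_size, uint64_t page_size)
{
	if ((resident_page_count / 4) * page_size < range_size) {
		return TRUE;	/* few resident pages relative to the range: walk memq */
	}
	return FALSE;		/* otherwise: per-offset look-ups are faster */
}
#endif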
2912 | ||
d9a64523 A |
2913 | uint32_t vm_page_busy_absent_skipped = 0; |
2914 | ||
1c79356b A |
2915 | /* |
2916 | * Routine: vm_object_copy_slowly | |
2917 | * | |
2918 | * Description: | |
2919 | * Copy the specified range of the source | |
2920 | * virtual memory object without using | |
2921 | * protection-based optimizations (such | |
2922 | * as copy-on-write). The pages in the | |
2923 | * region are actually copied. | |
2924 | * | |
2925 | * In/out conditions: | |
2926 | * The caller must hold a reference and a lock | |
2927 | * for the source virtual memory object. The source | |
2928 | * object will be returned *unlocked*. | |
2929 | * | |
2930 | * Results: | |
2931 | * If the copy is completed successfully, KERN_SUCCESS is | |
2932 | * returned. If the caller asserted the interruptible | |
2933 | * argument, and an interruption occurred while waiting | |
2934 | * for a user-generated event, MACH_SEND_INTERRUPTED is | |
2935 | * returned. Other values may be returned to indicate | |
2936 | * hard errors during the copy operation. | |
2937 | * | |
2938 | * A new virtual memory object is returned in a | |
2939 | * parameter (_result_object). The contents of this | |
2940 | * new object, starting at a zero offset, are a copy | |
2941 | * of the source memory region. In the event of | |
2942 | * an error, this parameter will contain the value | |
2943 | * VM_OBJECT_NULL. | |
2944 | */ | |
0b4e3aa0 | 2945 | __private_extern__ kern_return_t |
1c79356b | 2946 | vm_object_copy_slowly( |
0a7de745 A |
2947 | vm_object_t src_object, |
2948 | vm_object_offset_t src_offset, | |
2949 | vm_object_size_t size, | |
2950 | boolean_t interruptible, | |
2951 | vm_object_t *_result_object) /* OUT */ | |
1c79356b | 2952 | { |
0a7de745 A |
2953 | vm_object_t new_object; |
2954 | vm_object_offset_t new_offset; | |
1c79356b | 2955 | |
d9a64523 | 2956 | struct vm_object_fault_info fault_info = {}; |
1c79356b | 2957 | |
1c79356b A |
2958 | if (size == 0) { |
2959 | vm_object_unlock(src_object); | |
2960 | *_result_object = VM_OBJECT_NULL; | |
0a7de745 | 2961 | return KERN_INVALID_ARGUMENT; |
1c79356b A |
2962 | } |
2963 | ||
2964 | /* | |
2965 | * Prevent destruction of the source object while we copy. | |
2966 | */ | |
2967 | ||
2d21ac55 | 2968 | vm_object_reference_locked(src_object); |
1c79356b A |
2969 | vm_object_unlock(src_object); |
2970 | ||
2971 | /* | |
2972 | * Create a new object to hold the copied pages. | |
2973 | * A few notes: | |
2974 | * We fill the new object starting at offset 0, | |
2975 | * regardless of the input offset. | |
2976 | * We don't bother to lock the new object within | |
2977 | * this routine, since we have the only reference. | |
2978 | */ | |
2979 | ||
f427ee49 A |
2980 | size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset); |
2981 | src_offset = vm_object_trunc_page(src_offset); | |
1c79356b A |
2982 | new_object = vm_object_allocate(size); |
2983 | new_offset = 0; | |
2984 | ||
0a7de745 | 2985 | assert(size == trunc_page_64(size)); /* Will the loop terminate? */ |
1c79356b | 2986 | |
2d21ac55 A |
2987 | fault_info.interruptible = interruptible; |
2988 | fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; | |
2d21ac55 A |
2989 | fault_info.lo_offset = src_offset; |
2990 | fault_info.hi_offset = src_offset + size; | |
b0d623f7 | 2991 | fault_info.stealth = TRUE; |
2d21ac55 | 2992 | |
0a7de745 A |
2993 | for (; |
2994 | size != 0; | |
2995 | src_offset += PAGE_SIZE_64, | |
2996 | new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 | |
1c79356b | 2997 | ) { |
0a7de745 | 2998 | vm_page_t new_page; |
1c79356b A |
2999 | vm_fault_return_t result; |
3000 | ||
2d21ac55 A |
3001 | vm_object_lock(new_object); |
3002 | ||
1c79356b | 3003 | while ((new_page = vm_page_alloc(new_object, new_offset)) |
0a7de745 | 3004 | == VM_PAGE_NULL) { |
2d21ac55 A |
3005 | vm_object_unlock(new_object); |
3006 | ||
1c79356b A |
3007 | if (!vm_page_wait(interruptible)) { |
3008 | vm_object_deallocate(new_object); | |
91447636 | 3009 | vm_object_deallocate(src_object); |
1c79356b | 3010 | *_result_object = VM_OBJECT_NULL; |
0a7de745 | 3011 | return MACH_SEND_INTERRUPTED; |
1c79356b | 3012 | } |
2d21ac55 | 3013 | vm_object_lock(new_object); |
1c79356b | 3014 | } |
2d21ac55 | 3015 | vm_object_unlock(new_object); |
1c79356b A |
3016 | |
3017 | do { | |
0a7de745 A |
3018 | vm_prot_t prot = VM_PROT_READ; |
3019 | vm_page_t _result_page; | |
3020 | vm_page_t top_page; | |
3021 | vm_page_t result_page; | |
3022 | kern_return_t error_code; | |
3023 | vm_object_t result_page_object; | |
39037602 | 3024 | |
1c79356b A |
3025 | |
3026 | vm_object_lock(src_object); | |
3e170ce0 A |
3027 | |
3028 | if (src_object->internal && | |
3029 | src_object->shadow == VM_OBJECT_NULL && | |
3e170ce0 | 3030 | (src_object->pager == NULL || |
0a7de745 A |
3031 | (VM_COMPRESSOR_PAGER_STATE_GET(src_object, |
3032 | src_offset) == | |
3033 | VM_EXTERNAL_STATE_ABSENT))) { | |
d9a64523 A |
3034 | boolean_t can_skip_page; |
3035 | ||
3036 | _result_page = vm_page_lookup(src_object, | |
0a7de745 | 3037 | src_offset); |
d9a64523 A |
3038 | if (_result_page == VM_PAGE_NULL) { |
3039 | /* | |
3040 | * This page is neither resident nor | |
3041 | * compressed and there's no shadow | |
3042 | * object below "src_object", so this | |
3043 | * page is really missing. | |
3044 | * There's no need to zero-fill it just | |
3045 | * to copy it: let's leave it missing | |
3046 | * in "new_object" and get zero-filled | |
3047 | * on demand. | |
3048 | */ | |
3049 | can_skip_page = TRUE; | |
3050 | } else if (workaround_41447923 && | |
0a7de745 A |
3051 | src_object->pager == NULL && |
3052 | _result_page != VM_PAGE_NULL && | |
3053 | _result_page->vmp_busy && | |
3054 | _result_page->vmp_absent && | |
3055 | src_object->purgable == VM_PURGABLE_DENY && | |
3056 | !src_object->blocked_access) { | |
d9a64523 A |
3057 | /* |
3058 | * This page is "busy" and "absent" | |
3059 | * but not because we're waiting for | |
3060 | * it to be decompressed. It must | |
3061 | * be because it's a "no zero fill" | |
3062 | * page that is currently not | |
3063 | * accessible until it gets overwritten | |
3064 | * by a device driver. | |
3065 | * Since its initial state would have | |
3066 | * been "zero-filled", let's leave the | |
3067 | * copy page missing and get zero-filled | |
3068 | * on demand. | |
3069 | */ | |
3070 | assert(src_object->internal); | |
3071 | assert(src_object->shadow == NULL); | |
3072 | assert(src_object->pager == NULL); | |
3073 | can_skip_page = TRUE; | |
3074 | vm_page_busy_absent_skipped++; | |
3075 | } else { | |
3076 | can_skip_page = FALSE; | |
3077 | } | |
3078 | if (can_skip_page) { | |
3079 | vm_object_unlock(src_object); | |
3080 | /* free the unused "new_page"... */ | |
3081 | vm_object_lock(new_object); | |
3082 | VM_PAGE_FREE(new_page); | |
3083 | new_page = VM_PAGE_NULL; | |
3084 | vm_object_unlock(new_object); | |
3085 | /* ...and go to next page in "src_object" */ | |
3086 | result = VM_FAULT_SUCCESS; | |
3087 | break; | |
3088 | } | |
3e170ce0 A |
3089 | } |
3090 | ||
1c79356b A |
3091 | vm_object_paging_begin(src_object); |
3092 | ||
d9a64523 A |
3093 | /* cap size at maximum UPL size */ |
3094 | upl_size_t cluster_size; | |
3095 | if (os_convert_overflow(size, &cluster_size)) { | |
3096 | cluster_size = 0 - (upl_size_t)PAGE_SIZE; | |
b0d623f7 | 3097 | } |
d9a64523 | 3098 | fault_info.cluster_size = cluster_size; |
2d21ac55 | 3099 | |
39236c6e | 3100 | _result_page = VM_PAGE_NULL; |
1c79356b | 3101 | result = vm_fault_page(src_object, src_offset, |
0a7de745 A |
3102 | VM_PROT_READ, FALSE, |
3103 | FALSE, /* page not looked up */ | |
3104 | &prot, &_result_page, &top_page, | |
3105 | (int *)0, | |
3106 | &error_code, FALSE, FALSE, &fault_info); | |
1c79356b | 3107 | |
0a7de745 | 3108 | switch (result) { |
b0d623f7 A |
3109 | case VM_FAULT_SUCCESS: |
3110 | result_page = _result_page; | |
39037602 | 3111 | result_page_object = VM_PAGE_OBJECT(result_page); |
1c79356b | 3112 | |
b0d623f7 | 3113 | /* |
b0d623f7 A |
3114 | * Copy the page to the new object. |
3115 | * | |
3116 | * POLICY DECISION: | |
3117 | * If result_page is clean, | |
3118 | * we could steal it instead | |
3119 | * of copying. | |
3120 | */ | |
1c79356b | 3121 | |
b0d623f7 | 3122 | vm_page_copy(result_page, new_page); |
39037602 | 3123 | vm_object_unlock(result_page_object); |
1c79356b | 3124 | |
b0d623f7 A |
3125 | /* |
3126 | * Let go of both pages (make them | |
3127 | * not busy, perform wakeup, activate). | |
3128 | */ | |
3129 | vm_object_lock(new_object); | |
316670eb | 3130 | SET_PAGE_DIRTY(new_page, FALSE); |
b0d623f7 A |
3131 | PAGE_WAKEUP_DONE(new_page); |
3132 | vm_object_unlock(new_object); | |
1c79356b | 3133 | |
39037602 | 3134 | vm_object_lock(result_page_object); |
b0d623f7 | 3135 | PAGE_WAKEUP_DONE(result_page); |
1c79356b | 3136 | |
b0d623f7 | 3137 | vm_page_lockspin_queues(); |
d9a64523 A |
3138 | if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || |
3139 | (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) { | |
b0d623f7 | 3140 | vm_page_activate(result_page); |
39037602 | 3141 | } |
b0d623f7 A |
3142 | vm_page_activate(new_page); |
3143 | vm_page_unlock_queues(); | |
1c79356b | 3144 | |
b0d623f7 A |
3145 | /* |
3146 | * Release paging references and | |
3147 | * top-level placeholder page, if any. | |
3148 | */ | |
3149 | ||
39037602 | 3150 | vm_fault_cleanup(result_page_object, |
0a7de745 | 3151 | top_page); |
b0d623f7 A |
3152 | |
3153 | break; | |
0a7de745 | 3154 | |
b0d623f7 A |
3155 | case VM_FAULT_RETRY: |
3156 | break; | |
3157 | ||
b0d623f7 | 3158 | case VM_FAULT_MEMORY_SHORTAGE: |
0a7de745 | 3159 | if (vm_page_wait(interruptible)) { |
1c79356b | 3160 | break; |
0a7de745 | 3161 | } |
f427ee49 | 3162 | OS_FALLTHROUGH; |
1c79356b | 3163 | |
b0d623f7 A |
3164 | case VM_FAULT_INTERRUPTED: |
3165 | vm_object_lock(new_object); | |
3166 | VM_PAGE_FREE(new_page); | |
3167 | vm_object_unlock(new_object); | |
0a7de745 | 3168 | |
b0d623f7 A |
3169 | vm_object_deallocate(new_object); |
3170 | vm_object_deallocate(src_object); | |
3171 | *_result_object = VM_OBJECT_NULL; | |
0a7de745 | 3172 | return MACH_SEND_INTERRUPTED; |
1c79356b | 3173 | |
b0d623f7 A |
3174 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
3175 | /* success but no VM page: fail */ | |
3176 | vm_object_paging_end(src_object); | |
3177 | vm_object_unlock(src_object); | |
f427ee49 | 3178 | OS_FALLTHROUGH; |
b0d623f7 A |
3179 | case VM_FAULT_MEMORY_ERROR: |
3180 | /* | |
3181 | * A policy choice: | |
3182 | * (a) ignore pages that we can't | |
3183 | * copy | |
3184 | * (b) return the null object if | |
3185 | * any page fails [chosen] | |
3186 | */ | |
593a1d5f | 3187 | |
b0d623f7 A |
3188 | vm_object_lock(new_object); |
3189 | VM_PAGE_FREE(new_page); | |
3190 | vm_object_unlock(new_object); | |
1c79356b | 3191 | |
b0d623f7 A |
3192 | vm_object_deallocate(new_object); |
3193 | vm_object_deallocate(src_object); | |
3194 | *_result_object = VM_OBJECT_NULL; | |
0a7de745 A |
3195 | return error_code ? error_code: |
3196 | KERN_MEMORY_ERROR; | |
1c79356b | 3197 | |
b0d623f7 A |
3198 | default: |
3199 | panic("vm_object_copy_slowly: unexpected error" | |
0a7de745 | 3200 | " 0x%x from vm_fault_page()\n", result); |
1c79356b A |
3201 | } |
3202 | } while (result != VM_FAULT_SUCCESS); | |
3203 | } | |
3204 | ||
3205 | /* | |
3206 | * Lose the extra reference, and return our object. | |
3207 | */ | |
1c79356b A |
3208 | vm_object_deallocate(src_object); |
3209 | *_result_object = new_object; | |
0a7de745 | 3210 | return KERN_SUCCESS; |
1c79356b A |
3211 | } |
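/*
 * Illustrative caller sketch (not part of the kernel sources) for
 * vm_object_copy_slowly(), based on the in/out conditions documented above:
 * the caller holds a reference and the lock on the source object, and the
 * routine returns with the source object unlocked.  The helper name is
 * invented for the example and error handling is reduced to the minimum.
 */
#if 0	/* example only */
static kern_return_t
toy_copy_range(
	vm_object_t             src_object,     /* caller holds a reference */
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *copy_object)   /* OUT */
{
	kern_return_t kr;

	vm_object_lock(src_object);
	kr = vm_object_copy_slowly(src_object, src_offset, size,
	    FALSE,              /* not interruptible */
	    copy_object);
	/* "src_object" has been unlocked for us; our reference remains. */
	return kr;
}
#endif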
3212 | ||
3213 | /* | |
3214 | * Routine: vm_object_copy_quickly | |
3215 | * | |
3216 | * Purpose: | |
3217 | * Copy the specified range of the source virtual | |
3218 | * memory object, if it can be done without waiting | |
3219 | * for user-generated events. | |
3220 | * | |
3221 | * Results: | |
3222 | * If the copy is successful, the copy is returned in | |
3223 | * the arguments; otherwise, the arguments are not | |
3224 | * affected. | |
3225 | * | |
3226 | * In/out conditions: | |
3227 | * The object should be unlocked on entry and exit. | |
3228 | */ | |
3229 | ||
3230 | /*ARGSUSED*/ | |
0b4e3aa0 | 3231 | __private_extern__ boolean_t |
1c79356b | 3232 | vm_object_copy_quickly( |
0a7de745 A |
3233 | vm_object_t *_object, /* INOUT */ |
3234 | __unused vm_object_offset_t offset, /* IN */ | |
3235 | __unused vm_object_size_t size, /* IN */ | |
3236 | boolean_t *_src_needs_copy, /* OUT */ | |
3237 | boolean_t *_dst_needs_copy) /* OUT */ | |
1c79356b | 3238 | { |
0a7de745 | 3239 | vm_object_t object = *_object; |
1c79356b A |
3240 | memory_object_copy_strategy_t copy_strategy; |
3241 | ||
1c79356b A |
3242 | if (object == VM_OBJECT_NULL) { |
3243 | *_src_needs_copy = FALSE; | |
3244 | *_dst_needs_copy = FALSE; | |
0a7de745 | 3245 | return TRUE; |
1c79356b A |
3246 | } |
3247 | ||
3248 | vm_object_lock(object); | |
3249 | ||
3250 | copy_strategy = object->copy_strategy; | |
3251 | ||
3252 | switch (copy_strategy) { | |
3253 | case MEMORY_OBJECT_COPY_SYMMETRIC: | |
3254 | ||
3255 | /* | |
3256 | * Symmetric copy strategy. | |
3257 | * Make another reference to the object. | |
3258 | * Leave object/offset unchanged. | |
3259 | */ | |
3260 | ||
2d21ac55 | 3261 | vm_object_reference_locked(object); |
1c79356b A |
3262 | object->shadowed = TRUE; |
3263 | vm_object_unlock(object); | |
3264 | ||
3265 | /* | |
3266 | * Both source and destination must make | |
3267 | * shadows, and the source must be made | |
3268 | * read-only if not already. | |
3269 | */ | |
3270 | ||
3271 | *_src_needs_copy = TRUE; | |
3272 | *_dst_needs_copy = TRUE; | |
3273 | ||
3274 | break; | |
3275 | ||
3276 | case MEMORY_OBJECT_COPY_DELAY: | |
3277 | vm_object_unlock(object); | |
0a7de745 | 3278 | return FALSE; |
1c79356b A |
3279 | |
3280 | default: | |
3281 | vm_object_unlock(object); | |
0a7de745 | 3282 | return FALSE; |
1c79356b | 3283 | } |
0a7de745 | 3284 | return TRUE; |
1c79356b A |
3285 | } |
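/*
 * Illustrative sketch (not kernel code) of how the OUT flags from
 * vm_object_copy_quickly() are meant to be interpreted, following the
 * symmetric-strategy comment above.  Variable names are invented for the
 * example; the real caller logic lives in the VM map layer.
 */
#if 0	/* example only */
	boolean_t src_needs_copy, dst_needs_copy;

	if (vm_object_copy_quickly(&object, offset, size,
	    &src_needs_copy, &dst_needs_copy)) {
		if (src_needs_copy) {
			/* the source mapping must be made copy-on-write (read-only) */
		}
		if (dst_needs_copy) {
			/* the new mapping must also be marked copy-on-write */
		}
	} else {
		/* strategy was not symmetric: fall back to another copy routine */
	}
#endif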
3286 | ||
0b4e3aa0 A |
3287 | static int copy_call_count = 0; |
3288 | static int copy_call_sleep_count = 0; | |
3289 | static int copy_call_restart_count = 0; | |
1c79356b A |
3290 | |
3291 | /* | |
3292 | * Routine: vm_object_copy_call [internal] | |
3293 | * | |
3294 | * Description: | |
3295 | * Copy the source object (src_object), using the | |
3296 | * user-managed copy algorithm. | |
3297 | * | |
3298 | * In/out conditions: | |
3299 | * The source object must be locked on entry. It | |
3300 | * will be *unlocked* on exit. | |
3301 | * | |
3302 | * Results: | |
3303 | * If the copy is successful, KERN_SUCCESS is returned. | |
3304 | * A new object that represents the copied virtual | |
3305 | * memory is returned in a parameter (*_result_object). | |
3306 | * If the return value indicates an error, this parameter | |
3307 | * is not valid. | |
3308 | */ | |
0b4e3aa0 | 3309 | static kern_return_t |
1c79356b | 3310 | vm_object_copy_call( |
0a7de745 A |
3311 | vm_object_t src_object, |
3312 | vm_object_offset_t src_offset, | |
3313 | vm_object_size_t size, | |
3314 | vm_object_t *_result_object) /* OUT */ | |
1c79356b | 3315 | { |
0a7de745 A |
3316 | kern_return_t kr; |
3317 | vm_object_t copy; | |
3318 | boolean_t check_ready = FALSE; | |
3319 | uint32_t try_failed_count = 0; | |
1c79356b A |
3320 | |
3321 | /* | |
3322 | * If a copy is already in progress, wait and retry. | |
3323 | * | |
3324 | * XXX | |
3325 | * Consider making this call interruptible, as Mike | |
3326 | * intended it to be. | |
3327 | * | |
3328 | * XXXO | |
3329 | * Need a counter or version or something to allow | |
3330 | * us to use the copy that the currently requesting | |
3331 | * thread is obtaining -- is it worth adding to the | |
3332 | * vm object structure? Depends on how common this case is. | |
3333 | */ | |
3334 | copy_call_count++; | |
3335 | while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { | |
9bccf70c | 3336 | vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, |
0a7de745 | 3337 | THREAD_UNINT); |
1c79356b A |
3338 | copy_call_restart_count++; |
3339 | } | |
3340 | ||
3341 | /* | |
3342 | * Indicate (for the benefit of memory_object_create_copy) | |
3343 | * that we want a copy for src_object. (Note that we cannot | |
3344 | * do a real assert_wait before calling memory_object_copy, | |
3345 | * so we simply set the flag.) | |
3346 | */ | |
3347 | ||
3348 | vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL); | |
3349 | vm_object_unlock(src_object); | |
3350 | ||
3351 | /* | |
3352 | * Ask the memory manager to give us a memory object | |
3353 | * which represents a copy of the src object. | |
3354 | * The memory manager may give us a memory object | |
3355 | * which we already have, or it may give us a | |
3356 | * new memory object. This memory object will arrive | |
3357 | * via memory_object_create_copy. | |
3358 | */ | |
3359 | ||
0a7de745 | 3360 | kr = KERN_FAILURE; /* XXX need to change memory_object.defs */ |
1c79356b A |
3361 | if (kr != KERN_SUCCESS) { |
3362 | return kr; | |
3363 | } | |
3364 | ||
3365 | /* | |
3366 | * Wait for the copy to arrive. | |
3367 | */ | |
3368 | vm_object_lock(src_object); | |
3369 | while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { | |
9bccf70c | 3370 | vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, |
0a7de745 | 3371 | THREAD_UNINT); |
1c79356b A |
3372 | copy_call_sleep_count++; |
3373 | } | |
3374 | Retry: | |
3375 | assert(src_object->copy != VM_OBJECT_NULL); | |
3376 | copy = src_object->copy; | |
3377 | if (!vm_object_lock_try(copy)) { | |
3378 | vm_object_unlock(src_object); | |
2d21ac55 A |
3379 | |
3380 | try_failed_count++; | |
0a7de745 | 3381 | mutex_pause(try_failed_count); /* wait a bit */ |
2d21ac55 | 3382 | |
1c79356b A |
3383 | vm_object_lock(src_object); |
3384 | goto Retry; | |
3385 | } | |
0a7de745 | 3386 | if (copy->vo_size < src_offset + size) { |
f427ee49 A |
3387 | assertf(page_aligned(src_offset + size), |
3388 | "object %p size 0x%llx", | |
3389 | copy, (uint64_t)(src_offset + size)); | |
0a7de745 A |
3390 | copy->vo_size = src_offset + size; |
3391 | } | |
1c79356b | 3392 | |
0a7de745 | 3393 | if (!copy->pager_ready) { |
1c79356b | 3394 | check_ready = TRUE; |
0a7de745 | 3395 | } |
1c79356b A |
3396 | |
3397 | /* | |
3398 | * Return the copy. | |
3399 | */ | |
3400 | *_result_object = copy; | |
3401 | vm_object_unlock(copy); | |
3402 | vm_object_unlock(src_object); | |
3403 | ||
3404 | /* Wait for the copy to be ready. */ | |
3405 | if (check_ready == TRUE) { | |
3406 | vm_object_lock(copy); | |
3407 | while (!copy->pager_ready) { | |
9bccf70c | 3408 | vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT); |
1c79356b A |
3409 | } |
3410 | vm_object_unlock(copy); | |
3411 | } | |
3412 | ||
3413 | return KERN_SUCCESS; | |
3414 | } | |
3415 | ||
0b4e3aa0 A |
3416 | static int copy_delayed_lock_collisions = 0; |
3417 | static int copy_delayed_max_collisions = 0; | |
3418 | static int copy_delayed_lock_contention = 0; | |
3419 | static int copy_delayed_protect_iterate = 0; | |
1c79356b A |
3420 | |
3421 | /* | |
3422 | * Routine: vm_object_copy_delayed [internal] | |
3423 | * | |
3424 | * Description: | |
3425 | * Copy the specified virtual memory object, using | |
3426 | * the asymmetric copy-on-write algorithm. | |
3427 | * | |
3428 | * In/out conditions: | |
55e303ae A |
3429 | * The src_object must be locked on entry. It will be unlocked |
3430 | * on exit - so the caller must also hold a reference to it. | |
1c79356b A |
3431 | * |
3432 | * This routine will not block waiting for user-generated | |
3433 | * events. It is not interruptible. | |
3434 | */ | |
0b4e3aa0 | 3435 | __private_extern__ vm_object_t |
1c79356b | 3436 | vm_object_copy_delayed( |
0a7de745 A |
3437 | vm_object_t src_object, |
3438 | vm_object_offset_t src_offset, | |
3439 | vm_object_size_t size, | |
3440 | boolean_t src_object_shared) | |
1c79356b | 3441 | { |
0a7de745 A |
3442 | vm_object_t new_copy = VM_OBJECT_NULL; |
3443 | vm_object_t old_copy; | |
3444 | vm_page_t p; | |
3445 | vm_object_size_t copy_size = src_offset + size; | |
3446 | pmap_flush_context pmap_flush_context_storage; | |
3447 | boolean_t delayed_pmap_flush = FALSE; | |
1c79356b | 3448 | |
2d21ac55 | 3449 | |
1c79356b A |
3450 | int collisions = 0; |
3451 | /* | |
3452 | * The user-level memory manager wants to see all of the changes | |
3453 | * to this object, but it has promised not to make any changes on | |
0a7de745 | 3454 | * its own. |
1c79356b A |
3455 | * |
3456 | * Perform an asymmetric copy-on-write, as follows: | |
3457 | * Create a new object, called a "copy object" to hold | |
3458 | * pages modified by the new mapping (i.e., the copy, | |
3459 | * not the original mapping). | |
3460 | * Record the original object as the backing object for | |
3461 | * the copy object. If the original mapping does not | |
3462 | * change a page, it may be used read-only by the copy. | |
3463 | * Record the copy object in the original object. | |
3464 | * When the original mapping causes a page to be modified, | |
3465 | * it must be copied to a new page that is "pushed" to | |
3466 | * the copy object. | |
3467 | * Mark the new mapping (the copy object) copy-on-write. | |
3468 | * This makes the copy object itself read-only, allowing | |
3469 | * it to be reused if the original mapping makes no | |
3470 | * changes, and simplifying the synchronization required | |
3471 | * in the "push" operation described above. | |
3472 | * | |
3473 | * The copy-on-write is said to be asymmetric because the original | |
3474 | * object is *not* marked copy-on-write. A copied page is pushed | |
3475 | * to the copy object, regardless of which party attempted to modify | |
3476 | * the page. | |
3477 | * | |
3478 | * Repeated asymmetric copy operations may be done. If the | |
3479 | * original object has not been changed since the last copy, its | |
3480 | * copy object can be reused. Otherwise, a new copy object can be | |
3481 | * inserted between the original object and its previous copy | |
3482 | * object. Since any copy object is read-only, this cannot affect | |
3483 | * the contents of the previous copy object. | |
3484 | * | |
3485 | * Note that a copy object is higher in the object tree than the | |
3486 | * original object; therefore, use of the copy object recorded in | |
3487 | * the original object must be done carefully, to avoid deadlock. | |
3488 | */ | |
3489 | ||
3e170ce0 | 3490 | copy_size = vm_object_round_page(copy_size); |
0a7de745 A |
3491 | Retry: |
3492 | ||
55e303ae A |
3493 | /* |
3494 | * Wait for paging in progress. | |
3495 | */ | |
b0d623f7 A |
3496 | if (!src_object->true_share && |
3497 | (src_object->paging_in_progress != 0 || | |
0a7de745 A |
3498 | src_object->activity_in_progress != 0)) { |
3499 | if (src_object_shared == TRUE) { | |
3500 | vm_object_unlock(src_object); | |
2d21ac55 A |
3501 | vm_object_lock(src_object); |
3502 | src_object_shared = FALSE; | |
b0d623f7 | 3503 | goto Retry; |
2d21ac55 | 3504 | } |
55e303ae | 3505 | vm_object_paging_wait(src_object, THREAD_UNINT); |
2d21ac55 | 3506 | } |
1c79356b A |
3507 | /* |
3508 | * See whether we can reuse the result of a previous | |
3509 | * copy operation. | |
3510 | */ | |
3511 | ||
3512 | old_copy = src_object->copy; | |
3513 | if (old_copy != VM_OBJECT_NULL) { | |
0a7de745 | 3514 | int lock_granted; |
2d21ac55 | 3515 | |
1c79356b A |
3516 | /* |
3517 | * Try to get the locks (out of order) | |
3518 | */ | |
0a7de745 A |
3519 | if (src_object_shared == TRUE) { |
3520 | lock_granted = vm_object_lock_try_shared(old_copy); | |
3521 | } else { | |
3522 | lock_granted = vm_object_lock_try(old_copy); | |
3523 | } | |
2d21ac55 A |
3524 | |
3525 | if (!lock_granted) { | |
1c79356b | 3526 | vm_object_unlock(src_object); |
1c79356b | 3527 | |
0a7de745 | 3528 | if (collisions++ == 0) { |
1c79356b | 3529 | copy_delayed_lock_contention++; |
0a7de745 | 3530 | } |
2d21ac55 A |
3531 | mutex_pause(collisions); |
3532 | ||
3533 | /* Heisenberg Rules */ | |
3534 | copy_delayed_lock_collisions++; | |
1c79356b | 3535 | |
0a7de745 | 3536 | if (collisions > copy_delayed_max_collisions) { |
1c79356b | 3537 | copy_delayed_max_collisions = collisions; |
0a7de745 | 3538 | } |
1c79356b | 3539 | |
0a7de745 A |
3540 | if (src_object_shared == TRUE) { |
3541 | vm_object_lock_shared(src_object); | |
3542 | } else { | |
3543 | vm_object_lock(src_object); | |
3544 | } | |
2d21ac55 | 3545 | |
1c79356b A |
3546 | goto Retry; |
3547 | } | |
3548 | ||
3549 | /* | |
3550 | * Determine whether the old copy object has | |
3551 | * been modified. | |
3552 | */ | |
3553 | ||
3554 | if (old_copy->resident_page_count == 0 && | |
3555 | !old_copy->pager_created) { | |
3556 | /* | |
3557 | * It has not been modified. | |
3558 | * | |
3559 | * Return another reference to | |
55e303ae A |
3560 | * the existing copy-object if |
3561 | * we can safely grow it (if | |
3562 | * needed). | |
de355530 | 3563 | */ |
1c79356b | 3564 | |
6d2010ae | 3565 | if (old_copy->vo_size < copy_size) { |
0a7de745 A |
3566 | if (src_object_shared == TRUE) { |
3567 | vm_object_unlock(old_copy); | |
2d21ac55 | 3568 | vm_object_unlock(src_object); |
0a7de745 | 3569 | |
2d21ac55 A |
3570 | vm_object_lock(src_object); |
3571 | src_object_shared = FALSE; | |
3572 | goto Retry; | |
3573 | } | |
55e303ae A |
3574 | /* |
3575 | * We can't perform a delayed copy if any of the | |
3576 | * pages in the extended range are wired (because | |
3577 | * we can't safely take write permission away from | |
3578 | * wired pages). If the pages aren't wired, then | |
3579 | * go ahead and protect them. | |
3580 | */ | |
3581 | copy_delayed_protect_iterate++; | |
2d21ac55 | 3582 | |
39236c6e A |
3583 | pmap_flush_context_init(&pmap_flush_context_storage); |
3584 | delayed_pmap_flush = FALSE; | |
3585 | ||
0a7de745 A |
3586 | vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { |
3587 | if (!p->vmp_fictitious && | |
3588 | p->vmp_offset >= old_copy->vo_size && | |
d9a64523 | 3589 | p->vmp_offset < copy_size) { |
b0d623f7 | 3590 | if (VM_PAGE_WIRED(p)) { |
55e303ae A |
3591 | vm_object_unlock(old_copy); |
3592 | vm_object_unlock(src_object); | |
91447636 A |
3593 | |
3594 | if (new_copy != VM_OBJECT_NULL) { | |
3595 | vm_object_unlock(new_copy); | |
3596 | vm_object_deallocate(new_copy); | |
3597 | } | |
0a7de745 | 3598 | if (delayed_pmap_flush == TRUE) { |
39236c6e | 3599 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 3600 | } |
91447636 | 3601 | |
55e303ae A |
3602 | return VM_OBJECT_NULL; |
3603 | } else { | |
39037602 | 3604 | pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), |
0a7de745 | 3605 | PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); |
39236c6e | 3606 | delayed_pmap_flush = TRUE; |
55e303ae A |
3607 | } |
3608 | } | |
3609 | } | |
0a7de745 | 3610 | if (delayed_pmap_flush == TRUE) { |
39236c6e | 3611 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 3612 | } |
39236c6e | 3613 | |
f427ee49 A |
3614 | assertf(page_aligned(copy_size), |
3615 | "object %p size 0x%llx", | |
3616 | old_copy, (uint64_t)copy_size); | |
6d2010ae | 3617 | old_copy->vo_size = copy_size; |
55e303ae | 3618 | } |
0a7de745 A |
3619 | if (src_object_shared == TRUE) { |
3620 | vm_object_reference_shared(old_copy); | |
3621 | } else { | |
3622 | vm_object_reference_locked(old_copy); | |
3623 | } | |
d7e50217 A |
3624 | vm_object_unlock(old_copy); |
3625 | vm_object_unlock(src_object); | |
91447636 A |
3626 | |
3627 | if (new_copy != VM_OBJECT_NULL) { | |
3628 | vm_object_unlock(new_copy); | |
3629 | vm_object_deallocate(new_copy); | |
3630 | } | |
0a7de745 | 3631 | return old_copy; |
d7e50217 | 3632 | } |
0a7de745 A |
3633 | |
3634 | ||
de355530 A |
3635 | |
3636 | /* | |
0a7de745 | 3637 | * Adjust the size argument so that the newly-created |
de355530 | 3638 | * copy object will be large enough to back either the |
55e303ae | 3639 | * old copy object or the new mapping. |
de355530 | 3640 | */ |
0a7de745 | 3641 | if (old_copy->vo_size > copy_size) { |
6d2010ae | 3642 | copy_size = old_copy->vo_size; |
0a7de745 | 3643 | } |
55e303ae A |
3644 | |
3645 | if (new_copy == VM_OBJECT_NULL) { | |
3646 | vm_object_unlock(old_copy); | |
3647 | vm_object_unlock(src_object); | |
3648 | new_copy = vm_object_allocate(copy_size); | |
3649 | vm_object_lock(src_object); | |
3650 | vm_object_lock(new_copy); | |
2d21ac55 A |
3651 | |
3652 | src_object_shared = FALSE; | |
55e303ae A |
3653 | goto Retry; |
3654 | } | |
f427ee49 A |
3655 | assertf(page_aligned(copy_size), |
3656 | "object %p size 0x%llx", | |
3657 | new_copy, (uint64_t)copy_size); | |
0a7de745 | 3658 | new_copy->vo_size = copy_size; |
1c79356b A |
3659 | |
3660 | /* | |
3661 | * The copy-object is always made large enough to | |
3662 | * completely shadow the original object, since | |
3663 | * it may have several users who want to shadow | |
3664 | * the original object at different points. | |
3665 | */ | |
3666 | ||
3667 | assert((old_copy->shadow == src_object) && | |
6d2010ae | 3668 | (old_copy->vo_shadow_offset == (vm_object_offset_t) 0)); |
55e303ae A |
3669 | } else if (new_copy == VM_OBJECT_NULL) { |
3670 | vm_object_unlock(src_object); | |
3671 | new_copy = vm_object_allocate(copy_size); | |
3672 | vm_object_lock(src_object); | |
3673 | vm_object_lock(new_copy); | |
2d21ac55 A |
3674 | |
3675 | src_object_shared = FALSE; | |
55e303ae A |
3676 | goto Retry; |
3677 | } | |
3678 | ||
3679 | /* | |
3680 | * We now have the src object locked, and the new copy object | |
3681 | * allocated and locked (and potentially the old copy locked). | |
3682 | * Before we go any further, make sure we can still perform | |
3683 | * a delayed copy, as the situation may have changed. | |
3684 | * | |
3685 | * Specifically, we can't perform a delayed copy if any of the | |
3686 | * pages in the range are wired (because we can't safely take | |
3687 | * write permission away from wired pages). If the pages aren't | |
3688 | * wired, then go ahead and protect them. | |
3689 | */ | |
3690 | copy_delayed_protect_iterate++; | |
2d21ac55 | 3691 | |
39236c6e A |
3692 | pmap_flush_context_init(&pmap_flush_context_storage); |
3693 | delayed_pmap_flush = FALSE; | |
3694 | ||
0a7de745 | 3695 | vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { |
d9a64523 | 3696 | if (!p->vmp_fictitious && p->vmp_offset < copy_size) { |
b0d623f7 | 3697 | if (VM_PAGE_WIRED(p)) { |
0a7de745 | 3698 | if (old_copy) { |
55e303ae | 3699 | vm_object_unlock(old_copy); |
0a7de745 | 3700 | } |
55e303ae A |
3701 | vm_object_unlock(src_object); |
3702 | vm_object_unlock(new_copy); | |
3703 | vm_object_deallocate(new_copy); | |
39236c6e | 3704 | |
0a7de745 | 3705 | if (delayed_pmap_flush == TRUE) { |
39236c6e | 3706 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 3707 | } |
39236c6e | 3708 | |
55e303ae A |
3709 | return VM_OBJECT_NULL; |
3710 | } else { | |
39037602 | 3711 | pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), |
0a7de745 | 3712 | PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); |
39236c6e | 3713 | delayed_pmap_flush = TRUE; |
55e303ae A |
3714 | } |
3715 | } | |
3716 | } | |
0a7de745 | 3717 | if (delayed_pmap_flush == TRUE) { |
39236c6e | 3718 | pmap_flush(&pmap_flush_context_storage); |
0a7de745 | 3719 | } |
39236c6e | 3720 | |
55e303ae | 3721 | if (old_copy != VM_OBJECT_NULL) { |
1c79356b A |
3722 | /* |
3723 | * Make the old copy-object shadow the new one. | |
3724 | * It will receive no more pages from the original | |
3725 | * object. | |
3726 | */ | |
3727 | ||
2d21ac55 A |
3728 | /* remove ref. from old_copy */ |
3729 | vm_object_lock_assert_exclusive(src_object); | |
3730 | src_object->ref_count--; | |
1c79356b | 3731 | assert(src_object->ref_count > 0); |
2d21ac55 | 3732 | vm_object_lock_assert_exclusive(old_copy); |
1c79356b | 3733 | old_copy->shadow = new_copy; |
2d21ac55 | 3734 | vm_object_lock_assert_exclusive(new_copy); |
1c79356b | 3735 | assert(new_copy->ref_count > 0); |
0a7de745 | 3736 | new_copy->ref_count++; /* for old_copy->shadow ref. */ |
1c79356b | 3737 | |
0a7de745 | 3738 | vm_object_unlock(old_copy); /* done with old_copy */ |
1c79356b A |
3739 | } |
3740 | ||
3741 | /* | |
3742 | * Point the new copy at the existing object. | |
3743 | */ | |
2d21ac55 | 3744 | vm_object_lock_assert_exclusive(new_copy); |
1c79356b | 3745 | new_copy->shadow = src_object; |
6d2010ae | 3746 | new_copy->vo_shadow_offset = 0; |
0a7de745 | 3747 | new_copy->shadowed = TRUE; /* caller must set needs_copy */ |
2d21ac55 A |
3748 | |
3749 | vm_object_lock_assert_exclusive(src_object); | |
3750 | vm_object_reference_locked(src_object); | |
1c79356b | 3751 | src_object->copy = new_copy; |
55e303ae | 3752 | vm_object_unlock(src_object); |
1c79356b A |
3753 | vm_object_unlock(new_copy); |
3754 | ||
2d21ac55 | 3755 | return new_copy; |
1c79356b A |
3756 | } |
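/*
 * Conceptual sketch (not kernel code) of the pointer rewiring performed by
 * vm_object_copy_delayed() above when a previous copy object exists.
 * Reference counting and locking are intentionally omitted, and the toy
 * struct/field/function names are invented for the example.
 */
#if 0	/* illustration only */
struct toy_object {
	struct toy_object *shadow;	/* object we read through when a page is absent */
	struct toy_object *copy;	/* most recent delayed-copy object */
};

static void
toy_insert_copy(struct toy_object *src, struct toy_object *new_copy,
    struct toy_object *old_copy)
{
	if (old_copy != NULL) {
		old_copy->shadow = new_copy;	/* old copy now reads through the new one */
	}
	new_copy->shadow = src;			/* new copy is backed by the original */
	src->copy = new_copy;			/* original remembers its latest copy */
	/*
	 * Resulting chain:  old_copy -> new_copy -> src
	 * Only the newest copy continues to receive pages pushed from "src".
	 */
}
#endif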
3757 | ||
3758 | /* | |
3759 | * Routine: vm_object_copy_strategically | |
3760 | * | |
3761 | * Purpose: | |
3762 | * Perform a copy according to the source object's | |
3763 | * declared strategy. This operation may block, | |
3764 | * and may be interrupted. | |
3765 | */ | |
0b4e3aa0 | 3766 | __private_extern__ kern_return_t |
1c79356b | 3767 | vm_object_copy_strategically( |
0a7de745 A |
3768 | vm_object_t src_object, |
3769 | vm_object_offset_t src_offset, | |
3770 | vm_object_size_t size, | |
3771 | vm_object_t *dst_object, /* OUT */ | |
3772 | vm_object_offset_t *dst_offset, /* OUT */ | |
3773 | boolean_t *dst_needs_copy) /* OUT */ | |
1c79356b | 3774 | { |
0a7de745 A |
3775 | boolean_t result; |
3776 | boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ | |
3777 | boolean_t object_lock_shared = FALSE; | |
1c79356b A |
3778 | memory_object_copy_strategy_t copy_strategy; |
3779 | ||
3780 | assert(src_object != VM_OBJECT_NULL); | |
3781 | ||
2d21ac55 A |
3782 | copy_strategy = src_object->copy_strategy; |
3783 | ||
3784 | if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) { | |
0a7de745 | 3785 | vm_object_lock_shared(src_object); |
2d21ac55 | 3786 | object_lock_shared = TRUE; |
0a7de745 A |
3787 | } else { |
3788 | vm_object_lock(src_object); | |
3789 | } | |
1c79356b A |
3790 | |
3791 | /* | |
3792 | * The copy strategy is only valid if the memory manager | |
3793 | * is "ready". Internal objects are always ready. | |
3794 | */ | |
3795 | ||
3796 | while (!src_object->internal && !src_object->pager_ready) { | |
9bccf70c | 3797 | wait_result_t wait_result; |
1c79356b | 3798 | |
2d21ac55 | 3799 | if (object_lock_shared == TRUE) { |
0a7de745 | 3800 | vm_object_unlock(src_object); |
2d21ac55 A |
3801 | vm_object_lock(src_object); |
3802 | object_lock_shared = FALSE; | |
3803 | continue; | |
3804 | } | |
0a7de745 A |
3805 | wait_result = vm_object_sleep( src_object, |
3806 | VM_OBJECT_EVENT_PAGER_READY, | |
3807 | interruptible); | |
9bccf70c A |
3808 | if (wait_result != THREAD_AWAKENED) { |
3809 | vm_object_unlock(src_object); | |
1c79356b A |
3810 | *dst_object = VM_OBJECT_NULL; |
3811 | *dst_offset = 0; | |
3812 | *dst_needs_copy = FALSE; | |
0a7de745 | 3813 | return MACH_SEND_INTERRUPTED; |
1c79356b | 3814 | } |
1c79356b A |
3815 | } |
3816 | ||
1c79356b A |
3817 | /* |
3818 | * Use the appropriate copy strategy. | |
3819 | */ | |
3820 | ||
3821 | switch (copy_strategy) { | |
0a7de745 | 3822 | case MEMORY_OBJECT_COPY_DELAY: |
55e303ae | 3823 | *dst_object = vm_object_copy_delayed(src_object, |
0a7de745 | 3824 | src_offset, size, object_lock_shared); |
55e303ae A |
3825 | if (*dst_object != VM_OBJECT_NULL) { |
3826 | *dst_offset = src_offset; | |
3827 | *dst_needs_copy = TRUE; | |
3828 | result = KERN_SUCCESS; | |
3829 | break; | |
3830 | } | |
3831 | vm_object_lock(src_object); | |
f427ee49 | 3832 | OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */ |
55e303ae | 3833 | |
0a7de745 | 3834 | case MEMORY_OBJECT_COPY_NONE: |
1c79356b | 3835 | result = vm_object_copy_slowly(src_object, src_offset, size, |
0a7de745 | 3836 | interruptible, dst_object); |
1c79356b | 3837 | if (result == KERN_SUCCESS) { |
f427ee49 | 3838 | *dst_offset = src_offset - vm_object_trunc_page(src_offset); |
1c79356b A |
3839 | *dst_needs_copy = FALSE; |
3840 | } | |
3841 | break; | |
3842 | ||
0a7de745 | 3843 | case MEMORY_OBJECT_COPY_CALL: |
1c79356b | 3844 | result = vm_object_copy_call(src_object, src_offset, size, |
0a7de745 | 3845 | dst_object); |
1c79356b A |
3846 | if (result == KERN_SUCCESS) { |
3847 | *dst_offset = src_offset; | |
3848 | *dst_needs_copy = TRUE; | |
3849 | } | |
3850 | break; | |
3851 | ||
0a7de745 | 3852 | case MEMORY_OBJECT_COPY_SYMMETRIC: |
1c79356b A |
3853 | vm_object_unlock(src_object); |
3854 | result = KERN_MEMORY_RESTART_COPY; | |
3855 | break; | |
3856 | ||
0a7de745 | 3857 | default: |
1c79356b A |
3858 | panic("copy_strategically: bad strategy"); |
3859 | result = KERN_INVALID_ARGUMENT; | |
3860 | } | |
0a7de745 | 3861 | return result; |
1c79356b A |
3862 | } |
3863 | ||
3864 | /* | |
3865 | * vm_object_shadow: | |
3866 | * | |
3867 | * Create a new object which is backed by the | |
3868 | * specified existing object range. The source | |
3869 | * object reference is deallocated. | |
3870 | * | |
3871 | * The new object and offset into that object | |
3872 | * are returned in the source parameters. | |
3873 | */ | |
6d2010ae | 3874 | boolean_t vm_object_shadow_check = TRUE; |
1c79356b | 3875 | |
0b4e3aa0 | 3876 | __private_extern__ boolean_t |
1c79356b | 3877 | vm_object_shadow( |
0a7de745 A |
3878 | vm_object_t *object, /* IN/OUT */ |
3879 | vm_object_offset_t *offset, /* IN/OUT */ | |
3880 | vm_object_size_t length) | |
1c79356b | 3881 | { |
0a7de745 A |
3882 | vm_object_t source; |
3883 | vm_object_t result; | |
1c79356b A |
3884 | |
3885 | source = *object; | |
e2d2fc5c | 3886 | assert(source != VM_OBJECT_NULL); |
0a7de745 | 3887 | if (source == VM_OBJECT_NULL) { |
e2d2fc5c | 3888 | return FALSE; |
0a7de745 | 3889 | } |
e2d2fc5c | 3890 | |
2d21ac55 A |
3891 | #if 0 |
3892 | /* | |
3893 | * XXX FBDP | |
3894 | * This assertion is valid but it gets triggered by Rosetta for example | |
3895 | * due to a combination of vm_remap() that changes a VM object's | |
3896 | * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY) | |
3897 | * that then sets "needs_copy" on its map entry. This creates a | |
3898 | * mapping situation that VM should never see and doesn't know how to | |
3899 | * handle. | |
3900 | * It's not clear if this can create any real problem but we should | |
3901 | * look into fixing this, probably by having vm_protect(VM_PROT_COPY) | |
3902 | * do more than just set "needs_copy" to handle the copy-on-write... | |
3903 | * In the meantime, let's disable the assertion. | |
3904 | */ | |
1c79356b | 3905 | assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); |
2d21ac55 | 3906 | #endif |
1c79356b A |
3907 | |
3908 | /* | |
3909 | * Determine if we really need a shadow. | |
6d2010ae A |
3910 | * |
3911 | * If the source object is larger than what we are trying | |
3912 | * to create, then force the shadow creation even if the | |
3913 | * ref count is 1. This will allow us to [potentially] | |
3914 | * collapse the underlying object away in the future | |
3915 | * (freeing up the extra data it might contain and that | |
3916 | * we don't need). | |
1c79356b | 3917 | */ |
39037602 A |
3918 | |
3919 | assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */ | |
3920 | ||
c3c9b80d A |
3921 | #if 00 |
3922 | /* | |
3923 | * The following optimization does not work in the context of submaps | |
3924 | * (the shared region, in particular). | |
3925 | * This object might have only 1 reference (in the submap) but that | |
3926 | * submap can itself be mapped multiple times, so the object is | |
3927 | * actually indirectly referenced more than once... | |
3928 | */ | |
6d2010ae A |
3929 | if (vm_object_shadow_check && |
3930 | source->vo_size == length && | |
0a7de745 A |
3931 | source->ref_count == 1) { |
3932 | /* | |
3933 | * Lock the object and check again. | |
3934 | * We also check to see if there's | |
3935 | * a shadow or copy object involved. | |
3936 | * We can't do that earlier because | |
3937 | * without the object locked, there | |
3938 | * could be a collapse and the chain | |
3939 | * gets modified leaving us with an | |
3940 | * invalid pointer. | |
3941 | */ | |
39037602 A |
3942 | vm_object_lock(source); |
3943 | if (source->vo_size == length && | |
3944 | source->ref_count == 1 && | |
3945 | (source->shadow == VM_OBJECT_NULL || | |
0a7de745 | 3946 | source->shadow->copy == VM_OBJECT_NULL)) { |
39037602 A |
3947 | source->shadowed = FALSE; |
3948 | vm_object_unlock(source); | |
3949 | return FALSE; | |
3950 | } | |
3951 | /* things changed while we were locking "source"... */ | |
3952 | vm_object_unlock(source); | |
1c79356b | 3953 | } |
c3c9b80d | 3954 | #endif /* 00 */ |
1c79356b | 3955 | |
f427ee49 A |
3956 | /* |
3957 | * *offset is the map entry's offset into the VM object and | |
3958 | * is aligned to the map's page size. | |
3959 | * VM objects need to be aligned to the system's page size. | |
3960 | * Record the necessary adjustment and re-align the offset so | |
3961 | * that result->vo_shadow_offset is properly page-aligned. | |
3962 | */ | |
3963 | vm_object_offset_t offset_adjustment; | |
3964 | offset_adjustment = *offset - vm_object_trunc_page(*offset); | |
3965 | length = vm_object_round_page(length + offset_adjustment); | |
3966 | *offset = vm_object_trunc_page(*offset); | |
3967 | ||
1c79356b A |
3968 | /* |
3969 | * Allocate a new object with the given length | |
3970 | */ | |
3971 | ||
0a7de745 | 3972 | if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) { |
1c79356b | 3973 | panic("vm_object_shadow: no object for shadowing"); |
0a7de745 | 3974 | } |
1c79356b A |
3975 | |
3976 | /* | |
3977 | * The new object shadows the source object, adding | |
3978 | * a reference to it. Our caller changes his reference | |
3979 | * to point to the new object, removing a reference to | |
3980 | * the source object. Net result: no change of reference | |
3981 | * count. | |
3982 | */ | |
3983 | result->shadow = source; | |
0a7de745 | 3984 | |
1c79356b A |
3985 | /* |
3986 | * Store the offset into the source object, | |
3987 | * and fix up the offset into the new object. | |
3988 | */ | |
3989 | ||
6d2010ae | 3990 | result->vo_shadow_offset = *offset; |
f427ee49 A |
3991 | assertf(page_aligned(result->vo_shadow_offset), |
3992 | "result %p shadow offset 0x%llx", | |
3993 | result, result->vo_shadow_offset); | |
1c79356b A |
3994 | |
3995 | /* | |
3996 | * Return the new things | |
3997 | */ | |
3998 | ||
3999 | *offset = 0; | |
f427ee49 A |
4000 | if (offset_adjustment) { |
4001 | /* | |
4002 | * Make the map entry point to the equivalent offset | |
4003 | * in the new object. | |
4004 | */ | |
4005 | DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length); | |
4006 | *offset += offset_adjustment; | |
4007 | } | |
1c79356b A |
4008 | *object = result; |
4009 | return TRUE; | |
4010 | } | |
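/*
 * Illustrative caller sketch (not kernel code) for vm_object_shadow(),
 * following the IN/OUT contract documented above: on success, the caller's
 * object/offset now name the new shadow object and the reference previously
 * held on the source has been consumed by the shadow link.  Variable names
 * are invented for the example.
 */
#if 0	/* example only */
	vm_object_t             obj = entry_object;	/* we hold one reference */
	vm_object_offset_t      off = entry_offset;

	if (vm_object_shadow(&obj, &off, entry_size)) {
		/* a new shadow object was created; map through (obj, off) from now on */
	} else {
		/* no shadow was created (e.g. no source object); (obj, off) unchanged */
	}
#endif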
4011 | ||
4012 | /* | |
4013 | * The relationship between vm_object structures and | |
0b4e3aa0 | 4014 | * the memory_object requires careful synchronization. |
1c79356b | 4015 | * |
0b4e3aa0 | 4016 | * All associations are created by memory_object_create_named |
39037602 | 4017 | * for external pagers and vm_object_compressor_pager_create for internal |
0b4e3aa0 A |
4018 | * objects as follows: |
4019 | * | |
4020 | * pager: the memory_object itself, supplied by | |
1c79356b A |
4021 | * the user requesting a mapping (or the kernel, |
4022 | * when initializing internal objects); the | |
4023 | * kernel simulates holding send rights by keeping | |
4024 | * a port reference; | |
0b4e3aa0 | 4025 | * |
1c79356b A |
4026 | * pager_request: |
4027 | * the memory object control port, | |
4028 | * created by the kernel; the kernel holds | |
4029 | * receive (and ownership) rights to this | |
4030 | * port, but no other references. | |
1c79356b A |
4031 | * |
4032 | * When initialization is complete, the "initialized" field | |
4033 | * is asserted. Other mappings using a particular memory object, | |
4034 | * and any references to the vm_object gained through the | |
4035 | * port association must wait for this initialization to occur. | |
4036 | * | |
4037 | * In order to allow the memory manager to set attributes before | |
4038 | * requests (notably virtual copy operations, but also data or | |
4039 | * unlock requests) are made, a "ready" attribute is made available. | |
4040 | * Only the memory manager may affect the value of this attribute. | |
4041 | * Its value does not affect critical kernel functions, such as | |
4042 | * internal object initialization or destruction. [Furthermore, | |
4043 | * memory objects created by the kernel are assumed to be ready | |
4044 | * immediately; the default memory manager need not explicitly | |
4045 | * set the "ready" attribute.] | |
4046 | * | |
4047 | * [Both the "initialized" and "ready" attribute wait conditions | |
4048 | * use the "pager" field as the wait event.] | |
4049 | * | |
4050 | * The port associations can be broken down by any of the | |
4051 | * following routines: | |
4052 | * vm_object_terminate: | |
4053 | * No references to the vm_object remain, and | |
4054 | * the object cannot (or will not) be cached. | |
4055 | * This is the normal case, and is done even | |
4056 | * though one of the other cases has already been | |
4057 | * done. | |
1c79356b A |
4058 | * memory_object_destroy: |
4059 | * The memory manager has requested that the | |
0b4e3aa0 A |
4060 | * kernel relinquish references to the memory |
4061 | * object. [The memory manager may not want to | |
4062 | * destroy the memory object, but may wish to | |
4063 | * refuse or tear down existing memory mappings.] | |
4064 | * | |
1c79356b A |
4065 | * Each routine that breaks an association must break all of |
4066 | * them at once. At some later time, that routine must clear | |
0b4e3aa0 | 4067 | * the pager field and release the memory object references. |
1c79356b A |
4068 | * [Furthermore, each routine must cope with the simultaneous |
4069 | * or previous operations of the others.] | |
4070 | * | |
0b4e3aa0 | 4071 | * Because the pager field may be cleared spontaneously, it |
1c79356b A |
4072 | * cannot be used to determine whether a memory object has |
4073 | * ever been associated with a particular vm_object. [This | |
2d21ac55 A |
4074 | * knowledge is important to the shadow object mechanism.] |
4075 | * For this reason, an additional "created" attribute is | |
4076 | * provided. | |
4077 | * | |
4078 | * During various paging operations, the pager reference found in the | |
4079 | * vm_object must be valid. To prevent this from being released, | |
4080 | * (other than being removed, i.e., made null), routines may use | |
4081 | * the vm_object_paging_begin/end routines [actually, macros]. | |
4082 | * The implementation uses the "paging_in_progress" and "wanted" fields. | |
4083 | * [Operations that alter the validity of the pager values include the | |
4084 | * termination routines and vm_object_collapse.] | |
4085 | */ | |
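/*
 * Minimal sketch (not kernel code) of the vm_object_paging_begin/end
 * convention described above: take the paging reference while the object is
 * locked so the pager association stays valid across the unlocked section.
 * Error handling and the actual pager request are elided.
 */
#if 0	/* example only */
	memory_object_t pager;

	vm_object_lock(object);
	vm_object_paging_begin(object);		/* pin the pager association */
	pager = object->pager;
	vm_object_unlock(object);

	/* ... issue the request to "pager" without the object lock held ... */

	vm_object_lock(object);
	vm_object_paging_end(object);		/* may wake up waiters */
	vm_object_unlock(object);
#endif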
1c79356b | 4086 | |
1c79356b A |
4087 | |
4088 | /* | |
5ba3f43e | 4089 | * Routine: vm_object_memory_object_associate |
1c79356b | 4090 | * Purpose: |
5ba3f43e A |
4091 | * Associate a VM object with the given pager. |
4092 | * If a VM object is not provided, create one. | |
4093 | * Initialize the pager. | |
1c79356b A |
4094 | */ |
4095 | vm_object_t | |
5ba3f43e | 4096 | vm_object_memory_object_associate( |
0a7de745 A |
4097 | memory_object_t pager, |
4098 | vm_object_t object, | |
4099 | vm_object_size_t size, | |
4100 | boolean_t named) | |
1c79356b | 4101 | { |
5ba3f43e | 4102 | memory_object_control_t control; |
1c79356b | 4103 | |
5ba3f43e | 4104 | assert(pager != MEMORY_OBJECT_NULL); |
1c79356b | 4105 | |
5ba3f43e A |
4106 | if (object != VM_OBJECT_NULL) { |
4107 | assert(object->internal); | |
4108 | assert(object->pager_created); | |
4109 | assert(!object->pager_initialized); | |
4110 | assert(!object->pager_ready); | |
cb323159 | 4111 | assert(object->pager_trusted); |
5ba3f43e A |
4112 | } else { |
4113 | object = vm_object_allocate(size); | |
4114 | assert(object != VM_OBJECT_NULL); | |
4115 | object->internal = FALSE; | |
4116 | object->pager_trusted = FALSE; | |
4117 | /* copy strategy invalid until set by memory manager */ | |
4118 | object->copy_strategy = MEMORY_OBJECT_COPY_INVALID; | |
4119 | } | |
1c79356b A |
4120 | |
4121 | /* | |
5ba3f43e | 4122 | * Allocate request port. |
1c79356b | 4123 | */ |
b0d623f7 | 4124 | |
5ba3f43e | 4125 | control = memory_object_control_allocate(object); |
0a7de745 | 4126 | assert(control != MEMORY_OBJECT_CONTROL_NULL); |
1c79356b | 4127 | |
5ba3f43e | 4128 | vm_object_lock(object); |
1c79356b | 4129 | |
5ba3f43e A |
4130 | assert(!object->pager_ready); |
4131 | assert(!object->pager_initialized); | |
4132 | assert(object->pager == NULL); | |
4133 | assert(object->pager_control == NULL); | |
1c79356b A |
4134 | |
4135 | /* | |
5ba3f43e | 4136 | * Copy the reference we were given. |
1c79356b A |
4137 | */ |
4138 | ||
5ba3f43e A |
4139 | memory_object_reference(pager); |
4140 | object->pager_created = TRUE; | |
4141 | object->pager = pager; | |
4142 | object->pager_control = control; | |
4143 | object->pager_ready = FALSE; | |
1c79356b | 4144 | |
5ba3f43e | 4145 | vm_object_unlock(object); |
1c79356b A |
4146 | |
4147 | /* | |
5ba3f43e | 4148 | * Let the pager know we're using it. |
1c79356b A |
4149 | */ |
4150 | ||
5ba3f43e | 4151 | (void) memory_object_init(pager, |
0a7de745 A |
4152 | object->pager_control, |
4153 | PAGE_SIZE); | |
1c79356b | 4154 | |
5ba3f43e | 4155 | vm_object_lock(object); |
0a7de745 | 4156 | if (named) { |
5ba3f43e | 4157 | object->named = TRUE; |
0a7de745 | 4158 | } |
5ba3f43e A |
4159 | if (object->internal) { |
4160 | object->pager_ready = TRUE; | |
4161 | vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); | |
1c79356b | 4162 | } |
5ba3f43e A |
4163 | |
4164 | object->pager_initialized = TRUE; | |
4165 | vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED); | |
4166 | ||
1c79356b A |
4167 | vm_object_unlock(object); |
4168 | ||
5ba3f43e | 4169 | return object; |
1c79356b A |
4170 | } |
4171 | ||
4172 | /* | |
39037602 | 4173 | * Routine: vm_object_compressor_pager_create |
1c79356b A |
4174 | * Purpose: |
4175 | * Create a memory object for an internal object. | |
4176 | * In/out conditions: | |
4177 | * The object is locked on entry and exit; | |
4178 | * it may be unlocked within this call. | |
4179 | * Limitations: | |
4180 | * Only one thread may be performing a | |
39037602 | 4181 | * vm_object_compressor_pager_create on an object at |
1c79356b A |
4182 | * a time. Presumably, only the pageout |
4183 | * daemon will be using this routine. | |
4184 | */ | |
4185 | ||
39236c6e A |
4186 | void |
4187 | vm_object_compressor_pager_create( | |
0a7de745 | 4188 | vm_object_t object) |
39236c6e | 4189 | { |
0a7de745 A |
4190 | memory_object_t pager; |
4191 | vm_object_t pager_object = VM_OBJECT_NULL; | |
39236c6e A |
4192 | |
4193 | assert(object != kernel_object); | |
4194 | ||
4195 | /* | |
4196 | * Prevent collapse or termination by holding a paging reference | |
4197 | */ | |
4198 | ||
4199 | vm_object_paging_begin(object); | |
4200 | if (object->pager_created) { | |
4201 | /* | |
4202 | * Someone else got to it first... | |
4203 | * wait for them to finish initializing the ports | |
4204 | */ | |
4205 | while (!object->pager_initialized) { | |
4206 | vm_object_sleep(object, | |
0a7de745 A |
4207 | VM_OBJECT_EVENT_INITIALIZED, |
4208 | THREAD_UNINT); | |
39236c6e A |
4209 | } |
4210 | vm_object_paging_end(object); | |
4211 | return; | |
4212 | } | |
4213 | ||
0a7de745 A |
4214 | if ((uint32_t) (object->vo_size / PAGE_SIZE) != |
4215 | (object->vo_size / PAGE_SIZE)) { | |
813fb2f6 A |
4216 | #if DEVELOPMENT || DEBUG |
4217 | printf("vm_object_compressor_pager_create(%p): " | |
0a7de745 A |
4218 | "object size 0x%llx >= 0x%llx\n", |
4219 | object, | |
4220 | (uint64_t) object->vo_size, | |
4221 | 0x0FFFFFFFFULL * PAGE_SIZE); | |
813fb2f6 A |
4222 | #endif /* DEVELOPMENT || DEBUG */ |
4223 | vm_object_paging_end(object); | |
4224 | return; | |
4225 | } | |
4226 | ||
39236c6e A |
4227 | /* |
4228 | * Indicate that a memory object has been assigned | |
4229 | * before dropping the lock, to prevent a race. | |
4230 | */ | |
4231 | ||
4232 | object->pager_created = TRUE; | |
cb323159 | 4233 | object->pager_trusted = TRUE; |
39236c6e | 4234 | object->paging_offset = 0; |
0a7de745 | 4235 | |
39236c6e A |
4236 | vm_object_unlock(object); |
4237 | ||
39236c6e A |
4238 | /* |
4239 | * Create the [internal] pager, and associate it with this object. | |
4240 | * | |
4241 | * We make the association here so that vm_object_enter() | |
0a7de745 | 4242 | * can look up the object to complete initializing it. No |
39236c6e A |
4243 | * user will ever map this object. |
4244 | */ | |
4245 | { | |
39236c6e | 4246 | /* create our new memory object */ |
0a7de745 A |
4247 | assert((uint32_t) (object->vo_size / PAGE_SIZE) == |
4248 | (object->vo_size / PAGE_SIZE)); | |
39236c6e | 4249 | (void) compressor_memory_object_create( |
22ba694c | 4250 | (memory_object_size_t) object->vo_size, |
39236c6e | 4251 | &pager); |
22ba694c A |
4252 | if (pager == NULL) { |
4253 | panic("vm_object_compressor_pager_create(): " | |
0a7de745 A |
4254 | "no pager for object %p size 0x%llx\n", |
4255 | object, (uint64_t) object->vo_size); | |
22ba694c | 4256 | } |
0a7de745 | 4257 | } |
39236c6e | 4258 | |
39236c6e A |
4259 | /* |
4260 | * A reference was returned by | |
4261 | * memory_object_create(), and it is | |
5ba3f43e | 4262 | * copied by vm_object_memory_object_associate(). |
39236c6e A |
4263 | */ |
4264 | ||
5ba3f43e | 4265 | pager_object = vm_object_memory_object_associate(pager, |
0a7de745 A |
4266 | object, |
4267 | object->vo_size, | |
4268 | FALSE); | |
fe8ab488 A |
4269 | if (pager_object != object) { |
4270 | panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size); | |
4271 | } | |
39236c6e A |
4272 | |
4273 | /* | |
4274 | * Drop the reference we were passed. | |
4275 | */ | |
4276 | memory_object_deallocate(pager); | |
4277 | ||
4278 | vm_object_lock(object); | |
4279 | ||
4280 | /* | |
4281 | * Release the paging reference | |
4282 | */ | |
4283 | vm_object_paging_end(object); | |
4284 | } | |
4285 | ||
1c79356b A |
4286 | /* |
4287 | * Global variables for vm_object_collapse(): | |
4288 | * | |
4289 | * Counts for normal collapses and bypasses. | |
4290 | * Debugging variables, to watch or disable collapse. | |
4291 | */ | |
0a7de745 A |
4292 | static long object_collapses = 0; |
4293 | static long object_bypasses = 0; | |
1c79356b | 4294 | |
0a7de745 A |
4295 | static boolean_t vm_object_collapse_allowed = TRUE; |
4296 | static boolean_t vm_object_bypass_allowed = TRUE; | |
0b4e3aa0 | 4297 | |
fe8ab488 | 4298 | void vm_object_do_collapse_compressor(vm_object_t object, |
0a7de745 | 4299 | vm_object_t backing_object); |
fe8ab488 A |
4300 | void |
4301 | vm_object_do_collapse_compressor( | |
4302 | vm_object_t object, | |
4303 | vm_object_t backing_object) | |
4304 | { | |
4305 | vm_object_offset_t new_offset, backing_offset; | |
4306 | vm_object_size_t size; | |
4307 | ||
4308 | vm_counters.do_collapse_compressor++; | |
4309 | ||
4310 | vm_object_lock_assert_exclusive(object); | |
4311 | vm_object_lock_assert_exclusive(backing_object); | |
4312 | ||
4313 | size = object->vo_size; | |
4314 | ||
4315 | /* | |
4316 | * Move all compressed pages from backing_object | |
4317 | * to the parent. | |
4318 | */ | |
4319 | ||
4320 | for (backing_offset = object->vo_shadow_offset; | |
0a7de745 A |
4321 | backing_offset < object->vo_shadow_offset + object->vo_size; |
4322 | backing_offset += PAGE_SIZE) { | |
fe8ab488 A |
4323 | memory_object_offset_t backing_pager_offset; |
4324 | ||
4325 | /* find the next compressed page at or after this offset */ | |
4326 | backing_pager_offset = (backing_offset + | |
0a7de745 | 4327 | backing_object->paging_offset); |
fe8ab488 A |
4328 | backing_pager_offset = vm_compressor_pager_next_compressed( |
4329 | backing_object->pager, | |
4330 | backing_pager_offset); | |
4331 | if (backing_pager_offset == (memory_object_offset_t) -1) { | |
4332 | /* no more compressed pages */ | |
4333 | break; | |
4334 | } | |
4335 | backing_offset = (backing_pager_offset - | |
0a7de745 | 4336 | backing_object->paging_offset); |
fe8ab488 A |
4337 | |
4338 | new_offset = backing_offset - object->vo_shadow_offset; | |
4339 | ||
4340 | if (new_offset >= object->vo_size) { | |
4341 | /* we're out of the scope of "object": done */ | |
4342 | break; | |
4343 | } | |
4344 | ||
4345 | if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) || | |
4346 | (vm_compressor_pager_state_get(object->pager, | |
0a7de745 A |
4347 | (new_offset + |
4348 | object->paging_offset)) == | |
4349 | VM_EXTERNAL_STATE_EXISTS)) { | |
fe8ab488 A |
4350 | /* |
4351 | * This page already exists in object, resident or | |
4352 | * compressed. | |
4353 | * We don't need this compressed page in backing_object | |
4354 | * and it will be reclaimed when we release | |
4355 | * backing_object. | |
4356 | */ | |
4357 | continue; | |
4358 | } | |
4359 | ||
4360 | /* | |
4361 | * backing_object has this page in the VM compressor and | |
4362 | * we need to transfer it to object. | |
4363 | */ | |
4364 | vm_counters.do_collapse_compressor_pages++; | |
4365 | vm_compressor_pager_transfer( | |
4366 | /* destination: */ | |
4367 | object->pager, | |
4368 | (new_offset + object->paging_offset), | |
4369 | /* source: */ | |
4370 | backing_object->pager, | |
4371 | (backing_offset + backing_object->paging_offset)); | |
4372 | } | |
4373 | } | |
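/*
 * Editorial sketch (not part of the original vm_object.c): the loop above
 * translates offsets between the two objects with
 * new_offset = backing_offset - vo_shadow_offset, and only transfers a
 * compressed page when it still lands inside the parent and the parent has
 * neither a resident nor a compressed page at that offset.  A standalone
 * model of that decision follows; the sketch_* names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static bool
sketch_should_transfer(uint64_t backing_offset,
    uint64_t shadow_offset,
    uint64_t parent_size,
    bool parent_already_has_page)
{
	if (backing_offset < shadow_offset) {
		return false;                   /* before the parent's window */
	}
	uint64_t new_offset = backing_offset - shadow_offset;
	if (new_offset >= parent_size) {
		return false;                   /* past the parent's window */
	}
	/* the parent's own copy, resident or compressed, always wins */
	return !parent_already_has_page;
}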
4374 | ||
1c79356b | 4375 | /* |
0b4e3aa0 A |
4376 | * Routine: vm_object_do_collapse |
4377 | * Purpose: | |
4378 | * Collapse an object with the object backing it. | |
4379 | * Pages in the backing object are moved into the | |
4380 | * parent, and the backing object is deallocated. | |
4381 | * Conditions: | |
4382 | * Both objects and the cache are locked; the page | |
4383 | * queues are unlocked. | |
1c79356b A |
4384 | * |
4385 | */ | |
0b4e3aa0 | 4386 | static void |
1c79356b A |
4387 | vm_object_do_collapse( |
4388 | vm_object_t object, | |
4389 | vm_object_t backing_object) | |
4390 | { | |
4391 | vm_page_t p, pp; | |
4392 | vm_object_offset_t new_offset, backing_offset; | |
4393 | vm_object_size_t size; | |
4394 | ||
b0d623f7 A |
4395 | vm_object_lock_assert_exclusive(object); |
4396 | vm_object_lock_assert_exclusive(backing_object); | |
4397 | ||
fe8ab488 A |
4398 | assert(object->purgable == VM_PURGABLE_DENY); |
4399 | assert(backing_object->purgable == VM_PURGABLE_DENY); | |
4400 | ||
6d2010ae A |
4401 | backing_offset = object->vo_shadow_offset; |
4402 | size = object->vo_size; | |
1c79356b | 4403 | |
1c79356b A |
4404 | /* |
4405 | * Move all in-memory pages from backing_object | |
4406 | * to the parent. Pages that have been paged out | |
4407 | * will be overwritten by any of the parent's | |
4408 | * pages that shadow them. | |
4409 | */ | |
0a7de745 | 4410 | |
39037602 | 4411 | while (!vm_page_queue_empty(&backing_object->memq)) { |
39037602 | 4412 | p = (vm_page_t) vm_page_queue_first(&backing_object->memq); |
0a7de745 | 4413 | |
d9a64523 | 4414 | new_offset = (p->vmp_offset - backing_offset); |
0a7de745 | 4415 | |
d9a64523 | 4416 | assert(!p->vmp_busy || p->vmp_absent); |
91447636 | 4417 | |
1c79356b A |
4418 | /* |
4419 | * If the parent has a page here, or if | |
4420 | * this page falls outside the parent, | |
4421 | * dispose of it. | |
4422 | * | |
4423 | * Otherwise, move it as planned. | |
4424 | */ | |
0a7de745 | 4425 | |
d9a64523 | 4426 | if (p->vmp_offset < backing_offset || new_offset >= size) { |
1c79356b A |
4427 | VM_PAGE_FREE(p); |
4428 | } else { | |
4429 | pp = vm_page_lookup(object, new_offset); | |
4430 | if (pp == VM_PAGE_NULL) { | |
fe8ab488 | 4431 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, |
0a7de745 | 4432 | new_offset) |
fe8ab488 A |
4433 | == VM_EXTERNAL_STATE_EXISTS) { |
4434 | /* | |
4435 | * Parent object has this page | |
4436 | * in the VM compressor. | |
4437 | * Throw away the backing | |
4438 | * object's page. | |
4439 | */ | |
4440 | VM_PAGE_FREE(p); | |
4441 | } else { | |
4442 | /* | |
4443 | * Parent now has no page. | |
4444 | * Move the backing object's page | |
0a7de745 | 4445 | * up. |
fe8ab488 | 4446 | */ |
5ba3f43e | 4447 | vm_page_rename(p, object, new_offset); |
fe8ab488 | 4448 | } |
1c79356b | 4449 | } else { |
0a7de745 | 4450 | assert(!pp->vmp_absent); |
1c79356b A |
4451 | |
4452 | /* | |
4453 | * Parent object has a real page. | |
4454 | * Throw away the backing object's | |
4455 | * page. | |
4456 | */ | |
4457 | VM_PAGE_FREE(p); | |
4458 | } | |
4459 | } | |
4460 | } | |
1c79356b | 4461 | |
fe8ab488 A |
4462 | if (vm_object_collapse_compressor_allowed && |
4463 | object->pager != MEMORY_OBJECT_NULL && | |
4464 | backing_object->pager != MEMORY_OBJECT_NULL) { | |
fe8ab488 A |
4465 | /* move compressed pages from backing_object to object */ |
4466 | vm_object_do_collapse_compressor(object, backing_object); | |
fe8ab488 | 4467 | } else if (backing_object->pager != MEMORY_OBJECT_NULL) { |
fe8ab488 | 4468 | assert((!object->pager_created && |
0a7de745 A |
4469 | (object->pager == MEMORY_OBJECT_NULL)) || |
4470 | (!backing_object->pager_created && | |
4471 | (backing_object->pager == MEMORY_OBJECT_NULL))); | |
1c79356b A |
4472 | /* |
4473 | * Move the pager from backing_object to object. | |
4474 | * | |
4475 | * XXX We're only using part of the paging space | |
4476 | * for keeps now... we ought to discard the | |
4477 | * unused portion. | |
4478 | */ | |
4479 | ||
55e303ae | 4480 | assert(!object->paging_in_progress); |
b0d623f7 | 4481 | assert(!object->activity_in_progress); |
fe8ab488 A |
4482 | assert(!object->pager_created); |
4483 | assert(object->pager == NULL); | |
1c79356b | 4484 | object->pager = backing_object->pager; |
b0d623f7 | 4485 | |
1c79356b | 4486 | object->pager_created = backing_object->pager_created; |
91447636 | 4487 | object->pager_control = backing_object->pager_control; |
1c79356b A |
4488 | object->pager_ready = backing_object->pager_ready; |
4489 | object->pager_initialized = backing_object->pager_initialized; | |
1c79356b A |
4490 | object->paging_offset = |
4491 | backing_object->paging_offset + backing_offset; | |
91447636 | 4492 | if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) { |
c3c9b80d | 4493 | memory_object_control_collapse(&object->pager_control, |
0a7de745 | 4494 | object); |
1c79356b | 4495 | } |
fe8ab488 A |
4496 | /* the backing_object has lost its pager: reset all fields */ |
4497 | backing_object->pager_created = FALSE; | |
4498 | backing_object->pager_control = NULL; | |
4499 | backing_object->pager_ready = FALSE; | |
4500 | backing_object->paging_offset = 0; | |
4501 | backing_object->pager = NULL; | |
1c79356b | 4502 | } |
1c79356b A |
4503 | /* |
4504 | * Object now shadows whatever backing_object did. | |
4505 | * Note that the reference to backing_object->shadow | |
4506 | * moves from within backing_object to within object. | |
4507 | */ | |
0a7de745 | 4508 | |
91447636 A |
4509 | assert(!object->phys_contiguous); |
4510 | assert(!backing_object->phys_contiguous); | |
1c79356b | 4511 | object->shadow = backing_object->shadow; |
91447636 | 4512 | if (object->shadow) { |
f427ee49 A |
4513 | assertf(page_aligned(object->vo_shadow_offset), |
4514 | "object %p shadow_offset 0x%llx", | |
4515 | object, object->vo_shadow_offset); | |
4516 | assertf(page_aligned(backing_object->vo_shadow_offset), | |
4517 | "backing_object %p shadow_offset 0x%llx", | |
4518 | backing_object, backing_object->vo_shadow_offset); | |
6d2010ae | 4519 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
fe8ab488 A |
4520 | /* "backing_object" gave its shadow to "object" */ |
4521 | backing_object->shadow = VM_OBJECT_NULL; | |
4522 | backing_object->vo_shadow_offset = 0; | |
91447636 A |
4523 | } else { |
4524 | /* no shadow, therefore no shadow offset... */ | |
6d2010ae | 4525 | object->vo_shadow_offset = 0; |
91447636 | 4526 | } |
1c79356b | 4527 | assert((object->shadow == VM_OBJECT_NULL) || |
0a7de745 | 4528 | (object->shadow->copy != backing_object)); |
1c79356b A |
4529 | |
4530 | /* | |
4531 | * Discard backing_object. | |
4532 | * | |
4533 | * Since the backing object has no pages, no | |
4534 | * pager left, and no object references within it, | |
4535 | * all that is necessary is to dispose of it. | |
4536 | */ | |
fe8ab488 | 4537 | object_collapses++; |
0a7de745 | 4538 | |
fe8ab488 A |
4539 | assert(backing_object->ref_count == 1); |
4540 | assert(backing_object->resident_page_count == 0); | |
4541 | assert(backing_object->paging_in_progress == 0); | |
4542 | assert(backing_object->activity_in_progress == 0); | |
4543 | assert(backing_object->shadow == VM_OBJECT_NULL); | |
4544 | assert(backing_object->vo_shadow_offset == 0); | |
4545 | ||
4546 | if (backing_object->pager != MEMORY_OBJECT_NULL) { | |
4547 | /* ... unless it has a pager; need to terminate pager too */ | |
4548 | vm_counters.do_collapse_terminate++; | |
4549 | if (vm_object_terminate(backing_object) != KERN_SUCCESS) { | |
4550 | vm_counters.do_collapse_terminate_failure++; | |
4551 | } | |
4552 | return; | |
4553 | } | |
4554 | ||
4555 | assert(backing_object->pager == NULL); | |
1c79356b | 4556 | |
1c79356b A |
4557 | backing_object->alive = FALSE; |
4558 | vm_object_unlock(backing_object); | |
4559 | ||
fe8ab488 A |
4560 | #if VM_OBJECT_TRACKING |
4561 | if (vm_object_tracking_inited) { | |
4562 | btlog_remove_entries_for_element(vm_object_tracking_btlog, | |
0a7de745 | 4563 | backing_object); |
fe8ab488 A |
4564 | } |
4565 | #endif /* VM_OBJECT_TRACKING */ | |
4566 | ||
2d21ac55 A |
4567 | vm_object_lock_destroy(backing_object); |
4568 | ||
91447636 | 4569 | zfree(vm_object_zone, backing_object); |
1c79356b A |
4570 | } |
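/*
 * Editorial sketch (not part of the original vm_object.c): when the
 * compressor transfer path is not taken, the pager hand-off above asserts
 * that at most one of the two objects ever created a pager.  A standalone
 * restatement of that precondition; the sketch_* name is hypothetical.
 */
#include <stdbool.h>

static bool
sketch_can_move_pager(bool object_pager_created, bool backing_pager_created)
{
	/* mirrors the assert: at least one side must still be pager-less */
	return !(object_pager_created && backing_pager_created);
}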
4571 | ||
0b4e3aa0 | 4572 | static void |
1c79356b A |
4573 | vm_object_do_bypass( |
4574 | vm_object_t object, | |
4575 | vm_object_t backing_object) | |
4576 | { | |
4577 | /* | |
4578 | * Make the parent shadow the next object | |
4579 | * in the chain. | |
4580 | */ | |
0a7de745 | 4581 | |
b0d623f7 | 4582 | vm_object_lock_assert_exclusive(object); |
2d21ac55 A |
4583 | vm_object_lock_assert_exclusive(backing_object); |
4584 | ||
1c79356b | 4585 | vm_object_reference(backing_object->shadow); |
1c79356b | 4586 | |
91447636 A |
4587 | assert(!object->phys_contiguous); |
4588 | assert(!backing_object->phys_contiguous); | |
1c79356b | 4589 | object->shadow = backing_object->shadow; |
91447636 | 4590 | if (object->shadow) { |
f427ee49 A |
4591 | assertf(page_aligned(object->vo_shadow_offset), |
4592 | "object %p shadow_offset 0x%llx", | |
4593 | object, object->vo_shadow_offset); | |
4594 | assertf(page_aligned(backing_object->vo_shadow_offset), | |
4595 | "backing_object %p shadow_offset 0x%llx", | |
4596 | backing_object, backing_object->vo_shadow_offset); | |
6d2010ae | 4597 | object->vo_shadow_offset += backing_object->vo_shadow_offset; |
91447636 A |
4598 | } else { |
4599 | /* no shadow, therefore no shadow offset... */ | |
6d2010ae | 4600 | object->vo_shadow_offset = 0; |
91447636 | 4601 | } |
0a7de745 | 4602 | |
1c79356b A |
4603 | /* |
4604 | * Backing object might have had a copy pointer | |
0a7de745 | 4605 | * to us. If it did, clear it. |
1c79356b A |
4606 | */ |
4607 | if (backing_object->copy == object) { | |
4608 | backing_object->copy = VM_OBJECT_NULL; | |
4609 | } | |
0a7de745 | 4610 | |
1c79356b A |
4611 | /* |
4612 | * Drop the reference count on backing_object. | |
0a7de745 | 4613 | #if TASK_SWAPPER |
1c79356b A |
4614 | * Since its ref_count was at least 2, it |
4615 | * will not vanish; so we don't need to call | |
4616 | * vm_object_deallocate. | |
593a1d5f | 4617 | * [with a caveat for "named" objects] |
0a7de745 | 4618 | * |
1c79356b A |
4619 | * The res_count on the backing object is |
4620 | * conditionally decremented. It's possible | |
4621 | * (via vm_pageout_scan) to get here with | |
4622 | * a "swapped" object, which has a 0 res_count, | |
4623 | * in which case, the backing object res_count | |
4624 | * is already down by one. | |
0a7de745 | 4625 | #else |
1c79356b A |
4626 | * Don't call vm_object_deallocate unless |
4627 | * ref_count drops to zero. | |
4628 | * | |
4629 | * The ref_count can drop to zero here if the | |
4630 | * backing object could be bypassed but not | |
4631 | * collapsed, such as when the backing object | |
4632 | * is temporary and cacheable. |
0a7de745 | 4633 | #endif |
1c79356b | 4634 | */ |
593a1d5f A |
4635 | if (backing_object->ref_count > 2 || |
4636 | (!backing_object->named && backing_object->ref_count > 1)) { | |
2d21ac55 | 4637 | vm_object_lock_assert_exclusive(backing_object); |
1c79356b | 4638 | backing_object->ref_count--; |
1c79356b A |
4639 | vm_object_unlock(backing_object); |
4640 | } else { | |
1c79356b A |
4641 | /* |
4642 | * Drop locks so that we can deallocate | |
4643 | * the backing object. | |
4644 | */ | |
4645 | ||
316670eb A |
4646 | /* |
4647 | * vm_object_collapse (the caller of this function) is | |
4648 | * now called from contexts that may not guarantee that a | |
4649 | * valid reference is held on the object... w/o a valid | |
4650 | * reference, it is unsafe and unwise (you will definitely | |
4651 | * regret it) to unlock the object and then retake the lock | |
4652 | * since the object may be terminated and recycled in between. | |
4653 | * The "activity_in_progress" reference will keep the object | |
4654 | * 'stable'. | |
4655 | */ | |
4656 | vm_object_activity_begin(object); | |
1c79356b | 4657 | vm_object_unlock(object); |
316670eb | 4658 | |
1c79356b A |
4659 | vm_object_unlock(backing_object); |
4660 | vm_object_deallocate(backing_object); | |
4661 | ||
4662 | /* | |
4663 | * Relock object. We don't have to reverify | |
4664 | * its state since vm_object_collapse will | |
4665 | * do that for us as it starts at the | |
4666 | * top of its loop. | |
4667 | */ | |
4668 | ||
4669 | vm_object_lock(object); | |
316670eb | 4670 | vm_object_activity_end(object); |
1c79356b | 4671 | } |
0a7de745 | 4672 | |
1c79356b A |
4673 | object_bypasses++; |
4674 | } | |
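/*
 * Editorial sketch (not part of the original vm_object.c): the final branch
 * of vm_object_do_bypass() only decrements backing_object->ref_count in
 * place while more than the last (non-name) reference remains; otherwise it
 * must drop both locks and go through vm_object_deallocate().  A standalone
 * restatement of that test; the sketch_* name is hypothetical.
 */
#include <stdbool.h>

static bool
sketch_can_simply_decrement(int ref_count, bool named)
{
	/* mirrors: ref_count > 2 || (!named && ref_count > 1) */
	return ref_count > 2 || (!named && ref_count > 1);
}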
0b4e3aa0 | 4675 | |
0a7de745 | 4676 | |
1c79356b A |
4677 | /* |
4678 | * vm_object_collapse: | |
4679 | * | |
4680 | * Perform an object collapse or an object bypass if appropriate. | |
4681 | * The real work of collapsing and bypassing is performed in | |
4682 | * the routines vm_object_do_collapse and vm_object_do_bypass. | |
4683 | * | |
4684 | * Requires that the object be locked and the page queues be unlocked. | |
4685 | * | |
4686 | */ | |
91447636 A |
4687 | static unsigned long vm_object_collapse_calls = 0; |
4688 | static unsigned long vm_object_collapse_objects = 0; | |
4689 | static unsigned long vm_object_collapse_do_collapse = 0; | |
4690 | static unsigned long vm_object_collapse_do_bypass = 0; | |
99c3a104 | 4691 | |
0b4e3aa0 | 4692 | __private_extern__ void |
1c79356b | 4693 | vm_object_collapse( |
0a7de745 A |
4694 | vm_object_t object, |
4695 | vm_object_offset_t hint_offset, | |
4696 | boolean_t can_bypass) | |
1c79356b | 4697 | { |
0a7de745 A |
4698 | vm_object_t backing_object; |
4699 | unsigned int rcount; | |
4700 | unsigned int size; | |
4701 | vm_object_t original_object; | |
4702 | int object_lock_type; | |
4703 | int backing_object_lock_type; | |
91447636 A |
4704 | |
4705 | vm_object_collapse_calls++; | |
0b4e3aa0 | 4706 | |
f427ee49 A |
4707 | assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset); |
4708 | ||
0a7de745 A |
4709 | if (!vm_object_collapse_allowed && |
4710 | !(can_bypass && vm_object_bypass_allowed)) { | |
1c79356b A |
4711 | return; |
4712 | } | |
4713 | ||
0a7de745 | 4714 | if (object == VM_OBJECT_NULL) { |
91447636 | 4715 | return; |
0a7de745 | 4716 | } |
91447636 A |
4717 | |
4718 | original_object = object; | |
4719 | ||
b0d623f7 A |
4720 | /* |
4721 | * The top object was locked "exclusive" by the caller. | |
4722 | * In the first pass, to determine if we can collapse the shadow chain, | |
4723 | * take a "shared" lock on the shadow objects. If we can collapse, | |
4724 | * we'll have to go down the chain again with exclusive locks. | |
4725 | */ | |
4726 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4727 | backing_object_lock_type = OBJECT_LOCK_SHARED; | |
4728 | ||
4729 | retry: | |
4730 | object = original_object; | |
4731 | vm_object_lock_assert_exclusive(object); | |
4732 | ||
1c79356b | 4733 | while (TRUE) { |
91447636 | 4734 | vm_object_collapse_objects++; |
1c79356b A |
4735 | /* |
4736 | * Verify that the conditions are right for either | |
4737 | * collapse or bypass: | |
1c79356b | 4738 | */ |
1c79356b A |
4739 | |
4740 | /* | |
4741 | * There is a backing object, and | |
4742 | */ | |
0a7de745 | 4743 | |
91447636 A |
4744 | backing_object = object->shadow; |
4745 | if (backing_object == VM_OBJECT_NULL) { | |
4746 | if (object != original_object) { | |
4747 | vm_object_unlock(object); | |
4748 | } | |
1c79356b | 4749 | return; |
91447636 | 4750 | } |
b0d623f7 A |
4751 | if (backing_object_lock_type == OBJECT_LOCK_SHARED) { |
4752 | vm_object_lock_shared(backing_object); | |
4753 | } else { | |
4754 | vm_object_lock(backing_object); | |
4755 | } | |
4756 | ||
91447636 A |
4757 | /* |
4758 | * No pages in the object are currently | |
4759 | * being paged out, and | |
4760 | */ | |
b0d623f7 A |
4761 | if (object->paging_in_progress != 0 || |
4762 | object->activity_in_progress != 0) { | |
91447636 | 4763 | /* try and collapse the rest of the shadow chain */ |
91447636 A |
4764 | if (object != original_object) { |
4765 | vm_object_unlock(object); | |
4766 | } | |
4767 | object = backing_object; | |
b0d623f7 | 4768 | object_lock_type = backing_object_lock_type; |
91447636 A |
4769 | continue; |
4770 | } | |
4771 | ||
1c79356b A |
4772 | /* |
4773 | * ... | |
4774 | * The backing object is not read_only, | |
4775 | * and no pages in the backing object are | |
4776 | * currently being paged out. | |
4777 | * The backing object is internal. | |
4778 | * | |
4779 | */ | |
0a7de745 | 4780 | |
1c79356b | 4781 | if (!backing_object->internal || |
b0d623f7 A |
4782 | backing_object->paging_in_progress != 0 || |
4783 | backing_object->activity_in_progress != 0) { | |
91447636 A |
4784 | /* try and collapse the rest of the shadow chain */ |
4785 | if (object != original_object) { | |
4786 | vm_object_unlock(object); | |
4787 | } | |
4788 | object = backing_object; | |
b0d623f7 | 4789 | object_lock_type = backing_object_lock_type; |
91447636 | 4790 | continue; |
1c79356b | 4791 | } |
fe8ab488 A |
4792 | |
4793 | /* | |
4794 | * Purgeable objects are not supposed to engage in | |
4795 | * copy-on-write activities, so should not have | |
4796 | * any shadow objects or be a shadow object to another | |
4797 | * object. | |
4798 | * Collapsing a purgeable object would require some | |
4799 | * updates to the purgeable compressed ledgers. | |
4800 | */ | |
4801 | if (object->purgable != VM_PURGABLE_DENY || | |
4802 | backing_object->purgable != VM_PURGABLE_DENY) { | |
4803 | panic("vm_object_collapse() attempting to collapse " | |
0a7de745 A |
4804 | "purgeable object: %p(%d) %p(%d)\n", |
4805 | object, object->purgable, | |
4806 | backing_object, backing_object->purgable); | |
fe8ab488 A |
4807 | /* try and collapse the rest of the shadow chain */ |
4808 | if (object != original_object) { | |
4809 | vm_object_unlock(object); | |
4810 | } | |
4811 | object = backing_object; | |
4812 | object_lock_type = backing_object_lock_type; | |
4813 | continue; | |
4814 | } | |
0a7de745 | 4815 | |
1c79356b A |
4816 | /* |
4817 | * The backing object can't be a copy-object: | |
4818 | * the shadow_offset for the copy-object must stay | |
4819 | * as 0. Furthermore (for the 'we have all the | |
4820 | * pages' case), if we bypass backing_object and | |
4821 | * just shadow the next object in the chain, old | |
4822 | * pages from that object would then have to be copied | |
4823 | * BOTH into the (former) backing_object and into the | |
4824 | * parent object. | |
4825 | */ | |
4826 | if (backing_object->shadow != VM_OBJECT_NULL && | |
55e303ae | 4827 | backing_object->shadow->copy == backing_object) { |
91447636 A |
4828 | /* try and collapse the rest of the shadow chain */ |
4829 | if (object != original_object) { | |
4830 | vm_object_unlock(object); | |
4831 | } | |
4832 | object = backing_object; | |
b0d623f7 | 4833 | object_lock_type = backing_object_lock_type; |
91447636 | 4834 | continue; |
1c79356b A |
4835 | } |
4836 | ||
4837 | /* | |
4838 | * We can now try to either collapse the backing | |
4839 | * object (if the parent is the only reference to | |
4840 | * it) or (perhaps) remove the parent's reference | |
4841 | * to it. | |
1c79356b | 4842 | * |
0b4e3aa0 A |
4843 | * If there is exactly one reference to the backing |
4844 | * object, we may be able to collapse it into the | |
4845 | * parent. | |
1c79356b | 4846 | * |
55e303ae A |
4847 | * As long as one of the objects is still not known |
4848 | * to the pager, we can collapse them. | |
1c79356b | 4849 | */ |
1c79356b | 4850 | if (backing_object->ref_count == 1 && |
fe8ab488 | 4851 | (vm_object_collapse_compressor_allowed || |
0a7de745 A |
4852 | !object->pager_created |
4853 | || (!backing_object->pager_created) | |
55e303ae | 4854 | ) && vm_object_collapse_allowed) { |
1c79356b | 4855 | /* |
b0d623f7 | 4856 | * We need the exclusive lock on the VM objects. |
1c79356b | 4857 | */ |
b0d623f7 A |
4858 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { |
4859 | /* | |
0a7de745 | 4860 | * We have an object and its shadow locked |
b0d623f7 A |
4861 | * "shared". We can't just upgrade the locks |
4862 | * to "exclusive", as some other thread might | |
4863 | * also have these objects locked "shared" and | |
0a7de745 | 4864 | * attempt to upgrade one or the other to |
b0d623f7 A |
4865 | * "exclusive". The upgrades would block |
4866 | * forever waiting for the other "shared" locks | |
4867 | * to get released. | |
4868 | * So we have to release the locks and go | |
4869 | * down the shadow chain again (since it could | |
4870 | * have changed) with "exclusive" locking. | |
4871 | */ | |
1c79356b | 4872 | vm_object_unlock(backing_object); |
0a7de745 | 4873 | if (object != original_object) { |
b0d623f7 | 4874 | vm_object_unlock(object); |
0a7de745 | 4875 | } |
b0d623f7 A |
4876 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
4877 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
4878 | goto retry; | |
1c79356b A |
4879 | } |
4880 | ||
4881 | /* | |
4882 | * Collapse the object with its backing | |
4883 | * object, and try again with the object's | |
4884 | * new backing object. | |
4885 | */ | |
4886 | ||
4887 | vm_object_do_collapse(object, backing_object); | |
91447636 | 4888 | vm_object_collapse_do_collapse++; |
1c79356b A |
4889 | continue; |
4890 | } | |
4891 | ||
1c79356b A |
4892 | /* |
4893 | * Collapsing the backing object was not possible | |
4894 | * or permitted, so let's try bypassing it. | |
4895 | */ | |
4896 | ||
0a7de745 | 4897 | if (!(can_bypass && vm_object_bypass_allowed)) { |
91447636 A |
4898 | /* try and collapse the rest of the shadow chain */ |
4899 | if (object != original_object) { | |
4900 | vm_object_unlock(object); | |
4901 | } | |
4902 | object = backing_object; | |
b0d623f7 | 4903 | object_lock_type = backing_object_lock_type; |
91447636 | 4904 | continue; |
1c79356b A |
4905 | } |
4906 | ||
0b4e3aa0 | 4907 | |
1c79356b | 4908 | /* |
55e303ae A |
4909 | * If the object doesn't have all its pages present, |
4910 | * we have to make sure no pages in the backing object | |
4911 | * "show through" before bypassing it. | |
1c79356b | 4912 | */ |
39236c6e | 4913 | size = (unsigned int)atop(object->vo_size); |
55e303ae | 4914 | rcount = object->resident_page_count; |
99c3a104 | 4915 | |
55e303ae | 4916 | if (rcount != size) { |
0a7de745 A |
4917 | vm_object_offset_t offset; |
4918 | vm_object_offset_t backing_offset; | |
4919 | unsigned int backing_rcount; | |
55e303ae A |
4920 | |
4921 | /* | |
4922 | * If the backing object has a pager but no pagemap, | |
4923 | * then we cannot bypass it, because we don't know | |
4924 | * what pages it has. | |
4925 | */ | |
39037602 | 4926 | if (backing_object->pager_created) { |
91447636 A |
4927 | /* try and collapse the rest of the shadow chain */ |
4928 | if (object != original_object) { | |
4929 | vm_object_unlock(object); | |
4930 | } | |
4931 | object = backing_object; | |
b0d623f7 | 4932 | object_lock_type = backing_object_lock_type; |
91447636 | 4933 | continue; |
55e303ae | 4934 | } |
1c79356b | 4935 | |
55e303ae A |
4936 | /* |
4937 | * If the object has a pager but no pagemap, | |
4938 | * then we cannot bypass it, because we don't know | |
4939 | * what pages it has. | |
4940 | */ | |
39037602 | 4941 | if (object->pager_created) { |
91447636 A |
4942 | /* try and collapse the rest of the shadow chain */ |
4943 | if (object != original_object) { | |
4944 | vm_object_unlock(object); | |
4945 | } | |
4946 | object = backing_object; | |
b0d623f7 | 4947 | object_lock_type = backing_object_lock_type; |
91447636 | 4948 | continue; |
55e303ae | 4949 | } |
0b4e3aa0 | 4950 | |
99c3a104 A |
4951 | backing_offset = object->vo_shadow_offset; |
4952 | backing_rcount = backing_object->resident_page_count; | |
4953 | ||
0a7de745 A |
4954 | if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) { |
4955 | /* | |
99c3a104 A |
4956 | * we have enough pages in the backing object to guarantee that |
4957 | * at least 1 of them must be 'uncovered' by a resident page | |
4958 | * in the object we're evaluating, so move on and | |
4959 | * try to collapse the rest of the shadow chain | |
4960 | */ | |
39236c6e A |
4961 | if (object != original_object) { |
4962 | vm_object_unlock(object); | |
4963 | } | |
4964 | object = backing_object; | |
4965 | object_lock_type = backing_object_lock_type; | |
4966 | continue; | |
99c3a104 A |
4967 | } |
4968 | ||
55e303ae A |
4969 | /* |
4970 | * If all of the pages in the backing object are | |
4971 | * shadowed by the parent object, the parent | |
4972 | * object no longer has to shadow the backing | |
4973 | * object; it can shadow the next one in the | |
4974 | * chain. | |
4975 | * | |
4976 | * If the backing object has existence info, | |
4977 | * we must examine its existence info |
4978 | * as well. | |
4979 | * | |
4980 | */ | |
1c79356b | 4981 | |
0a7de745 A |
4982 | #define EXISTS_IN_OBJECT(obj, off, rc) \ |
4983 | ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ | |
4984 | == VM_EXTERNAL_STATE_EXISTS) || \ | |
99c3a104 | 4985 | ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) |
55e303ae A |
4986 | |
4987 | /* | |
4988 | * Check the hint location first | |
4989 | * (since it is often the quickest way out of here). | |
4990 | */ | |
0a7de745 | 4991 | if (object->cow_hint != ~(vm_offset_t)0) { |
55e303ae | 4992 | hint_offset = (vm_object_offset_t)object->cow_hint; |
0a7de745 | 4993 | } else { |
55e303ae | 4994 | hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ? |
0a7de745 A |
4995 | (hint_offset - 8 * PAGE_SIZE_64) : 0; |
4996 | } | |
55e303ae A |
4997 | |
4998 | if (EXISTS_IN_OBJECT(backing_object, hint_offset + | |
0a7de745 | 4999 | backing_offset, backing_rcount) && |
55e303ae A |
5000 | !EXISTS_IN_OBJECT(object, hint_offset, rcount)) { |
5001 | /* dependency right at the hint */ | |
b0d623f7 | 5002 | object->cow_hint = (vm_offset_t) hint_offset; /* atomic */ |
91447636 A |
5003 | /* try and collapse the rest of the shadow chain */ |
5004 | if (object != original_object) { | |
5005 | vm_object_unlock(object); | |
5006 | } | |
5007 | object = backing_object; | |
b0d623f7 | 5008 | object_lock_type = backing_object_lock_type; |
91447636 | 5009 | continue; |
0b4e3aa0 | 5010 | } |
55e303ae A |
5011 | |
5012 | /* | |
5013 | * If the object's window onto the backing_object | |
5014 | * is large compared to the number of resident | |
5015 | * pages in the backing object, it makes sense to | |
5016 | * walk the backing_object's resident pages first. | |
5017 | * | |
99c3a104 | 5018 | * NOTE: Pages may be in both the existence map and/or |
0a7de745 | 5019 | * resident, so if we don't find a dependency while |
99c3a104 A |
5020 | * walking the backing object's resident page list |
5021 | * directly, and there is an existence map, we'll have | |
5022 | * to run the offset based 2nd pass. Because we may | |
5023 | * have to run both passes, we need to be careful | |
5024 | * not to decrement 'rcount' in the 1st pass | |
55e303ae | 5025 | */ |
99c3a104 | 5026 | if (backing_rcount && backing_rcount < (size / 8)) { |
55e303ae A |
5027 | unsigned int rc = rcount; |
5028 | vm_page_t p; | |
5029 | ||
5030 | backing_rcount = backing_object->resident_page_count; | |
39037602 | 5031 | p = (vm_page_t)vm_page_queue_first(&backing_object->memq); |
55e303ae | 5032 | do { |
d9a64523 | 5033 | offset = (p->vmp_offset - backing_offset); |
99c3a104 | 5034 | |
6d2010ae | 5035 | if (offset < object->vo_size && |
55e303ae A |
5036 | offset != hint_offset && |
5037 | !EXISTS_IN_OBJECT(object, offset, rc)) { | |
5038 | /* found a dependency */ | |
b0d623f7 | 5039 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
0a7de745 | 5040 | |
91447636 | 5041 | break; |
55e303ae | 5042 | } |
d9a64523 | 5043 | p = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
55e303ae | 5044 | } while (--backing_rcount); |
0a7de745 | 5045 | if (backing_rcount != 0) { |
91447636 A |
5046 | /* try and collapse the rest of the shadow chain */ |
5047 | if (object != original_object) { | |
5048 | vm_object_unlock(object); | |
5049 | } | |
5050 | object = backing_object; | |
b0d623f7 | 5051 | object_lock_type = backing_object_lock_type; |
91447636 A |
5052 | continue; |
5053 | } | |
0b4e3aa0 | 5054 | } |
55e303ae A |
5055 | |
5056 | /* | |
5057 | * Walk through the offsets looking for pages in the | |
5058 | * backing object that show through to the object. | |
5059 | */ | |
39037602 | 5060 | if (backing_rcount) { |
55e303ae | 5061 | offset = hint_offset; |
55e303ae | 5062 | |
0a7de745 A |
5063 | while ((offset = |
5064 | (offset + PAGE_SIZE_64 < object->vo_size) ? | |
5065 | (offset + PAGE_SIZE_64) : 0) != hint_offset) { | |
55e303ae | 5066 | if (EXISTS_IN_OBJECT(backing_object, offset + |
0a7de745 | 5067 | backing_offset, backing_rcount) && |
55e303ae A |
5068 | !EXISTS_IN_OBJECT(object, offset, rcount)) { |
5069 | /* found a dependency */ | |
b0d623f7 | 5070 | object->cow_hint = (vm_offset_t) offset; /* atomic */ |
91447636 | 5071 | break; |
55e303ae A |
5072 | } |
5073 | } | |
91447636 A |
5074 | if (offset != hint_offset) { |
5075 | /* try and collapse the rest of the shadow chain */ | |
5076 | if (object != original_object) { | |
5077 | vm_object_unlock(object); | |
5078 | } | |
5079 | object = backing_object; | |
b0d623f7 | 5080 | object_lock_type = backing_object_lock_type; |
91447636 A |
5081 | continue; |
5082 | } | |
0b4e3aa0 A |
5083 | } |
5084 | } | |
1c79356b | 5085 | |
b0d623f7 A |
5086 | /* |
5087 | * We need "exclusive" locks on the 2 VM objects. | |
5088 | */ | |
5089 | if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { | |
5090 | vm_object_unlock(backing_object); | |
0a7de745 | 5091 | if (object != original_object) { |
b0d623f7 | 5092 | vm_object_unlock(object); |
0a7de745 | 5093 | } |
b0d623f7 A |
5094 | object_lock_type = OBJECT_LOCK_EXCLUSIVE; |
5095 | backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; | |
5096 | goto retry; | |
5097 | } | |
5098 | ||
55e303ae A |
5099 | /* reset the offset hint for any objects deeper in the chain */ |
5100 | object->cow_hint = (vm_offset_t)0; | |
1c79356b A |
5101 | |
5102 | /* | |
5103 | * All interesting pages in the backing object | |
5104 | * already live in the parent or its pager. | |
5105 | * Thus we can bypass the backing object. | |
5106 | */ | |
5107 | ||
5108 | vm_object_do_bypass(object, backing_object); | |
91447636 | 5109 | vm_object_collapse_do_bypass++; |
1c79356b A |
5110 | |
5111 | /* | |
5112 | * Try again with this object's new backing object. | |
5113 | */ | |
5114 | ||
5115 | continue; | |
5116 | } | |
91447636 | 5117 | |
fe8ab488 A |
5118 | /* NOT REACHED */ |
5119 | /* | |
0a7de745 A |
5120 | * if (object != original_object) { |
5121 | * vm_object_unlock(object); | |
5122 | * } | |
5123 | */ | |
1c79356b A |
5124 | } |
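/*
 * Editorial sketch (not part of the original vm_object.c): the shared ->
 * exclusive promotion in vm_object_collapse() is done by releasing the
 * shadow-chain locks and re-walking the chain with exclusive locks (the
 * "goto retry"), because two holders of shared locks that both try to
 * upgrade in place can block each other forever.  Loosely modeled below
 * with POSIX rwlocks; userland-only, names are illustrative.
 */
#include <pthread.h>

static void
sketch_promote_two_locks(pthread_rwlock_t *a, pthread_rwlock_t *b)
{
	/* caller holds both as readers; an in-place upgrade could deadlock */
	pthread_rwlock_unlock(b);
	pthread_rwlock_unlock(a);

	/* retake as writers; all state must be revalidated afterwards */
	pthread_rwlock_wrlock(a);
	pthread_rwlock_wrlock(b);
}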
5125 | ||
5126 | /* | |
5127 | * Routine: vm_object_page_remove: [internal] | |
5128 | * Purpose: | |
5129 | * Removes all physical pages in the specified | |
5130 | * object range from the object's list of pages. | |
5131 | * | |
5132 | * In/out conditions: | |
5133 | * The object must be locked. | |
5134 | * The object must not have paging_in_progress, usually | |
5135 | * guaranteed by not having a pager. | |
5136 | */ | |
5137 | unsigned int vm_object_page_remove_lookup = 0; | |
5138 | unsigned int vm_object_page_remove_iterate = 0; | |
5139 | ||
0b4e3aa0 | 5140 | __private_extern__ void |
1c79356b | 5141 | vm_object_page_remove( |
0a7de745 A |
5142 | vm_object_t object, |
5143 | vm_object_offset_t start, | |
5144 | vm_object_offset_t end) | |
1c79356b | 5145 | { |
0a7de745 | 5146 | vm_page_t p, next; |
1c79356b A |
5147 | |
5148 | /* | |
5149 | * One and two page removals are most popular. | |
5150 | * The factor of 16 here is somewhat arbitrary. | |
5151 | * It balances vm_object_lookup vs iteration. | |
5152 | */ | |
5153 | ||
0a7de745 | 5154 | if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) { |
1c79356b A |
5155 | vm_object_page_remove_lookup++; |
5156 | ||
5157 | for (; start < end; start += PAGE_SIZE_64) { | |
5158 | p = vm_page_lookup(object, start); | |
5159 | if (p != VM_PAGE_NULL) { | |
d9a64523 | 5160 | assert(!p->vmp_cleaning && !p->vmp_laundry); |
0a7de745 A |
5161 | if (!p->vmp_fictitious && p->vmp_pmapped) { |
5162 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); | |
5163 | } | |
1c79356b A |
5164 | VM_PAGE_FREE(p); |
5165 | } | |
5166 | } | |
5167 | } else { | |
5168 | vm_object_page_remove_iterate++; | |
5169 | ||
39037602 A |
5170 | p = (vm_page_t) vm_page_queue_first(&object->memq); |
5171 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) { | |
d9a64523 A |
5172 | next = (vm_page_t) vm_page_queue_next(&p->vmp_listq); |
5173 | if ((start <= p->vmp_offset) && (p->vmp_offset < end)) { | |
5174 | assert(!p->vmp_cleaning && !p->vmp_laundry); | |
0a7de745 A |
5175 | if (!p->vmp_fictitious && p->vmp_pmapped) { |
5176 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); | |
5177 | } | |
1c79356b A |
5178 | VM_PAGE_FREE(p); |
5179 | } | |
5180 | p = next; | |
5181 | } | |
5182 | } | |
5183 | } | |
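/*
 * Editorial sketch (not part of the original vm_object.c): the heuristic
 * above chooses between per-offset hash lookups and a full walk of the
 * object's resident-page queue; the factor of 16 is the same arbitrary
 * balance point the comment mentions.  Standalone model; the sketch_*
 * names and the 4KB page size are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

#define SKETCH_PAGE_SHIFT 12            /* assumption: 4KB pages */

static bool
sketch_remove_by_lookup(uint64_t start, uint64_t end, unsigned int resident_pages)
{
	uint64_t range_pages = (end - start) >> SKETCH_PAGE_SHIFT;

	/* small ranges: a handful of lookups beats scanning every resident page */
	return range_pages < resident_pages / 16;
}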
5184 | ||
0b4e3aa0 | 5185 | |
1c79356b A |
5186 | /* |
5187 | * Routine: vm_object_coalesce | |
5188 | * Function: Coalesces two objects backing up adjoining | |
5189 | * regions of memory into a single object. | |
5190 | * | |
5191 | * returns TRUE if objects were combined. | |
5192 | * | |
5193 | * NOTE: Only works at the moment if the second object is NULL - | |
5194 | * if it's not, which object do we lock first? | |
5195 | * | |
5196 | * Parameters: | |
5197 | * prev_object First object to coalesce | |
5198 | * prev_offset Offset into prev_object | |
5199 | * next_object Second object to coalesce |
5200 | * next_offset Offset into next_object | |
5201 | * | |
5202 | * prev_size Size of reference to prev_object | |
5203 | * next_size Size of reference to next_object | |
5204 | * | |
5205 | * Conditions: | |
5206 | * The object(s) must *not* be locked. The map must be locked | |
5207 | * to preserve the reference to the object(s). | |
5208 | */ | |
0b4e3aa0 | 5209 | static int vm_object_coalesce_count = 0; |
1c79356b | 5210 | |
0b4e3aa0 | 5211 | __private_extern__ boolean_t |
1c79356b | 5212 | vm_object_coalesce( |
0a7de745 A |
5213 | vm_object_t prev_object, |
5214 | vm_object_t next_object, | |
5215 | vm_object_offset_t prev_offset, | |
91447636 | 5216 | __unused vm_object_offset_t next_offset, |
0a7de745 A |
5217 | vm_object_size_t prev_size, |
5218 | vm_object_size_t next_size) | |
1c79356b | 5219 | { |
0a7de745 | 5220 | vm_object_size_t newsize; |
1c79356b | 5221 | |
0a7de745 | 5222 | #ifdef lint |
1c79356b | 5223 | next_offset++; |
0a7de745 | 5224 | #endif /* lint */ |
1c79356b A |
5225 | |
5226 | if (next_object != VM_OBJECT_NULL) { | |
0a7de745 | 5227 | return FALSE; |
1c79356b A |
5228 | } |
5229 | ||
5230 | if (prev_object == VM_OBJECT_NULL) { | |
0a7de745 | 5231 | return TRUE; |
1c79356b A |
5232 | } |
5233 | ||
1c79356b A |
5234 | vm_object_lock(prev_object); |
5235 | ||
5236 | /* | |
5237 | * Try to collapse the object first | |
5238 | */ | |
0c530ab8 | 5239 | vm_object_collapse(prev_object, prev_offset, TRUE); |
1c79356b A |
5240 | |
5241 | /* | |
5242 | * Can't coalesce if pages not mapped to | |
5243 | * prev_entry may be in use any way: | |
5244 | * . more than one reference | |
5245 | * . paged out | |
5246 | * . shadows another object | |
5247 | * . has a copy elsewhere | |
2d21ac55 | 5248 | * . is purgeable |
1c79356b A |
5249 | * . paging references (pages might be in page-list) |
5250 | */ | |
5251 | ||
5252 | if ((prev_object->ref_count > 1) || | |
5253 | prev_object->pager_created || | |
5254 | (prev_object->shadow != VM_OBJECT_NULL) || | |
5255 | (prev_object->copy != VM_OBJECT_NULL) || | |
5256 | (prev_object->true_share != FALSE) || | |
2d21ac55 | 5257 | (prev_object->purgable != VM_PURGABLE_DENY) || |
b0d623f7 A |
5258 | (prev_object->paging_in_progress != 0) || |
5259 | (prev_object->activity_in_progress != 0)) { | |
1c79356b | 5260 | vm_object_unlock(prev_object); |
0a7de745 | 5261 | return FALSE; |
1c79356b A |
5262 | } |
5263 | ||
5264 | vm_object_coalesce_count++; | |
5265 | ||
5266 | /* | |
5267 | * Remove any pages that may still be in the object from | |
5268 | * a previous deallocation. | |
5269 | */ | |
5270 | vm_object_page_remove(prev_object, | |
0a7de745 A |
5271 | prev_offset + prev_size, |
5272 | prev_offset + prev_size + next_size); | |
1c79356b A |
5273 | |
5274 | /* | |
5275 | * Extend the object if necessary. | |
5276 | */ | |
5277 | newsize = prev_offset + prev_size + next_size; | |
6d2010ae | 5278 | if (newsize > prev_object->vo_size) { |
f427ee49 A |
5279 | assertf(page_aligned(newsize), |
5280 | "object %p size 0x%llx", | |
5281 | prev_object, (uint64_t)newsize); | |
6d2010ae | 5282 | prev_object->vo_size = newsize; |
1c79356b A |
5283 | } |
5284 | ||
5285 | vm_object_unlock(prev_object); | |
0a7de745 | 5286 | return TRUE; |
1c79356b A |
5287 | } |
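/*
 * Editorial sketch (not part of the original vm_object.c): once the checks
 * above pass, coalescing simply appends the next region, so the object's new
 * size is prev_offset + prev_size + next_size and the object only ever
 * grows.  Standalone model of that computation; the sketch_* name is
 * hypothetical and the inputs are assumed page-aligned.
 */
#include <stdint.h>

static uint64_t
sketch_coalesced_size(uint64_t prev_offset, uint64_t prev_size,
    uint64_t next_size, uint64_t current_size)
{
	uint64_t newsize = prev_offset + prev_size + next_size;

	return (newsize > current_size) ? newsize : current_size;
}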
5288 | ||
0b4e3aa0 A |
5289 | kern_return_t |
5290 | vm_object_populate_with_private( | |
0a7de745 A |
5291 | vm_object_t object, |
5292 | vm_object_offset_t offset, | |
5293 | ppnum_t phys_page, | |
5294 | vm_size_t size) | |
0b4e3aa0 | 5295 | { |
0a7de745 A |
5296 | ppnum_t base_page; |
5297 | vm_object_offset_t base_offset; | |
0b4e3aa0 A |
5298 | |
5299 | ||
0a7de745 | 5300 | if (!object->private) { |
0b4e3aa0 | 5301 | return KERN_FAILURE; |
0a7de745 | 5302 | } |
0b4e3aa0 | 5303 | |
55e303ae | 5304 | base_page = phys_page; |
0b4e3aa0 A |
5305 | |
5306 | vm_object_lock(object); | |
316670eb A |
5307 | |
5308 | if (!object->phys_contiguous) { | |
0a7de745 | 5309 | vm_page_t m; |
316670eb A |
5310 | |
5311 | if ((base_offset = trunc_page_64(offset)) != offset) { | |
0b4e3aa0 A |
5312 | vm_object_unlock(object); |
5313 | return KERN_FAILURE; | |
5314 | } | |
5315 | base_offset += object->paging_offset; | |
316670eb A |
5316 | |
5317 | while (size) { | |
0b4e3aa0 | 5318 | m = vm_page_lookup(object, base_offset); |
316670eb A |
5319 | |
5320 | if (m != VM_PAGE_NULL) { | |
d9a64523 | 5321 | if (m->vmp_fictitious) { |
39037602 | 5322 | if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) { |
2d21ac55 | 5323 | vm_page_lockspin_queues(); |
d9a64523 | 5324 | m->vmp_private = TRUE; |
b0d623f7 A |
5325 | vm_page_unlock_queues(); |
5326 | ||
d9a64523 | 5327 | m->vmp_fictitious = FALSE; |
39037602 | 5328 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
0b4e3aa0 | 5329 | } |
39037602 | 5330 | } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) { |
0a7de745 | 5331 | if (!m->vmp_private) { |
316670eb A |
5332 | /* |
5333 | * we'd leak a real page... that can't be right | |
5334 | */ | |
5335 | panic("vm_object_populate_with_private - %p not private", m); | |
5336 | } | |
d9a64523 | 5337 | if (m->vmp_pmapped) { |
0a7de745 | 5338 | /* |
2d21ac55 A |
5339 | * pmap call to clear old mapping |
5340 | */ | |
0a7de745 | 5341 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); |
2d21ac55 | 5342 | } |
39037602 | 5343 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
0b4e3aa0 A |
5344 | } |
5345 | } else { | |
c3c9b80d | 5346 | m = vm_page_grab_fictitious(TRUE); |
b0d623f7 A |
5347 | |
5348 | /* | |
5349 | * private normally requires lock_queues but since we | |
5350 | * are initializing the page, its not necessary here | |
5351 | */ | |
d9a64523 A |
5352 | m->vmp_private = TRUE; |
5353 | m->vmp_fictitious = FALSE; | |
39037602 | 5354 | VM_PAGE_SET_PHYS_PAGE(m, base_page); |
d9a64523 A |
5355 | m->vmp_unusual = TRUE; |
5356 | m->vmp_busy = FALSE; | |
b0d623f7 | 5357 | |
0a7de745 | 5358 | vm_page_insert(m, object, base_offset); |
0b4e3aa0 | 5359 | } |
0a7de745 | 5360 | base_page++; /* Go to the next physical page */ |
0b4e3aa0 A |
5361 | base_offset += PAGE_SIZE; |
5362 | size -= PAGE_SIZE; | |
5363 | } | |
5364 | } else { | |
5365 | /* NOTE: we should check the original settings here */ | |
5366 | /* if we have a size > zero a pmap call should be made */ | |
0a7de745 | 5367 | /* to disable the range */ |
0b4e3aa0 A |
5368 | |
5369 | /* pmap_? */ | |
0a7de745 | 5370 | |
0b4e3aa0 A |
5371 | /* shadows on contiguous memory are not allowed */ |
5372 | /* we therefore can use the offset field */ | |
6d2010ae | 5373 | object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT; |
f427ee49 A |
5374 | assertf(page_aligned(size), |
5375 | "object %p size 0x%llx", | |
5376 | object, (uint64_t)size); | |
6d2010ae | 5377 | object->vo_size = size; |
0b4e3aa0 A |
5378 | } |
5379 | vm_object_unlock(object); | |
316670eb | 5380 | |
0b4e3aa0 A |
5381 | return KERN_SUCCESS; |
5382 | } | |
5383 | ||
1c79356b A |
5384 | |
5385 | kern_return_t | |
0b4e3aa0 | 5386 | memory_object_create_named( |
0a7de745 A |
5387 | memory_object_t pager, |
5388 | memory_object_offset_t size, | |
5389 | memory_object_control_t *control) | |
1c79356b | 5390 | { |
0a7de745 | 5391 | vm_object_t object; |
1c79356b | 5392 | |
0b4e3aa0 | 5393 | *control = MEMORY_OBJECT_CONTROL_NULL; |
0a7de745 | 5394 | if (pager == MEMORY_OBJECT_NULL) { |
0b4e3aa0 | 5395 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 5396 | } |
1c79356b | 5397 | |
5ba3f43e | 5398 | object = vm_object_memory_object_associate(pager, |
0a7de745 A |
5399 | VM_OBJECT_NULL, |
5400 | size, | |
5401 | TRUE); | |
5ba3f43e A |
5402 | if (object == VM_OBJECT_NULL) { |
5403 | return KERN_INVALID_OBJECT; | |
0b4e3aa0 | 5404 | } |
0a7de745 | 5405 | |
0b4e3aa0 A |
5406 | /* wait for object (if any) to be ready */ |
5407 | if (object != VM_OBJECT_NULL) { | |
5408 | vm_object_lock(object); | |
5409 | object->named = TRUE; | |
5410 | while (!object->pager_ready) { | |
9bccf70c | 5411 | vm_object_sleep(object, |
0a7de745 A |
5412 | VM_OBJECT_EVENT_PAGER_READY, |
5413 | THREAD_UNINT); | |
0b4e3aa0 | 5414 | } |
91447636 | 5415 | *control = object->pager_control; |
0b4e3aa0 A |
5416 | vm_object_unlock(object); |
5417 | } | |
0a7de745 | 5418 | return KERN_SUCCESS; |
0b4e3aa0 | 5419 | } |
1c79356b | 5420 | |
1c79356b | 5421 | |
0b4e3aa0 A |
5422 | /* |
5423 | * Routine: memory_object_recover_named [user interface] | |
5424 | * Purpose: | |
5425 | * Attempt to recover a named reference for a VM object. | |
5426 | * VM will verify that the object has not already started | |
5427 | * down the termination path, and if it has, will optionally | |
5428 | * wait for that to finish. | |
5429 | * Returns: | |
5430 | * KERN_SUCCESS - we recovered a named reference on the object | |
5431 | * KERN_FAILURE - we could not recover a reference (object dead) | |
5432 | * KERN_INVALID_ARGUMENT - bad memory object control | |
5433 | */ | |
5434 | kern_return_t | |
5435 | memory_object_recover_named( | |
0a7de745 A |
5436 | memory_object_control_t control, |
5437 | boolean_t wait_on_terminating) | |
0b4e3aa0 | 5438 | { |
0a7de745 | 5439 | vm_object_t object; |
1c79356b | 5440 | |
0b4e3aa0 A |
5441 | object = memory_object_control_to_vm_object(control); |
5442 | if (object == VM_OBJECT_NULL) { | |
0a7de745 | 5443 | return KERN_INVALID_ARGUMENT; |
0b4e3aa0 | 5444 | } |
0b4e3aa0 A |
5445 | restart: |
5446 | vm_object_lock(object); | |
1c79356b | 5447 | |
0b4e3aa0 | 5448 | if (object->terminating && wait_on_terminating) { |
0a7de745 A |
5449 | vm_object_wait(object, |
5450 | VM_OBJECT_EVENT_PAGING_IN_PROGRESS, | |
5451 | THREAD_UNINT); | |
0b4e3aa0 A |
5452 | goto restart; |
5453 | } | |
5454 | ||
5455 | if (!object->alive) { | |
0b4e3aa0 A |
5456 | vm_object_unlock(object); |
5457 | return KERN_FAILURE; | |
1c79356b A |
5458 | } |
5459 | ||
0b4e3aa0 | 5460 | if (object->named == TRUE) { |
0b4e3aa0 A |
5461 | vm_object_unlock(object); |
5462 | return KERN_SUCCESS; | |
5463 | } | |
0b4e3aa0 | 5464 | object->named = TRUE; |
2d21ac55 | 5465 | vm_object_lock_assert_exclusive(object); |
0b4e3aa0 | 5466 | object->ref_count++; |
0b4e3aa0 | 5467 | while (!object->pager_ready) { |
9bccf70c | 5468 | vm_object_sleep(object, |
0a7de745 A |
5469 | VM_OBJECT_EVENT_PAGER_READY, |
5470 | THREAD_UNINT); | |
0b4e3aa0 A |
5471 | } |
5472 | vm_object_unlock(object); | |
0a7de745 | 5473 | return KERN_SUCCESS; |
1c79356b A |
5474 | } |
5475 | ||
0b4e3aa0 A |
5476 | |
5477 | /* | |
0a7de745 | 5478 | * vm_object_release_name: |
0b4e3aa0 A |
5479 | * |
5480 | * Enforces the name semantic on the memory_object reference count decrement. |
5481 | * This routine should not be called unless the caller holds a name |
5482 | * reference gained through memory_object_create_named. |
5483 | * | |
5484 | * If the TERMINATE_IDLE flag is set, the call will return if the |
5485 | * reference count is not 1, i.e. the object is idle with the only remaining |
5486 | * reference being the name. |
5487 | * If the decision is made to proceed, the object's "named" flag is set to |
5488 | * false and the reference count is decremented. If the RESPECT_CACHE |
0a7de745 | 5489 | * flag is set and the reference count has gone to zero, the |
0b4e3aa0 A |
5490 | * memory_object is checked to see if it is cacheable; otherwise, when |
5491 | * the reference count is zero, it is simply terminated. |
5492 | */ | |
5493 | ||
5494 | __private_extern__ kern_return_t | |
5495 | vm_object_release_name( | |
0a7de745 A |
5496 | vm_object_t object, |
5497 | int flags) | |
1c79356b | 5498 | { |
0a7de745 A |
5499 | vm_object_t shadow; |
5500 | boolean_t original_object = TRUE; | |
1c79356b | 5501 | |
0b4e3aa0 | 5502 | while (object != VM_OBJECT_NULL) { |
0b4e3aa0 | 5503 | vm_object_lock(object); |
b0d623f7 | 5504 | |
0b4e3aa0 | 5505 | assert(object->alive); |
0a7de745 | 5506 | if (original_object) { |
0b4e3aa0 | 5507 | assert(object->named); |
0a7de745 | 5508 | } |
0b4e3aa0 A |
5509 | assert(object->ref_count > 0); |
5510 | ||
5511 | /* | |
5512 | * We have to wait for initialization before | |
5513 | * destroying or caching the object. | |
5514 | */ | |
5515 | ||
5516 | if (object->pager_created && !object->pager_initialized) { | |
5517 | assert(!object->can_persist); | |
5518 | vm_object_assert_wait(object, | |
0a7de745 A |
5519 | VM_OBJECT_EVENT_INITIALIZED, |
5520 | THREAD_UNINT); | |
0b4e3aa0 | 5521 | vm_object_unlock(object); |
9bccf70c | 5522 | thread_block(THREAD_CONTINUE_NULL); |
0b4e3aa0 | 5523 | continue; |
1c79356b A |
5524 | } |
5525 | ||
0b4e3aa0 | 5526 | if (((object->ref_count > 1) |
0a7de745 A |
5527 | && (flags & MEMORY_OBJECT_TERMINATE_IDLE)) |
5528 | || (object->terminating)) { | |
0b4e3aa0 | 5529 | vm_object_unlock(object); |
0b4e3aa0 A |
5530 | return KERN_FAILURE; |
5531 | } else { | |
5532 | if (flags & MEMORY_OBJECT_RELEASE_NO_OP) { | |
5533 | vm_object_unlock(object); | |
0b4e3aa0 | 5534 | return KERN_SUCCESS; |
1c79356b | 5535 | } |
0b4e3aa0 | 5536 | } |
0a7de745 | 5537 | |
0b4e3aa0 | 5538 | if ((flags & MEMORY_OBJECT_RESPECT_CACHE) && |
0a7de745 A |
5539 | (object->ref_count == 1)) { |
5540 | if (original_object) { | |
0b4e3aa0 | 5541 | object->named = FALSE; |
0a7de745 | 5542 | } |
1c79356b | 5543 | vm_object_unlock(object); |
0b4e3aa0 A |
5544 | /* let vm_object_deallocate push this thing into */ |
5545 | /* the cache, if that it is where it is bound */ | |
5546 | vm_object_deallocate(object); | |
5547 | return KERN_SUCCESS; | |
5548 | } | |
0b4e3aa0 | 5549 | shadow = object->pageout ? VM_OBJECT_NULL : object->shadow; |
b0d623f7 A |
5550 | |
5551 | if (object->ref_count == 1) { | |
5552 | if (vm_object_terminate(object) != KERN_SUCCESS) { | |
5553 | if (original_object) { | |
0b4e3aa0 A |
5554 | return KERN_FAILURE; |
5555 | } else { | |
5556 | return KERN_SUCCESS; | |
5557 | } | |
5558 | } | |
5559 | if (shadow != VM_OBJECT_NULL) { | |
5560 | original_object = FALSE; | |
5561 | object = shadow; | |
5562 | continue; | |
5563 | } | |
5564 | return KERN_SUCCESS; | |
5565 | } else { | |
2d21ac55 | 5566 | vm_object_lock_assert_exclusive(object); |
0b4e3aa0 A |
5567 | object->ref_count--; |
5568 | assert(object->ref_count > 0); | |
0a7de745 | 5569 | if (original_object) { |
0b4e3aa0 | 5570 | object->named = FALSE; |
0a7de745 | 5571 | } |
0b4e3aa0 | 5572 | vm_object_unlock(object); |
0b4e3aa0 | 5573 | return KERN_SUCCESS; |
1c79356b | 5574 | } |
1c79356b | 5575 | } |
91447636 A |
5576 | /*NOTREACHED*/ |
5577 | assert(0); | |
5578 | return KERN_FAILURE; | |
1c79356b A |
5579 | } |
5580 | ||
0b4e3aa0 A |
5581 | |
5582 | __private_extern__ kern_return_t | |
5583 | vm_object_lock_request( | |
0a7de745 A |
5584 | vm_object_t object, |
5585 | vm_object_offset_t offset, | |
5586 | vm_object_size_t size, | |
5587 | memory_object_return_t should_return, | |
5588 | int flags, | |
5589 | vm_prot_t prot) | |
1c79356b | 5590 | { |
0a7de745 | 5591 | __unused boolean_t should_flush; |
91447636 A |
5592 | |
5593 | should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; | |
1c79356b | 5594 | |
0b4e3aa0 A |
5595 | /* |
5596 | * Check for bogus arguments. | |
5597 | */ | |
0a7de745 A |
5598 | if (object == VM_OBJECT_NULL) { |
5599 | return KERN_INVALID_ARGUMENT; | |
5600 | } | |
1c79356b | 5601 | |
0a7de745 A |
5602 | if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) { |
5603 | return KERN_INVALID_ARGUMENT; | |
5604 | } | |
1c79356b | 5605 | |
f427ee49 A |
5606 | /* |
5607 | * XXX TODO4K | |
5608 | * extend range for conservative operations (copy-on-write, sync, ...) | |
5609 | * truncate range for destructive operations (purge, ...) | |
5610 | */ | |
5611 | size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); | |
5612 | offset = vm_object_trunc_page(offset); | |
0b4e3aa0 A |
5613 | |
5614 | /* | |
5615 | * Lock the object, and acquire a paging reference to | |
5616 | * prevent the memory_object reference from being released. | |
5617 | */ | |
5618 | vm_object_lock(object); | |
5619 | vm_object_paging_begin(object); | |
0b4e3aa0 A |
5620 | |
5621 | (void)vm_object_update(object, | |
0a7de745 | 5622 | offset, size, NULL, NULL, should_return, flags, prot); |
0b4e3aa0 A |
5623 | |
5624 | vm_object_paging_end(object); | |
5625 | vm_object_unlock(object); | |
5626 | ||
0a7de745 | 5627 | return KERN_SUCCESS; |
0b4e3aa0 A |
5628 | } |
5629 | ||
91447636 | 5630 | /* |
2d21ac55 | 5631 | * Empty a purgeable object by grabbing the physical pages assigned to it and |
91447636 A |
5632 | * putting them on the free queue without writing them to backing store, etc. |
5633 | * When the pages are next touched they will be demand zero-fill pages. We | |
5634 | * skip pages which are busy, being paged in/out, wired, etc. We do _not_ | |
5635 | * skip referenced/dirty pages, pages on the active queue, etc. We're more | |
2d21ac55 | 5636 | * than happy to grab these since this is a purgeable object. We mark the |
91447636 A |
5637 | * object as "empty" after reaping its pages. |
5638 | * | |
b0d623f7 A |
5639 | * On entry the object must be locked and it must be |
5640 | * purgeable with no delayed copies pending. | |
91447636 | 5641 | */ |
a39ff7e2 | 5642 | uint64_t |
fe8ab488 | 5643 | vm_object_purge(vm_object_t object, int flags) |
91447636 | 5644 | { |
0a7de745 A |
5645 | unsigned int object_page_count = 0, pgcount = 0; |
5646 | uint64_t total_purged_pgcount = 0; | |
5647 | boolean_t skipped_object = FALSE; | |
4bd07ac2 | 5648 | |
0a7de745 | 5649 | vm_object_lock_assert_exclusive(object); |
0b4e3aa0 | 5650 | |
0a7de745 | 5651 | if (object->purgable == VM_PURGABLE_DENY) { |
a39ff7e2 | 5652 | return 0; |
0a7de745 | 5653 | } |
91447636 A |
5654 | |
5655 | assert(object->copy == VM_OBJECT_NULL); | |
5656 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); | |
593a1d5f | 5657 | |
fe8ab488 A |
5658 | /* |
5659 | * We need to set the object's state to VM_PURGABLE_EMPTY *before* | |
5660 | * reaping its pages. We update vm_page_purgeable_count in bulk | |
5661 | * and we don't want vm_page_remove() to update it again for each | |
5662 | * page we reap later. | |
5663 | * | |
5664 | * For the purgeable ledgers, pages from VOLATILE and EMPTY objects | |
5665 | * are all accounted for in the "volatile" ledgers, so this does not | |
5666 | * make any difference. | |
5667 | * If we transitioned directly from NONVOLATILE to EMPTY, | |
5668 | * vm_page_purgeable_count must have been updated when the object | |
5669 | * was dequeued from its volatile queue and the purgeable ledgers | |
5670 | * must have also been updated accordingly at that time (in | |
5671 | * vm_object_purgable_control()). | |
5672 | */ | |
5673 | if (object->purgable == VM_PURGABLE_VOLATILE) { | |
b0d623f7 A |
5674 | unsigned int delta; |
5675 | assert(object->resident_page_count >= | |
0a7de745 | 5676 | object->wired_page_count); |
b0d623f7 | 5677 | delta = (object->resident_page_count - |
0a7de745 | 5678 | object->wired_page_count); |
b0d623f7 A |
5679 | if (delta != 0) { |
5680 | assert(vm_page_purgeable_count >= | |
0a7de745 | 5681 | delta); |
b0d623f7 | 5682 | OSAddAtomic(-delta, |
0a7de745 | 5683 | (SInt32 *)&vm_page_purgeable_count); |
91447636 | 5684 | } |
b0d623f7 A |
5685 | if (object->wired_page_count != 0) { |
5686 | assert(vm_page_purgeable_wired_count >= | |
0a7de745 | 5687 | object->wired_page_count); |
b0d623f7 | 5688 | OSAddAtomic(-object->wired_page_count, |
0a7de745 | 5689 | (SInt32 *)&vm_page_purgeable_wired_count); |
91447636 | 5690 | } |
fe8ab488 | 5691 | object->purgable = VM_PURGABLE_EMPTY; |
91447636 | 5692 | } |
fe8ab488 | 5693 | assert(object->purgable == VM_PURGABLE_EMPTY); |
0a7de745 | 5694 | |
4bd07ac2 A |
5695 | object_page_count = object->resident_page_count; |
5696 | ||
b0d623f7 | 5697 | vm_object_reap_pages(object, REAP_PURGEABLE); |
fe8ab488 | 5698 | |
a39ff7e2 A |
5699 | if (object->resident_page_count >= object_page_count) { |
5700 | total_purged_pgcount = 0; | |
5701 | } else { | |
5702 | total_purged_pgcount = object_page_count - object->resident_page_count; | |
5703 | } | |
5704 | ||
39037602 | 5705 | if (object->pager != NULL) { |
39037602 | 5706 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
fe8ab488 A |
5707 | |
5708 | if (object->activity_in_progress == 0 && | |
5709 | object->paging_in_progress == 0) { | |
5710 | /* | |
5711 | * Also reap any memory coming from this object | |
5712 | * in the VM compressor. | |
5713 | * | |
5714 | * There are no operations in progress on the VM object | |
5715 | * and no operation can start while we're holding the | |
5716 | * VM object lock, so it's safe to reap the compressed | |
5717 | * pages and update the page counts. | |
5718 | */ | |
5719 | pgcount = vm_compressor_pager_get_count(object->pager); | |
5720 | if (pgcount) { | |
5721 | pgcount = vm_compressor_pager_reap_pages(object->pager, flags); | |
5722 | vm_compressor_pager_count(object->pager, | |
0a7de745 A |
5723 | -pgcount, |
5724 | FALSE, /* shared */ | |
5725 | object); | |
d9a64523 | 5726 | vm_object_owner_compressed_update(object, |
0a7de745 | 5727 | -pgcount); |
fe8ab488 | 5728 | } |
0a7de745 | 5729 | if (!(flags & C_DONT_BLOCK)) { |
fe8ab488 | 5730 | assert(vm_compressor_pager_get_count(object->pager) |
0a7de745 | 5731 | == 0); |
fe8ab488 A |
5732 | } |
5733 | } else { | |
5734 | /* | |
5735 | * There's some kind of paging activity in progress | |
5736 | * for this object, which could result in a page | |
5737 | * being compressed or decompressed, possibly while | |
5738 | * the VM object is not locked, so it could race | |
5739 | * with us. | |
5740 | * | |
0a7de745 | 5741 | * We can't really synchronize this without possibly |
fe8ab488 A |
5742 | * causing a deadlock when the compressor needs to |
5743 | * allocate or free memory while compressing or | |
5744 | * decompressing a page from a purgeable object | |
5745 | * mapped in the kernel_map... | |
5746 | * | |
5747 | * So let's not attempt to purge the compressor | |
5748 | * pager if there's any kind of operation in | |
5749 | * progress on the VM object. | |
5750 | */ | |
4bd07ac2 | 5751 | skipped_object = TRUE; |
fe8ab488 A |
5752 | } |
5753 | } | |
5754 | ||
5755 | vm_object_lock_assert_exclusive(object); | |
4bd07ac2 | 5756 | |
a39ff7e2 A |
5757 | total_purged_pgcount += pgcount; |
5758 | ||
4bd07ac2 | 5759 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)), |
0a7de745 A |
5760 | VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ |
5761 | object_page_count, | |
5762 | total_purged_pgcount, | |
5763 | skipped_object, | |
5764 | 0); | |
4bd07ac2 | 5765 | |
a39ff7e2 | 5766 | return total_purged_pgcount; |
91447636 | 5767 | } |
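/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * how a kernel-side caller that holds the object lock exclusively might
 * empty a purgeable object, mirroring the way vm_object_purgable_control()
 * below invokes vm_object_purge().  "object" stands in for the caller's
 * VM object.
 */
#if 0	/* example only -- not compiled */
	uint64_t reclaimed;

	vm_object_lock(object);				/* exclusive lock required */
	assert(object->purgable != VM_PURGABLE_DENY);
	reclaimed = vm_object_purge(object, 0);		/* flags == 0: blocking allowed */
	assert(object->purgable == VM_PURGABLE_EMPTY);
	vm_object_unlock(object);
#endif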
0a7de745 | 5768 | |
91447636 A |
5769 | |
5770 | /* | |
2d21ac55 A |
5771 | * vm_object_purgeable_control() allows the caller to control and investigate the |
5772 | * state of a purgeable object. A purgeable object is created via a call to | |
5773 | * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will | |
5774 | * never be coalesced with any other object -- even other purgeable objects -- | |
5775 | * and will thus always remain a distinct object. A purgeable object has | |
91447636 | 5776 | * special semantics when its reference count is exactly 1. If its reference |
2d21ac55 | 5777 | * count is greater than 1, then a purgeable object will behave like a normal |
91447636 A |
5778 | * object and attempts to use this interface will result in an error return |
5779 | * of KERN_INVALID_ARGUMENT. | |
5780 | * | |
2d21ac55 | 5781 | * A purgeable object may be put into a "volatile" state which will make the |
91447636 A |
5782 | * object's pages eligible for being reclaimed without paging to backing
5783 | * store if the system runs low on memory. If the pages in a volatile | |
2d21ac55 A |
5784 | * purgeable object are reclaimed, the purgeable object is said to have been |
5785 | * "emptied." When a purgeable object is emptied the system will reclaim as | |
91447636 A |
5786 | * many pages from the object as it can in a convenient manner (pages already |
5787 | * en route to backing store or busy for other reasons are left as is). When | |
2d21ac55 | 5788 | * a purgeable object is made volatile, its pages will generally be reclaimed |
91447636 A |
5789 | * before other pages in the application's working set. This semantic is |
5790 | * generally used by applications which can recreate the data in the object | |
5791 | * faster than it can be paged in. One such example might be media assets | |
5792 | * which can be reread from a much faster RAID volume. | |
5793 | * | |
2d21ac55 | 5794 | * A purgeable object may be designated as "non-volatile" which means it will |
91447636 A |
5795 | * behave like all other objects in the system with pages being written to and |
5796 | * read from backing store as needed to satisfy system memory needs. If the | |
5797 | * object was emptied before the object was made non-volatile, that fact will | |
2d21ac55 | 5798 | * be returned as the old state of the purgeable object (see |
91447636 A |
5799 | * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which |
5800 | * were reclaimed as part of emptying the object will be refaulted in as | |
5801 | * zero-fill on demand. It is up to the application to note that an object | |
5802 | * was emptied and recreate the object's contents if necessary. When a
2d21ac55 A |
5803 | * purgeable object is made non-volatile, its pages will generally not be paged |
5804 | * out to backing store in the immediate future. A purgeable object may also | |
91447636 A |
5805 | * be manually emptied. |
5806 | * | |
5807 | * Finally, the current state (non-volatile, volatile, volatile & empty) of a | |
2d21ac55 | 5808 | * volatile purgeable object may be queried at any time. This information may |
91447636 A |
5809 | * be used as a control input to let the application know when the system is |
5810 | * experiencing memory pressure and is reclaiming memory. | |
5811 | * | |
2d21ac55 | 5812 | * The specified address may be any address within the purgeable object. If |
91447636 A |
5813 | * the specified address does not represent any object in the target task's |
5814 | * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the | |
2d21ac55 | 5815 | * object containing the specified address is not a purgeable object, then |
91447636 A |
5816 | * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be |
5817 | * returned. | |
5818 | * | |
5819 | * The control parameter may be any one of VM_PURGABLE_SET_STATE or | |
5820 | * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter | |
2d21ac55 A |
5821 | * state is used to set the new state of the purgeable object and return its |
5822 | * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable | |
91447636 A |
5823 | * object is returned in the parameter state. |
5824 | * | |
5825 | * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE, | |
5826 | * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent | |
5827 | * the non-volatile, volatile and volatile/empty states described above. | |
2d21ac55 | 5828 | * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will |
91447636 A |
5829 | * immediately reclaim as many pages in the object as can be conveniently |
5830 | * collected (some may have already been written to backing store or be | |
5831 | * otherwise busy). | |
5832 | * | |
2d21ac55 A |
5833 | * The process of making a purgeable object non-volatile and determining its |
5834 | * previous state is atomic. Thus, if a purgeable object is made | |
91447636 | 5835 | * VM_PURGABLE_NONVOLATILE and the old state is returned as |
2d21ac55 | 5836 | * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are |
91447636 A |
5837 | * completely intact and will remain so until the object is made volatile |
5838 | * again. If the old state is returned as VM_PURGABLE_EMPTY then the object | |
5839 | * was reclaimed while it was in a volatile state and its previous contents | |
5840 | * have been lost. | |
5841 | */ | |
5842 | /* | |
5843 | * The object must be locked. | |
5844 | */ | |
5845 | kern_return_t | |
5846 | vm_object_purgable_control( | |
0a7de745 A |
5847 | vm_object_t object, |
5848 | vm_purgable_t control, | |
5849 | int *state) | |
91447636 | 5850 | { |
0a7de745 A |
5851 | int old_state; |
5852 | int new_state; | |
91447636 A |
5853 | |
5854 | if (object == VM_OBJECT_NULL) { | |
5855 | /* | |
2d21ac55 | 5856 | * Object must already be present or it can't be purgeable. |
91447636 A |
5857 | */ |
5858 | return KERN_INVALID_ARGUMENT; | |
5859 | } | |
5860 | ||
fe8ab488 A |
5861 | vm_object_lock_assert_exclusive(object); |
5862 | ||
91447636 | 5863 | /* |
2d21ac55 | 5864 | * Get current state of the purgeable object. |
91447636 | 5865 | */ |
2d21ac55 | 5866 | old_state = object->purgable; |
0a7de745 | 5867 | if (old_state == VM_PURGABLE_DENY) { |
91447636 | 5868 | return KERN_INVALID_ARGUMENT; |
0a7de745 A |
5869 | } |
5870 | ||
2d21ac55 | 5871 | /* purgeable can't have delayed copies - now or in the future */
0a7de745 | 5872 | assert(object->copy == VM_OBJECT_NULL); |
91447636 A |
5873 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); |
5874 | ||
5875 | /* | |
5876 | * Execute the desired operation. | |
5877 | */ | |
5878 | if (control == VM_PURGABLE_GET_STATE) { | |
5879 | *state = old_state; | |
5880 | return KERN_SUCCESS; | |
5881 | } | |
5882 | ||
5ba3f43e A |
5883 | if (control == VM_PURGABLE_SET_STATE && |
5884 | object->purgeable_only_by_kernel) { | |
5885 | return KERN_PROTECTION_FAILURE; | |
5886 | } | |
5887 | ||
5888 | if (control != VM_PURGABLE_SET_STATE && | |
5889 | control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { | |
5890 | return KERN_INVALID_ARGUMENT; | |
5891 | } | |
5892 | ||
b0d623f7 A |
5893 | if ((*state) & VM_PURGABLE_DEBUG_EMPTY) { |
5894 | object->volatile_empty = TRUE; | |
5895 | } | |
5896 | if ((*state) & VM_PURGABLE_DEBUG_FAULT) { | |
5897 | object->volatile_fault = TRUE; | |
5898 | } | |
5899 | ||
2d21ac55 | 5900 | new_state = *state & VM_PURGABLE_STATE_MASK; |
813fb2f6 A |
5901 | if (new_state == VM_PURGABLE_VOLATILE) { |
5902 | if (old_state == VM_PURGABLE_EMPTY) { | |
5903 | /* what's been emptied must stay empty */ | |
5904 | new_state = VM_PURGABLE_EMPTY; | |
5905 | } | |
5906 | if (object->volatile_empty) { | |
5907 | /* debugging mode: go straight to empty */ | |
5908 | new_state = VM_PURGABLE_EMPTY; | |
5909 | } | |
b0d623f7 A |
5910 | } |
5911 | ||
2d21ac55 A |
5912 | switch (new_state) { |
5913 | case VM_PURGABLE_DENY: | |
5ba3f43e A |
5914 | /* |
5915 | * Attempting to convert purgeable memory to non-purgeable: | |
5916 | * not allowed. | |
5917 | */ | |
5918 | return KERN_INVALID_ARGUMENT; | |
91447636 | 5919 | case VM_PURGABLE_NONVOLATILE: |
2d21ac55 A |
5920 | object->purgable = new_state; |
5921 | ||
b0d623f7 A |
5922 | if (old_state == VM_PURGABLE_VOLATILE) { |
5923 | unsigned int delta; | |
5924 | ||
5925 | assert(object->resident_page_count >= | |
0a7de745 | 5926 | object->wired_page_count); |
b0d623f7 | 5927 | delta = (object->resident_page_count - |
0a7de745 | 5928 | object->wired_page_count); |
b0d623f7 A |
5929 | |
5930 | assert(vm_page_purgeable_count >= delta); | |
5931 | ||
5932 | if (delta != 0) { | |
5933 | OSAddAtomic(-delta, | |
0a7de745 | 5934 | (SInt32 *)&vm_page_purgeable_count); |
b0d623f7 A |
5935 | } |
5936 | if (object->wired_page_count != 0) { | |
5937 | assert(vm_page_purgeable_wired_count >= | |
0a7de745 | 5938 | object->wired_page_count); |
b0d623f7 | 5939 | OSAddAtomic(-object->wired_page_count, |
0a7de745 | 5940 | (SInt32 *)&vm_page_purgeable_wired_count); |
b0d623f7 A |
5941 | } |
5942 | ||
2d21ac55 | 5943 | vm_page_lock_queues(); |
b0d623f7 | 5944 | |
fe8ab488 A |
5945 | /* object should be on a queue */ |
5946 | assert(object->objq.next != NULL && | |
0a7de745 | 5947 | object->objq.prev != NULL); |
fe8ab488 A |
5948 | purgeable_q_t queue; |
5949 | ||
5950 | /* | |
5951 | * Move object from its volatile queue to the | |
5952 | * non-volatile queue... | |
5953 | */ | |
5954 | queue = vm_purgeable_object_remove(object); | |
b0d623f7 A |
5955 | assert(queue); |
5956 | ||
39236c6e A |
5957 | if (object->purgeable_when_ripe) { |
5958 | vm_purgeable_token_delete_last(queue); | |
5959 | } | |
0a7de745 | 5960 | assert(queue->debug_count_objects >= 0); |
b0d623f7 | 5961 | |
2d21ac55 | 5962 | vm_page_unlock_queues(); |
91447636 | 5963 | } |
fe8ab488 A |
5964 | if (old_state == VM_PURGABLE_VOLATILE || |
5965 | old_state == VM_PURGABLE_EMPTY) { | |
5966 | /* | |
5967 | * Transfer the object's pages from the volatile to | |
5968 | * non-volatile ledgers. | |
5969 | */ | |
d9a64523 | 5970 | vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE); |
fe8ab488 A |
5971 | } |
5972 | ||
91447636 A |
5973 | break; |
5974 | ||
5975 | case VM_PURGABLE_VOLATILE: | |
b0d623f7 | 5976 | if (object->volatile_fault) { |
0a7de745 A |
5977 | vm_page_t p; |
5978 | int refmod; | |
b0d623f7 | 5979 | |
0a7de745 | 5980 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
d9a64523 | 5981 | if (p->vmp_busy || |
b0d623f7 | 5982 | VM_PAGE_WIRED(p) || |
d9a64523 | 5983 | p->vmp_fictitious) { |
b0d623f7 A |
5984 | continue; |
5985 | } | |
39037602 | 5986 | refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
b0d623f7 | 5987 | if ((refmod & VM_MEM_MODIFIED) && |
d9a64523 | 5988 | !p->vmp_dirty) { |
316670eb | 5989 | SET_PAGE_DIRTY(p, FALSE); |
b0d623f7 A |
5990 | } |
5991 | } | |
5992 | } | |
813fb2f6 A |
5993 | |
5994 | assert(old_state != VM_PURGABLE_EMPTY); | |
b0d623f7 | 5995 | |
2d21ac55 | 5996 | purgeable_q_t queue; |
0a7de745 | 5997 | |
2d21ac55 | 5998 | /* find the correct queue */ |
0a7de745 A |
5999 | if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) { |
6000 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; | |
6001 | } else { | |
6002 | if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) { | |
6003 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; | |
6004 | } else { | |
6005 | queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; | |
6006 | } | |
6007 | } | |
6008 | ||
593a1d5f A |
6009 | if (old_state == VM_PURGABLE_NONVOLATILE || |
6010 | old_state == VM_PURGABLE_EMPTY) { | |
b0d623f7 A |
6011 | unsigned int delta; |
6012 | ||
39236c6e A |
6013 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
6014 | VM_PURGABLE_NO_AGING) { | |
6015 | object->purgeable_when_ripe = FALSE; | |
6016 | } else { | |
6017 | object->purgeable_when_ripe = TRUE; | |
6018 | } | |
0a7de745 | 6019 | |
39236c6e A |
6020 | if (object->purgeable_when_ripe) { |
6021 | kern_return_t result; | |
91447636 | 6022 | |
39236c6e A |
6023 | /* try to add token... this can fail */ |
6024 | vm_page_lock_queues(); | |
6025 | ||
6026 | result = vm_purgeable_token_add(queue); | |
6027 | if (result != KERN_SUCCESS) { | |
6028 | vm_page_unlock_queues(); | |
6029 | return result; | |
6030 | } | |
6031 | vm_page_unlock_queues(); | |
91447636 | 6032 | } |
2d21ac55 | 6033 | |
b0d623f7 | 6034 | assert(object->resident_page_count >= |
0a7de745 | 6035 | object->wired_page_count); |
b0d623f7 | 6036 | delta = (object->resident_page_count - |
0a7de745 | 6037 | object->wired_page_count); |
b0d623f7 A |
6038 | |
6039 | if (delta != 0) { | |
6040 | OSAddAtomic(delta, | |
0a7de745 | 6041 | &vm_page_purgeable_count); |
b0d623f7 A |
6042 | } |
6043 | if (object->wired_page_count != 0) { | |
6044 | OSAddAtomic(object->wired_page_count, | |
0a7de745 | 6045 | &vm_page_purgeable_wired_count); |
b0d623f7 A |
6046 | } |
6047 | ||
2d21ac55 A |
6048 | object->purgable = new_state; |
6049 | ||
fe8ab488 A |
6050 | /* object should be on "non-volatile" queue */ |
6051 | assert(object->objq.next != NULL); | |
6052 | assert(object->objq.prev != NULL); | |
0a7de745 A |
6053 | } else if (old_state == VM_PURGABLE_VOLATILE) { |
6054 | purgeable_q_t old_queue; | |
6055 | boolean_t purgeable_when_ripe; | |
39236c6e | 6056 | |
0a7de745 | 6057 | /* |
2d21ac55 A |
6058 | * if reassigning priorities / purgeable groups, we don't change the |
6059 | * token queue. So moving priorities will not make pages stay around longer. | |
6060 | * Reasoning is that the algorithm gives most priority to the most important | |
6061 | * object. If a new token is added, the most important object's priority is boosted.
6062 | * This biases the system already for purgeable queues that move a lot. | |
6063 | * It doesn't seem more biasing is necessary in this case, where no new object is added.
6064 | */ | |
0a7de745 A |
6065 | assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ |
6066 | ||
39236c6e | 6067 | old_queue = vm_purgeable_object_remove(object); |
2d21ac55 | 6068 | assert(old_queue); |
0a7de745 | 6069 | |
39236c6e A |
6070 | if ((*state & VM_PURGABLE_NO_AGING_MASK) == |
6071 | VM_PURGABLE_NO_AGING) { | |
6072 | purgeable_when_ripe = FALSE; | |
6073 | } else { | |
6074 | purgeable_when_ripe = TRUE; | |
6075 | } | |
0a7de745 | 6076 | |
39236c6e A |
6077 | if (old_queue != queue || |
6078 | (purgeable_when_ripe != | |
0a7de745 | 6079 | object->purgeable_when_ripe)) { |
2d21ac55 A |
6080 | kern_return_t result; |
6081 | ||
0a7de745 A |
6082 | /* Changing queue. Have to move token. */ |
6083 | vm_page_lock_queues(); | |
39236c6e A |
6084 | if (object->purgeable_when_ripe) { |
6085 | vm_purgeable_token_delete_last(old_queue); | |
6086 | } | |
6087 | object->purgeable_when_ripe = purgeable_when_ripe; | |
6088 | if (object->purgeable_when_ripe) { | |
6089 | result = vm_purgeable_token_add(queue); | |
0a7de745 | 6090 | assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */ |
39236c6e | 6091 | } |
2d21ac55 | 6092 | vm_page_unlock_queues(); |
2d21ac55 | 6093 | } |
0a7de745 A |
6094 | } |
6095 | ; | |
6096 | vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT ); | |
fe8ab488 | 6097 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
a39ff7e2 | 6098 | vm_purgeable_accounting(object, |
0a7de745 | 6099 | VM_PURGABLE_NONVOLATILE); |
fe8ab488 | 6100 | } |
2d21ac55 | 6101 | |
0a7de745 A |
6102 | assert(queue->debug_count_objects >= 0); |
6103 | ||
91447636 A |
6104 | break; |
6105 | ||
6106 | ||
6107 | case VM_PURGABLE_EMPTY: | |
b0d623f7 | 6108 | if (object->volatile_fault) { |
0a7de745 A |
6109 | vm_page_t p; |
6110 | int refmod; | |
b0d623f7 | 6111 | |
0a7de745 | 6112 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
d9a64523 | 6113 | if (p->vmp_busy || |
b0d623f7 | 6114 | VM_PAGE_WIRED(p) || |
d9a64523 | 6115 | p->vmp_fictitious) { |
b0d623f7 A |
6116 | continue; |
6117 | } | |
39037602 | 6118 | refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); |
b0d623f7 | 6119 | if ((refmod & VM_MEM_MODIFIED) && |
d9a64523 | 6120 | !p->vmp_dirty) { |
316670eb | 6121 | SET_PAGE_DIRTY(p, FALSE); |
b0d623f7 | 6122 | } |
2d21ac55 | 6123 | } |
b0d623f7 A |
6124 | } |
6125 | ||
fe8ab488 A |
6126 | if (old_state == VM_PURGABLE_VOLATILE) { |
6127 | purgeable_q_t old_queue; | |
6128 | ||
6129 | /* object should be on a queue */ | |
6130 | assert(object->objq.next != NULL && | |
0a7de745 | 6131 | object->objq.prev != NULL); |
fe8ab488 A |
6132 | |
6133 | old_queue = vm_purgeable_object_remove(object); | |
6134 | assert(old_queue); | |
6135 | if (object->purgeable_when_ripe) { | |
6136 | vm_page_lock_queues(); | |
6137 | vm_purgeable_token_delete_first(old_queue); | |
6138 | vm_page_unlock_queues(); | |
2d21ac55 | 6139 | } |
91447636 | 6140 | } |
91447636 | 6141 | |
fe8ab488 A |
6142 | if (old_state == VM_PURGABLE_NONVOLATILE) { |
6143 | /* | |
6144 | * This object's pages were previously accounted as | |
6145 | * "non-volatile" and now need to be accounted as | |
6146 | * "volatile". | |
6147 | */ | |
a39ff7e2 | 6148 | vm_purgeable_accounting(object, |
0a7de745 | 6149 | VM_PURGABLE_NONVOLATILE); |
fe8ab488 A |
6150 | /* |
6151 | * Set to VM_PURGABLE_EMPTY because the pages are no | |
6152 | * longer accounted in the "non-volatile" ledger | |
6153 | * and are also not accounted for in | |
6154 | * "vm_page_purgeable_count". | |
6155 | */ | |
6156 | object->purgable = VM_PURGABLE_EMPTY; | |
6157 | } | |
6158 | ||
6159 | (void) vm_object_purge(object, 0); | |
6160 | assert(object->purgable == VM_PURGABLE_EMPTY); | |
6161 | ||
6162 | break; | |
91447636 | 6163 | } |
fe8ab488 | 6164 | |
91447636 A |
6165 | *state = old_state; |
6166 | ||
fe8ab488 A |
6167 | vm_object_lock_assert_exclusive(object); |
6168 | ||
91447636 A |
6169 | return KERN_SUCCESS; |
6170 | } | |
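/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a user-space view of the purgeable-object interface documented above.
 * It assumes the usual Mach user-level entry points (mach_vm_allocate()
 * with VM_FLAGS_PURGABLE and mach_vm_purgable_control()), which ultimately
 * reach vm_object_purgable_control() in the kernel.
 */
#if 0	/* example only -- builds as a user-space program, not in the kernel */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_purgable.h>

int
main(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 4 * 1024 * 1024;
	int state;
	kern_return_t kr;

	/* Create a purgeable region. */
	kr = mach_vm_allocate(mach_task_self(), &addr, size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS) {
		return 1;
	}

	/* ... fill the region with data that can be recreated ... */

	/* Mark it volatile: the system may now reclaim its pages. */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);

	/* Later: take it back and learn whether it was emptied meanwhile. */
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
		/* contents were purged; recreate them before reuse */
	}

	mach_vm_deallocate(mach_task_self(), addr, size);
	return 0;
}
#endif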
0b4e3aa0 | 6171 | |
39236c6e A |
6172 | kern_return_t |
6173 | vm_object_get_page_counts( | |
0a7de745 A |
6174 | vm_object_t object, |
6175 | vm_object_offset_t offset, | |
6176 | vm_object_size_t size, | |
6177 | unsigned int *resident_page_count, | |
6178 | unsigned int *dirty_page_count) | |
39236c6e | 6179 | { |
0a7de745 A |
6180 | kern_return_t kr = KERN_SUCCESS; |
6181 | boolean_t count_dirty_pages = FALSE; | |
6182 | vm_page_t p = VM_PAGE_NULL; | |
6183 | unsigned int local_resident_count = 0; | |
6184 | unsigned int local_dirty_count = 0; | |
6185 | vm_object_offset_t cur_offset = 0; | |
6186 | vm_object_offset_t end_offset = 0; | |
39236c6e | 6187 | |
0a7de745 | 6188 | if (object == VM_OBJECT_NULL) { |
39236c6e | 6189 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 6190 | } |
39236c6e A |
6191 | |
6192 | ||
6193 | cur_offset = offset; | |
0a7de745 | 6194 | |
39236c6e A |
6195 | end_offset = offset + size; |
6196 | ||
6197 | vm_object_lock_assert_exclusive(object); | |
6198 | ||
6199 | if (dirty_page_count != NULL) { | |
39236c6e A |
6200 | count_dirty_pages = TRUE; |
6201 | } | |
6202 | ||
6203 | if (resident_page_count != NULL && count_dirty_pages == FALSE) { | |
6204 | /* | |
6205 | * Fast path when: | |
6206 | * - we only want the resident page count, and, | |
6207 | * - the entire object is exactly covered by the request. | |
6208 | */ | |
6209 | if (offset == 0 && (object->vo_size == size)) { | |
39236c6e A |
6210 | *resident_page_count = object->resident_page_count; |
6211 | goto out; | |
6212 | } | |
6213 | } | |
6214 | ||
6215 | if (object->resident_page_count <= (size >> PAGE_SHIFT)) { | |
0a7de745 | 6216 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
d9a64523 | 6217 | if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) { |
39236c6e A |
6218 | local_resident_count++; |
6219 | ||
6220 | if (count_dirty_pages) { | |
d9a64523 | 6221 | if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { |
39236c6e A |
6222 | local_dirty_count++; |
6223 | } | |
6224 | } | |
6225 | } | |
6226 | } | |
6227 | } else { | |
39236c6e | 6228 | for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) { |
39236c6e | 6229 | p = vm_page_lookup(object, cur_offset); |
39236c6e | 6230 | |
0a7de745 | 6231 | if (p != VM_PAGE_NULL) { |
39236c6e A |
6232 | local_resident_count++; |
6233 | ||
6234 | if (count_dirty_pages) { | |
d9a64523 | 6235 | if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { |
39236c6e A |
6236 | local_dirty_count++; |
6237 | } | |
6238 | } | |
6239 | } | |
6240 | } | |
39236c6e A |
6241 | } |
6242 | ||
6243 | if (resident_page_count != NULL) { | |
6244 | *resident_page_count = local_resident_count; | |
6245 | } | |
6246 | ||
6247 | if (dirty_page_count != NULL) { | |
6248 | *dirty_page_count = local_dirty_count; | |
6249 | } | |
6250 | ||
6251 | out: | |
6252 | return kr; | |
6253 | } | |
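/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * minimal in-kernel use of vm_object_get_page_counts().  The routine
 * asserts an exclusive object lock, so the caller must hold one;
 * "object" stands in for the caller's VM object.
 */
#if 0	/* example only -- not compiled */
	unsigned int resident = 0, dirty = 0;

	vm_object_lock(object);
	/* resident count only, over the whole object: takes the fast path */
	(void) vm_object_get_page_counts(object, 0, object->vo_size,
	    &resident, NULL);
	/* resident and dirty counts for the first 16 pages */
	(void) vm_object_get_page_counts(object, 0, 16 * PAGE_SIZE,
	    &resident, &dirty);
	vm_object_unlock(object);
#endif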
6254 | ||
6255 | ||
0b4e3aa0 A |
6256 | /* |
6257 | * vm_object_reference: | |
6258 | * | |
6259 | * Gets another reference to the given object. | |
6260 | */ | |
6261 | #ifdef vm_object_reference | |
6262 | #undef vm_object_reference | |
6263 | #endif | |
6264 | __private_extern__ void | |
6265 | vm_object_reference( | |
0a7de745 | 6266 | vm_object_t object) |
0b4e3aa0 | 6267 | { |
0a7de745 | 6268 | if (object == VM_OBJECT_NULL) { |
0b4e3aa0 | 6269 | return; |
0a7de745 | 6270 | } |
0b4e3aa0 A |
6271 | |
6272 | vm_object_lock(object); | |
6273 | assert(object->ref_count > 0); | |
6274 | vm_object_reference_locked(object); | |
6275 | vm_object_unlock(object); | |
6276 | } | |
6277 | ||
91447636 A |
6278 | /* |
6279 | * vm_object_transpose | |
6280 | * | |
6281 | * This routine takes two VM objects of the same size and exchanges | |
6282 | * their backing store. | |
6283 | * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE | |
6284 | * and UPL_BLOCK_ACCESS if they are referenced anywhere. | |
6285 | * | |
6286 | * The VM objects must not be locked by caller. | |
6287 | */ | |
b0d623f7 | 6288 | unsigned int vm_object_transpose_count = 0; |
91447636 A |
6289 | kern_return_t |
6290 | vm_object_transpose( | |
0a7de745 A |
6291 | vm_object_t object1, |
6292 | vm_object_t object2, | |
6293 | vm_object_size_t transpose_size) | |
91447636 | 6294 | { |
0a7de745 A |
6295 | vm_object_t tmp_object; |
6296 | kern_return_t retval; | |
6297 | boolean_t object1_locked, object2_locked; | |
6298 | vm_page_t page; | |
6299 | vm_object_offset_t page_offset; | |
91447636 A |
6300 | |
6301 | tmp_object = VM_OBJECT_NULL; | |
6302 | object1_locked = FALSE; object2_locked = FALSE; | |
91447636 A |
6303 | |
6304 | if (object1 == object2 || | |
6305 | object1 == VM_OBJECT_NULL || | |
6306 | object2 == VM_OBJECT_NULL) { | |
6307 | /* | |
6308 | * If the 2 VM objects are the same, there's | |
6309 | * no point in exchanging their backing store. | |
6310 | */ | |
6311 | retval = KERN_INVALID_VALUE; | |
6312 | goto done; | |
6313 | } | |
6314 | ||
b0d623f7 A |
6315 | /* |
6316 | * Since we need to lock both objects at the same time, | |
6317 | * make sure we always lock them in the same order to | |
6318 | * avoid deadlocks. | |
6319 | */ | |
0a7de745 | 6320 | if (object1 > object2) { |
b0d623f7 A |
6321 | tmp_object = object1; |
6322 | object1 = object2; | |
6323 | object2 = tmp_object; | |
6324 | } | |
6325 | ||
6326 | /* | |
6327 | * Allocate a temporary VM object to hold object1's contents | |
6328 | * while we copy object2 to object1. | |
6329 | */ | |
6330 | tmp_object = vm_object_allocate(transpose_size); | |
6331 | vm_object_lock(tmp_object); | |
6332 | tmp_object->can_persist = FALSE; | |
6333 | ||
6334 | ||
6335 | /* | |
6336 | * Grab control of the 1st VM object. | |
6337 | */ | |
91447636 A |
6338 | vm_object_lock(object1); |
6339 | object1_locked = TRUE; | |
2d21ac55 A |
6340 | if (!object1->alive || object1->terminating || |
6341 | object1->copy || object1->shadow || object1->shadowed || | |
6342 | object1->purgable != VM_PURGABLE_DENY) { | |
91447636 A |
6343 | /* |
6344 | * We don't deal with copy or shadow objects (yet). | |
6345 | */ | |
6346 | retval = KERN_INVALID_VALUE; | |
6347 | goto done; | |
6348 | } | |
6349 | /* | |
0a7de745 | 6350 | * We're about to mess with the object's backing store and |
b0d623f7 | 6351 | * taking a "paging_in_progress" reference wouldn't be enough |
91447636 A |
6352 | * to prevent any paging activity on this object, so the caller should |
6353 | * have "quiesced" the objects beforehand, via a UPL operation with | |
6354 | * UPL_SET_IO_WIRE (to make sure all the pages are there and wired) | |
6355 | * and UPL_BLOCK_ACCESS (to mark the pages "busy"). | |
0a7de745 A |
6356 | * |
6357 | * Wait for any paging operation to complete (but only paging, not | |
b0d623f7 A |
6358 | * other kinds of activity not linked to the pager). After we're
6359 | * satisfied that there's no more paging in progress, we keep the
6360 | * object locked, to guarantee that no one tries to access its pager. | |
91447636 | 6361 | */ |
b0d623f7 | 6362 | vm_object_paging_only_wait(object1, THREAD_UNINT); |
91447636 A |
6363 | |
6364 | /* | |
6365 | * Same as above for the 2nd object... | |
6366 | */ | |
6367 | vm_object_lock(object2); | |
6368 | object2_locked = TRUE; | |
0a7de745 | 6369 | if (!object2->alive || object2->terminating || |
2d21ac55 A |
6370 | object2->copy || object2->shadow || object2->shadowed || |
6371 | object2->purgable != VM_PURGABLE_DENY) { | |
91447636 A |
6372 | retval = KERN_INVALID_VALUE; |
6373 | goto done; | |
6374 | } | |
b0d623f7 | 6375 | vm_object_paging_only_wait(object2, THREAD_UNINT); |
91447636 | 6376 | |
91447636 | 6377 | |
6d2010ae A |
6378 | if (object1->vo_size != object2->vo_size || |
6379 | object1->vo_size != transpose_size) { | |
91447636 A |
6380 | /* |
6381 | * If the 2 objects don't have the same size, we can't | |
6382 | * exchange their backing stores or one would overflow. | |
6383 | * If their size doesn't match the caller's | |
6384 | * "transpose_size", we can't do it either because the | |
0a7de745 | 6385 | * transpose operation will affect the entire span of |
91447636 A |
6386 | * the objects. |
6387 | */ | |
6388 | retval = KERN_INVALID_VALUE; | |
6389 | goto done; | |
6390 | } | |
6391 | ||
6392 | ||
6393 | /* | |
6394 | * Transpose the lists of resident pages. | |
2d21ac55 | 6395 | * This also updates the resident_page_count and the memq_hint. |
91447636 | 6396 | */ |
39037602 | 6397 | if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) { |
91447636 A |
6398 | /* |
6399 | * No pages in object1, just transfer pages | |
6400 | * from object2 to object1. No need to go through | |
6401 | * an intermediate object. | |
6402 | */ | |
39037602 A |
6403 | while (!vm_page_queue_empty(&object2->memq)) { |
6404 | page = (vm_page_t) vm_page_queue_first(&object2->memq); | |
d9a64523 | 6405 | vm_page_rename(page, object1, page->vmp_offset); |
91447636 | 6406 | } |
39037602 A |
6407 | assert(vm_page_queue_empty(&object2->memq)); |
6408 | } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) { | |
91447636 A |
6409 | /* |
6410 | * No pages in object2, just transfer pages | |
6411 | * from object1 to object2. No need to go through | |
6412 | * an intermediate object. | |
6413 | */ | |
39037602 A |
6414 | while (!vm_page_queue_empty(&object1->memq)) { |
6415 | page = (vm_page_t) vm_page_queue_first(&object1->memq); | |
d9a64523 | 6416 | vm_page_rename(page, object2, page->vmp_offset); |
91447636 | 6417 | } |
39037602 | 6418 | assert(vm_page_queue_empty(&object1->memq)); |
91447636 A |
6419 | } else { |
6420 | /* transfer object1's pages to tmp_object */ | |
39037602 A |
6421 | while (!vm_page_queue_empty(&object1->memq)) { |
6422 | page = (vm_page_t) vm_page_queue_first(&object1->memq); | |
d9a64523 | 6423 | page_offset = page->vmp_offset; |
b0d623f7 | 6424 | vm_page_remove(page, TRUE); |
d9a64523 | 6425 | page->vmp_offset = page_offset; |
0a7de745 | 6426 | vm_page_queue_enter(&tmp_object->memq, page, vmp_listq); |
91447636 | 6427 | } |
39037602 | 6428 | assert(vm_page_queue_empty(&object1->memq)); |
91447636 | 6429 | /* transfer object2's pages to object1 */ |
39037602 A |
6430 | while (!vm_page_queue_empty(&object2->memq)) { |
6431 | page = (vm_page_t) vm_page_queue_first(&object2->memq); | |
d9a64523 | 6432 | vm_page_rename(page, object1, page->vmp_offset); |
91447636 | 6433 | } |
39037602 | 6434 | assert(vm_page_queue_empty(&object2->memq)); |
3e170ce0 | 6435 | /* transfer tmp_object's pages to object2 */ |
39037602 A |
6436 | while (!vm_page_queue_empty(&tmp_object->memq)) { |
6437 | page = (vm_page_t) vm_page_queue_first(&tmp_object->memq); | |
0a7de745 | 6438 | vm_page_queue_remove(&tmp_object->memq, page, vmp_listq); |
d9a64523 | 6439 | vm_page_insert(page, object2, page->vmp_offset); |
91447636 | 6440 | } |
39037602 | 6441 | assert(vm_page_queue_empty(&tmp_object->memq)); |
91447636 A |
6442 | } |
6443 | ||
0a7de745 A |
6444 | #define __TRANSPOSE_FIELD(field) \ |
6445 | MACRO_BEGIN \ | |
6446 | tmp_object->field = object1->field; \ | |
6447 | object1->field = object2->field; \ | |
6448 | object2->field = tmp_object->field; \ | |
91447636 A |
6449 | MACRO_END |
6450 | ||
b0d623f7 | 6451 | /* "Lock" refers to the object not its contents */ |
2d21ac55 | 6452 | /* "size" should be identical */ |
6d2010ae | 6453 | assert(object1->vo_size == object2->vo_size); |
b0d623f7 | 6454 | /* "memq_hint" was updated above when transposing pages */ |
2d21ac55 | 6455 | /* "ref_count" refers to the object not its contents */ |
5ba3f43e A |
6456 | assert(object1->ref_count >= 1); |
6457 | assert(object2->ref_count >= 1); | |
2d21ac55 | 6458 | /* "resident_page_count" was updated above when transposing pages */ |
b0d623f7 | 6459 | /* "wired_page_count" was updated above when transposing pages */ |
0a7de745 | 6460 | #if !VM_TAG_ACTIVE_UPDATE |
d9a64523 A |
6461 | /* "wired_objq" was dealt with along with "wired_page_count" */ |
6462 | #endif /* ! VM_TAG_ACTIVE_UPDATE */ | |
b0d623f7 | 6463 | /* "reusable_page_count" was updated above when transposing pages */ |
2d21ac55 | 6464 | /* there should be no "copy" */ |
91447636 A |
6465 | assert(!object1->copy); |
6466 | assert(!object2->copy); | |
2d21ac55 | 6467 | /* there should be no "shadow" */ |
91447636 A |
6468 | assert(!object1->shadow); |
6469 | assert(!object2->shadow); | |
6d2010ae | 6470 | __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */ |
91447636 A |
6471 | __TRANSPOSE_FIELD(pager); |
6472 | __TRANSPOSE_FIELD(paging_offset); | |
91447636 A |
6473 | __TRANSPOSE_FIELD(pager_control); |
6474 | /* update the memory_objects' pointers back to the VM objects */ | |
6475 | if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) { | |
c3c9b80d | 6476 | memory_object_control_collapse(&object1->pager_control, |
0a7de745 | 6477 | object1); |
91447636 A |
6478 | } |
6479 | if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) { | |
c3c9b80d | 6480 | memory_object_control_collapse(&object2->pager_control, |
0a7de745 | 6481 | object2); |
91447636 | 6482 | } |
2d21ac55 A |
6483 | __TRANSPOSE_FIELD(copy_strategy); |
6484 | /* "paging_in_progress" refers to the object not its contents */ | |
b0d623f7 A |
6485 | assert(!object1->paging_in_progress); |
6486 | assert(!object2->paging_in_progress); | |
6487 | assert(object1->activity_in_progress); | |
6488 | assert(object2->activity_in_progress); | |
2d21ac55 | 6489 | /* "all_wanted" refers to the object not its contents */ |
91447636 A |
6490 | __TRANSPOSE_FIELD(pager_created); |
6491 | __TRANSPOSE_FIELD(pager_initialized); | |
6492 | __TRANSPOSE_FIELD(pager_ready); | |
6493 | __TRANSPOSE_FIELD(pager_trusted); | |
2d21ac55 | 6494 | __TRANSPOSE_FIELD(can_persist); |
91447636 | 6495 | __TRANSPOSE_FIELD(internal); |
91447636 A |
6496 | __TRANSPOSE_FIELD(private); |
6497 | __TRANSPOSE_FIELD(pageout); | |
2d21ac55 A |
6498 | /* "alive" should be set */ |
6499 | assert(object1->alive); | |
6500 | assert(object2->alive); | |
6501 | /* "purgeable" should be non-purgeable */ | |
6502 | assert(object1->purgable == VM_PURGABLE_DENY); | |
6503 | assert(object2->purgable == VM_PURGABLE_DENY); | |
6504 | /* "shadowed" refers to the the object not its contents */ | |
39236c6e | 6505 | __TRANSPOSE_FIELD(purgeable_when_ripe); |
91447636 | 6506 | __TRANSPOSE_FIELD(true_share); |
2d21ac55 A |
6507 | /* "terminating" should not be set */ |
6508 | assert(!object1->terminating); | |
6509 | assert(!object2->terminating); | |
5ba3f43e A |
6510 | /* transfer "named" reference if needed */ |
6511 | if (object1->named && !object2->named) { | |
6512 | assert(object1->ref_count >= 2); | |
6513 | assert(object2->ref_count >= 1); | |
6514 | object1->ref_count--; | |
6515 | object2->ref_count++; | |
6516 | } else if (!object1->named && object2->named) { | |
6517 | assert(object1->ref_count >= 1); | |
6518 | assert(object2->ref_count >= 2); | |
6519 | object1->ref_count++; | |
6520 | object2->ref_count--; | |
6521 | } | |
2d21ac55 A |
6522 | __TRANSPOSE_FIELD(named); |
6523 | /* "shadow_severed" refers to the object not its contents */ | |
91447636 A |
6524 | __TRANSPOSE_FIELD(phys_contiguous); |
6525 | __TRANSPOSE_FIELD(nophyscache); | |
b0d623f7 A |
6526 | /* "cached_list.next" points to transposed object */ |
6527 | object1->cached_list.next = (queue_entry_t) object2; | |
6528 | object2->cached_list.next = (queue_entry_t) object1; | |
6529 | /* "cached_list.prev" should be NULL */ | |
2d21ac55 | 6530 | assert(object1->cached_list.prev == NULL); |
2d21ac55 | 6531 | assert(object2->cached_list.prev == NULL); |
91447636 A |
6532 | __TRANSPOSE_FIELD(last_alloc); |
6533 | __TRANSPOSE_FIELD(sequential); | |
2d21ac55 A |
6534 | __TRANSPOSE_FIELD(pages_created); |
6535 | __TRANSPOSE_FIELD(pages_used); | |
6d2010ae | 6536 | __TRANSPOSE_FIELD(scan_collisions); |
91447636 A |
6537 | __TRANSPOSE_FIELD(cow_hint); |
6538 | __TRANSPOSE_FIELD(wimg_bits); | |
6d2010ae | 6539 | __TRANSPOSE_FIELD(set_cache_attr); |
2d21ac55 | 6540 | __TRANSPOSE_FIELD(code_signed); |
b0d623f7 A |
6541 | object1->transposed = TRUE; |
6542 | object2->transposed = TRUE; | |
6543 | __TRANSPOSE_FIELD(mapping_in_progress); | |
6544 | __TRANSPOSE_FIELD(volatile_empty); | |
6545 | __TRANSPOSE_FIELD(volatile_fault); | |
6546 | __TRANSPOSE_FIELD(all_reusable); | |
6547 | assert(object1->blocked_access); | |
6548 | assert(object2->blocked_access); | |
d9a64523 A |
6549 | __TRANSPOSE_FIELD(set_cache_attr); |
6550 | assert(!object1->object_is_shared_cache); | |
6551 | assert(!object2->object_is_shared_cache); | |
6552 | /* ignore purgeable_queue_type and purgeable_queue_group */ | |
6553 | assert(!object1->io_tracking); | |
6554 | assert(!object2->io_tracking); | |
6555 | #if VM_OBJECT_ACCESS_TRACKING | |
6556 | assert(!object1->access_tracking); | |
6557 | assert(!object2->access_tracking); | |
6558 | #endif /* VM_OBJECT_ACCESS_TRACKING */ | |
6559 | __TRANSPOSE_FIELD(no_tag_update); | |
6560 | #if CONFIG_SECLUDED_MEMORY | |
6561 | assert(!object1->eligible_for_secluded); | |
6562 | assert(!object2->eligible_for_secluded); | |
6563 | assert(!object1->can_grab_secluded); | |
6564 | assert(!object2->can_grab_secluded); | |
6565 | #else /* CONFIG_SECLUDED_MEMORY */ | |
6566 | assert(object1->__object3_unused_bits == 0); | |
6567 | assert(object2->__object3_unused_bits == 0); | |
6568 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
b0d623f7 | 6569 | #if UPL_DEBUG |
2d21ac55 A |
6570 | /* "uplq" refers to the object not its contents (see upl_transpose()) */ |
6571 | #endif | |
3e170ce0 A |
6572 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL)); |
6573 | assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL)); | |
6574 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL)); | |
6575 | assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL)); | |
91447636 A |
6576 | |
6577 | #undef __TRANSPOSE_FIELD | |
6578 | ||
6579 | retval = KERN_SUCCESS; | |
6580 | ||
6581 | done: | |
6582 | /* | |
6583 | * Cleanup. | |
6584 | */ | |
6585 | if (tmp_object != VM_OBJECT_NULL) { | |
91447636 A |
6586 | vm_object_unlock(tmp_object); |
6587 | /* | |
6588 | * Re-initialize the temporary object to avoid | |
6589 | * deallocating a real pager. | |
6590 | */ | |
6591 | _vm_object_allocate(transpose_size, tmp_object); | |
6592 | vm_object_deallocate(tmp_object); | |
6593 | tmp_object = VM_OBJECT_NULL; | |
6594 | } | |
6595 | ||
6596 | if (object1_locked) { | |
6597 | vm_object_unlock(object1); | |
6598 | object1_locked = FALSE; | |
6599 | } | |
6600 | if (object2_locked) { | |
6601 | vm_object_unlock(object2); | |
6602 | object2_locked = FALSE; | |
6603 | } | |
b0d623f7 A |
6604 | |
6605 | vm_object_transpose_count++; | |
91447636 A |
6606 | |
6607 | return retval; | |
6608 | } | |
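/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the generic form of the deadlock-avoidance idiom used above.  When two
 * VM objects must be locked at the same time, always take the locks in
 * ascending address order so concurrent transposes cannot deadlock.
 * The helper name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
vm_object_lock_pair(vm_object_t a, vm_object_t b)
{
	if (a > b) {
		vm_object_t tmp;

		tmp = a;
		a = b;
		b = tmp;
	}
	vm_object_lock(a);	/* lower address first */
	vm_object_lock(b);
}
#endif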
0c530ab8 A |
6609 | |
6610 | ||
2d21ac55 | 6611 | /* |
b0d623f7 | 6612 | * vm_object_cluster_size |
2d21ac55 A |
6613 | * |
6614 | * Determine how big a cluster we should issue an I/O for... | |
6615 | * | |
6616 | * Inputs: *start == offset of page needed | |
6617 | * *length == maximum cluster pager can handle | |
6618 | * Outputs: *start == beginning offset of cluster | |
6619 | * *length == length of cluster to try | |
6620 | * | |
6621 | * The original *start will be encompassed by the cluster | |
6622 | * | |
6623 | */ | |
6624 | extern int speculative_reads_disabled; | |
6d2010ae | 6625 | |
39037602 A |
6626 | /* |
6627 | * Try to always keep these values an even multiple of PAGE_SIZE. We use these values | |
6628 | * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to | |
6629 | * always be page-aligned. The derivation could involve operations (e.g. division) | |
6630 | * that could give us non-page-size aligned values if we start out with values that | |
6631 | * are odd multiples of PAGE_SIZE. | |
6632 | */ | |
c3c9b80d | 6633 | #if !XNU_TARGET_OS_OSX |
0a7de745 | 6634 | unsigned int preheat_max_bytes = (1024 * 512); |
c3c9b80d | 6635 | #else /* !XNU_TARGET_OS_OSX */ |
0a7de745 | 6636 | unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; |
c3c9b80d | 6637 | #endif /* !XNU_TARGET_OS_OSX */ |
fe8ab488 | 6638 | unsigned int preheat_min_bytes = (1024 * 32); |
2d21ac55 | 6639 | |
2d21ac55 A |
6640 | |
6641 | __private_extern__ void | |
6642 | vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, | |
0a7de745 | 6643 | vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming) |
2d21ac55 | 6644 | { |
0a7de745 A |
6645 | vm_size_t pre_heat_size; |
6646 | vm_size_t tail_size; | |
6647 | vm_size_t head_size; | |
6648 | vm_size_t max_length; | |
6649 | vm_size_t cluster_size; | |
6650 | vm_object_offset_t object_size; | |
6651 | vm_object_offset_t orig_start; | |
6652 | vm_object_offset_t target_start; | |
6653 | vm_object_offset_t offset; | |
6654 | vm_behavior_t behavior; | |
6655 | boolean_t look_behind = TRUE; | |
6656 | boolean_t look_ahead = TRUE; | |
6657 | boolean_t isSSD = FALSE; | |
6658 | uint32_t throttle_limit; | |
6659 | int sequential_run; | |
6660 | int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; | |
6661 | vm_size_t max_ph_size; | |
6662 | vm_size_t min_ph_size; | |
2d21ac55 A |
6663 | |
6664 | assert( !(*length & PAGE_MASK)); | |
6665 | assert( !(*start & PAGE_MASK_64)); | |
6666 | ||
6d2010ae A |
6667 | /* |
6668 | * remember maximum length of run requested
6669 | */ | |
6670 | max_length = *length; | |
2d21ac55 A |
6671 | /* |
6672 | * we'll always return a cluster size of at least | |
6673 | * 1 page, since the original fault must always | |
6674 | * be processed | |
6675 | */ | |
6676 | *length = PAGE_SIZE; | |
b0d623f7 | 6677 | *io_streaming = 0; |
2d21ac55 | 6678 | |
6d2010ae | 6679 | if (speculative_reads_disabled || fault_info == NULL) { |
0a7de745 | 6680 | /* |
2d21ac55 A |
6681 | * no cluster... just fault the page in |
6682 | */ | |
0a7de745 | 6683 | return; |
2d21ac55 A |
6684 | } |
6685 | orig_start = *start; | |
6686 | target_start = orig_start; | |
b0d623f7 | 6687 | cluster_size = round_page(fault_info->cluster_size); |
2d21ac55 A |
6688 | behavior = fault_info->behavior; |
6689 | ||
6690 | vm_object_lock(object); | |
6691 | ||
0a7de745 A |
6692 | if (object->pager == MEMORY_OBJECT_NULL) { |
6693 | goto out; /* pager is gone for this object, nothing more to do */ | |
6694 | } | |
5ba3f43e | 6695 | vnode_pager_get_isSSD(object->pager, &isSSD); |
6d2010ae | 6696 | |
fe8ab488 A |
6697 | min_ph_size = round_page(preheat_min_bytes); |
6698 | max_ph_size = round_page(preheat_max_bytes); | |
6d2010ae | 6699 | |
c3c9b80d | 6700 | #if XNU_TARGET_OS_OSX |
6d2010ae A |
6701 | if (isSSD) { |
6702 | min_ph_size /= 2; | |
6703 | max_ph_size /= 8; | |
39037602 A |
6704 | |
6705 | if (min_ph_size & PAGE_MASK_64) { | |
6706 | min_ph_size = trunc_page(min_ph_size); | |
6707 | } | |
6708 | ||
6709 | if (max_ph_size & PAGE_MASK_64) { | |
6710 | max_ph_size = trunc_page(max_ph_size); | |
6711 | } | |
6d2010ae | 6712 | } |
c3c9b80d | 6713 | #endif /* XNU_TARGET_OS_OSX */ |
39037602 | 6714 | |
0a7de745 | 6715 | if (min_ph_size < PAGE_SIZE) { |
fe8ab488 | 6716 | min_ph_size = PAGE_SIZE; |
0a7de745 | 6717 | } |
6d2010ae | 6718 | |
0a7de745 | 6719 | if (max_ph_size < PAGE_SIZE) { |
fe8ab488 | 6720 | max_ph_size = PAGE_SIZE; |
0a7de745 | 6721 | } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) { |
fe8ab488 | 6722 | max_ph_size = MAX_UPL_TRANSFER_BYTES; |
0a7de745 | 6723 | } |
6d2010ae | 6724 | |
0a7de745 A |
6725 | if (max_length > max_ph_size) { |
6726 | max_length = max_ph_size; | |
6727 | } | |
6d2010ae | 6728 | |
0a7de745 | 6729 | if (max_length <= PAGE_SIZE) { |
6d2010ae | 6730 | goto out; |
0a7de745 | 6731 | } |
6d2010ae | 6732 | |
0a7de745 A |
6733 | if (object->internal) { |
6734 | object_size = object->vo_size; | |
6735 | } else { | |
6736 | vnode_pager_get_object_size(object->pager, &object_size); | |
6737 | } | |
2d21ac55 A |
6738 | |
6739 | object_size = round_page_64(object_size); | |
6740 | ||
6741 | if (orig_start >= object_size) { | |
0a7de745 | 6742 | /* |
2d21ac55 A |
6743 | * fault occurred beyond the EOF... |
6744 | * we need to punt w/o changing the | |
6745 | * starting offset | |
6746 | */ | |
0a7de745 | 6747 | goto out; |
2d21ac55 A |
6748 | } |
6749 | if (object->pages_used > object->pages_created) { | |
0a7de745 | 6750 | /* |
2d21ac55 A |
6751 | * must have wrapped our 32 bit counters |
6752 | * so reset | |
6753 | */ | |
0a7de745 | 6754 | object->pages_used = object->pages_created = 0; |
2d21ac55 A |
6755 | } |
6756 | if ((sequential_run = object->sequential)) { | |
0a7de745 A |
6757 | if (sequential_run < 0) { |
6758 | sequential_behavior = VM_BEHAVIOR_RSEQNTL; | |
6759 | sequential_run = 0 - sequential_run; | |
6760 | } else { | |
6761 | sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; | |
6762 | } | |
2d21ac55 | 6763 | } |
6d2010ae | 6764 | switch (behavior) { |
2d21ac55 | 6765 | default: |
0a7de745 | 6766 | behavior = VM_BEHAVIOR_DEFAULT; |
f427ee49 | 6767 | OS_FALLTHROUGH; |
2d21ac55 A |
6768 | |
6769 | case VM_BEHAVIOR_DEFAULT: | |
0a7de745 A |
6770 | if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) { |
6771 | goto out; | |
6772 | } | |
2d21ac55 | 6773 | |
b0d623f7 | 6774 | if (sequential_run >= (3 * PAGE_SIZE)) { |
0a7de745 | 6775 | pre_heat_size = sequential_run + PAGE_SIZE; |
2d21ac55 | 6776 | |
0a7de745 A |
6777 | if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { |
6778 | look_behind = FALSE; | |
6779 | } else { | |
6780 | look_ahead = FALSE; | |
6781 | } | |
b0d623f7 A |
6782 | |
6783 | *io_streaming = 1; | |
2d21ac55 | 6784 | } else { |
fe8ab488 | 6785 | if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) { |
0a7de745 | 6786 | /* |
2d21ac55 A |
6787 | * prime the pump |
6788 | */ | |
0a7de745 | 6789 | pre_heat_size = min_ph_size; |
6d2010ae A |
6790 | } else { |
6791 | /* | |
6792 | * Linear growth in PH size: The maximum size is max_length... | |
0a7de745 | 6793 | * this calculation will result in a size that is neither a
6d2010ae A |
6794 | * power of 2 nor a multiple of PAGE_SIZE... so round |
6795 | * it up to the nearest PAGE_SIZE boundary | |
6796 | */ | |
3e170ce0 | 6797 | pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created; |
fe8ab488 | 6798 | |
0a7de745 | 6799 | if (pre_heat_size < min_ph_size) { |
fe8ab488 | 6800 | pre_heat_size = min_ph_size; |
0a7de745 | 6801 | } else { |
6d2010ae | 6802 | pre_heat_size = round_page(pre_heat_size); |
0a7de745 | 6803 | } |
2d21ac55 | 6804 | } |
2d21ac55 A |
6805 | } |
6806 | break; | |
6807 | ||
6808 | case VM_BEHAVIOR_RANDOM: | |
0a7de745 A |
6809 | if ((pre_heat_size = cluster_size) <= PAGE_SIZE) { |
6810 | goto out; | |
6811 | } | |
6812 | break; | |
2d21ac55 A |
6813 | |
6814 | case VM_BEHAVIOR_SEQUENTIAL: | |
0a7de745 A |
6815 | if ((pre_heat_size = cluster_size) == 0) { |
6816 | pre_heat_size = sequential_run + PAGE_SIZE; | |
6817 | } | |
2d21ac55 | 6818 | look_behind = FALSE; |
b0d623f7 | 6819 | *io_streaming = 1; |
2d21ac55 | 6820 | |
0a7de745 | 6821 | break; |
2d21ac55 A |
6822 | |
6823 | case VM_BEHAVIOR_RSEQNTL: | |
0a7de745 A |
6824 | if ((pre_heat_size = cluster_size) == 0) { |
6825 | pre_heat_size = sequential_run + PAGE_SIZE; | |
6826 | } | |
2d21ac55 | 6827 | look_ahead = FALSE; |
b0d623f7 | 6828 | *io_streaming = 1; |
2d21ac55 | 6829 | |
0a7de745 | 6830 | break; |
2d21ac55 | 6831 | } |
b0d623f7 A |
6832 | throttle_limit = (uint32_t) max_length; |
6833 | assert(throttle_limit == max_length); | |
6834 | ||
39236c6e | 6835 | if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) { |
0a7de745 | 6836 | if (max_length > throttle_limit) { |
b0d623f7 | 6837 | max_length = throttle_limit; |
0a7de745 A |
6838 | } |
6839 | } | |
6840 | if (pre_heat_size > max_length) { | |
6841 | pre_heat_size = max_length; | |
b0d623f7 | 6842 | } |
2d21ac55 | 6843 | |
fe8ab488 | 6844 | if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) { |
316670eb | 6845 | unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count; |
0a7de745 | 6846 | |
316670eb | 6847 | if (consider_free < vm_page_throttle_limit) { |
6d2010ae | 6848 | pre_heat_size = trunc_page(pre_heat_size / 16); |
316670eb | 6849 | } else if (consider_free < vm_page_free_target) { |
6d2010ae | 6850 | pre_heat_size = trunc_page(pre_heat_size / 4); |
316670eb | 6851 | } |
0a7de745 A |
6852 | |
6853 | if (pre_heat_size < min_ph_size) { | |
fe8ab488 | 6854 | pre_heat_size = min_ph_size; |
0a7de745 | 6855 | } |
b0d623f7 | 6856 | } |
2d21ac55 | 6857 | if (look_ahead == TRUE) { |
0a7de745 | 6858 | if (look_behind == TRUE) { |
b0d623f7 | 6859 | /* |
0a7de745 | 6860 | * if we get here it's due to a random access...
b0d623f7 A |
6861 | * so we want to center the original fault address |
6862 | * within the cluster we will issue... make sure | |
6863 | * to calculate 'head_size' as a multiple of PAGE_SIZE... | |
6864 | * 'pre_heat_size' is a multiple of PAGE_SIZE but not | |
6865 | * necessarily an even number of pages so we need to truncate | |
6866 | * the result to a PAGE_SIZE boundary | |
6867 | */ | |
6868 | head_size = trunc_page(pre_heat_size / 2); | |
2d21ac55 | 6869 | |
0a7de745 | 6870 | if (target_start > head_size) { |
b0d623f7 | 6871 | target_start -= head_size; |
0a7de745 | 6872 | } else { |
b0d623f7 | 6873 | target_start = 0; |
0a7de745 | 6874 | } |
2d21ac55 | 6875 | |
b0d623f7 A |
6876 | /* |
6877 | * 'target_start' at this point represents the beginning offset | |
6878 | * of the cluster we are considering... 'orig_start' will be in | |
6879 | * the center of this cluster if we didn't have to clip the start | |
6880 | * due to running into the start of the file | |
6881 | */ | |
6882 | } | |
0a7de745 A |
6883 | if ((target_start + pre_heat_size) > object_size) { |
6884 | pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start)); | |
6885 | } | |
b0d623f7 A |
6886 | /* |
6887 | * at this point calculate the number of pages beyond the original fault
6888 | * address that we want to consider... this is guaranteed not to extend beyond | |
6889 | * the current EOF... | |
6890 | */ | |
6891 | assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start)); | |
0a7de745 | 6892 | tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE; |
2d21ac55 | 6893 | } else { |
0a7de745 | 6894 | if (pre_heat_size > target_start) { |
6d2010ae A |
6895 | /* |
6896 | * since pre_heat_size is always smaller than 2^32,
6897 | * if it is larger than target_start (a 64 bit value)
6898 | * it is safe to clip target_start to 32 bits | |
6899 | */ | |
0a7de745 | 6900 | pre_heat_size = (vm_size_t) target_start; |
6d2010ae | 6901 | } |
2d21ac55 A |
6902 | tail_size = 0; |
6903 | } | |
b0d623f7 | 6904 | assert( !(target_start & PAGE_MASK_64)); |
39037602 | 6905 | assert( !(pre_heat_size & PAGE_MASK_64)); |
b0d623f7 | 6906 | |
0a7de745 A |
6907 | if (pre_heat_size <= PAGE_SIZE) { |
6908 | goto out; | |
6909 | } | |
2d21ac55 A |
6910 | |
6911 | if (look_behind == TRUE) { | |
0a7de745 | 6912 | /* |
2d21ac55 | 6913 | * take a look at the pages before the original |
b0d623f7 | 6914 | * faulting offset... recalculate this in case |
0a7de745 | 6915 | * we had to clip 'pre_heat_size' above to keep |
b0d623f7 | 6916 | * from running past the EOF. |
2d21ac55 | 6917 | */ |
0a7de745 | 6918 | head_size = pre_heat_size - tail_size - PAGE_SIZE; |
2d21ac55 | 6919 | |
0a7de745 A |
6920 | for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) { |
6921 | /* | |
6922 | * don't poke below the lowest offset | |
2d21ac55 | 6923 | */ |
0a7de745 A |
6924 | if (offset < fault_info->lo_offset) { |
6925 | break; | |
6926 | } | |
39037602 A |
6927 | /* |
6928 | * for external objects or internal objects w/o a pager, | |
6929 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN | |
2d21ac55 | 6930 | */ |
39037602 | 6931 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
39236c6e A |
6932 | break; |
6933 | } | |
2d21ac55 | 6934 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
0a7de745 | 6935 | /* |
2d21ac55 A |
6936 | * don't bridge resident pages |
6937 | */ | |
0a7de745 | 6938 | break; |
2d21ac55 A |
6939 | } |
6940 | *start = offset; | |
6941 | *length += PAGE_SIZE; | |
6942 | } | |
6943 | } | |
6944 | if (look_ahead == TRUE) { | |
0a7de745 A |
6945 | for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) { |
6946 | /* | |
6947 | * don't poke above the highest offset | |
2d21ac55 | 6948 | */ |
0a7de745 A |
6949 | if (offset >= fault_info->hi_offset) { |
6950 | break; | |
6951 | } | |
b0d623f7 A |
6952 | assert(offset < object_size); |
6953 | ||
39037602 A |
6954 | /* |
6955 | * for external objects or internal objects w/o a pager, | |
6956 | * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN | |
2d21ac55 | 6957 | */ |
fe8ab488 | 6958 | if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { |
39236c6e A |
6959 | break; |
6960 | } | |
2d21ac55 | 6961 | if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { |
0a7de745 | 6962 | /* |
2d21ac55 A |
6963 | * don't bridge resident pages |
6964 | */ | |
0a7de745 | 6965 | break; |
2d21ac55 A |
6966 | } |
6967 | *length += PAGE_SIZE; | |
6968 | } | |
6969 | } | |
6970 | out: | |
0a7de745 | 6971 | if (*length > max_length) { |
b0d623f7 | 6972 | *length = max_length; |
0a7de745 | 6973 | } |
b0d623f7 | 6974 | |
2d21ac55 | 6975 | vm_object_unlock(object); |
0a7de745 | 6976 | |
316670eb | 6977 | DTRACE_VM1(clustersize, vm_size_t, *length); |
2d21ac55 A |
6978 | } |
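/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * how a fault-handling caller might use vm_object_cluster_size().  On
 * entry *start holds the faulting page's offset and *length the largest
 * cluster the pager can handle; on return they describe the cluster to
 * issue, which always contains the original offset.  "fault_offset",
 * "fault_info" and "object" stand in for the caller's state.
 */
#if 0	/* example only -- not compiled */
	vm_object_offset_t cluster_start;
	vm_size_t cluster_length;
	uint32_t io_streaming = 0;

	cluster_start = trunc_page_64(fault_offset);	/* page-aligned fault offset */
	cluster_length = MAX_UPL_TRANSFER_BYTES;	/* assumed pager limit */

	vm_object_cluster_size(object, &cluster_start, &cluster_length,
	    fault_info, &io_streaming);

	/* request [cluster_start, cluster_start + cluster_length) from the pager */
#endif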
6979 | ||
6980 | ||
6981 | /* | |
6982 | * Allow manipulation of individual page state. This is actually part of | |
6983 | * the UPL regimen but takes place on the VM object rather than on a UPL | |
6984 | */ | |
0c530ab8 A |
6985 | |
6986 | kern_return_t | |
6987 | vm_object_page_op( | |
0a7de745 A |
6988 | vm_object_t object, |
6989 | vm_object_offset_t offset, | |
6990 | int ops, | |
6991 | ppnum_t *phys_entry, | |
6992 | int *flags) | |
0c530ab8 | 6993 | { |
0a7de745 | 6994 | vm_page_t dst_page; |
0c530ab8 A |
6995 | |
6996 | vm_object_lock(object); | |
6997 | ||
0a7de745 A |
6998 | if (ops & UPL_POP_PHYSICAL) { |
6999 | if (object->phys_contiguous) { | |
0c530ab8 A |
7000 | if (phys_entry) { |
7001 | *phys_entry = (ppnum_t) | |
0a7de745 | 7002 | (object->vo_shadow_offset >> PAGE_SHIFT); |
0c530ab8 A |
7003 | } |
7004 | vm_object_unlock(object); | |
7005 | return KERN_SUCCESS; | |
7006 | } else { | |
7007 | vm_object_unlock(object); | |
7008 | return KERN_INVALID_OBJECT; | |
7009 | } | |
7010 | } | |
0a7de745 | 7011 | if (object->phys_contiguous) { |
0c530ab8 A |
7012 | vm_object_unlock(object); |
7013 | return KERN_INVALID_OBJECT; | |
7014 | } | |
7015 | ||
0a7de745 A |
7016 | while (TRUE) { |
7017 | if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) { | |
0c530ab8 A |
7018 | vm_object_unlock(object); |
7019 | return KERN_FAILURE; | |
7020 | } | |
7021 | ||
7022 | /* Sync up on getting the busy bit */ | |
0a7de745 A |
7023 | if ((dst_page->vmp_busy || dst_page->vmp_cleaning) && |
7024 | (((ops & UPL_POP_SET) && | |
7025 | (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { | |
0c530ab8 A |
7026 | /* someone else is playing with the page, we will */ |
7027 | /* have to wait */ | |
7028 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); | |
7029 | continue; | |
7030 | } | |
7031 | ||
7032 | if (ops & UPL_POP_DUMP) { | |
0a7de745 A |
7033 | if (dst_page->vmp_pmapped == TRUE) { |
7034 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); | |
7035 | } | |
0c530ab8 | 7036 | |
b0d623f7 | 7037 | VM_PAGE_FREE(dst_page); |
0c530ab8 A |
7038 | break; |
7039 | } | |
7040 | ||
7041 | if (flags) { | |
0a7de745 | 7042 | *flags = 0; |
0c530ab8 A |
7043 | |
7044 | /* Get the condition of flags before requested ops */ | |
7045 | /* are undertaken */ | |
7046 | ||
0a7de745 A |
7047 | if (dst_page->vmp_dirty) { |
7048 | *flags |= UPL_POP_DIRTY; | |
7049 | } | |
7050 | if (dst_page->vmp_free_when_done) { | |
7051 | *flags |= UPL_POP_PAGEOUT; | |
7052 | } | |
7053 | if (dst_page->vmp_precious) { | |
7054 | *flags |= UPL_POP_PRECIOUS; | |
7055 | } | |
7056 | if (dst_page->vmp_absent) { | |
7057 | *flags |= UPL_POP_ABSENT; | |
7058 | } | |
7059 | if (dst_page->vmp_busy) { | |
7060 | *flags |= UPL_POP_BUSY; | |
7061 | } | |
0c530ab8 A |
7062 | } |
7063 | ||
7064 | /* The caller should have made a call either contingent with */ | |
7065 | /* or prior to this call to set UPL_POP_BUSY */ | |
0a7de745 | 7066 | if (ops & UPL_POP_SET) { |
0c530ab8 A |
7067 | /* The protection granted with this assert will */ |
7068 | /* not be complete. If the caller violates the */ | |
7069 | /* convention and attempts to change page state */ | |
7070 | /* without first setting busy we may not see it */ | |
7071 | /* because the page may already be busy. However */ | |
7072 | /* if such violations occur we will assert sooner */ | |
7073 | /* or later. */ | |
d9a64523 | 7074 | assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY)); |
316670eb A |
7075 | if (ops & UPL_POP_DIRTY) { |
7076 | SET_PAGE_DIRTY(dst_page, FALSE); | |
7077 | } | |
0a7de745 A |
7078 | if (ops & UPL_POP_PAGEOUT) { |
7079 | dst_page->vmp_free_when_done = TRUE; | |
7080 | } | |
7081 | if (ops & UPL_POP_PRECIOUS) { | |
7082 | dst_page->vmp_precious = TRUE; | |
7083 | } | |
7084 | if (ops & UPL_POP_ABSENT) { | |
7085 | dst_page->vmp_absent = TRUE; | |
7086 | } | |
7087 | if (ops & UPL_POP_BUSY) { | |
7088 | dst_page->vmp_busy = TRUE; | |
7089 | } | |
0c530ab8 A |
7090 | } |
7091 | ||
0a7de745 | 7092 | if (ops & UPL_POP_CLR) { |
d9a64523 | 7093 | assert(dst_page->vmp_busy); |
0a7de745 A |
7094 | if (ops & UPL_POP_DIRTY) { |
7095 | dst_page->vmp_dirty = FALSE; | |
7096 | } | |
7097 | if (ops & UPL_POP_PAGEOUT) { | |
7098 | dst_page->vmp_free_when_done = FALSE; | |
7099 | } | |
7100 | if (ops & UPL_POP_PRECIOUS) { | |
7101 | dst_page->vmp_precious = FALSE; | |
7102 | } | |
7103 | if (ops & UPL_POP_ABSENT) { | |
7104 | dst_page->vmp_absent = FALSE; | |
7105 | } | |
0c530ab8 | 7106 | if (ops & UPL_POP_BUSY) { |
0a7de745 | 7107 | dst_page->vmp_busy = FALSE; |
0c530ab8 A |
7108 | PAGE_WAKEUP(dst_page); |
7109 | } | |
7110 | } | |
0c530ab8 A |
7111 | if (phys_entry) { |
7112 | /* | |
7113 | * The physical page number will remain valid | |
7114 | * only if the page is kept busy. | |
0c530ab8 | 7115 | */ |
d9a64523 | 7116 | assert(dst_page->vmp_busy); |
39037602 | 7117 | *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page); |
0c530ab8 A |
7118 | } |
7119 | ||
7120 | break; | |
7121 | } | |
7122 | ||
7123 | vm_object_unlock(object); | |
7124 | return KERN_SUCCESS; | |
0c530ab8 A |
7125 | } |
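/*
 * Illustrative sketch (not part of the original source): one plausible
 * calling pattern for vm_object_page_op() that follows the UPL_POP_BUSY
 * convention described above.  "obj" and "off" are hypothetical
 * placeholders for an object and offset supplied by the caller.
 */
#if 0	/* example only */
	ppnum_t	phys;
	int	pflags;

	/* mark the page busy and retrieve its physical page number */
	if (vm_object_page_op(obj, off,
	    UPL_POP_SET | UPL_POP_BUSY, &phys, &pflags) == KERN_SUCCESS) {
		/* ... "phys" stays valid while the page is kept busy ... */

		/* clear the busy bit and wake up any waiters */
		(void) vm_object_page_op(obj, off,
		    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
	}
#endif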
7126 | ||
7127 | /* | |
0a7de745 A |
7128 | * vm_object_range_op offers a performance enhancement over |
7129 | * vm_object_page_op for page_op operations that do not require page- |
7130 | * level state to be returned from the call. Page_op was created to provide |
7131 | * a low-cost alternative to page manipulation via UPLs when only a single |
7132 | * page was involved. The range_op call extends the _op |
7133 | * family of functions to multiple pages; because no page-level state is |
7134 | * handled or returned, the caller avoids the overhead of the upl structures. |
7135 | */ | |
7136 | ||
7137 | kern_return_t | |
7138 | vm_object_range_op( | |
0a7de745 A |
7139 | vm_object_t object, |
7140 | vm_object_offset_t offset_beg, | |
7141 | vm_object_offset_t offset_end, | |
0c530ab8 | 7142 | int ops, |
0a7de745 | 7143 | uint32_t *range) |
0c530ab8 | 7144 | { |
0a7de745 A |
7145 | vm_object_offset_t offset; |
7146 | vm_page_t dst_page; | |
0c530ab8 | 7147 | |
b0d623f7 A |
7148 | if (offset_end - offset_beg > (uint32_t) -1) { |
7149 | /* range is too big and would overflow "*range" */ | |
7150 | return KERN_INVALID_ARGUMENT; | |
0a7de745 | 7151 | } |
0c530ab8 | 7152 | if (object->resident_page_count == 0) { |
0a7de745 A |
7153 | if (range) { |
7154 | if (ops & UPL_ROP_PRESENT) { | |
7155 | *range = 0; | |
b0d623f7 | 7156 | } else { |
0a7de745 | 7157 | *range = (uint32_t) (offset_end - offset_beg); |
b0d623f7 A |
7158 | assert(*range == (offset_end - offset_beg)); |
7159 | } | |
0c530ab8 A |
7160 | } |
7161 | return KERN_SUCCESS; | |
7162 | } | |
7163 | vm_object_lock(object); | |
7164 | ||
7165 | if (object->phys_contiguous) { | |
7166 | vm_object_unlock(object); | |
0a7de745 | 7167 | return KERN_INVALID_OBJECT; |
0c530ab8 | 7168 | } |
0a7de745 | 7169 | |
2d21ac55 | 7170 | offset = offset_beg & ~PAGE_MASK_64; |
0c530ab8 A |
7171 | |
7172 | while (offset < offset_end) { | |
7173 | dst_page = vm_page_lookup(object, offset); | |
7174 | if (dst_page != VM_PAGE_NULL) { | |
7175 | if (ops & UPL_ROP_DUMP) { | |
d9a64523 | 7176 | if (dst_page->vmp_busy || dst_page->vmp_cleaning) { |
6d2010ae | 7177 | /* |
0a7de745 | 7178 | * someone else is playing with the |
0c530ab8 A |
7179 | * page, we will have to wait |
7180 | */ | |
0a7de745 | 7181 | PAGE_SLEEP(object, dst_page, THREAD_UNINT); |
0c530ab8 A |
7182 | /* |
7183 | * need to re-look up the page since its |
7184 | * state may have changed while we slept; |
7185 | * it might even belong to a different object | |
7186 | * at this point | |
7187 | */ | |
7188 | continue; | |
7189 | } | |
0a7de745 | 7190 | if (dst_page->vmp_laundry) { |
316670eb | 7191 | vm_pageout_steal_laundry(dst_page, FALSE); |
0a7de745 | 7192 | } |
39037602 | 7193 | |
0a7de745 A |
7194 | if (dst_page->vmp_pmapped == TRUE) { |
7195 | pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); | |
7196 | } | |
0c530ab8 | 7197 | |
b0d623f7 | 7198 | VM_PAGE_FREE(dst_page); |
3e170ce0 | 7199 | } else if ((ops & UPL_ROP_ABSENT) |
0a7de745 | 7200 | && (!dst_page->vmp_absent || dst_page->vmp_busy)) { |
3e170ce0 A |
7201 | break; |
7202 | } | |
0a7de745 A |
7203 | } else if (ops & UPL_ROP_PRESENT) { |
7204 | break; | |
7205 | } | |
0c530ab8 A |
7206 | |
7207 | offset += PAGE_SIZE; | |
7208 | } | |
7209 | vm_object_unlock(object); | |
7210 | ||
2d21ac55 | 7211 | if (range) { |
0a7de745 A |
7212 | if (offset > offset_end) { |
7213 | offset = offset_end; | |
7214 | } | |
7215 | if (offset > offset_beg) { | |
b0d623f7 A |
7216 | *range = (uint32_t) (offset - offset_beg); |
7217 | assert(*range == (offset - offset_beg)); | |
7218 | } else { | |
7219 | *range = 0; | |
7220 | } | |
2d21ac55 | 7221 | } |
0c530ab8 A |
7222 | return KERN_SUCCESS; |
7223 | } | |
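/*
 * Illustrative sketch (not part of the original source): using
 * vm_object_range_op() to measure the leading run of resident pages.
 * With UPL_ROP_PRESENT the scan stops at the first non-resident page,
 * so "range" comes back as the byte length of that resident run.
 * "obj", "start" and "size" are hypothetical placeholders.
 */
#if 0	/* example only */
	uint32_t range = 0;

	if (vm_object_range_op(obj, start, start + size,
	    UPL_ROP_PRESENT, &range) == KERN_SUCCESS) {
		/* pages are resident for "range" bytes starting at "start" */
	}
#endif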
2d21ac55 | 7224 | |
39236c6e A |
7225 | /* |
7226 | * Used to point a pager directly to a range of memory (when the pager may be associated | |
7227 | * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently | |
7228 | * expect that the virtual address will denote the start of a range that is physically contiguous. | |
7229 | */ | |
0a7de745 A |
7230 | kern_return_t |
7231 | pager_map_to_phys_contiguous( | |
7232 | memory_object_control_t object, | |
7233 | memory_object_offset_t offset, | |
7234 | addr64_t base_vaddr, | |
7235 | vm_size_t size) | |
39236c6e A |
7236 | { |
7237 | ppnum_t page_num; | |
7238 | boolean_t clobbered_private; | |
7239 | kern_return_t retval; | |
7240 | vm_object_t pager_object; | |
7241 | ||
7242 | page_num = pmap_find_phys(kernel_pmap, base_vaddr); | |
7243 | ||
7244 | if (!page_num) { | |
7245 | retval = KERN_FAILURE; | |
7246 | goto out; | |
7247 | } | |
7248 | ||
7249 | pager_object = memory_object_control_to_vm_object(object); | |
7250 | ||
7251 | if (!pager_object) { | |
7252 | retval = KERN_FAILURE; | |
7253 | goto out; | |
7254 | } | |
7255 | ||
7256 | clobbered_private = pager_object->private; | |
39037602 A |
7257 | if (pager_object->private != TRUE) { |
7258 | vm_object_lock(pager_object); | |
7259 | pager_object->private = TRUE; | |
7260 | vm_object_unlock(pager_object); | |
7261 | } | |
39236c6e A |
7262 | retval = vm_object_populate_with_private(pager_object, offset, page_num, size); |
7263 | ||
39037602 A |
7264 | if (retval != KERN_SUCCESS) { |
7265 | if (pager_object->private != clobbered_private) { | |
7266 | vm_object_lock(pager_object); | |
7267 | pager_object->private = clobbered_private; | |
7268 | vm_object_unlock(pager_object); | |
7269 | } | |
7270 | } | |
39236c6e A |
7271 | |
7272 | out: | |
7273 | return retval; | |
7274 | } | |
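/*
 * Illustrative sketch (not part of the original source): the expected
 * calling pattern for pager_map_to_phys_contiguous().  "control",
 * "pager_offset", "kva" and "size" are hypothetical placeholders; as
 * noted above, "kva" must be a kernel virtual address backed by
 * physically contiguous memory.
 */
#if 0	/* example only */
	kern_return_t kr;

	kr = pager_map_to_phys_contiguous(control, pager_offset, kva, size);
	if (kr != KERN_SUCCESS) {
		/* address not mapped, or the pager has no VM object */
	}
#endif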
2d21ac55 A |
7275 | |
7276 | uint32_t scan_object_collision = 0; | |
7277 | ||
7278 | void | |
7279 | vm_object_lock(vm_object_t object) | |
7280 | { | |
0a7de745 A |
7281 | if (object == vm_pageout_scan_wants_object) { |
7282 | scan_object_collision++; | |
7283 | mutex_pause(2); | |
2d21ac55 | 7284 | } |
0a7de745 A |
7285 | DTRACE_VM(vm_object_lock_w); |
7286 | lck_rw_lock_exclusive(&object->Lock); | |
39037602 A |
7287 | #if DEVELOPMENT || DEBUG |
7288 | object->Lock_owner = current_thread(); | |
7289 | #endif | |
2d21ac55 A |
7290 | } |
7291 | ||
7292 | boolean_t | |
b0d623f7 | 7293 | vm_object_lock_avoid(vm_object_t object) |
2d21ac55 | 7294 | { |
0a7de745 A |
7295 | if (object == vm_pageout_scan_wants_object) { |
7296 | scan_object_collision++; | |
b0d623f7 | 7297 | return TRUE; |
2d21ac55 | 7298 | } |
b0d623f7 A |
7299 | return FALSE; |
7300 | } | |
7301 | ||
7302 | boolean_t | |
7303 | _vm_object_lock_try(vm_object_t object) | |
7304 | { | |
0a7de745 | 7305 | boolean_t retval; |
39037602 A |
7306 | |
7307 | retval = lck_rw_try_lock_exclusive(&object->Lock); | |
7308 | #if DEVELOPMENT || DEBUG | |
0a7de745 A |
7309 | if (retval == TRUE) { |
7310 | DTRACE_VM(vm_object_lock_w); | |
39037602 | 7311 | object->Lock_owner = current_thread(); |
0a7de745 | 7312 | } |
39037602 | 7313 | #endif |
0a7de745 | 7314 | return retval; |
2d21ac55 A |
7315 | } |
7316 | ||
b0d623f7 A |
7317 | boolean_t |
7318 | vm_object_lock_try(vm_object_t object) | |
7319 | { | |
6d2010ae A |
7320 | /* |
7321 | * Called from hibernate path so check before blocking. | |
7322 | */ | |
0a7de745 | 7323 | if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) { |
b0d623f7 A |
7324 | mutex_pause(2); |
7325 | } | |
7326 | return _vm_object_lock_try(object); | |
7327 | } | |
6d2010ae | 7328 | |
f427ee49 A |
7329 | /* |
7330 | * Lock the object exclusive. | |
7331 | * | |
7332 | * Returns true iff the thread had to spin or block before | |
7333 | * acquiring the lock. | |
7334 | */ | |
7335 | bool | |
7336 | vm_object_lock_check_contended(vm_object_t object) | |
7337 | { | |
7338 | bool contended; | |
7339 | if (object == vm_pageout_scan_wants_object) { | |
7340 | scan_object_collision++; | |
7341 | mutex_pause(2); | |
7342 | } | |
7343 | DTRACE_VM(vm_object_lock_w); | |
7344 | contended = lck_rw_lock_exclusive_check_contended(&object->Lock); | |
7345 | #if DEVELOPMENT || DEBUG | |
7346 | object->Lock_owner = current_thread(); | |
7347 | #endif | |
7348 | return contended; | |
7349 | } | |
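/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use the contention hint returned by
 * vm_object_lock_check_contended() to keep its critical section short
 * when other threads are waiting for the same object.
 */
#if 0	/* example only */
	bool was_contended;

	was_contended = vm_object_lock_check_contended(object);
	if (was_contended) {
		/* others had to wait: do the minimum work and get out */
	}
	/* ... exclusive work on the object ... */
	vm_object_unlock(object);
#endif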
7350 | ||
2d21ac55 A |
7351 | void |
7352 | vm_object_lock_shared(vm_object_t object) | |
7353 | { | |
0a7de745 A |
7354 | if (vm_object_lock_avoid(object)) { |
7355 | mutex_pause(2); | |
2d21ac55 | 7356 | } |
0a7de745 | 7357 | DTRACE_VM(vm_object_lock_r); |
2d21ac55 A |
7358 | lck_rw_lock_shared(&object->Lock); |
7359 | } | |
7360 | ||
5ba3f43e A |
7361 | boolean_t |
7362 | vm_object_lock_yield_shared(vm_object_t object) | |
7363 | { | |
7364 | boolean_t retval = FALSE, force_yield = FALSE; |
7365 | ||
7366 | vm_object_lock_assert_shared(object); | |
7367 | ||
7368 | force_yield = vm_object_lock_avoid(object); | |
7369 | ||
7370 | retval = lck_rw_lock_yield_shared(&object->Lock, force_yield); | |
0a7de745 A |
7371 | if (retval) { |
7372 | DTRACE_VM(vm_object_lock_yield); | |
7373 | } | |
5ba3f43e | 7374 | |
0a7de745 | 7375 | return retval; |
5ba3f43e A |
7376 | } |
7377 | ||
2d21ac55 A |
7378 | boolean_t |
7379 | vm_object_lock_try_shared(vm_object_t object) | |
7380 | { | |
0a7de745 A |
7381 | boolean_t retval; |
7382 | ||
7383 | if (vm_object_lock_avoid(object)) { | |
7384 | mutex_pause(2); | |
7385 | } | |
7386 | retval = lck_rw_try_lock_shared(&object->Lock); | |
7387 | if (retval) { | |
7388 | DTRACE_VM(vm_object_lock_r); | |
2d21ac55 | 7389 | } |
0a7de745 | 7390 | return retval; |
2d21ac55 | 7391 | } |
6d2010ae | 7392 | |
39037602 A |
7393 | boolean_t |
7394 | vm_object_lock_upgrade(vm_object_t object) | |
0a7de745 A |
7395 | { |
7396 | boolean_t retval; | |
39037602 A |
7397 | |
7398 | retval = lck_rw_lock_shared_to_exclusive(&object->Lock); | |
7399 | #if DEVELOPMENT || DEBUG | |
0a7de745 A |
7400 | if (retval == TRUE) { |
7401 | DTRACE_VM(vm_object_lock_w); | |
39037602 | 7402 | object->Lock_owner = current_thread(); |
0a7de745 | 7403 | } |
39037602 | 7404 | #endif |
0a7de745 | 7405 | return retval; |
39037602 A |
7406 | } |
7407 | ||
7408 | void | |
7409 | vm_object_unlock(vm_object_t object) | |
7410 | { | |
7411 | #if DEVELOPMENT || DEBUG | |
7412 | if (object->Lock_owner) { | |
0a7de745 | 7413 | if (object->Lock_owner != current_thread()) { |
39037602 | 7414 | panic("vm_object_unlock: not owner - %p\n", object); |
0a7de745 | 7415 | } |
39037602 | 7416 | object->Lock_owner = 0; |
0a7de745 | 7417 | DTRACE_VM(vm_object_unlock); |
39037602 A |
7418 | } |
7419 | #endif | |
7420 | lck_rw_done(&object->Lock); | |
7421 | } | |
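/*
 * Illustrative sketch (not part of the original source) of the
 * shared-then-upgrade pattern built from the primitives above.  It
 * assumes the usual lck_rw behavior that a failed upgrade drops the
 * shared hold, so the caller must re-take the lock and re-validate.
 */
#if 0	/* example only */
	vm_object_lock_shared(object);
	/* ... read-only inspection ... */
	if (!vm_object_lock_upgrade(object)) {
		/* upgrade failed and the shared hold was released */
		vm_object_lock(object);
		/* ... re-validate what was observed under the shared lock ... */
	}
	/* ... modifications requiring the exclusive lock ... */
	vm_object_unlock(object);
#endif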
7422 | ||
6d2010ae A |
7423 | |
7424 | unsigned int vm_object_change_wimg_mode_count = 0; | |
7425 | ||
7426 | /* | |
7427 | * The object must be locked | |
7428 | */ | |
7429 | void | |
7430 | vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) | |
7431 | { | |
7432 | vm_page_t p; | |
7433 | ||
7434 | vm_object_lock_assert_exclusive(object); | |
7435 | ||
7436 | vm_object_paging_wait(object, THREAD_UNINT); | |
7437 | ||
0a7de745 A |
7438 | vm_page_queue_iterate(&object->memq, p, vmp_listq) { |
7439 | if (!p->vmp_fictitious) { | |
39037602 | 7440 | pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode); |
0a7de745 | 7441 | } |
6d2010ae | 7442 | } |
0a7de745 | 7443 | if (wimg_mode == VM_WIMG_USE_DEFAULT) { |
6d2010ae | 7444 | object->set_cache_attr = FALSE; |
0a7de745 | 7445 | } else { |
6d2010ae | 7446 | object->set_cache_attr = TRUE; |
0a7de745 | 7447 | } |
6d2010ae A |
7448 | |
7449 | object->wimg_bits = wimg_mode; | |
7450 | ||
7451 | vm_object_change_wimg_mode_count++; | |
7452 | } | |
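/*
 * Illustrative sketch (not part of the original source): callers of
 * vm_object_change_wimg_mode() must hold the object lock exclusive, as
 * the assertion above requires.  VM_WIMG_USE_DEFAULT restores the
 * default cache attributes and clears set_cache_attr.
 */
#if 0	/* example only */
	vm_object_lock(object);
	vm_object_change_wimg_mode(object, VM_WIMG_USE_DEFAULT);
	vm_object_unlock(object);
#endif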
7453 | ||
7454 | #if CONFIG_FREEZE | |
7455 | ||
f427ee49 A |
7456 | extern struct freezer_context freezer_context_global; |
7457 | ||
3e170ce0 A |
7458 | /* |
7459 | * This routine does the "relocation" of previously | |
7460 | * compressed pages belonging to this object that are | |
7461 | * residing in a number of compressed segments into | |
7462 | * a set of compressed segments dedicated to hold | |
7463 | * compressed pages belonging to this object. | |
7464 | */ | |
7465 | ||
3e170ce0 A |
7466 | extern AbsoluteTime c_freezer_last_yield_ts; |
7467 | ||
0a7de745 A |
7468 | #define MAX_FREE_BATCH 32 |
7469 | #define FREEZER_DUTY_CYCLE_ON_MS 5 | |
7470 | #define FREEZER_DUTY_CYCLE_OFF_MS 5 | |
3e170ce0 A |
7471 | |
7472 | static int c_freezer_should_yield(void); | |
7473 | ||
7474 | ||
7475 | static int | |
7476 | c_freezer_should_yield() | |
7477 | { | |
0a7de745 A |
7478 | AbsoluteTime cur_time; |
7479 | uint64_t nsecs; | |
3e170ce0 A |
7480 | |
7481 | assert(c_freezer_last_yield_ts); | |
7482 | clock_get_uptime(&cur_time); | |
7483 | ||
7484 | SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts); | |
7485 | absolutetime_to_nanoseconds(cur_time, &nsecs); | |
7486 | ||
0a7de745 A |
7487 | if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) { |
7488 | return 1; | |
7489 | } | |
7490 | return 0; | |
3e170ce0 A |
7491 | } |
7492 | ||
7493 | ||
6d2010ae | 7494 | void |
3e170ce0 A |
7495 | vm_object_compressed_freezer_done() |
7496 | { | |
f427ee49 | 7497 | vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead)); |
3e170ce0 A |
7498 | } |
7499 | ||
7500 | ||
cb323159 | 7501 | uint32_t |
3e170ce0 | 7502 | vm_object_compressed_freezer_pageout( |
cb323159 | 7503 | vm_object_t object, uint32_t dirty_budget) |
6d2010ae | 7504 | { |
0a7de745 A |
7505 | vm_page_t p; |
7506 | vm_page_t local_freeq = NULL; | |
7507 | int local_freed = 0; | |
7508 | kern_return_t retval = KERN_SUCCESS; | |
7509 | int obj_resident_page_count_snapshot = 0; | |
cb323159 | 7510 | uint32_t paged_out_count = 0; |
3e170ce0 A |
7511 | |
7512 | assert(object != VM_OBJECT_NULL); | |
39037602 | 7513 | assert(object->internal); |
39236c6e | 7514 | |
6d2010ae | 7515 | vm_object_lock(object); |
39236c6e | 7516 | |
0a7de745 | 7517 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
39236c6e | 7518 | if (!object->pager_initialized) { |
3e170ce0 A |
7519 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7520 | ||
0a7de745 | 7521 | if (!object->pager_initialized) { |
3e170ce0 | 7522 | vm_object_compressor_pager_create(object); |
0a7de745 | 7523 | } |
39236c6e | 7524 | } |
fe8ab488 | 7525 | |
0a7de745 | 7526 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
3e170ce0 | 7527 | vm_object_unlock(object); |
cb323159 | 7528 | return paged_out_count; |
3e170ce0 | 7529 | } |
fe8ab488 | 7530 | } |
0a7de745 | 7531 | |
f427ee49 A |
7532 | /* |
7533 | * We could be freezing a shared internal object that might | |
7534 | * be part of some other thread's current VM operations. | |
7535 | * We skip it if there's a paging-in-progress or activity-in-progress | |
7536 | * because we could be here a long time with the map lock held. | |
7537 | * | |
7538 | * Note: We are holding the map locked while we wait. | |
7539 | * This is fine in the freezer path because the task | |
7540 | * is suspended and so this latency is acceptable. | |
7541 | */ | |
7542 | if (object->paging_in_progress || object->activity_in_progress) { | |
7543 | vm_object_unlock(object); | |
7544 | return paged_out_count; | |
7545 | } | |
7546 | ||
39037602 | 7547 | if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { |
0a7de745 | 7548 | vm_object_offset_t curr_offset = 0; |
3e170ce0 A |
7549 | |
7550 | /* | |
7551 | * Go through the object and make sure that any | |
7552 | * previously compressed pages are relocated into | |
7553 | * a compressed segment associated with our "freezer_chead". | |
7554 | */ | |
7555 | while (curr_offset < object->vo_size) { | |
3e170ce0 | 7556 | curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset); |
0a7de745 A |
7557 | |
7558 | if (curr_offset == (vm_object_offset_t) -1) { | |
3e170ce0 | 7559 | break; |
0a7de745 | 7560 | } |
3e170ce0 | 7561 | |
f427ee49 | 7562 | retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead)); |
3e170ce0 | 7563 | |
0a7de745 | 7564 | if (retval != KERN_SUCCESS) { |
3e170ce0 | 7565 | break; |
0a7de745 | 7566 | } |
fe8ab488 | 7567 | |
3e170ce0 A |
7568 | curr_offset += PAGE_SIZE_64; |
7569 | } | |
39236c6e A |
7570 | } |
7571 | ||
3e170ce0 A |
7572 | /* |
7573 | * We can't hold the object lock while heading down into the compressed pager | |
7574 | * layer because we might need the kernel map lock down there to allocate new | |
7575 | * compressor data structures. And if this same object is mapped in the kernel | |
7576 | * and there's a fault on it, then that thread will want the object lock while | |
7577 | * holding the kernel map lock. | |
7578 | * | |
7579 | * Since we are going to drop/grab the object lock repeatedly, we must make sure | |
7580 | * we won't be stuck in an infinite loop if the same page(s) keep getting | |
7581 | * decompressed. So we grab a snapshot of the number of pages in the object and | |
7582 | * we won't process any more than that number of pages. | |
7583 | */ | |
7584 | ||
7585 | obj_resident_page_count_snapshot = object->resident_page_count; | |
7586 | ||
7587 | vm_object_activity_begin(object); | |
7588 | ||
cb323159 | 7589 | while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) { |
39037602 | 7590 | p = (vm_page_t)vm_page_queue_first(&object->memq); |
3e170ce0 A |
7591 | |
7592 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0); | |
6d2010ae | 7593 | |
6d2010ae A |
7594 | vm_page_lockspin_queues(); |
7595 | ||
d9a64523 | 7596 | if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) { |
3e170ce0 A |
7597 | vm_page_unlock_queues(); |
7598 | ||
7599 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0); | |
7600 | ||
0a7de745 A |
7601 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
7602 | vm_page_queue_enter(&object->memq, p, vmp_listq); | |
3e170ce0 A |
7603 | |
7604 | continue; | |
7605 | } | |
7606 | ||
d9a64523 | 7607 | if (p->vmp_pmapped == TRUE) { |
3e170ce0 A |
7608 | int refmod_state, pmap_flags; |
7609 | ||
d9a64523 | 7610 | if (p->vmp_dirty || p->vmp_precious) { |
3e170ce0 A |
7611 | pmap_flags = PMAP_OPTIONS_COMPRESSOR; |
7612 | } else { | |
7613 | pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; | |
7614 | } | |
7615 | ||
39037602 | 7616 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL); |
3e170ce0 A |
7617 | if (refmod_state & VM_MEM_MODIFIED) { |
7618 | SET_PAGE_DIRTY(p, FALSE); | |
7619 | } | |
7620 | } | |
0a7de745 | 7621 | |
d9a64523 | 7622 | if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { |
3e170ce0 A |
7623 | /* |
7624 | * Clean and non-precious page. | |
7625 | */ | |
7626 | vm_page_unlock_queues(); | |
7627 | VM_PAGE_FREE(p); | |
7628 | ||
7629 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0); | |
7630 | continue; | |
7631 | } | |
7632 | ||
0a7de745 | 7633 | if (p->vmp_laundry) { |
3e170ce0 | 7634 | vm_pageout_steal_laundry(p, TRUE); |
0a7de745 | 7635 | } |
3e170ce0 | 7636 | |
39037602 A |
7637 | vm_page_queues_remove(p, TRUE); |
7638 | ||
3e170ce0 A |
7639 | vm_page_unlock_queues(); |
7640 | ||
7641 | ||
316670eb | 7642 | /* |
3e170ce0 A |
7643 | * In case the compressor fails to compress this page, we need it at |
7644 | * the back of the object memq so that we don't keep trying to process it. | |
7645 | * Make the move here while we have the object lock held. | |
316670eb | 7646 | */ |
39236c6e | 7647 | |
0a7de745 A |
7648 | vm_page_queue_remove(&object->memq, p, vmp_listq); |
7649 | vm_page_queue_enter(&object->memq, p, vmp_listq); | |
39236c6e | 7650 | |
3e170ce0 A |
7651 | /* |
7652 | * Grab an activity_in_progress here for vm_pageout_compress_page() to consume. | |
7653 | * | |
7654 | * Mark the page busy so no one messes with it while we have the object lock dropped. | |
7655 | */ | |
d9a64523 | 7656 | p->vmp_busy = TRUE; |
39236c6e | 7657 | |
3e170ce0 | 7658 | vm_object_activity_begin(object); |
39236c6e | 7659 | |
3e170ce0 A |
7660 | vm_object_unlock(object); |
7661 | ||
f427ee49 A |
7662 | if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead), |
7663 | (freezer_context_global.freezer_ctx_compressor_scratch_buf), | |
7664 | p) == KERN_SUCCESS) { | |
3e170ce0 A |
7665 | /* |
7666 | * page has already been un-tabled from the object via 'vm_page_remove' | |
7667 | */ | |
d9a64523 | 7668 | p->vmp_snext = local_freeq; |
3e170ce0 A |
7669 | local_freeq = p; |
7670 | local_freed++; | |
cb323159 | 7671 | paged_out_count++; |
3e170ce0 A |
7672 | |
7673 | if (local_freed >= MAX_FREE_BATCH) { | |
0a7de745 | 7674 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
d9a64523 | 7675 | |
3e170ce0 | 7676 | vm_page_free_list(local_freeq, TRUE); |
0a7de745 | 7677 | |
3e170ce0 A |
7678 | local_freeq = NULL; |
7679 | local_freed = 0; | |
39236c6e | 7680 | } |
f427ee49 | 7681 | freezer_context_global.freezer_ctx_uncompressed_pages++; |
3e170ce0 A |
7682 | } |
7683 | KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); | |
7684 | ||
7685 | if (local_freed == 0 && c_freezer_should_yield()) { | |
3e170ce0 A |
7686 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7687 | clock_get_uptime(&c_freezer_last_yield_ts); | |
316670eb | 7688 | } |
3e170ce0 A |
7689 | |
7690 | vm_object_lock(object); | |
6d2010ae A |
7691 | } |
7692 | ||
3e170ce0 | 7693 | if (local_freeq) { |
0a7de745 | 7694 | OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); |
d9a64523 | 7695 | |
3e170ce0 | 7696 | vm_page_free_list(local_freeq, TRUE); |
0a7de745 | 7697 | |
3e170ce0 A |
7698 | local_freeq = NULL; |
7699 | local_freed = 0; | |
7700 | } | |
0a7de745 | 7701 | |
3e170ce0 A |
7702 | vm_object_activity_end(object); |
7703 | ||
6d2010ae | 7704 | vm_object_unlock(object); |
3e170ce0 A |
7705 | |
7706 | if (c_freezer_should_yield()) { | |
3e170ce0 A |
7707 | thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); |
7708 | clock_get_uptime(&c_freezer_last_yield_ts); | |
7709 | } | |
cb323159 | 7710 | return paged_out_count; |
6d2010ae A |
7711 | } |
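/*
 * Illustrative sketch (not part of the original source): one plausible
 * step of a freeze pass.  "object" and "dirty_budget" are hypothetical
 * placeholders; the return value is the number of pages actually handed
 * to the freezer's compressed segments, which the caller can subtract
 * from its remaining budget.
 */
#if 0	/* example only */
	uint32_t paged_out;

	paged_out = vm_object_compressed_freezer_pageout(object, dirty_budget);
	dirty_budget -= paged_out;
#endif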
7712 | ||
6d2010ae | 7713 | #endif /* CONFIG_FREEZE */ |
fe8ab488 A |
7714 | |
7715 | ||
3e170ce0 A |
7716 | void |
7717 | vm_object_pageout( | |
7718 | vm_object_t object) | |
7719 | { | |
0a7de745 A |
7720 | vm_page_t p, next; |
7721 | struct vm_pageout_queue *iq; | |
3e170ce0 | 7722 | |
0a7de745 | 7723 | if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) { |
39037602 | 7724 | return; |
0a7de745 | 7725 | } |
39037602 | 7726 | |
3e170ce0 | 7727 | iq = &vm_pageout_queue_internal; |
0a7de745 | 7728 | |
3e170ce0 | 7729 | assert(object != VM_OBJECT_NULL); |
0a7de745 | 7730 | |
3e170ce0 A |
7731 | vm_object_lock(object); |
7732 | ||
7733 | if (!object->internal || | |
7734 | object->terminating || | |
7735 | !object->alive) { | |
7736 | vm_object_unlock(object); | |
7737 | return; | |
7738 | } | |
7739 | ||
0a7de745 | 7740 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
3e170ce0 | 7741 | if (!object->pager_initialized) { |
3e170ce0 A |
7742 | vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); |
7743 | ||
0a7de745 | 7744 | if (!object->pager_initialized) { |
3e170ce0 | 7745 | vm_object_compressor_pager_create(object); |
0a7de745 | 7746 | } |
3e170ce0 A |
7747 | } |
7748 | ||
0a7de745 | 7749 | if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { |
3e170ce0 A |
7750 | vm_object_unlock(object); |
7751 | return; | |
7752 | } | |
7753 | } | |
0a7de745 A |
7754 | |
7755 | ReScan: | |
39037602 | 7756 | next = (vm_page_t)vm_page_queue_first(&object->memq); |
3e170ce0 | 7757 | |
39037602 | 7758 | while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { |
3e170ce0 | 7759 | p = next; |
d9a64523 | 7760 | next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); |
0a7de745 | 7761 | |
d9a64523 | 7762 | assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q); |
0a7de745 | 7763 | |
d9a64523 A |
7764 | if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) || |
7765 | p->vmp_cleaning || | |
7766 | p->vmp_laundry || | |
7767 | p->vmp_busy || | |
7768 | p->vmp_absent || | |
7769 | p->vmp_error || | |
7770 | p->vmp_fictitious || | |
3e170ce0 A |
7771 | VM_PAGE_WIRED(p)) { |
7772 | /* | |
7773 | * Page is already being cleaned or can't be cleaned. | |
7774 | */ | |
7775 | continue; | |
7776 | } | |
d9a64523 | 7777 | if (vm_compressor_low_on_space()) { |
0a7de745 | 7778 | break; |
d9a64523 | 7779 | } |
3e170ce0 A |
7780 | |
7781 | /* Throw to the pageout queue */ | |
7782 | ||
7783 | vm_page_lockspin_queues(); | |
3e170ce0 | 7784 | |
3e170ce0 | 7785 | if (VM_PAGE_Q_THROTTLED(iq)) { |
3e170ce0 | 7786 | iq->pgo_draining = TRUE; |
0a7de745 | 7787 | |
3e170ce0 | 7788 | assert_wait((event_t) (&iq->pgo_laundry + 1), |
0a7de745 | 7789 | THREAD_INTERRUPTIBLE); |
3e170ce0 A |
7790 | vm_page_unlock_queues(); |
7791 | vm_object_unlock(object); | |
0a7de745 | 7792 | |
3e170ce0 A |
7793 | thread_block(THREAD_CONTINUE_NULL); |
7794 | ||
7795 | vm_object_lock(object); | |
7796 | goto ReScan; | |
7797 | } | |
7798 | ||
d9a64523 A |
7799 | assert(!p->vmp_fictitious); |
7800 | assert(!p->vmp_busy); | |
7801 | assert(!p->vmp_absent); | |
7802 | assert(!p->vmp_unusual); | |
7803 | assert(!p->vmp_error); | |
3e170ce0 | 7804 | assert(!VM_PAGE_WIRED(p)); |
d9a64523 | 7805 | assert(!p->vmp_cleaning); |
3e170ce0 | 7806 | |
d9a64523 | 7807 | if (p->vmp_pmapped == TRUE) { |
3e170ce0 A |
7808 | int refmod_state; |
7809 | int pmap_options; | |
7810 | ||
39037602 A |
7811 | /* |
7812 | * Tell pmap the page should be accounted | |
7813 | * for as "compressed" if it's been modified. | |
7814 | */ | |
7815 | pmap_options = | |
0a7de745 | 7816 | PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; |
d9a64523 | 7817 | if (p->vmp_dirty || p->vmp_precious) { |
3e170ce0 | 7818 | /* |
39037602 A |
7819 | * We already know it's been modified, |
7820 | * so tell pmap to account for it | |
7821 | * as "compressed". | |
3e170ce0 | 7822 | */ |
39037602 | 7823 | pmap_options = PMAP_OPTIONS_COMPRESSOR; |
3e170ce0 | 7824 | } |
39037602 | 7825 | refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), |
0a7de745 A |
7826 | pmap_options, |
7827 | NULL); | |
3e170ce0 A |
7828 | if (refmod_state & VM_MEM_MODIFIED) { |
7829 | SET_PAGE_DIRTY(p, FALSE); | |
7830 | } | |
7831 | } | |
7832 | ||
d9a64523 | 7833 | if (!p->vmp_dirty && !p->vmp_precious) { |
3e170ce0 A |
7834 | vm_page_unlock_queues(); |
7835 | VM_PAGE_FREE(p); | |
7836 | continue; | |
7837 | } | |
39037602 A |
7838 | vm_page_queues_remove(p, TRUE); |
7839 | ||
5ba3f43e | 7840 | vm_pageout_cluster(p); |
0a7de745 | 7841 | |
5ba3f43e | 7842 | vm_page_unlock_queues(); |
3e170ce0 | 7843 | } |
3e170ce0 A |
7844 | vm_object_unlock(object); |
7845 | } | |
7846 | ||
7847 | ||
fe8ab488 A |
7848 | #if CONFIG_IOSCHED |
7849 | void | |
7850 | vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio) | |
7851 | { | |
0a7de745 A |
7852 | io_reprioritize_req_t req; |
7853 | struct vnode *devvp = NULL; | |
fe8ab488 | 7854 | |
0a7de745 | 7855 | if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { |
fe8ab488 | 7856 | return; |
0a7de745 A |
7857 | } |
7858 | ||
3e170ce0 A |
7859 | /* |
7860 | * Create the request for I/O reprioritization. | |
7861 | * We use the noblock variant of zalloc because we're holding the object | |
7862 | * lock here and we could cause a deadlock in low memory conditions. | |
7863 | */ | |
7864 | req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone); | |
0a7de745 | 7865 | if (req == NULL) { |
3e170ce0 | 7866 | return; |
0a7de745 | 7867 | } |
fe8ab488 A |
7868 | req->blkno = blkno; |
7869 | req->len = len; | |
7870 | req->priority = prio; | |
7871 | req->devvp = devvp; | |
7872 | ||
7873 | /* Insert request into the reprioritization list */ | |
7874 | IO_REPRIORITIZE_LIST_LOCK(); | |
7875 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); | |
7876 | IO_REPRIORITIZE_LIST_UNLOCK(); | |
7877 | ||
7878 | /* Wakeup reprioritize thread */ | |
0a7de745 | 7879 | IO_REPRIO_THREAD_WAKEUP(); |
fe8ab488 | 7880 | |
0a7de745 A |
7881 | return; |
7882 | } | |
fe8ab488 A |
7883 | |
7884 | void | |
7885 | vm_decmp_upl_reprioritize(upl_t upl, int prio) | |
7886 | { | |
7887 | int offset; | |
7888 | vm_object_t object; | |
0a7de745 | 7889 | io_reprioritize_req_t req; |
fe8ab488 | 7890 | struct vnode *devvp = NULL; |
0a7de745 A |
7891 | uint64_t blkno; |
7892 | uint32_t len; | |
7893 | upl_t io_upl; | |
7894 | uint64_t *io_upl_reprio_info; | |
7895 | int io_upl_size; | |
fe8ab488 | 7896 | |
0a7de745 | 7897 | if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) { |
fe8ab488 | 7898 | return; |
0a7de745 | 7899 | } |
fe8ab488 | 7900 | |
0a7de745 A |
7901 | /* |
7902 | * We don't want to perform any allocations with the upl lock held since that might |
7903 | * result in a deadlock. If the system is low on memory, the pageout thread would | |
fe8ab488 A |
7904 | * try to page things out and might wait on this lock. If we are waiting for the memory to |
7905 | * be freed up by the pageout thread, it would be a deadlock. | |
7906 | */ | |
7907 | ||
7908 | ||
7909 | /* First step is just to get the size of the upl to find out how big the reprio info is */ | |
0a7de745 | 7910 | if (!upl_try_lock(upl)) { |
a1c7dba1 | 7911 | return; |
0a7de745 | 7912 | } |
a1c7dba1 | 7913 | |
fe8ab488 A |
7914 | if (upl->decmp_io_upl == NULL) { |
7915 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ | |
7916 | upl_unlock(upl); | |
7917 | return; | |
7918 | } | |
7919 | ||
7920 | io_upl = upl->decmp_io_upl; | |
7921 | assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); | |
f427ee49 A |
7922 | assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size), |
7923 | "upl %p offset 0x%llx size 0x%x\n", | |
7924 | io_upl, io_upl->u_offset, io_upl->u_size); | |
7925 | io_upl_size = io_upl->u_size; | |
fe8ab488 | 7926 | upl_unlock(upl); |
0a7de745 | 7927 | |
fe8ab488 | 7928 | /* Now perform the allocation */ |
f427ee49 A |
7929 | io_upl_reprio_info = (uint64_t *)kheap_alloc(KHEAP_TEMP, |
7930 | sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK); | |
0a7de745 | 7931 | if (io_upl_reprio_info == NULL) { |
fe8ab488 | 7932 | return; |
0a7de745 | 7933 | } |
fe8ab488 A |
7934 | |
7935 | /* Now again take the lock, recheck the state and grab out the required info */ | |
0a7de745 | 7936 | if (!upl_try_lock(upl)) { |
a1c7dba1 | 7937 | goto out; |
0a7de745 | 7938 | } |
a1c7dba1 | 7939 | |
fe8ab488 A |
7940 | if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) { |
7941 | /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ | |
7942 | upl_unlock(upl); | |
7943 | goto out; | |
7944 | } | |
f427ee49 A |
7945 | memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, |
7946 | sizeof(uint64_t) * atop(io_upl_size)); | |
fe8ab488 A |
7947 | |
7948 | /* Get the VM object for this UPL */ | |
7949 | if (io_upl->flags & UPL_SHADOWED) { | |
7950 | object = io_upl->map_object->shadow; | |
7951 | } else { | |
7952 | object = io_upl->map_object; | |
7953 | } | |
7954 | ||
7955 | /* Get the dev vnode ptr for this object */ | |
0a7de745 A |
7956 | if (!object || !object->pager || |
7957 | vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { | |
fe8ab488 A |
7958 | upl_unlock(upl); |
7959 | goto out; | |
7960 | } | |
7961 | ||
7962 | upl_unlock(upl); | |
7963 | ||
7964 | /* Now we have all the information needed to do the expedite */ | |
7965 | ||
7966 | offset = 0; | |
7967 | while (offset < io_upl_size) { | |
f427ee49 A |
7968 | blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK; |
7969 | len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; | |
fe8ab488 A |
7970 | |
7971 | /* | |
0a7de745 A |
7972 | * This implementation may cause some spurious expedites due to the |
7973 | * fact that we don't clean up the blkno & len from the upl_reprio_info |
7974 | * even after the I/O is complete. | |
fe8ab488 | 7975 | */ |
0a7de745 | 7976 | |
fe8ab488 A |
7977 | if (blkno != 0 && len != 0) { |
7978 | /* Create the request for I/O reprioritization */ | |
0a7de745 A |
7979 | req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone); |
7980 | assert(req != NULL); | |
7981 | req->blkno = blkno; | |
7982 | req->len = len; | |
7983 | req->priority = prio; | |
7984 | req->devvp = devvp; | |
7985 | ||
7986 | /* Insert request into the reprioritization list */ | |
7987 | IO_REPRIORITIZE_LIST_LOCK(); | |
7988 | queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); | |
7989 | IO_REPRIORITIZE_LIST_UNLOCK(); | |
7990 | ||
fe8ab488 A |
7991 | offset += len; |
7992 | } else { | |
7993 | offset += PAGE_SIZE; | |
7994 | } | |
7995 | } | |
7996 | ||
7997 | /* Wakeup reprioritize thread */ | |
0a7de745 | 7998 | IO_REPRIO_THREAD_WAKEUP(); |
fe8ab488 A |
7999 | |
8000 | out: | |
f427ee49 A |
8001 | kheap_free(KHEAP_TEMP, io_upl_reprio_info, |
8002 | sizeof(uint64_t) * atop(io_upl_size)); | |
fe8ab488 A |
8003 | } |
8004 | ||
8005 | void | |
8006 | vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) | |
8007 | { | |
8008 | upl_t upl; | |
0a7de745 A |
8009 | upl_page_info_t *pl; |
8010 | unsigned int i, num_pages; | |
8011 | int cur_tier; | |
fe8ab488 A |
8012 | |
8013 | cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); | |
8014 | ||
0a7de745 A |
8015 | /* |
8016 | * Scan through all UPLs associated with the object to find the | |
8017 | * UPL containing the contended page. | |
8018 | */ | |
fe8ab488 | 8019 | queue_iterate(&o->uplq, upl, upl_t, uplq) { |
0a7de745 | 8020 | if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) { |
fe8ab488 | 8021 | continue; |
0a7de745 | 8022 | } |
fe8ab488 | 8023 | pl = UPL_GET_INTERNAL_PAGE_LIST(upl); |
f427ee49 A |
8024 | assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), |
8025 | "upl %p offset 0x%llx size 0x%x\n", | |
8026 | upl, upl->u_offset, upl->u_size); | |
8027 | num_pages = (upl->u_size / PAGE_SIZE); | |
0a7de745 | 8028 | |
fe8ab488 | 8029 | /* |
0a7de745 A |
8030 | * For each page in the UPL page list, see if it matches the contended |
8031 | * page and was issued as a low prio I/O. | |
8032 | */ | |
8033 | for (i = 0; i < num_pages; i++) { | |
8034 | if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { | |
fe8ab488 | 8035 | if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { |
d190cdc3 | 8036 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
0a7de745 | 8037 | VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); |
fe8ab488 A |
8038 | vm_decmp_upl_reprioritize(upl, cur_tier); |
8039 | break; | |
8040 | } | |
d190cdc3 | 8041 | KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), |
0a7de745 A |
8042 | upl->upl_reprio_info[i], upl->upl_priority, 0); |
8043 | if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) { | |
fe8ab488 | 8044 | vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); |
0a7de745 A |
8045 | } |
8046 | break; | |
8047 | } | |
8048 | } | |
8049 | /* Check if we found any hits */ | |
8050 | if (i != num_pages) { | |
fe8ab488 | 8051 | break; |
0a7de745 | 8052 | } |
fe8ab488 | 8053 | } |
0a7de745 | 8054 | |
fe8ab488 | 8055 | return; |
0a7de745 | 8056 | } |
fe8ab488 A |
8057 | |
8058 | wait_result_t | |
8059 | vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) | |
8060 | { | |
8061 | wait_result_t ret; | |
8062 | ||
d190cdc3 | 8063 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); |
0a7de745 | 8064 | |
d9a64523 | 8065 | if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) { |
0a7de745 A |
8066 | /* |
8067 | * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. | |
8068 | */ | |
8069 | vm_page_handle_prio_inversion(o, m); | |
fe8ab488 | 8070 | } |
d9a64523 | 8071 | m->vmp_wanted = TRUE; |
fe8ab488 A |
8072 | ret = thread_sleep_vm_object(o, m, interruptible); |
8073 | KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0); | |
8074 | return ret; | |
8075 | } | |
8076 | ||
8077 | static void | |
8078 | io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) | |
8079 | { | |
8080 | io_reprioritize_req_t req = NULL; | |
fe8ab488 | 8081 | |
0a7de745 | 8082 | while (1) { |
fe8ab488 A |
8083 | IO_REPRIORITIZE_LIST_LOCK(); |
8084 | if (queue_empty(&io_reprioritize_list)) { | |
8085 | IO_REPRIORITIZE_LIST_UNLOCK(); | |
8086 | break; | |
8087 | } | |
0a7de745 A |
8088 | |
8089 | queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); | |
fe8ab488 | 8090 | IO_REPRIORITIZE_LIST_UNLOCK(); |
0a7de745 | 8091 | |
fe8ab488 | 8092 | vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority); |
0a7de745 A |
8093 | zfree(io_reprioritize_req_zone, req); |
8094 | } | |
8095 | ||
fe8ab488 A |
8096 | IO_REPRIO_THREAD_CONTINUATION(); |
8097 | } | |
8098 | #endif | |
d9a64523 A |
8099 | |
8100 | #if VM_OBJECT_ACCESS_TRACKING | |
8101 | void | |
8102 | vm_object_access_tracking( | |
0a7de745 A |
8103 | vm_object_t object, |
8104 | int *access_tracking_p, | |
8105 | uint32_t *access_tracking_reads_p, | |
8106 | uint32_t *access_tracking_writes_p) | |
d9a64523 | 8107 | { |
0a7de745 | 8108 | int access_tracking; |
d9a64523 A |
8109 | |
8110 | access_tracking = !!*access_tracking_p; | |
8111 | ||
8112 | vm_object_lock(object); | |
8113 | *access_tracking_p = object->access_tracking; | |
8114 | if (access_tracking_reads_p) { | |
8115 | *access_tracking_reads_p = object->access_tracking_reads; | |
8116 | } | |
8117 | if (access_tracking_writes_p) { | |
8118 | *access_tracking_writes_p = object->access_tracking_writes; | |
8119 | } | |
8120 | object->access_tracking = access_tracking; | |
8121 | object->access_tracking_reads = 0; | |
8122 | object->access_tracking_writes = 0; | |
8123 | vm_object_unlock(object); | |
8124 | ||
8125 | if (access_tracking) { | |
8126 | vm_object_pmap_protect_options(object, | |
0a7de745 A |
8127 | 0, |
8128 | object->vo_size, | |
8129 | PMAP_NULL, | |
f427ee49 | 8130 | PAGE_SIZE, |
0a7de745 A |
8131 | 0, |
8132 | VM_PROT_NONE, | |
8133 | 0); | |
d9a64523 A |
8134 | } |
8135 | } | |
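/*
 * Illustrative sketch (not part of the original source): reading and
 * re-arming the access-tracking state.  On return "enable" holds the
 * previous setting, and the read/write counters accumulated since the
 * last call are returned and then reset.
 */
#if 0	/* example only */
	int enable = 1;		/* request tracking on */
	uint32_t reads, writes;

	vm_object_access_tracking(object, &enable, &reads, &writes);
	/* "reads"/"writes" reflect accesses since the previous call */
#endif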
8136 | #endif /* VM_OBJECT_ACCESS_TRACKING */ | |
8137 | ||
8138 | void | |
8139 | vm_object_ledger_tag_ledgers( | |
0a7de745 A |
8140 | vm_object_t object, |
8141 | int *ledger_idx_volatile, | |
8142 | int *ledger_idx_nonvolatile, | |
8143 | int *ledger_idx_volatile_compressed, | |
8144 | int *ledger_idx_nonvolatile_compressed, | |
8145 | boolean_t *do_footprint) | |
d9a64523 A |
8146 | { |
8147 | assert(object->shadow == VM_OBJECT_NULL); | |
8148 | ||
cb323159 A |
8149 | *do_footprint = !object->vo_no_footprint; |
8150 | ||
d9a64523 | 8151 | switch (object->vo_ledger_tag) { |
cb323159 A |
8152 | case VM_LEDGER_TAG_NONE: |
8153 | /* | |
8154 | * Regular purgeable memory: | |
8155 | * counts in footprint only when nonvolatile. | |
8156 | */ | |
8157 | *do_footprint = TRUE; | |
d9a64523 A |
8158 | assert(object->purgable != VM_PURGABLE_DENY); |
8159 | *ledger_idx_volatile = task_ledgers.purgeable_volatile; | |
8160 | *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile; | |
8161 | *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed; | |
8162 | *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed; | |
d9a64523 | 8163 | break; |
cb323159 A |
8164 | case VM_LEDGER_TAG_DEFAULT: |
8165 | /* | |
8166 | * "default" tagged memory: | |
8167 | * counts in footprint only when nonvolatile and not marked | |
8168 | * as "no_footprint". | |
8169 | */ | |
8170 | *ledger_idx_volatile = task_ledgers.tagged_nofootprint; | |
8171 | *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed; | |
8172 | if (*do_footprint) { | |
8173 | *ledger_idx_nonvolatile = task_ledgers.tagged_footprint; | |
8174 | *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed; | |
8175 | } else { | |
8176 | *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint; | |
8177 | *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed; | |
8178 | } | |
8179 | break; | |
8180 | case VM_LEDGER_TAG_NETWORK: | |
8181 | /* | |
8182 | * "network" tagged memory: | |
8183 | * never counts in footprint. | |
8184 | */ | |
8185 | *do_footprint = FALSE; | |
d9a64523 A |
8186 | *ledger_idx_volatile = task_ledgers.network_volatile; |
8187 | *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed; | |
8188 | *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile; | |
8189 | *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed; | |
d9a64523 | 8190 | break; |
cb323159 A |
8191 | case VM_LEDGER_TAG_MEDIA: |
8192 | /* | |
8193 | * "media" tagged memory: | |
8194 | * counts in footprint only when nonvolatile and not marked | |
8195 | * as "no footprint". | |
8196 | */ | |
8197 | *ledger_idx_volatile = task_ledgers.media_nofootprint; | |
8198 | *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed; | |
8199 | if (*do_footprint) { | |
8200 | *ledger_idx_nonvolatile = task_ledgers.media_footprint; | |
8201 | *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed; | |
8202 | } else { | |
8203 | *ledger_idx_nonvolatile = task_ledgers.media_nofootprint; | |
8204 | *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed; | |
8205 | } | |
8206 | break; | |
8207 | case VM_LEDGER_TAG_GRAPHICS: | |
8208 | /* | |
8209 | * "graphics" tagged memory: | |
8210 | * counts in footprint only when nonvolatile and not marked | |
8211 | * as "no footprint". | |
8212 | */ | |
8213 | *ledger_idx_volatile = task_ledgers.graphics_nofootprint; | |
8214 | *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed; | |
8215 | if (*do_footprint) { | |
8216 | *ledger_idx_nonvolatile = task_ledgers.graphics_footprint; | |
8217 | *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed; | |
8218 | } else { | |
8219 | *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint; | |
8220 | *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed; | |
8221 | } | |
8222 | break; | |
8223 | case VM_LEDGER_TAG_NEURAL: | |
8224 | /* | |
8225 | * "neural" tagged memory: | |
8226 | * counts in footprint only when nonvolatile and not marked | |
8227 | * as "no footprint". | |
8228 | */ | |
8229 | *ledger_idx_volatile = task_ledgers.neural_nofootprint; | |
8230 | *ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed; | |
8231 | if (*do_footprint) { | |
8232 | *ledger_idx_nonvolatile = task_ledgers.neural_footprint; | |
8233 | *ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed; | |
8234 | } else { | |
8235 | *ledger_idx_nonvolatile = task_ledgers.neural_nofootprint; | |
8236 | *ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed; | |
8237 | } | |
8238 | break; | |
d9a64523 A |
8239 | default: |
8240 | panic("%s: object %p has unsupported ledger_tag %d\n", | |
0a7de745 | 8241 | __FUNCTION__, object, object->vo_ledger_tag); |
d9a64523 A |
8242 | } |
8243 | } | |
8244 | ||
8245 | kern_return_t | |
8246 | vm_object_ownership_change( | |
0a7de745 A |
8247 | vm_object_t object, |
8248 | int new_ledger_tag, | |
8249 | task_t new_owner, | |
cb323159 A |
8250 | int new_ledger_flags, |
8251 | boolean_t old_task_objq_locked) | |
d9a64523 | 8252 | { |
0a7de745 A |
8253 | int old_ledger_tag; |
8254 | task_t old_owner; | |
8255 | int resident_count, wired_count; | |
8256 | unsigned int compressed_count; | |
8257 | int ledger_idx_volatile; | |
8258 | int ledger_idx_nonvolatile; | |
8259 | int ledger_idx_volatile_compressed; | |
8260 | int ledger_idx_nonvolatile_compressed; | |
8261 | int ledger_idx; | |
8262 | int ledger_idx_compressed; | |
cb323159 A |
8263 | boolean_t do_footprint, old_no_footprint, new_no_footprint; |
8264 | boolean_t new_task_objq_locked; | |
d9a64523 A |
8265 | |
8266 | vm_object_lock_assert_exclusive(object); | |
cb323159 A |
8267 | |
8268 | if (!object->internal) { | |
8269 | return KERN_INVALID_ARGUMENT; | |
8270 | } | |
8271 | if (new_ledger_tag == VM_LEDGER_TAG_NONE && | |
8272 | object->purgable == VM_PURGABLE_DENY) { | |
8273 | /* non-purgeable memory must have a valid non-zero ledger tag */ | |
8274 | return KERN_INVALID_ARGUMENT; | |
8275 | } | |
8276 | if (new_ledger_tag < 0 || | |
8277 | new_ledger_tag > VM_LEDGER_TAG_MAX) { | |
8278 | return KERN_INVALID_ARGUMENT; | |
8279 | } | |
8280 | if (new_ledger_flags & ~VM_LEDGER_FLAGS) { | |
8281 | return KERN_INVALID_ARGUMENT; | |
8282 | } | |
8283 | if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE && | |
8284 | object->purgable == VM_PURGABLE_DENY) { | |
8285 | /* | |
8286 | * This VM object is neither ledger-tagged nor purgeable. | |
8287 | * We can convert it to "ledger tag" ownership iff it | |
8288 | * has not been used at all yet (no resident pages and | |
8289 | * no pager) and it's going to be assigned to a valid task. | |
8290 | */ | |
8291 | if (object->resident_page_count != 0 || | |
8292 | object->pager != NULL || | |
8293 | object->pager_created || | |
8294 | object->ref_count != 1 || | |
8295 | object->vo_owner != TASK_NULL || | |
8296 | object->copy_strategy != MEMORY_OBJECT_COPY_NONE || | |
8297 | new_owner == TASK_NULL) { | |
8298 | return KERN_FAILURE; | |
8299 | } | |
8300 | } | |
8301 | ||
8302 | if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) { | |
8303 | new_no_footprint = TRUE; | |
8304 | } else { | |
8305 | new_no_footprint = FALSE; | |
8306 | } | |
8307 | #if __arm64__ | |
8308 | if (!new_no_footprint && | |
8309 | object->purgable != VM_PURGABLE_DENY && | |
8310 | new_owner != TASK_NULL && | |
8311 | new_owner != VM_OBJECT_OWNER_DISOWNED && | |
8312 | new_owner->task_legacy_footprint) { | |
8313 | /* | |
8314 | * This task has been granted "legacy footprint" and should | |
8315 | * not be charged for its IOKit purgeable memory. Since we | |
8316 | * might now change the accounting of such memory to the | |
8317 | * "graphics" ledger, for example, give it the "no footprint" | |
8318 | * option. | |
8319 | */ | |
8320 | new_no_footprint = TRUE; | |
8321 | } | |
8322 | #endif /* __arm64__ */ | |
8323 | assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); | |
8324 | assert(object->shadow == VM_OBJECT_NULL); | |
8325 | assert(object->copy == VM_OBJECT_NULL); | |
d9a64523 A |
8326 | |
8327 | old_ledger_tag = object->vo_ledger_tag; | |
cb323159 | 8328 | old_no_footprint = object->vo_no_footprint; |
d9a64523 A |
8329 | old_owner = VM_OBJECT_OWNER(object); |
8330 | ||
f427ee49 | 8331 | DTRACE_VM8(object_ownership_change, |
cb323159 A |
8332 | vm_object_t, object, |
8333 | task_t, old_owner, | |
8334 | int, old_ledger_tag, | |
8335 | int, old_no_footprint, | |
8336 | task_t, new_owner, | |
8337 | int, new_ledger_tag, | |
f427ee49 A |
8338 | int, new_no_footprint, |
8339 | int, VM_OBJECT_ID(object)); | |
cb323159 A |
8340 | |
8341 | assert(object->internal); | |
d9a64523 A |
8342 | resident_count = object->resident_page_count - object->wired_page_count; |
8343 | wired_count = object->wired_page_count; | |
8344 | compressed_count = vm_compressor_pager_get_count(object->pager); | |
8345 | ||
8346 | /* | |
8347 | * Deal with the old owner and/or ledger tag, if needed. | |
8348 | */ | |
8349 | if (old_owner != TASK_NULL && | |
0a7de745 A |
8350 | ((old_owner != new_owner) /* new owner ... */ |
8351 | || /* ... or ... */ | |
cb323159 A |
8352 | (old_no_footprint != new_no_footprint) /* new "no_footprint" */ |
8353 | || /* ... or ... */ | |
8354 | old_ledger_tag != new_ledger_tag)) { /* ... new ledger */ | |
d9a64523 A |
8355 | /* |
8356 | * Take this object off of the old owner's ledgers. | |
8357 | */ | |
8358 | vm_object_ledger_tag_ledgers(object, | |
0a7de745 A |
8359 | &ledger_idx_volatile, |
8360 | &ledger_idx_nonvolatile, | |
8361 | &ledger_idx_volatile_compressed, | |
8362 | &ledger_idx_nonvolatile_compressed, | |
8363 | &do_footprint); | |
d9a64523 A |
8364 | if (object->purgable == VM_PURGABLE_VOLATILE || |
8365 | object->purgable == VM_PURGABLE_EMPTY) { | |
8366 | ledger_idx = ledger_idx_volatile; | |
8367 | ledger_idx_compressed = ledger_idx_volatile_compressed; | |
8368 | } else { | |
8369 | ledger_idx = ledger_idx_nonvolatile; | |
8370 | ledger_idx_compressed = ledger_idx_nonvolatile_compressed; | |
8371 | } | |
8372 | if (resident_count) { | |
8373 | /* | |
8374 | * Adjust the appropriate old owners's ledgers by the | |
8375 | * number of resident pages. | |
8376 | */ | |
8377 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8378 | ledger_idx, |
8379 | ptoa_64(resident_count)); | |
d9a64523 A |
8380 | /* adjust old owner's footprint */ |
8381 | if (do_footprint && | |
8382 | object->purgable != VM_PURGABLE_VOLATILE && | |
8383 | object->purgable != VM_PURGABLE_EMPTY) { | |
8384 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8385 | task_ledgers.phys_footprint, |
8386 | ptoa_64(resident_count)); | |
d9a64523 A |
8387 | } |
8388 | } | |
8389 | if (wired_count) { | |
8390 | /* wired pages are always nonvolatile */ | |
8391 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8392 | ledger_idx_nonvolatile, |
8393 | ptoa_64(wired_count)); | |
d9a64523 A |
8394 | if (do_footprint) { |
8395 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8396 | task_ledgers.phys_footprint, |
8397 | ptoa_64(wired_count)); | |
d9a64523 A |
8398 | } |
8399 | } | |
8400 | if (compressed_count) { | |
8401 | /* | |
8402 | * Adjust the appropriate old owner's ledgers | |
8403 | * by the number of compressed pages. | |
8404 | */ | |
8405 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8406 | ledger_idx_compressed, |
8407 | ptoa_64(compressed_count)); | |
d9a64523 A |
8408 | if (do_footprint && |
8409 | object->purgable != VM_PURGABLE_VOLATILE && | |
8410 | object->purgable != VM_PURGABLE_EMPTY) { | |
8411 | ledger_debit(old_owner->ledger, | |
0a7de745 A |
8412 | task_ledgers.phys_footprint, |
8413 | ptoa_64(compressed_count)); | |
d9a64523 A |
8414 | } |
8415 | } | |
8416 | if (old_owner != new_owner) { | |
8417 | /* remove object from old_owner's list of owned objects */ | |
8418 | DTRACE_VM2(object_owner_remove, | |
0a7de745 | 8419 | vm_object_t, object, |
cb323159 A |
8420 | task_t, old_owner); |
8421 | if (!old_task_objq_locked) { | |
d9a64523 A |
8422 | task_objq_lock(old_owner); |
8423 | } | |
cb323159 | 8424 | old_owner->task_owned_objects--; |
d9a64523 | 8425 | queue_remove(&old_owner->task_objq, object, |
0a7de745 | 8426 | vm_object_t, task_objq); |
d9a64523 A |
8427 | switch (object->purgable) { |
8428 | case VM_PURGABLE_NONVOLATILE: | |
8429 | case VM_PURGABLE_EMPTY: | |
8430 | vm_purgeable_nonvolatile_owner_update(old_owner, | |
0a7de745 | 8431 | -1); |
d9a64523 A |
8432 | break; |
8433 | case VM_PURGABLE_VOLATILE: | |
8434 | vm_purgeable_volatile_owner_update(old_owner, | |
0a7de745 | 8435 | -1); |
d9a64523 A |
8436 | break; |
8437 | default: | |
8438 | break; | |
8439 | } | |
cb323159 | 8440 | if (!old_task_objq_locked) { |
d9a64523 A |
8441 | task_objq_unlock(old_owner); |
8442 | } | |
8443 | } | |
8444 | } | |
8445 | ||
8446 | /* | |
8447 | * Switch to new ledger tag and/or owner. | |
8448 | */ | |
cb323159 A |
8449 | |
8450 | new_task_objq_locked = FALSE; | |
8451 | if (new_owner != old_owner && | |
8452 | new_owner != TASK_NULL && | |
8453 | new_owner != VM_OBJECT_OWNER_DISOWNED) { | |
8454 | /* | |
8455 | * If the new owner is not accepting new objects ("disowning"), | |
8456 | * the object becomes "disowned" and will be added to | |
8457 | * the kernel's task_objq. | |
8458 | * | |
8459 | * Check first without locking, to avoid blocking while the | |
8460 | * task is disowning its objects. | |
8461 | */ | |
8462 | if (new_owner->task_objects_disowning) { | |
8463 | new_owner = VM_OBJECT_OWNER_DISOWNED; | |
8464 | } else { | |
8465 | task_objq_lock(new_owner); | |
8466 | /* check again now that we have the lock */ | |
8467 | if (new_owner->task_objects_disowning) { | |
8468 | new_owner = VM_OBJECT_OWNER_DISOWNED; | |
8469 | task_objq_unlock(new_owner); | |
8470 | } else { | |
8471 | new_task_objq_locked = TRUE; | |
8472 | } | |
8473 | } | |
8474 | } | |
8475 | ||
d9a64523 A |
8476 | object->vo_ledger_tag = new_ledger_tag; |
8477 | object->vo_owner = new_owner; | |
cb323159 | 8478 | object->vo_no_footprint = new_no_footprint; |
d9a64523 A |
8479 | |
8480 | if (new_owner == VM_OBJECT_OWNER_DISOWNED) { | |
cb323159 A |
8481 | /* |
8482 | * Disowned objects are added to the kernel's task_objq but | |
8483 | * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to | |
8484 | * differentiate them from objects intentionally owned by | |
8485 | * the kernel. | |
8486 | */ | |
d9a64523 A |
8487 | assert(old_owner != kernel_task); |
8488 | new_owner = kernel_task; | |
cb323159 A |
8489 | assert(!new_task_objq_locked); |
8490 | task_objq_lock(new_owner); | |
8491 | new_task_objq_locked = TRUE; | |
d9a64523 A |
8492 | } |

    /*
     * Deal with the new owner and/or ledger tag, if needed.
     */
    if (new_owner != TASK_NULL &&
        ((new_owner != old_owner)                   /* new owner ... */
        ||                                          /* ... or ... */
        (new_no_footprint != old_no_footprint)      /* ... new "no_footprint" */
        ||                                          /* ... or ... */
        new_ledger_tag != old_ledger_tag)) {        /* ... new ledger */
        /*
         * Add this object to the new owner's ledgers.
         */
        vm_object_ledger_tag_ledgers(object,
            &ledger_idx_volatile,
            &ledger_idx_nonvolatile,
            &ledger_idx_volatile_compressed,
            &ledger_idx_nonvolatile_compressed,
            &do_footprint);
        if (object->purgable == VM_PURGABLE_VOLATILE ||
            object->purgable == VM_PURGABLE_EMPTY) {
            ledger_idx = ledger_idx_volatile;
            ledger_idx_compressed = ledger_idx_volatile_compressed;
        } else {
            ledger_idx = ledger_idx_nonvolatile;
            ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
        }
        if (resident_count) {
            /*
             * Adjust the appropriate new owner's ledgers by the
             * number of resident pages.
             */
            ledger_credit(new_owner->ledger,
                ledger_idx,
                ptoa_64(resident_count));
            /* adjust new owner's footprint */
            if (do_footprint &&
                object->purgable != VM_PURGABLE_VOLATILE &&
                object->purgable != VM_PURGABLE_EMPTY) {
                ledger_credit(new_owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(resident_count));
            }
        }
        if (wired_count) {
            /* wired pages are always nonvolatile */
            ledger_credit(new_owner->ledger,
                ledger_idx_nonvolatile,
                ptoa_64(wired_count));
            if (do_footprint) {
                ledger_credit(new_owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(wired_count));
            }
        }
        if (compressed_count) {
            /*
             * Adjust the new owner's ledgers by the number of
             * compressed pages.
             */
            ledger_credit(new_owner->ledger,
                ledger_idx_compressed,
                ptoa_64(compressed_count));
            if (do_footprint &&
                object->purgable != VM_PURGABLE_VOLATILE &&
                object->purgable != VM_PURGABLE_EMPTY) {
                ledger_credit(new_owner->ledger,
                    task_ledgers.phys_footprint,
                    ptoa_64(compressed_count));
            }
        }
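        /*
         * The credits above are the counterpart of the debits applied to
         * the old owner earlier in this routine; both sides use the same
         * resident, wired and compressed page counts for this object.
         */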
        if (new_owner != old_owner) {
            /* add object to new_owner's list of owned objects */
            DTRACE_VM2(object_owner_add,
                vm_object_t, object,
                task_t, new_owner);
            assert(new_task_objq_locked);
            new_owner->task_owned_objects++;
            queue_enter(&new_owner->task_objq, object,
                vm_object_t, task_objq);
            switch (object->purgable) {
            case VM_PURGABLE_NONVOLATILE:
            case VM_PURGABLE_EMPTY:
                vm_purgeable_nonvolatile_owner_update(new_owner,
                    +1);
                break;
            case VM_PURGABLE_VOLATILE:
                vm_purgeable_volatile_owner_update(new_owner,
                    +1);
                break;
            default:
                break;
            }
        }
    }

    if (new_task_objq_locked) {
        task_objq_unlock(new_owner);
    }

    return KERN_SUCCESS;
}
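
/*
 * vm_owned_objects_disown:
 *
 * Hand every object still owned by "task" over to the kernel, marking each
 * one VM_OBJECT_OWNER_DISOWNED, and mark the task so that no further objects
 * can be assigned to it.  (Presumably invoked when the task is being torn
 * down; the call site is outside this excerpt.)
 */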

void
vm_owned_objects_disown(
    task_t          task)
{
    vm_object_t     next_object;
    vm_object_t     object;
    int             collisions;
    kern_return_t   kr;

    if (task == NULL) {
        return;
    }

    collisions = 0;

again:
    if (task->task_objects_disowned) {
        /* task has already disowned its owned objects */
        assert(task->task_volatile_objects == 0);
        assert(task->task_nonvolatile_objects == 0);
        assert(task->task_owned_objects == 0);
        return;
    }

    task_objq_lock(task);

    task->task_objects_disowning = TRUE;

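    /*
     * With "task_objects_disowning" set while we hold the task_objq lock,
     * any concurrent vm_object_ownership_change() targeting this task will
     * observe the flag (it re-checks it under this same lock) and redirect
     * the object to VM_OBJECT_OWNER_DISOWNED instead.
     */
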
    for (object = (vm_object_t) queue_first(&task->task_objq);
        !queue_end(&task->task_objq, (queue_entry_t) object);
        object = next_object) {
        if (task->task_nonvolatile_objects == 0 &&
            task->task_volatile_objects == 0 &&
            task->task_owned_objects == 0) {
            /* no more objects owned by "task" */
            break;
        }

        next_object = (vm_object_t) queue_next(&object->task_objq);

#if DEBUG
        assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
        assert(object->vo_owner == task);
        if (!vm_object_lock_try(object)) {
            task_objq_unlock(task);
            mutex_pause(collisions++);
            goto again;
        }
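        /*
         * vm_object_ownership_change() above takes the task_objq lock while
         * the object lock is already held; here we hold the task_objq lock
         * first, so we must not block on the object lock.  On contention,
         * drop the queue lock, back off (mutex_pause) and restart the scan
         * from "again".
         */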
        /* transfer ownership to the kernel */
        assert(VM_OBJECT_OWNER(object) != kernel_task);
        kr = vm_object_ownership_change(
            object,
            object->vo_ledger_tag,      /* unchanged */
            VM_OBJECT_OWNER_DISOWNED,   /* new owner */
            0,                          /* new_ledger_flags */
            TRUE);                      /* old_owner->task_objq locked */
        assert(kr == KERN_SUCCESS);
        assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
        vm_object_unlock(object);
    }

    if (__improbable(task->task_owned_objects != 0)) {
        panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
            __FUNCTION__,
            task,
            task->task_volatile_objects,
            task->task_nonvolatile_objects,
            task->task_owned_objects,
            &task->task_objq,
            queue_first(&task->task_objq),
            queue_last(&task->task_objq));
    }

    /* there shouldn't be any objects owned by task now */
    assert(task->task_volatile_objects == 0);
    assert(task->task_nonvolatile_objects == 0);
    assert(task->task_owned_objects == 0);
    assert(task->task_objects_disowning);

    /* and we don't need to try and disown again */
    task->task_objects_disowned = TRUE;

    task_objq_unlock(task);
}