1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Virtual memory object module.
63 */
64
65 #include <debug.h>
66 #include <mach_pagemap.h>
67
68 #include <mach/mach_types.h>
69 #include <mach/memory_object.h>
70 #include <mach/memory_object_default.h>
71 #include <mach/memory_object_control_server.h>
72 #include <mach/vm_param.h>
73
74 #include <mach/sdt.h>
75
76 #include <ipc/ipc_types.h>
77 #include <ipc/ipc_port.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/assert.h>
81 #include <kern/queue.h>
82 #include <kern/kalloc.h>
83 #include <kern/zalloc.h>
84 #include <kern/host.h>
85 #include <kern/host_statistics.h>
86 #include <kern/processor.h>
87 #include <kern/misc_protos.h>
88 #include <kern/policy_internal.h>
89
90 #include <vm/memory_object.h>
91 #include <vm/vm_compressor_pager.h>
92 #include <vm/vm_fault.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98 #include <vm/vm_purgeable_internal.h>
99
100 #include <vm/vm_compressor.h>
101
102 #if CONFIG_PHANTOM_CACHE
103 #include <vm/vm_phantom_cache.h>
104 #endif
105
106 #if VM_OBJECT_ACCESS_TRACKING
107 uint64_t vm_object_access_tracking_reads = 0;
108 uint64_t vm_object_access_tracking_writes = 0;
109 #endif /* VM_OBJECT_ACCESS_TRACKING */
110
111 boolean_t vm_object_collapse_compressor_allowed = TRUE;
112
113 struct vm_counters vm_counters;
114
115 #if DEVELOPMENT || DEBUG
116 extern struct memory_object_pager_ops shared_region_pager_ops;
117 extern unsigned int shared_region_pagers_resident_count;
118 extern unsigned int shared_region_pagers_resident_peak;
119 #endif /* DEVELOPMENT || DEBUG */
120
121 #if VM_OBJECT_TRACKING
122 boolean_t vm_object_tracking_inited = FALSE;
123 btlog_t *vm_object_tracking_btlog;
124
125 void
126 vm_object_tracking_init(void)
127 {
128 int vm_object_tracking;
129
130 vm_object_tracking = 1;
131 PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
132 sizeof(vm_object_tracking));
133
134 if (vm_object_tracking) {
135 vm_object_tracking_btlog = btlog_create(
136 VM_OBJECT_TRACKING_NUM_RECORDS,
137 VM_OBJECT_TRACKING_BTDEPTH,
138 TRUE /* caller_will_remove_entries_for_element? */);
139 assert(vm_object_tracking_btlog);
140 vm_object_tracking_inited = TRUE;
141 }
142 }
143 #endif /* VM_OBJECT_TRACKING */
144
145 /*
146 * Virtual memory objects maintain the actual data
147 * associated with allocated virtual memory. A given
148 * page of memory exists within exactly one object.
149 *
150 * An object is only deallocated when all "references"
151 * are given up.
152 *
153 * Associated with each object is a list of all resident
154 * memory pages belonging to that object; this list is
155 * maintained by the "vm_page" module, but locked by the object's
156 * lock.
157 *
158 * Each object also records the memory object reference
159 * that is used by the kernel to request and write
160 * back data (the memory object, field "pager"), etc...
161 *
162 * Virtual memory objects are allocated to provide
163 * zero-filled memory (vm_allocate) or map a user-defined
164 * memory object into a virtual address space (vm_map).
165 *
166 * Virtual memory objects that refer to a user-defined
167 * memory object are called "permanent", because all changes
168 * made in virtual memory are reflected back to the
169 * memory manager, which may then store them permanently.
170 * Other virtual memory objects are called "temporary",
171 * meaning that changes need be written back only when
172 * necessary to reclaim pages, and that storage associated
173 * with the object can be discarded once it is no longer
174 * mapped.
175 *
176 * A permanent memory object may be mapped into more
177 * than one virtual address space. Moreover, two threads
178 * may attempt to make the first mapping of a memory
179 * object concurrently. Only one thread is allowed to
180 * complete this mapping; all others wait until the
181 * "pager_initialized" field is asserted, indicating
182 * that the first thread has initialized all of the
183 * necessary fields in the virtual memory object structure.
184 *
185 * The kernel relies on a *default memory manager* to
186 * provide backing storage for the zero-filled virtual
187 * memory objects. The pager memory objects associated
188 * with these temporary virtual memory objects are only
189 * requested from the default memory manager when it
190 * becomes necessary. Virtual memory objects
191 * that depend on the default memory manager are called
192 * "internal". The "pager_created" field is provided to
193 * indicate whether these ports have ever been allocated.
194 *
195 * The kernel may also create virtual memory objects to
196 * hold changed pages after a copy-on-write operation.
197 * In this case, the virtual memory object (and its
198 * backing storage -- its memory object) only contain
199 * those pages that have been changed. The "shadow"
200 * field refers to the virtual memory object that contains
201 * the remainder of the contents. The "shadow_offset"
202 * field indicates where in the "shadow" these contents begin.
203 * The "copy" field refers to a virtual memory object
204 * to which changed pages must be copied before changing
205 * this object, in order to implement another form
206 * of copy-on-write optimization.
207 *
208 * The virtual memory object structure also records
209 * the attributes associated with its memory object.
210 * The "pager_ready", "can_persist" and "copy_strategy"
211 * fields represent those attributes. The "cached_list"
212 * field is used in the implementation of the persistence
213 * attribute.
214 *
215 * ZZZ Continue this comment.
216 */
217
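/*
 * Illustrative sketch (not part of the original source): how a lookup
 * conceptually walks a shadow chain.  If a page is not resident in the
 * top object, the search descends into the "shadow" object, offsetting
 * by "vo_shadow_offset" at each hop.  The real path lives in vm_fault.c
 * and also handles locking, absent pages and copy-on-write; this
 * hypothetical helper shows only the pointer/offset arithmetic.
 */
static vm_page_t
vm_object_shadow_lookup_sketch(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       m;

	while (object != VM_OBJECT_NULL) {
		/* is the page resident in this object? */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			return m;
		}
		/* not here: descend into the shadow at the adjusted offset */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}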
218 /* Forward declarations for internal functions. */
219 static kern_return_t vm_object_terminate(
220 vm_object_t object);
221
222 static kern_return_t vm_object_copy_call(
223 vm_object_t src_object,
224 vm_object_offset_t src_offset,
225 vm_object_size_t size,
226 vm_object_t *_result_object);
227
228 static void vm_object_do_collapse(
229 vm_object_t object,
230 vm_object_t backing_object);
231
232 static void vm_object_do_bypass(
233 vm_object_t object,
234 vm_object_t backing_object);
235
236 static void vm_object_release_pager(
237 memory_object_t pager);
238
239 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
240
241 /*
242 * All wired-down kernel memory belongs to a single virtual
243 * memory object (kernel_object) to avoid wasting data structures.
244 */
245 static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED;
246 SECURITY_READ_ONLY_LATE(vm_object_t) kernel_object = &kernel_object_store;
247
248 static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED;
249 SECURITY_READ_ONLY_LATE(vm_object_t) compressor_object = &compressor_object_store;
250
251 /*
252 * This object holds all pages that have been retired due to errors like ECC.
253 * The system should never use the page or look at its contents. The offset
254 * in this object is the same as the page's physical address.
255 */
256 static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
257 SECURITY_READ_ONLY_LATE(vm_object_t) retired_pages_object = &retired_pages_object_store;
258
259 /*
260 * The submap object is used as a placeholder for vm_map_submap
261 * operations. The object is declared in vm_map.c because it
262 * is exported by the vm_map module. The storage is declared
263 * here because it must be initialized here.
264 */
265 static struct vm_object vm_submap_object_store VM_PAGE_PACKED_ALIGNED;
266 SECURITY_READ_ONLY_LATE(vm_object_t) vm_submap_object = &vm_submap_object_store;
267
268
269 /*
270 * Virtual memory objects are initialized from
271 * a template (see vm_object_allocate).
272 *
273 * When adding a new field to the virtual memory
274 * object structure, be sure to add initialization
275 * (see _vm_object_allocate()).
276 */
277 static const struct vm_object vm_object_template = {
278 .memq.prev = 0,
279 .memq.next = 0,
280 /*
281 * The lock will be initialized for each allocated object in
282 * _vm_object_allocate(), so we don't need to initialize it in
283 * the vm_object_template.
284 */
285 #if DEVELOPMENT || DEBUG
286 .Lock_owner = 0,
287 #endif
288 .vo_size = 0,
289 .memq_hint = VM_PAGE_NULL,
290 .ref_count = 1,
291 .resident_page_count = 0,
292 .wired_page_count = 0,
293 .reusable_page_count = 0,
294 .copy = VM_OBJECT_NULL,
295 .shadow = VM_OBJECT_NULL,
296 .vo_shadow_offset = (vm_object_offset_t) 0,
297 .pager = MEMORY_OBJECT_NULL,
298 .paging_offset = 0,
299 .pager_control = MEMORY_OBJECT_CONTROL_NULL,
300 .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
301 .paging_in_progress = 0,
302 #if __LP64__
303 .__object1_unused_bits = 0,
304 #endif /* __LP64__ */
305 .activity_in_progress = 0,
306
307 /* Begin bitfields */
308 .all_wanted = 0, /* all bits FALSE */
309 .pager_created = FALSE,
310 .pager_initialized = FALSE,
311 .pager_ready = FALSE,
312 .pager_trusted = FALSE,
313 .can_persist = FALSE,
314 .internal = TRUE,
315 .private = FALSE,
316 .pageout = FALSE,
317 .alive = TRUE,
318 .purgable = VM_PURGABLE_DENY,
319 .purgeable_when_ripe = FALSE,
320 .purgeable_only_by_kernel = FALSE,
321 .shadowed = FALSE,
322 .true_share = FALSE,
323 .terminating = FALSE,
324 .named = FALSE,
325 .shadow_severed = FALSE,
326 .phys_contiguous = FALSE,
327 .nophyscache = FALSE,
328 /* End bitfields */
329
330 .cached_list.prev = NULL,
331 .cached_list.next = NULL,
332
333 .last_alloc = (vm_object_offset_t) 0,
334 .sequential = (vm_object_offset_t) 0,
335 .pages_created = 0,
336 .pages_used = 0,
337 .scan_collisions = 0,
338 #if CONFIG_PHANTOM_CACHE
339 .phantom_object_id = 0,
340 #endif
341 .cow_hint = ~(vm_offset_t)0,
342
343 /* cache bitfields */
344 .wimg_bits = VM_WIMG_USE_DEFAULT,
345 .set_cache_attr = FALSE,
346 .object_is_shared_cache = FALSE,
347 .code_signed = FALSE,
348 .transposed = FALSE,
349 .mapping_in_progress = FALSE,
350 .phantom_isssd = FALSE,
351 .volatile_empty = FALSE,
352 .volatile_fault = FALSE,
353 .all_reusable = FALSE,
354 .blocked_access = FALSE,
355 .vo_ledger_tag = VM_LEDGER_TAG_NONE,
356 .vo_no_footprint = FALSE,
357 #if CONFIG_IOSCHED || UPL_DEBUG
358 .uplq.prev = NULL,
359 .uplq.next = NULL,
360 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
361 #ifdef VM_PIP_DEBUG
362 .pip_holders = {0},
363 #endif /* VM_PIP_DEBUG */
364
365 .objq.next = NULL,
366 .objq.prev = NULL,
367 .task_objq.next = NULL,
368 .task_objq.prev = NULL,
369
370 .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
371 .purgeable_queue_group = 0,
372
373 .wire_tag = VM_KERN_MEMORY_NONE,
374 #if !VM_TAG_ACTIVE_UPDATE
375 .wired_objq.next = NULL,
376 .wired_objq.prev = NULL,
377 #endif /* ! VM_TAG_ACTIVE_UPDATE */
378
379 .io_tracking = FALSE,
380
381 #if CONFIG_SECLUDED_MEMORY
382 .eligible_for_secluded = FALSE,
383 .can_grab_secluded = FALSE,
384 #else /* CONFIG_SECLUDED_MEMORY */
385 .__object3_unused_bits = 0,
386 #endif /* CONFIG_SECLUDED_MEMORY */
387
388 #if VM_OBJECT_ACCESS_TRACKING
389 .access_tracking = FALSE,
390 .access_tracking_reads = 0,
391 .access_tracking_writes = 0,
392 #endif /* VM_OBJECT_ACCESS_TRACKING */
393
394 #if DEBUG
395 .purgeable_owner_bt = {0},
396 .vo_purgeable_volatilizer = NULL,
397 .purgeable_volatilizer_bt = {0},
398 #endif /* DEBUG */
399 };
400
401 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
402 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
403 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
404 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
405 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
406
407 unsigned int vm_page_purged_wired = 0;
408 unsigned int vm_page_purged_busy = 0;
409 unsigned int vm_page_purged_others = 0;
410
411 static queue_head_t vm_object_cached_list;
412 static uint32_t vm_object_cache_pages_freed = 0;
413 static uint32_t vm_object_cache_pages_moved = 0;
414 static uint32_t vm_object_cache_pages_skipped = 0;
415 static uint32_t vm_object_cache_adds = 0;
416 static uint32_t vm_object_cached_count = 0;
417 static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_cached_lock_data,
418 &vm_object_cache_lck_grp, &vm_object_lck_attr);
419
420 static uint32_t vm_object_page_grab_failed = 0;
421 static uint32_t vm_object_page_grab_skipped = 0;
422 static uint32_t vm_object_page_grab_returned = 0;
423 static uint32_t vm_object_page_grab_pmapped = 0;
424 static uint32_t vm_object_page_grab_reactivations = 0;
425
426 #define vm_object_cache_lock_spin() \
427 lck_mtx_lock_spin(&vm_object_cached_lock_data)
428 #define vm_object_cache_unlock() \
429 lck_mtx_unlock(&vm_object_cached_lock_data)
430
431 static void vm_object_cache_remove_locked(vm_object_t);
432
433
434 static void vm_object_reap(vm_object_t object);
435 static void vm_object_reap_async(vm_object_t object);
436 static void vm_object_reaper_thread(void);
437
438 static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_reaper_lock_data,
439 &vm_object_lck_grp, &vm_object_lck_attr);
440
441 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
442 unsigned int vm_object_reap_count = 0;
443 unsigned int vm_object_reap_count_async = 0;
444
445 #define vm_object_reaper_lock() \
446 lck_mtx_lock(&vm_object_reaper_lock_data)
447 #define vm_object_reaper_lock_spin() \
448 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
449 #define vm_object_reaper_unlock() \
450 lck_mtx_unlock(&vm_object_reaper_lock_data)
451
452 #if CONFIG_IOSCHED
453 /* I/O Re-prioritization request list */
454 queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list);
455
456 LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock,
457 &vm_object_lck_grp, &vm_object_lck_attr);
458
459 #define IO_REPRIORITIZE_LIST_LOCK() \
460 lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
461 #define IO_REPRIORITIZE_LIST_UNLOCK() \
462 lck_spin_unlock(&io_reprioritize_list_lock)
463
464 #define MAX_IO_REPRIORITIZE_REQS 8192
465 ZONE_DECLARE(io_reprioritize_req_zone, "io_reprioritize_req",
466 sizeof(struct io_reprioritize_req), ZC_NOGC);
467
468 /* I/O Re-prioritization thread */
469 int io_reprioritize_wakeup = 0;
470 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
471
472 #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
473 #define IO_REPRIO_THREAD_CONTINUATION() \
474 { \
475 assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \
476 thread_block(io_reprioritize_thread); \
477 }
478
479 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
480 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
481 void vm_decmp_upl_reprioritize(upl_t, int);
482 #endif
483
484 #if 0
485 #undef KERNEL_DEBUG
486 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
487 #endif
488
489
490 /*
491 * vm_object_allocate:
492 *
493 * Returns a new object with the given size.
494 */
495
496 __private_extern__ void
497 _vm_object_allocate(
498 vm_object_size_t size,
499 vm_object_t object)
500 {
501 *object = vm_object_template;
502 vm_page_queue_init(&object->memq);
503 #if UPL_DEBUG || CONFIG_IOSCHED
504 queue_init(&object->uplq);
505 #endif
506 vm_object_lock_init(object);
507 object->vo_size = vm_object_round_page(size);
508
509 #if VM_OBJECT_TRACKING_OP_CREATED
510 if (vm_object_tracking_inited) {
511 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
512 int numsaved = 0;
513
514 numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
515 btlog_add_entry(vm_object_tracking_btlog,
516 object,
517 VM_OBJECT_TRACKING_OP_CREATED,
518 bt,
519 numsaved);
520 }
521 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
522 }
523
524 __private_extern__ vm_object_t
525 vm_object_allocate(
526 vm_object_size_t size)
527 {
528 vm_object_t object;
529
530 object = (vm_object_t) zalloc(vm_object_zone);
531
532 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
533
534 if (object != VM_OBJECT_NULL) {
535 _vm_object_allocate(size, object);
536 }
537
538 return object;
539 }
540
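/*
 * Usage sketch (illustrative, not from the original source): a caller
 * creates a temporary internal object covering "size" bytes and later
 * drops its initial reference with vm_object_deallocate().  The helper
 * name is hypothetical and error handling is reduced to a NULL check.
 */
static void
vm_object_allocate_usage_sketch(vm_object_size_t size)
{
	vm_object_t     object;

	object = vm_object_allocate(size);      /* size is rounded to page size */
	if (object == VM_OBJECT_NULL) {
		return;                         /* zone allocation failed */
	}

	/* ... enter the object in a map or populate it with pages ... */

	vm_object_deallocate(object);           /* drop the initial reference */
}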
541 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
542
543 /*
544 * vm_object_bootstrap:
545 *
546 * Initialize the VM objects module.
547 */
548 __startup_func
549 void
550 vm_object_bootstrap(void)
551 {
552 vm_size_t vm_object_size;
553
554 assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
555
556 vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
557 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
558
559 vm_object_zone = zone_create_ext("vm objects", vm_object_size,
560 ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED,
561 ZONE_ID_ANY, ^(zone_t z){
562 #if defined(__LP64__)
563 zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
564 #else
565 (void)z;
566 #endif
567 });
568
569 queue_init(&vm_object_cached_list);
570
571 queue_init(&vm_object_reaper_queue);
572
573 /*
574 * Initialize the "kernel object"
575 */
576
577 /*
578 * Note that in the following size specifications, we need to add 1 because
579 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
580 */
581 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object);
582 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
583 kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
584 compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
585 kernel_object->no_tag_update = TRUE;
586
587 /*
588 * The object to hold retired VM pages.
589 */
590 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
591 retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
592
593 /*
594 * Initialize the "submap object". Make it as large as the
595 * kernel object so that no limit is imposed on submap sizes.
596 */
597
598 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object);
599 vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
600
601 /*
602 * Create an "extra" reference to this object so that we never
603 * try to deallocate it; zfree doesn't like to be called with
604 * non-zone memory.
605 */
606 vm_object_reference(vm_submap_object);
607 }
608
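/*
 * Illustrative note (not part of the original source): the zone element
 * size computed in vm_object_bootstrap() uses the usual power-of-two
 * round-up idiom, (size + (align - 1)) & ~(align - 1).  For example, with
 * a hypothetical 228-byte struct vm_object and a 64-byte
 * VM_PAGE_PACKED_PTR_ALIGNMENT, the element size becomes
 * (228 + 63) & ~63 = 256 bytes.
 */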
609 #if CONFIG_IOSCHED
610 void
611 vm_io_reprioritize_init(void)
612 {
613 kern_return_t result;
614 thread_t thread = THREAD_NULL;
615
616 result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
617 if (result == KERN_SUCCESS) {
618 thread_set_thread_name(thread, "VM_io_reprioritize_thread");
619 thread_deallocate(thread);
620 } else {
621 panic("Could not create io_reprioritize_thread");
622 }
623 }
624 #endif
625
626 void
627 vm_object_reaper_init(void)
628 {
629 kern_return_t kr;
630 thread_t thread;
631
632 kr = kernel_thread_start_priority(
633 (thread_continue_t) vm_object_reaper_thread,
634 NULL,
635 BASEPRI_VM,
636 &thread);
637 if (kr != KERN_SUCCESS) {
638 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
639 }
640 thread_set_thread_name(thread, "VM_object_reaper_thread");
641 thread_deallocate(thread);
642 }
643
644
645 /*
646 * vm_object_deallocate:
647 *
648 * Release a reference to the specified object,
649 * gained either through a vm_object_allocate
650 * or a vm_object_reference call. When all references
651 * are gone, storage associated with this object
652 * may be relinquished.
653 *
654 * No object may be locked.
655 */
656 unsigned long vm_object_deallocate_shared_successes = 0;
657 unsigned long vm_object_deallocate_shared_failures = 0;
658 unsigned long vm_object_deallocate_shared_swap_failures = 0;
659
660 __private_extern__ void
661 vm_object_deallocate(
662 vm_object_t object)
663 {
664 vm_object_t shadow = VM_OBJECT_NULL;
665
666 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
667 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
668
669 if (object == VM_OBJECT_NULL) {
670 return;
671 }
672
673 if (object == kernel_object || object == compressor_object || object == retired_pages_object) {
674 vm_object_lock_shared(object);
675
676 OSAddAtomic(-1, &object->ref_count);
677
678 if (object->ref_count == 0) {
679 if (object == kernel_object) {
680 panic("vm_object_deallocate: losing kernel_object\n");
681 } else if (object == retired_pages_object) {
682 panic("vm_object_deallocate: losing retired_pages_object\n");
683 } else {
684 panic("vm_object_deallocate: losing compressor_object\n");
685 }
686 }
687 vm_object_unlock(object);
688 return;
689 }
690
691 if (object->ref_count == 2 &&
692 object->named) {
693 /*
694 * This "named" object's reference count is about to
695 * drop from 2 to 1:
696 * we'll need to call memory_object_last_unmap().
697 */
698 } else if (object->ref_count == 2 &&
699 object->internal &&
700 object->shadow != VM_OBJECT_NULL) {
701 /*
702 * This internal object's reference count is about to
703 * drop from 2 to 1 and it has a shadow object:
704 * we'll want to try and collapse this object with its
705 * shadow.
706 */
707 } else if (object->ref_count >= 2) {
708 UInt32 original_ref_count;
709 volatile UInt32 *ref_count_p;
710 Boolean atomic_swap;
711
712 /*
713 * The object currently looks like it is not being
714 * kept alive solely by the reference we're about to release.
715 * Let's try and release our reference without taking
716 * all the locks we would need if we had to terminate the
717 * object (cache lock + exclusive object lock).
718 * Lock the object "shared" to make sure we don't race with
719 * anyone holding it "exclusive".
720 */
721 vm_object_lock_shared(object);
722 ref_count_p = (volatile UInt32 *) &object->ref_count;
723 original_ref_count = object->ref_count;
724 /*
725 * Test again as "ref_count" could have changed.
726 * "named" shouldn't change.
727 */
728 if (original_ref_count == 2 &&
729 object->named) {
730 /* need to take slow path for m_o_last_unmap() */
731 atomic_swap = FALSE;
732 } else if (original_ref_count == 2 &&
733 object->internal &&
734 object->shadow != VM_OBJECT_NULL) {
735 /* need to take slow path for vm_object_collapse() */
736 atomic_swap = FALSE;
737 } else if (original_ref_count < 2) {
738 /* need to take slow path for vm_object_terminate() */
739 atomic_swap = FALSE;
740 } else {
741 /* try an atomic update with the shared lock */
742 atomic_swap = OSCompareAndSwap(
743 original_ref_count,
744 original_ref_count - 1,
745 (UInt32 *) &object->ref_count);
746 if (atomic_swap == FALSE) {
747 vm_object_deallocate_shared_swap_failures++;
748 /* fall back to the slow path... */
749 }
750 }
751
752 vm_object_unlock(object);
753
754 if (atomic_swap) {
755 /*
756 * ref_count was updated atomically !
757 */
758 vm_object_deallocate_shared_successes++;
759 return;
760 }
761
762 /*
763 * Someone else updated the ref_count at the same
764 * time and we lost the race. Fall back to the usual
765 * slow but safe path...
766 */
767 vm_object_deallocate_shared_failures++;
768 }
769
770 while (object != VM_OBJECT_NULL) {
771 vm_object_lock(object);
772
773 assert(object->ref_count > 0);
774
775 /*
776 * If the object has a named reference, and only
777 * that reference would remain, inform the pager
778 * about the last "mapping" reference going away.
779 */
780 if ((object->ref_count == 2) && (object->named)) {
781 memory_object_t pager = object->pager;
782
783 /* Notify the Pager that there are no */
784 /* more mappers for this object */
785
786 if (pager != MEMORY_OBJECT_NULL) {
787 vm_object_mapping_wait(object, THREAD_UNINT);
788 vm_object_mapping_begin(object);
789 vm_object_unlock(object);
790
791 memory_object_last_unmap(pager);
792
793 vm_object_lock(object);
794 vm_object_mapping_end(object);
795 }
796 assert(object->ref_count > 0);
797 }
798
799 /*
800 * Lose the reference. If other references
801 * remain, then we are done, unless we need
802 * to retry a cache trim.
803 * If it is the last reference, then keep it
804 * until any pending initialization is completed.
805 */
806
807 /* if the object is terminating, it cannot go into */
808 /* the cache and we obviously should not call */
809 /* terminate again. */
810
811 if ((object->ref_count > 1) || object->terminating) {
812 vm_object_lock_assert_exclusive(object);
813 object->ref_count--;
814
815 if (object->ref_count == 1 &&
816 object->shadow != VM_OBJECT_NULL) {
817 /*
818 * There's only one reference left on this
819 * VM object. We can't tell if it's a valid
820 * one (from a mapping for example) or if this
821 * object is just part of a possibly stale and
822 * useless shadow chain.
823 * We would like to try and collapse it into
824 * its parent, but we don't have any pointers
825 * back to this parent object.
826 * But we can try and collapse this object with
827 * its own shadows, in case these are useless
828 * too...
829 * We can't bypass this object though, since we
830 * don't know if this last reference on it is
831 * meaningful or not.
832 */
833 vm_object_collapse(object, 0, FALSE);
834 }
835 vm_object_unlock(object);
836 return;
837 }
838
839 /*
840 * We have to wait for initialization
841 * before destroying or caching the object.
842 */
843
844 if (object->pager_created && !object->pager_initialized) {
845 assert(!object->can_persist);
846 vm_object_assert_wait(object,
847 VM_OBJECT_EVENT_INITIALIZED,
848 THREAD_UNINT);
849 vm_object_unlock(object);
850
851 thread_block(THREAD_CONTINUE_NULL);
852 continue;
853 }
854
855 /*
856 * Terminate this object. If it had a shadow,
857 * then deallocate it; otherwise, if we need
858 * to retry a cache trim, do so now; otherwise,
859 * we are done. "pageout" objects have a shadow,
860 * but maintain a "paging reference" rather than
861 * a normal reference.
862 */
863 shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
864
865 if (vm_object_terminate(object) != KERN_SUCCESS) {
866 return;
867 }
868 if (shadow != VM_OBJECT_NULL) {
869 object = shadow;
870 continue;
871 }
872 return;
873 }
874 }
875
876
877
878 vm_page_t
879 vm_object_page_grab(
880 vm_object_t object)
881 {
882 vm_page_t p, next_p;
883 int p_limit = 0;
884 int p_skipped = 0;
885
886 vm_object_lock_assert_exclusive(object);
887
888 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
889 p_limit = MIN(50, object->resident_page_count);
890
891 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
892 p = next_p;
893 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
894
895 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
896 goto move_page_in_obj;
897 }
898
899 if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
900 vm_page_lockspin_queues();
901
902 if (p->vmp_pmapped) {
903 int refmod_state;
904
905 vm_object_page_grab_pmapped++;
906
907 if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
908 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
909
910 if (refmod_state & VM_MEM_REFERENCED) {
911 p->vmp_reference = TRUE;
912 }
913 if (refmod_state & VM_MEM_MODIFIED) {
914 SET_PAGE_DIRTY(p, FALSE);
915 }
916 }
917 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
918 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
919
920 if (refmod_state & VM_MEM_REFERENCED) {
921 p->vmp_reference = TRUE;
922 }
923 if (refmod_state & VM_MEM_MODIFIED) {
924 SET_PAGE_DIRTY(p, FALSE);
925 }
926
927 if (p->vmp_dirty == FALSE) {
928 goto take_page;
929 }
930 }
931 }
932 if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
933 vm_page_activate(p);
934
935 counter_inc(&vm_statistics_reactivations);
936 vm_object_page_grab_reactivations++;
937 }
938 vm_page_unlock_queues();
939 move_page_in_obj:
940 vm_page_queue_remove(&object->memq, p, vmp_listq);
941 vm_page_queue_enter(&object->memq, p, vmp_listq);
942
943 p_skipped++;
944 continue;
945 }
946 vm_page_lockspin_queues();
947 take_page:
948 vm_page_free_prepare_queues(p);
949 vm_object_page_grab_returned++;
950 vm_object_page_grab_skipped += p_skipped;
951
952 vm_page_unlock_queues();
953
954 vm_page_free_prepare_object(p, TRUE);
955
956 return p;
957 }
958 vm_object_page_grab_skipped += p_skipped;
959 vm_object_page_grab_failed++;
960
961 return NULL;
962 }
963
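/*
 * Usage sketch (illustrative, not from the original source):
 * vm_object_page_grab() must be called with the object locked
 * exclusively, as asserted above.  On success it returns a page that has
 * already been prepared for freeing (removed from the paging queues and
 * from the object); on failure it returns VM_PAGE_NULL.  The wrapper
 * name is hypothetical.
 */
static vm_page_t
vm_object_page_grab_usage_sketch(vm_object_t object)
{
	vm_page_t       m;

	vm_object_lock(object);         /* exclusive lock */
	m = vm_object_page_grab(object);
	vm_object_unlock(object);

	return m;       /* caller is now responsible for freeing the page */
}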
964
965
966 #define EVICT_PREPARE_LIMIT 64
967 #define EVICT_AGE 10
968
969 static clock_sec_t vm_object_cache_aging_ts = 0;
970
971 static void
972 vm_object_cache_remove_locked(
973 vm_object_t object)
974 {
975 assert(object->purgable == VM_PURGABLE_DENY);
976
977 queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
978 object->cached_list.next = NULL;
979 object->cached_list.prev = NULL;
980
981 vm_object_cached_count--;
982 }
983
984 void
985 vm_object_cache_remove(
986 vm_object_t object)
987 {
988 vm_object_cache_lock_spin();
989
990 if (object->cached_list.next &&
991 object->cached_list.prev) {
992 vm_object_cache_remove_locked(object);
993 }
994
995 vm_object_cache_unlock();
996 }
997
998 void
999 vm_object_cache_add(
1000 vm_object_t object)
1001 {
1002 clock_sec_t sec;
1003 clock_nsec_t nsec;
1004
1005 assert(object->purgable == VM_PURGABLE_DENY);
1006
1007 if (object->resident_page_count == 0) {
1008 return;
1009 }
1010 clock_get_system_nanotime(&sec, &nsec);
1011
1012 vm_object_cache_lock_spin();
1013
1014 if (object->cached_list.next == NULL &&
1015 object->cached_list.prev == NULL) {
1016 queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
1017 object->vo_cache_ts = sec + EVICT_AGE;
1018 object->vo_cache_pages_to_scan = object->resident_page_count;
1019
1020 vm_object_cached_count++;
1021 vm_object_cache_adds++;
1022 }
1023 vm_object_cache_unlock();
1024 }
1025
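/*
 * Illustrative sketch (not part of the original source): the aging rule
 * used by the cache.  vm_object_cache_add() stamps the object with
 * "now + EVICT_AGE" seconds; vm_object_cache_evict() will not harvest
 * its pages until that time has passed.  This hypothetical predicate
 * simply restates the check performed by the eviction loop.
 */
static boolean_t
vm_object_cache_has_aged_sketch(vm_object_t object)
{
	clock_sec_t     sec;
	clock_nsec_t    nsec;

	clock_get_system_nanotime(&sec, &nsec);

	/* eligible for eviction once EVICT_AGE seconds have elapsed */
	return (sec >= object->vo_cache_ts) ? TRUE : FALSE;
}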
1026 int
1027 vm_object_cache_evict(
1028 int num_to_evict,
1029 int max_objects_to_examine)
1030 {
1031 vm_object_t object = VM_OBJECT_NULL;
1032 vm_object_t next_obj = VM_OBJECT_NULL;
1033 vm_page_t local_free_q = VM_PAGE_NULL;
1034 vm_page_t p;
1035 vm_page_t next_p;
1036 int object_cnt = 0;
1037 vm_page_t ep_array[EVICT_PREPARE_LIMIT];
1038 int ep_count;
1039 int ep_limit;
1040 int ep_index;
1041 int ep_freed = 0;
1042 int ep_moved = 0;
1043 uint32_t ep_skipped = 0;
1044 clock_sec_t sec;
1045 clock_nsec_t nsec;
1046
1047 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1048 /*
1049 * do a couple of quick checks to see if it's
1050 * worthwhile grabbing the lock
1051 */
1052 if (queue_empty(&vm_object_cached_list)) {
1053 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1054 return 0;
1055 }
1056 clock_get_system_nanotime(&sec, &nsec);
1057
1058 /*
1059 * the object at the head of the queue has not
1060 * yet sufficiently aged
1061 */
1062 if (sec < vm_object_cache_aging_ts) {
1063 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1064 return 0;
1065 }
1066 /*
1067 * don't need the queue lock to find
1068 * and lock an object on the cached list
1069 */
1070 vm_page_unlock_queues();
1071
1072 vm_object_cache_lock_spin();
1073
1074 for (;;) {
1075 next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1076
1077 while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1078 object = next_obj;
1079 next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1080
1081 assert(object->purgable == VM_PURGABLE_DENY);
1082
1083 if (sec < object->vo_cache_ts) {
1084 KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1085
1086 vm_object_cache_aging_ts = object->vo_cache_ts;
1087 object = VM_OBJECT_NULL;
1088 break;
1089 }
1090 if (!vm_object_lock_try_scan(object)) {
1091 /*
1092 * just skip over this guy for now... if we find
1093 * an object to steal pages from, we'll revisit in a bit...
1094 * hopefully, the lock will have cleared
1095 */
1096 KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1097
1098 object = VM_OBJECT_NULL;
1099 continue;
1100 }
1101 if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1102 /*
1103 * this case really shouldn't happen, but it's not fatal
1104 * so deal with it... if we don't remove the object from
1105 * the list, we'll never move past it.
1106 */
1107 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1108
1109 vm_object_cache_remove_locked(object);
1110 vm_object_unlock(object);
1111 object = VM_OBJECT_NULL;
1112 continue;
1113 }
1114 /*
1115 * we have a locked object with pages...
1116 * time to start harvesting
1117 */
1118 break;
1119 }
1120 vm_object_cache_unlock();
1121
1122 if (object == VM_OBJECT_NULL) {
1123 break;
1124 }
1125
1126 /*
1127 * object is locked at this point and
1128 * has resident pages
1129 */
1130 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1131
1132 /*
1133 * break the page scan into 2 pieces to minimize the time spent
1134 * behind the page queue lock...
1135 * the list of pages on these unused objects is likely to be cold
1136 * w/r to the cpu cache which increases the time to scan the list
1137 * tenfold... and we may have a 'run' of pages we can't utilize that
1138 * needs to be skipped over...
1139 */
1140 if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1141 ep_limit = EVICT_PREPARE_LIMIT;
1142 }
1143 ep_count = 0;
1144
1145 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1146 p = next_p;
1147 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1148
1149 object->vo_cache_pages_to_scan--;
1150
1151 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1152 vm_page_queue_remove(&object->memq, p, vmp_listq);
1153 vm_page_queue_enter(&object->memq, p, vmp_listq);
1154
1155 ep_skipped++;
1156 continue;
1157 }
1158 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1159 vm_page_queue_remove(&object->memq, p, vmp_listq);
1160 vm_page_queue_enter(&object->memq, p, vmp_listq);
1161
1162 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1163 }
1164 ep_array[ep_count++] = p;
1165 }
1166 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1167
1168 vm_page_lockspin_queues();
1169
1170 for (ep_index = 0; ep_index < ep_count; ep_index++) {
1171 p = ep_array[ep_index];
1172
1173 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1174 p->vmp_reference = FALSE;
1175 p->vmp_no_cache = FALSE;
1176
1177 /*
1178 * we've already filtered out pages that are in the laundry
1179 * so if we get here, this page can't be on the pageout queue
1180 */
1181 vm_page_queues_remove(p, FALSE);
1182 vm_page_enqueue_inactive(p, TRUE);
1183
1184 ep_moved++;
1185 } else {
1186 #if CONFIG_PHANTOM_CACHE
1187 vm_phantom_cache_add_ghost(p);
1188 #endif
1189 vm_page_free_prepare_queues(p);
1190
1191 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1192 /*
1193 * Add this page to our list of reclaimed pages,
1194 * to be freed later.
1195 */
1196 p->vmp_snext = local_free_q;
1197 local_free_q = p;
1198
1199 ep_freed++;
1200 }
1201 }
1202 vm_page_unlock_queues();
1203
1204 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1205
1206 if (local_free_q) {
1207 vm_page_free_list(local_free_q, TRUE);
1208 local_free_q = VM_PAGE_NULL;
1209 }
1210 if (object->vo_cache_pages_to_scan == 0) {
1211 KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1212
1213 vm_object_cache_remove(object);
1214
1215 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1216 }
1217 /*
1218 * done with this object
1219 */
1220 vm_object_unlock(object);
1221 object = VM_OBJECT_NULL;
1222
1223 /*
1224 * at this point, we are not holding any locks
1225 */
1226 if ((ep_freed + ep_moved) >= num_to_evict) {
1227 /*
1228 * we've reached our target for the
1229 * number of pages to evict
1230 */
1231 break;
1232 }
1233 vm_object_cache_lock_spin();
1234 }
1235 /*
1236 * put the page queues lock back to the caller's
1237 * idea of it
1238 */
1239 vm_page_lock_queues();
1240
1241 vm_object_cache_pages_freed += ep_freed;
1242 vm_object_cache_pages_moved += ep_moved;
1243 vm_object_cache_pages_skipped += ep_skipped;
1244
1245 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
1246 return ep_freed;
1247 }
1248
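/*
 * Usage sketch (illustrative, not from the original source):
 * vm_object_cache_evict() expects the page queues lock to be held by the
 * caller and returns with it held again (the routine drops and retakes
 * the lock internally).  The request sizes below are arbitrary example
 * values, and the wrapper name is hypothetical.
 */
static int
vm_object_cache_evict_usage_sketch(void)
{
	int     freed;

	vm_page_lock_queues();
	/* try to reclaim up to 100 pages, examining at most 10 objects */
	freed = vm_object_cache_evict(100, 10);
	vm_page_unlock_queues();

	return freed;
}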
1249 /*
1250 * Routine: vm_object_terminate
1251 * Purpose:
1252 * Free all resources associated with a vm_object.
1253 * In/out conditions:
1254 * Upon entry, the object must be locked,
1255 * and the object must have exactly one reference.
1256 *
1257 * The shadow object reference is left alone.
1258 *
1259 * The object must be unlocked if it's found that pages
1260 * must be flushed to a backing object. If someone
1261 * manages to map the object while it is being flushed,
1262 * the object is returned unlocked and unchanged. Otherwise,
1263 * upon exit, the cache will be unlocked, and the
1264 * object will cease to exist.
1265 */
1266 static kern_return_t
1267 vm_object_terminate(
1268 vm_object_t object)
1269 {
1270 vm_object_t shadow_object;
1271
1272 vm_object_lock_assert_exclusive(object);
1273
1274 if (!object->pageout && (!object->internal && object->can_persist) &&
1275 (object->pager != NULL || object->shadow_severed)) {
1276 /*
1277 * Clear pager_trusted bit so that the pages get yanked
1278 * out of the object instead of cleaned in place. This
1279 * prevents a deadlock in XMM and makes more sense anyway.
1280 */
1281 object->pager_trusted = FALSE;
1282
1283 vm_object_reap_pages(object, REAP_TERMINATE);
1284 }
1285 /*
1286 * Make sure the object isn't already being terminated
1287 */
1288 if (object->terminating) {
1289 vm_object_lock_assert_exclusive(object);
1290 object->ref_count--;
1291 assert(object->ref_count > 0);
1292 vm_object_unlock(object);
1293 return KERN_FAILURE;
1294 }
1295
1296 /*
1297 * Did somebody get a reference to the object while we were
1298 * cleaning it?
1299 */
1300 if (object->ref_count != 1) {
1301 vm_object_lock_assert_exclusive(object);
1302 object->ref_count--;
1303 assert(object->ref_count > 0);
1304 vm_object_unlock(object);
1305 return KERN_FAILURE;
1306 }
1307
1308 /*
1309 * Make sure no one can look us up now.
1310 */
1311
1312 object->terminating = TRUE;
1313 object->alive = FALSE;
1314
1315 if (!object->internal &&
1316 object->cached_list.next &&
1317 object->cached_list.prev) {
1318 vm_object_cache_remove(object);
1319 }
1320
1321 /*
1322 * Detach the object from its shadow if we are the shadow's
1323 * copy. The reference we hold on the shadow must be dropped
1324 * by our caller.
1325 */
1326 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1327 !(object->pageout)) {
1328 vm_object_lock(shadow_object);
1329 if (shadow_object->copy == object) {
1330 shadow_object->copy = VM_OBJECT_NULL;
1331 }
1332 vm_object_unlock(shadow_object);
1333 }
1334
1335 if (object->paging_in_progress != 0 ||
1336 object->activity_in_progress != 0) {
1337 /*
1338 * There are still some paging_in_progress references
1339 * on this object, meaning that there are some paging
1340 * or other I/O operations in progress for this VM object.
1341 * Such operations take some paging_in_progress references
1342 * up front to ensure that the object doesn't go away, but
1343 * they may also need to acquire a reference on the VM object,
1344 * to map it in kernel space, for example. That means that
1345 * they may end up releasing the last reference on the VM
1346 * object, triggering its termination, while still holding
1347 * paging_in_progress references. Waiting for these
1348 * pending paging_in_progress references to go away here would
1349 * deadlock.
1350 *
1351 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1352 * complete the VM object termination if it still holds
1353 * paging_in_progress references at this point.
1354 *
1355 * No new paging_in_progress should appear now that the
1356 * VM object is "terminating" and not "alive".
1357 */
1358 vm_object_reap_async(object);
1359 vm_object_unlock(object);
1360 /*
1361 * Return KERN_FAILURE to let the caller know that we
1362 * haven't completed the termination and it can't drop this
1363 * object's reference on its shadow object yet.
1364 * The reaper thread will take care of that once it has
1365 * completed this object's termination.
1366 */
1367 return KERN_FAILURE;
1368 }
1369 /*
1370 * complete the VM object termination
1371 */
1372 vm_object_reap(object);
1373 object = VM_OBJECT_NULL;
1374
1375 /*
1376 * the object lock was released by vm_object_reap()
1377 *
1378 * KERN_SUCCESS means that this object has been terminated
1379 * and no longer needs its shadow object but still holds a
1380 * reference on it.
1381 * The caller is responsible for dropping that reference.
1382 * We can't call vm_object_deallocate() here because that
1383 * would create a recursion.
1384 */
1385 return KERN_SUCCESS;
1386 }
1387
1388
1389 /*
1390 * vm_object_reap():
1391 *
1392 * Complete the termination of a VM object after it's been marked
1393 * as "terminating" and "!alive" by vm_object_terminate().
1394 *
1395 * The VM object must be locked by caller.
1396 * The lock will be released on return and the VM object is no longer valid.
1397 */
1398
1399 void
1400 vm_object_reap(
1401 vm_object_t object)
1402 {
1403 memory_object_t pager;
1404
1405 vm_object_lock_assert_exclusive(object);
1406 assert(object->paging_in_progress == 0);
1407 assert(object->activity_in_progress == 0);
1408
1409 vm_object_reap_count++;
1410
1411 /*
1412 * Disown this purgeable object to clean up its owner's purgeable
1413 * ledgers. We need to do this before disconnecting the object
1414 * from its pager, to properly account for compressed pages.
1415 */
1416 if (object->internal &&
1417 (object->purgable != VM_PURGABLE_DENY ||
1418 object->vo_ledger_tag)) {
1419 int ledger_flags;
1420 kern_return_t kr;
1421
1422 ledger_flags = 0;
1423 if (object->vo_no_footprint) {
1424 ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
1425 }
1426 assert(!object->alive);
1427 assert(object->terminating);
1428 kr = vm_object_ownership_change(object,
1429 object->vo_ledger_tag, /* unchanged */
1430 NULL, /* no owner */
1431 ledger_flags,
1432 FALSE); /* task_objq not locked */
1433 assert(kr == KERN_SUCCESS);
1434 assert(object->vo_owner == NULL);
1435 }
1436
1437 #if DEVELOPMENT || DEBUG
1438 if (object->object_is_shared_cache &&
1439 object->pager != NULL &&
1440 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1441 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1442 }
1443 #endif /* DEVELOPMENT || DEBUG */
1444
1445 pager = object->pager;
1446 object->pager = MEMORY_OBJECT_NULL;
1447
1448 if (pager != MEMORY_OBJECT_NULL) {
1449 memory_object_control_disable(&object->pager_control);
1450 }
1451
1452 object->ref_count--;
1453 assert(object->ref_count == 0);
1454
1455 /*
1456 * remove from the purgeable queue if it's on one
1457 */
1458 if (object->internal) {
1459 assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1460
1461 VM_OBJECT_UNWIRED(object);
1462
1463 if (object->purgable == VM_PURGABLE_DENY) {
1464 /* not purgeable: nothing to do */
1465 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1466 purgeable_q_t queue;
1467
1468 queue = vm_purgeable_object_remove(object);
1469 assert(queue);
1470
1471 if (object->purgeable_when_ripe) {
1472 /*
1473 * Must take page lock for this -
1474 * using it to protect token queue
1475 */
1476 vm_page_lock_queues();
1477 vm_purgeable_token_delete_first(queue);
1478
1479 assert(queue->debug_count_objects >= 0);
1480 vm_page_unlock_queues();
1481 }
1482
1483 /*
1484 * Update "vm_page_purgeable_count" in bulk and mark
1485 * object as VM_PURGABLE_EMPTY to avoid updating
1486 * "vm_page_purgeable_count" again in vm_page_remove()
1487 * when reaping the pages.
1488 */
1489 unsigned int delta;
1490 assert(object->resident_page_count >=
1491 object->wired_page_count);
1492 delta = (object->resident_page_count -
1493 object->wired_page_count);
1494 if (delta != 0) {
1495 assert(vm_page_purgeable_count >= delta);
1496 OSAddAtomic(-delta,
1497 (SInt32 *)&vm_page_purgeable_count);
1498 }
1499 if (object->wired_page_count != 0) {
1500 assert(vm_page_purgeable_wired_count >=
1501 object->wired_page_count);
1502 OSAddAtomic(-object->wired_page_count,
1503 (SInt32 *)&vm_page_purgeable_wired_count);
1504 }
1505 object->purgable = VM_PURGABLE_EMPTY;
1506 } else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1507 object->purgable == VM_PURGABLE_EMPTY) {
1508 /* remove from nonvolatile queue */
1509 vm_purgeable_nonvolatile_dequeue(object);
1510 } else {
1511 panic("object %p in unexpected purgeable state 0x%x\n",
1512 object, object->purgable);
1513 }
1514 if (object->transposed &&
1515 object->cached_list.next != NULL &&
1516 object->cached_list.prev == NULL) {
1517 /*
1518 * object->cached_list.next "points" to the
1519 * object that was transposed with this object.
1520 */
1521 } else {
1522 assert(object->cached_list.next == NULL);
1523 }
1524 assert(object->cached_list.prev == NULL);
1525 }
1526
1527 if (object->pageout) {
1528 /*
1529 * free all remaining pages tabled on
1530 * this object
1531 * and clean up its shadow
1532 */
1533 assert(object->shadow != VM_OBJECT_NULL);
1534
1535 vm_pageout_object_terminate(object);
1536 } else if (object->resident_page_count) {
1537 /*
1538 * free all remaining pages tabled on
1539 * this object
1540 */
1541 vm_object_reap_pages(object, REAP_REAP);
1542 }
1543 assert(vm_page_queue_empty(&object->memq));
1544 assert(object->paging_in_progress == 0);
1545 assert(object->activity_in_progress == 0);
1546 assert(object->ref_count == 0);
1547
1548 /*
1549 * If the pager has not already been released by
1550 * vm_object_destroy, we need to terminate it and
1551 * release our reference to it here.
1552 */
1553 if (pager != MEMORY_OBJECT_NULL) {
1554 vm_object_unlock(object);
1555 vm_object_release_pager(pager);
1556 vm_object_lock(object);
1557 }
1558
1559 /* kick off anyone waiting on terminating */
1560 object->terminating = FALSE;
1561 vm_object_paging_begin(object);
1562 vm_object_paging_end(object);
1563 vm_object_unlock(object);
1564
1565 object->shadow = VM_OBJECT_NULL;
1566
1567 #if VM_OBJECT_TRACKING
1568 if (vm_object_tracking_inited) {
1569 btlog_remove_entries_for_element(vm_object_tracking_btlog,
1570 object);
1571 }
1572 #endif /* VM_OBJECT_TRACKING */
1573
1574 vm_object_lock_destroy(object);
1575 /*
1576 * Free the space for the object.
1577 */
1578 zfree(vm_object_zone, object);
1579 object = VM_OBJECT_NULL;
1580 }
1581
1582
1583 unsigned int vm_max_batch = 256;
1584
1585 #define V_O_R_MAX_BATCH 128
1586
1587 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
1588
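/*
 * Illustrative example (not part of the original source): BATCH_LIMIT()
 * clamps a requested batch size to the "vm_max_batch" tunable.  With the
 * defaults above, BATCH_LIMIT(V_O_R_MAX_BATCH) evaluates to 128; if
 * vm_max_batch were lowered to 64, it would evaluate to 64 instead.
 */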
1589
1590 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
1591 MACRO_BEGIN \
1592 if (_local_free_q) { \
1593 if (do_disconnect) { \
1594 vm_page_t m; \
1595 for (m = _local_free_q; \
1596 m != VM_PAGE_NULL; \
1597 m = m->vmp_snext) { \
1598 if (m->vmp_pmapped) { \
1599 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
1600 } \
1601 } \
1602 } \
1603 vm_page_free_list(_local_free_q, TRUE); \
1604 _local_free_q = VM_PAGE_NULL; \
1605 } \
1606 MACRO_END
1607
1608
1609 void
1610 vm_object_reap_pages(
1611 vm_object_t object,
1612 int reap_type)
1613 {
1614 vm_page_t p;
1615 vm_page_t next;
1616 vm_page_t local_free_q = VM_PAGE_NULL;
1617 int loop_count;
1618 boolean_t disconnect_on_release;
1619 pmap_flush_context pmap_flush_context_storage;
1620
1621 if (reap_type == REAP_DATA_FLUSH) {
1622 /*
1623 * We need to disconnect pages from all pmaps before
1624 * releasing them to the free list
1625 */
1626 disconnect_on_release = TRUE;
1627 } else {
1628 /*
1629 * Either the caller has already disconnected the pages
1630 * from all pmaps, or we disconnect them here as we add
1631 * them to our local list of pages to be released.
1632 * No need to re-disconnect them when we release the pages
1633 * to the free list.
1634 */
1635 disconnect_on_release = FALSE;
1636 }
1637
1638 restart_after_sleep:
1639 if (vm_page_queue_empty(&object->memq)) {
1640 return;
1641 }
1642 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1643
1644 if (reap_type == REAP_PURGEABLE) {
1645 pmap_flush_context_init(&pmap_flush_context_storage);
1646 }
1647
1648 vm_page_lock_queues();
1649
1650 next = (vm_page_t)vm_page_queue_first(&object->memq);
1651
1652 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1653 p = next;
1654 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1655
1656 if (--loop_count == 0) {
1657 vm_page_unlock_queues();
1658
1659 if (local_free_q) {
1660 if (reap_type == REAP_PURGEABLE) {
1661 pmap_flush(&pmap_flush_context_storage);
1662 pmap_flush_context_init(&pmap_flush_context_storage);
1663 }
1664 /*
1665 * Free the pages we reclaimed so far
1666 * and take a little break to avoid
1667 * hogging the page queue lock too long
1668 */
1669 VM_OBJ_REAP_FREELIST(local_free_q,
1670 disconnect_on_release);
1671 } else {
1672 mutex_pause(0);
1673 }
1674
1675 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1676
1677 vm_page_lock_queues();
1678 }
1679 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1680 if (p->vmp_busy || p->vmp_cleaning) {
1681 vm_page_unlock_queues();
1682 /*
1683 * free the pages reclaimed so far
1684 */
1685 VM_OBJ_REAP_FREELIST(local_free_q,
1686 disconnect_on_release);
1687
1688 PAGE_SLEEP(object, p, THREAD_UNINT);
1689
1690 goto restart_after_sleep;
1691 }
1692 if (p->vmp_laundry) {
1693 vm_pageout_steal_laundry(p, TRUE);
1694 }
1695 }
1696 switch (reap_type) {
1697 case REAP_DATA_FLUSH:
1698 if (VM_PAGE_WIRED(p)) {
1699 /*
1700 * this is an odd case... perhaps we should
1701 * zero-fill this page since we're conceptually
1702 * tossing its data at this point, but leaving
1703 * it on the object to honor the 'wire' contract
1704 */
1705 continue;
1706 }
1707 break;
1708
1709 case REAP_PURGEABLE:
1710 if (VM_PAGE_WIRED(p)) {
1711 /*
1712 * can't purge a wired page
1713 */
1714 vm_page_purged_wired++;
1715 continue;
1716 }
1717 if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1718 vm_pageout_steal_laundry(p, TRUE);
1719 }
1720
1721 if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1722 /*
1723 * page is being acted upon,
1724 * so don't mess with it
1725 */
1726 vm_page_purged_others++;
1727 continue;
1728 }
1729 if (p->vmp_busy) {
1730 /*
1731 * We can't reclaim a busy page, but since it is
1732 * not wired we can deactivate it to make
1733 * sure that it gets considered by
1734 * vm_pageout_scan() later.
1735 */
1736 if (VM_PAGE_PAGEABLE(p)) {
1737 vm_page_deactivate(p);
1738 }
1739 vm_page_purged_busy++;
1740 continue;
1741 }
1742
1743 assert(VM_PAGE_OBJECT(p) != kernel_object);
1744
1745 /*
1746 * we can discard this page...
1747 */
1748 if (p->vmp_pmapped == TRUE) {
1749 /*
1750 * unmap the page
1751 */
1752 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1753 }
1754 vm_page_purged_count++;
1755
1756 break;
1757
1758 case REAP_TERMINATE:
1759 if (p->vmp_absent || p->vmp_private) {
1760 /*
1761 * For private pages, VM_PAGE_FREE just
1762 * leaves the page structure around for
1763 * its owner to clean up. For absent
1764 * pages, the structure is returned to
1765 * the appropriate pool.
1766 */
1767 break;
1768 }
1769 if (p->vmp_fictitious) {
1770 assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
1771 break;
1772 }
1773 if (!p->vmp_dirty && p->vmp_wpmapped) {
1774 p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1775 }
1776
1777 if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) {
1778 assert(!object->internal);
1779
1780 p->vmp_free_when_done = TRUE;
1781
1782 if (!p->vmp_laundry) {
1783 vm_page_queues_remove(p, TRUE);
1784 /*
1785 * flush page... page will be freed
1786 * upon completion of I/O
1787 */
1788 vm_pageout_cluster(p);
1789 }
1790 vm_page_unlock_queues();
1791 /*
1792 * free the pages reclaimed so far
1793 */
1794 VM_OBJ_REAP_FREELIST(local_free_q,
1795 disconnect_on_release);
1796
1797 vm_object_paging_wait(object, THREAD_UNINT);
1798
1799 goto restart_after_sleep;
1800 }
1801 break;
1802
1803 case REAP_REAP:
1804 break;
1805 }
1806 vm_page_free_prepare_queues(p);
1807 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1808 /*
1809 * Add this page to our list of reclaimed pages,
1810 * to be freed later.
1811 */
1812 p->vmp_snext = local_free_q;
1813 local_free_q = p;
1814 }
1815 vm_page_unlock_queues();
1816
1817 /*
1818 * Free the remaining reclaimed pages
1819 */
1820 if (reap_type == REAP_PURGEABLE) {
1821 pmap_flush(&pmap_flush_context_storage);
1822 }
1823
1824 VM_OBJ_REAP_FREELIST(local_free_q,
1825 disconnect_on_release);
1826 }
1827
1828
1829 void
1830 vm_object_reap_async(
1831 vm_object_t object)
1832 {
1833 vm_object_lock_assert_exclusive(object);
1834
1835 vm_object_reaper_lock_spin();
1836
1837 vm_object_reap_count_async++;
1838
1839 /* enqueue the VM object... */
1840 queue_enter(&vm_object_reaper_queue, object,
1841 vm_object_t, cached_list);
1842
1843 vm_object_reaper_unlock();
1844
1845 /* ... and wake up the reaper thread */
1846 thread_wakeup((event_t) &vm_object_reaper_queue);
1847 }
1848
1849
1850 void
1851 vm_object_reaper_thread(void)
1852 {
1853 vm_object_t object, shadow_object;
1854
1855 vm_object_reaper_lock_spin();
1856
1857 while (!queue_empty(&vm_object_reaper_queue)) {
1858 queue_remove_first(&vm_object_reaper_queue,
1859 object,
1860 vm_object_t,
1861 cached_list);
1862
1863 vm_object_reaper_unlock();
1864 vm_object_lock(object);
1865
1866 assert(object->terminating);
1867 assert(!object->alive);
1868
1869 /*
1870 * The pageout daemon might be playing with our pages.
1871 * Now that the object is dead, it won't touch any more
1872 * pages, but some pages might already be on their way out.
1873 * Hence, we wait until the active paging activities have
1874 * ceased before we break the association with the pager
1875 * itself.
1876 */
1877 while (object->paging_in_progress != 0 ||
1878 object->activity_in_progress != 0) {
1879 vm_object_wait(object,
1880 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1881 THREAD_UNINT);
1882 vm_object_lock(object);
1883 }
1884
1885 shadow_object =
1886 object->pageout ? VM_OBJECT_NULL : object->shadow;
1887
1888 vm_object_reap(object);
1889 /* cache is unlocked and object is no longer valid */
1890 object = VM_OBJECT_NULL;
1891
1892 if (shadow_object != VM_OBJECT_NULL) {
1893 /*
1894 * Drop the reference "object" was holding on
1895 * its shadow object.
1896 */
1897 vm_object_deallocate(shadow_object);
1898 shadow_object = VM_OBJECT_NULL;
1899 }
1900 vm_object_reaper_lock_spin();
1901 }
1902
1903 /* wait for more work... */
1904 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1905
1906 vm_object_reaper_unlock();
1907
1908 thread_block((thread_continue_t) vm_object_reaper_thread);
1909 /*NOTREACHED*/
1910 }
1911
1912 /*
1913 * Routine: vm_object_release_pager
1914 * Purpose: Terminate the pager and, upon completion,
1915 * release our last reference to it.
1916 */
1917 static void
1918 vm_object_release_pager(
1919 memory_object_t pager)
1920 {
1921 /*
1922 * Terminate the pager.
1923 */
1924
1925 (void) memory_object_terminate(pager);
1926
1927 /*
1928 * Release reference to pager.
1929 */
1930 memory_object_deallocate(pager);
1931 }
1932
1933 /*
1934 * Routine: vm_object_destroy
1935 * Purpose:
1936 * Shut down a VM object, despite the
1937 * presence of address map (or other) references
1938 * to the vm_object.
1939 */
1940 kern_return_t
1941 vm_object_destroy(
1942 vm_object_t object,
1943 __unused kern_return_t reason)
1944 {
1945 memory_object_t old_pager;
1946
1947 if (object == VM_OBJECT_NULL) {
1948 return KERN_SUCCESS;
1949 }
1950
1951 /*
1952 * Remove the pager association immediately.
1953 *
1954 * This will prevent the memory manager from further
1955 * meddling. [If it wanted to flush data or make
1956 * other changes, it should have done so before performing
1957 * the destroy call.]
1958 */
1959
1960 vm_object_lock(object);
1961 object->can_persist = FALSE;
1962 object->named = FALSE;
1963 object->alive = FALSE;
1964
1965 #if DEVELOPMENT || DEBUG
1966 if (object->object_is_shared_cache &&
1967 object->pager != NULL &&
1968 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1969 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1970 }
1971 #endif /* DEVELOPMENT || DEBUG */
1972
1973 old_pager = object->pager;
1974 object->pager = MEMORY_OBJECT_NULL;
1975 if (old_pager != MEMORY_OBJECT_NULL) {
1976 memory_object_control_disable(&object->pager_control);
1977 }
1978
1979 /*
1980 * Wait for the existing paging activity (that got
1981 * through before we nulled out the pager) to subside.
1982 */
1983
1984 vm_object_paging_wait(object, THREAD_UNINT);
1985 vm_object_unlock(object);
1986
1987 /*
1988 * Terminate the object now.
1989 */
1990 if (old_pager != MEMORY_OBJECT_NULL) {
1991 vm_object_release_pager(old_pager);
1992
1993 /*
1994 * JMM - Release the caller's reference. This assumes the
1995 * caller had a reference to release, which is a big (but
1996 * currently valid) assumption if this is driven from the
1997 * vnode pager (it is holding a named reference when making
1998 * this call).
1999 */
2000 vm_object_deallocate(object);
2001 }
2002 return KERN_SUCCESS;
2003 }
2004
2005 /*
2006 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2007 * exist because of the need to handle shadow chains. When deactivating pages, we only
2008 * want to deactivate the ones at the topmost level in the object chain. In order to do
2009 * this efficiently, the specified address range is divided up into "chunks" and we use
2010 * a bit map to keep track of which pages have already been processed as we descend down
2011 * the shadow chain. These chunk macros hide the details of the bit map implementation
2012 * as much as we can.
2013 *
2014 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2015 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2016 * order bit represents page 0 in the current range and highest order bit represents
2017 * page 63.
2018 *
2019 * For further convenience, we also use negative logic for the page state in the bit map.
2020 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2021 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2022 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2023 * out with all the bits set. The macros below hide all these details from the caller.
2024 */
2025
2026 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2027 /* be the same as the number of bits in */
2028 /* the chunk_state_t type. We use 64 */
2029 /* just for convenience. */
2030
2031 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2032
2033 typedef uint64_t chunk_state_t;
2034
2035 /*
2036 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2037 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2038 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2039 * looking at pages in that range. This can save us from unnecessarily chasing down the
2040 * shadow chain.
2041 */
2042
2043 #define CHUNK_INIT(c, len) \
2044 MACRO_BEGIN \
2045 uint64_t p; \
2046 \
2047 (c) = 0xffffffffffffffffLL; \
2048 \
2049 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2050 MARK_PAGE_HANDLED(c, p); \
2051 MACRO_END
2052
2053
2054 /*
2055 * Return true if all pages in the chunk have not yet been processed.
2056 */
2057
2058 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2059
2060 /*
2061 * Return true if the page at offset 'p' in the bit map has already been handled
2062 * while processing a higher level object in the shadow chain.
2063 */
2064
2065 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1ULL << (p))) == 0)
2066
2067 /*
2068 * Mark the page at offset 'p' in the bit map as having been processed.
2069 */
2070
2071 #define MARK_PAGE_HANDLED(c, p) \
2072 MACRO_BEGIN \
2073 (c) = (c) & ~(1ULL << (p)); \
2074 MACRO_END
2075
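/*
 * Illustrative sketch (not part of the build): for a hypothetical chunk
 * covering only 3 pages (len == 3 * PAGE_SIZE_64), CHUNK_INIT leaves the
 * low 3 bits set and clears the rest, so only pages 0..2 are considered
 * "not yet processed":
 *
 *	chunk_state_t cs;
 *
 *	CHUNK_INIT(cs, 3 * PAGE_SIZE_64);	// cs == 0x0000000000000007
 *	assert(!PAGE_ALREADY_HANDLED(cs, 0));	// page 0 still pending
 *	MARK_PAGE_HANDLED(cs, 0);		// cs == 0x0000000000000006
 *	MARK_PAGE_HANDLED(cs, 1);
 *	MARK_PAGE_HANDLED(cs, 2);
 *	assert(!CHUNK_NOT_COMPLETE(cs));	// whole range handled
 */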
2076
2077 /*
2078 * Return true if the page at the given offset has been paged out. Object is
2079 * locked upon entry and returned locked.
2080 */
2081
2082 static boolean_t
2083 page_is_paged_out(
2084 vm_object_t object,
2085 vm_object_offset_t offset)
2086 {
2087 if (object->internal &&
2088 object->alive &&
2089 !object->terminating &&
2090 object->pager_ready) {
2091 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2092 == VM_EXTERNAL_STATE_EXISTS) {
2093 return TRUE;
2094 }
2095 }
2096 return FALSE;
2097 }
2098
2099
2100
2101 /*
2102 * madvise_free_debug
2103 *
2104 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2105 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2106 * simulate the loss of the page's contents as if the page had been
2107 * reclaimed and then re-faulted.
2108 */
2109 #if DEVELOPMENT || DEBUG
2110 int madvise_free_debug = 1;
2111 #else /* !(DEVELOPMENT || DEBUG) */
2112 int madvise_free_debug = 0;
2113 #endif /* DEVELOPMENT || DEBUG */
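
/*
 * Illustrative user-space pattern (assumptions: an anonymous mapping and the
 * Darwin MADV_FREE_REUSABLE advice) that this debug knob helps exercise;
 * with madvise_free_debug set, the loss of contents becomes immediate
 * instead of happening whenever the pages are actually reclaimed:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	// ... use buf ...
 *	madvise(buf, len, MADV_FREE_REUSABLE);
 *	// buf's contents may now read back as zeroes at any time
 */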
2114
2115 __options_decl(deactivate_flags_t, uint32_t, {
2116 DEACTIVATE_KILL = 0x1,
2117 DEACTIVATE_REUSABLE = 0x2,
2118 DEACTIVATE_ALL_REUSABLE = 0x4,
2119 DEACTIVATE_CLEAR_REFMOD = 0x8
2120 });
2121
2122 /*
2123 * Deactivate the pages in the specified object and range. If DEACTIVATE_KILL is set in flags, also discard any
2124 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2125 * a size that is less than or equal to the CHUNK_SIZE.
2126 */
2127
2128 static void
2129 deactivate_pages_in_object(
2130 vm_object_t object,
2131 vm_object_offset_t offset,
2132 vm_object_size_t size,
2133 deactivate_flags_t flags,
2134 chunk_state_t *chunk_state,
2135 pmap_flush_context *pfc,
2136 struct pmap *pmap,
2137 vm_map_offset_t pmap_offset)
2138 {
2139 vm_page_t m;
2140 int p;
2141 struct vm_page_delayed_work dw_array;
2142 struct vm_page_delayed_work *dwp, *dwp_start;
2143 bool dwp_finish_ctx = TRUE;
2144 int dw_count;
2145 int dw_limit;
2146 unsigned int reusable = 0;
2147
2148 /*
2149 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2150 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2151 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2152 * all the pages in the chunk.
2153 */
2154
2155 dwp_start = dwp = NULL;
2156 dw_count = 0;
2157 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2158 dwp_start = vm_page_delayed_work_get_ctx();
2159 if (dwp_start == NULL) {
2160 dwp_start = &dw_array;
2161 dw_limit = 1;
2162 dwp_finish_ctx = FALSE;
2163 }
2164
2165 dwp = dwp_start;
2166
2167 for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2168 /*
2169 * If this offset has already been found and handled in a higher level object, then don't
2170 * do anything with it in the current shadow object.
2171 */
2172
2173 if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2174 continue;
2175 }
2176
2177 /*
2178 * See if the page at this offset is around. First check to see if the page is resident,
2179 * then if not, check the existence map or with the pager.
2180 */
2181
2182 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2183 /*
2184 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2185 * so that we won't bother looking for a page at this offset again if there are more
2186 * shadow objects. Then deactivate the page.
2187 */
2188
2189 MARK_PAGE_HANDLED(*chunk_state, p);
2190
2191 if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2192 (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2193 int clear_refmod_mask;
2194 int pmap_options;
2195 dwp->dw_mask = 0;
2196
2197 pmap_options = 0;
2198 clear_refmod_mask = VM_MEM_REFERENCED;
2199 dwp->dw_mask |= DW_clear_reference;
2200
2201 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2202 if (madvise_free_debug) {
2203 /*
2204 * zero-fill the page now
2205 * to simulate it being
2206 * reclaimed and re-faulted.
2207 */
2208 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2209 }
2210 m->vmp_precious = FALSE;
2211 m->vmp_dirty = FALSE;
2212
2213 clear_refmod_mask |= VM_MEM_MODIFIED;
2214 if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2215 /*
2216 * This page is now clean and
2217 * reclaimable. Move it out
2218 * of the throttled queue, so
2219 * that vm_pageout_scan() can
2220 * find it.
2221 */
2222 dwp->dw_mask |= DW_move_page;
2223 }
2224
2225 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2226
2227 if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2228 assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2229 assert(!object->all_reusable);
2230 m->vmp_reusable = TRUE;
2231 object->reusable_page_count++;
2232 assert(object->resident_page_count >= object->reusable_page_count);
2233 reusable++;
2234 /*
2235 * Tell pmap this page is now
2236 * "reusable" (to update pmap
2237 * stats for all mappings).
2238 */
2239 pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2240 }
2241 }
2242 if (flags & DEACTIVATE_CLEAR_REFMOD) {
2243 /*
2244 * The caller didn't clear the refmod bits in advance.
2245 * Clear them for this page now.
2246 */
2247 pmap_options |= PMAP_OPTIONS_NOFLUSH;
2248 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2249 clear_refmod_mask,
2250 pmap_options,
2251 (void *)pfc);
2252 }
2253
2254 if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2255 !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2256 dwp->dw_mask |= DW_move_page;
2257 }
2258
2259 if (dwp->dw_mask) {
2260 VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2261 dw_count);
2262 }
2263
2264 if (dw_count >= dw_limit) {
2265 if (reusable) {
2266 OSAddAtomic(reusable,
2267 &vm_page_stats_reusable.reusable_count);
2268 vm_page_stats_reusable.reusable += reusable;
2269 reusable = 0;
2270 }
2271 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2272
2273 dwp = dwp_start;
2274 dw_count = 0;
2275 }
2276 }
2277 } else {
2278 /*
2279 * The page at this offset isn't memory resident, check to see if it's
2280 * been paged out. If so, mark it as handled so we don't bother looking
2281 * for it in the shadow chain.
2282 */
2283
2284 if (page_is_paged_out(object, offset)) {
2285 MARK_PAGE_HANDLED(*chunk_state, p);
2286
2287 /*
2288 * If we're killing a non-resident page, then clear the page in the existence
2289 * map so we don't bother paging it back in if it's touched again in the future.
2290 */
2291
2292 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2293 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2294
2295 if (pmap != PMAP_NULL) {
2296 /*
2297 * Tell pmap that this page
2298 * is no longer mapped, to
2299 * adjust the footprint ledger
2300 * because this page is no
2301 * longer compressed.
2302 */
2303 pmap_remove_options(
2304 pmap,
2305 pmap_offset,
2306 (pmap_offset +
2307 PAGE_SIZE),
2308 PMAP_OPTIONS_REMOVE);
2309 }
2310 }
2311 }
2312 }
2313 }
2314
2315 if (reusable) {
2316 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2317 vm_page_stats_reusable.reusable += reusable;
2318 reusable = 0;
2319 }
2320
2321 if (dw_count) {
2322 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2323 dwp = dwp_start;
2324 dw_count = 0;
2325 }
2326
2327 if (dwp_start && dwp_finish_ctx) {
2328 vm_page_delayed_work_finish_ctx(dwp_start);
2329 dwp_start = dwp = NULL;
2330 }
2331 }
2332
2333
2334 /*
2335 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2336 * will always be less than or equal to the given size. The total range is divided up
2337 * into chunks for efficiency and performance related to the locks and handling the shadow
2338 * chain. This routine returns how much of the given "size" it actually processed. It's
2339 * up to the caller to loop and keep calling this routine until the entire range they want
2340 * to process has been done.
2341 * Iff DEACTIVATE_CLEAR_REFMOD is set in flags, pmap_clear_refmod_options is called for each physical page in this range.
2342 */
2343
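/*
 * Typical caller loop (this is what vm_object_deactivate_pages() below does;
 * shown here only as a sketch):
 *
 *	while (size) {
 *		length = deactivate_a_chunk(object, offset, size, flags,
 *		    &pmap_flush_context_storage, pmap, pmap_offset);
 *		size -= length;
 *		offset += length;
 *		pmap_offset += length;
 *	}
 */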
2344 static vm_object_size_t
2345 deactivate_a_chunk(
2346 vm_object_t orig_object,
2347 vm_object_offset_t offset,
2348 vm_object_size_t size,
2349 deactivate_flags_t flags,
2350 pmap_flush_context *pfc,
2351 struct pmap *pmap,
2352 vm_map_offset_t pmap_offset)
2353 {
2354 vm_object_t object;
2355 vm_object_t tmp_object;
2356 vm_object_size_t length;
2357 chunk_state_t chunk_state;
2358
2359
2360 /*
2361 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2362 * remaining size the caller asked for.
2363 */
2364
2365 length = MIN(size, CHUNK_SIZE);
2366
2367 /*
2368 * The chunk_state keeps track of which pages we've already processed if there's
2369 * a shadow chain on this object. At this point, we haven't done anything with this
2370 * range of pages yet, so initialize the state to indicate no pages processed yet.
2371 */
2372
2373 CHUNK_INIT(chunk_state, length);
2374 object = orig_object;
2375
2376 /*
2377 * Start at the top level object and iterate around the loop once for each object
2378 * in the shadow chain. We stop processing early if we've already found all the pages
2379 * in the range. Otherwise we stop when we run out of shadow objects.
2380 */
2381
2382 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2383 vm_object_paging_begin(object);
2384
2385 deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2386
2387 vm_object_paging_end(object);
2388
2389 /*
2390 * We've finished with this object, see if there's a shadow object. If
2391 * there is, update the offset and lock the new object. We also turn off
2392 * kill_page at this point since we only kill pages in the top most object.
2393 */
2394
2395 tmp_object = object->shadow;
2396
2397 if (tmp_object) {
2398 assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2399 flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2400 offset += object->vo_shadow_offset;
2401 vm_object_lock(tmp_object);
2402 }
2403
2404 if (object != orig_object) {
2405 vm_object_unlock(object);
2406 }
2407
2408 object = tmp_object;
2409 }
2410
2411 if (object && object != orig_object) {
2412 vm_object_unlock(object);
2413 }
2414
2415 return length;
2416 }
2417
2418
2419
2420 /*
2421 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2422 * we also clear the modified status of the page and "forget" any changes that have been made
2423 * to the page.
2424 */
2425
2426 __private_extern__ void
2427 vm_object_deactivate_pages(
2428 vm_object_t object,
2429 vm_object_offset_t offset,
2430 vm_object_size_t size,
2431 boolean_t kill_page,
2432 boolean_t reusable_page,
2433 struct pmap *pmap,
2434 vm_map_offset_t pmap_offset)
2435 {
2436 vm_object_size_t length;
2437 boolean_t all_reusable;
2438 pmap_flush_context pmap_flush_context_storage;
2439 unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2440 unsigned int pmap_clear_refmod_options = 0;
2441 deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2442 bool refmod_cleared = false;
2443 if (kill_page) {
2444 flags |= DEACTIVATE_KILL;
2445 }
2446 if (reusable_page) {
2447 flags |= DEACTIVATE_REUSABLE;
2448 }
2449
2450 /*
2451 * We break the range up into chunks and do one chunk at a time. This is for
2452 * efficiency and performance while handling the shadow chains and the locks.
2453 * The deactivate_a_chunk() function returns how much of the range it processed.
2454 * We keep calling this routine until the given size is exhausted.
2455 */
2456
2457
2458 all_reusable = FALSE;
2459 #if 11
2460 /*
2461 * For the sake of accurate "reusable" pmap stats, we need
2462 * to tell pmap about each page that is no longer "reusable",
2463 * so we can't do the "all_reusable" optimization.
2464 *
2465 * If we did go with the all_reusable optimization, we couldn't
2466 * return here when size is 0, since "all_reusable" could be TRUE.
2467 * As it is, returning early saves the overhead of doing the
2468 * pmap_flush_context work.
2469 */
2470 if (size == 0) {
2471 return;
2472 }
2473 #else
2474 if (reusable_page &&
2475 object->internal &&
2476 object->vo_size != 0 &&
2477 object->vo_size == size &&
2478 object->reusable_page_count == 0) {
2479 all_reusable = TRUE;
2480 reusable_page = FALSE;
2481 flags |= DEACTIVATE_ALL_REUSABLE;
2482 }
2483 #endif
2484
2485 if ((reusable_page || all_reusable) && object->all_reusable) {
2486 /* This means MADV_FREE_REUSABLE has been called twice, which
2487 * is probably illegal. */
2488 return;
2489 }
2490
2491
2492 pmap_flush_context_init(&pmap_flush_context_storage);
2493
2494 /*
2495 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2496 * We can't do this if we're killing pages and there's a shadow chain as
2497 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2498 * safe to kill).
2499 * And we can only do this on hardware that supports it.
2500 */
2501 if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2502 if (kill_page && object->internal) {
2503 pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2504 }
2505 if (reusable_page) {
2506 pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2507 }
2508
2509 refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2510 if (refmod_cleared) {
2511 // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2512 flags &= ~DEACTIVATE_CLEAR_REFMOD;
2513 }
2514 }
2515
2516 while (size) {
2517 length = deactivate_a_chunk(object, offset, size, flags,
2518 &pmap_flush_context_storage, pmap, pmap_offset);
2519
2520 size -= length;
2521 offset += length;
2522 pmap_offset += length;
2523 }
2524 pmap_flush(&pmap_flush_context_storage);
2525
2526 if (all_reusable) {
2527 if (!object->all_reusable) {
2528 unsigned int reusable;
2529
2530 object->all_reusable = TRUE;
2531 assert(object->reusable_page_count == 0);
2532 /* update global stats */
2533 reusable = object->resident_page_count;
2534 OSAddAtomic(reusable,
2535 &vm_page_stats_reusable.reusable_count);
2536 vm_page_stats_reusable.reusable += reusable;
2537 vm_page_stats_reusable.all_reusable_calls++;
2538 }
2539 } else if (reusable_page) {
2540 vm_page_stats_reusable.partial_reusable_calls++;
2541 }
2542 }
2543
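/*
 * Illustrative context (assumption, not stated in this file: this is reached
 * via the user-space MADV_FREE_REUSE advice, which undoes an earlier
 * MADV_FREE_REUSABLE on the same range):
 *
 *	madvise(buf, len, MADV_FREE_REUSABLE);	// pages become discardable
 *	// ...
 *	madvise(buf, len, MADV_FREE_REUSE);	// reclaim them for reuse
 */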
2544 void
2545 vm_object_reuse_pages(
2546 vm_object_t object,
2547 vm_object_offset_t start_offset,
2548 vm_object_offset_t end_offset,
2549 boolean_t allow_partial_reuse)
2550 {
2551 vm_object_offset_t cur_offset;
2552 vm_page_t m;
2553 unsigned int reused, reusable;
2554
2555 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
2556 MACRO_BEGIN \
2557 if ((m) != VM_PAGE_NULL && \
2558 (m)->vmp_reusable) { \
2559 assert((object)->reusable_page_count <= \
2560 (object)->resident_page_count); \
2561 assert((object)->reusable_page_count > 0); \
2562 (object)->reusable_page_count--; \
2563 (m)->vmp_reusable = FALSE; \
2564 (reused)++; \
2565 /* \
2566 * Tell pmap that this page is no longer \
2567 * "reusable", to update the "reusable" stats \
2568 * for all the pmaps that have mapped this \
2569 * page. \
2570 */ \
2571 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2572 0, /* refmod */ \
2573 (PMAP_OPTIONS_CLEAR_REUSABLE \
2574 | PMAP_OPTIONS_NOFLUSH), \
2575 NULL); \
2576 } \
2577 MACRO_END
2578
2579 reused = 0;
2580 reusable = 0;
2581
2582 vm_object_lock_assert_exclusive(object);
2583
2584 if (object->all_reusable) {
2585 panic("object %p all_reusable: can't update pmap stats\n",
2586 object);
2587 assert(object->reusable_page_count == 0);
2588 object->all_reusable = FALSE;
2589 if (end_offset - start_offset == object->vo_size ||
2590 !allow_partial_reuse) {
2591 vm_page_stats_reusable.all_reuse_calls++;
2592 reused = object->resident_page_count;
2593 } else {
2594 vm_page_stats_reusable.partial_reuse_calls++;
2595 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2596 if (m->vmp_offset < start_offset ||
2597 m->vmp_offset >= end_offset) {
2598 m->vmp_reusable = TRUE;
2599 object->reusable_page_count++;
2600 assert(object->resident_page_count >= object->reusable_page_count);
2601 continue;
2602 } else {
2603 assert(!m->vmp_reusable);
2604 reused++;
2605 }
2606 }
2607 }
2608 } else if (object->resident_page_count >
2609 ((end_offset - start_offset) >> PAGE_SHIFT)) {
2610 vm_page_stats_reusable.partial_reuse_calls++;
2611 for (cur_offset = start_offset;
2612 cur_offset < end_offset;
2613 cur_offset += PAGE_SIZE_64) {
2614 if (object->reusable_page_count == 0) {
2615 break;
2616 }
2617 m = vm_page_lookup(object, cur_offset);
2618 VM_OBJECT_REUSE_PAGE(object, m, reused);
2619 }
2620 } else {
2621 vm_page_stats_reusable.partial_reuse_calls++;
2622 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2623 if (object->reusable_page_count == 0) {
2624 break;
2625 }
2626 if (m->vmp_offset < start_offset ||
2627 m->vmp_offset >= end_offset) {
2628 continue;
2629 }
2630 VM_OBJECT_REUSE_PAGE(object, m, reused);
2631 }
2632 }
2633
2634 /* update global stats */
2635 OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2636 vm_page_stats_reusable.reused += reused;
2637 vm_page_stats_reusable.reusable += reusable;
2638 }
2639
2640 /*
2641 * Routine: vm_object_pmap_protect
2642 *
2643 * Purpose:
2644 * Reduces the permission for all physical
2645 * pages in the specified object range.
2646 *
2647 * If removing write permission only, it is
2648 * sufficient to protect only the pages in
2649 * the top-level object; only those pages may
2650 * have write permission.
2651 *
2652 * If removing all access, we must follow the
2653 * shadow chain from the top-level object to
2654 * remove access to all pages in shadowed objects.
2655 *
2656 * The object must *not* be locked. The object must
2657 * be internal.
2658 *
2659 * If pmap is not NULL, this routine assumes that
2660 * the only mappings for the pages are in that
2661 * pmap.
2662 */
2663
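/*
 * Illustrative call (hypothetical caller; "map" and "va_start" are
 * assumptions, not taken from this file): remove write permission from
 * the pages backing [offset, offset + size) that are mapped at va_start
 * in map->pmap:
 *
 *	vm_object_pmap_protect(object, offset, size,
 *	    map->pmap, PAGE_SIZE, va_start,
 *	    VM_PROT_READ);
 */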
2664 __private_extern__ void
2665 vm_object_pmap_protect(
2666 vm_object_t object,
2667 vm_object_offset_t offset,
2668 vm_object_size_t size,
2669 pmap_t pmap,
2670 vm_map_size_t pmap_page_size,
2671 vm_map_offset_t pmap_start,
2672 vm_prot_t prot)
2673 {
2674 vm_object_pmap_protect_options(object, offset, size, pmap,
2675 pmap_page_size,
2676 pmap_start, prot, 0);
2677 }
2678
2679 __private_extern__ void
2680 vm_object_pmap_protect_options(
2681 vm_object_t object,
2682 vm_object_offset_t offset,
2683 vm_object_size_t size,
2684 pmap_t pmap,
2685 vm_map_size_t pmap_page_size,
2686 vm_map_offset_t pmap_start,
2687 vm_prot_t prot,
2688 int options)
2689 {
2690 pmap_flush_context pmap_flush_context_storage;
2691 boolean_t delayed_pmap_flush = FALSE;
2692 vm_object_offset_t offset_in_object;
2693 vm_object_size_t size_in_object;
2694
2695 if (object == VM_OBJECT_NULL) {
2696 return;
2697 }
2698 if (pmap_page_size > PAGE_SIZE) {
2699 /* for 16K map on 4K device... */
2700 pmap_page_size = PAGE_SIZE;
2701 }
2702 /*
2703 * If we decide to work on the object itself, extend the range to
2704 * cover a full number of native pages.
2705 */
2706 size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
2707 offset_in_object = vm_object_trunc_page(offset);
2708 /*
2709 * If we decide to work on the pmap, use the exact range specified,
2710 * so no rounding/truncating offset and size. They should already
2711 * be aligned to pmap_page_size.
2712 */
2713 assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
2714 "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
2715 offset, size, (uint64_t)pmap_page_size);
2716
2717 vm_object_lock(object);
2718
2719 if (object->phys_contiguous) {
2720 if (pmap != NULL) {
2721 vm_object_unlock(object);
2722 pmap_protect_options(pmap,
2723 pmap_start,
2724 pmap_start + size,
2725 prot,
2726 options & ~PMAP_OPTIONS_NOFLUSH,
2727 NULL);
2728 } else {
2729 vm_object_offset_t phys_start, phys_end, phys_addr;
2730
2731 phys_start = object->vo_shadow_offset + offset_in_object;
2732 phys_end = phys_start + size_in_object;
2733 assert(phys_start <= phys_end);
2734 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
2735 vm_object_unlock(object);
2736
2737 pmap_flush_context_init(&pmap_flush_context_storage);
2738 delayed_pmap_flush = FALSE;
2739
2740 for (phys_addr = phys_start;
2741 phys_addr < phys_end;
2742 phys_addr += PAGE_SIZE_64) {
2743 pmap_page_protect_options(
2744 (ppnum_t) (phys_addr >> PAGE_SHIFT),
2745 prot,
2746 options | PMAP_OPTIONS_NOFLUSH,
2747 (void *)&pmap_flush_context_storage);
2748 delayed_pmap_flush = TRUE;
2749 }
2750 if (delayed_pmap_flush == TRUE) {
2751 pmap_flush(&pmap_flush_context_storage);
2752 }
2753 }
2754 return;
2755 }
2756
2757 assert(object->internal);
2758
2759 while (TRUE) {
2760 if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
2761 vm_object_unlock(object);
2762 if (pmap_page_size < PAGE_SIZE) {
2763 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
2764 }
2765 pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
2766 options & ~PMAP_OPTIONS_NOFLUSH, NULL);
2767 return;
2768 }
2769
2770 if (pmap_page_size < PAGE_SIZE) {
2771 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
2772 }
2773
2774 pmap_flush_context_init(&pmap_flush_context_storage);
2775 delayed_pmap_flush = FALSE;
2776
2777 /*
2778 * if we are doing large ranges with respect to resident
2779 * page count then we should iterate over pages, otherwise
2780 * inverse page look-up will be faster
2781 */
2782 if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
2783 vm_page_t p;
2784 vm_object_offset_t end;
2785
2786 end = offset_in_object + size_in_object;
2787
2788 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
2789 if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) {
2790 vm_map_offset_t start;
2791
2792 /*
2793 * XXX FBDP 4K: intentionally using "offset" here instead
2794 * of "offset_in_object", since "start" is a pmap address.
2795 */
2796 start = pmap_start + p->vmp_offset - offset;
2797
2798 if (pmap != PMAP_NULL) {
2799 vm_map_offset_t curr;
2800 for (curr = start;
2801 curr < start + PAGE_SIZE_64;
2802 curr += pmap_page_size) {
2803 if (curr < pmap_start) {
2804 continue;
2805 }
2806 if (curr >= pmap_start + size) {
2807 break;
2808 }
2809 pmap_protect_options(
2810 pmap,
2811 curr,
2812 curr + pmap_page_size,
2813 prot,
2814 options | PMAP_OPTIONS_NOFLUSH,
2815 &pmap_flush_context_storage);
2816 }
2817 } else {
2818 pmap_page_protect_options(
2819 VM_PAGE_GET_PHYS_PAGE(p),
2820 prot,
2821 options | PMAP_OPTIONS_NOFLUSH,
2822 &pmap_flush_context_storage);
2823 }
2824 delayed_pmap_flush = TRUE;
2825 }
2826 }
2827 } else {
2828 vm_page_t p;
2829 vm_object_offset_t end;
2830 vm_object_offset_t target_off;
2831
2832 end = offset_in_object + size_in_object;
2833
2834 for (target_off = offset_in_object;
2835 target_off < end; target_off += PAGE_SIZE) {
2836 p = vm_page_lookup(object, target_off);
2837
2838 if (p != VM_PAGE_NULL) {
2839 vm_object_offset_t start;
2840
2841 /*
2842 * XXX FBDP 4K: intentionally using "offset" here instead
2843 * of "offset_in_object", since "start" is a pmap address.
2844 */
2845 start = pmap_start + (p->vmp_offset - offset);
2846
2847 if (pmap != PMAP_NULL) {
2848 vm_map_offset_t curr;
2849 for (curr = start;
2850 curr < start + PAGE_SIZE;
2851 curr += pmap_page_size) {
2852 if (curr < pmap_start) {
2853 continue;
2854 }
2855 if (curr >= pmap_start + size) {
2856 break;
2857 }
2858 pmap_protect_options(
2859 pmap,
2860 curr,
2861 curr + pmap_page_size,
2862 prot,
2863 options | PMAP_OPTIONS_NOFLUSH,
2864 &pmap_flush_context_storage);
2865 }
2866 } else {
2867 pmap_page_protect_options(
2868 VM_PAGE_GET_PHYS_PAGE(p),
2869 prot,
2870 options | PMAP_OPTIONS_NOFLUSH,
2871 &pmap_flush_context_storage);
2872 }
2873 delayed_pmap_flush = TRUE;
2874 }
2875 }
2876 }
2877 if (delayed_pmap_flush == TRUE) {
2878 pmap_flush(&pmap_flush_context_storage);
2879 }
2880
2881 if (prot == VM_PROT_NONE) {
2882 /*
2883 * Must follow shadow chain to remove access
2884 * to pages in shadowed objects.
2885 */
2886 vm_object_t next_object;
2887
2888 next_object = object->shadow;
2889 if (next_object != VM_OBJECT_NULL) {
2890 offset_in_object += object->vo_shadow_offset;
2891 offset += object->vo_shadow_offset;
2892 vm_object_lock(next_object);
2893 vm_object_unlock(object);
2894 object = next_object;
2895 } else {
2896 /*
2897 * End of chain - we are done.
2898 */
2899 break;
2900 }
2901 } else {
2902 /*
2903 * Pages in shadowed objects may never have
2904 * write permission - we may stop here.
2905 */
2906 break;
2907 }
2908 }
2909
2910 vm_object_unlock(object);
2911 }
2912
2913 uint32_t vm_page_busy_absent_skipped = 0;
2914
2915 /*
2916 * Routine: vm_object_copy_slowly
2917 *
2918 * Description:
2919 * Copy the specified range of the source
2920 * virtual memory object without using
2921 * protection-based optimizations (such
2922 * as copy-on-write). The pages in the
2923 * region are actually copied.
2924 *
2925 * In/out conditions:
2926 * The caller must hold a reference and a lock
2927 * for the source virtual memory object. The source
2928 * object will be returned *unlocked*.
2929 *
2930 * Results:
2931 * If the copy is completed successfully, KERN_SUCCESS is
2932 * returned. If the caller asserted the interruptible
2933 * argument, and an interruption occurred while waiting
2934 * for a user-generated event, MACH_SEND_INTERRUPTED is
2935 * returned. Other values may be returned to indicate
2936 * hard errors during the copy operation.
2937 *
2938 * A new virtual memory object is returned in a
2939 * parameter (_result_object). The contents of this
2940 * new object, starting at a zero offset, are a copy
2941 * of the source memory region. In the event of
2942 * an error, this parameter will contain the value
2943 * VM_OBJECT_NULL.
2944 */
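/*
 * Illustrative call pattern (hypothetical caller; names are assumptions):
 *
 *	vm_object_t copy_object;
 *	kern_return_t kr;
 *
 *	vm_object_lock(src_object);		// caller also holds a reference
 *	kr = vm_object_copy_slowly(src_object, src_offset, size,
 *	    FALSE, &copy_object);		// not interruptible
 *	// src_object is now unlocked; on success copy_object holds the
 *	// copied pages starting at offset 0, otherwise VM_OBJECT_NULL.
 */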
2945 __private_extern__ kern_return_t
2946 vm_object_copy_slowly(
2947 vm_object_t src_object,
2948 vm_object_offset_t src_offset,
2949 vm_object_size_t size,
2950 boolean_t interruptible,
2951 vm_object_t *_result_object) /* OUT */
2952 {
2953 vm_object_t new_object;
2954 vm_object_offset_t new_offset;
2955
2956 struct vm_object_fault_info fault_info = {};
2957
2958 if (size == 0) {
2959 vm_object_unlock(src_object);
2960 *_result_object = VM_OBJECT_NULL;
2961 return KERN_INVALID_ARGUMENT;
2962 }
2963
2964 /*
2965 * Prevent destruction of the source object while we copy.
2966 */
2967
2968 vm_object_reference_locked(src_object);
2969 vm_object_unlock(src_object);
2970
2971 /*
2972 * Create a new object to hold the copied pages.
2973 * A few notes:
2974 * We fill the new object starting at offset 0,
2975 * regardless of the input offset.
2976 * We don't bother to lock the new object within
2977 * this routine, since we have the only reference.
2978 */
2979
2980 size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
2981 src_offset = vm_object_trunc_page(src_offset);
2982 new_object = vm_object_allocate(size);
2983 new_offset = 0;
2984
2985 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
2986
2987 fault_info.interruptible = interruptible;
2988 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
2989 fault_info.lo_offset = src_offset;
2990 fault_info.hi_offset = src_offset + size;
2991 fault_info.stealth = TRUE;
2992
2993 for (;
2994 size != 0;
2995 src_offset += PAGE_SIZE_64,
2996 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
2997 ) {
2998 vm_page_t new_page;
2999 vm_fault_return_t result;
3000
3001 vm_object_lock(new_object);
3002
3003 while ((new_page = vm_page_alloc(new_object, new_offset))
3004 == VM_PAGE_NULL) {
3005 vm_object_unlock(new_object);
3006
3007 if (!vm_page_wait(interruptible)) {
3008 vm_object_deallocate(new_object);
3009 vm_object_deallocate(src_object);
3010 *_result_object = VM_OBJECT_NULL;
3011 return MACH_SEND_INTERRUPTED;
3012 }
3013 vm_object_lock(new_object);
3014 }
3015 vm_object_unlock(new_object);
3016
3017 do {
3018 vm_prot_t prot = VM_PROT_READ;
3019 vm_page_t _result_page;
3020 vm_page_t top_page;
3021 vm_page_t result_page;
3022 kern_return_t error_code;
3023 vm_object_t result_page_object;
3024
3025
3026 vm_object_lock(src_object);
3027
3028 if (src_object->internal &&
3029 src_object->shadow == VM_OBJECT_NULL &&
3030 (src_object->pager == NULL ||
3031 (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3032 src_offset) ==
3033 VM_EXTERNAL_STATE_ABSENT))) {
3034 boolean_t can_skip_page;
3035
3036 _result_page = vm_page_lookup(src_object,
3037 src_offset);
3038 if (_result_page == VM_PAGE_NULL) {
3039 /*
3040 * This page is neither resident nor
3041 * compressed and there's no shadow
3042 * object below "src_object", so this
3043 * page is really missing.
3044 * There's no need to zero-fill it just
3045 * to copy it: let's leave it missing
3046 * in "new_object" and get zero-filled
3047 * on demand.
3048 */
3049 can_skip_page = TRUE;
3050 } else if (workaround_41447923 &&
3051 src_object->pager == NULL &&
3052 _result_page != VM_PAGE_NULL &&
3053 _result_page->vmp_busy &&
3054 _result_page->vmp_absent &&
3055 src_object->purgable == VM_PURGABLE_DENY &&
3056 !src_object->blocked_access) {
3057 /*
3058 * This page is "busy" and "absent"
3059 * but not because we're waiting for
3060 * it to be decompressed. It must
3061 * be because it's a "no zero fill"
3062 * page that is currently not
3063 * accessible until it gets overwritten
3064 * by a device driver.
3065 * Since its initial state would have
3066 * been "zero-filled", let's leave the
3067 * copy page missing and get zero-filled
3068 * on demand.
3069 */
3070 assert(src_object->internal);
3071 assert(src_object->shadow == NULL);
3072 assert(src_object->pager == NULL);
3073 can_skip_page = TRUE;
3074 vm_page_busy_absent_skipped++;
3075 } else {
3076 can_skip_page = FALSE;
3077 }
3078 if (can_skip_page) {
3079 vm_object_unlock(src_object);
3080 /* free the unused "new_page"... */
3081 vm_object_lock(new_object);
3082 VM_PAGE_FREE(new_page);
3083 new_page = VM_PAGE_NULL;
3084 vm_object_unlock(new_object);
3085 /* ...and go to next page in "src_object" */
3086 result = VM_FAULT_SUCCESS;
3087 break;
3088 }
3089 }
3090
3091 vm_object_paging_begin(src_object);
3092
3093 /* cap size at maximum UPL size */
3094 upl_size_t cluster_size;
3095 if (os_convert_overflow(size, &cluster_size)) {
3096 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3097 }
3098 fault_info.cluster_size = cluster_size;
3099
3100 _result_page = VM_PAGE_NULL;
3101 result = vm_fault_page(src_object, src_offset,
3102 VM_PROT_READ, FALSE,
3103 FALSE, /* page not looked up */
3104 &prot, &_result_page, &top_page,
3105 (int *)0,
3106 &error_code, FALSE, FALSE, &fault_info);
3107
3108 switch (result) {
3109 case VM_FAULT_SUCCESS:
3110 result_page = _result_page;
3111 result_page_object = VM_PAGE_OBJECT(result_page);
3112
3113 /*
3114 * Copy the page to the new object.
3115 *
3116 * POLICY DECISION:
3117 * If result_page is clean,
3118 * we could steal it instead
3119 * of copying.
3120 */
3121
3122 vm_page_copy(result_page, new_page);
3123 vm_object_unlock(result_page_object);
3124
3125 /*
3126 * Let go of both pages (make them
3127 * not busy, perform wakeup, activate).
3128 */
3129 vm_object_lock(new_object);
3130 SET_PAGE_DIRTY(new_page, FALSE);
3131 PAGE_WAKEUP_DONE(new_page);
3132 vm_object_unlock(new_object);
3133
3134 vm_object_lock(result_page_object);
3135 PAGE_WAKEUP_DONE(result_page);
3136
3137 vm_page_lockspin_queues();
3138 if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3139 (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3140 vm_page_activate(result_page);
3141 }
3142 vm_page_activate(new_page);
3143 vm_page_unlock_queues();
3144
3145 /*
3146 * Release paging references and
3147 * top-level placeholder page, if any.
3148 */
3149
3150 vm_fault_cleanup(result_page_object,
3151 top_page);
3152
3153 break;
3154
3155 case VM_FAULT_RETRY:
3156 break;
3157
3158 case VM_FAULT_MEMORY_SHORTAGE:
3159 if (vm_page_wait(interruptible)) {
3160 break;
3161 }
3162 OS_FALLTHROUGH;
3163
3164 case VM_FAULT_INTERRUPTED:
3165 vm_object_lock(new_object);
3166 VM_PAGE_FREE(new_page);
3167 vm_object_unlock(new_object);
3168
3169 vm_object_deallocate(new_object);
3170 vm_object_deallocate(src_object);
3171 *_result_object = VM_OBJECT_NULL;
3172 return MACH_SEND_INTERRUPTED;
3173
3174 case VM_FAULT_SUCCESS_NO_VM_PAGE:
3175 /* success but no VM page: fail */
3176 vm_object_paging_end(src_object);
3177 vm_object_unlock(src_object);
3178 OS_FALLTHROUGH;
3179 case VM_FAULT_MEMORY_ERROR:
3180 /*
3181 * A policy choice:
3182 * (a) ignore pages that we can't
3183 * copy
3184 * (b) return the null object if
3185 * any page fails [chosen]
3186 */
3187
3188 vm_object_lock(new_object);
3189 VM_PAGE_FREE(new_page);
3190 vm_object_unlock(new_object);
3191
3192 vm_object_deallocate(new_object);
3193 vm_object_deallocate(src_object);
3194 *_result_object = VM_OBJECT_NULL;
3195 return error_code ? error_code:
3196 KERN_MEMORY_ERROR;
3197
3198 default:
3199 panic("vm_object_copy_slowly: unexpected error"
3200 " 0x%x from vm_fault_page()\n", result);
3201 }
3202 } while (result != VM_FAULT_SUCCESS);
3203 }
3204
3205 /*
3206 * Lose the extra reference, and return our object.
3207 */
3208 vm_object_deallocate(src_object);
3209 *_result_object = new_object;
3210 return KERN_SUCCESS;
3211 }
3212
3213 /*
3214 * Routine: vm_object_copy_quickly
3215 *
3216 * Purpose:
3217 * Copy the specified range of the source virtual
3218 * memory object, if it can be done without waiting
3219 * for user-generated events.
3220 *
3221 * Results:
3222 * If the copy is successful, the copy is returned in
3223 * the arguments; otherwise, the arguments are not
3224 * affected.
3225 *
3226 * In/out conditions:
3227 * The object should be unlocked on entry and exit.
3228 */
3229
3230 /*ARGSUSED*/
3231 __private_extern__ boolean_t
3232 vm_object_copy_quickly(
3233 vm_object_t *_object, /* INOUT */
3234 __unused vm_object_offset_t offset, /* IN */
3235 __unused vm_object_size_t size, /* IN */
3236 boolean_t *_src_needs_copy, /* OUT */
3237 boolean_t *_dst_needs_copy) /* OUT */
3238 {
3239 vm_object_t object = *_object;
3240 memory_object_copy_strategy_t copy_strategy;
3241
3242 if (object == VM_OBJECT_NULL) {
3243 *_src_needs_copy = FALSE;
3244 *_dst_needs_copy = FALSE;
3245 return TRUE;
3246 }
3247
3248 vm_object_lock(object);
3249
3250 copy_strategy = object->copy_strategy;
3251
3252 switch (copy_strategy) {
3253 case MEMORY_OBJECT_COPY_SYMMETRIC:
3254
3255 /*
3256 * Symmetric copy strategy.
3257 * Make another reference to the object.
3258 * Leave object/offset unchanged.
3259 */
3260
3261 vm_object_reference_locked(object);
3262 object->shadowed = TRUE;
3263 vm_object_unlock(object);
3264
3265 /*
3266 * Both source and destination must make
3267 * shadows, and the source must be made
3268 * read-only if not already.
3269 */
3270
3271 *_src_needs_copy = TRUE;
3272 *_dst_needs_copy = TRUE;
3273
3274 break;
3275
3276 case MEMORY_OBJECT_COPY_DELAY:
3277 vm_object_unlock(object);
3278 return FALSE;
3279
3280 default:
3281 vm_object_unlock(object);
3282 return FALSE;
3283 }
3284 return TRUE;
3285 }
3286
3287 static int copy_call_count = 0;
3288 static int copy_call_sleep_count = 0;
3289 static int copy_call_restart_count = 0;
3290
3291 /*
3292 * Routine: vm_object_copy_call [internal]
3293 *
3294 * Description:
3295 * Copy the source object (src_object), using the
3296 * user-managed copy algorithm.
3297 *
3298 * In/out conditions:
3299 * The source object must be locked on entry. It
3300 * will be *unlocked* on exit.
3301 *
3302 * Results:
3303 * If the copy is successful, KERN_SUCCESS is returned.
3304 * A new object that represents the copied virtual
3305 * memory is returned in a parameter (*_result_object).
3306 * If the return value indicates an error, this parameter
3307 * is not valid.
3308 */
3309 static kern_return_t
3310 vm_object_copy_call(
3311 vm_object_t src_object,
3312 vm_object_offset_t src_offset,
3313 vm_object_size_t size,
3314 vm_object_t *_result_object) /* OUT */
3315 {
3316 kern_return_t kr;
3317 vm_object_t copy;
3318 boolean_t check_ready = FALSE;
3319 uint32_t try_failed_count = 0;
3320
3321 /*
3322 * If a copy is already in progress, wait and retry.
3323 *
3324 * XXX
3325 * Consider making this call interruptible, as Mike
3326 * intended it to be.
3327 *
3328 * XXXO
3329 * Need a counter or version or something to allow
3330 * us to use the copy that the currently requesting
3331 * thread is obtaining -- is it worth adding to the
3332 * vm object structure? Depends on how common this case is.
3333 */
3334 copy_call_count++;
3335 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3336 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3337 THREAD_UNINT);
3338 copy_call_restart_count++;
3339 }
3340
3341 /*
3342 * Indicate (for the benefit of memory_object_create_copy)
3343 * that we want a copy for src_object. (Note that we cannot
3344 * do a real assert_wait before calling memory_object_copy,
3345 * so we simply set the flag.)
3346 */
3347
3348 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
3349 vm_object_unlock(src_object);
3350
3351 /*
3352 * Ask the memory manager to give us a memory object
3353 * which represents a copy of the src object.
3354 * The memory manager may give us a memory object
3355 * which we already have, or it may give us a
3356 * new memory object. This memory object will arrive
3357 * via memory_object_create_copy.
3358 */
3359
3360 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
3361 if (kr != KERN_SUCCESS) {
3362 return kr;
3363 }
3364
3365 /*
3366 * Wait for the copy to arrive.
3367 */
3368 vm_object_lock(src_object);
3369 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3370 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3371 THREAD_UNINT);
3372 copy_call_sleep_count++;
3373 }
3374 Retry:
3375 assert(src_object->copy != VM_OBJECT_NULL);
3376 copy = src_object->copy;
3377 if (!vm_object_lock_try(copy)) {
3378 vm_object_unlock(src_object);
3379
3380 try_failed_count++;
3381 mutex_pause(try_failed_count); /* wait a bit */
3382
3383 vm_object_lock(src_object);
3384 goto Retry;
3385 }
3386 if (copy->vo_size < src_offset + size) {
3387 assertf(page_aligned(src_offset + size),
3388 "object %p size 0x%llx",
3389 copy, (uint64_t)(src_offset + size));
3390 copy->vo_size = src_offset + size;
3391 }
3392
3393 if (!copy->pager_ready) {
3394 check_ready = TRUE;
3395 }
3396
3397 /*
3398 * Return the copy.
3399 */
3400 *_result_object = copy;
3401 vm_object_unlock(copy);
3402 vm_object_unlock(src_object);
3403
3404 /* Wait for the copy to be ready. */
3405 if (check_ready == TRUE) {
3406 vm_object_lock(copy);
3407 while (!copy->pager_ready) {
3408 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
3409 }
3410 vm_object_unlock(copy);
3411 }
3412
3413 return KERN_SUCCESS;
3414 }
3415
3416 static int copy_delayed_lock_collisions = 0;
3417 static int copy_delayed_max_collisions = 0;
3418 static int copy_delayed_lock_contention = 0;
3419 static int copy_delayed_protect_iterate = 0;
3420
3421 /*
3422 * Routine: vm_object_copy_delayed [internal]
3423 *
3424 * Description:
3425 * Copy the specified virtual memory object, using
3426 * the asymmetric copy-on-write algorithm.
3427 *
3428 * In/out conditions:
3429 * The src_object must be locked on entry. It will be unlocked
3430 * on exit - so the caller must also hold a reference to it.
3431 *
3432 * This routine will not block waiting for user-generated
3433 * events. It is not interruptible.
3434 */
3435 __private_extern__ vm_object_t
3436 vm_object_copy_delayed(
3437 vm_object_t src_object,
3438 vm_object_offset_t src_offset,
3439 vm_object_size_t size,
3440 boolean_t src_object_shared)
3441 {
3442 vm_object_t new_copy = VM_OBJECT_NULL;
3443 vm_object_t old_copy;
3444 vm_page_t p;
3445 vm_object_size_t copy_size = src_offset + size;
3446 pmap_flush_context pmap_flush_context_storage;
3447 boolean_t delayed_pmap_flush = FALSE;
3448
3449
3450 int collisions = 0;
3451 /*
3452 * The user-level memory manager wants to see all of the changes
3453 * to this object, but it has promised not to make any changes on
3454 * its own.
3455 *
3456 * Perform an asymmetric copy-on-write, as follows:
3457 * Create a new object, called a "copy object" to hold
3458 * pages modified by the new mapping (i.e., the copy,
3459 * not the original mapping).
3460 * Record the original object as the backing object for
3461 * the copy object. If the original mapping does not
3462 * change a page, it may be used read-only by the copy.
3463 * Record the copy object in the original object.
3464 * When the original mapping causes a page to be modified,
3465 * it must be copied to a new page that is "pushed" to
3466 * the copy object.
3467 * Mark the new mapping (the copy object) copy-on-write.
3468 * This makes the copy object itself read-only, allowing
3469 * it to be reused if the original mapping makes no
3470 * changes, and simplifying the synchronization required
3471 * in the "push" operation described above.
3472 *
3473 * The copy-on-write is said to be asymmetric because the original
3474 * object is *not* marked copy-on-write. A copied page is pushed
3475 * to the copy object, regardless of which party attempted to modify
3476 * the page.
3477 *
3478 * Repeated asymmetric copy operations may be done. If the
3479 * original object has not been changed since the last copy, its
3480 * copy object can be reused. Otherwise, a new copy object can be
3481 * inserted between the original object and its previous copy
3482 * object. Since any copy object is read-only, this cannot affect
3483 * the contents of the previous copy object.
3484 *
3485 * Note that a copy object is higher in the object tree than the
3486 * original object; therefore, use of the copy object recorded in
3487 * the original object must be done carefully, to avoid deadlock.
3488 */
3489
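/*
 * Sketch (for illustration) of the relationships established further
 * below once a new copy object is needed:
 *
 *	new_copy->shadow = src_object;	// new copy shadows the original
 *	old_copy->shadow = new_copy;	// previous copy, if any, now
 *					// shadows the new copy
 *	src_object->copy = new_copy;	// original records its copy
 */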
3490 copy_size = vm_object_round_page(copy_size);
3491 Retry:
3492
3493 /*
3494 * Wait for paging in progress.
3495 */
3496 if (!src_object->true_share &&
3497 (src_object->paging_in_progress != 0 ||
3498 src_object->activity_in_progress != 0)) {
3499 if (src_object_shared == TRUE) {
3500 vm_object_unlock(src_object);
3501 vm_object_lock(src_object);
3502 src_object_shared = FALSE;
3503 goto Retry;
3504 }
3505 vm_object_paging_wait(src_object, THREAD_UNINT);
3506 }
3507 /*
3508 * See whether we can reuse the result of a previous
3509 * copy operation.
3510 */
3511
3512 old_copy = src_object->copy;
3513 if (old_copy != VM_OBJECT_NULL) {
3514 int lock_granted;
3515
3516 /*
3517 * Try to get the locks (out of order)
3518 */
3519 if (src_object_shared == TRUE) {
3520 lock_granted = vm_object_lock_try_shared(old_copy);
3521 } else {
3522 lock_granted = vm_object_lock_try(old_copy);
3523 }
3524
3525 if (!lock_granted) {
3526 vm_object_unlock(src_object);
3527
3528 if (collisions++ == 0) {
3529 copy_delayed_lock_contention++;
3530 }
3531 mutex_pause(collisions);
3532
3533 /* Heisenberg Rules */
3534 copy_delayed_lock_collisions++;
3535
3536 if (collisions > copy_delayed_max_collisions) {
3537 copy_delayed_max_collisions = collisions;
3538 }
3539
3540 if (src_object_shared == TRUE) {
3541 vm_object_lock_shared(src_object);
3542 } else {
3543 vm_object_lock(src_object);
3544 }
3545
3546 goto Retry;
3547 }
3548
3549 /*
3550 * Determine whether the old copy object has
3551 * been modified.
3552 */
3553
3554 if (old_copy->resident_page_count == 0 &&
3555 !old_copy->pager_created) {
3556 /*
3557 * It has not been modified.
3558 *
3559 * Return another reference to
3560 * the existing copy-object if
3561 * we can safely grow it (if
3562 * needed).
3563 */
3564
3565 if (old_copy->vo_size < copy_size) {
3566 if (src_object_shared == TRUE) {
3567 vm_object_unlock(old_copy);
3568 vm_object_unlock(src_object);
3569
3570 vm_object_lock(src_object);
3571 src_object_shared = FALSE;
3572 goto Retry;
3573 }
3574 /*
3575 * We can't perform a delayed copy if any of the
3576 * pages in the extended range are wired (because
3577 * we can't safely take write permission away from
3578 * wired pages). If the pages aren't wired, then
3579 * go ahead and protect them.
3580 */
3581 copy_delayed_protect_iterate++;
3582
3583 pmap_flush_context_init(&pmap_flush_context_storage);
3584 delayed_pmap_flush = FALSE;
3585
3586 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3587 if (!p->vmp_fictitious &&
3588 p->vmp_offset >= old_copy->vo_size &&
3589 p->vmp_offset < copy_size) {
3590 if (VM_PAGE_WIRED(p)) {
3591 vm_object_unlock(old_copy);
3592 vm_object_unlock(src_object);
3593
3594 if (new_copy != VM_OBJECT_NULL) {
3595 vm_object_unlock(new_copy);
3596 vm_object_deallocate(new_copy);
3597 }
3598 if (delayed_pmap_flush == TRUE) {
3599 pmap_flush(&pmap_flush_context_storage);
3600 }
3601
3602 return VM_OBJECT_NULL;
3603 } else {
3604 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
3605 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3606 delayed_pmap_flush = TRUE;
3607 }
3608 }
3609 }
3610 if (delayed_pmap_flush == TRUE) {
3611 pmap_flush(&pmap_flush_context_storage);
3612 }
3613
3614 assertf(page_aligned(copy_size),
3615 "object %p size 0x%llx",
3616 old_copy, (uint64_t)copy_size);
3617 old_copy->vo_size = copy_size;
3618 }
3619 if (src_object_shared == TRUE) {
3620 vm_object_reference_shared(old_copy);
3621 } else {
3622 vm_object_reference_locked(old_copy);
3623 }
3624 vm_object_unlock(old_copy);
3625 vm_object_unlock(src_object);
3626
3627 if (new_copy != VM_OBJECT_NULL) {
3628 vm_object_unlock(new_copy);
3629 vm_object_deallocate(new_copy);
3630 }
3631 return old_copy;
3632 }
3633
3634
3635
3636 /*
3637 * Adjust the size argument so that the newly-created
3638 * copy object will be large enough to back either the
3639 * old copy object or the new mapping.
3640 */
3641 if (old_copy->vo_size > copy_size) {
3642 copy_size = old_copy->vo_size;
3643 }
3644
3645 if (new_copy == VM_OBJECT_NULL) {
3646 vm_object_unlock(old_copy);
3647 vm_object_unlock(src_object);
3648 new_copy = vm_object_allocate(copy_size);
3649 vm_object_lock(src_object);
3650 vm_object_lock(new_copy);
3651
3652 src_object_shared = FALSE;
3653 goto Retry;
3654 }
3655 assertf(page_aligned(copy_size),
3656 "object %p size 0x%llx",
3657 new_copy, (uint64_t)copy_size);
3658 new_copy->vo_size = copy_size;
3659
3660 /*
3661 * The copy-object is always made large enough to
3662 * completely shadow the original object, since
3663 * it may have several users who want to shadow
3664 * the original object at different points.
3665 */
3666
3667 assert((old_copy->shadow == src_object) &&
3668 (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
3669 } else if (new_copy == VM_OBJECT_NULL) {
3670 vm_object_unlock(src_object);
3671 new_copy = vm_object_allocate(copy_size);
3672 vm_object_lock(src_object);
3673 vm_object_lock(new_copy);
3674
3675 src_object_shared = FALSE;
3676 goto Retry;
3677 }
3678
3679 /*
3680 * We now have the src object locked, and the new copy object
3681 * allocated and locked (and potentially the old copy locked).
3682 * Before we go any further, make sure we can still perform
3683 * a delayed copy, as the situation may have changed.
3684 *
3685 * Specifically, we can't perform a delayed copy if any of the
3686 * pages in the range are wired (because we can't safely take
3687 * write permission away from wired pages). If the pages aren't
3688 * wired, then go ahead and protect them.
3689 */
3690 copy_delayed_protect_iterate++;
3691
3692 pmap_flush_context_init(&pmap_flush_context_storage);
3693 delayed_pmap_flush = FALSE;
3694
3695 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3696 if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
3697 if (VM_PAGE_WIRED(p)) {
3698 if (old_copy) {
3699 vm_object_unlock(old_copy);
3700 }
3701 vm_object_unlock(src_object);
3702 vm_object_unlock(new_copy);
3703 vm_object_deallocate(new_copy);
3704
3705 if (delayed_pmap_flush == TRUE) {
3706 pmap_flush(&pmap_flush_context_storage);
3707 }
3708
3709 return VM_OBJECT_NULL;
3710 } else {
3711 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
3712 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3713 delayed_pmap_flush = TRUE;
3714 }
3715 }
3716 }
3717 if (delayed_pmap_flush == TRUE) {
3718 pmap_flush(&pmap_flush_context_storage);
3719 }
3720
3721 if (old_copy != VM_OBJECT_NULL) {
3722 /*
3723 * Make the old copy-object shadow the new one.
3724 * It will receive no more pages from the original
3725 * object.
3726 */
3727
3728 /* remove ref. from old_copy */
3729 vm_object_lock_assert_exclusive(src_object);
3730 src_object->ref_count--;
3731 assert(src_object->ref_count > 0);
3732 vm_object_lock_assert_exclusive(old_copy);
3733 old_copy->shadow = new_copy;
3734 vm_object_lock_assert_exclusive(new_copy);
3735 assert(new_copy->ref_count > 0);
3736 new_copy->ref_count++; /* for old_copy->shadow ref. */
3737
3738 vm_object_unlock(old_copy); /* done with old_copy */
3739 }
3740
3741 /*
3742 * Point the new copy at the existing object.
3743 */
3744 vm_object_lock_assert_exclusive(new_copy);
3745 new_copy->shadow = src_object;
3746 new_copy->vo_shadow_offset = 0;
3747 new_copy->shadowed = TRUE; /* caller must set needs_copy */
3748
3749 vm_object_lock_assert_exclusive(src_object);
3750 vm_object_reference_locked(src_object);
3751 src_object->copy = new_copy;
3752 vm_object_unlock(src_object);
3753 vm_object_unlock(new_copy);
3754
3755 return new_copy;
3756 }
3757
3758 /*
3759 * Routine: vm_object_copy_strategically
3760 *
3761 * Purpose:
3762 * Perform a copy according to the source object's
3763 * declared strategy. This operation may block,
3764 * and may be interrupted.
3765 */
3766 __private_extern__ kern_return_t
3767 vm_object_copy_strategically(
3768 vm_object_t src_object,
3769 vm_object_offset_t src_offset,
3770 vm_object_size_t size,
3771 vm_object_t *dst_object, /* OUT */
3772 vm_object_offset_t *dst_offset, /* OUT */
3773 boolean_t *dst_needs_copy) /* OUT */
3774 {
3775 boolean_t result;
3776 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
3777 boolean_t object_lock_shared = FALSE;
3778 memory_object_copy_strategy_t copy_strategy;
3779
3780 assert(src_object != VM_OBJECT_NULL);
3781
3782 copy_strategy = src_object->copy_strategy;
3783
3784 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
3785 vm_object_lock_shared(src_object);
3786 object_lock_shared = TRUE;
3787 } else {
3788 vm_object_lock(src_object);
3789 }
3790
3791 /*
3792 * The copy strategy is only valid if the memory manager
3793 * is "ready". Internal objects are always ready.
3794 */
3795
3796 while (!src_object->internal && !src_object->pager_ready) {
3797 wait_result_t wait_result;
3798
3799 if (object_lock_shared == TRUE) {
3800 vm_object_unlock(src_object);
3801 vm_object_lock(src_object);
3802 object_lock_shared = FALSE;
3803 continue;
3804 }
3805 wait_result = vm_object_sleep( src_object,
3806 VM_OBJECT_EVENT_PAGER_READY,
3807 interruptible);
3808 if (wait_result != THREAD_AWAKENED) {
3809 vm_object_unlock(src_object);
3810 *dst_object = VM_OBJECT_NULL;
3811 *dst_offset = 0;
3812 *dst_needs_copy = FALSE;
3813 return MACH_SEND_INTERRUPTED;
3814 }
3815 }
3816
3817 /*
3818 * Use the appropriate copy strategy.
3819 */
3820
3821 switch (copy_strategy) {
3822 case MEMORY_OBJECT_COPY_DELAY:
3823 *dst_object = vm_object_copy_delayed(src_object,
3824 src_offset, size, object_lock_shared);
3825 if (*dst_object != VM_OBJECT_NULL) {
3826 *dst_offset = src_offset;
3827 *dst_needs_copy = TRUE;
3828 result = KERN_SUCCESS;
3829 break;
3830 }
3831 vm_object_lock(src_object);
3832 OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
3833
3834 case MEMORY_OBJECT_COPY_NONE:
3835 result = vm_object_copy_slowly(src_object, src_offset, size,
3836 interruptible, dst_object);
3837 if (result == KERN_SUCCESS) {
3838 *dst_offset = src_offset - vm_object_trunc_page(src_offset);
3839 *dst_needs_copy = FALSE;
3840 }
3841 break;
3842
3843 case MEMORY_OBJECT_COPY_CALL:
3844 result = vm_object_copy_call(src_object, src_offset, size,
3845 dst_object);
3846 if (result == KERN_SUCCESS) {
3847 *dst_offset = src_offset;
3848 *dst_needs_copy = TRUE;
3849 }
3850 break;
3851
3852 case MEMORY_OBJECT_COPY_SYMMETRIC:
3853 vm_object_unlock(src_object);
3854 result = KERN_MEMORY_RESTART_COPY;
3855 break;
3856
3857 default:
3858 panic("copy_strategically: bad strategy");
3859 result = KERN_INVALID_ARGUMENT;
3860 }
3861 return result;
3862 }
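
/*
 * A minimal sketch (illustration only, not compiled) of how a caller
 * might drive vm_object_copy_strategically() and handle the
 * KERN_MEMORY_RESTART_COPY result returned for COPY_SYMMETRIC objects.
 * The "example_*" names are hypothetical and do not exist in the kernel.
 */
#if 0 /* illustrative sketch only */
static kern_return_t
example_copy_object(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *copy_object)   /* OUT */
{
	vm_object_offset_t      copy_offset;
	boolean_t               needs_copy;
	kern_return_t           kr;

	kr = vm_object_copy_strategically(src_object, src_offset, size,
	    copy_object, &copy_offset, &needs_copy);
	if (kr == KERN_MEMORY_RESTART_COPY) {
		/*
		 * Symmetric copy objects must be handled by the caller,
		 * typically with a symmetric (shadow-based) copy instead.
		 */
	}
	return kr;
}
#endif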
3863
3864 /*
3865 * vm_object_shadow:
3866 *
3867 * Create a new object which is backed by the
3868 * specified existing object range. The source
3869 * object reference is deallocated.
3870 *
3871 * The new object and offset into that object
3872 * are returned in the source parameters.
3873 */
3874 boolean_t vm_object_shadow_check = TRUE;
3875
3876 __private_extern__ boolean_t
3877 vm_object_shadow(
3878 vm_object_t *object, /* IN/OUT */
3879 vm_object_offset_t *offset, /* IN/OUT */
3880 vm_object_size_t length)
3881 {
3882 vm_object_t source;
3883 vm_object_t result;
3884
3885 source = *object;
3886 assert(source != VM_OBJECT_NULL);
3887 if (source == VM_OBJECT_NULL) {
3888 return FALSE;
3889 }
3890
3891 #if 0
3892 /*
3893 * XXX FBDP
3894 * This assertion is valid but it gets triggered by Rosetta for example
3895 * due to a combination of vm_remap() that changes a VM object's
3896 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
3897 * that then sets "needs_copy" on its map entry. This creates a
3898 * mapping situation that VM should never see and doesn't know how to
3899 * handle.
3900 * It's not clear if this can create any real problem but we should
3901 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
3902 * do more than just set "needs_copy" to handle the copy-on-write...
3903 * In the meantime, let's disable the assertion.
3904 */
3905 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
3906 #endif
3907
3908 /*
3909 * Determine if we really need a shadow.
3910 *
3911 * If the source object is larger than what we are trying
3912 * to create, then force the shadow creation even if the
3913 * ref count is 1. This will allow us to [potentially]
3914 * collapse the underlying object away in the future
3915 * (freeing up the extra data it might contain and that
3916 * we don't need).
3917 */
3918
3919 assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
3920
3921 #if 00
3922 /*
3923 * The following optimization does not work in the context of submaps
3924 * (the shared region, in particular).
3925 * This object might have only 1 reference (in the submap) but that
3926 * submap can itself be mapped multiple times, so the object is
3927 * actually indirectly referenced more than once...
3928 */
3929 if (vm_object_shadow_check &&
3930 source->vo_size == length &&
3931 source->ref_count == 1) {
3932 /*
3933 * Lock the object and check again.
3934 * We also check to see if there's
3935 * a shadow or copy object involved.
3936 * We can't do that earlier because
3937 * without the object locked, there
3938 * could be a collapse and the chain
3939 * gets modified leaving us with an
3940 * invalid pointer.
3941 */
3942 vm_object_lock(source);
3943 if (source->vo_size == length &&
3944 source->ref_count == 1 &&
3945 (source->shadow == VM_OBJECT_NULL ||
3946 source->shadow->copy == VM_OBJECT_NULL)) {
3947 source->shadowed = FALSE;
3948 vm_object_unlock(source);
3949 return FALSE;
3950 }
3951 /* things changed while we were locking "source"... */
3952 vm_object_unlock(source);
3953 }
3954 #endif /* 00 */
3955
3956 /*
3957 * *offset is the map entry's offset into the VM object and
3958 * is aligned to the map's page size.
3959 * VM objects need to be aligned to the system's page size.
3960 * Record the necessary adjustment and re-align the offset so
3961 * that result->vo_shadow_offset is properly page-aligned.
3962 */
3963 vm_object_offset_t offset_adjustment;
3964 offset_adjustment = *offset - vm_object_trunc_page(*offset);
3965 length = vm_object_round_page(length + offset_adjustment);
3966 *offset = vm_object_trunc_page(*offset);
3967
3968 /*
3969 * Allocate a new object with the given length
3970 */
3971
3972 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
3973 panic("vm_object_shadow: no object for shadowing");
3974 }
3975
3976 /*
3977 * The new object shadows the source object, adding
3978 * a reference to it. Our caller changes his reference
3979 * to point to the new object, removing a reference to
3980 * the source object. Net result: no change of reference
3981 * count.
3982 */
3983 result->shadow = source;
3984
3985 /*
3986 * Store the offset into the source object,
3987 * and fix up the offset into the new object.
3988 */
3989
3990 result->vo_shadow_offset = *offset;
3991 assertf(page_aligned(result->vo_shadow_offset),
3992 "result %p shadow offset 0x%llx",
3993 result, result->vo_shadow_offset);
3994
3995 /*
3996 * Return the new things
3997 */
3998
3999 *offset = 0;
4000 if (offset_adjustment) {
4001 /*
4002 * Make the map entry point to the equivalent offset
4003 * in the new object.
4004 */
4005 DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
4006 *offset += offset_adjustment;
4007 }
4008 *object = result;
4009 return TRUE;
4010 }
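
/*
 * Worked example of the re-alignment above (illustrative values only,
 * assuming 4K map pages on a system with 16K VM object pages):
 *
 *	*offset = 0x7000, length = 0x2000 on entry
 *	offset_adjustment = 0x7000 - vm_object_trunc_page(0x7000)
 *	                  = 0x7000 - 0x4000 = 0x3000
 *	length  = vm_object_round_page(0x2000 + 0x3000) = 0x8000
 *	*offset = 0x4000, which becomes result->vo_shadow_offset
 *
 * and, because offset_adjustment is non-zero, the returned *offset is
 * 0x3000 so the map entry still refers to the same bytes within the
 * new, page-aligned shadow object.
 */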
4011
4012 /*
4013 * The relationship between vm_object structures and
4014 * the memory_object requires careful synchronization.
4015 *
4016 * All associations are created by memory_object_create_named
4017 * for external pagers and vm_object_compressor_pager_create for internal
4018 * objects as follows:
4019 *
4020 * pager: the memory_object itself, supplied by
4021 * the user requesting a mapping (or the kernel,
4022 * when initializing internal objects); the
4023 * kernel simulates holding send rights by keeping
4024 * a port reference;
4025 *
4026 * pager_request:
4027 * the memory object control port,
4028 * created by the kernel; the kernel holds
4029 * receive (and ownership) rights to this
4030 * port, but no other references.
4031 *
4032 * When initialization is complete, the "initialized" field
4033 * is asserted. Other mappings using a particular memory object,
4034 * and any references to the vm_object gained through the
4035 * port association must wait for this initialization to occur.
4036 *
4037 * In order to allow the memory manager to set attributes before
4038 * requests (notably virtual copy operations, but also data or
4039 * unlock requests) are made, a "ready" attribute is made available.
4040 * Only the memory manager may affect the value of this attribute.
4041 * Its value does not affect critical kernel functions, such as
4042 * internal object initialization or destruction. [Furthermore,
4043 * memory objects created by the kernel are assumed to be ready
4044 * immediately; the default memory manager need not explicitly
4045 * set the "ready" attribute.]
4046 *
4047 * [Both the "initialized" and "ready" attribute wait conditions
4048 * use the "pager" field as the wait event.]
4049 *
4050 * The port associations can be broken down by any of the
4051 * following routines:
4052 * vm_object_terminate:
4053 * No references to the vm_object remain, and
4054 * the object cannot (or will not) be cached.
4055 * This is the normal case, and is done even
4056 * though one of the other cases has already been
4057 * done.
4058 * memory_object_destroy:
4059 * The memory manager has requested that the
4060 * kernel relinquish references to the memory
4061 * object. [The memory manager may not want to
4062 * destroy the memory object, but may wish to
4063 * refuse or tear down existing memory mappings.]
4064 *
4065 * Each routine that breaks an association must break all of
4066 * them at once. At some later time, that routine must clear
4067 * the pager field and release the memory object references.
4068 * [Furthermore, each routine must cope with the simultaneous
4069 * or previous operations of the others.]
4070 *
4071 * Because the pager field may be cleared spontaneously, it
4072 * cannot be used to determine whether a memory object has
4073 * ever been associated with a particular vm_object. [This
4074 * knowledge is important to the shadow object mechanism.]
4075 * For this reason, an additional "created" attribute is
4076 * provided.
4077 *
4078 * During various paging operations, the pager reference found in the
4079 * vm_object must be valid. To prevent this from being released,
4080 * (other than being removed, i.e., made null), routines may use
4081 * the vm_object_paging_begin/end routines [actually, macros].
4082 * The implementation uses the "paging_in_progress" and "wanted" fields.
4083 * [Operations that alter the validity of the pager values include the
4084 * termination routines and vm_object_collapse.]
4085 */
4086
4087
4088 /*
4089 * Routine: vm_object_memory_object_associate
4090 * Purpose:
4091 * Associate a VM object to the given pager.
4092 * If a VM object is not provided, create one.
4093 * Initialize the pager.
4094 */
4095 vm_object_t
4096 vm_object_memory_object_associate(
4097 memory_object_t pager,
4098 vm_object_t object,
4099 vm_object_size_t size,
4100 boolean_t named)
4101 {
4102 memory_object_control_t control;
4103
4104 assert(pager != MEMORY_OBJECT_NULL);
4105
4106 if (object != VM_OBJECT_NULL) {
4107 assert(object->internal);
4108 assert(object->pager_created);
4109 assert(!object->pager_initialized);
4110 assert(!object->pager_ready);
4111 assert(object->pager_trusted);
4112 } else {
4113 object = vm_object_allocate(size);
4114 assert(object != VM_OBJECT_NULL);
4115 object->internal = FALSE;
4116 object->pager_trusted = FALSE;
4117 /* copy strategy invalid until set by memory manager */
4118 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4119 }
4120
4121 /*
4122 * Allocate request port.
4123 */
4124
4125 control = memory_object_control_allocate(object);
4126 assert(control != MEMORY_OBJECT_CONTROL_NULL);
4127
4128 vm_object_lock(object);
4129
4130 assert(!object->pager_ready);
4131 assert(!object->pager_initialized);
4132 assert(object->pager == NULL);
4133 assert(object->pager_control == NULL);
4134
4135 /*
4136 * Copy the reference we were given.
4137 */
4138
4139 memory_object_reference(pager);
4140 object->pager_created = TRUE;
4141 object->pager = pager;
4142 object->pager_control = control;
4143 object->pager_ready = FALSE;
4144
4145 vm_object_unlock(object);
4146
4147 /*
4148 * Let the pager know we're using it.
4149 */
4150
4151 (void) memory_object_init(pager,
4152 object->pager_control,
4153 PAGE_SIZE);
4154
4155 vm_object_lock(object);
4156 if (named) {
4157 object->named = TRUE;
4158 }
4159 if (object->internal) {
4160 object->pager_ready = TRUE;
4161 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4162 }
4163
4164 object->pager_initialized = TRUE;
4165 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4166
4167 vm_object_unlock(object);
4168
4169 return object;
4170 }
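
/*
 * Note on the code above: pager_ready is asserted here only for internal
 * objects.  For externally-paged objects it remains FALSE until the
 * memory manager marks the object ready, which is what callers such as
 * vm_object_copy_strategically() (earlier in this file) wait for before
 * trusting the object's copy strategy.
 */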
4171
4172 /*
4173 * Routine: vm_object_compressor_pager_create
4174 * Purpose:
4175 * Create a memory object for an internal object.
4176 * In/out conditions:
4177 * The object is locked on entry and exit;
4178 * it may be unlocked within this call.
4179 * Limitations:
4180 * Only one thread may be performing a
4181 * vm_object_compressor_pager_create on an object at
4182 * a time. Presumably, only the pageout
4183 * daemon will be using this routine.
4184 */
4185
4186 void
4187 vm_object_compressor_pager_create(
4188 vm_object_t object)
4189 {
4190 memory_object_t pager;
4191 vm_object_t pager_object = VM_OBJECT_NULL;
4192
4193 assert(object != kernel_object);
4194
4195 /*
4196 * Prevent collapse or termination by holding a paging reference
4197 */
4198
4199 vm_object_paging_begin(object);
4200 if (object->pager_created) {
4201 /*
4202 * Someone else got to it first...
4203 * wait for them to finish initializing the ports
4204 */
4205 while (!object->pager_initialized) {
4206 vm_object_sleep(object,
4207 VM_OBJECT_EVENT_INITIALIZED,
4208 THREAD_UNINT);
4209 }
4210 vm_object_paging_end(object);
4211 return;
4212 }
4213
4214 if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4215 (object->vo_size / PAGE_SIZE)) {
4216 #if DEVELOPMENT || DEBUG
4217 printf("vm_object_compressor_pager_create(%p): "
4218 "object size 0x%llx >= 0x%llx\n",
4219 object,
4220 (uint64_t) object->vo_size,
4221 0x0FFFFFFFFULL * PAGE_SIZE);
4222 #endif /* DEVELOPMENT || DEBUG */
4223 vm_object_paging_end(object);
4224 return;
4225 }
4226
4227 /*
4228 * Indicate that a memory object has been assigned
4229 * before dropping the lock, to prevent a race.
4230 */
4231
4232 object->pager_created = TRUE;
4233 object->pager_trusted = TRUE;
4234 object->paging_offset = 0;
4235
4236 vm_object_unlock(object);
4237
4238 /*
4239 * Create the [internal] pager, and associate it with this object.
4240 *
4241 * We make the association here so that vm_object_enter()
4242 * can look up the object to complete initializing it. No
4243 * user will ever map this object.
4244 */
4245 {
4246 /* create our new memory object */
4247 assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4248 (object->vo_size / PAGE_SIZE));
4249 (void) compressor_memory_object_create(
4250 (memory_object_size_t) object->vo_size,
4251 &pager);
4252 if (pager == NULL) {
4253 panic("vm_object_compressor_pager_create(): "
4254 "no pager for object %p size 0x%llx\n",
4255 object, (uint64_t) object->vo_size);
4256 }
4257 }
4258
4259 /*
4260 * A reference was returned by
4261 * memory_object_create(), and it is
4262 * copied by vm_object_memory_object_associate().
4263 */
4264
4265 pager_object = vm_object_memory_object_associate(pager,
4266 object,
4267 object->vo_size,
4268 FALSE);
4269 if (pager_object != object) {
4270 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
4271 }
4272
4273 /*
4274 * Drop the reference we were passed.
4275 */
4276 memory_object_deallocate(pager);
4277
4278 vm_object_lock(object);
4279
4280 /*
4281 * Release the paging reference
4282 */
4283 vm_object_paging_end(object);
4284 }
4285
4286 /*
4287 * Global variables for vm_object_collapse():
4288 *
4289 * Counts for normal collapses and bypasses.
4290 * Debugging variables, to watch or disable collapse.
4291 */
4292 static long object_collapses = 0;
4293 static long object_bypasses = 0;
4294
4295 static boolean_t vm_object_collapse_allowed = TRUE;
4296 static boolean_t vm_object_bypass_allowed = TRUE;
4297
4298 void vm_object_do_collapse_compressor(vm_object_t object,
4299 vm_object_t backing_object);
4300 void
4301 vm_object_do_collapse_compressor(
4302 vm_object_t object,
4303 vm_object_t backing_object)
4304 {
4305 vm_object_offset_t new_offset, backing_offset;
4306 vm_object_size_t size;
4307
4308 vm_counters.do_collapse_compressor++;
4309
4310 vm_object_lock_assert_exclusive(object);
4311 vm_object_lock_assert_exclusive(backing_object);
4312
4313 size = object->vo_size;
4314
4315 /*
4316 * Move all compressed pages from backing_object
4317 * to the parent.
4318 */
4319
4320 for (backing_offset = object->vo_shadow_offset;
4321 backing_offset < object->vo_shadow_offset + object->vo_size;
4322 backing_offset += PAGE_SIZE) {
4323 memory_object_offset_t backing_pager_offset;
4324
4325 /* find the next compressed page at or after this offset */
4326 backing_pager_offset = (backing_offset +
4327 backing_object->paging_offset);
4328 backing_pager_offset = vm_compressor_pager_next_compressed(
4329 backing_object->pager,
4330 backing_pager_offset);
4331 if (backing_pager_offset == (memory_object_offset_t) -1) {
4332 /* no more compressed pages */
4333 break;
4334 }
4335 backing_offset = (backing_pager_offset -
4336 backing_object->paging_offset);
4337
4338 new_offset = backing_offset - object->vo_shadow_offset;
4339
4340 if (new_offset >= object->vo_size) {
4341 /* we're out of the scope of "object": done */
4342 break;
4343 }
4344
4345 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4346 (vm_compressor_pager_state_get(object->pager,
4347 (new_offset +
4348 object->paging_offset)) ==
4349 VM_EXTERNAL_STATE_EXISTS)) {
4350 /*
4351 * This page already exists in object, resident or
4352 * compressed.
4353 * We don't need this compressed page in backing_object
4354 * and it will be reclaimed when we release
4355 * backing_object.
4356 */
4357 continue;
4358 }
4359
4360 /*
4361 * backing_object has this page in the VM compressor and
4362 * we need to transfer it to object.
4363 */
4364 vm_counters.do_collapse_compressor_pages++;
4365 vm_compressor_pager_transfer(
4366 /* destination: */
4367 object->pager,
4368 (new_offset + object->paging_offset),
4369 /* source: */
4370 backing_object->pager,
4371 (backing_offset + backing_object->paging_offset));
4372 }
4373 }
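
/*
 * The offset translation used above, spelled out: a compressed page at
 * offset B in backing_object lives at pager offset
 * (B + backing_object->paging_offset) and corresponds to offset
 * (B - object->vo_shadow_offset) in "object", i.e. pager offset
 * (B - object->vo_shadow_offset + object->paging_offset) in object's
 * compressor pager.  Illustrative numbers: with vo_shadow_offset = 0x4000
 * and both paging_offsets 0, a compressed page at 0x6000 in
 * backing_object is transferred to offset 0x2000 in object.
 */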
4374
4375 /*
4376 * Routine: vm_object_do_collapse
4377 * Purpose:
4378 * Collapse an object with the object backing it.
4379 * Pages in the backing object are moved into the
4380 * parent, and the backing object is deallocated.
4381 * Conditions:
4382 * Both objects and the cache are locked; the page
4383 * queues are unlocked.
4384 *
4385 */
4386 static void
4387 vm_object_do_collapse(
4388 vm_object_t object,
4389 vm_object_t backing_object)
4390 {
4391 vm_page_t p, pp;
4392 vm_object_offset_t new_offset, backing_offset;
4393 vm_object_size_t size;
4394
4395 vm_object_lock_assert_exclusive(object);
4396 vm_object_lock_assert_exclusive(backing_object);
4397
4398 assert(object->purgable == VM_PURGABLE_DENY);
4399 assert(backing_object->purgable == VM_PURGABLE_DENY);
4400
4401 backing_offset = object->vo_shadow_offset;
4402 size = object->vo_size;
4403
4404 /*
4405 * Move all in-memory pages from backing_object
4406 * to the parent. Pages that have been paged out
4407 * will be overwritten by any of the parent's
4408 * pages that shadow them.
4409 */
4410
4411 while (!vm_page_queue_empty(&backing_object->memq)) {
4412 p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4413
4414 new_offset = (p->vmp_offset - backing_offset);
4415
4416 assert(!p->vmp_busy || p->vmp_absent);
4417
4418 /*
4419 * If the parent has a page here, or if
4420 * this page falls outside the parent,
4421 * dispose of it.
4422 *
4423 * Otherwise, move it as planned.
4424 */
4425
4426 if (p->vmp_offset < backing_offset || new_offset >= size) {
4427 VM_PAGE_FREE(p);
4428 } else {
4429 pp = vm_page_lookup(object, new_offset);
4430 if (pp == VM_PAGE_NULL) {
4431 if (VM_COMPRESSOR_PAGER_STATE_GET(object,
4432 new_offset)
4433 == VM_EXTERNAL_STATE_EXISTS) {
4434 /*
4435 * Parent object has this page
4436 * in the VM compressor.
4437 * Throw away the backing
4438 * object's page.
4439 */
4440 VM_PAGE_FREE(p);
4441 } else {
4442 /*
4443 * Parent now has no page.
4444 * Move the backing object's page
4445 * up.
4446 */
4447 vm_page_rename(p, object, new_offset);
4448 }
4449 } else {
4450 assert(!pp->vmp_absent);
4451
4452 /*
4453 * Parent object has a real page.
4454 * Throw away the backing object's
4455 * page.
4456 */
4457 VM_PAGE_FREE(p);
4458 }
4459 }
4460 }
4461
4462 if (vm_object_collapse_compressor_allowed &&
4463 object->pager != MEMORY_OBJECT_NULL &&
4464 backing_object->pager != MEMORY_OBJECT_NULL) {
4465 /* move compressed pages from backing_object to object */
4466 vm_object_do_collapse_compressor(object, backing_object);
4467 } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4468 assert((!object->pager_created &&
4469 (object->pager == MEMORY_OBJECT_NULL)) ||
4470 (!backing_object->pager_created &&
4471 (backing_object->pager == MEMORY_OBJECT_NULL)));
4472 /*
4473 * Move the pager from backing_object to object.
4474 *
4475 * XXX We're only using part of the paging space
4476 * for keeps now... we ought to discard the
4477 * unused portion.
4478 */
4479
4480 assert(!object->paging_in_progress);
4481 assert(!object->activity_in_progress);
4482 assert(!object->pager_created);
4483 assert(object->pager == NULL);
4484 object->pager = backing_object->pager;
4485
4486 object->pager_created = backing_object->pager_created;
4487 object->pager_control = backing_object->pager_control;
4488 object->pager_ready = backing_object->pager_ready;
4489 object->pager_initialized = backing_object->pager_initialized;
4490 object->paging_offset =
4491 backing_object->paging_offset + backing_offset;
4492 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4493 memory_object_control_collapse(&object->pager_control,
4494 object);
4495 }
4496 /* the backing_object has lost its pager: reset all fields */
4497 backing_object->pager_created = FALSE;
4498 backing_object->pager_control = NULL;
4499 backing_object->pager_ready = FALSE;
4500 backing_object->paging_offset = 0;
4501 backing_object->pager = NULL;
4502 }
4503 /*
4504 * Object now shadows whatever backing_object did.
4505 * Note that the reference to backing_object->shadow
4506 * moves from within backing_object to within object.
4507 */
4508
4509 assert(!object->phys_contiguous);
4510 assert(!backing_object->phys_contiguous);
4511 object->shadow = backing_object->shadow;
4512 if (object->shadow) {
4513 assertf(page_aligned(object->vo_shadow_offset),
4514 "object %p shadow_offset 0x%llx",
4515 object, object->vo_shadow_offset);
4516 assertf(page_aligned(backing_object->vo_shadow_offset),
4517 "backing_object %p shadow_offset 0x%llx",
4518 backing_object, backing_object->vo_shadow_offset);
4519 object->vo_shadow_offset += backing_object->vo_shadow_offset;
4520 /* "backing_object" gave its shadow to "object" */
4521 backing_object->shadow = VM_OBJECT_NULL;
4522 backing_object->vo_shadow_offset = 0;
4523 } else {
4524 /* no shadow, therefore no shadow offset... */
4525 object->vo_shadow_offset = 0;
4526 }
4527 assert((object->shadow == VM_OBJECT_NULL) ||
4528 (object->shadow->copy != backing_object));
4529
4530 /*
4531 * Discard backing_object.
4532 *
4533 * Since the backing object has no pages, no
4534 * pager left, and no object references within it,
4535 * all that is necessary is to dispose of it.
4536 */
4537 object_collapses++;
4538
4539 assert(backing_object->ref_count == 1);
4540 assert(backing_object->resident_page_count == 0);
4541 assert(backing_object->paging_in_progress == 0);
4542 assert(backing_object->activity_in_progress == 0);
4543 assert(backing_object->shadow == VM_OBJECT_NULL);
4544 assert(backing_object->vo_shadow_offset == 0);
4545
4546 if (backing_object->pager != MEMORY_OBJECT_NULL) {
4547 /* ... unless it has a pager; need to terminate pager too */
4548 vm_counters.do_collapse_terminate++;
4549 if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4550 vm_counters.do_collapse_terminate_failure++;
4551 }
4552 return;
4553 }
4554
4555 assert(backing_object->pager == NULL);
4556
4557 backing_object->alive = FALSE;
4558 vm_object_unlock(backing_object);
4559
4560 #if VM_OBJECT_TRACKING
4561 if (vm_object_tracking_inited) {
4562 btlog_remove_entries_for_element(vm_object_tracking_btlog,
4563 backing_object);
4564 }
4565 #endif /* VM_OBJECT_TRACKING */
4566
4567 vm_object_lock_destroy(backing_object);
4568
4569 zfree(vm_object_zone, backing_object);
4570 }
4571
4572 static void
4573 vm_object_do_bypass(
4574 vm_object_t object,
4575 vm_object_t backing_object)
4576 {
4577 /*
4578 * Make the parent shadow the next object
4579 * in the chain.
4580 */
4581
4582 vm_object_lock_assert_exclusive(object);
4583 vm_object_lock_assert_exclusive(backing_object);
4584
4585 vm_object_reference(backing_object->shadow);
4586
4587 assert(!object->phys_contiguous);
4588 assert(!backing_object->phys_contiguous);
4589 object->shadow = backing_object->shadow;
4590 if (object->shadow) {
4591 assertf(page_aligned(object->vo_shadow_offset),
4592 "object %p shadow_offset 0x%llx",
4593 object, object->vo_shadow_offset);
4594 assertf(page_aligned(backing_object->vo_shadow_offset),
4595 "backing_object %p shadow_offset 0x%llx",
4596 backing_object, backing_object->vo_shadow_offset);
4597 object->vo_shadow_offset += backing_object->vo_shadow_offset;
4598 } else {
4599 /* no shadow, therefore no shadow offset... */
4600 object->vo_shadow_offset = 0;
4601 }
4602
4603 /*
4604 * Backing object might have had a copy pointer
4605 * to us. If it did, clear it.
4606 */
4607 if (backing_object->copy == object) {
4608 backing_object->copy = VM_OBJECT_NULL;
4609 }
4610
4611 /*
4612 * Drop the reference count on backing_object.
4613 #if TASK_SWAPPER
4614 * Since its ref_count was at least 2, it
4615 * will not vanish; so we don't need to call
4616 * vm_object_deallocate.
4617 * [with a caveat for "named" objects]
4618 *
4619 * The res_count on the backing object is
4620 * conditionally decremented. It's possible
4621 * (via vm_pageout_scan) to get here with
4622 * a "swapped" object, which has a 0 res_count,
4623 * in which case, the backing object res_count
4624 * is already down by one.
4625 #else
4626 * Don't call vm_object_deallocate unless
4627 * ref_count drops to zero.
4628 *
4629 * The ref_count can drop to zero here if the
4630 * backing object could be bypassed but not
4631 * collapsed, such as when the backing object
4632 * is temporary and cacheable.
4633 #endif
4634 */
4635 if (backing_object->ref_count > 2 ||
4636 (!backing_object->named && backing_object->ref_count > 1)) {
4637 vm_object_lock_assert_exclusive(backing_object);
4638 backing_object->ref_count--;
4639 vm_object_unlock(backing_object);
4640 } else {
4641 /*
4642 * Drop locks so that we can deallocate
4643 * the backing object.
4644 */
4645
4646 /*
4647 * vm_object_collapse (the caller of this function) is
4648 * now called from contexts that may not guarantee that a
4649 * valid reference is held on the object... w/o a valid
4650 * reference, it is unsafe and unwise (you will definitely
4651 * regret it) to unlock the object and then retake the lock
4652 * since the object may be terminated and recycled in between.
4653 * The "activity_in_progress" reference will keep the object
4654 * 'stable'.
4655 */
4656 vm_object_activity_begin(object);
4657 vm_object_unlock(object);
4658
4659 vm_object_unlock(backing_object);
4660 vm_object_deallocate(backing_object);
4661
4662 /*
4663 * Relock object. We don't have to reverify
4664 * its state since vm_object_collapse will
4665 * do that for us as it starts at the
4666 * top of its loop.
4667 */
4668
4669 vm_object_lock(object);
4670 vm_object_activity_end(object);
4671 }
4672
4673 object_bypasses++;
4674 }
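
/*
 * Note the contrast with vm_object_do_collapse() above: a bypass leaves
 * backing_object and its pages intact and merely re-points object->shadow
 * past it, dropping object's reference on backing_object.  It is only
 * attempted when every interesting page of backing_object is already
 * covered by "object" (resident or in its pager), which is what the
 * "show through" checks in vm_object_collapse() below establish.
 */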
4675
4676
4677 /*
4678 * vm_object_collapse:
4679 *
4680 * Perform an object collapse or an object bypass if appropriate.
4681 * The real work of collapsing and bypassing is performed in
4682 * the routines vm_object_do_collapse and vm_object_do_bypass.
4683 *
4684 * Requires that the object be locked and the page queues be unlocked.
4685 *
4686 */
4687 static unsigned long vm_object_collapse_calls = 0;
4688 static unsigned long vm_object_collapse_objects = 0;
4689 static unsigned long vm_object_collapse_do_collapse = 0;
4690 static unsigned long vm_object_collapse_do_bypass = 0;
4691
4692 __private_extern__ void
4693 vm_object_collapse(
4694 vm_object_t object,
4695 vm_object_offset_t hint_offset,
4696 boolean_t can_bypass)
4697 {
4698 vm_object_t backing_object;
4699 unsigned int rcount;
4700 unsigned int size;
4701 vm_object_t original_object;
4702 int object_lock_type;
4703 int backing_object_lock_type;
4704
4705 vm_object_collapse_calls++;
4706
4707 assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
4708
4709 if (!vm_object_collapse_allowed &&
4710 !(can_bypass && vm_object_bypass_allowed)) {
4711 return;
4712 }
4713
4714 if (object == VM_OBJECT_NULL) {
4715 return;
4716 }
4717
4718 original_object = object;
4719
4720 /*
4721 * The top object was locked "exclusive" by the caller.
4722 * In the first pass, to determine if we can collapse the shadow chain,
4723 * take a "shared" lock on the shadow objects. If we can collapse,
4724 * we'll have to go down the chain again with exclusive locks.
4725 */
4726 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4727 backing_object_lock_type = OBJECT_LOCK_SHARED;
4728
4729 retry:
4730 object = original_object;
4731 vm_object_lock_assert_exclusive(object);
4732
4733 while (TRUE) {
4734 vm_object_collapse_objects++;
4735 /*
4736 * Verify that the conditions are right for either
4737 * collapse or bypass:
4738 */
4739
4740 /*
4741 * There is a backing object, and
4742 */
4743
4744 backing_object = object->shadow;
4745 if (backing_object == VM_OBJECT_NULL) {
4746 if (object != original_object) {
4747 vm_object_unlock(object);
4748 }
4749 return;
4750 }
4751 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
4752 vm_object_lock_shared(backing_object);
4753 } else {
4754 vm_object_lock(backing_object);
4755 }
4756
4757 /*
4758 * No pages in the object are currently
4759 * being paged out, and
4760 */
4761 if (object->paging_in_progress != 0 ||
4762 object->activity_in_progress != 0) {
4763 /* try and collapse the rest of the shadow chain */
4764 if (object != original_object) {
4765 vm_object_unlock(object);
4766 }
4767 object = backing_object;
4768 object_lock_type = backing_object_lock_type;
4769 continue;
4770 }
4771
4772 /*
4773 * ...
4774 * The backing object is not read_only,
4775 * and no pages in the backing object are
4776 * currently being paged out.
4777 * The backing object is internal.
4778 *
4779 */
4780
4781 if (!backing_object->internal ||
4782 backing_object->paging_in_progress != 0 ||
4783 backing_object->activity_in_progress != 0) {
4784 /* try and collapse the rest of the shadow chain */
4785 if (object != original_object) {
4786 vm_object_unlock(object);
4787 }
4788 object = backing_object;
4789 object_lock_type = backing_object_lock_type;
4790 continue;
4791 }
4792
4793 /*
4794 * Purgeable objects are not supposed to engage in
4795 * copy-on-write activities, so should not have
4796 * any shadow objects or be a shadow object to another
4797 * object.
4798 * Collapsing a purgeable object would require some
4799 * updates to the purgeable compressed ledgers.
4800 */
4801 if (object->purgable != VM_PURGABLE_DENY ||
4802 backing_object->purgable != VM_PURGABLE_DENY) {
4803 panic("vm_object_collapse() attempting to collapse "
4804 "purgeable object: %p(%d) %p(%d)\n",
4805 object, object->purgable,
4806 backing_object, backing_object->purgable);
4807 /* try and collapse the rest of the shadow chain */
4808 if (object != original_object) {
4809 vm_object_unlock(object);
4810 }
4811 object = backing_object;
4812 object_lock_type = backing_object_lock_type;
4813 continue;
4814 }
4815
4816 /*
4817 * The backing object can't be a copy-object:
4818 * the shadow_offset for the copy-object must stay
4819 * as 0. Furthermore (for the 'we have all the
4820 * pages' case), if we bypass backing_object and
4821 * just shadow the next object in the chain, old
4822 * pages from that object would then have to be copied
4823 * BOTH into the (former) backing_object and into the
4824 * parent object.
4825 */
4826 if (backing_object->shadow != VM_OBJECT_NULL &&
4827 backing_object->shadow->copy == backing_object) {
4828 /* try and collapse the rest of the shadow chain */
4829 if (object != original_object) {
4830 vm_object_unlock(object);
4831 }
4832 object = backing_object;
4833 object_lock_type = backing_object_lock_type;
4834 continue;
4835 }
4836
4837 /*
4838 * We can now try to either collapse the backing
4839 * object (if the parent is the only reference to
4840 * it) or (perhaps) remove the parent's reference
4841 * to it.
4842 *
4843 * If there is exactly one reference to the backing
4844 * object, we may be able to collapse it into the
4845 * parent.
4846 *
4847 * As long as one of the objects is still not known
4848 * to the pager, we can collapse them.
4849 */
4850 if (backing_object->ref_count == 1 &&
4851 (vm_object_collapse_compressor_allowed ||
4852 !object->pager_created
4853 || (!backing_object->pager_created)
4854 ) && vm_object_collapse_allowed) {
4855 /*
4856 * We need the exclusive lock on the VM objects.
4857 */
4858 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4859 /*
4860 * We have an object and its shadow locked
4861 * "shared". We can't just upgrade the locks
4862 * to "exclusive", as some other thread might
4863 * also have these objects locked "shared" and
4864 * attempt to upgrade one or the other to
4865 * "exclusive". The upgrades would block
4866 * forever waiting for the other "shared" locks
4867 * to get released.
4868 * So we have to release the locks and go
4869 * down the shadow chain again (since it could
4870 * have changed) with "exclusive" locking.
4871 */
4872 vm_object_unlock(backing_object);
4873 if (object != original_object) {
4874 vm_object_unlock(object);
4875 }
4876 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4877 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4878 goto retry;
4879 }
4880
4881 /*
4882 * Collapse the object with its backing
4883 * object, and try again with the object's
4884 * new backing object.
4885 */
4886
4887 vm_object_do_collapse(object, backing_object);
4888 vm_object_collapse_do_collapse++;
4889 continue;
4890 }
4891
4892 /*
4893 * Collapsing the backing object was not possible
4894 * or permitted, so let's try bypassing it.
4895 */
4896
4897 if (!(can_bypass && vm_object_bypass_allowed)) {
4898 /* try and collapse the rest of the shadow chain */
4899 if (object != original_object) {
4900 vm_object_unlock(object);
4901 }
4902 object = backing_object;
4903 object_lock_type = backing_object_lock_type;
4904 continue;
4905 }
4906
4907
4908 /*
4909 * If the object doesn't have all its pages present,
4910 * we have to make sure no pages in the backing object
4911 * "show through" before bypassing it.
4912 */
4913 size = (unsigned int)atop(object->vo_size);
4914 rcount = object->resident_page_count;
4915
4916 if (rcount != size) {
4917 vm_object_offset_t offset;
4918 vm_object_offset_t backing_offset;
4919 unsigned int backing_rcount;
4920
4921 /*
4922 * If the backing object has a pager but no pagemap,
4923 * then we cannot bypass it, because we don't know
4924 * what pages it has.
4925 */
4926 if (backing_object->pager_created) {
4927 /* try and collapse the rest of the shadow chain */
4928 if (object != original_object) {
4929 vm_object_unlock(object);
4930 }
4931 object = backing_object;
4932 object_lock_type = backing_object_lock_type;
4933 continue;
4934 }
4935
4936 /*
4937 * If the object has a pager but no pagemap,
4938 * then we cannot bypass it, because we don't know
4939 * what pages it has.
4940 */
4941 if (object->pager_created) {
4942 /* try and collapse the rest of the shadow chain */
4943 if (object != original_object) {
4944 vm_object_unlock(object);
4945 }
4946 object = backing_object;
4947 object_lock_type = backing_object_lock_type;
4948 continue;
4949 }
4950
4951 backing_offset = object->vo_shadow_offset;
4952 backing_rcount = backing_object->resident_page_count;
4953
4954 if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
4955 /*
4956 * we have enough pages in the backing object to guarantee that
4957 * at least 1 of them must be 'uncovered' by a resident page
4958 * in the object we're evaluating, so move on and
4959 * try to collapse the rest of the shadow chain
4960 */
4961 if (object != original_object) {
4962 vm_object_unlock(object);
4963 }
4964 object = backing_object;
4965 object_lock_type = backing_object_lock_type;
4966 continue;
4967 }
4968
4969 /*
4970 * If all of the pages in the backing object are
4971 * shadowed by the parent object, the parent
4972 * object no longer has to shadow the backing
4973 * object; it can shadow the next one in the
4974 * chain.
4975 *
4976 * If the backing object has existence info,
4977 * we must also examine its existence info
4978 * as well.
4979 *
4980 */
4981
4982 #define EXISTS_IN_OBJECT(obj, off, rc) \
4983 ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
4984 == VM_EXTERNAL_STATE_EXISTS) || \
4985 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
4986
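/*
 * Note: EXISTS_IN_OBJECT() deliberately has a side effect: each time a
 * resident page is found in (obj), the caller's residual count (rc) is
 * decremented, and once it reaches zero the vm_page_lookup() is skipped
 * entirely, leaving only the compressor-state check.  This is why the
 * resident-page walk below works on a private copy "rc" of rcount: the
 * offset-based second pass may still need the original value.
 */
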
4987 /*
4988 * Check the hint location first
4989 * (since it is often the quickest way out of here).
4990 */
4991 if (object->cow_hint != ~(vm_offset_t)0) {
4992 hint_offset = (vm_object_offset_t)object->cow_hint;
4993 } else {
4994 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
4995 (hint_offset - 8 * PAGE_SIZE_64) : 0;
4996 }
4997
4998 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
4999 backing_offset, backing_rcount) &&
5000 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
5001 /* dependency right at the hint */
5002 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
5003 /* try and collapse the rest of the shadow chain */
5004 if (object != original_object) {
5005 vm_object_unlock(object);
5006 }
5007 object = backing_object;
5008 object_lock_type = backing_object_lock_type;
5009 continue;
5010 }
5011
5012 /*
5013 * If the object's window onto the backing_object
5014 * is large compared to the number of resident
5015 * pages in the backing object, it makes sense to
5016 * walk the backing_object's resident pages first.
5017 *
5018 * NOTE: Pages may be in the existence map and/or
5019 * resident, so if we don't find a dependency while
5020 * walking the backing object's resident page list
5021 * directly, and there is an existence map, we'll have
5022 * to run the offset-based 2nd pass. Because we may
5023 * have to run both passes, we need to be careful
5024 * not to decrement 'rcount' in the 1st pass
5025 */
5026 if (backing_rcount && backing_rcount < (size / 8)) {
5027 unsigned int rc = rcount;
5028 vm_page_t p;
5029
5030 backing_rcount = backing_object->resident_page_count;
5031 p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
5032 do {
5033 offset = (p->vmp_offset - backing_offset);
5034
5035 if (offset < object->vo_size &&
5036 offset != hint_offset &&
5037 !EXISTS_IN_OBJECT(object, offset, rc)) {
5038 /* found a dependency */
5039 object->cow_hint = (vm_offset_t) offset; /* atomic */
5040
5041 break;
5042 }
5043 p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5044 } while (--backing_rcount);
5045 if (backing_rcount != 0) {
5046 /* try and collapse the rest of the shadow chain */
5047 if (object != original_object) {
5048 vm_object_unlock(object);
5049 }
5050 object = backing_object;
5051 object_lock_type = backing_object_lock_type;
5052 continue;
5053 }
5054 }
5055
5056 /*
5057 * Walk through the offsets looking for pages in the
5058 * backing object that show through to the object.
5059 */
5060 if (backing_rcount) {
5061 offset = hint_offset;
5062
5063 while ((offset =
5064 (offset + PAGE_SIZE_64 < object->vo_size) ?
5065 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5066 if (EXISTS_IN_OBJECT(backing_object, offset +
5067 backing_offset, backing_rcount) &&
5068 !EXISTS_IN_OBJECT(object, offset, rcount)) {
5069 /* found a dependency */
5070 object->cow_hint = (vm_offset_t) offset; /* atomic */
5071 break;
5072 }
5073 }
5074 if (offset != hint_offset) {
5075 /* try and collapse the rest of the shadow chain */
5076 if (object != original_object) {
5077 vm_object_unlock(object);
5078 }
5079 object = backing_object;
5080 object_lock_type = backing_object_lock_type;
5081 continue;
5082 }
5083 }
5084 }
5085
5086 /*
5087 * We need "exclusive" locks on the 2 VM objects.
5088 */
5089 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5090 vm_object_unlock(backing_object);
5091 if (object != original_object) {
5092 vm_object_unlock(object);
5093 }
5094 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5095 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5096 goto retry;
5097 }
5098
5099 /* reset the offset hint for any objects deeper in the chain */
5100 object->cow_hint = (vm_offset_t)0;
5101
5102 /*
5103 * All interesting pages in the backing object
5104 * already live in the parent or its pager.
5105 * Thus we can bypass the backing object.
5106 */
5107
5108 vm_object_do_bypass(object, backing_object);
5109 vm_object_collapse_do_bypass++;
5110
5111 /*
5112 * Try again with this object's new backing object.
5113 */
5114
5115 continue;
5116 }
5117
5118 /* NOT REACHED */
5119 /*
5120 * if (object != original_object) {
5121 * vm_object_unlock(object);
5122 * }
5123 */
5124 }
5125
5126 /*
5127 * Routine: vm_object_page_remove: [internal]
5128 * Purpose:
5129 * Removes all physical pages in the specified
5130 * object range from the object's list of pages.
5131 *
5132 * In/out conditions:
5133 * The object must be locked.
5134 * The object must not have paging_in_progress, usually
5135 * guaranteed by not having a pager.
5136 */
5137 unsigned int vm_object_page_remove_lookup = 0;
5138 unsigned int vm_object_page_remove_iterate = 0;
5139
5140 __private_extern__ void
5141 vm_object_page_remove(
5142 vm_object_t object,
5143 vm_object_offset_t start,
5144 vm_object_offset_t end)
5145 {
5146 vm_page_t p, next;
5147
5148 /*
5149 * One and two page removals are most popular.
5150 * The factor of 16 here is somewhat arbitrary.
5151 * It balances vm_object_lookup vs iteration.
5152 */
5153
5154 if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5155 vm_object_page_remove_lookup++;
5156
5157 for (; start < end; start += PAGE_SIZE_64) {
5158 p = vm_page_lookup(object, start);
5159 if (p != VM_PAGE_NULL) {
5160 assert(!p->vmp_cleaning && !p->vmp_laundry);
5161 if (!p->vmp_fictitious && p->vmp_pmapped) {
5162 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5163 }
5164 VM_PAGE_FREE(p);
5165 }
5166 }
5167 } else {
5168 vm_object_page_remove_iterate++;
5169
5170 p = (vm_page_t) vm_page_queue_first(&object->memq);
5171 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5172 next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5173 if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5174 assert(!p->vmp_cleaning && !p->vmp_laundry);
5175 if (!p->vmp_fictitious && p->vmp_pmapped) {
5176 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5177 }
5178 VM_PAGE_FREE(p);
5179 }
5180 p = next;
5181 }
5182 }
5183 }
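
/*
 * Example of the heuristic above (illustrative numbers): removing a
 * 4-page range from an object with 100 resident pages gives
 * atop_64(end - start) = 4 < 100/16 = 6, so the per-offset
 * vm_page_lookup() path is taken; removing a 64-page range from the
 * same object would instead walk the whole resident-page queue once.
 */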
5184
5185
5186 /*
5187 * Routine: vm_object_coalesce
5188 * Function: Coalesces two objects backing up adjoining
5189 * regions of memory into a single object.
5190 *
5191 * returns TRUE if objects were combined.
5192 *
5193 * NOTE: Only works at the moment if the second object is NULL -
5194 * if it's not, which object do we lock first?
5195 *
5196 * Parameters:
5197 * prev_object First object to coalesce
5198 * prev_offset Offset into prev_object
5199 * next_object Second object to coalesce
5200 * next_offset Offset into next_object
5201 *
5202 * prev_size Size of reference to prev_object
5203 * next_size Size of reference to next_object
5204 *
5205 * Conditions:
5206 * The object(s) must *not* be locked. The map must be locked
5207 * to preserve the reference to the object(s).
5208 */
5209 static int vm_object_coalesce_count = 0;
5210
5211 __private_extern__ boolean_t
5212 vm_object_coalesce(
5213 vm_object_t prev_object,
5214 vm_object_t next_object,
5215 vm_object_offset_t prev_offset,
5216 __unused vm_object_offset_t next_offset,
5217 vm_object_size_t prev_size,
5218 vm_object_size_t next_size)
5219 {
5220 vm_object_size_t newsize;
5221
5222 #ifdef lint
5223 next_offset++;
5224 #endif /* lint */
5225
5226 if (next_object != VM_OBJECT_NULL) {
5227 return FALSE;
5228 }
5229
5230 if (prev_object == VM_OBJECT_NULL) {
5231 return TRUE;
5232 }
5233
5234 vm_object_lock(prev_object);
5235
5236 /*
5237 * Try to collapse the object first
5238 */
5239 vm_object_collapse(prev_object, prev_offset, TRUE);
5240
5241 /*
5242 * Can't coalesce if pages not mapped to
5243 * prev_entry may be in use in any way:
5244 * . more than one reference
5245 * . paged out
5246 * . shadows another object
5247 * . has a copy elsewhere
5248 * . is purgeable
5249 * . paging references (pages might be in page-list)
5250 */
5251
5252 if ((prev_object->ref_count > 1) ||
5253 prev_object->pager_created ||
5254 (prev_object->shadow != VM_OBJECT_NULL) ||
5255 (prev_object->copy != VM_OBJECT_NULL) ||
5256 (prev_object->true_share != FALSE) ||
5257 (prev_object->purgable != VM_PURGABLE_DENY) ||
5258 (prev_object->paging_in_progress != 0) ||
5259 (prev_object->activity_in_progress != 0)) {
5260 vm_object_unlock(prev_object);
5261 return FALSE;
5262 }
5263
5264 vm_object_coalesce_count++;
5265
5266 /*
5267 * Remove any pages that may still be in the object from
5268 * a previous deallocation.
5269 */
5270 vm_object_page_remove(prev_object,
5271 prev_offset + prev_size,
5272 prev_offset + prev_size + next_size);
5273
5274 /*
5275 * Extend the object if necessary.
5276 */
5277 newsize = prev_offset + prev_size + next_size;
5278 if (newsize > prev_object->vo_size) {
5279 assertf(page_aligned(newsize),
5280 "object %p size 0x%llx",
5281 prev_object, (uint64_t)newsize);
5282 prev_object->vo_size = newsize;
5283 }
5284
5285 vm_object_unlock(prev_object);
5286 return TRUE;
5287 }
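
/*
 * A minimal sketch (illustration only, not compiled) of the intended use
 * of vm_object_coalesce(): when a mapping is being extended by "new_size"
 * bytes immediately after an existing entry, the existing object can
 * sometimes simply be grown instead of allocating a new object.  The
 * "example_*" names are hypothetical; the real caller is in the vm_map
 * code.
 */
#if 0 /* illustrative sketch only */
static boolean_t
example_extend_mapping(
	vm_object_t             prev_object,
	vm_object_offset_t      prev_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        new_size)
{
	/* Only coalescing into a single (prev) object is supported. */
	return vm_object_coalesce(prev_object,
	           VM_OBJECT_NULL,
	           prev_offset,
	           (vm_object_offset_t) 0,
	           prev_size,
	           new_size);
}
#endif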
5288
5289 kern_return_t
5290 vm_object_populate_with_private(
5291 vm_object_t object,
5292 vm_object_offset_t offset,
5293 ppnum_t phys_page,
5294 vm_size_t size)
5295 {
5296 ppnum_t base_page;
5297 vm_object_offset_t base_offset;
5298
5299
5300 if (!object->private) {
5301 return KERN_FAILURE;
5302 }
5303
5304 base_page = phys_page;
5305
5306 vm_object_lock(object);
5307
5308 if (!object->phys_contiguous) {
5309 vm_page_t m;
5310
5311 if ((base_offset = trunc_page_64(offset)) != offset) {
5312 vm_object_unlock(object);
5313 return KERN_FAILURE;
5314 }
5315 base_offset += object->paging_offset;
5316
5317 while (size) {
5318 m = vm_page_lookup(object, base_offset);
5319
5320 if (m != VM_PAGE_NULL) {
5321 if (m->vmp_fictitious) {
5322 if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
5323 vm_page_lockspin_queues();
5324 m->vmp_private = TRUE;
5325 vm_page_unlock_queues();
5326
5327 m->vmp_fictitious = FALSE;
5328 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5329 }
5330 } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
5331 if (!m->vmp_private) {
5332 /*
5333 * we'd leak a real page... that can't be right
5334 */
5335 panic("vm_object_populate_with_private - %p not private", m);
5336 }
5337 if (m->vmp_pmapped) {
5338 /*
5339 * pmap call to clear old mapping
5340 */
5341 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5342 }
5343 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5344 }
5345 } else {
5346 m = vm_page_grab_fictitious(TRUE);
5347
5348 /*
5349 * private normally requires lock_queues but since we
5350 * are initializing the page, it's not necessary here
5351 */
5352 m->vmp_private = TRUE;
5353 m->vmp_fictitious = FALSE;
5354 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5355 m->vmp_unusual = TRUE;
5356 m->vmp_busy = FALSE;
5357
5358 vm_page_insert(m, object, base_offset);
5359 }
5360 base_page++; /* Go to the next physical page */
5361 base_offset += PAGE_SIZE;
5362 size -= PAGE_SIZE;
5363 }
5364 } else {
5365 /* NOTE: we should check the original settings here */
5366 /* if we have a size > zero a pmap call should be made */
5367 /* to disable the range */
5368
5369 /* pmap_? */
5370
5371 /* shadows on contiguous memory are not allowed */
5372 /* we therefore can use the offset field */
5373 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5374 assertf(page_aligned(size),
5375 "object %p size 0x%llx",
5376 object, (uint64_t)size);
5377 object->vo_size = size;
5378 }
5379 vm_object_unlock(object);
5380
5381 return KERN_SUCCESS;
5382 }
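
/*
 * Note on the code above: a "private" object carries caller-supplied
 * physical page numbers rather than pages allocated by the VM system.
 * In the !phys_contiguous case each offset gets a (possibly recycled
 * fictitious) vm_page whose physical page number is overridden; in the
 * phys_contiguous case no pages are inserted at all and the base
 * physical address is recorded in vo_shadow_offset, which is available
 * for reuse because shadows are not allowed on contiguous objects.
 */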
5383
5384
5385 kern_return_t
5386 memory_object_create_named(
5387 memory_object_t pager,
5388 memory_object_offset_t size,
5389 memory_object_control_t *control)
5390 {
5391 vm_object_t object;
5392
5393 *control = MEMORY_OBJECT_CONTROL_NULL;
5394 if (pager == MEMORY_OBJECT_NULL) {
5395 return KERN_INVALID_ARGUMENT;
5396 }
5397
5398 object = vm_object_memory_object_associate(pager,
5399 VM_OBJECT_NULL,
5400 size,
5401 TRUE);
5402 if (object == VM_OBJECT_NULL) {
5403 return KERN_INVALID_OBJECT;
5404 }
5405
5406 /* wait for object (if any) to be ready */
5407 if (object != VM_OBJECT_NULL) {
5408 vm_object_lock(object);
5409 object->named = TRUE;
5410 while (!object->pager_ready) {
5411 vm_object_sleep(object,
5412 VM_OBJECT_EVENT_PAGER_READY,
5413 THREAD_UNINT);
5414 }
5415 *control = object->pager_control;
5416 vm_object_unlock(object);
5417 }
5418 return KERN_SUCCESS;
5419 }
5420
5421
5422 /*
5423 * Routine: memory_object_recover_named [user interface]
5424 * Purpose:
5425 * Attempt to recover a named reference for a VM object.
5426 * VM will verify that the object has not already started
5427 * down the termination path, and if it has, will optionally
5428 * wait for that to finish.
5429 * Returns:
5430 * KERN_SUCCESS - we recovered a named reference on the object
5431 * KERN_FAILURE - we could not recover a reference (object dead)
5432 * KERN_INVALID_ARGUMENT - bad memory object control
5433 */
5434 kern_return_t
5435 memory_object_recover_named(
5436 memory_object_control_t control,
5437 boolean_t wait_on_terminating)
5438 {
5439 vm_object_t object;
5440
5441 object = memory_object_control_to_vm_object(control);
5442 if (object == VM_OBJECT_NULL) {
5443 return KERN_INVALID_ARGUMENT;
5444 }
5445 restart:
5446 vm_object_lock(object);
5447
5448 if (object->terminating && wait_on_terminating) {
5449 vm_object_wait(object,
5450 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
5451 THREAD_UNINT);
5452 goto restart;
5453 }
5454
5455 if (!object->alive) {
5456 vm_object_unlock(object);
5457 return KERN_FAILURE;
5458 }
5459
5460 if (object->named == TRUE) {
5461 vm_object_unlock(object);
5462 return KERN_SUCCESS;
5463 }
5464 object->named = TRUE;
5465 vm_object_lock_assert_exclusive(object);
5466 object->ref_count++;
5467 while (!object->pager_ready) {
5468 vm_object_sleep(object,
5469 VM_OBJECT_EVENT_PAGER_READY,
5470 THREAD_UNINT);
5471 }
5472 vm_object_unlock(object);
5473 return KERN_SUCCESS;
5474 }
5475
5476
5477 /*
5478 * vm_object_release_name:
5479 *
5480 * Enforces the "named" semantic on the memory_object reference count decrement.
5481 * This routine should not be called unless the caller holds a name
5482 * reference gained through memory_object_create_named.
5483 *
5484 * If the TERMINATE_IDLE flag is set, the call will return if the
5485 * reference count is not 1, i.e. the object is idle with the only remaining
5486 * reference being the name.
5487 * If the decision is made to proceed, the "named" flag is set to
5488 * false and the reference count is decremented. If the RESPECT_CACHE
5489 * flag is set and the reference count has gone to zero, the
5490 * memory_object is checked to see if it is cacheable; otherwise, when
5491 * the reference count is zero, it is simply terminated.
5492 */
5493
5494 __private_extern__ kern_return_t
5495 vm_object_release_name(
5496 vm_object_t object,
5497 int flags)
5498 {
5499 vm_object_t shadow;
5500 boolean_t original_object = TRUE;
5501
5502 while (object != VM_OBJECT_NULL) {
5503 vm_object_lock(object);
5504
5505 assert(object->alive);
5506 if (original_object) {
5507 assert(object->named);
5508 }
5509 assert(object->ref_count > 0);
5510
5511 /*
5512 * We have to wait for initialization before
5513 * destroying or caching the object.
5514 */
5515
5516 if (object->pager_created && !object->pager_initialized) {
5517 assert(!object->can_persist);
5518 vm_object_assert_wait(object,
5519 VM_OBJECT_EVENT_INITIALIZED,
5520 THREAD_UNINT);
5521 vm_object_unlock(object);
5522 thread_block(THREAD_CONTINUE_NULL);
5523 continue;
5524 }
5525
5526 if (((object->ref_count > 1)
5527 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
5528 || (object->terminating)) {
5529 vm_object_unlock(object);
5530 return KERN_FAILURE;
5531 } else {
5532 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
5533 vm_object_unlock(object);
5534 return KERN_SUCCESS;
5535 }
5536 }
5537
5538 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
5539 (object->ref_count == 1)) {
5540 if (original_object) {
5541 object->named = FALSE;
5542 }
5543 vm_object_unlock(object);
5544 /* let vm_object_deallocate push this thing into */
5546 /* the cache, if that is where it is bound */
5546 vm_object_deallocate(object);
5547 return KERN_SUCCESS;
5548 }
5549 shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
5550
5551 if (object->ref_count == 1) {
5552 if (vm_object_terminate(object) != KERN_SUCCESS) {
5553 if (original_object) {
5554 return KERN_FAILURE;
5555 } else {
5556 return KERN_SUCCESS;
5557 }
5558 }
5559 if (shadow != VM_OBJECT_NULL) {
5560 original_object = FALSE;
5561 object = shadow;
5562 continue;
5563 }
5564 return KERN_SUCCESS;
5565 } else {
5566 vm_object_lock_assert_exclusive(object);
5567 object->ref_count--;
5568 assert(object->ref_count > 0);
5569 if (original_object) {
5570 object->named = FALSE;
5571 }
5572 vm_object_unlock(object);
5573 return KERN_SUCCESS;
5574 }
5575 }
5576 /*NOTREACHED*/
5577 assert(0);
5578 return KERN_FAILURE;
5579 }
5580
5581
5582 __private_extern__ kern_return_t
5583 vm_object_lock_request(
5584 vm_object_t object,
5585 vm_object_offset_t offset,
5586 vm_object_size_t size,
5587 memory_object_return_t should_return,
5588 int flags,
5589 vm_prot_t prot)
5590 {
5591 __unused boolean_t should_flush;
5592
5593 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5594
5595 /*
5596 * Check for bogus arguments.
5597 */
5598 if (object == VM_OBJECT_NULL) {
5599 return KERN_INVALID_ARGUMENT;
5600 }
5601
5602 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5603 return KERN_INVALID_ARGUMENT;
5604 }
5605
5606 /*
5607 * XXX TODO4K
5608 * extend range for conservative operations (copy-on-write, sync, ...)
5609 * truncate range for destructive operations (purge, ...)
5610 */
5611 size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5612 offset = vm_object_trunc_page(offset);
5613
5614 /*
5615 * Lock the object, and acquire a paging reference to
5616 * prevent the memory_object reference from being released.
5617 */
5618 vm_object_lock(object);
5619 vm_object_paging_begin(object);
5620
5621 (void)vm_object_update(object,
5622 offset, size, NULL, NULL, should_return, flags, prot);
5623
5624 vm_object_paging_end(object);
5625 vm_object_unlock(object);
5626
5627 return KERN_SUCCESS;
5628 }
5629
5630 /*
5631 * Empty a purgeable object by grabbing the physical pages assigned to it and
5632 * putting them on the free queue without writing them to backing store, etc.
5633 * When the pages are next touched they will be demand zero-fill pages. We
5634 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
5635 * skip referenced/dirty pages, pages on the active queue, etc. We're more
5636 * than happy to grab these since this is a purgeable object. We mark the
5637 * object as "empty" after reaping its pages.
5638 *
5639 * On entry the object must be locked and it must be
5640 * purgeable with no delayed copies pending.
5641 */
5642 uint64_t
5643 vm_object_purge(vm_object_t object, int flags)
5644 {
5645 unsigned int object_page_count = 0, pgcount = 0;
5646 uint64_t total_purged_pgcount = 0;
5647 boolean_t skipped_object = FALSE;
5648
5649 vm_object_lock_assert_exclusive(object);
5650
5651 if (object->purgable == VM_PURGABLE_DENY) {
5652 return 0;
5653 }
5654
5655 assert(object->copy == VM_OBJECT_NULL);
5656 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5657
5658 /*
5659 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5660 * reaping its pages. We update vm_page_purgeable_count in bulk
5661 * and we don't want vm_page_remove() to update it again for each
5662 * page we reap later.
5663 *
5664 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5665 * are all accounted for in the "volatile" ledgers, so this does not
5666 * make any difference.
5667 * If we transitioned directly from NONVOLATILE to EMPTY,
5668 * vm_page_purgeable_count must have been updated when the object
5669 * was dequeued from its volatile queue and the purgeable ledgers
5670 * must have also been updated accordingly at that time (in
5671 * vm_object_purgable_control()).
5672 */
5673 if (object->purgable == VM_PURGABLE_VOLATILE) {
5674 unsigned int delta;
5675 assert(object->resident_page_count >=
5676 object->wired_page_count);
5677 delta = (object->resident_page_count -
5678 object->wired_page_count);
5679 if (delta != 0) {
5680 assert(vm_page_purgeable_count >=
5681 delta);
5682 OSAddAtomic(-delta,
5683 (SInt32 *)&vm_page_purgeable_count);
5684 }
5685 if (object->wired_page_count != 0) {
5686 assert(vm_page_purgeable_wired_count >=
5687 object->wired_page_count);
5688 OSAddAtomic(-object->wired_page_count,
5689 (SInt32 *)&vm_page_purgeable_wired_count);
5690 }
5691 object->purgable = VM_PURGABLE_EMPTY;
5692 }
5693 assert(object->purgable == VM_PURGABLE_EMPTY);
5694
5695 object_page_count = object->resident_page_count;
5696
5697 vm_object_reap_pages(object, REAP_PURGEABLE);
5698
5699 if (object->resident_page_count >= object_page_count) {
5700 total_purged_pgcount = 0;
5701 } else {
5702 total_purged_pgcount = object_page_count - object->resident_page_count;
5703 }
5704
5705 if (object->pager != NULL) {
5706 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5707
5708 if (object->activity_in_progress == 0 &&
5709 object->paging_in_progress == 0) {
5710 /*
5711 * Also reap any memory coming from this object
5712 * in the VM compressor.
5713 *
5714 * There are no operations in progress on the VM object
5715 * and no operation can start while we're holding the
5716 * VM object lock, so it's safe to reap the compressed
5717 * pages and update the page counts.
5718 */
5719 pgcount = vm_compressor_pager_get_count(object->pager);
5720 if (pgcount) {
5721 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
5722 vm_compressor_pager_count(object->pager,
5723 -pgcount,
5724 FALSE, /* shared */
5725 object);
5726 vm_object_owner_compressed_update(object,
5727 -pgcount);
5728 }
5729 if (!(flags & C_DONT_BLOCK)) {
5730 assert(vm_compressor_pager_get_count(object->pager)
5731 == 0);
5732 }
5733 } else {
5734 /*
5735 * There's some kind of paging activity in progress
5736 * for this object, which could result in a page
5737 * being compressed or decompressed, possibly while
5738 * the VM object is not locked, so it could race
5739 * with us.
5740 *
5741 * We can't really synchronize this without possibly
5742 * causing a deadlock when the compressor needs to
5743 * allocate or free memory while compressing or
5744 * decompressing a page from a purgeable object
5745 * mapped in the kernel_map...
5746 *
5747 * So let's not attempt to purge the compressor
5748 * pager if there's any kind of operation in
5749 * progress on the VM object.
5750 */
5751 skipped_object = TRUE;
5752 }
5753 }
5754
5755 vm_object_lock_assert_exclusive(object);
5756
5757 total_purged_pgcount += pgcount;
5758
5759 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
5760 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
5761 object_page_count,
5762 total_purged_pgcount,
5763 skipped_object,
5764 0);
5765
5766 return total_purged_pgcount;
5767 }
5768
5769
5770 /*
5771 * vm_object_purgeable_control() allows the caller to control and investigate the
5772 * state of a purgeable object. A purgeable object is created via a call to
5773 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
5774 * never be coalesced with any other object -- even other purgeable objects --
5775 * and will thus always remain a distinct object. A purgeable object has
5776 * special semantics when its reference count is exactly 1. If its reference
5777 * count is greater than 1, then a purgeable object will behave like a normal
5778 * object and attempts to use this interface will result in an error return
5779 * of KERN_INVALID_ARGUMENT.
5780 *
5781 * A purgeable object may be put into a "volatile" state which will make the
5782 * object's pages eligible for being reclaimed without paging to backing
5783 * store if the system runs low on memory. If the pages in a volatile
5784 * purgeable object are reclaimed, the purgeable object is said to have been
5785 * "emptied." When a purgeable object is emptied the system will reclaim as
5786 * many pages from the object as it can in a convenient manner (pages already
5787 * en route to backing store or busy for other reasons are left as is). When
5788 * a purgeable object is made volatile, its pages will generally be reclaimed
5789 * before other pages in the application's working set. This semantic is
5790 * generally used by applications which can recreate the data in the object
5791 * faster than it can be paged in. One such example might be media assets
5792 * which can be reread from a much faster RAID volume.
5793 *
5794 * A purgeable object may be designated as "non-volatile" which means it will
5795 * behave like all other objects in the system with pages being written to and
5796 * read from backing store as needed to satisfy system memory needs. If the
5797 * object was emptied before the object was made non-volatile, that fact will
5798 * be returned as the old state of the purgeable object (see
5799 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
5800 * were reclaimed as part of emptying the object will be refaulted in as
5801 * zero-fill on demand. It is up to the application to note that an object
5802 * was emptied and recreate the object's contents if necessary. When a
5803 * purgeable object is made non-volatile, its pages will generally not be paged
5804 * out to backing store in the immediate future. A purgeable object may also
5805 * be manually emptied.
5806 *
5807 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
5808 * volatile purgeable object may be queried at any time. This information may
5809 * be used as a control input to let the application know when the system is
5810 * experiencing memory pressure and is reclaiming memory.
5811 *
5812 * The specified address may be any address within the purgeable object. If
5813 * the specified address does not represent any object in the target task's
5814 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
5815 * object containing the specified address is not a purgeable object, then
5816 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
5817 * returned.
5818 *
5819 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
5820 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
5821 * state is used to set the new state of the purgeable object and return its
5822 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
5823 * object is returned in the parameter state.
5824 *
5825 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
5826 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
5827 * the non-volatile, volatile and volatile/empty states described above.
5828 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
5829 * immediately reclaim as many pages in the object as can be conveniently
5830 * collected (some may have already been written to backing store or be
5831 * otherwise busy).
5832 *
5833 * The process of making a purgeable object non-volatile and determining its
5834 * previous state is atomic. Thus, if a purgeable object is made
5835 * VM_PURGABLE_NONVOLATILE and the old state is returned as
5836 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
5837 * completely intact and will remain so until the object is made volatile
5838 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
5839 * was reclaimed while it was in a volatile state and its previous contents
5840 * have been lost.
5841 */
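/*
 * Rough user-level usage sketch (illustrative only; assumes the
 * user-level vm_allocate() and vm_purgable_control() Mach calls
 * described above, with "size" supplied by the caller):
 *
 *	kern_return_t kr;
 *	vm_address_t addr = 0;
 *	int state;
 *
 *	kr = vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *	... fill the buffer with data that can be recreated ...
 *
 *	state = VM_PURGABLE_VOLATILE;
 *	kr = vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 *	... later, before touching the data again ...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		... contents were reclaimed; recreate them ...
 *	}
 */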
5842 /*
5843 * The object must be locked.
5844 */
5845 kern_return_t
5846 vm_object_purgable_control(
5847 vm_object_t object,
5848 vm_purgable_t control,
5849 int *state)
5850 {
5851 int old_state;
5852 int new_state;
5853
5854 if (object == VM_OBJECT_NULL) {
5855 /*
5856 * Object must already be present or it can't be purgeable.
5857 */
5858 return KERN_INVALID_ARGUMENT;
5859 }
5860
5861 vm_object_lock_assert_exclusive(object);
5862
5863 /*
5864 * Get current state of the purgeable object.
5865 */
5866 old_state = object->purgable;
5867 if (old_state == VM_PURGABLE_DENY) {
5868 return KERN_INVALID_ARGUMENT;
5869 }
5870
5871 /* purgeable can't have delayed copies - now or in the future */
5872 assert(object->copy == VM_OBJECT_NULL);
5873 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5874
5875 /*
5876 * Execute the desired operation.
5877 */
5878 if (control == VM_PURGABLE_GET_STATE) {
5879 *state = old_state;
5880 return KERN_SUCCESS;
5881 }
5882
5883 if (control == VM_PURGABLE_SET_STATE &&
5884 object->purgeable_only_by_kernel) {
5885 return KERN_PROTECTION_FAILURE;
5886 }
5887
5888 if (control != VM_PURGABLE_SET_STATE &&
5889 control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
5890 return KERN_INVALID_ARGUMENT;
5891 }
5892
5893 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
5894 object->volatile_empty = TRUE;
5895 }
5896 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
5897 object->volatile_fault = TRUE;
5898 }
5899
5900 new_state = *state & VM_PURGABLE_STATE_MASK;
5901 if (new_state == VM_PURGABLE_VOLATILE) {
5902 if (old_state == VM_PURGABLE_EMPTY) {
5903 /* what's been emptied must stay empty */
5904 new_state = VM_PURGABLE_EMPTY;
5905 }
5906 if (object->volatile_empty) {
5907 /* debugging mode: go straight to empty */
5908 new_state = VM_PURGABLE_EMPTY;
5909 }
5910 }
5911
5912 switch (new_state) {
5913 case VM_PURGABLE_DENY:
5914 /*
5915 * Attempting to convert purgeable memory to non-purgeable:
5916 * not allowed.
5917 */
5918 return KERN_INVALID_ARGUMENT;
5919 case VM_PURGABLE_NONVOLATILE:
5920 object->purgable = new_state;
5921
5922 if (old_state == VM_PURGABLE_VOLATILE) {
5923 unsigned int delta;
5924
5925 assert(object->resident_page_count >=
5926 object->wired_page_count);
5927 delta = (object->resident_page_count -
5928 object->wired_page_count);
5929
5930 assert(vm_page_purgeable_count >= delta);
5931
5932 if (delta != 0) {
5933 OSAddAtomic(-delta,
5934 (SInt32 *)&vm_page_purgeable_count);
5935 }
5936 if (object->wired_page_count != 0) {
5937 assert(vm_page_purgeable_wired_count >=
5938 object->wired_page_count);
5939 OSAddAtomic(-object->wired_page_count,
5940 (SInt32 *)&vm_page_purgeable_wired_count);
5941 }
5942
5943 vm_page_lock_queues();
5944
5945 /* object should be on a queue */
5946 assert(object->objq.next != NULL &&
5947 object->objq.prev != NULL);
5948 purgeable_q_t queue;
5949
5950 /*
5951 * Move object from its volatile queue to the
5952 * non-volatile queue...
5953 */
5954 queue = vm_purgeable_object_remove(object);
5955 assert(queue);
5956
5957 if (object->purgeable_when_ripe) {
5958 vm_purgeable_token_delete_last(queue);
5959 }
5960 assert(queue->debug_count_objects >= 0);
5961
5962 vm_page_unlock_queues();
5963 }
5964 if (old_state == VM_PURGABLE_VOLATILE ||
5965 old_state == VM_PURGABLE_EMPTY) {
5966 /*
5967 * Transfer the object's pages from the volatile to
5968 * non-volatile ledgers.
5969 */
5970 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
5971 }
5972
5973 break;
5974
5975 case VM_PURGABLE_VOLATILE:
5976 if (object->volatile_fault) {
5977 vm_page_t p;
5978 int refmod;
5979
5980 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5981 if (p->vmp_busy ||
5982 VM_PAGE_WIRED(p) ||
5983 p->vmp_fictitious) {
5984 continue;
5985 }
5986 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5987 if ((refmod & VM_MEM_MODIFIED) &&
5988 !p->vmp_dirty) {
5989 SET_PAGE_DIRTY(p, FALSE);
5990 }
5991 }
5992 }
5993
5994 assert(old_state != VM_PURGABLE_EMPTY);
5995
5996 purgeable_q_t queue;
5997
5998 /* find the correct queue */
5999 if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
6000 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
6001 } else {
6002 if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
6003 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6004 } else {
6005 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
6006 }
6007 }
6008
6009 if (old_state == VM_PURGABLE_NONVOLATILE ||
6010 old_state == VM_PURGABLE_EMPTY) {
6011 unsigned int delta;
6012
6013 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6014 VM_PURGABLE_NO_AGING) {
6015 object->purgeable_when_ripe = FALSE;
6016 } else {
6017 object->purgeable_when_ripe = TRUE;
6018 }
6019
6020 if (object->purgeable_when_ripe) {
6021 kern_return_t result;
6022
6023 /* try to add token... this can fail */
6024 vm_page_lock_queues();
6025
6026 result = vm_purgeable_token_add(queue);
6027 if (result != KERN_SUCCESS) {
6028 vm_page_unlock_queues();
6029 return result;
6030 }
6031 vm_page_unlock_queues();
6032 }
6033
6034 assert(object->resident_page_count >=
6035 object->wired_page_count);
6036 delta = (object->resident_page_count -
6037 object->wired_page_count);
6038
6039 if (delta != 0) {
6040 OSAddAtomic(delta,
6041 &vm_page_purgeable_count);
6042 }
6043 if (object->wired_page_count != 0) {
6044 OSAddAtomic(object->wired_page_count,
6045 &vm_page_purgeable_wired_count);
6046 }
6047
6048 object->purgable = new_state;
6049
6050 /* object should be on "non-volatile" queue */
6051 assert(object->objq.next != NULL);
6052 assert(object->objq.prev != NULL);
6053 } else if (old_state == VM_PURGABLE_VOLATILE) {
6054 purgeable_q_t old_queue;
6055 boolean_t purgeable_when_ripe;
6056
6057 /*
6058 * if reassigning priorities / purgeable groups, we don't change the
6059 * token queue. So moving priorities will not make pages stay around longer.
6060 * Reasoning is that the algorithm gives most priority to the most important
6061 * object. If a new token is added, the most important object's priority is boosted.
6062 * This already biases the system in favor of purgeable queues that move a lot.
6063 * More biasing doesn't seem necessary in this case, where no new object is added.
6064 */
6065 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6066
6067 old_queue = vm_purgeable_object_remove(object);
6068 assert(old_queue);
6069
6070 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6071 VM_PURGABLE_NO_AGING) {
6072 purgeable_when_ripe = FALSE;
6073 } else {
6074 purgeable_when_ripe = TRUE;
6075 }
6076
6077 if (old_queue != queue ||
6078 (purgeable_when_ripe !=
6079 object->purgeable_when_ripe)) {
6080 kern_return_t result;
6081
6082 /* Changing queue. Have to move token. */
6083 vm_page_lock_queues();
6084 if (object->purgeable_when_ripe) {
6085 vm_purgeable_token_delete_last(old_queue);
6086 }
6087 object->purgeable_when_ripe = purgeable_when_ripe;
6088 if (object->purgeable_when_ripe) {
6089 result = vm_purgeable_token_add(queue);
6090 assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */
6091 }
6092 vm_page_unlock_queues();
6093 }
6094 }
6095
6096 vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
6097 if (old_state == VM_PURGABLE_NONVOLATILE) {
6098 vm_purgeable_accounting(object,
6099 VM_PURGABLE_NONVOLATILE);
6100 }
6101
6102 assert(queue->debug_count_objects >= 0);
6103
6104 break;
6105
6106
6107 case VM_PURGABLE_EMPTY:
6108 if (object->volatile_fault) {
6109 vm_page_t p;
6110 int refmod;
6111
6112 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6113 if (p->vmp_busy ||
6114 VM_PAGE_WIRED(p) ||
6115 p->vmp_fictitious) {
6116 continue;
6117 }
6118 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6119 if ((refmod & VM_MEM_MODIFIED) &&
6120 !p->vmp_dirty) {
6121 SET_PAGE_DIRTY(p, FALSE);
6122 }
6123 }
6124 }
6125
6126 if (old_state == VM_PURGABLE_VOLATILE) {
6127 purgeable_q_t old_queue;
6128
6129 /* object should be on a queue */
6130 assert(object->objq.next != NULL &&
6131 object->objq.prev != NULL);
6132
6133 old_queue = vm_purgeable_object_remove(object);
6134 assert(old_queue);
6135 if (object->purgeable_when_ripe) {
6136 vm_page_lock_queues();
6137 vm_purgeable_token_delete_first(old_queue);
6138 vm_page_unlock_queues();
6139 }
6140 }
6141
6142 if (old_state == VM_PURGABLE_NONVOLATILE) {
6143 /*
6144 * This object's pages were previously accounted as
6145 * "non-volatile" and now need to be accounted as
6146 * "volatile".
6147 */
6148 vm_purgeable_accounting(object,
6149 VM_PURGABLE_NONVOLATILE);
6150 /*
6151 * Set to VM_PURGABLE_EMPTY because the pages are no
6152 * longer accounted in the "non-volatile" ledger
6153 * and are also not accounted for in
6154 * "vm_page_purgeable_count".
6155 */
6156 object->purgable = VM_PURGABLE_EMPTY;
6157 }
6158
6159 (void) vm_object_purge(object, 0);
6160 assert(object->purgable == VM_PURGABLE_EMPTY);
6161
6162 break;
6163 }
6164
6165 *state = old_state;
6166
6167 vm_object_lock_assert_exclusive(object);
6168
6169 return KERN_SUCCESS;
6170 }
6171
6172 kern_return_t
6173 vm_object_get_page_counts(
6174 vm_object_t object,
6175 vm_object_offset_t offset,
6176 vm_object_size_t size,
6177 unsigned int *resident_page_count,
6178 unsigned int *dirty_page_count)
6179 {
6180 kern_return_t kr = KERN_SUCCESS;
6181 boolean_t count_dirty_pages = FALSE;
6182 vm_page_t p = VM_PAGE_NULL;
6183 unsigned int local_resident_count = 0;
6184 unsigned int local_dirty_count = 0;
6185 vm_object_offset_t cur_offset = 0;
6186 vm_object_offset_t end_offset = 0;
6187
6188 if (object == VM_OBJECT_NULL) {
6189 return KERN_INVALID_ARGUMENT;
6190 }
6191
6192
6193 cur_offset = offset;
6194
6195 end_offset = offset + size;
6196
6197 vm_object_lock_assert_exclusive(object);
6198
6199 if (dirty_page_count != NULL) {
6200 count_dirty_pages = TRUE;
6201 }
6202
6203 if (resident_page_count != NULL && count_dirty_pages == FALSE) {
6204 /*
6205 * Fast path when:
6206 * - we only want the resident page count, and,
6207 * - the entire object is exactly covered by the request.
6208 */
6209 if (offset == 0 && (object->vo_size == size)) {
6210 *resident_page_count = object->resident_page_count;
6211 goto out;
6212 }
6213 }
6214
6215 if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
6216 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6217 if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
6218 local_resident_count++;
6219
6220 if (count_dirty_pages) {
6221 if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6222 local_dirty_count++;
6223 }
6224 }
6225 }
6226 }
6227 } else {
6228 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6229 p = vm_page_lookup(object, cur_offset);
6230
6231 if (p != VM_PAGE_NULL) {
6232 local_resident_count++;
6233
6234 if (count_dirty_pages) {
6235 if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6236 local_dirty_count++;
6237 }
6238 }
6239 }
6240 }
6241 }
6242
6243 if (resident_page_count != NULL) {
6244 *resident_page_count = local_resident_count;
6245 }
6246
6247 if (dirty_page_count != NULL) {
6248 *dirty_page_count = local_dirty_count;
6249 }
6250
6251 out:
6252 return kr;
6253 }
6254
6255
6256 /*
6257 * vm_object_reference:
6258 *
6259 * Gets another reference to the given object.
6260 */
6261 #ifdef vm_object_reference
6262 #undef vm_object_reference
6263 #endif
6264 __private_extern__ void
6265 vm_object_reference(
6266 vm_object_t object)
6267 {
6268 if (object == VM_OBJECT_NULL) {
6269 return;
6270 }
6271
6272 vm_object_lock(object);
6273 assert(object->ref_count > 0);
6274 vm_object_reference_locked(object);
6275 vm_object_unlock(object);
6276 }
6277
6278 /*
6279 * vm_object_transpose
6280 *
6281 * This routine takes two VM objects of the same size and exchanges
6282 * their backing store.
6283 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6284 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6285 *
6286 * The VM objects must not be locked by caller.
6287 */
6288 unsigned int vm_object_transpose_count = 0;
6289 kern_return_t
6290 vm_object_transpose(
6291 vm_object_t object1,
6292 vm_object_t object2,
6293 vm_object_size_t transpose_size)
6294 {
6295 vm_object_t tmp_object;
6296 kern_return_t retval;
6297 boolean_t object1_locked, object2_locked;
6298 vm_page_t page;
6299 vm_object_offset_t page_offset;
6300
6301 tmp_object = VM_OBJECT_NULL;
6302 object1_locked = FALSE; object2_locked = FALSE;
6303
6304 if (object1 == object2 ||
6305 object1 == VM_OBJECT_NULL ||
6306 object2 == VM_OBJECT_NULL) {
6307 /*
6308 * If the 2 VM objects are the same or either is VM_OBJECT_NULL,
6309 * there's no point in exchanging their backing store.
6310 */
6311 retval = KERN_INVALID_VALUE;
6312 goto done;
6313 }
6314
6315 /*
6316 * Since we need to lock both objects at the same time,
6317 * make sure we always lock them in the same order to
6318 * avoid deadlocks.
6319 */
6320 if (object1 > object2) {
6321 tmp_object = object1;
6322 object1 = object2;
6323 object2 = tmp_object;
6324 }
6325
6326 /*
6327 * Allocate a temporary VM object to hold object1's contents
6328 * while we copy object2 to object1.
6329 */
6330 tmp_object = vm_object_allocate(transpose_size);
6331 vm_object_lock(tmp_object);
6332 tmp_object->can_persist = FALSE;
6333
6334
6335 /*
6336 * Grab control of the 1st VM object.
6337 */
6338 vm_object_lock(object1);
6339 object1_locked = TRUE;
6340 if (!object1->alive || object1->terminating ||
6341 object1->copy || object1->shadow || object1->shadowed ||
6342 object1->purgable != VM_PURGABLE_DENY) {
6343 /*
6344 * We don't deal with copy or shadow objects (yet).
6345 */
6346 retval = KERN_INVALID_VALUE;
6347 goto done;
6348 }
6349 /*
6350 * We're about to mess with the object's backing store and
6351 * taking a "paging_in_progress" reference wouldn't be enough
6352 * to prevent any paging activity on this object, so the caller should
6353 * have "quiesced" the objects beforehand, via a UPL operation with
6354 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6355 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6356 *
6357 * Wait for any paging operation to complete (but only paging, not
6358 * other kinds of activity not linked to the pager). After we're
6359 * satisfied that there's no more paging in progress, we keep the
6360 * object locked, to guarantee that no one tries to access its pager.
6361 */
6362 vm_object_paging_only_wait(object1, THREAD_UNINT);
6363
6364 /*
6365 * Same as above for the 2nd object...
6366 */
6367 vm_object_lock(object2);
6368 object2_locked = TRUE;
6369 if (!object2->alive || object2->terminating ||
6370 object2->copy || object2->shadow || object2->shadowed ||
6371 object2->purgable != VM_PURGABLE_DENY) {
6372 retval = KERN_INVALID_VALUE;
6373 goto done;
6374 }
6375 vm_object_paging_only_wait(object2, THREAD_UNINT);
6376
6377
6378 if (object1->vo_size != object2->vo_size ||
6379 object1->vo_size != transpose_size) {
6380 /*
6381 * If the 2 objects don't have the same size, we can't
6382 * exchange their backing stores or one would overflow.
6383 * If their size doesn't match the caller's
6384 * "transpose_size", we can't do it either because the
6385 * transpose operation will affect the entire span of
6386 * the objects.
6387 */
6388 retval = KERN_INVALID_VALUE;
6389 goto done;
6390 }
6391
6392
6393 /*
6394 * Transpose the lists of resident pages.
6395 * This also updates the resident_page_count and the memq_hint.
6396 */
6397 if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6398 /*
6399 * No pages in object1, just transfer pages
6400 * from object2 to object1. No need to go through
6401 * an intermediate object.
6402 */
6403 while (!vm_page_queue_empty(&object2->memq)) {
6404 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6405 vm_page_rename(page, object1, page->vmp_offset);
6406 }
6407 assert(vm_page_queue_empty(&object2->memq));
6408 } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6409 /*
6410 * No pages in object2, just transfer pages
6411 * from object1 to object2. No need to go through
6412 * an intermediate object.
6413 */
6414 while (!vm_page_queue_empty(&object1->memq)) {
6415 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6416 vm_page_rename(page, object2, page->vmp_offset);
6417 }
6418 assert(vm_page_queue_empty(&object1->memq));
6419 } else {
6420 /* transfer object1's pages to tmp_object */
6421 while (!vm_page_queue_empty(&object1->memq)) {
6422 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6423 page_offset = page->vmp_offset;
6424 vm_page_remove(page, TRUE);
6425 page->vmp_offset = page_offset;
6426 vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6427 }
6428 assert(vm_page_queue_empty(&object1->memq));
6429 /* transfer object2's pages to object1 */
6430 while (!vm_page_queue_empty(&object2->memq)) {
6431 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6432 vm_page_rename(page, object1, page->vmp_offset);
6433 }
6434 assert(vm_page_queue_empty(&object2->memq));
6435 /* transfer tmp_object's pages to object2 */
6436 while (!vm_page_queue_empty(&tmp_object->memq)) {
6437 page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6438 vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6439 vm_page_insert(page, object2, page->vmp_offset);
6440 }
6441 assert(vm_page_queue_empty(&tmp_object->memq));
6442 }
6443
6444 #define __TRANSPOSE_FIELD(field) \
6445 MACRO_BEGIN \
6446 tmp_object->field = object1->field; \
6447 object1->field = object2->field; \
6448 object2->field = tmp_object->field; \
6449 MACRO_END
6450
6451 /* "Lock" refers to the object not its contents */
6452 /* "size" should be identical */
6453 assert(object1->vo_size == object2->vo_size);
6454 /* "memq_hint" was updated above when transposing pages */
6455 /* "ref_count" refers to the object not its contents */
6456 assert(object1->ref_count >= 1);
6457 assert(object2->ref_count >= 1);
6458 /* "resident_page_count" was updated above when transposing pages */
6459 /* "wired_page_count" was updated above when transposing pages */
6460 #if !VM_TAG_ACTIVE_UPDATE
6461 /* "wired_objq" was dealt with along with "wired_page_count" */
6462 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6463 /* "reusable_page_count" was updated above when transposing pages */
6464 /* there should be no "copy" */
6465 assert(!object1->copy);
6466 assert(!object2->copy);
6467 /* there should be no "shadow" */
6468 assert(!object1->shadow);
6469 assert(!object2->shadow);
6470 __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6471 __TRANSPOSE_FIELD(pager);
6472 __TRANSPOSE_FIELD(paging_offset);
6473 __TRANSPOSE_FIELD(pager_control);
6474 /* update the memory_objects' pointers back to the VM objects */
6475 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6476 memory_object_control_collapse(&object1->pager_control,
6477 object1);
6478 }
6479 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6480 memory_object_control_collapse(&object2->pager_control,
6481 object2);
6482 }
6483 __TRANSPOSE_FIELD(copy_strategy);
6484 /* "paging_in_progress" refers to the object not its contents */
6485 assert(!object1->paging_in_progress);
6486 assert(!object2->paging_in_progress);
6487 assert(object1->activity_in_progress);
6488 assert(object2->activity_in_progress);
6489 /* "all_wanted" refers to the object not its contents */
6490 __TRANSPOSE_FIELD(pager_created);
6491 __TRANSPOSE_FIELD(pager_initialized);
6492 __TRANSPOSE_FIELD(pager_ready);
6493 __TRANSPOSE_FIELD(pager_trusted);
6494 __TRANSPOSE_FIELD(can_persist);
6495 __TRANSPOSE_FIELD(internal);
6496 __TRANSPOSE_FIELD(private);
6497 __TRANSPOSE_FIELD(pageout);
6498 /* "alive" should be set */
6499 assert(object1->alive);
6500 assert(object2->alive);
6501 /* "purgeable" should be non-purgeable */
6502 assert(object1->purgable == VM_PURGABLE_DENY);
6503 assert(object2->purgable == VM_PURGABLE_DENY);
6504 /* "shadowed" refers to the the object not its contents */
6505 __TRANSPOSE_FIELD(purgeable_when_ripe);
6506 __TRANSPOSE_FIELD(true_share);
6507 /* "terminating" should not be set */
6508 assert(!object1->terminating);
6509 assert(!object2->terminating);
6510 /* transfer "named" reference if needed */
6511 if (object1->named && !object2->named) {
6512 assert(object1->ref_count >= 2);
6513 assert(object2->ref_count >= 1);
6514 object1->ref_count--;
6515 object2->ref_count++;
6516 } else if (!object1->named && object2->named) {
6517 assert(object1->ref_count >= 1);
6518 assert(object2->ref_count >= 2);
6519 object1->ref_count++;
6520 object2->ref_count--;
6521 }
6522 __TRANSPOSE_FIELD(named);
6523 /* "shadow_severed" refers to the object not its contents */
6524 __TRANSPOSE_FIELD(phys_contiguous);
6525 __TRANSPOSE_FIELD(nophyscache);
6526 /* "cached_list.next" points to transposed object */
6527 object1->cached_list.next = (queue_entry_t) object2;
6528 object2->cached_list.next = (queue_entry_t) object1;
6529 /* "cached_list.prev" should be NULL */
6530 assert(object1->cached_list.prev == NULL);
6531 assert(object2->cached_list.prev == NULL);
6532 __TRANSPOSE_FIELD(last_alloc);
6533 __TRANSPOSE_FIELD(sequential);
6534 __TRANSPOSE_FIELD(pages_created);
6535 __TRANSPOSE_FIELD(pages_used);
6536 __TRANSPOSE_FIELD(scan_collisions);
6537 __TRANSPOSE_FIELD(cow_hint);
6538 __TRANSPOSE_FIELD(wimg_bits);
6539 __TRANSPOSE_FIELD(set_cache_attr);
6540 __TRANSPOSE_FIELD(code_signed);
6541 object1->transposed = TRUE;
6542 object2->transposed = TRUE;
6543 __TRANSPOSE_FIELD(mapping_in_progress);
6544 __TRANSPOSE_FIELD(volatile_empty);
6545 __TRANSPOSE_FIELD(volatile_fault);
6546 __TRANSPOSE_FIELD(all_reusable);
6547 assert(object1->blocked_access);
6548 assert(object2->blocked_access);
6549 __TRANSPOSE_FIELD(set_cache_attr);
6550 assert(!object1->object_is_shared_cache);
6551 assert(!object2->object_is_shared_cache);
6552 /* ignore purgeable_queue_type and purgeable_queue_group */
6553 assert(!object1->io_tracking);
6554 assert(!object2->io_tracking);
6555 #if VM_OBJECT_ACCESS_TRACKING
6556 assert(!object1->access_tracking);
6557 assert(!object2->access_tracking);
6558 #endif /* VM_OBJECT_ACCESS_TRACKING */
6559 __TRANSPOSE_FIELD(no_tag_update);
6560 #if CONFIG_SECLUDED_MEMORY
6561 assert(!object1->eligible_for_secluded);
6562 assert(!object2->eligible_for_secluded);
6563 assert(!object1->can_grab_secluded);
6564 assert(!object2->can_grab_secluded);
6565 #else /* CONFIG_SECLUDED_MEMORY */
6566 assert(object1->__object3_unused_bits == 0);
6567 assert(object2->__object3_unused_bits == 0);
6568 #endif /* CONFIG_SECLUDED_MEMORY */
6569 #if UPL_DEBUG
6570 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6571 #endif
6572 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6573 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6574 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6575 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6576
6577 #undef __TRANSPOSE_FIELD
6578
6579 retval = KERN_SUCCESS;
6580
6581 done:
6582 /*
6583 * Cleanup.
6584 */
6585 if (tmp_object != VM_OBJECT_NULL) {
6586 vm_object_unlock(tmp_object);
6587 /*
6588 * Re-initialize the temporary object to avoid
6589 * deallocating a real pager.
6590 */
6591 _vm_object_allocate(transpose_size, tmp_object);
6592 vm_object_deallocate(tmp_object);
6593 tmp_object = VM_OBJECT_NULL;
6594 }
6595
6596 if (object1_locked) {
6597 vm_object_unlock(object1);
6598 object1_locked = FALSE;
6599 }
6600 if (object2_locked) {
6601 vm_object_unlock(object2);
6602 object2_locked = FALSE;
6603 }
6604
6605 vm_object_transpose_count++;
6606
6607 return retval;
6608 }
6609
6610
6611 /*
6612 * vm_object_cluster_size
6613 *
6614 * Determine how big a cluster we should issue an I/O for...
6615 *
6616 * Inputs: *start == offset of page needed
6617 * *length == maximum cluster pager can handle
6618 * Outputs: *start == beginning offset of cluster
6619 * *length == length of cluster to try
6620 *
6621 * The original *start will be encompassed by the cluster
6622 *
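 * For example (sketch only): a fault at *start = 0x10000 with a
 * maximum *length of 64KB might come back as *start = 0xc000 and
 * *length = 32KB, i.e. a smaller cluster that still contains the
 * originally faulting page.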
6623 */
6624 extern int speculative_reads_disabled;
6625
6626 /*
6627 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
6628 * to derive min_ph_size and max_ph_size (IMP: bytes, not # of pages) and expect those values to
6629 * always be page-aligned. The derivation could involve operations (e.g. division)
6630 * that could give us non-page-size aligned values if we start out with values that
6631 * are odd multiples of PAGE_SIZE.
6632 */
6633 #if !XNU_TARGET_OS_OSX
6634 unsigned int preheat_max_bytes = (1024 * 512);
6635 #else /* !XNU_TARGET_OS_OSX */
6636 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6637 #endif /* !XNU_TARGET_OS_OSX */
6638 unsigned int preheat_min_bytes = (1024 * 32);
6639
6640
6641 __private_extern__ void
6642 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6643 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6644 {
6645 vm_size_t pre_heat_size;
6646 vm_size_t tail_size;
6647 vm_size_t head_size;
6648 vm_size_t max_length;
6649 vm_size_t cluster_size;
6650 vm_object_offset_t object_size;
6651 vm_object_offset_t orig_start;
6652 vm_object_offset_t target_start;
6653 vm_object_offset_t offset;
6654 vm_behavior_t behavior;
6655 boolean_t look_behind = TRUE;
6656 boolean_t look_ahead = TRUE;
6657 boolean_t isSSD = FALSE;
6658 uint32_t throttle_limit;
6659 int sequential_run;
6660 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6661 vm_size_t max_ph_size;
6662 vm_size_t min_ph_size;
6663
6664 assert( !(*length & PAGE_MASK));
6665 assert( !(*start & PAGE_MASK_64));
6666
6667 /*
6668 * remember maximum length of run requested
6669 */
6670 max_length = *length;
6671 /*
6672 * we'll always return a cluster size of at least
6673 * 1 page, since the original fault must always
6674 * be processed
6675 */
6676 *length = PAGE_SIZE;
6677 *io_streaming = 0;
6678
6679 if (speculative_reads_disabled || fault_info == NULL) {
6680 /*
6681 * no cluster... just fault the page in
6682 */
6683 return;
6684 }
6685 orig_start = *start;
6686 target_start = orig_start;
6687 cluster_size = round_page(fault_info->cluster_size);
6688 behavior = fault_info->behavior;
6689
6690 vm_object_lock(object);
6691
6692 if (object->pager == MEMORY_OBJECT_NULL) {
6693 goto out; /* pager is gone for this object, nothing more to do */
6694 }
6695 vnode_pager_get_isSSD(object->pager, &isSSD);
6696
6697 min_ph_size = round_page(preheat_min_bytes);
6698 max_ph_size = round_page(preheat_max_bytes);
6699
6700 #if XNU_TARGET_OS_OSX
6701 if (isSSD) {
6702 min_ph_size /= 2;
6703 max_ph_size /= 8;
6704
6705 if (min_ph_size & PAGE_MASK_64) {
6706 min_ph_size = trunc_page(min_ph_size);
6707 }
6708
6709 if (max_ph_size & PAGE_MASK_64) {
6710 max_ph_size = trunc_page(max_ph_size);
6711 }
6712 }
6713 #endif /* XNU_TARGET_OS_OSX */
6714
6715 if (min_ph_size < PAGE_SIZE) {
6716 min_ph_size = PAGE_SIZE;
6717 }
6718
6719 if (max_ph_size < PAGE_SIZE) {
6720 max_ph_size = PAGE_SIZE;
6721 } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
6722 max_ph_size = MAX_UPL_TRANSFER_BYTES;
6723 }
6724
6725 if (max_length > max_ph_size) {
6726 max_length = max_ph_size;
6727 }
6728
6729 if (max_length <= PAGE_SIZE) {
6730 goto out;
6731 }
6732
6733 if (object->internal) {
6734 object_size = object->vo_size;
6735 } else {
6736 vnode_pager_get_object_size(object->pager, &object_size);
6737 }
6738
6739 object_size = round_page_64(object_size);
6740
6741 if (orig_start >= object_size) {
6742 /*
6743 * fault occurred beyond the EOF...
6744 * we need to punt w/o changing the
6745 * starting offset
6746 */
6747 goto out;
6748 }
6749 if (object->pages_used > object->pages_created) {
6750 /*
6751 * must have wrapped our 32 bit counters
6752 * so reset
6753 */
6754 object->pages_used = object->pages_created = 0;
6755 }
6756 if ((sequential_run = object->sequential)) {
6757 if (sequential_run < 0) {
6758 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
6759 sequential_run = 0 - sequential_run;
6760 } else {
6761 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6762 }
6763 }
6764 switch (behavior) {
6765 default:
6766 behavior = VM_BEHAVIOR_DEFAULT;
6767 OS_FALLTHROUGH;
6768
6769 case VM_BEHAVIOR_DEFAULT:
6770 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
6771 goto out;
6772 }
6773
6774 if (sequential_run >= (3 * PAGE_SIZE)) {
6775 pre_heat_size = sequential_run + PAGE_SIZE;
6776
6777 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
6778 look_behind = FALSE;
6779 } else {
6780 look_ahead = FALSE;
6781 }
6782
6783 *io_streaming = 1;
6784 } else {
6785 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
6786 /*
6787 * prime the pump
6788 */
6789 pre_heat_size = min_ph_size;
6790 } else {
6791 /*
6792 * Linear growth in PH size: The maximum size is max_length...
6793 * this calculation will result in a size that is neither a
6794 * power of 2 nor a multiple of PAGE_SIZE... so round
6795 * it up to the nearest PAGE_SIZE boundary
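 * e.g. with max_length = 512KB, pages_used = 100 and pages_created = 400,
 * this works out to 512KB * 100 / 400 = 128KB; the code below then raises
 * it to min_ph_size if it came out smaller, or rounds it to a page
 * boundary otherwise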
6796 */
6797 pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
6798
6799 if (pre_heat_size < min_ph_size) {
6800 pre_heat_size = min_ph_size;
6801 } else {
6802 pre_heat_size = round_page(pre_heat_size);
6803 }
6804 }
6805 }
6806 break;
6807
6808 case VM_BEHAVIOR_RANDOM:
6809 if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
6810 goto out;
6811 }
6812 break;
6813
6814 case VM_BEHAVIOR_SEQUENTIAL:
6815 if ((pre_heat_size = cluster_size) == 0) {
6816 pre_heat_size = sequential_run + PAGE_SIZE;
6817 }
6818 look_behind = FALSE;
6819 *io_streaming = 1;
6820
6821 break;
6822
6823 case VM_BEHAVIOR_RSEQNTL:
6824 if ((pre_heat_size = cluster_size) == 0) {
6825 pre_heat_size = sequential_run + PAGE_SIZE;
6826 }
6827 look_ahead = FALSE;
6828 *io_streaming = 1;
6829
6830 break;
6831 }
6832 throttle_limit = (uint32_t) max_length;
6833 assert(throttle_limit == max_length);
6834
6835 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
6836 if (max_length > throttle_limit) {
6837 max_length = throttle_limit;
6838 }
6839 }
6840 if (pre_heat_size > max_length) {
6841 pre_heat_size = max_length;
6842 }
6843
6844 if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
6845 unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
6846
6847 if (consider_free < vm_page_throttle_limit) {
6848 pre_heat_size = trunc_page(pre_heat_size / 16);
6849 } else if (consider_free < vm_page_free_target) {
6850 pre_heat_size = trunc_page(pre_heat_size / 4);
6851 }
6852
6853 if (pre_heat_size < min_ph_size) {
6854 pre_heat_size = min_ph_size;
6855 }
6856 }
6857 if (look_ahead == TRUE) {
6858 if (look_behind == TRUE) {
6859 /*
6860 * if we get here it's due to a random access...
6861 * so we want to center the original fault address
6862 * within the cluster we will issue... make sure
6863 * to calculate 'head_size' as a multiple of PAGE_SIZE...
6864 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
6865 * necessarily an even number of pages so we need to truncate
6866 * the result to a PAGE_SIZE boundary
6867 */
6868 head_size = trunc_page(pre_heat_size / 2);
6869
6870 if (target_start > head_size) {
6871 target_start -= head_size;
6872 } else {
6873 target_start = 0;
6874 }
6875
6876 /*
6877 * 'target_start' at this point represents the beginning offset
6878 * of the cluster we are considering... 'orig_start' will be in
6879 * the center of this cluster if we didn't have to clip the start
6880 * due to running into the start of the file
6881 */
6882 }
6883 if ((target_start + pre_heat_size) > object_size) {
6884 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
6885 }
6886 /*
6887 * at this point calculate the number of pages beyond the original fault
6888 * address that we want to consider... this is guaranteed not to extend beyond
6889 * the current EOF...
6890 */
6891 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
6892 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
6893 } else {
6894 if (pre_heat_size > target_start) {
6895 /*
6896 * since pre_heat_size is always smaller than 2^32,
6897 * if it is larger than target_start (a 64 bit value)
6898 * it is safe to clip target_start to 32 bits
6899 */
6900 pre_heat_size = (vm_size_t) target_start;
6901 }
6902 tail_size = 0;
6903 }
6904 assert( !(target_start & PAGE_MASK_64));
6905 assert( !(pre_heat_size & PAGE_MASK_64));
6906
6907 if (pre_heat_size <= PAGE_SIZE) {
6908 goto out;
6909 }
6910
6911 if (look_behind == TRUE) {
6912 /*
6913 * take a look at the pages before the original
6914 * faulting offset... recalculate this in case
6915 * we had to clip 'pre_heat_size' above to keep
6916 * from running past the EOF.
6917 */
6918 head_size = pre_heat_size - tail_size - PAGE_SIZE;
6919
6920 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
6921 /*
6922 * don't poke below the lowest offset
6923 */
6924 if (offset < fault_info->lo_offset) {
6925 break;
6926 }
6927 /*
6928 * for external objects or internal objects w/o a pager,
6929 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6930 */
6931 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6932 break;
6933 }
6934 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6935 /*
6936 * don't bridge resident pages
6937 */
6938 break;
6939 }
6940 *start = offset;
6941 *length += PAGE_SIZE;
6942 }
6943 }
6944 if (look_ahead == TRUE) {
6945 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
6946 /*
6947 * don't poke above the highest offset
6948 */
6949 if (offset >= fault_info->hi_offset) {
6950 break;
6951 }
6952 assert(offset < object_size);
6953
6954 /*
6955 * for external objects or internal objects w/o a pager,
6956 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6957 */
6958 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6959 break;
6960 }
6961 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6962 /*
6963 * don't bridge resident pages
6964 */
6965 break;
6966 }
6967 *length += PAGE_SIZE;
6968 }
6969 }
6970 out:
6971 if (*length > max_length) {
6972 *length = max_length;
6973 }
6974
6975 vm_object_unlock(object);
6976
6977 DTRACE_VM1(clustersize, vm_size_t, *length);
6978 }
6979
6980
6981 /*
6982 * Allow manipulation of individual page state. This is actually part of
6983 * the UPL regimen but takes place on the VM object rather than on a UPL
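 * (e.g. ops = UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DIRTY marks the page
 * busy and dirty and, if phys_entry is non-NULL, returns its physical
 * page number; a later UPL_POP_CLR | UPL_POP_BUSY clears the busy bit
 * and wakes any waiters)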
6984 */
6985
6986 kern_return_t
6987 vm_object_page_op(
6988 vm_object_t object,
6989 vm_object_offset_t offset,
6990 int ops,
6991 ppnum_t *phys_entry,
6992 int *flags)
6993 {
6994 vm_page_t dst_page;
6995
6996 vm_object_lock(object);
6997
6998 if (ops & UPL_POP_PHYSICAL) {
6999 if (object->phys_contiguous) {
7000 if (phys_entry) {
7001 *phys_entry = (ppnum_t)
7002 (object->vo_shadow_offset >> PAGE_SHIFT);
7003 }
7004 vm_object_unlock(object);
7005 return KERN_SUCCESS;
7006 } else {
7007 vm_object_unlock(object);
7008 return KERN_INVALID_OBJECT;
7009 }
7010 }
7011 if (object->phys_contiguous) {
7012 vm_object_unlock(object);
7013 return KERN_INVALID_OBJECT;
7014 }
7015
7016 while (TRUE) {
7017 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
7018 vm_object_unlock(object);
7019 return KERN_FAILURE;
7020 }
7021
7022 /* Sync up on getting the busy bit */
7023 if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
7024 (((ops & UPL_POP_SET) &&
7025 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7026 /* someone else is playing with the page, we will */
7027 /* have to wait */
7028 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7029 continue;
7030 }
7031
7032 if (ops & UPL_POP_DUMP) {
7033 if (dst_page->vmp_pmapped == TRUE) {
7034 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7035 }
7036
7037 VM_PAGE_FREE(dst_page);
7038 break;
7039 }
7040
7041 if (flags) {
7042 *flags = 0;
7043
7044 /* Get the condition of flags before requested ops */
7045 /* are undertaken */
7046
7047 if (dst_page->vmp_dirty) {
7048 *flags |= UPL_POP_DIRTY;
7049 }
7050 if (dst_page->vmp_free_when_done) {
7051 *flags |= UPL_POP_PAGEOUT;
7052 }
7053 if (dst_page->vmp_precious) {
7054 *flags |= UPL_POP_PRECIOUS;
7055 }
7056 if (dst_page->vmp_absent) {
7057 *flags |= UPL_POP_ABSENT;
7058 }
7059 if (dst_page->vmp_busy) {
7060 *flags |= UPL_POP_BUSY;
7061 }
7062 }
7063
7064 /* The caller should have made a call either concurrent with */
7065 /* or prior to this call to set UPL_POP_BUSY */
7066 if (ops & UPL_POP_SET) {
7067 /* The protection granted with this assert will */
7068 /* not be complete. If the caller violates the */
7069 /* convention and attempts to change page state */
7070 /* without first setting busy we may not see it */
7071 /* because the page may already be busy. However */
7072 /* if such violations occur we will assert sooner */
7073 /* or later. */
7074 assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
7075 if (ops & UPL_POP_DIRTY) {
7076 SET_PAGE_DIRTY(dst_page, FALSE);
7077 }
7078 if (ops & UPL_POP_PAGEOUT) {
7079 dst_page->vmp_free_when_done = TRUE;
7080 }
7081 if (ops & UPL_POP_PRECIOUS) {
7082 dst_page->vmp_precious = TRUE;
7083 }
7084 if (ops & UPL_POP_ABSENT) {
7085 dst_page->vmp_absent = TRUE;
7086 }
7087 if (ops & UPL_POP_BUSY) {
7088 dst_page->vmp_busy = TRUE;
7089 }
7090 }
7091
7092 if (ops & UPL_POP_CLR) {
7093 assert(dst_page->vmp_busy);
7094 if (ops & UPL_POP_DIRTY) {
7095 dst_page->vmp_dirty = FALSE;
7096 }
7097 if (ops & UPL_POP_PAGEOUT) {
7098 dst_page->vmp_free_when_done = FALSE;
7099 }
7100 if (ops & UPL_POP_PRECIOUS) {
7101 dst_page->vmp_precious = FALSE;
7102 }
7103 if (ops & UPL_POP_ABSENT) {
7104 dst_page->vmp_absent = FALSE;
7105 }
7106 if (ops & UPL_POP_BUSY) {
7107 dst_page->vmp_busy = FALSE;
7108 PAGE_WAKEUP(dst_page);
7109 }
7110 }
7111 if (phys_entry) {
7112 /*
7113 * The physical page number will remain valid
7114 * only if the page is kept busy.
7115 */
7116 assert(dst_page->vmp_busy);
7117 *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
7118 }
7119
7120 break;
7121 }
7122
7123 vm_object_unlock(object);
7124 return KERN_SUCCESS;
7125 }
7126
7127 /*
7128 * vm_object_range_op offers performance enhancement over
7129 * vm_object_page_op for page_op functions which do not require page
7130 * level state to be returned from the call. Page_op was created to provide
7131 * a low-cost alternative to page manipulation via UPLs when only a single
7132 * page was involved. The range_op call establishes the ability in the _op
7133 * family of functions to work on multiple pages where the lack of page level
7134 * state handling allows the caller to avoid the overhead of the upl structures.
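 *
 * A sketch of one typical use, in terms of the semantics implemented
 * below (offsets are arbitrary page-aligned values, and the span must
 * fit in 32 bits):
 *
 *	kern_return_t kr;
 *	uint32_t range = 0;
 *
 *	kr = vm_object_range_op(object, offset_beg, offset_end,
 *	    UPL_ROP_DUMP, &range);
 *
 * which frees every resident page it can in [offset_beg, offset_end),
 * sleeping on busy/cleaning pages as it goes, and reports the span it
 * covered in "range".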
7135 */
7136
7137 kern_return_t
7138 vm_object_range_op(
7139 vm_object_t object,
7140 vm_object_offset_t offset_beg,
7141 vm_object_offset_t offset_end,
7142 int ops,
7143 uint32_t *range)
7144 {
7145 vm_object_offset_t offset;
7146 vm_page_t dst_page;
7147
7148 if (offset_end - offset_beg > (uint32_t) -1) {
7149 /* range is too big and would overflow "*range" */
7150 return KERN_INVALID_ARGUMENT;
7151 }
7152 if (object->resident_page_count == 0) {
7153 if (range) {
7154 if (ops & UPL_ROP_PRESENT) {
7155 *range = 0;
7156 } else {
7157 *range = (uint32_t) (offset_end - offset_beg);
7158 assert(*range == (offset_end - offset_beg));
7159 }
7160 }
7161 return KERN_SUCCESS;
7162 }
7163 vm_object_lock(object);
7164
7165 if (object->phys_contiguous) {
7166 vm_object_unlock(object);
7167 return KERN_INVALID_OBJECT;
7168 }
7169
7170 offset = offset_beg & ~PAGE_MASK_64;
7171
7172 while (offset < offset_end) {
7173 dst_page = vm_page_lookup(object, offset);
7174 if (dst_page != VM_PAGE_NULL) {
7175 if (ops & UPL_ROP_DUMP) {
7176 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
7177 /*
7178 * someone else is playing with the
7179 * page, we will have to wait
7180 */
7181 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7182 /*
7183 * need to look the page up again since its
7184 * state may have changed while we slept;
7185 * it might even belong to a different object
7186 * at this point
7187 */
7188 continue;
7189 }
7190 if (dst_page->vmp_laundry) {
7191 vm_pageout_steal_laundry(dst_page, FALSE);
7192 }
7193
7194 if (dst_page->vmp_pmapped == TRUE) {
7195 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7196 }
7197
7198 VM_PAGE_FREE(dst_page);
7199 } else if ((ops & UPL_ROP_ABSENT)
7200 && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
7201 break;
7202 }
7203 } else if (ops & UPL_ROP_PRESENT) {
7204 break;
7205 }
7206
7207 offset += PAGE_SIZE;
7208 }
7209 vm_object_unlock(object);
7210
7211 if (range) {
7212 if (offset > offset_end) {
7213 offset = offset_end;
7214 }
7215 if (offset > offset_beg) {
7216 *range = (uint32_t) (offset - offset_beg);
7217 assert(*range == (offset - offset_beg));
7218 } else {
7219 *range = 0;
7220 }
7221 }
7222 return KERN_SUCCESS;
7223 }
7224
7225 /*
7226 * Used to point a pager directly to a range of memory (when the pager may be associated
7227 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
7228 * expect that the virtual address will denote the start of a range that is physically contiguous.
7229 */
7230 kern_return_t
7231 pager_map_to_phys_contiguous(
7232 memory_object_control_t object,
7233 memory_object_offset_t offset,
7234 addr64_t base_vaddr,
7235 vm_size_t size)
7236 {
7237 ppnum_t page_num;
7238 boolean_t clobbered_private;
7239 kern_return_t retval;
7240 vm_object_t pager_object;
7241
7242 page_num = pmap_find_phys(kernel_pmap, base_vaddr);
7243
7244 if (!page_num) {
7245 retval = KERN_FAILURE;
7246 goto out;
7247 }
7248
7249 pager_object = memory_object_control_to_vm_object(object);
7250
7251 if (!pager_object) {
7252 retval = KERN_FAILURE;
7253 goto out;
7254 }
7255
7256 clobbered_private = pager_object->private;
7257 if (pager_object->private != TRUE) {
7258 vm_object_lock(pager_object);
7259 pager_object->private = TRUE;
7260 vm_object_unlock(pager_object);
7261 }
7262 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7263
7264 if (retval != KERN_SUCCESS) {
7265 if (pager_object->private != clobbered_private) {
7266 vm_object_lock(pager_object);
7267 pager_object->private = clobbered_private;
7268 vm_object_unlock(pager_object);
7269 }
7270 }
7271
7272 out:
7273 return retval;
7274 }
7275
7276 uint32_t scan_object_collision = 0;
7277
7278 void
7279 vm_object_lock(vm_object_t object)
7280 {
7281 if (object == vm_pageout_scan_wants_object) {
7282 scan_object_collision++;
7283 mutex_pause(2);
7284 }
7285 DTRACE_VM(vm_object_lock_w);
7286 lck_rw_lock_exclusive(&object->Lock);
7287 #if DEVELOPMENT || DEBUG
7288 object->Lock_owner = current_thread();
7289 #endif
7290 }
7291
7292 boolean_t
7293 vm_object_lock_avoid(vm_object_t object)
7294 {
7295 if (object == vm_pageout_scan_wants_object) {
7296 scan_object_collision++;
7297 return TRUE;
7298 }
7299 return FALSE;
7300 }
7301
7302 boolean_t
7303 _vm_object_lock_try(vm_object_t object)
7304 {
7305 boolean_t retval;
7306
7307 retval = lck_rw_try_lock_exclusive(&object->Lock);
7308 #if DEVELOPMENT || DEBUG
7309 if (retval == TRUE) {
7310 DTRACE_VM(vm_object_lock_w);
7311 object->Lock_owner = current_thread();
7312 }
7313 #endif
7314 return retval;
7315 }
7316
7317 boolean_t
7318 vm_object_lock_try(vm_object_t object)
7319 {
7320 /*
7321 * Called from hibernate path so check before blocking.
7322 */
7323 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7324 mutex_pause(2);
7325 }
7326 return _vm_object_lock_try(object);
7327 }
7328
7329 /*
7330 * Lock the object exclusive.
7331 *
7332 * Returns true iff the thread had to spin or block before
7333 * acquiring the lock.
7334 */
7335 bool
7336 vm_object_lock_check_contended(vm_object_t object)
7337 {
7338 bool contended;
7339 if (object == vm_pageout_scan_wants_object) {
7340 scan_object_collision++;
7341 mutex_pause(2);
7342 }
7343 DTRACE_VM(vm_object_lock_w);
7344 contended = lck_rw_lock_exclusive_check_contended(&object->Lock);
7345 #if DEVELOPMENT || DEBUG
7346 object->Lock_owner = current_thread();
7347 #endif
7348 return contended;
7349 }
7350
7351 void
7352 vm_object_lock_shared(vm_object_t object)
7353 {
7354 if (vm_object_lock_avoid(object)) {
7355 mutex_pause(2);
7356 }
7357 DTRACE_VM(vm_object_lock_r);
7358 lck_rw_lock_shared(&object->Lock);
7359 }
7360
7361 boolean_t
7362 vm_object_lock_yield_shared(vm_object_t object)
7363 {
7364 boolean_t retval = FALSE, force_yield = FALSE;
7365
7366 vm_object_lock_assert_shared(object);
7367
7368 force_yield = vm_object_lock_avoid(object);
7369
7370 retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7371 if (retval) {
7372 DTRACE_VM(vm_object_lock_yield);
7373 }
7374
7375 return retval;
7376 }
7377
7378 boolean_t
7379 vm_object_lock_try_shared(vm_object_t object)
7380 {
7381 boolean_t retval;
7382
7383 if (vm_object_lock_avoid(object)) {
7384 mutex_pause(2);
7385 }
7386 retval = lck_rw_try_lock_shared(&object->Lock);
7387 if (retval) {
7388 DTRACE_VM(vm_object_lock_r);
7389 }
7390 return retval;
7391 }
7392
7393 boolean_t
7394 vm_object_lock_upgrade(vm_object_t object)
7395 {
7396 boolean_t retval;
7397
7398 retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7399 #if DEVELOPMENT || DEBUG
7400 if (retval == TRUE) {
7401 DTRACE_VM(vm_object_lock_w);
7402 object->Lock_owner = current_thread();
7403 }
7404 #endif
7405 return retval;
7406 }
7407
7408 void
7409 vm_object_unlock(vm_object_t object)
7410 {
7411 #if DEVELOPMENT || DEBUG
7412 if (object->Lock_owner) {
7413 if (object->Lock_owner != current_thread()) {
7414 panic("vm_object_unlock: not owner - %p\n", object);
7415 }
7416 object->Lock_owner = 0;
7417 DTRACE_VM(vm_object_unlock);
7418 }
7419 #endif
7420 lck_rw_done(&object->Lock);
7421 }
7422
7423
7424 unsigned int vm_object_change_wimg_mode_count = 0;
7425
7426 /*
7427 * The object must be locked
7428 */
7429 void
7430 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7431 {
7432 vm_page_t p;
7433
7434 vm_object_lock_assert_exclusive(object);
7435
7436 vm_object_paging_wait(object, THREAD_UNINT);
7437
7438 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
7439 if (!p->vmp_fictitious) {
7440 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
7441 }
7442 }
7443 if (wimg_mode == VM_WIMG_USE_DEFAULT) {
7444 object->set_cache_attr = FALSE;
7445 } else {
7446 object->set_cache_attr = TRUE;
7447 }
7448
7449 object->wimg_bits = wimg_mode;
7450
7451 vm_object_change_wimg_mode_count++;
7452 }
7453
7454 #if CONFIG_FREEZE
7455
7456 extern struct freezer_context freezer_context_global;
7457
7458 /*
7459 * This routine does the "relocation" of previously
7460 * compressed pages belonging to this object that are
7461 * residing in a number of compressed segments into
7462 * a set of compressed segments dedicated to hold
7463 * compressed pages belonging to this object.
7464 */
7465
7466 extern AbsoluteTime c_freezer_last_yield_ts;
7467
7468 #define MAX_FREE_BATCH 32
7469 #define FREEZER_DUTY_CYCLE_ON_MS 5
7470 #define FREEZER_DUTY_CYCLE_OFF_MS 5
7471
7472 static int c_freezer_should_yield(void);
7473
7474
7475 static int
7476 c_freezer_should_yield()
7477 {
7478 AbsoluteTime cur_time;
7479 uint64_t nsecs;
7480
7481 assert(c_freezer_last_yield_ts);
7482 clock_get_uptime(&cur_time);
7483
7484 SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7485 absolutetime_to_nanoseconds(cur_time, &nsecs);
7486
7487 if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7488 return 1;
7489 }
7490 return 0;
7491 }
7492
7493
7494 void
7495 vm_object_compressed_freezer_done()
7496 {
7497 vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7498 }
7499
7500
7501 uint32_t
7502 vm_object_compressed_freezer_pageout(
7503 vm_object_t object, uint32_t dirty_budget)
7504 {
7505 vm_page_t p;
7506 vm_page_t local_freeq = NULL;
7507 int local_freed = 0;
7508 kern_return_t retval = KERN_SUCCESS;
7509 int obj_resident_page_count_snapshot = 0;
7510 uint32_t paged_out_count = 0;
7511
7512 assert(object != VM_OBJECT_NULL);
7513 assert(object->internal);
7514
7515 vm_object_lock(object);
7516
7517 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7518 if (!object->pager_initialized) {
7519 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7520
7521 if (!object->pager_initialized) {
7522 vm_object_compressor_pager_create(object);
7523 }
7524 }
7525
7526 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7527 vm_object_unlock(object);
7528 return paged_out_count;
7529 }
7530 }
7531
7532 /*
7533 * We could be freezing a shared internal object that might
7534 * be part of some other thread's current VM operations.
7535 * We skip the object if it has paging or activity in progress,
7536 * because we could be here a long time with the map lock held.
7537 *
7538 * Note: We are holding the map locked while we wait.
7539 * This is fine in the freezer path because the task
7540 * is suspended and so this latency is acceptable.
7541 */
7542 if (object->paging_in_progress || object->activity_in_progress) {
7543 vm_object_unlock(object);
7544 return paged_out_count;
7545 }
7546
7547 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7548 vm_object_offset_t curr_offset = 0;
7549
7550 /*
7551 * Go through the object and make sure that any
7552 * previously compressed pages are relocated into
7553 * a compressed segment associated with our "freezer_ctx_chead".
7554 */
7555 while (curr_offset < object->vo_size) {
7556 curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7557
7558 if (curr_offset == (vm_object_offset_t) -1) {
7559 break;
7560 }
7561
7562 retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7563
7564 if (retval != KERN_SUCCESS) {
7565 break;
7566 }
7567
7568 curr_offset += PAGE_SIZE_64;
7569 }
7570 }
7571
7572 /*
7573 * We can't hold the object lock while heading down into the compressed pager
7574 * layer because we might need the kernel map lock down there to allocate new
7575 * compressor data structures. And if this same object is mapped in the kernel
7576 * and there's a fault on it, then that thread will want the object lock while
7577 * holding the kernel map lock.
7578 *
7579 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7580 * we won't be stuck in an infinite loop if the same page(s) keep getting
7581 * decompressed. So we grab a snapshot of the number of pages in the object and
7582 * we won't process any more than that number of pages.
7583 */
7584
7585 obj_resident_page_count_snapshot = object->resident_page_count;
7586
7587 vm_object_activity_begin(object);
7588
7589 while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7590 p = (vm_page_t)vm_page_queue_first(&object->memq);
7591
7592 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
7593
7594 vm_page_lockspin_queues();
7595
7596 if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) {
7597 vm_page_unlock_queues();
7598
7599 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
7600
7601 vm_page_queue_remove(&object->memq, p, vmp_listq);
7602 vm_page_queue_enter(&object->memq, p, vmp_listq);
7603
7604 continue;
7605 }
7606
7607 if (p->vmp_pmapped == TRUE) {
7608 int refmod_state, pmap_flags;
7609
7610 if (p->vmp_dirty || p->vmp_precious) {
7611 pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7612 } else {
7613 pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7614 }
7615
7616 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7617 if (refmod_state & VM_MEM_MODIFIED) {
7618 SET_PAGE_DIRTY(p, FALSE);
7619 }
7620 }
7621
7622 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7623 /*
7624 * Clean and non-precious page.
7625 */
7626 vm_page_unlock_queues();
7627 VM_PAGE_FREE(p);
7628
7629 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
7630 continue;
7631 }
7632
7633 if (p->vmp_laundry) {
7634 vm_pageout_steal_laundry(p, TRUE);
7635 }
7636
7637 vm_page_queues_remove(p, TRUE);
7638
7639 vm_page_unlock_queues();
7640
7641
7642 /*
7643 * In case the compressor fails to compress this page, we need it at
7644 * the back of the object memq so that we don't keep trying to process it.
7645 * Make the move here while we have the object lock held.
7646 */
7647
7648 vm_page_queue_remove(&object->memq, p, vmp_listq);
7649 vm_page_queue_enter(&object->memq, p, vmp_listq);
7650
7651 /*
7652 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7653 *
7654 * Mark the page busy so no one messes with it while we have the object lock dropped.
7655 */
7656 p->vmp_busy = TRUE;
7657
7658 vm_object_activity_begin(object);
7659
7660 vm_object_unlock(object);
7661
7662 if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7663 (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7664 p) == KERN_SUCCESS) {
7665 /*
7666 * page has already been un-tabled from the object via 'vm_page_remove'
7667 */
7668 p->vmp_snext = local_freeq;
7669 local_freeq = p;
7670 local_freed++;
7671 paged_out_count++;
7672
7673 if (local_freed >= MAX_FREE_BATCH) {
7674 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7675
7676 vm_page_free_list(local_freeq, TRUE);
7677
7678 local_freeq = NULL;
7679 local_freed = 0;
7680 }
7681 freezer_context_global.freezer_ctx_uncompressed_pages++;
7682 }
7683 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
7684
7685 if (local_freed == 0 && c_freezer_should_yield()) {
7686 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7687 clock_get_uptime(&c_freezer_last_yield_ts);
7688 }
7689
7690 vm_object_lock(object);
7691 }
7692
7693 if (local_freeq) {
7694 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7695
7696 vm_page_free_list(local_freeq, TRUE);
7697
7698 local_freeq = NULL;
7699 local_freed = 0;
7700 }
7701
7702 vm_object_activity_end(object);
7703
7704 vm_object_unlock(object);
7705
7706 if (c_freezer_should_yield()) {
7707 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7708 clock_get_uptime(&c_freezer_last_yield_ts);
7709 }
7710 return paged_out_count;
7711 }
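/*
 * A hedged sketch of a typical freezer caller; the object iteration is
 * hypothetical and only illustrates that the return value is the number
 * of pages paged out, to be charged against the remaining dirty budget:
 *
 *	uint32_t budget = dirty_budget;
 *
 *	for each internal VM object of the task being frozen {
 *		budget -= vm_object_compressed_freezer_pageout(object, budget);
 *		if (budget == 0) {
 *			break;
 *		}
 *	}
 *	vm_object_compressed_freezer_done();
 */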
7712
7713 #endif /* CONFIG_FREEZE */
7714
7715
7716 void
7717 vm_object_pageout(
7718 vm_object_t object)
7719 {
7720 vm_page_t p, next;
7721 struct vm_pageout_queue *iq;
7722
7723 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
7724 return;
7725 }
7726
7727 iq = &vm_pageout_queue_internal;
7728
7729 assert(object != VM_OBJECT_NULL);
7730
7731 vm_object_lock(object);
7732
7733 if (!object->internal ||
7734 object->terminating ||
7735 !object->alive) {
7736 vm_object_unlock(object);
7737 return;
7738 }
7739
7740 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7741 if (!object->pager_initialized) {
7742 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7743
7744 if (!object->pager_initialized) {
7745 vm_object_compressor_pager_create(object);
7746 }
7747 }
7748
7749 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7750 vm_object_unlock(object);
7751 return;
7752 }
7753 }
7754
7755 ReScan:
7756 next = (vm_page_t)vm_page_queue_first(&object->memq);
7757
7758 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
7759 p = next;
7760 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
7761
7762 assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
7763
7764 if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
7765 p->vmp_cleaning ||
7766 p->vmp_laundry ||
7767 p->vmp_busy ||
7768 p->vmp_absent ||
7769 p->vmp_error ||
7770 p->vmp_fictitious ||
7771 VM_PAGE_WIRED(p)) {
7772 /*
7773 * Page is already being cleaned or can't be cleaned.
7774 */
7775 continue;
7776 }
7777 if (vm_compressor_low_on_space()) {
7778 break;
7779 }
7780
7781 /* Throw to the pageout queue */
7782
7783 vm_page_lockspin_queues();
7784
7785 if (VM_PAGE_Q_THROTTLED(iq)) {
7786 iq->pgo_draining = TRUE;
7787
7788 assert_wait((event_t) (&iq->pgo_laundry + 1),
7789 THREAD_INTERRUPTIBLE);
7790 vm_page_unlock_queues();
7791 vm_object_unlock(object);
7792
7793 thread_block(THREAD_CONTINUE_NULL);
7794
7795 vm_object_lock(object);
7796 goto ReScan;
7797 }
7798
7799 assert(!p->vmp_fictitious);
7800 assert(!p->vmp_busy);
7801 assert(!p->vmp_absent);
7802 assert(!p->vmp_unusual);
7803 assert(!p->vmp_error);
7804 assert(!VM_PAGE_WIRED(p));
7805 assert(!p->vmp_cleaning);
7806
7807 if (p->vmp_pmapped == TRUE) {
7808 int refmod_state;
7809 int pmap_options;
7810
7811 /*
7812 * Tell pmap the page should be accounted
7813 * for as "compressed" if it's been modified.
7814 */
7815 pmap_options =
7816 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7817 if (p->vmp_dirty || p->vmp_precious) {
7818 /*
7819 * We already know it's been modified,
7820 * so tell pmap to account for it
7821 * as "compressed".
7822 */
7823 pmap_options = PMAP_OPTIONS_COMPRESSOR;
7824 }
7825 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
7826 pmap_options,
7827 NULL);
7828 if (refmod_state & VM_MEM_MODIFIED) {
7829 SET_PAGE_DIRTY(p, FALSE);
7830 }
7831 }
7832
7833 if (!p->vmp_dirty && !p->vmp_precious) {
7834 vm_page_unlock_queues();
7835 VM_PAGE_FREE(p);
7836 continue;
7837 }
7838 vm_page_queues_remove(p, TRUE);
7839
7840 vm_pageout_cluster(p);
7841
7842 vm_page_unlock_queues();
7843 }
7844 vm_object_unlock(object);
7845 }
7846
7847
7848 #if CONFIG_IOSCHED
7849 void
7850 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
7851 {
7852 io_reprioritize_req_t req;
7853 struct vnode *devvp = NULL;
7854
7855 if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7856 return;
7857 }
7858
7859 /*
7860 * Create the request for I/O reprioritization.
7861 * We use the noblock variant of zalloc because we're holding the object
7862 * lock here and we could cause a deadlock in low memory conditions.
7863 */
7864 req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
7865 if (req == NULL) {
7866 return;
7867 }
7868 req->blkno = blkno;
7869 req->len = len;
7870 req->priority = prio;
7871 req->devvp = devvp;
7872
7873 /* Insert request into the reprioritization list */
7874 IO_REPRIORITIZE_LIST_LOCK();
7875 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7876 IO_REPRIORITIZE_LIST_UNLOCK();
7877
7878 /* Wakeup reprioritize thread */
7879 IO_REPRIO_THREAD_WAKEUP();
7880
7881 return;
7882 }
7883
7884 void
7885 vm_decmp_upl_reprioritize(upl_t upl, int prio)
7886 {
7887 int offset;
7888 vm_object_t object;
7889 io_reprioritize_req_t req;
7890 struct vnode *devvp = NULL;
7891 uint64_t blkno;
7892 uint32_t len;
7893 upl_t io_upl;
7894 uint64_t *io_upl_reprio_info;
7895 int io_upl_size;
7896
7897 if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
7898 return;
7899 }
7900
7901 /*
7902 * We don't want to perform any allocations with the upl lock held since that might
7903 * result in a deadlock. If the system is low on memory, the pageout thread would
7904 * try to page out memory and might wait on this lock. If we are waiting for memory to
7905 * be freed up by the pageout thread, that would be a deadlock.
7906 */
7907
7908
7909 /* First step is just to get the size of the upl to find out how big the reprio info is */
7910 if (!upl_try_lock(upl)) {
7911 return;
7912 }
7913
7914 if (upl->decmp_io_upl == NULL) {
7915 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7916 upl_unlock(upl);
7917 return;
7918 }
7919
7920 io_upl = upl->decmp_io_upl;
7921 assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
7922 assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
7923 "upl %p offset 0x%llx size 0x%x\n",
7924 io_upl, io_upl->u_offset, io_upl->u_size);
7925 io_upl_size = io_upl->u_size;
7926 upl_unlock(upl);
7927
7928 /* Now perform the allocation */
7929 io_upl_reprio_info = (uint64_t *)kheap_alloc(KHEAP_TEMP,
7930 sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
7931 if (io_upl_reprio_info == NULL) {
7932 return;
7933 }
7934
7935 /* Now again take the lock, recheck the state and grab out the required info */
7936 if (!upl_try_lock(upl)) {
7937 goto out;
7938 }
7939
7940 if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
7941 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7942 upl_unlock(upl);
7943 goto out;
7944 }
7945 memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
7946 sizeof(uint64_t) * atop(io_upl_size));
7947
7948 /* Get the VM object for this UPL */
7949 if (io_upl->flags & UPL_SHADOWED) {
7950 object = io_upl->map_object->shadow;
7951 } else {
7952 object = io_upl->map_object;
7953 }
7954
7955 /* Get the dev vnode ptr for this object */
7956 if (!object || !object->pager ||
7957 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7958 upl_unlock(upl);
7959 goto out;
7960 }
7961
7962 upl_unlock(upl);
7963
7964 /* Now we have all the information needed to do the expedite */
7965
7966 offset = 0;
7967 while (offset < io_upl_size) {
7968 blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
7969 len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
7970
7971 /*
7972 * This implementation may cause some spurious expedites due to the
7973 * fact that we don't clean up the blkno & len in the upl_reprio_info
7974 * even after the I/O is complete.
7975 */
7976
7977 if (blkno != 0 && len != 0) {
7978 /* Create the request for I/O reprioritization */
7979 req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
7980 assert(req != NULL);
7981 req->blkno = blkno;
7982 req->len = len;
7983 req->priority = prio;
7984 req->devvp = devvp;
7985
7986 /* Insert request into the reprioritization list */
7987 IO_REPRIORITIZE_LIST_LOCK();
7988 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7989 IO_REPRIORITIZE_LIST_UNLOCK();
7990
7991 offset += len;
7992 } else {
7993 offset += PAGE_SIZE;
7994 }
7995 }
7996
7997 /* Wakeup reprioritize thread */
7998 IO_REPRIO_THREAD_WAKEUP();
7999
8000 out:
8001 kheap_free(KHEAP_TEMP, io_upl_reprio_info,
8002 sizeof(uint64_t) * atop(io_upl_size));
8003 }
8004
8005 void
8006 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
8007 {
8008 upl_t upl;
8009 upl_page_info_t *pl;
8010 unsigned int i, num_pages;
8011 int cur_tier;
8012
8013 cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
8014
8015 /*
8016 * Scan through all UPLs associated with the object to find the
8017 * UPL containing the contended page.
8018 */
8019 queue_iterate(&o->uplq, upl, upl_t, uplq) {
8020 if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
8021 continue;
8022 }
8023 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
8024 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
8025 "upl %p offset 0x%llx size 0x%x\n",
8026 upl, upl->u_offset, upl->u_size);
8027 num_pages = (upl->u_size / PAGE_SIZE);
8028
8029 /*
8030 * For each page in the UPL page list, see if it matches the contended
8031 * page and was issued as a low prio I/O.
8032 */
8033 for (i = 0; i < num_pages; i++) {
8034 if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
8035 if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
8036 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8037 VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
8038 vm_decmp_upl_reprioritize(upl, cur_tier);
8039 break;
8040 }
8041 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8042 upl->upl_reprio_info[i], upl->upl_priority, 0);
8043 if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
8044 vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
8045 }
8046 break;
8047 }
8048 }
8049 /* Check if we found any hits */
8050 if (i != num_pages) {
8051 break;
8052 }
8053 }
8054
8055 return;
8056 }
8057
8058 wait_result_t
8059 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
8060 {
8061 wait_result_t ret;
8062
8063 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
8064
8065 if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
8066 /*
8067 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
8068 */
8069 vm_page_handle_prio_inversion(o, m);
8070 }
8071 m->vmp_wanted = TRUE;
8072 ret = thread_sleep_vm_object(o, m, interruptible);
8073 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
8074 return ret;
8075 }
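/*
 * A minimal sketch of the common caller pattern, assuming the caller
 * holds the object lock and needs to wait for a busy page (this is
 * the usual PAGE_SLEEP-style loop; the details surrounding the wait
 * are illustrative):
 *
 *	while (m->vmp_busy) {
 *		vm_page_sleep(object, m, THREAD_UNINT);
 *	}
 */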
8076
8077 static void
8078 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
8079 {
8080 io_reprioritize_req_t req = NULL;
8081
8082 while (1) {
8083 IO_REPRIORITIZE_LIST_LOCK();
8084 if (queue_empty(&io_reprioritize_list)) {
8085 IO_REPRIORITIZE_LIST_UNLOCK();
8086 break;
8087 }
8088
8089 queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8090 IO_REPRIORITIZE_LIST_UNLOCK();
8091
8092 vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
8093 zfree(io_reprioritize_req_zone, req);
8094 }
8095
8096 IO_REPRIO_THREAD_CONTINUATION();
8097 }
8098 #endif
8099
8100 #if VM_OBJECT_ACCESS_TRACKING
8101 void
8102 vm_object_access_tracking(
8103 vm_object_t object,
8104 int *access_tracking_p,
8105 uint32_t *access_tracking_reads_p,
8106 uint32_t *access_tracking_writes_p)
8107 {
8108 int access_tracking;
8109
8110 access_tracking = !!*access_tracking_p;
8111
8112 vm_object_lock(object);
8113 *access_tracking_p = object->access_tracking;
8114 if (access_tracking_reads_p) {
8115 *access_tracking_reads_p = object->access_tracking_reads;
8116 }
8117 if (access_tracking_writes_p) {
8118 *access_tracking_writes_p = object->access_tracking_writes;
8119 }
8120 object->access_tracking = access_tracking;
8121 object->access_tracking_reads = 0;
8122 object->access_tracking_writes = 0;
8123 vm_object_unlock(object);
8124
8125 if (access_tracking) {
8126 vm_object_pmap_protect_options(object,
8127 0,
8128 object->vo_size,
8129 PMAP_NULL,
8130 PAGE_SIZE,
8131 0,
8132 VM_PROT_NONE,
8133 0);
8134 }
8135 }
8136 #endif /* VM_OBJECT_ACCESS_TRACKING */
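/*
 * A hedged usage sketch: vm_object_access_tracking() both reports and
 * re-arms the tracking state, so a monitoring caller might poll it as
 * below (the polling context is illustrative).  On return, "reads" and
 * "writes" hold the counts accumulated since the previous call, the
 * counters are reset, and, if tracking stays enabled, the object is
 * re-protected so that new accesses fault and get counted again:
 *
 *	int track = 1;
 *	uint32_t reads, writes;
 *
 *	vm_object_access_tracking(object, &track, &reads, &writes);
 */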
8137
8138 void
8139 vm_object_ledger_tag_ledgers(
8140 vm_object_t object,
8141 int *ledger_idx_volatile,
8142 int *ledger_idx_nonvolatile,
8143 int *ledger_idx_volatile_compressed,
8144 int *ledger_idx_nonvolatile_compressed,
8145 boolean_t *do_footprint)
8146 {
8147 assert(object->shadow == VM_OBJECT_NULL);
8148
8149 *do_footprint = !object->vo_no_footprint;
8150
8151 switch (object->vo_ledger_tag) {
8152 case VM_LEDGER_TAG_NONE:
8153 /*
8154 * Regular purgeable memory:
8155 * counts in footprint only when nonvolatile.
8156 */
8157 *do_footprint = TRUE;
8158 assert(object->purgable != VM_PURGABLE_DENY);
8159 *ledger_idx_volatile = task_ledgers.purgeable_volatile;
8160 *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
8161 *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
8162 *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
8163 break;
8164 case VM_LEDGER_TAG_DEFAULT:
8165 /*
8166 * "default" tagged memory:
8167 * counts in footprint only when nonvolatile and not marked
8168 * as "no_footprint".
8169 */
8170 *ledger_idx_volatile = task_ledgers.tagged_nofootprint;
8171 *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8172 if (*do_footprint) {
8173 *ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
8174 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
8175 } else {
8176 *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
8177 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8178 }
8179 break;
8180 case VM_LEDGER_TAG_NETWORK:
8181 /*
8182 * "network" tagged memory:
8183 * never counts in footprint.
8184 */
8185 *do_footprint = FALSE;
8186 *ledger_idx_volatile = task_ledgers.network_volatile;
8187 *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
8188 *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
8189 *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
8190 break;
8191 case VM_LEDGER_TAG_MEDIA:
8192 /*
8193 * "media" tagged memory:
8194 * counts in footprint only when nonvolatile and not marked
8195 * as "no footprint".
8196 */
8197 *ledger_idx_volatile = task_ledgers.media_nofootprint;
8198 *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
8199 if (*do_footprint) {
8200 *ledger_idx_nonvolatile = task_ledgers.media_footprint;
8201 *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
8202 } else {
8203 *ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
8204 *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
8205 }
8206 break;
8207 case VM_LEDGER_TAG_GRAPHICS:
8208 /*
8209 * "graphics" tagged memory:
8210 * counts in footprint only when nonvolatile and not marked
8211 * as "no footprint".
8212 */
8213 *ledger_idx_volatile = task_ledgers.graphics_nofootprint;
8214 *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8215 if (*do_footprint) {
8216 *ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
8217 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
8218 } else {
8219 *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
8220 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8221 }
8222 break;
8223 case VM_LEDGER_TAG_NEURAL:
8224 /*
8225 * "neural" tagged memory:
8226 * counts in footprint only when nonvolatile and not marked
8227 * as "no footprint".
8228 */
8229 *ledger_idx_volatile = task_ledgers.neural_nofootprint;
8230 *ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
8231 if (*do_footprint) {
8232 *ledger_idx_nonvolatile = task_ledgers.neural_footprint;
8233 *ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
8234 } else {
8235 *ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
8236 *ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
8237 }
8238 break;
8239 default:
8240 panic("%s: object %p has unsupported ledger_tag %d\n",
8241 __FUNCTION__, object, object->vo_ledger_tag);
8242 }
8243 }
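/*
 * A short sketch of how the returned ledger indices are typically
 * consumed, assuming one nonvolatile resident page is being charged
 * to the object's owner (vm_object_ownership_change() below applies
 * the same pattern to whole objects):
 *
 *	vm_object_ledger_tag_ledgers(object,
 *	    &ledger_idx_volatile,
 *	    &ledger_idx_nonvolatile,
 *	    &ledger_idx_volatile_compressed,
 *	    &ledger_idx_nonvolatile_compressed,
 *	    &do_footprint);
 *	ledger_credit(owner->ledger, ledger_idx_nonvolatile, ptoa_64(1));
 *	if (do_footprint) {
 *		ledger_credit(owner->ledger,
 *		    task_ledgers.phys_footprint, ptoa_64(1));
 *	}
 */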
8244
8245 kern_return_t
8246 vm_object_ownership_change(
8247 vm_object_t object,
8248 int new_ledger_tag,
8249 task_t new_owner,
8250 int new_ledger_flags,
8251 boolean_t old_task_objq_locked)
8252 {
8253 int old_ledger_tag;
8254 task_t old_owner;
8255 int resident_count, wired_count;
8256 unsigned int compressed_count;
8257 int ledger_idx_volatile;
8258 int ledger_idx_nonvolatile;
8259 int ledger_idx_volatile_compressed;
8260 int ledger_idx_nonvolatile_compressed;
8261 int ledger_idx;
8262 int ledger_idx_compressed;
8263 boolean_t do_footprint, old_no_footprint, new_no_footprint;
8264 boolean_t new_task_objq_locked;
8265
8266 vm_object_lock_assert_exclusive(object);
8267
8268 if (!object->internal) {
8269 return KERN_INVALID_ARGUMENT;
8270 }
8271 if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
8272 object->purgable == VM_PURGABLE_DENY) {
8273 /* non-purgeable memory must have a valid non-zero ledger tag */
8274 return KERN_INVALID_ARGUMENT;
8275 }
8276 if (new_ledger_tag < 0 ||
8277 new_ledger_tag > VM_LEDGER_TAG_MAX) {
8278 return KERN_INVALID_ARGUMENT;
8279 }
8280 if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
8281 return KERN_INVALID_ARGUMENT;
8282 }
8283 if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
8284 object->purgable == VM_PURGABLE_DENY) {
8285 /*
8286 * This VM object is neither ledger-tagged nor purgeable.
8287 * We can convert it to "ledger tag" ownership iff it
8288 * has not been used at all yet (no resident pages and
8289 * no pager) and it's going to be assigned to a valid task.
8290 */
8291 if (object->resident_page_count != 0 ||
8292 object->pager != NULL ||
8293 object->pager_created ||
8294 object->ref_count != 1 ||
8295 object->vo_owner != TASK_NULL ||
8296 object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
8297 new_owner == TASK_NULL) {
8298 return KERN_FAILURE;
8299 }
8300 }
8301
8302 if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
8303 new_no_footprint = TRUE;
8304 } else {
8305 new_no_footprint = FALSE;
8306 }
8307 #if __arm64__
8308 if (!new_no_footprint &&
8309 object->purgable != VM_PURGABLE_DENY &&
8310 new_owner != TASK_NULL &&
8311 new_owner != VM_OBJECT_OWNER_DISOWNED &&
8312 new_owner->task_legacy_footprint) {
8313 /*
8314 * This task has been granted "legacy footprint" and should
8315 * not be charged for its IOKit purgeable memory. Since we
8316 * might now change the accounting of such memory to the
8317 * "graphics" ledger, for example, give it the "no footprint"
8318 * option.
8319 */
8320 new_no_footprint = TRUE;
8321 }
8322 #endif /* __arm64__ */
8323 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
8324 assert(object->shadow == VM_OBJECT_NULL);
8325 assert(object->copy == VM_OBJECT_NULL);
8326
8327 old_ledger_tag = object->vo_ledger_tag;
8328 old_no_footprint = object->vo_no_footprint;
8329 old_owner = VM_OBJECT_OWNER(object);
8330
8331 DTRACE_VM8(object_ownership_change,
8332 vm_object_t, object,
8333 task_t, old_owner,
8334 int, old_ledger_tag,
8335 int, old_no_footprint,
8336 task_t, new_owner,
8337 int, new_ledger_tag,
8338 int, new_no_footprint,
8339 int, VM_OBJECT_ID(object));
8340
8341 assert(object->internal);
8342 resident_count = object->resident_page_count - object->wired_page_count;
8343 wired_count = object->wired_page_count;
8344 compressed_count = vm_compressor_pager_get_count(object->pager);
8345
8346 /*
8347 * Deal with the old owner and/or ledger tag, if needed.
8348 */
8349 if (old_owner != TASK_NULL &&
8350 ((old_owner != new_owner) /* new owner ... */
8351 || /* ... or ... */
8352 (old_no_footprint != new_no_footprint) /* new "no_footprint" */
8353 || /* ... or ... */
8354 old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
8355 /*
8356 * Take this object off of the old owner's ledgers.
8357 */
8358 vm_object_ledger_tag_ledgers(object,
8359 &ledger_idx_volatile,
8360 &ledger_idx_nonvolatile,
8361 &ledger_idx_volatile_compressed,
8362 &ledger_idx_nonvolatile_compressed,
8363 &do_footprint);
8364 if (object->purgable == VM_PURGABLE_VOLATILE ||
8365 object->purgable == VM_PURGABLE_EMPTY) {
8366 ledger_idx = ledger_idx_volatile;
8367 ledger_idx_compressed = ledger_idx_volatile_compressed;
8368 } else {
8369 ledger_idx = ledger_idx_nonvolatile;
8370 ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8371 }
8372 if (resident_count) {
8373 /*
7374 * Adjust the appropriate old owner's ledgers by the
8375 * number of resident pages.
8376 */
8377 ledger_debit(old_owner->ledger,
8378 ledger_idx,
8379 ptoa_64(resident_count));
8380 /* adjust old owner's footprint */
8381 if (do_footprint &&
8382 object->purgable != VM_PURGABLE_VOLATILE &&
8383 object->purgable != VM_PURGABLE_EMPTY) {
8384 ledger_debit(old_owner->ledger,
8385 task_ledgers.phys_footprint,
8386 ptoa_64(resident_count));
8387 }
8388 }
8389 if (wired_count) {
8390 /* wired pages are always nonvolatile */
8391 ledger_debit(old_owner->ledger,
8392 ledger_idx_nonvolatile,
8393 ptoa_64(wired_count));
8394 if (do_footprint) {
8395 ledger_debit(old_owner->ledger,
8396 task_ledgers.phys_footprint,
8397 ptoa_64(wired_count));
8398 }
8399 }
8400 if (compressed_count) {
8401 /*
8402 * Adjust the appropriate old owner's ledgers
8403 * by the number of compressed pages.
8404 */
8405 ledger_debit(old_owner->ledger,
8406 ledger_idx_compressed,
8407 ptoa_64(compressed_count));
8408 if (do_footprint &&
8409 object->purgable != VM_PURGABLE_VOLATILE &&
8410 object->purgable != VM_PURGABLE_EMPTY) {
8411 ledger_debit(old_owner->ledger,
8412 task_ledgers.phys_footprint,
8413 ptoa_64(compressed_count));
8414 }
8415 }
8416 if (old_owner != new_owner) {
8417 /* remove object from old_owner's list of owned objects */
8418 DTRACE_VM2(object_owner_remove,
8419 vm_object_t, object,
8420 task_t, old_owner);
8421 if (!old_task_objq_locked) {
8422 task_objq_lock(old_owner);
8423 }
8424 old_owner->task_owned_objects--;
8425 queue_remove(&old_owner->task_objq, object,
8426 vm_object_t, task_objq);
8427 switch (object->purgable) {
8428 case VM_PURGABLE_NONVOLATILE:
8429 case VM_PURGABLE_EMPTY:
8430 vm_purgeable_nonvolatile_owner_update(old_owner,
8431 -1);
8432 break;
8433 case VM_PURGABLE_VOLATILE:
8434 vm_purgeable_volatile_owner_update(old_owner,
8435 -1);
8436 break;
8437 default:
8438 break;
8439 }
8440 if (!old_task_objq_locked) {
8441 task_objq_unlock(old_owner);
8442 }
8443 }
8444 }
8445
8446 /*
8447 * Switch to new ledger tag and/or owner.
8448 */
8449
8450 new_task_objq_locked = FALSE;
8451 if (new_owner != old_owner &&
8452 new_owner != TASK_NULL &&
8453 new_owner != VM_OBJECT_OWNER_DISOWNED) {
8454 /*
8455 * If the new owner is not accepting new objects ("disowning"),
8456 * the object becomes "disowned" and will be added to
8457 * the kernel's task_objq.
8458 *
8459 * Check first without locking, to avoid blocking while the
8460 * task is disowning its objects.
8461 */
8462 if (new_owner->task_objects_disowning) {
8463 new_owner = VM_OBJECT_OWNER_DISOWNED;
8464 } else {
8465 task_objq_lock(new_owner);
8466 /* check again now that we have the lock */
8467 if (new_owner->task_objects_disowning) {
8468 new_owner = VM_OBJECT_OWNER_DISOWNED;
8469 task_objq_unlock(new_owner);
8470 } else {
8471 new_task_objq_locked = TRUE;
8472 }
8473 }
8474 }
8475
8476 object->vo_ledger_tag = new_ledger_tag;
8477 object->vo_owner = new_owner;
8478 object->vo_no_footprint = new_no_footprint;
8479
8480 if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
8481 /*
8482 * Disowned objects are added to the kernel's task_objq but
8483 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
8484 * differentiate them from objects intentionally owned by
8485 * the kernel.
8486 */
8487 assert(old_owner != kernel_task);
8488 new_owner = kernel_task;
8489 assert(!new_task_objq_locked);
8490 task_objq_lock(new_owner);
8491 new_task_objq_locked = TRUE;
8492 }
8493
8494 /*
8495 * Deal with the new owner and/or ledger tag, if needed.
8496 */
8497 if (new_owner != TASK_NULL &&
8498 ((new_owner != old_owner) /* new owner ... */
8499 || /* ... or ... */
8500 (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
8501 || /* ... or ... */
8502 new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
8503 /*
8504 * Add this object to the new owner's ledgers.
8505 */
8506 vm_object_ledger_tag_ledgers(object,
8507 &ledger_idx_volatile,
8508 &ledger_idx_nonvolatile,
8509 &ledger_idx_volatile_compressed,
8510 &ledger_idx_nonvolatile_compressed,
8511 &do_footprint);
8512 if (object->purgable == VM_PURGABLE_VOLATILE ||
8513 object->purgable == VM_PURGABLE_EMPTY) {
8514 ledger_idx = ledger_idx_volatile;
8515 ledger_idx_compressed = ledger_idx_volatile_compressed;
8516 } else {
8517 ledger_idx = ledger_idx_nonvolatile;
8518 ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8519 }
8520 if (resident_count) {
8521 /*
8522 * Adjust the appropriate new owner's ledgers by the
8523 * number of resident pages.
8524 */
8525 ledger_credit(new_owner->ledger,
8526 ledger_idx,
8527 ptoa_64(resident_count));
8528 /* adjust new owner's footprint */
8529 if (do_footprint &&
8530 object->purgable != VM_PURGABLE_VOLATILE &&
8531 object->purgable != VM_PURGABLE_EMPTY) {
8532 ledger_credit(new_owner->ledger,
8533 task_ledgers.phys_footprint,
8534 ptoa_64(resident_count));
8535 }
8536 }
8537 if (wired_count) {
8538 /* wired pages are always nonvolatile */
8539 ledger_credit(new_owner->ledger,
8540 ledger_idx_nonvolatile,
8541 ptoa_64(wired_count));
8542 if (do_footprint) {
8543 ledger_credit(new_owner->ledger,
8544 task_ledgers.phys_footprint,
8545 ptoa_64(wired_count));
8546 }
8547 }
8548 if (compressed_count) {
8549 /*
8550 * Adjust the new owner's ledgers by the number of
8551 * compressed pages.
8552 */
8553 ledger_credit(new_owner->ledger,
8554 ledger_idx_compressed,
8555 ptoa_64(compressed_count));
8556 if (do_footprint &&
8557 object->purgable != VM_PURGABLE_VOLATILE &&
8558 object->purgable != VM_PURGABLE_EMPTY) {
8559 ledger_credit(new_owner->ledger,
8560 task_ledgers.phys_footprint,
8561 ptoa_64(compressed_count));
8562 }
8563 }
8564 if (new_owner != old_owner) {
8565 /* add object to new_owner's list of owned objects */
8566 DTRACE_VM2(object_owner_add,
8567 vm_object_t, object,
8568 task_t, new_owner);
8569 assert(new_task_objq_locked);
8570 new_owner->task_owned_objects++;
8571 queue_enter(&new_owner->task_objq, object,
8572 vm_object_t, task_objq);
8573 switch (object->purgable) {
8574 case VM_PURGABLE_NONVOLATILE:
8575 case VM_PURGABLE_EMPTY:
8576 vm_purgeable_nonvolatile_owner_update(new_owner,
8577 +1);
8578 break;
8579 case VM_PURGABLE_VOLATILE:
8580 vm_purgeable_volatile_owner_update(new_owner,
8581 +1);
8582 break;
8583 default:
8584 break;
8585 }
8586 }
8587 }
8588
8589 if (new_task_objq_locked) {
8590 task_objq_unlock(new_owner);
8591 }
8592
8593 return KERN_SUCCESS;
8594 }
8595
8596 void
8597 vm_owned_objects_disown(
8598 task_t task)
8599 {
8600 vm_object_t next_object;
8601 vm_object_t object;
8602 int collisions;
8603 kern_return_t kr;
8604
8605 if (task == NULL) {
8606 return;
8607 }
8608
8609 collisions = 0;
8610
8611 again:
8612 if (task->task_objects_disowned) {
8613 /* task has already disowned its owned objects */
8614 assert(task->task_volatile_objects == 0);
8615 assert(task->task_nonvolatile_objects == 0);
8616 assert(task->task_owned_objects == 0);
8617 return;
8618 }
8619
8620 task_objq_lock(task);
8621
8622 task->task_objects_disowning = TRUE;
8623
8624 for (object = (vm_object_t) queue_first(&task->task_objq);
8625 !queue_end(&task->task_objq, (queue_entry_t) object);
8626 object = next_object) {
8627 if (task->task_nonvolatile_objects == 0 &&
8628 task->task_volatile_objects == 0 &&
8629 task->task_owned_objects == 0) {
8630 /* no more objects owned by "task" */
8631 break;
8632 }
8633
8634 next_object = (vm_object_t) queue_next(&object->task_objq);
8635
8636 #if DEBUG
8637 assert(object->vo_purgeable_volatilizer == NULL);
8638 #endif /* DEBUG */
8639 assert(object->vo_owner == task);
8640 if (!vm_object_lock_try(object)) {
8641 task_objq_unlock(task);
8642 mutex_pause(collisions++);
8643 goto again;
8644 }
8645 /* transfer ownership to the kernel */
8646 assert(VM_OBJECT_OWNER(object) != kernel_task);
8647 kr = vm_object_ownership_change(
8648 object,
8649 object->vo_ledger_tag, /* unchanged */
8650 VM_OBJECT_OWNER_DISOWNED, /* new owner */
8651 0, /* new_ledger_flags */
8652 TRUE); /* old_owner->task_objq locked */
8653 assert(kr == KERN_SUCCESS);
8654 assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
8655 vm_object_unlock(object);
8656 }
8657
8658 if (__improbable(task->task_owned_objects != 0)) {
8659 panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
8660 __FUNCTION__,
8661 task,
8662 task->task_volatile_objects,
8663 task->task_nonvolatile_objects,
8664 task->task_owned_objects,
8665 &task->task_objq,
8666 queue_first(&task->task_objq),
8667 queue_last(&task->task_objq));
8668 }
8669
8670 /* there shouldn't be any objects owned by task now */
8671 assert(task->task_volatile_objects == 0);
8672 assert(task->task_nonvolatile_objects == 0);
8673 assert(task->task_owned_objects == 0);
8674 assert(task->task_objects_disowning);
8675
8676 /* and we don't need to try and disown again */
8677 task->task_objects_disowned = TRUE;
8678
8679 task_objq_unlock(task);
8680 }