/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */
#include <mach_pagemap.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif
#if VM_OBJECT_ACCESS_TRACKING
uint64_t vm_object_access_tracking_reads = 0;
uint64_t vm_object_access_tracking_writes = 0;
#endif /* VM_OBJECT_ACCESS_TRACKING */

boolean_t vm_object_collapse_compressor_allowed = TRUE;

struct vm_counters vm_counters;

#if DEVELOPMENT || DEBUG
extern struct memory_object_pager_ops shared_region_pager_ops;
extern unsigned int shared_region_pagers_resident_count;
extern unsigned int shared_region_pagers_resident_peak;
#endif /* DEVELOPMENT || DEBUG */

#if VM_OBJECT_TRACKING
boolean_t vm_object_tracking_inited = FALSE;
btlog_t *vm_object_tracking_btlog;
void
vm_object_tracking_init(void)
{
    int vm_object_tracking;

    vm_object_tracking = 1;
    PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
        sizeof(vm_object_tracking));

    if (vm_object_tracking) {
        vm_object_tracking_btlog = btlog_create(
            VM_OBJECT_TRACKING_NUM_RECORDS,
            VM_OBJECT_TRACKING_BTDEPTH,
            TRUE /* caller_will_remove_entries_for_element? */);
        assert(vm_object_tracking_btlog);
        vm_object_tracking_inited = TRUE;
    }
}
#endif /* VM_OBJECT_TRACKING */
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	referenced.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait for the
 *	"pager_initialized" field to be asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	property.
 *
 *	ZZZ Continue this comment.
 */
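/*
 * Illustrative sketch (not part of the original source): how the "shadow"
 * and "vo_shadow_offset" fields described above chain together.  A page that
 * is not resident in a copy-on-write object is looked up in its shadow, at a
 * translated offset, and so on down the chain.  Object locking is elided
 * here; real lookups (e.g. in vm_fault) lock each object as they descend.
 */
#if 0
static vm_page_t
example_shadow_chain_lookup(vm_object_t object, vm_object_offset_t offset)
{
    vm_page_t m;

    while (object != VM_OBJECT_NULL) {
        m = vm_page_lookup(object, offset);     /* resident at this level? */
        if (m != VM_PAGE_NULL) {
            return m;
        }
        offset += object->vo_shadow_offset;     /* translate into the shadow */
        object = object->shadow;                /* descend one level */
    }
    return VM_PAGE_NULL;
}
#endif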
/* Forward declarations for internal functions. */
static kern_return_t    vm_object_terminate(
    vm_object_t             object);

static kern_return_t    vm_object_copy_call(
    vm_object_t             src_object,
    vm_object_offset_t      src_offset,
    vm_object_size_t        size,
    vm_object_t             *_result_object);

static void             vm_object_do_collapse(
    vm_object_t             object,
    vm_object_t             backing_object);

static void             vm_object_do_bypass(
    vm_object_t             object,
    vm_object_t             backing_object);

static void             vm_object_release_pager(
    memory_object_t         pager);

SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */

/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED;
SECURITY_READ_ONLY_LATE(vm_object_t) kernel_object = &kernel_object_store;

static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED;
SECURITY_READ_ONLY_LATE(vm_object_t) compressor_object = &compressor_object_store;

/*
 * This object holds all pages that have been retired due to errors like ECC.
 * The system should never use the page or look at its contents. The offset
 * in this object is the same as the page's physical address.
 */
static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
SECURITY_READ_ONLY_LATE(vm_object_t) retired_pages_object = &retired_pages_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object vm_submap_object_store VM_PAGE_PACKED_ALIGNED;
SECURITY_READ_ONLY_LATE(vm_object_t) vm_submap_object = &vm_submap_object_store;
/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static const struct vm_object vm_object_template = {
    /*
     * The lock will be initialized for each allocated object in
     * _vm_object_allocate(), so we don't need to initialize it in
     * the vm_object_template.
     */
#if DEVELOPMENT || DEBUG
    .memq_hint = VM_PAGE_NULL,
    .resident_page_count = 0,
    .wired_page_count = 0,
    .reusable_page_count = 0,
    .copy = VM_OBJECT_NULL,
    .shadow = VM_OBJECT_NULL,
    .vo_shadow_offset = (vm_object_offset_t) 0,
    .pager = MEMORY_OBJECT_NULL,
    .pager_control = MEMORY_OBJECT_CONTROL_NULL,
    .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
    .paging_in_progress = 0,
    .__object1_unused_bits = 0,
#endif /* __LP64__ */
    .activity_in_progress = 0,

    /* Begin bitfields */
    .all_wanted = 0, /* all bits FALSE */
    .pager_created = FALSE,
    .pager_initialized = FALSE,
    .pager_ready = FALSE,
    .pager_trusted = FALSE,
    .can_persist = FALSE,
    .purgable = VM_PURGABLE_DENY,
    .purgeable_when_ripe = FALSE,
    .purgeable_only_by_kernel = FALSE,
    .terminating = FALSE,
    .shadow_severed = FALSE,
    .phys_contiguous = FALSE,
    .nophyscache = FALSE,

    .cached_list.prev = NULL,
    .cached_list.next = NULL,

    .last_alloc = (vm_object_offset_t) 0,
    .sequential = (vm_object_offset_t) 0,
    .scan_collisions = 0,
#if CONFIG_PHANTOM_CACHE
    .phantom_object_id = 0,
#endif
    .cow_hint = ~(vm_offset_t)0,

    /* cache bitfields */
    .wimg_bits = VM_WIMG_USE_DEFAULT,
    .set_cache_attr = FALSE,
    .object_is_shared_cache = FALSE,
    .code_signed = FALSE,
    .mapping_in_progress = FALSE,
    .phantom_isssd = FALSE,
    .volatile_empty = FALSE,
    .volatile_fault = FALSE,
    .all_reusable = FALSE,
    .blocked_access = FALSE,
    .vo_ledger_tag = VM_LEDGER_TAG_NONE,
    .vo_no_footprint = FALSE,
#if CONFIG_IOSCHED || UPL_DEBUG
    .uplq.prev = NULL,
    .uplq.next = NULL,
#endif /* UPL_DEBUG */
#endif /* VM_PIP_DEBUG */

    .task_objq.next = NULL,
    .task_objq.prev = NULL,

    .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
    .purgeable_queue_group = 0,

    .wire_tag = VM_KERN_MEMORY_NONE,
#if !VM_TAG_ACTIVE_UPDATE
    .wired_objq.next = NULL,
    .wired_objq.prev = NULL,
#endif /* ! VM_TAG_ACTIVE_UPDATE */

    .io_tracking = FALSE,

#if CONFIG_SECLUDED_MEMORY
    .eligible_for_secluded = FALSE,
    .can_grab_secluded = FALSE,
#else /* CONFIG_SECLUDED_MEMORY */
    .__object3_unused_bits = 0,
#endif /* CONFIG_SECLUDED_MEMORY */

#if VM_OBJECT_ACCESS_TRACKING
    .access_tracking = FALSE,
    .access_tracking_reads = 0,
    .access_tracking_writes = 0,
#endif /* VM_OBJECT_ACCESS_TRACKING */

    .purgeable_owner_bt = {0},
    .vo_purgeable_volatilizer = NULL,
    .purgeable_volatilizer_bt = {0},
};
LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;

static queue_head_t vm_object_cached_list;
static uint32_t vm_object_cache_pages_freed = 0;
static uint32_t vm_object_cache_pages_moved = 0;
static uint32_t vm_object_cache_pages_skipped = 0;
static uint32_t vm_object_cache_adds = 0;
static uint32_t vm_object_cached_count = 0;

static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_cached_lock_data,
    &vm_object_cache_lck_grp, &vm_object_lck_attr);

static uint32_t vm_object_page_grab_failed = 0;
static uint32_t vm_object_page_grab_skipped = 0;
static uint32_t vm_object_page_grab_returned = 0;
static uint32_t vm_object_page_grab_pmapped = 0;
static uint32_t vm_object_page_grab_reactivations = 0;

#define vm_object_cache_lock_spin() \
	lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock() \
	lck_mtx_unlock(&vm_object_cached_lock_data)
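/*
 * Illustrative sketch (not from the original source): the cache lock is
 * taken as a spinning mutex, so callers keep the critical section short,
 * e.g. to sample the cached-object count.
 */
#if 0
static uint32_t
example_cached_count_snapshot(void)
{
    uint32_t count;

    vm_object_cache_lock_spin();
    count = vm_object_cached_count;     /* read under the cache lock */
    vm_object_cache_unlock();

    return count;
}
#endif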
static void     vm_object_cache_remove_locked(vm_object_t);

static void     vm_object_reap(vm_object_t object);
static void     vm_object_reap_async(vm_object_t object);
static void     vm_object_reaper_thread(void);

static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_reaper_lock_data,
    &vm_object_lck_grp, &vm_object_lck_attr);

static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;

#define vm_object_reaper_lock() \
	lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin() \
	lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock() \
	lck_mtx_unlock(&vm_object_reaper_lock_data)
/* I/O Re-prioritization request list */
queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list);

LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock,
    &vm_object_lck_grp, &vm_object_lck_attr);

#define IO_REPRIORITIZE_LIST_LOCK() \
	lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
#define IO_REPRIORITIZE_LIST_UNLOCK() \
	lck_spin_unlock(&io_reprioritize_list_lock)

#define MAX_IO_REPRIORITIZE_REQS        8192
ZONE_DECLARE(io_reprioritize_req_zone, "io_reprioritize_req",
    sizeof(struct io_reprioritize_req), ZC_NOGC);

/* I/O Re-prioritization thread */
int io_reprioritize_wakeup = 0;
static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);

#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
#define IO_REPRIO_THREAD_CONTINUATION()                                 \
{                                                                       \
	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);             \
	thread_block(io_reprioritize_thread);                           \
}

void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
void vm_decmp_upl_reprioritize(upl_t, int);

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
__private_extern__ void
_vm_object_allocate(
    vm_object_size_t        size,
    vm_object_t             object)
{
    *object = vm_object_template;
    vm_page_queue_init(&object->memq);
#if UPL_DEBUG || CONFIG_IOSCHED
    queue_init(&object->uplq);
#endif
    vm_object_lock_init(object);
    object->vo_size = vm_object_round_page(size);

#if VM_OBJECT_TRACKING_OP_CREATED
    if (vm_object_tracking_inited) {
        void    *bt[VM_OBJECT_TRACKING_BTDEPTH];
        int     numsaved = 0;

        numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
        btlog_add_entry(vm_object_tracking_btlog,
            object,
            VM_OBJECT_TRACKING_OP_CREATED,
            bt,
            numsaved);
    }
#endif /* VM_OBJECT_TRACKING_OP_CREATED */
}

__private_extern__ vm_object_t
vm_object_allocate(
    vm_object_size_t        size)
{
    vm_object_t object;

    object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

    if (object != VM_OBJECT_NULL) {
        _vm_object_allocate(size, object);
    }

    return object;
}

TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
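/*
 * Illustrative usage sketch (not from the original source): a typical caller
 * allocates an object for a mapping and drops its reference with
 * vm_object_deallocate() when it is done with it.
 */
#if 0
static void
example_allocate_and_release(vm_object_size_t size)
{
    vm_object_t object;

    object = vm_object_allocate(size);  /* new object, one reference */
    if (object == VM_OBJECT_NULL) {
        return;
    }
    /* ... use the object (e.g. enter it into a vm_map) ... */
    vm_object_deallocate(object);       /* give up our reference */
}
#endif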
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_bootstrap(void)
{
    vm_size_t       vm_object_size;

    assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));

    vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
        ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);

    vm_object_zone = zone_create_ext("vm objects", vm_object_size,
        ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED,
        ZONE_ID_ANY, ^(zone_t z){
#if defined(__LP64__)
        zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
#endif
    });

    queue_init(&vm_object_cached_list);

    queue_init(&vm_object_reaper_queue);

    /*
     *	Initialize the "kernel object"
     *
     *	Note that in the following size specifications, we need to add 1 because
     *	VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
     */
    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object);
    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
    kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
    compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
    kernel_object->no_tag_update = TRUE;

    /*
     * The object to hold retired VM pages.
     */
    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
    retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    /*
     *	Initialize the "submap object".  Make it as large as the
     *	kernel object so that no limit is imposed on submap sizes.
     */
    _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object);
    vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

    /*
     * Create an "extra" reference to this object so that we never
     * try to deallocate it; zfree doesn't like to be called with
     * non-zone memory.
     */
    vm_object_reference(vm_submap_object);
}
void
vm_io_reprioritize_init(void)
{
    kern_return_t   result;
    thread_t        thread = THREAD_NULL;

    result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
    if (result == KERN_SUCCESS) {
        thread_set_thread_name(thread, "VM_io_reprioritize_thread");
        thread_deallocate(thread);
    } else {
        panic("Could not create io_reprioritize_thread");
    }
}

void
vm_object_reaper_init(void)
{
    kern_return_t   kr;
    thread_t        thread;

    kr = kernel_thread_start_priority(
        (thread_continue_t) vm_object_reaper_thread,
        NULL,
        BASEPRI_VM,
        &thread);
    if (kr != KERN_SUCCESS) {
        panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
    }
    thread_set_thread_name(thread, "VM_object_reaper_thread");
    thread_deallocate(thread);
}
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;

__private_extern__ void
vm_object_deallocate(
    vm_object_t     object)
{
    vm_object_t     shadow = VM_OBJECT_NULL;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */
669 if (object
== VM_OBJECT_NULL
) {
673 if (object
== kernel_object
|| object
== compressor_object
|| object
== retired_pages_object
) {
674 vm_object_lock_shared(object
);
676 OSAddAtomic(-1, &object
->ref_count
);
678 if (object
->ref_count
== 0) {
679 if (object
== kernel_object
) {
680 panic("vm_object_deallocate: losing kernel_object\n");
681 } else if (object
== retired_pages_object
) {
682 panic("vm_object_deallocate: losing retired_pages_object\n");
684 panic("vm_object_deallocate: losing compressor_object\n");
687 vm_object_unlock(object
);
691 if (object
->ref_count
== 2 &&
694 * This "named" object's reference count is about to
696 * we'll need to call memory_object_last_unmap().
698 } else if (object
->ref_count
== 2 &&
700 object
->shadow
!= VM_OBJECT_NULL
) {
702 * This internal object's reference count is about to
703 * drop from 2 to 1 and it has a shadow object:
704 * we'll want to try and collapse this object with its
707 } else if (object
->ref_count
>= 2) {
708 UInt32 original_ref_count
;
709 volatile UInt32
*ref_count_p
;
713 * The object currently looks like it is not being
714 * kept alive solely by the reference we're about to release.
715 * Let's try and release our reference without taking
716 * all the locks we would need if we had to terminate the
717 * object (cache lock + exclusive object lock).
718 * Lock the object "shared" to make sure we don't race with
719 * anyone holding it "exclusive".
721 vm_object_lock_shared(object
);
722 ref_count_p
= (volatile UInt32
*) &object
->ref_count
;
723 original_ref_count
= object
->ref_count
;
725 * Test again as "ref_count" could have changed.
726 * "named" shouldn't change.
728 if (original_ref_count
== 2 &&
730 /* need to take slow path for m_o_last_unmap() */
732 } else if (original_ref_count
== 2 &&
734 object
->shadow
!= VM_OBJECT_NULL
) {
735 /* need to take slow path for vm_object_collapse() */
737 } else if (original_ref_count
< 2) {
738 /* need to take slow path for vm_object_terminate() */
741 /* try an atomic update with the shared lock */
742 atomic_swap
= OSCompareAndSwap(
744 original_ref_count
- 1,
745 (UInt32
*) &object
->ref_count
);
746 if (atomic_swap
== FALSE
) {
747 vm_object_deallocate_shared_swap_failures
++;
748 /* fall back to the slow path... */
752 vm_object_unlock(object
);
756 * ref_count was updated atomically !
758 vm_object_deallocate_shared_successes
++;
763 * Someone else updated the ref_count at the same
764 * time and we lost the race. Fall back to the usual
765 * slow but safe path...
767 vm_object_deallocate_shared_failures
++;
770 while (object
!= VM_OBJECT_NULL
) {
771 vm_object_lock(object
);
773 assert(object
->ref_count
> 0);
776 * If the object has a named reference, and only
777 * that reference would remain, inform the pager
778 * about the last "mapping" reference going away.
780 if ((object
->ref_count
== 2) && (object
->named
)) {
781 memory_object_t pager
= object
->pager
;
783 /* Notify the Pager that there are no */
784 /* more mappers for this object */
786 if (pager
!= MEMORY_OBJECT_NULL
) {
787 vm_object_mapping_wait(object
, THREAD_UNINT
);
788 vm_object_mapping_begin(object
);
789 vm_object_unlock(object
);
791 memory_object_last_unmap(pager
);
793 vm_object_lock(object
);
794 vm_object_mapping_end(object
);
796 assert(object
->ref_count
> 0);
800 * Lose the reference. If other references
801 * remain, then we are done, unless we need
802 * to retry a cache trim.
803 * If it is the last reference, then keep it
804 * until any pending initialization is completed.
807 /* if the object is terminating, it cannot go into */
808 /* the cache and we obviously should not call */
809 /* terminate again. */
811 if ((object
->ref_count
> 1) || object
->terminating
) {
812 vm_object_lock_assert_exclusive(object
);
815 if (object
->ref_count
== 1 &&
816 object
->shadow
!= VM_OBJECT_NULL
) {
818 * There's only one reference left on this
819 * VM object. We can't tell if it's a valid
820 * one (from a mapping for example) or if this
821 * object is just part of a possibly stale and
822 * useless shadow chain.
823 * We would like to try and collapse it into
824 * its parent, but we don't have any pointers
825 * back to this parent object.
826 * But we can try and collapse this object with
827 * its own shadows, in case these are useless
829 * We can't bypass this object though, since we
830 * don't know if this last reference on it is
833 vm_object_collapse(object
, 0, FALSE
);
835 vm_object_unlock(object
);
840 * We have to wait for initialization
841 * before destroying or caching the object.
844 if (object
->pager_created
&& !object
->pager_initialized
) {
845 assert(!object
->can_persist
);
846 vm_object_assert_wait(object
,
847 VM_OBJECT_EVENT_INITIALIZED
,
849 vm_object_unlock(object
);
851 thread_block(THREAD_CONTINUE_NULL
);
856 * Terminate this object. If it had a shadow,
857 * then deallocate it; otherwise, if we need
858 * to retry a cache trim, do so now; otherwise,
859 * we are done. "pageout" objects have a shadow,
860 * but maintain a "paging reference" rather than
861 * a normal reference.
863 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
865 if (vm_object_terminate(object
) != KERN_SUCCESS
) {
868 if (shadow
!= VM_OBJECT_NULL
) {
886 vm_object_lock_assert_exclusive(object
);
888 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
889 p_limit
= MIN(50, object
->resident_page_count
);
891 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && --p_limit
> 0) {
893 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->vmp_listq
);
895 if (VM_PAGE_WIRED(p
) || p
->vmp_busy
|| p
->vmp_cleaning
|| p
->vmp_laundry
|| p
->vmp_fictitious
) {
896 goto move_page_in_obj
;
899 if (p
->vmp_pmapped
|| p
->vmp_dirty
|| p
->vmp_precious
) {
900 vm_page_lockspin_queues();
902 if (p
->vmp_pmapped
) {
905 vm_object_page_grab_pmapped
++;
907 if (p
->vmp_reference
== FALSE
|| p
->vmp_dirty
== FALSE
) {
908 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p
));
910 if (refmod_state
& VM_MEM_REFERENCED
) {
911 p
->vmp_reference
= TRUE
;
913 if (refmod_state
& VM_MEM_MODIFIED
) {
914 SET_PAGE_DIRTY(p
, FALSE
);
917 if (p
->vmp_dirty
== FALSE
&& p
->vmp_precious
== FALSE
) {
918 refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p
));
920 if (refmod_state
& VM_MEM_REFERENCED
) {
921 p
->vmp_reference
= TRUE
;
923 if (refmod_state
& VM_MEM_MODIFIED
) {
924 SET_PAGE_DIRTY(p
, FALSE
);
927 if (p
->vmp_dirty
== FALSE
) {
932 if ((p
->vmp_q_state
!= VM_PAGE_ON_ACTIVE_Q
) && p
->vmp_reference
== TRUE
) {
935 counter_inc(&vm_statistics_reactivations
);
936 vm_object_page_grab_reactivations
++;
938 vm_page_unlock_queues();
940 vm_page_queue_remove(&object
->memq
, p
, vmp_listq
);
941 vm_page_queue_enter(&object
->memq
, p
, vmp_listq
);
946 vm_page_lockspin_queues();
948 vm_page_free_prepare_queues(p
);
949 vm_object_page_grab_returned
++;
950 vm_object_page_grab_skipped
+= p_skipped
;
952 vm_page_unlock_queues();
954 vm_page_free_prepare_object(p
, TRUE
);
958 vm_object_page_grab_skipped
+= p_skipped
;
959 vm_object_page_grab_failed
++;
#define EVICT_PREPARE_LIMIT     64

static  clock_sec_t     vm_object_cache_aging_ts = 0;
static void
vm_object_cache_remove_locked(
    vm_object_t     object)
{
    assert(object->purgable == VM_PURGABLE_DENY);

    queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
    object->cached_list.next = NULL;
    object->cached_list.prev = NULL;

    vm_object_cached_count--;
}

void
vm_object_cache_remove(
    vm_object_t     object)
{
    vm_object_cache_lock_spin();

    if (object->cached_list.next &&
        object->cached_list.prev) {
        vm_object_cache_remove_locked(object);
    }

    vm_object_cache_unlock();
}
1005 assert(object
->purgable
== VM_PURGABLE_DENY
);
1007 if (object
->resident_page_count
== 0) {
1010 clock_get_system_nanotime(&sec
, &nsec
);
1012 vm_object_cache_lock_spin();
1014 if (object
->cached_list
.next
== NULL
&&
1015 object
->cached_list
.prev
== NULL
) {
1016 queue_enter(&vm_object_cached_list
, object
, vm_object_t
, cached_list
);
1017 object
->vo_cache_ts
= sec
+ EVICT_AGE
;
1018 object
->vo_cache_pages_to_scan
= object
->resident_page_count
;
1020 vm_object_cached_count
++;
1021 vm_object_cache_adds
++;
1023 vm_object_cache_unlock();
1027 vm_object_cache_evict(
1029 int max_objects_to_examine
)
1031 vm_object_t object
= VM_OBJECT_NULL
;
1032 vm_object_t next_obj
= VM_OBJECT_NULL
;
1033 vm_page_t local_free_q
= VM_PAGE_NULL
;
1037 vm_page_t ep_array
[EVICT_PREPARE_LIMIT
];
1043 uint32_t ep_skipped
= 0;
1047 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START
, 0, 0, 0, 0, 0);
1049 * do a couple of quick checks to see if it's
1050 * worthwhile grabbing the lock
1052 if (queue_empty(&vm_object_cached_list
)) {
1053 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1056 clock_get_system_nanotime(&sec
, &nsec
);
1059 * the object on the head of the queue has not
1060 * yet sufficiently aged
1062 if (sec
< vm_object_cache_aging_ts
) {
1063 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1067 * don't need the queue lock to find
1068 * and lock an object on the cached list
1070 vm_page_unlock_queues();
1072 vm_object_cache_lock_spin();
1075 next_obj
= (vm_object_t
)queue_first(&vm_object_cached_list
);
1077 while (!queue_end(&vm_object_cached_list
, (queue_entry_t
)next_obj
) && object_cnt
++ < max_objects_to_examine
) {
1079 next_obj
= (vm_object_t
)queue_next(&next_obj
->cached_list
);
1081 assert(object
->purgable
== VM_PURGABLE_DENY
);
1083 if (sec
< object
->vo_cache_ts
) {
1084 KERNEL_DEBUG(0x130020c, object
, object
->resident_page_count
, object
->vo_cache_ts
, sec
, 0);
1086 vm_object_cache_aging_ts
= object
->vo_cache_ts
;
1087 object
= VM_OBJECT_NULL
;
1090 if (!vm_object_lock_try_scan(object
)) {
1092 * just skip over this guy for now... if we find
1093 * an object to steal pages from, we'll revist in a bit...
1094 * hopefully, the lock will have cleared
1096 KERNEL_DEBUG(0x13001f8, object
, object
->resident_page_count
, 0, 0, 0);
1098 object
= VM_OBJECT_NULL
;
1101 if (vm_page_queue_empty(&object
->memq
) || object
->vo_cache_pages_to_scan
== 0) {
1103 * this case really shouldn't happen, but it's not fatal
1104 * so deal with it... if we don't remove the object from
1105 * the list, we'll never move past it.
1107 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1109 vm_object_cache_remove_locked(object
);
1110 vm_object_unlock(object
);
1111 object
= VM_OBJECT_NULL
;
1115 * we have a locked object with pages...
1116 * time to start harvesting
1120 vm_object_cache_unlock();
1122 if (object
== VM_OBJECT_NULL
) {
1127 * object is locked at this point and
1128 * has resident pages
1130 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1133 * break the page scan into 2 pieces to minimize the time spent
1134 * behind the page queue lock...
1135 * the list of pages on these unused objects is likely to be cold
1136 * w/r to the cpu cache which increases the time to scan the list
1137 * tenfold... and we may have a 'run' of pages we can't utilize that
1138 * needs to be skipped over...
1140 if ((ep_limit
= num_to_evict
- (ep_freed
+ ep_moved
)) > EVICT_PREPARE_LIMIT
) {
1141 ep_limit
= EVICT_PREPARE_LIMIT
;
1145 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && object
->vo_cache_pages_to_scan
&& ep_count
< ep_limit
) {
1147 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->vmp_listq
);
1149 object
->vo_cache_pages_to_scan
--;
1151 if (VM_PAGE_WIRED(p
) || p
->vmp_busy
|| p
->vmp_cleaning
|| p
->vmp_laundry
) {
1152 vm_page_queue_remove(&object
->memq
, p
, vmp_listq
);
1153 vm_page_queue_enter(&object
->memq
, p
, vmp_listq
);
1158 if (p
->vmp_wpmapped
|| p
->vmp_dirty
|| p
->vmp_precious
) {
1159 vm_page_queue_remove(&object
->memq
, p
, vmp_listq
);
1160 vm_page_queue_enter(&object
->memq
, p
, vmp_listq
);
1162 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p
));
1164 ep_array
[ep_count
++] = p
;
1166 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1168 vm_page_lockspin_queues();
1170 for (ep_index
= 0; ep_index
< ep_count
; ep_index
++) {
1171 p
= ep_array
[ep_index
];
1173 if (p
->vmp_wpmapped
|| p
->vmp_dirty
|| p
->vmp_precious
) {
1174 p
->vmp_reference
= FALSE
;
1175 p
->vmp_no_cache
= FALSE
;
1178 * we've already filtered out pages that are in the laundry
1179 * so if we get here, this page can't be on the pageout queue
1181 vm_page_queues_remove(p
, FALSE
);
1182 vm_page_enqueue_inactive(p
, TRUE
);
1186 #if CONFIG_PHANTOM_CACHE
1187 vm_phantom_cache_add_ghost(p
);
1189 vm_page_free_prepare_queues(p
);
1191 assert(p
->vmp_pageq
.next
== 0 && p
->vmp_pageq
.prev
== 0);
1193 * Add this page to our list of reclaimed pages,
1194 * to be freed later.
1196 p
->vmp_snext
= local_free_q
;
1202 vm_page_unlock_queues();
1204 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1207 vm_page_free_list(local_free_q
, TRUE
);
1208 local_free_q
= VM_PAGE_NULL
;
1210 if (object
->vo_cache_pages_to_scan
== 0) {
1211 KERNEL_DEBUG(0x1300208, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1213 vm_object_cache_remove(object
);
1215 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1218 * done with this object
1220 vm_object_unlock(object
);
1221 object
= VM_OBJECT_NULL
;
1224 * at this point, we are not holding any locks
1226 if ((ep_freed
+ ep_moved
) >= num_to_evict
) {
1228 * we've reached our target for the
1229 * number of pages to evict
1233 vm_object_cache_lock_spin();
1236 * put the page queues lock back to the caller's
1239 vm_page_lock_queues();
1241 vm_object_cache_pages_freed
+= ep_freed
;
1242 vm_object_cache_pages_moved
+= ep_moved
;
1243 vm_object_cache_pages_skipped
+= ep_skipped
;
1245 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, ep_freed
, 0, 0, 0, 0);
/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if it's found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
    vm_object_t     object)
{
    vm_object_t     shadow_object;

    vm_object_lock_assert_exclusive(object);
1274 if (!object
->pageout
&& (!object
->internal
&& object
->can_persist
) &&
1275 (object
->pager
!= NULL
|| object
->shadow_severed
)) {
1277 * Clear pager_trusted bit so that the pages get yanked
1278 * out of the object instead of cleaned in place. This
1279 * prevents a deadlock in XMM and makes more sense anyway.
1281 object
->pager_trusted
= FALSE
;
1283 vm_object_reap_pages(object
, REAP_TERMINATE
);
1286 * Make sure the object isn't already being terminated
1288 if (object
->terminating
) {
1289 vm_object_lock_assert_exclusive(object
);
1290 object
->ref_count
--;
1291 assert(object
->ref_count
> 0);
1292 vm_object_unlock(object
);
1293 return KERN_FAILURE
;
1297 * Did somebody get a reference to the object while we were
1300 if (object
->ref_count
!= 1) {
1301 vm_object_lock_assert_exclusive(object
);
1302 object
->ref_count
--;
1303 assert(object
->ref_count
> 0);
1304 vm_object_unlock(object
);
1305 return KERN_FAILURE
;
1309 * Make sure no one can look us up now.
1312 object
->terminating
= TRUE
;
1313 object
->alive
= FALSE
;
1315 if (!object
->internal
&&
1316 object
->cached_list
.next
&&
1317 object
->cached_list
.prev
) {
1318 vm_object_cache_remove(object
);
1322 * Detach the object from its shadow if we are the shadow's
1323 * copy. The reference we hold on the shadow must be dropped
1326 if (((shadow_object
= object
->shadow
) != VM_OBJECT_NULL
) &&
1327 !(object
->pageout
)) {
1328 vm_object_lock(shadow_object
);
1329 if (shadow_object
->copy
== object
) {
1330 shadow_object
->copy
= VM_OBJECT_NULL
;
1332 vm_object_unlock(shadow_object
);
1335 if (object
->paging_in_progress
!= 0 ||
1336 object
->activity_in_progress
!= 0) {
1338 * There are still some paging_in_progress references
1339 * on this object, meaning that there are some paging
1340 * or other I/O operations in progress for this VM object.
1341 * Such operations take some paging_in_progress references
1342 * up front to ensure that the object doesn't go away, but
1343 * they may also need to acquire a reference on the VM object,
1344 * to map it in kernel space, for example. That means that
1345 * they may end up releasing the last reference on the VM
1346 * object, triggering its termination, while still holding
1347 * paging_in_progress references. Waiting for these
1348 * pending paging_in_progress references to go away here would
1351 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1352 * complete the VM object termination if it still holds
1353 * paging_in_progress references at this point.
1355 * No new paging_in_progress should appear now that the
1356 * VM object is "terminating" and not "alive".
1358 vm_object_reap_async(object
);
1359 vm_object_unlock(object
);
1361 * Return KERN_FAILURE to let the caller know that we
1362 * haven't completed the termination and it can't drop this
1363 * object's reference on its shadow object yet.
1364 * The reaper thread will take care of that once it has
1365 * completed this object's termination.
1367 return KERN_FAILURE
;
1370 * complete the VM object termination
1372 vm_object_reap(object
);
1373 object
= VM_OBJECT_NULL
;
1376 * the object lock was released by vm_object_reap()
1378 * KERN_SUCCESS means that this object has been terminated
1379 * and no longer needs its shadow object but still holds a
1381 * The caller is responsible for dropping that reference.
1382 * We can't call vm_object_deallocate() here because that
1383 * would create a recursion.
1385 return KERN_SUCCESS
;
/*
 * Complete the termination of a VM object after it's been marked
 * as "terminating" and "!alive" by vm_object_terminate().
 *
 * The VM object must be locked by caller.
 * The lock will be released on return and the VM object is no longer valid.
 */
static void
vm_object_reap(
    vm_object_t     object)
{
    memory_object_t         pager;
;
1405 vm_object_lock_assert_exclusive(object
);
1406 assert(object
->paging_in_progress
== 0);
1407 assert(object
->activity_in_progress
== 0);
1409 vm_object_reap_count
++;
1412 * Disown this purgeable object to cleanup its owner's purgeable
1413 * ledgers. We need to do this before disconnecting the object
1414 * from its pager, to properly account for compressed pages.
1416 if (object
->internal
&&
1417 (object
->purgable
!= VM_PURGABLE_DENY
||
1418 object
->vo_ledger_tag
)) {
1423 if (object
->vo_no_footprint
) {
1424 ledger_flags
|= VM_LEDGER_FLAG_NO_FOOTPRINT
;
1426 assert(!object
->alive
);
1427 assert(object
->terminating
);
1428 kr
= vm_object_ownership_change(object
,
1429 object
->vo_ledger_tag
, /* unchanged */
1430 NULL
, /* no owner */
1432 FALSE
); /* task_objq not locked */
1433 assert(kr
== KERN_SUCCESS
);
1434 assert(object
->vo_owner
== NULL
);
1437 #if DEVELOPMENT || DEBUG
1438 if (object
->object_is_shared_cache
&&
1439 object
->pager
!= NULL
&&
1440 object
->pager
->mo_pager_ops
== &shared_region_pager_ops
) {
1441 OSAddAtomic(-object
->resident_page_count
, &shared_region_pagers_resident_count
);
1443 #endif /* DEVELOPMENT || DEBUG */
1445 pager
= object
->pager
;
1446 object
->pager
= MEMORY_OBJECT_NULL
;
1448 if (pager
!= MEMORY_OBJECT_NULL
) {
1449 memory_object_control_disable(&object
->pager_control
);
1452 object
->ref_count
--;
1453 assert(object
->ref_count
== 0);
1456 * remove from purgeable queue if it's on
1458 if (object
->internal
) {
1459 assert(VM_OBJECT_OWNER(object
) == TASK_NULL
);
1461 VM_OBJECT_UNWIRED(object
);
1463 if (object
->purgable
== VM_PURGABLE_DENY
) {
1464 /* not purgeable: nothing to do */
1465 } else if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1466 purgeable_q_t queue
;
1468 queue
= vm_purgeable_object_remove(object
);
1471 if (object
->purgeable_when_ripe
) {
1473 * Must take page lock for this -
1474 * using it to protect token queue
1476 vm_page_lock_queues();
1477 vm_purgeable_token_delete_first(queue
);
1479 assert(queue
->debug_count_objects
>= 0);
1480 vm_page_unlock_queues();
1484 * Update "vm_page_purgeable_count" in bulk and mark
1485 * object as VM_PURGABLE_EMPTY to avoid updating
1486 * "vm_page_purgeable_count" again in vm_page_remove()
1487 * when reaping the pages.
1490 assert(object
->resident_page_count
>=
1491 object
->wired_page_count
);
1492 delta
= (object
->resident_page_count
-
1493 object
->wired_page_count
);
1495 assert(vm_page_purgeable_count
>= delta
);
1497 (SInt32
*)&vm_page_purgeable_count
);
1499 if (object
->wired_page_count
!= 0) {
1500 assert(vm_page_purgeable_wired_count
>=
1501 object
->wired_page_count
);
1502 OSAddAtomic(-object
->wired_page_count
,
1503 (SInt32
*)&vm_page_purgeable_wired_count
);
1505 object
->purgable
= VM_PURGABLE_EMPTY
;
1506 } else if (object
->purgable
== VM_PURGABLE_NONVOLATILE
||
1507 object
->purgable
== VM_PURGABLE_EMPTY
) {
1508 /* remove from nonvolatile queue */
1509 vm_purgeable_nonvolatile_dequeue(object
);
1511 panic("object %p in unexpected purgeable state 0x%x\n",
1512 object
, object
->purgable
);
1514 if (object
->transposed
&&
1515 object
->cached_list
.next
!= NULL
&&
1516 object
->cached_list
.prev
== NULL
) {
1518 * object->cached_list.next "points" to the
1519 * object that was transposed with this object.
1522 assert(object
->cached_list
.next
== NULL
);
1524 assert(object
->cached_list
.prev
== NULL
);
1527 if (object
->pageout
) {
1529 * free all remaining pages tabled on
1531 * clean up it's shadow
1533 assert(object
->shadow
!= VM_OBJECT_NULL
);
1535 vm_pageout_object_terminate(object
);
1536 } else if (object
->resident_page_count
) {
1538 * free all remaining pages tabled on
1541 vm_object_reap_pages(object
, REAP_REAP
);
1543 assert(vm_page_queue_empty(&object
->memq
));
1544 assert(object
->paging_in_progress
== 0);
1545 assert(object
->activity_in_progress
== 0);
1546 assert(object
->ref_count
== 0);
1549 * If the pager has not already been released by
1550 * vm_object_destroy, we need to terminate it and
1551 * release our reference to it here.
1553 if (pager
!= MEMORY_OBJECT_NULL
) {
1554 vm_object_unlock(object
);
1555 vm_object_release_pager(pager
);
1556 vm_object_lock(object
);
1559 /* kick off anyone waiting on terminating */
1560 object
->terminating
= FALSE
;
1561 vm_object_paging_begin(object
);
1562 vm_object_paging_end(object
);
1563 vm_object_unlock(object
);
1565 object
->shadow
= VM_OBJECT_NULL
;
1567 #if VM_OBJECT_TRACKING
1568 if (vm_object_tracking_inited
) {
1569 btlog_remove_entries_for_element(vm_object_tracking_btlog
,
1572 #endif /* VM_OBJECT_TRACKING */
1574 vm_object_lock_destroy(object
);
1576 * Free the space for the object.
1578 zfree(vm_object_zone
, object
);
1579 object
= VM_OBJECT_NULL
;
unsigned int vm_max_batch = 256;

#define V_O_R_MAX_BATCH 128

#define BATCH_LIMIT(max)        (vm_max_batch >= max ? max : vm_max_batch)

#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)              \
	MACRO_BEGIN                                                     \
	if (_local_free_q) {                                            \
	        if (do_disconnect) {                                    \
	                vm_page_t m;                                    \
	                for (m = _local_free_q;                         \
	                    m != VM_PAGE_NULL;                          \
	                    m = m->vmp_snext) {                         \
	                        if (m->vmp_pmapped) {                   \
	                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
	                        }                                       \
	                }                                               \
	        }                                                       \
	        vm_page_free_list(_local_free_q, TRUE);                 \
	        _local_free_q = VM_PAGE_NULL;                           \
	}                                                               \
	MACRO_END
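/*
 * Illustrative sketch (not from the original source): the reaping code below
 * batches pages onto a local singly-linked list (via vmp_snext) and then
 * frees them in one call, which is what VM_OBJ_REAP_FREELIST() wraps up.
 * Page-queue locking around vm_page_free_prepare_queues() is elided here.
 */
#if 0
static void
example_batched_free(vm_page_t page_array[], int count)
{
    vm_page_t   local_free_q = VM_PAGE_NULL;
    int         i;

    for (i = 0; i < count; i++) {
        vm_page_t m = page_array[i];

        vm_page_free_prepare_queues(m);     /* detach from the paging queues */
        m->vmp_snext = local_free_q;        /* chain onto the local list */
        local_free_q = m;
    }
    VM_OBJ_REAP_FREELIST(local_free_q, FALSE);  /* free them all at once */
}
#endif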
1610 vm_object_reap_pages(
1616 vm_page_t local_free_q
= VM_PAGE_NULL
;
1618 boolean_t disconnect_on_release
;
1619 pmap_flush_context pmap_flush_context_storage
;
1621 if (reap_type
== REAP_DATA_FLUSH
) {
1623 * We need to disconnect pages from all pmaps before
1624 * releasing them to the free list
1626 disconnect_on_release
= TRUE
;
1629 * Either the caller has already disconnected the pages
1630 * from all pmaps, or we disconnect them here as we add
1631 * them to out local list of pages to be released.
1632 * No need to re-disconnect them when we release the pages
1635 disconnect_on_release
= FALSE
;
1638 restart_after_sleep
:
1639 if (vm_page_queue_empty(&object
->memq
)) {
1642 loop_count
= BATCH_LIMIT(V_O_R_MAX_BATCH
);
1644 if (reap_type
== REAP_PURGEABLE
) {
1645 pmap_flush_context_init(&pmap_flush_context_storage
);
1648 vm_page_lock_queues();
1650 next
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1652 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next
)) {
1654 next
= (vm_page_t
)vm_page_queue_next(&next
->vmp_listq
);
1656 if (--loop_count
== 0) {
1657 vm_page_unlock_queues();
1660 if (reap_type
== REAP_PURGEABLE
) {
1661 pmap_flush(&pmap_flush_context_storage
);
1662 pmap_flush_context_init(&pmap_flush_context_storage
);
1665 * Free the pages we reclaimed so far
1666 * and take a little break to avoid
1667 * hogging the page queue lock too long
1669 VM_OBJ_REAP_FREELIST(local_free_q
,
1670 disconnect_on_release
);
1675 loop_count
= BATCH_LIMIT(V_O_R_MAX_BATCH
);
1677 vm_page_lock_queues();
1679 if (reap_type
== REAP_DATA_FLUSH
|| reap_type
== REAP_TERMINATE
) {
1680 if (p
->vmp_busy
|| p
->vmp_cleaning
) {
1681 vm_page_unlock_queues();
1683 * free the pages reclaimed so far
1685 VM_OBJ_REAP_FREELIST(local_free_q
,
1686 disconnect_on_release
);
1688 PAGE_SLEEP(object
, p
, THREAD_UNINT
);
1690 goto restart_after_sleep
;
1692 if (p
->vmp_laundry
) {
1693 vm_pageout_steal_laundry(p
, TRUE
);
1696 switch (reap_type
) {
1697 case REAP_DATA_FLUSH
:
1698 if (VM_PAGE_WIRED(p
)) {
1700 * this is an odd case... perhaps we should
1701 * zero-fill this page since we're conceptually
1702 * tossing its data at this point, but leaving
1703 * it on the object to honor the 'wire' contract
1709 case REAP_PURGEABLE
:
1710 if (VM_PAGE_WIRED(p
)) {
1712 * can't purge a wired page
1714 vm_page_purged_wired
++;
1717 if (p
->vmp_laundry
&& !p
->vmp_busy
&& !p
->vmp_cleaning
) {
1718 vm_pageout_steal_laundry(p
, TRUE
);
1721 if (p
->vmp_cleaning
|| p
->vmp_laundry
|| p
->vmp_absent
) {
1723 * page is being acted upon,
1724 * so don't mess with it
1726 vm_page_purged_others
++;
1731 * We can't reclaim a busy page but we can
1732 * make it more likely to be paged (it's not wired) to make
1733 * sure that it gets considered by
1734 * vm_pageout_scan() later.
1736 if (VM_PAGE_PAGEABLE(p
)) {
1737 vm_page_deactivate(p
);
1739 vm_page_purged_busy
++;
1743 assert(VM_PAGE_OBJECT(p
) != kernel_object
);
1746 * we can discard this page...
1748 if (p
->vmp_pmapped
== TRUE
) {
1752 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p
), PMAP_OPTIONS_NOFLUSH
| PMAP_OPTIONS_NOREFMOD
, (void *)&pmap_flush_context_storage
);
1754 vm_page_purged_count
++;
1758 case REAP_TERMINATE
:
1759 if (p
->vmp_absent
|| p
->vmp_private
) {
1761 * For private pages, VM_PAGE_FREE just
1762 * leaves the page structure around for
1763 * its owner to clean up. For absent
1764 * pages, the structure is returned to
1765 * the appropriate pool.
1769 if (p
->vmp_fictitious
) {
1770 assert(VM_PAGE_GET_PHYS_PAGE(p
) == vm_page_guard_addr
);
1773 if (!p
->vmp_dirty
&& p
->vmp_wpmapped
) {
1774 p
->vmp_dirty
= pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p
));
1777 if ((p
->vmp_dirty
|| p
->vmp_precious
) && !p
->vmp_error
&& object
->alive
) {
1778 assert(!object
->internal
);
1780 p
->vmp_free_when_done
= TRUE
;
1782 if (!p
->vmp_laundry
) {
1783 vm_page_queues_remove(p
, TRUE
);
1785 * flush page... page will be freed
1786 * upon completion of I/O
1788 vm_pageout_cluster(p
);
1790 vm_page_unlock_queues();
1792 * free the pages reclaimed so far
1794 VM_OBJ_REAP_FREELIST(local_free_q
,
1795 disconnect_on_release
);
1797 vm_object_paging_wait(object
, THREAD_UNINT
);
1799 goto restart_after_sleep
;
1806 vm_page_free_prepare_queues(p
);
1807 assert(p
->vmp_pageq
.next
== 0 && p
->vmp_pageq
.prev
== 0);
1809 * Add this page to our list of reclaimed pages,
1810 * to be freed later.
1812 p
->vmp_snext
= local_free_q
;
1815 vm_page_unlock_queues();
1818 * Free the remaining reclaimed pages
1820 if (reap_type
== REAP_PURGEABLE
) {
1821 pmap_flush(&pmap_flush_context_storage
);
1824 VM_OBJ_REAP_FREELIST(local_free_q
,
1825 disconnect_on_release
);
static void
vm_object_reap_async(
    vm_object_t     object)
{
    vm_object_lock_assert_exclusive(object);

    vm_object_reaper_lock_spin();

    vm_object_reap_count_async++;

    /* enqueue the VM object... */
    queue_enter(&vm_object_reaper_queue, object,
        vm_object_t, cached_list);

    vm_object_reaper_unlock();

    /* ... and wake up the reaper thread */
    thread_wakeup((event_t) &vm_object_reaper_queue);
}
1851 vm_object_reaper_thread(void)
1853 vm_object_t object
, shadow_object
;
1855 vm_object_reaper_lock_spin();
1857 while (!queue_empty(&vm_object_reaper_queue
)) {
1858 queue_remove_first(&vm_object_reaper_queue
,
1863 vm_object_reaper_unlock();
1864 vm_object_lock(object
);
1866 assert(object
->terminating
);
1867 assert(!object
->alive
);
1870 * The pageout daemon might be playing with our pages.
1871 * Now that the object is dead, it won't touch any more
1872 * pages, but some pages might already be on their way out.
1873 * Hence, we wait until the active paging activities have
1874 * ceased before we break the association with the pager
1877 while (object
->paging_in_progress
!= 0 ||
1878 object
->activity_in_progress
!= 0) {
1879 vm_object_wait(object
,
1880 VM_OBJECT_EVENT_PAGING_IN_PROGRESS
,
1882 vm_object_lock(object
);
1886 object
->pageout
? VM_OBJECT_NULL
: object
->shadow
;
1888 vm_object_reap(object
);
1889 /* cache is unlocked and object is no longer valid */
1890 object
= VM_OBJECT_NULL
;
1892 if (shadow_object
!= VM_OBJECT_NULL
) {
1894 * Drop the reference "object" was holding on
1895 * its shadow object.
1897 vm_object_deallocate(shadow_object
);
1898 shadow_object
= VM_OBJECT_NULL
;
1900 vm_object_reaper_lock_spin();
1903 /* wait for more work... */
1904 assert_wait((event_t
) &vm_object_reaper_queue
, THREAD_UNINT
);
1906 vm_object_reaper_unlock();
1908 thread_block((thread_continue_t
) vm_object_reaper_thread
);
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 */
static void
vm_object_release_pager(
    memory_object_t pager)
{
    /*
     *	Terminate the pager.
     */
    (void) memory_object_terminate(pager);

    /*
     *	Release reference to pager.
     */
    memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
    vm_object_t             object,
    __unused kern_return_t  reason)
{
    memory_object_t         old_pager;
1945 memory_object_t old_pager
;
1947 if (object
== VM_OBJECT_NULL
) {
1948 return KERN_SUCCESS
;
1952 * Remove the pager association immediately.
1954 * This will prevent the memory manager from further
1955 * meddling. [If it wanted to flush data or make
1956 * other changes, it should have done so before performing
1957 * the destroy call.]
1960 vm_object_lock(object
);
1961 object
->can_persist
= FALSE
;
1962 object
->named
= FALSE
;
1963 object
->alive
= FALSE
;
1965 #if DEVELOPMENT || DEBUG
1966 if (object
->object_is_shared_cache
&&
1967 object
->pager
!= NULL
&&
1968 object
->pager
->mo_pager_ops
== &shared_region_pager_ops
) {
1969 OSAddAtomic(-object
->resident_page_count
, &shared_region_pagers_resident_count
);
1971 #endif /* DEVELOPMENT || DEBUG */
1973 old_pager
= object
->pager
;
1974 object
->pager
= MEMORY_OBJECT_NULL
;
1975 if (old_pager
!= MEMORY_OBJECT_NULL
) {
1976 memory_object_control_disable(&object
->pager_control
);
1980 * Wait for the existing paging activity (that got
1981 * through before we nulled out the pager) to subside.
1984 vm_object_paging_wait(object
, THREAD_UNINT
);
1985 vm_object_unlock(object
);
1988 * Terminate the object now.
1990 if (old_pager
!= MEMORY_OBJECT_NULL
) {
1991 vm_object_release_pager(old_pager
);
1994 * JMM - Release the caller's reference. This assumes the
1995 * caller had a reference to release, which is a big (but
1996 * currently valid) assumption if this is driven from the
1997 * vnode pager (it is holding a named reference when making
2000 vm_object_deallocate(object
);
2002 return KERN_SUCCESS
;
/*
 * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
 * exist because of the need to handle shadow chains.  When deactivating pages, we only
 * want to deactivate the ones at the top most level in the object chain.  In order to do
 * this efficiently, the specified address range is divided up into "chunks" and we use
 * a bit map to keep track of which pages have already been processed as we descend down
 * the shadow chain.  These chunk macros hide the details of the bit map implementation
 * as much as we can.
 *
 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
 * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
 * order bit represents page 0 in the current range and the highest order bit represents
 * page 63.
 *
 * For further convenience, we also use negative logic for the page state in the bit map.
 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
 * been processed.  This way we can simply test the 64-bit long word to see if it's zero
 * to easily tell if the whole range has been processed.  Therefore, the bit map starts
 * out with all the bits set.  The macros below hide all these details from the caller.
 */

#define PAGES_IN_A_CHUNK        64      /* The number of pages in the chunk must */
                                        /* be the same as the number of bits in  */
                                        /* the chunk_state_t type. We use 64     */
                                        /* just for convenience.                 */

#define CHUNK_SIZE      (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */

typedef uint64_t        chunk_state_t;

/*
 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
 * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
 * then we mark pages beyond the len as having been "processed" so that we don't waste time
 * looking at pages in that range.  This can save us from unnecessarily chasing down the
 * shadow chain.
 */
#define CHUNK_INIT(c, len)                                              \
	MACRO_BEGIN                                                     \
	uint64_t p;                                                     \
	                                                                \
	(c) = 0xffffffffffffffffLL;                                     \
	                                                                \
	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
	        MARK_PAGE_HANDLED(c, p);                                \
	MACRO_END

/*
 * Return true if all pages in the chunk have not yet been processed.
 */
#define CHUNK_NOT_COMPLETE(c)   ((c) != 0)

/*
 * Return true if the page at offset 'p' in the bit map has already been handled
 * while processing a higher level object in the shadow chain.
 */
#define PAGE_ALREADY_HANDLED(c, p)      (((c) & (1ULL << (p))) == 0)

/*
 * Mark the page at offset 'p' in the bit map as having been processed.
 */
#define MARK_PAGE_HANDLED(c, p) \
	MACRO_BEGIN \
	(c) = (c) & ~(1ULL << (p)); \
	MACRO_END
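/*
 * Illustrative sketch (not from the original source): how the chunk bit map
 * is intended to be used when the same 64-page range is visited once per
 * level of a shadow chain.  Pages handled at an upper level are skipped
 * below it.  Locking and the actual per-page work are elided.
 */
#if 0
static void
example_chunk_walk(vm_object_t object, vm_object_offset_t offset, vm_object_size_t len)
{
    chunk_state_t   chunk;
    int             p;

    CHUNK_INIT(chunk, len);     /* all pages unprocessed, except those past 'len' */

    while (object != VM_OBJECT_NULL && CHUNK_NOT_COMPLETE(chunk)) {
        for (p = 0; p < PAGES_IN_A_CHUNK; p++) {
            if (PAGE_ALREADY_HANDLED(chunk, p)) {
                continue;       /* already done at a higher level */
            }
            if (vm_page_lookup(object, offset + (vm_object_offset_t)p * PAGE_SIZE_64) != VM_PAGE_NULL) {
                /* ... act on the page at this level ... */
                MARK_PAGE_HANDLED(chunk, p);
            }
        }
        /* pages not found here are looked for in the shadow object */
        offset += object->vo_shadow_offset;
        object = object->shadow;
    }
}
#endif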
/*
 * Return true if the page at the given offset has been paged out.  Object is
 * locked upon entry and returned locked.
 */
static boolean_t
page_is_paged_out(
    vm_object_t             object,
    vm_object_offset_t      offset)
{
    if (object->internal &&
        object->alive &&
        !object->terminating &&
        object->pager_ready) {
        if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
            == VM_EXTERNAL_STATE_EXISTS) {
            return TRUE;
        }
    }
    return FALSE;
}
/*
 * madvise_free_debug
 *
 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
 * simulate the loss of the page's contents as if the page had been
 * reclaimed and then re-faulted.
 */
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else /* DEVELOPMENT || DEBUG */
int madvise_free_debug = 0;
#endif /* DEVELOPMENT || DEBUG */

__options_decl(deactivate_flags_t, uint32_t, {
    DEACTIVATE_KILL         = 0x1,
    DEACTIVATE_REUSABLE     = 0x2,
    DEACTIVATE_ALL_REUSABLE = 0x4,
    DEACTIVATE_CLEAR_REFMOD = 0x8
});
/*
 * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
 * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
 * a size that is less than or equal to the CHUNK_SIZE.
 */
static void
deactivate_pages_in_object(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    deactivate_flags_t      flags,
    chunk_state_t           *chunk_state,
    pmap_flush_context      *pfc,
    struct pmap             *pmap,
    vm_map_offset_t         pmap_offset)
{
2141 struct vm_page_delayed_work dw_array
;
2142 struct vm_page_delayed_work
*dwp
, *dwp_start
;
2143 bool dwp_finish_ctx
= TRUE
;
2146 unsigned int reusable
= 0;
2149 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2150 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2151 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2152 * all the pages in the chunk.
2155 dwp_start
= dwp
= NULL
;
2157 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
2158 dwp_start
= vm_page_delayed_work_get_ctx();
2159 if (dwp_start
== NULL
) {
2160 dwp_start
= &dw_array
;
2162 dwp_finish_ctx
= FALSE
;
2167 for (p
= 0; size
&& CHUNK_NOT_COMPLETE(*chunk_state
); p
++, size
-= PAGE_SIZE_64
, offset
+= PAGE_SIZE_64
, pmap_offset
+= PAGE_SIZE_64
) {
2169 * If this offset has already been found and handled in a higher level object, then don't
2170 * do anything with it in the current shadow object.
2173 if (PAGE_ALREADY_HANDLED(*chunk_state
, p
)) {
2178 * See if the page at this offset is around. First check to see if the page is resident,
2179 * then if not, check the existence map or with the pager.
2182 if ((m
= vm_page_lookup(object
, offset
)) != VM_PAGE_NULL
) {
2184 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2185 * so that we won't bother looking for a page at this offset again if there are more
2186 * shadow objects. Then deactivate the page.
2189 MARK_PAGE_HANDLED(*chunk_state
, p
);
2191 if ((!VM_PAGE_WIRED(m
)) && (!m
->vmp_private
) && (!m
->vmp_gobbled
) && (!m
->vmp_busy
) &&
2192 (!m
->vmp_laundry
) && (!m
->vmp_cleaning
) && !(m
->vmp_free_when_done
)) {
2193 int clear_refmod_mask
;
2198 clear_refmod_mask
= VM_MEM_REFERENCED
;
2199 dwp
->dw_mask
|= DW_clear_reference
;
2201 if ((flags
& DEACTIVATE_KILL
) && (object
->internal
)) {
2202 if (madvise_free_debug
) {
2204 * zero-fill the page now
2205 * to simulate it being
2206 * reclaimed and re-faulted.
2208 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m
));
2210 m
->vmp_precious
= FALSE
;
2211 m
->vmp_dirty
= FALSE
;
2213 clear_refmod_mask
|= VM_MEM_MODIFIED
;
2214 if (m
->vmp_q_state
== VM_PAGE_ON_THROTTLED_Q
) {
2216 * This page is now clean and
2217 * reclaimable. Move it out
2218 * of the throttled queue, so
2219 * that vm_pageout_scan() can
2222 dwp
->dw_mask
|= DW_move_page
;
2225 VM_COMPRESSOR_PAGER_STATE_CLR(object
, offset
);
2227 if ((flags
& DEACTIVATE_REUSABLE
) && !m
->vmp_reusable
) {
2228 assert(!(flags
& DEACTIVATE_ALL_REUSABLE
));
2229 assert(!object
->all_reusable
);
2230 m
->vmp_reusable
= TRUE
;
2231 object
->reusable_page_count
++;
2232 assert(object
->resident_page_count
>= object
->reusable_page_count
);
2235 * Tell pmap this page is now
2236 * "reusable" (to update pmap
2237 * stats for all mappings).
2239 pmap_options
|= PMAP_OPTIONS_SET_REUSABLE
;
2242 if (flags
& DEACTIVATE_CLEAR_REFMOD
) {
2244 * The caller didn't clear the refmod bits in advance.
2245 * Clear them for this page now.
2247 pmap_options
|= PMAP_OPTIONS_NOFLUSH
;
2248 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m
),
2254 if ((m
->vmp_q_state
!= VM_PAGE_ON_THROTTLED_Q
) &&
2255 !(flags
& (DEACTIVATE_REUSABLE
| DEACTIVATE_ALL_REUSABLE
))) {
2256 dwp
->dw_mask
|= DW_move_page
;
2260 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
,
2264 if (dw_count
>= dw_limit
) {
2266 OSAddAtomic(reusable
,
2267 &vm_page_stats_reusable
.reusable_count
);
2268 vm_page_stats_reusable
.reusable
+= reusable
;
2271 vm_page_do_delayed_work(object
, VM_KERN_MEMORY_NONE
, dwp_start
, dw_count
);
2279 * The page at this offset isn't memory resident, check to see if it's
2280 * been paged out. If so, mark it as handled so we don't bother looking
2281 * for it in the shadow chain.
2284 if (page_is_paged_out(object
, offset
)) {
2285 MARK_PAGE_HANDLED(*chunk_state
, p
);
2288 * If we're killing a non-resident page, then clear the page in the existence
2289 * map so we don't bother paging it back in if it's touched again in the future.
2292 if ((flags
& DEACTIVATE_KILL
) && (object
->internal
)) {
2293 VM_COMPRESSOR_PAGER_STATE_CLR(object
, offset
);
2295 if (pmap
!= PMAP_NULL
) {
2297 * Tell pmap that this page
2298 * is no longer mapped, to
2299 * adjust the footprint ledger
2300 * because this page is no
2301 * longer compressed.
2303 pmap_remove_options(
2308 PMAP_OPTIONS_REMOVE
);
2316 OSAddAtomic(reusable
, &vm_page_stats_reusable
.reusable_count
);
2317 vm_page_stats_reusable
.reusable
+= reusable
;
2322 vm_page_do_delayed_work(object
, VM_KERN_MEMORY_NONE
, dwp_start
, dw_count
);
2327 if (dwp_start
&& dwp_finish_ctx
) {
2328 vm_page_delayed_work_finish_ctx(dwp_start
);
2329 dwp_start
= dwp
= NULL
;
/*
 * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
 * will always be less than or equal to the given size.  The total range is divided up
 * into chunks for efficiency and performance related to the locks and handling the shadow
 * chain.  This routine returns how much of the given "size" it actually processed.  It's
 * up to the caller to loop and keep calling this routine until the entire range they want
 * to process has been done (see the sketch after this function).
 * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range.
 */

static vm_object_size_t
deactivate_a_chunk(
	vm_object_t             orig_object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	deactivate_flags_t      flags,
	pmap_flush_context      *pfc,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_object_t             object;
	vm_object_t             tmp_object;
	vm_object_size_t        length;
	chunk_state_t           chunk_state;

	/*
	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
	 * remaining size the caller asked for.
	 */

	length = MIN(size, CHUNK_SIZE);

	/*
	 * The chunk_state keeps track of which pages we've already processed if there's
	 * a shadow chain on this object.  At this point, we haven't done anything with this
	 * range of pages yet, so initialize the state to indicate no pages processed yet.
	 */

	CHUNK_INIT(chunk_state, length);
	object = orig_object;

	/*
	 * Start at the top level object and iterate around the loop once for each object
	 * in the shadow chain.  We stop processing early if we've already found all the pages
	 * in the range.  Otherwise we stop when we run out of shadow objects.
	 */

	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
		vm_object_paging_begin(object);

		deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);

		vm_object_paging_end(object);

		/*
		 * We've finished with this object, see if there's a shadow object.  If
		 * there is, update the offset and lock the new object.  We also turn off
		 * kill_page at this point since we only kill pages in the top most object.
		 */

		tmp_object = object->shadow;

		if (tmp_object) {
			assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
			flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
			offset += object->vo_shadow_offset;
			vm_object_lock(tmp_object);
		}

		if (object != orig_object) {
			vm_object_unlock(object);
		}

		object = tmp_object;
	}

	if (object && object != orig_object) {
		vm_object_unlock(object);
	}

	return length;
}
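
/*
 * Illustrative sketch (not part of the kernel build): the caller-side loop
 * described in the comment above deactivate_a_chunk().  Each call reports how
 * much of the remaining range it handled and the caller advances by that
 * amount until the range is exhausted.  The real caller is
 * vm_object_deactivate_pages() below; the helper shown here is hypothetical.
 */
#if 0 /* illustrative sketch only, never compiled */
static void
deactivate_range_example(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	deactivate_flags_t      flags,
	pmap_flush_context      *pfc,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_object_size_t        length;

	while (size) {
		/* never processes more than CHUNK_SIZE per call */
		length = deactivate_a_chunk(object, offset, size, flags,
		    pfc, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
}
#endif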
/*
 * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
 * we also clear the modified status of the page and "forget" any changes that have been made
 * to the page.
 */

__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_object_size_t        length;
	boolean_t               all_reusable;
	pmap_flush_context      pmap_flush_context_storage;
	unsigned int            pmap_clear_refmod_mask = VM_MEM_REFERENCED;
	unsigned int            pmap_clear_refmod_options = 0;
	deactivate_flags_t      flags = DEACTIVATE_CLEAR_REFMOD;
	bool                    refmod_cleared = false;

	if (kill_page) {
		flags |= DEACTIVATE_KILL;
	}
	if (reusable_page) {
		flags |= DEACTIVATE_REUSABLE;
	}

	/*
	 * We break the range up into chunks and do one chunk at a time.  This is for
	 * efficiency and performance while handling the shadow chains and the locks.
	 * The deactivate_a_chunk() function returns how much of the range it processed.
	 * We keep calling this routine until the given size is exhausted.
	 */

	all_reusable = FALSE;

#if 11
	/*
	 * For the sake of accurate "reusable" pmap stats, we need
	 * to tell pmap about each page that is no longer "reusable",
	 * so we can't do the "all_reusable" optimization.
	 *
	 * If we do go with the all_reusable optimization, we can't
	 * return if size is 0 since we could have "all_reusable == TRUE"
	 * In this case, we save the overhead of doing the pmap_flush_context
	 * work.
	 */
#else
	if (reusable_page &&
	    object->internal &&
	    object->vo_size != 0 &&
	    object->vo_size == size &&
	    object->reusable_page_count == 0) {
		all_reusable = TRUE;
		reusable_page = FALSE;
		flags |= DEACTIVATE_ALL_REUSABLE;
	}
#endif

	if ((reusable_page || all_reusable) && object->all_reusable) {
		/* This means MADV_FREE_REUSABLE has been called twice, which
		 * is probably illegal. */
		return;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);

	/*
	 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
	 * We can't do this if we're killing pages and there's a shadow chain as
	 * we don't yet know which pages are in the top object (pages in shadow copies aren't
	 * killed).
	 * And we can only do this on hardware that supports it.
	 */
	if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
		if (kill_page && object->internal) {
			pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
		}
		if (reusable_page) {
			pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
		}

		refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
		if (refmod_cleared) {
			// We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
			flags &= ~DEACTIVATE_CLEAR_REFMOD;
		}
	}

	while (size) {
		length = deactivate_a_chunk(object, offset, size, flags,
		    &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
	pmap_flush(&pmap_flush_context_storage);

	if (all_reusable) {
		if (!object->all_reusable) {
			unsigned int reusable;

			object->all_reusable = TRUE;
			assert(object->reusable_page_count == 0);
			/* update global stats */
			reusable = object->resident_page_count;
			OSAddAtomic(reusable,
			    &vm_page_stats_reusable.reusable_count);
			vm_page_stats_reusable.reusable += reusable;
			vm_page_stats_reusable.all_reusable_calls++;
		}
	} else if (reusable_page) {
		vm_page_stats_reusable.partial_reusable_calls++;
	}
}
void
vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse)
{
	vm_object_offset_t      cur_offset;
	vm_page_t               m;
	unsigned int            reused, reusable;

#define VM_OBJECT_REUSE_PAGE(object, m, reused)				\
	MACRO_BEGIN							\
	if ((m) != VM_PAGE_NULL &&					\
	    (m)->vmp_reusable) {					\
	        assert((object)->reusable_page_count <=			\
	            (object)->resident_page_count);			\
	        assert((object)->reusable_page_count > 0);		\
	        (object)->reusable_page_count--;			\
	        (m)->vmp_reusable = FALSE;				\
	        (reused)++;						\
	        /*							\
	         * Tell pmap that this page is no longer		\
	         * "reusable", to update the "reusable" stats		\
	         * for all the pmaps that have mapped this		\
	         * page.						\
	         */							\
	        pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)),	\
	            0, /* refmod */					\
	            (PMAP_OPTIONS_CLEAR_REUSABLE			\
	            | PMAP_OPTIONS_NOFLUSH),				\
	            NULL);						\
	}								\
	MACRO_END

	reused = 0;
	reusable = 0;

	vm_object_lock_assert_exclusive(object);

	if (object->all_reusable) {
		panic("object %p all_reusable: can't update pmap stats\n",
		    object);
		assert(object->reusable_page_count == 0);
		object->all_reusable = FALSE;
		if (end_offset - start_offset == object->vo_size ||
		    !allow_partial_reuse) {
			vm_page_stats_reusable.all_reuse_calls++;
			reused = object->resident_page_count;
		} else {
			vm_page_stats_reusable.partial_reuse_calls++;
			vm_page_queue_iterate(&object->memq, m, vmp_listq) {
				if (m->vmp_offset < start_offset ||
				    m->vmp_offset >= end_offset) {
					m->vmp_reusable = TRUE;
					object->reusable_page_count++;
					assert(object->resident_page_count >= object->reusable_page_count);
					continue;
				} else {
					assert(!m->vmp_reusable);
					reused++;
				}
			}
		}
	} else if (object->resident_page_count >
	    ((end_offset - start_offset) >> PAGE_SHIFT)) {
		vm_page_stats_reusable.partial_reuse_calls++;
		for (cur_offset = start_offset;
		    cur_offset < end_offset;
		    cur_offset += PAGE_SIZE_64) {
			if (object->reusable_page_count == 0) {
				break;
			}
			m = vm_page_lookup(object, cur_offset);
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	} else {
		vm_page_stats_reusable.partial_reuse_calls++;
		vm_page_queue_iterate(&object->memq, m, vmp_listq) {
			if (object->reusable_page_count == 0) {
				break;
			}
			if (m->vmp_offset < start_offset ||
			    m->vmp_offset >= end_offset) {
				continue;
			}
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	}

	/* update global stats */
	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
	vm_page_stats_reusable.reused += reused;
	vm_page_stats_reusable.reusable += reusable;
}
/*
 * Routine:	vm_object_pmap_protect
 *
 * Purpose:
 *	Reduces the permission for all physical
 *	pages in the specified object range.
 *
 *	If removing write permission only, it is
 *	sufficient to protect only the pages in
 *	the top-level object; only those pages may
 *	have write permission.
 *
 *	If removing all access, we must follow the
 *	shadow chain from the top-level object to
 *	remove access to all pages in shadowed objects.
 *
 *	The object must *not* be locked.  The object must
 *	be internal.
 *
 *      If pmap is not NULL, this routine assumes that
 *      the only mappings for the pages are in that
 *      pmap.
 */

__private_extern__ void
vm_object_pmap_protect(
	vm_object_t                     object,
	vm_object_offset_t              offset,
	vm_object_size_t                size,
	pmap_t                          pmap,
	vm_map_size_t                   pmap_page_size,
	vm_map_offset_t                 pmap_start,
	vm_prot_t                       prot)
{
	vm_object_pmap_protect_options(object, offset, size, pmap,
	    pmap_page_size,
	    pmap_start, prot, 0);
}
__private_extern__ void
vm_object_pmap_protect_options(
	vm_object_t                     object,
	vm_object_offset_t              offset,
	vm_object_size_t                size,
	pmap_t                          pmap,
	vm_map_size_t                   pmap_page_size,
	vm_map_offset_t                 pmap_start,
	vm_prot_t                       prot,
	int                             options)
{
	pmap_flush_context      pmap_flush_context_storage;
	boolean_t               delayed_pmap_flush = FALSE;
	vm_object_offset_t      offset_in_object;
	vm_object_size_t        size_in_object;
	vm_page_t               p;

	if (object == VM_OBJECT_NULL) {
		return;
	}
	if (pmap_page_size > PAGE_SIZE) {
		/* for 16K map on 4K device... */
		pmap_page_size = PAGE_SIZE;
	}
	/*
	 * If we decide to work on the object itself, extend the range to
	 * cover a full number of native pages.
	 */
	size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
	offset_in_object = vm_object_trunc_page(offset);
	/*
	 * If we decide to work on the pmap, use the exact range specified,
	 * so no rounding/truncating offset and size.  They should already
	 * be aligned to pmap_page_size.
	 */
	assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
	    "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
	    offset, size, (uint64_t)pmap_page_size);

	vm_object_lock(object);

	if (object->phys_contiguous) {
		if (pmap != NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap,
			    pmap_start,
			    pmap_start + size,
			    prot,
			    options & ~PMAP_OPTIONS_NOFLUSH,
			    NULL);
		} else {
			vm_object_offset_t phys_start, phys_end, phys_addr;

			phys_start = object->vo_shadow_offset + offset_in_object;
			phys_end = phys_start + size_in_object;
			assert(phys_start <= phys_end);
			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
			vm_object_unlock(object);

			pmap_flush_context_init(&pmap_flush_context_storage);
			delayed_pmap_flush = FALSE;

			for (phys_addr = phys_start;
			    phys_addr < phys_end;
			    phys_addr += PAGE_SIZE_64) {
				pmap_page_protect_options(
					(ppnum_t) (phys_addr >> PAGE_SHIFT),
					prot,
					options | PMAP_OPTIONS_NOFLUSH,
					(void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
			if (delayed_pmap_flush == TRUE) {
				pmap_flush(&pmap_flush_context_storage);
			}
		}
		return;
	}

	assert(object->internal);

	while (TRUE) {
		if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
			vm_object_unlock(object);
			if (pmap_page_size < PAGE_SIZE) {
				DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
			}
			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
			return;
		}

		if (pmap_page_size < PAGE_SIZE) {
			DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
		}

		pmap_flush_context_init(&pmap_flush_context_storage);
		delayed_pmap_flush = FALSE;

		/*
		 * if we are doing large ranges with respect to resident
		 * page count then we should iterate over pages otherwise
		 * inverse page look-up will be faster
		 */
		if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
			vm_object_offset_t      end;

			end = offset_in_object + size_in_object;

			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
				if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) {
					vm_map_offset_t start;

					/*
					 * XXX FBDP 4K: intentionally using "offset" here instead
					 * of "offset_in_object", since "start" is a pmap address.
					 */
					start = pmap_start + p->vmp_offset - offset;

					if (pmap != PMAP_NULL) {
						vm_map_offset_t curr;
						for (curr = start;
						    curr < start + PAGE_SIZE_64;
						    curr += pmap_page_size) {
							if (curr < pmap_start) {
								continue;
							}
							if (curr >= pmap_start + size) {
								break;
							}
							pmap_protect_options(
								pmap,
								curr,
								curr + pmap_page_size,
								prot,
								options | PMAP_OPTIONS_NOFLUSH,
								&pmap_flush_context_storage);
						}
					} else {
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					}
					delayed_pmap_flush = TRUE;
				}
			}
		} else {
			vm_object_offset_t      end;
			vm_object_offset_t      target_off;

			end = offset_in_object + size_in_object;

			for (target_off = offset_in_object;
			    target_off < end; target_off += PAGE_SIZE) {
				p = vm_page_lookup(object, target_off);

				if (p != VM_PAGE_NULL) {
					vm_object_offset_t start;

					/*
					 * XXX FBDP 4K: intentionally using "offset" here instead
					 * of "offset_in_object", since "start" is a pmap address.
					 */
					start = pmap_start + (p->vmp_offset - offset);

					if (pmap != PMAP_NULL) {
						vm_map_offset_t curr;
						for (curr = start;
						    curr < start + PAGE_SIZE;
						    curr += pmap_page_size) {
							if (curr < pmap_start) {
								continue;
							}
							if (curr >= pmap_start + size) {
								break;
							}
							pmap_protect_options(
								pmap,
								curr,
								curr + pmap_page_size,
								prot,
								options | PMAP_OPTIONS_NOFLUSH,
								&pmap_flush_context_storage);
						}
					} else {
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					}
					delayed_pmap_flush = TRUE;
				}
			}
		}
		if (delayed_pmap_flush == TRUE) {
			pmap_flush(&pmap_flush_context_storage);
		}

		if (prot == VM_PROT_NONE) {
			/*
			 * Must follow shadow chain to remove access
			 * to pages in shadowed objects.
			 */
			vm_object_t     next_object;

			next_object = object->shadow;
			if (next_object != VM_OBJECT_NULL) {
				offset_in_object += object->vo_shadow_offset;
				offset += object->vo_shadow_offset;
				vm_object_lock(next_object);
				vm_object_unlock(object);
				object = next_object;
			} else {
				/*
				 * End of chain - we are done.
				 */
				break;
			}
		} else {
			/*
			 * Pages in shadowed objects may never have
			 * write permission - we may stop here.
			 */
			break;
		}
	}

	vm_object_unlock(object);
}
uint32_t vm_page_busy_absent_skipped = 0;
/*
 * Routine:	vm_object_copy_slowly
 *
 * Description:
 *	Copy the specified range of the source
 *	virtual memory object without using
 *	protection-based optimizations (such
 *	as copy-on-write).  The pages in the
 *	region are actually copied.
 *
 * In/out conditions:
 *	The caller must hold a reference and a lock
 *	for the source virtual memory object.  The source
 *	object will be returned *unlocked*.
 *
 * Results:
 *	If the copy is completed successfully, KERN_SUCCESS is
 *	returned.  If the caller asserted the interruptible
 *	argument, and an interruption occurred while waiting
 *	for a user-generated event, MACH_SEND_INTERRUPTED is
 *	returned.  Other values may be returned to indicate
 *	hard errors during the copy operation.
 *
 *	A new virtual memory object is returned in a
 *	parameter (_result_object).  The contents of this
 *	new object, starting at a zero offset, are a copy
 *	of the source memory region.  In the event of
 *	an error, this parameter will contain the value
 *	VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object)        /* OUT */
{
	vm_object_t             new_object;
	vm_object_offset_t      new_offset;

	struct vm_object_fault_info fault_info = {};

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Prevent destruction of the source object while we copy.
	 */

	vm_object_reference_locked(src_object);
	vm_object_unlock(src_object);

	/*
	 * Create a new object to hold the copied pages.
	 * A few notes:
	 *	We fill the new object starting at offset 0,
	 *	 regardless of the input offset.
	 *	We don't bother to lock the new object within
	 *	 this routine, since we have the only reference.
	 */

	size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
	src_offset = vm_object_trunc_page(src_offset);
	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));    /* Will the loop terminate? */

	fault_info.interruptible = interruptible;
	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.lo_offset = src_offset;
	fault_info.hi_offset = src_offset + size;
	fault_info.stealth = TRUE;

	for (;
	    size != 0;
	    src_offset += PAGE_SIZE_64,
	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t               new_page;
		vm_fault_return_t       result;

		vm_object_lock(new_object);

		while ((new_page = vm_page_alloc(new_object, new_offset))
		    == VM_PAGE_NULL) {
			vm_object_unlock(new_object);

			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return MACH_SEND_INTERRUPTED;
			}
			vm_object_lock(new_object);
		}
		vm_object_unlock(new_object);

		do {
			vm_prot_t       prot = VM_PROT_READ;
			vm_page_t       _result_page;
			vm_page_t       top_page;
			vm_page_t       result_page;
			kern_return_t   error_code;
			vm_object_t     result_page_object;

			vm_object_lock(src_object);

			if (src_object->internal &&
			    src_object->shadow == VM_OBJECT_NULL &&
			    (src_object->pager == NULL ||
			    (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
			    src_offset) ==
			    VM_EXTERNAL_STATE_ABSENT))) {
				boolean_t can_skip_page;

				_result_page = vm_page_lookup(src_object,
				    src_offset);
				if (_result_page == VM_PAGE_NULL) {
					/*
					 * This page is neither resident nor
					 * compressed and there's no shadow
					 * object below "src_object", so this
					 * page is really missing.
					 * There's no need to zero-fill it just
					 * to copy it:  let's leave it missing
					 * in "new_object" and get zero-filled
					 * on demand.
					 */
					can_skip_page = TRUE;
				} else if (workaround_41447923 &&
				    src_object->pager == NULL &&
				    _result_page != VM_PAGE_NULL &&
				    _result_page->vmp_busy &&
				    _result_page->vmp_absent &&
				    src_object->purgable == VM_PURGABLE_DENY &&
				    !src_object->blocked_access) {
					/*
					 * This page is "busy" and "absent"
					 * but not because we're waiting for
					 * it to be decompressed.  It must
					 * be because it's a "no zero fill"
					 * page that is currently not
					 * accessible until it gets overwritten
					 * by a device driver.
					 * Since its initial state would have
					 * been "zero-filled", let's leave the
					 * copy page missing and get zero-filled
					 * on demand.
					 */
					assert(src_object->internal);
					assert(src_object->shadow == NULL);
					assert(src_object->pager == NULL);
					can_skip_page = TRUE;
					vm_page_busy_absent_skipped++;
				} else {
					can_skip_page = FALSE;
				}
				if (can_skip_page) {
					vm_object_unlock(src_object);
					/* free the unused "new_page"... */
					vm_object_lock(new_object);
					VM_PAGE_FREE(new_page);
					new_page = VM_PAGE_NULL;
					vm_object_unlock(new_object);
					/* ...and go to next page in "src_object" */
					result = VM_FAULT_SUCCESS;
					break;
				}
			}

			vm_object_paging_begin(src_object);

			/* cap size at maximum UPL size */
			upl_size_t cluster_size;
			if (os_convert_overflow(size, &cluster_size)) {
				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
			}
			fault_info.cluster_size = cluster_size;

			_result_page = VM_PAGE_NULL;
			result = vm_fault_page(src_object, src_offset,
			    VM_PROT_READ, FALSE,
			    FALSE, /* page not looked up */
			    &prot, &_result_page, &top_page,
			    (int *)0,
			    &error_code, FALSE, FALSE, &fault_info);

			switch (result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;
				result_page_object = VM_PAGE_OBJECT(result_page);

				/*
				 * Copy the page to the new object.
				 *
				 * POLICY DECISION:
				 *	If result_page is clean,
				 *	we could steal it instead
				 *	of copying.
				 */

				vm_page_copy(result_page, new_page);
				vm_object_unlock(result_page_object);

				/*
				 * Let go of both pages (make them
				 * not busy, perform wakeup, activate).
				 */
				vm_object_lock(new_object);
				SET_PAGE_DIRTY(new_page, FALSE);
				PAGE_WAKEUP_DONE(new_page);
				vm_object_unlock(new_object);

				vm_object_lock(result_page_object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lockspin_queues();
				if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
				    (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
					vm_page_activate(result_page);
				}
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 * Release paging references and
				 * top-level placeholder page, if any.
				 */
				vm_fault_cleanup(result_page_object,
				    top_page);

				break;

			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible)) {
					break;
				}
				OS_FALLTHROUGH;

			case VM_FAULT_INTERRUPTED:
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return MACH_SEND_INTERRUPTED;

			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(src_object);
				vm_object_unlock(src_object);
				OS_FALLTHROUGH;

			case VM_FAULT_MEMORY_ERROR:
				/*
				 * A policy choice:
				 *	(a) ignore pages that we can't
				 *	    copy
				 *	(b) return the null object if
				 *	    any page fails [chosen]
				 */
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return error_code ? error_code :
				       KERN_MEMORY_ERROR;

			default:
				panic("vm_object_copy_slowly: unexpected error"
				    " 0x%x from vm_fault_page()\n", result);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 * Lose the extra reference, and return our object.
	 */
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return KERN_SUCCESS;
}
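
/*
 * Illustrative sketch (not part of the kernel build): the calling convention
 * documented above.  The caller enters with the source object referenced and
 * locked; vm_object_copy_slowly() returns with it unlocked and, on success,
 * hands back a new object whose contents start at offset 0.  The helper name
 * copy_region_example() and its parameters are hypothetical.
 */
#if 0 /* illustrative sketch only, never compiled */
static kern_return_t
copy_region_example(
	vm_object_t             src,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_t             *copy_out)
{
	kern_return_t kr;

	/* the reference on "src" is assumed to be held already */
	vm_object_lock(src);
	kr = vm_object_copy_slowly(src, offset, size,
	    TRUE /* interruptible */, copy_out);
	/* "src" is unlocked on return, whether or not the copy succeeded */
	return kr;
}
#endif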
/*
 * Routine:	vm_object_copy_quickly
 *
 * Purpose:
 *	Copy the specified range of the source virtual
 *	memory object, if it can be done without waiting
 *	for user-generated events.
 *
 * Results:
 *	If the copy is successful, the copy is returned in
 *	the arguments; otherwise, the arguments are not
 *	affected.
 *
 * In/out conditions:
 *	The object should be unlocked on entry and exit.
 */

__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t                     *_object,               /* INOUT */
	__unused vm_object_offset_t     offset,                 /* IN */
	__unused vm_object_size_t       size,                   /* IN */
	boolean_t                       *_src_needs_copy,       /* OUT */
	boolean_t                       *_dst_needs_copy)       /* OUT */
{
	vm_object_t     object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return TRUE;
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 * Symmetric copy strategy.
		 * Make another reference to the object.
		 * Leave object/offset unchanged.
		 */

		vm_object_reference_locked(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 * Both source and destination must make
		 * shadows, and the source must be made
		 * read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return FALSE;

	default:
		vm_object_unlock(object);
		return FALSE;
	}
	return TRUE;
}
static int      copy_call_count = 0;
static int      copy_call_sleep_count = 0;
static int      copy_call_restart_count = 0;

/*
 * Routine:	vm_object_copy_call [internal]
 *
 * Description:
 *	Copy the source object (src_object), using the
 *	user-managed copy algorithm.
 *
 * In/out conditions:
 *	The source object must be locked on entry.  It
 *	will be *unlocked* on exit.
 *
 * Results:
 *	If the copy is successful, KERN_SUCCESS is returned.
 *	A new object that represents the copied virtual
 *	memory is returned in a parameter (*_result_object).
 *	If the return value indicates an error, this parameter
 *	is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *_result_object)        /* OUT */
{
	kern_return_t   kr;
	vm_object_t     copy;
	boolean_t       check_ready = FALSE;
	uint32_t        try_failed_count = 0;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptable, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends on how common this case is.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
		    THREAD_UNINT);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;      /* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
		    THREAD_UNINT);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);

		try_failed_count++;
		mutex_pause(try_failed_count);  /* wait a bit */

		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->vo_size < src_offset + size) {
		assertf(page_aligned(src_offset + size),
		    "object %p size 0x%llx",
		    copy, (uint64_t)(src_offset + size));
		copy->vo_size = src_offset + size;
	}

	if (!copy->pager_ready) {
		check_ready = TRUE;
	}

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int      copy_delayed_lock_collisions = 0;
static int      copy_delayed_max_collisions = 0;
static int      copy_delayed_lock_contention = 0;
static int      copy_delayed_protect_iterate = 0;

/*
 * Routine:	vm_object_copy_delayed [internal]
 *
 * Description:
 *	Copy the specified virtual memory object, using
 *	the asymmetric copy-on-write algorithm.
 *
 * In/out conditions:
 *	The src_object must be locked on entry.  It will be unlocked
 *	on exit - so the caller must also hold a reference to it.
 *
 *	This routine will not block waiting for user-generated
 *	events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared)
{
	vm_object_t             new_copy = VM_OBJECT_NULL;
	vm_object_t             old_copy;
	vm_page_t               p;
	vm_object_size_t        copy_size = src_offset + size;
	pmap_flush_context      pmap_flush_context_storage;
	boolean_t               delayed_pmap_flush = FALSE;

	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		 pages modified by the new mapping (i.e., the copy,
	 *		 not the original mapping).
	 *		Record the original object as the backing object for
	 *		 the copy object.  If the original mapping does not
	 *		 change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		 When the original mapping causes a page to be modified,
	 *		 it must be copied to a new page that is "pushed" to
	 *		 the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		 This makes the copy object itself read-only, allowing
	 *		 it to be reused if the original mapping makes no
	 *		 changes, and simplifying the synchronization required
	 *		 in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be asymmetric because the original
	 *	object is *not* marked copy-on-write.  A copied page is pushed
	 *	to the copy object, regardless of which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done.  If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused.  Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot affect
	 *	the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */

	copy_size = vm_object_round_page(copy_size);
Retry:

	/*
	 * Wait for paging in progress.
	 */
	if (!src_object->true_share &&
	    (src_object->paging_in_progress != 0 ||
	    src_object->activity_in_progress != 0)) {
		if (src_object_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			src_object_shared = FALSE;
			goto Retry;
		}
		vm_object_paging_wait(src_object, THREAD_UNINT);
	}
	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		int     lock_granted;

		/*
		 *	Try to get the locks (out of order)
		 */
		if (src_object_shared == TRUE) {
			lock_granted = vm_object_lock_try_shared(old_copy);
		} else {
			lock_granted = vm_object_lock_try(old_copy);
		}

		if (!lock_granted) {
			vm_object_unlock(src_object);

			if (collisions++ == 0) {
				copy_delayed_lock_contention++;
			}
			mutex_pause(collisions);

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;

			if (collisions > copy_delayed_max_collisions) {
				copy_delayed_max_collisions = collisions;
			}

			if (src_object_shared == TRUE) {
				vm_object_lock_shared(src_object);
			} else {
				vm_object_lock(src_object);
			}

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object if
			 *	we can safely grow it (if
			 *	needed).
			 */

			if (old_copy->vo_size < copy_size) {
				if (src_object_shared == TRUE) {
					vm_object_unlock(old_copy);
					vm_object_unlock(src_object);

					vm_object_lock(src_object);
					src_object_shared = FALSE;
					goto Retry;
				}
				/*
				 * We can't perform a delayed copy if any of the
				 * pages in the extended range are wired (because
				 * we can't safely take write permission away from
				 * wired pages).  If the pages aren't wired, then
				 * go ahead and protect them.
				 */
				copy_delayed_protect_iterate++;

				pmap_flush_context_init(&pmap_flush_context_storage);
				delayed_pmap_flush = FALSE;

				vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
					if (!p->vmp_fictitious &&
					    p->vmp_offset >= old_copy->vo_size &&
					    p->vmp_offset < copy_size) {
						if (VM_PAGE_WIRED(p)) {
							vm_object_unlock(old_copy);
							vm_object_unlock(src_object);

							if (new_copy != VM_OBJECT_NULL) {
								vm_object_unlock(new_copy);
								vm_object_deallocate(new_copy);
							}
							if (delayed_pmap_flush == TRUE) {
								pmap_flush(&pmap_flush_context_storage);
							}

							return VM_OBJECT_NULL;
						} else {
							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
							    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
							delayed_pmap_flush = TRUE;
						}
					}
				}
				if (delayed_pmap_flush == TRUE) {
					pmap_flush(&pmap_flush_context_storage);
				}

				assertf(page_aligned(copy_size),
				    "object %p size 0x%llx",
				    old_copy, (uint64_t)copy_size);
				old_copy->vo_size = copy_size;
			}
			if (src_object_shared == TRUE) {
				vm_object_reference_shared(old_copy);
			} else {
				vm_object_reference_locked(old_copy);
			}
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}

			return old_copy;
		}

		/*
		 * Adjust the size argument so that the newly-created
		 * copy object will be large enough to back either the
		 * old copy object or the new mapping.
		 */
		if (old_copy->vo_size > copy_size) {
			copy_size = old_copy->vo_size;
		}

		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(copy_size);
			vm_object_lock(src_object);
			vm_object_lock(new_copy);

			src_object_shared = FALSE;
			goto Retry;
		}
		assertf(page_aligned(copy_size),
		    "object %p size 0x%llx",
		    new_copy, (uint64_t)copy_size);
		new_copy->vo_size = copy_size;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(copy_size);
		vm_object_lock(src_object);
		vm_object_lock(new_copy);

		src_object_shared = FALSE;
		goto Retry;
	}

	/*
	 * We now have the src object locked, and the new copy object
	 * allocated and locked (and potentially the old copy locked).
	 * Before we go any further, make sure we can still perform
	 * a delayed copy, as the situation may have changed.
	 *
	 * Specifically, we can't perform a delayed copy if any of the
	 * pages in the range are wired (because we can't safely take
	 * write permission away from wired pages).  If the pages aren't
	 * wired, then go ahead and protect them.
	 */
	copy_delayed_protect_iterate++;

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
		if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
			if (VM_PAGE_WIRED(p)) {
				if (old_copy != VM_OBJECT_NULL) {
					vm_object_unlock(old_copy);
				}
				vm_object_unlock(src_object);
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);

				if (delayed_pmap_flush == TRUE) {
					pmap_flush(&pmap_flush_context_storage);
				}

				return VM_OBJECT_NULL;
			} else {
				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
				    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}
	}
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		/* remove ref. from old_copy */
		vm_object_lock_assert_exclusive(src_object);
		src_object->ref_count--;
		assert(src_object->ref_count > 0);
		vm_object_lock_assert_exclusive(old_copy);
		old_copy->shadow = new_copy;
		vm_object_lock_assert_exclusive(new_copy);
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;          /* for old_copy->shadow ref. */

		vm_object_unlock(old_copy);     /* done with old_copy */
	}

	/*
	 *	Point the new copy at the existing object.
	 */
	vm_object_lock_assert_exclusive(new_copy);
	new_copy->shadow = src_object;
	new_copy->vo_shadow_offset = 0;
	new_copy->shadowed = TRUE;      /* caller must set needs_copy */

	vm_object_lock_assert_exclusive(src_object);
	vm_object_reference_locked(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(src_object);
	vm_object_unlock(new_copy);

	return new_copy;
}
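
/*
 * Illustrative sketch (not part of the kernel build): a stripped-down model of
 * the decision made above when an asymmetric copy is requested.  If the
 * previous copy object has seen no pushed pages, it is simply reused (and
 * grown if needed); otherwise a fresh copy object is chained in front of the
 * original.  The struct and function names here are hypothetical
 * simplifications, not the kernel's data structures.
 */
#if 0 /* illustrative sketch only, never compiled */
#include <stdint.h>
#include <stdlib.h>

struct toy_object {
	struct toy_object       *shadow;        /* object this one is backed by */
	struct toy_object       *copy;          /* most recent copy object, if any */
	uint64_t                size;
	unsigned int            resident_pages; /* pages "pushed" into this object */
};

static struct toy_object *
toy_copy_delayed(struct toy_object *src, uint64_t copy_size)
{
	struct toy_object *old_copy = src->copy;

	if (old_copy != NULL && old_copy->resident_pages == 0) {
		/* unmodified: reuse the existing copy object, growing it if needed */
		if (old_copy->size < copy_size) {
			old_copy->size = copy_size;
		}
		return old_copy;
	}

	/* modified (or no previous copy): insert a new copy object on top */
	struct toy_object *new_copy = calloc(1, sizeof(*new_copy));
	if (new_copy == NULL) {
		return NULL;
	}
	new_copy->size = copy_size;
	new_copy->shadow = src;
	if (old_copy != NULL) {
		old_copy->shadow = new_copy;    /* old copy now backed by the new one */
	}
	src->copy = new_copy;
	return new_copy;
}
#endif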
/*
 * Routine:	vm_object_copy_strategically
 *
 * Purpose:
 *	Perform a copy according to the source object's
 *	declared strategy.  This operation may block,
 *	and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,     /* OUT */
	vm_object_offset_t      *dst_offset,     /* OUT */
	boolean_t               *dst_needs_copy) /* OUT */
{
	kern_return_t   result;
	boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
	boolean_t       object_lock_shared = FALSE;
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	copy_strategy = src_object->copy_strategy;

	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
		vm_object_lock_shared(src_object);
		object_lock_shared = TRUE;
	} else {
		vm_object_lock(src_object);
	}

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready".  Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {
		wait_result_t wait_result;

		if (object_lock_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			object_lock_shared = FALSE;
			continue;
		}
		wait_result = vm_object_sleep( src_object,
		    VM_OBJECT_EVENT_PAGER_READY,
		    interruptible);
		if (wait_result != THREAD_AWAKENED) {
			vm_object_unlock(src_object);
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return MACH_SEND_INTERRUPTED;
		}
	}

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_DELAY:
		*dst_object = vm_object_copy_delayed(src_object,
		    src_offset, size, object_lock_shared);
		if (*dst_object != VM_OBJECT_NULL) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
			result = KERN_SUCCESS;
			break;
		}
		vm_object_lock(src_object);
		OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */

	case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
		    interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset - vm_object_trunc_page(src_offset);
			*dst_needs_copy = FALSE;
		}
		break;

	case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
		    dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	case MEMORY_OBJECT_COPY_SYMMETRIC:
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
	}
	return result;
}
/*
 * vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = TRUE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t             *object,        /* IN/OUT */
	vm_object_offset_t      *offset,        /* IN/OUT */
	vm_object_size_t        length)
{
	vm_object_t     source;
	vm_object_t     result;

	source = *object;
	assert(source != VM_OBJECT_NULL);
	if (source == VM_OBJECT_NULL) {
		return FALSE;
	}

#if 0
	/*
	 * This assertion is valid but it gets triggered by Rosetta for example
	 * due to a combination of vm_remap() that changes a VM object's
	 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
	 * that then sets "needs_copy" on its map entry.  This creates a
	 * mapping situation that VM should never see and doesn't know how to
	 * handle.
	 * It's not clear if this can create any real problem but we should
	 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
	 * do more than just set "needs_copy" to handle the copy-on-write...
	 * In the meantime, let's disable the assertion.
	 */
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
#endif

	/*
	 * Determine if we really need a shadow.
	 *
	 * If the source object is larger than what we are trying
	 * to create, then force the shadow creation even if the
	 * ref count is 1.  This will allow us to [potentially]
	 * collapse the underlying object away in the future
	 * (freeing up the extra data it might contain and that
	 * we don't need).
	 */

	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */

	/*
	 * The following optimization does not work in the context of submaps
	 * (the shared region, in particular).
	 * This object might have only 1 reference (in the submap) but that
	 * submap can itself be mapped multiple times, so the object is
	 * actually indirectly referenced more than once...
	 */
	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1) {
		/*
		 * Lock the object and check again.
		 * We also check to see if there's
		 * a shadow or copy object involved.
		 * We can't do that earlier because
		 * without the object locked, there
		 * could be a collapse and the chain
		 * gets modified leaving us with an
		 * invalid pointer.
		 */
		vm_object_lock(source);
		if (source->vo_size == length &&
		    source->ref_count == 1 &&
		    (source->shadow == VM_OBJECT_NULL ||
		    source->shadow->copy == VM_OBJECT_NULL)) {
			source->shadowed = FALSE;
			vm_object_unlock(source);
			return FALSE;
		}
		/* things changed while we were locking "source"... */
		vm_object_unlock(source);
	}

	/*
	 * *offset is the map entry's offset into the VM object and
	 * is aligned to the map's page size.
	 * VM objects need to be aligned to the system's page size.
	 * Record the necessary adjustment and re-align the offset so
	 * that result->vo_shadow_offset is properly page-aligned.
	 */
	vm_object_offset_t offset_adjustment;
	offset_adjustment = *offset - vm_object_trunc_page(*offset);
	length = vm_object_round_page(length + offset_adjustment);
	*offset = vm_object_trunc_page(*offset);

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
		panic("vm_object_shadow: no object for shadowing");
	}

	/*
	 * The new object shadows the source object, adding
	 * a reference to it.  Our caller changes his reference
	 * to point to the new object, removing a reference to
	 * the source object.  Net result: no change of reference
	 * count.
	 */
	result->shadow = source;

	/*
	 * Store the offset into the source object,
	 * and fix up the offset into the new object.
	 */

	result->vo_shadow_offset = *offset;
	assertf(page_aligned(result->vo_shadow_offset),
	    "result %p shadow offset 0x%llx",
	    result, result->vo_shadow_offset);

	/*
	 * Return the new things
	 */

	*offset = 0;
	if (offset_adjustment) {
		/*
		 * Make the map entry point to the equivalent offset
		 * in the new object.
		 */
		DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
		*offset += offset_adjustment;
	}
	*object = result;
	return TRUE;
}
/*
 * The relationship between vm_object structures and
 * the memory_object requires careful synchronization.
 *
 * All associations are created by memory_object_create_named
 * for external pagers and vm_object_compressor_pager_create for internal
 * objects as follows:
 *
 *	pager:	the memory_object itself, supplied by
 *		the user requesting a mapping (or the kernel,
 *		when initializing internal objects); the
 *		kernel simulates holding send rights by keeping
 *		a port reference;
 *
 *	pager_control:
 *		the memory object control port,
 *		created by the kernel; the kernel holds
 *		receive (and ownership) rights to this
 *		port, but no other references.
 *
 * When initialization is complete, the "initialized" field
 * is asserted.  Other mappings using a particular memory object,
 * and any references to the vm_object gained through the
 * port association must wait for this initialization to occur.
 *
 * In order to allow the memory manager to set attributes before
 * requests (notably virtual copy operations, but also data or
 * unlock requests) are made, a "ready" attribute is made available.
 * Only the memory manager may affect the value of this attribute.
 * Its value does not affect critical kernel functions, such as
 * internal object initialization or destruction.  [Furthermore,
 * memory objects created by the kernel are assumed to be ready
 * immediately; the default memory manager need not explicitly
 * set the "ready" attribute.]
 *
 * [Both the "initialized" and "ready" attribute wait conditions
 * use the "pager" field as the wait event.]
 *
 * The port associations can be broken down by any of the
 * following routines:
 *	vm_object_terminate:
 *		No references to the vm_object remain, and
 *		the object cannot (or will not) be cached.
 *		This is the normal case, and is done even
 *		though one of the other cases has already been
 *		done.
 *	memory_object_destroy:
 *		The memory manager has requested that the
 *		kernel relinquish references to the memory
 *		object.  [The memory manager may not want to
 *		destroy the memory object, but may wish to
 *		refuse or tear down existing memory mappings.]
 *
 * Each routine that breaks an association must break all of
 * them at once.  At some later time, that routine must clear
 * the pager field and release the memory object references.
 * [Furthermore, each routine must cope with the simultaneous
 * or previous operations of the others.]
 *
 * Because the pager field may be cleared spontaneously, it
 * cannot be used to determine whether a memory object has
 * ever been associated with a particular vm_object.  [This
 * knowledge is important to the shadow object mechanism.]
 * For this reason, an additional "created" attribute is
 * provided.
 *
 * During various paging operations, the pager reference found in the
 * vm_object must be valid.  To prevent this from being released,
 * (other than being removed, i.e., made null), routines may use
 * the vm_object_paging_begin/end routines [actually, macros].
 * The implementation uses the "paging_in_progress" and "wanted" fields.
 * [Operations that alter the validity of the pager values include the
 * termination routines and vm_object_collapse.]
 */
/*
 * Routine:	vm_object_memory_object_associate
 * Purpose:
 *	Associate a VM object to the given pager.
 *	If a VM object is not provided, create one.
 *	Initialize the pager.
 */
vm_object_t
vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               named)
{
	memory_object_control_t control;

	assert(pager != MEMORY_OBJECT_NULL);

	if (object != VM_OBJECT_NULL) {
		assert(object->internal);
		assert(object->pager_created);
		assert(!object->pager_initialized);
		assert(!object->pager_ready);
		assert(object->pager_trusted);
	} else {
		object = vm_object_allocate(size);
		assert(object != VM_OBJECT_NULL);
		object->internal = FALSE;
		object->pager_trusted = FALSE;
		/* copy strategy invalid until set by memory manager */
		object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
	}

	/*
	 * Allocate request port.
	 */

	control = memory_object_control_allocate(object);
	assert(control != MEMORY_OBJECT_CONTROL_NULL);

	vm_object_lock(object);

	assert(!object->pager_ready);
	assert(!object->pager_initialized);
	assert(object->pager == NULL);
	assert(object->pager_control == NULL);

	/*
	 * Copy the reference we were given.
	 */

	memory_object_reference(pager);
	object->pager_created = TRUE;
	object->pager = pager;
	object->pager_control = control;
	object->pager_ready = FALSE;

	vm_object_unlock(object);

	/*
	 * Let the pager know we're using it.
	 */

	(void) memory_object_init(pager,
	    object->pager_control,
	    PAGE_SIZE);

	vm_object_lock(object);
	if (named) {
		object->named = TRUE;
	}
	if (object->internal) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	object->pager_initialized = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);

	vm_object_unlock(object);

	return object;
}

/*
 *  Routine:    vm_object_compressor_pager_create
 *  Purpose:
 *      Create a memory object for an internal object.
 *  In/out conditions:
 *      The object is locked on entry and exit;
 *      it may be unlocked within this call.
 *  Limitations:
 *      Only one thread may be performing a
 *      vm_object_compressor_pager_create on an object at
 *      a time.  Presumably, only the pageout
 *      daemon will be using this routine.
 */

void
vm_object_compressor_pager_create(
    vm_object_t object)
{
    memory_object_t pager;
    vm_object_t     pager_object = VM_OBJECT_NULL;

    assert(object != kernel_object);

    /*
     *  Prevent collapse or termination by holding a paging reference
     */

    vm_object_paging_begin(object);
    if (object->pager_created) {
        /*
         *  Someone else got to it first...
         *  wait for them to finish initializing the ports
         */
        while (!object->pager_initialized) {
            vm_object_sleep(object,
                VM_OBJECT_EVENT_INITIALIZED,
                THREAD_UNINT);
        }
        vm_object_paging_end(object);
        return;
    }

    if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
        (object->vo_size / PAGE_SIZE)) {
#if DEVELOPMENT || DEBUG
        printf("vm_object_compressor_pager_create(%p): "
            "object size 0x%llx >= 0x%llx\n",
            object,
            (uint64_t) object->vo_size,
            0x0FFFFFFFFULL * PAGE_SIZE);
#endif /* DEVELOPMENT || DEBUG */
        vm_object_paging_end(object);
        return;
    }

    /*
     *  Indicate that a memory object has been assigned
     *  before dropping the lock, to prevent a race.
     */

    object->pager_created = TRUE;
    object->pager_trusted = TRUE;
    object->paging_offset = 0;

    vm_object_unlock(object);

    /*
     *  Create the [internal] pager, and associate it with this object.
     *
     *  We make the association here so that vm_object_enter()
     *  can look up the object to complete initializing it.  No
     *  user will ever map this object.
     */
    {
        /* create our new memory object */
        assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
            (object->vo_size / PAGE_SIZE));
        (void) compressor_memory_object_create(
            (memory_object_size_t) object->vo_size,
            &pager);
        if (pager == NULL) {
            panic("vm_object_compressor_pager_create(): "
                "no pager for object %p size 0x%llx\n",
                object, (uint64_t) object->vo_size);
        }
    }

    /*
     *  A reference was returned by
     *  memory_object_create(), and it is
     *  copied by vm_object_memory_object_associate().
     */

    pager_object = vm_object_memory_object_associate(pager,
        object,
        object->vo_size,
        FALSE);
    if (pager_object != object) {
        panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
    }

    /*
     *  Drop the reference we were passed.
     */
    memory_object_deallocate(pager);

    vm_object_lock(object);

    /*
     *  Release the paging reference
     */
    vm_object_paging_end(object);
}
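
/*
 * Illustrative note (added comment; the arithmetic below is an example, not
 * part of the original code): the size check above rejects objects whose
 * page count does not fit in 32 bits, since the compressor pager addresses
 * its slots with 32-bit page indices.  Assuming 4KB pages:
 *
 *      vo_size = 0x0FFFFFFFFULL * 4096   -> page count 0x0FFFFFFFF,
 *                                           still representable, accepted
 *      vo_size = 0x100000000ULL * 4096   -> page count 0x100000000,
 *                                           (uint32_t) truncates it to 0,
 *                                           the "!=" test fires, bail out
 */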

/*
 *  Global variables for vm_object_collapse():
 *
 *      Counts for normal collapses and bypasses.
 *      Debugging variables, to watch or disable collapse.
 */
static long     object_collapses = 0;
static long     object_bypasses = 0;

static boolean_t        vm_object_collapse_allowed = TRUE;
static boolean_t        vm_object_bypass_allowed = TRUE;

void vm_object_do_collapse_compressor(vm_object_t object,
    vm_object_t backing_object);

void
vm_object_do_collapse_compressor(
    vm_object_t object,
    vm_object_t backing_object)
{
    vm_object_offset_t new_offset, backing_offset;
    vm_object_size_t size;

    vm_counters.do_collapse_compressor++;

    vm_object_lock_assert_exclusive(object);
    vm_object_lock_assert_exclusive(backing_object);

    size = object->vo_size;

    /*
     *  Move all compressed pages from backing_object
     *  to the parent.
     */

    for (backing_offset = object->vo_shadow_offset;
        backing_offset < object->vo_shadow_offset + object->vo_size;
        backing_offset += PAGE_SIZE) {
        memory_object_offset_t backing_pager_offset;

        /* find the next compressed page at or after this offset */
        backing_pager_offset = (backing_offset +
            backing_object->paging_offset);
        backing_pager_offset = vm_compressor_pager_next_compressed(
            backing_object->pager,
            backing_pager_offset);
        if (backing_pager_offset == (memory_object_offset_t) -1) {
            /* no more compressed pages */
            break;
        }
        backing_offset = (backing_pager_offset -
            backing_object->paging_offset);

        new_offset = backing_offset - object->vo_shadow_offset;

        if (new_offset >= object->vo_size) {
            /* we're out of the scope of "object": done */
            break;
        }

        if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
            (vm_compressor_pager_state_get(object->pager,
            (new_offset +
            object->paging_offset)) ==
            VM_EXTERNAL_STATE_EXISTS)) {
            /*
             * This page already exists in object, resident or
             * compressed.
             * We don't need this compressed page in backing_object
             * and it will be reclaimed when we release
             * backing_object.
             */
            continue;
        }

        /*
         * backing_object has this page in the VM compressor and
         * we need to transfer it to object.
         */
        vm_counters.do_collapse_compressor_pages++;
        vm_compressor_pager_transfer(
            /* destination: */
            object->pager,
            (new_offset + object->paging_offset),
            /* source: */
            backing_object->pager,
            (backing_offset + backing_object->paging_offset));
    }
}

/*
 *  Routine:    vm_object_do_collapse
 *  Purpose:
 *      Collapse an object with the object backing it.
 *      Pages in the backing object are moved into the
 *      parent, and the backing object is deallocated.
 *  Conditions:
 *      Both objects and the cache are locked; the page
 *      queues are unlocked.
 */
static void
vm_object_do_collapse(
    vm_object_t object,
    vm_object_t backing_object)
{
    vm_page_t p, pp;
    vm_object_offset_t new_offset, backing_offset;
    vm_object_size_t size;

    vm_object_lock_assert_exclusive(object);
    vm_object_lock_assert_exclusive(backing_object);

    assert(object->purgable == VM_PURGABLE_DENY);
    assert(backing_object->purgable == VM_PURGABLE_DENY);

    backing_offset = object->vo_shadow_offset;
    size = object->vo_size;

    /*
     *  Move all in-memory pages from backing_object
     *  to the parent.  Pages that have been paged out
     *  will be overwritten by any of the parent's
     *  pages that shadow them.
     */

    while (!vm_page_queue_empty(&backing_object->memq)) {
        p = (vm_page_t) vm_page_queue_first(&backing_object->memq);

        new_offset = (p->vmp_offset - backing_offset);

        assert(!p->vmp_busy || p->vmp_absent);

        /*
         *  If the parent has a page here, or if
         *  this page falls outside the parent,
         *  dispose of it.
         *
         *  Otherwise, move it as planned.
         */

        if (p->vmp_offset < backing_offset || new_offset >= size) {
            VM_PAGE_FREE(p);
        } else {
            pp = vm_page_lookup(object, new_offset);
            if (pp == VM_PAGE_NULL) {
                if (VM_COMPRESSOR_PAGER_STATE_GET(object,
                    new_offset)
                    == VM_EXTERNAL_STATE_EXISTS) {
                    /*
                     * Parent object has this page
                     * in the VM compressor.
                     * Throw away the backing
                     * object's page.
                     */
                    VM_PAGE_FREE(p);
                } else {
                    /*
                     *  Parent now has no page.
                     *  Move the backing object's page
                     *  up.
                     */
                    vm_page_rename(p, object, new_offset);
                }
            } else {
                assert(!pp->vmp_absent);

                /*
                 *  Parent object has a real page.
                 *  Throw away the backing object's
                 *  page.
                 */
                VM_PAGE_FREE(p);
            }
        }
    }

    if (vm_object_collapse_compressor_allowed &&
        object->pager != MEMORY_OBJECT_NULL &&
        backing_object->pager != MEMORY_OBJECT_NULL) {
        /* move compressed pages from backing_object to object */
        vm_object_do_collapse_compressor(object, backing_object);
    } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
        assert((!object->pager_created &&
            (object->pager == MEMORY_OBJECT_NULL)) ||
            (!backing_object->pager_created &&
            (backing_object->pager == MEMORY_OBJECT_NULL)));
        /*
         *  Move the pager from backing_object to object.
         *
         *  XXX We're only using part of the paging space
         *  for keeps now... we ought to discard the
         *  unused portion.
         */

        assert(!object->paging_in_progress);
        assert(!object->activity_in_progress);
        assert(!object->pager_created);
        assert(object->pager == NULL);
        object->pager = backing_object->pager;

        object->pager_created = backing_object->pager_created;
        object->pager_control = backing_object->pager_control;
        object->pager_ready = backing_object->pager_ready;
        object->pager_initialized = backing_object->pager_initialized;
        object->paging_offset =
            backing_object->paging_offset + backing_offset;
        if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_collapse(&object->pager_control,
                object);
            /* the backing_object has lost its pager: reset all fields */
            backing_object->pager_created = FALSE;
            backing_object->pager_control = NULL;
            backing_object->pager_ready = FALSE;
            backing_object->paging_offset = 0;
            backing_object->pager = NULL;
        }
    }

    /*
     *  Object now shadows whatever backing_object did.
     *  Note that the reference to backing_object->shadow
     *  moves from within backing_object to within object.
     */

    assert(!object->phys_contiguous);
    assert(!backing_object->phys_contiguous);
    object->shadow = backing_object->shadow;
    if (object->shadow) {
        assertf(page_aligned(object->vo_shadow_offset),
            "object %p shadow_offset 0x%llx",
            object, object->vo_shadow_offset);
        assertf(page_aligned(backing_object->vo_shadow_offset),
            "backing_object %p shadow_offset 0x%llx",
            backing_object, backing_object->vo_shadow_offset);
        object->vo_shadow_offset += backing_object->vo_shadow_offset;
        /* "backing_object" gave its shadow to "object" */
        backing_object->shadow = VM_OBJECT_NULL;
        backing_object->vo_shadow_offset = 0;
    } else {
        /* no shadow, therefore no shadow offset... */
        object->vo_shadow_offset = 0;
    }
    assert((object->shadow == VM_OBJECT_NULL) ||
        (object->shadow->copy != backing_object));

    /*
     *  Discard backing_object.
     *
     *  Since the backing object has no pages, no
     *  pager left, and no object references within it,
     *  all that is necessary is to dispose of it.
     */
    object_collapses++;

    assert(backing_object->ref_count == 1);
    assert(backing_object->resident_page_count == 0);
    assert(backing_object->paging_in_progress == 0);
    assert(backing_object->activity_in_progress == 0);
    assert(backing_object->shadow == VM_OBJECT_NULL);
    assert(backing_object->vo_shadow_offset == 0);

    if (backing_object->pager != MEMORY_OBJECT_NULL) {
        /* ... unless it has a pager; need to terminate pager too */
        vm_counters.do_collapse_terminate++;
        if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
            vm_counters.do_collapse_terminate_failure++;
        }
    }

    assert(backing_object->pager == NULL);

    backing_object->alive = FALSE;
    vm_object_unlock(backing_object);

#if VM_OBJECT_TRACKING
    if (vm_object_tracking_inited) {
        btlog_remove_entries_for_element(vm_object_tracking_btlog,
            backing_object);
    }
#endif /* VM_OBJECT_TRACKING */

    vm_object_lock_destroy(backing_object);

    zfree(vm_object_zone, backing_object);
}
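
/*
 * Illustrative example (added comment; the numbers are hypothetical, not from
 * the original source): when vm_object_do_collapse() moves the backing
 * object's pager up into "object", the pager still names pages by its own
 * offsets, so the parent's paging_offset must absorb the shadow offset.
 * Assuming:
 *
 *      object->vo_shadow_offset        = 0x4000   (backing_offset)
 *      backing_object->paging_offset   = 0x1000
 *
 * then after the collapse:
 *
 *      object->paging_offset = 0x1000 + 0x4000 = 0x5000
 *
 * so object offset X maps to pager offset X + 0x5000, the same slot the page
 * used while it belonged to the backing object at offset X + 0x4000.
 */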

static void
vm_object_do_bypass(
    vm_object_t object,
    vm_object_t backing_object)
{
    /*
     *  Make the parent shadow the next object
     *  in the chain.
     */

    vm_object_lock_assert_exclusive(object);
    vm_object_lock_assert_exclusive(backing_object);

    vm_object_reference(backing_object->shadow);

    assert(!object->phys_contiguous);
    assert(!backing_object->phys_contiguous);
    object->shadow = backing_object->shadow;
    if (object->shadow) {
        assertf(page_aligned(object->vo_shadow_offset),
            "object %p shadow_offset 0x%llx",
            object, object->vo_shadow_offset);
        assertf(page_aligned(backing_object->vo_shadow_offset),
            "backing_object %p shadow_offset 0x%llx",
            backing_object, backing_object->vo_shadow_offset);
        object->vo_shadow_offset += backing_object->vo_shadow_offset;
    } else {
        /* no shadow, therefore no shadow offset... */
        object->vo_shadow_offset = 0;
    }

    /*
     *  Backing object might have had a copy pointer
     *  to us.  If it did, clear it.
     */
    if (backing_object->copy == object) {
        backing_object->copy = VM_OBJECT_NULL;
    }

    /*
     *  Drop the reference count on backing_object.
     *
     *  Since its ref_count was at least 2, it
     *  will not vanish; so we don't need to call
     *  vm_object_deallocate.
     *  [with a caveat for "named" objects]
     *
     *  The res_count on the backing object is
     *  conditionally decremented.  It's possible
     *  (via vm_pageout_scan) to get here with
     *  a "swapped" object, which has a 0 res_count,
     *  in which case, the backing object res_count
     *  is already down by one.
     *
     *  Don't call vm_object_deallocate unless
     *  ref_count drops to zero.
     *
     *  The ref_count can drop to zero here if the
     *  backing object could be bypassed but not
     *  collapsed, such as when the backing object
     *  is temporary and cachable.
     */
    if (backing_object->ref_count > 2 ||
        (!backing_object->named && backing_object->ref_count > 1)) {
        vm_object_lock_assert_exclusive(backing_object);
        backing_object->ref_count--;
        vm_object_unlock(backing_object);
    } else {
        /*
         *  Drop locks so that we can deallocate
         *  the backing object.
         */

        /*
         * vm_object_collapse (the caller of this function) is
         * now called from contexts that may not guarantee that a
         * valid reference is held on the object... w/o a valid
         * reference, it is unsafe and unwise (you will definitely
         * regret it) to unlock the object and then retake the lock
         * since the object may be terminated and recycled in between.
         * The "activity_in_progress" reference will keep the object
         * stable.
         */
        vm_object_activity_begin(object);
        vm_object_unlock(object);

        vm_object_unlock(backing_object);
        vm_object_deallocate(backing_object);

        /*
         *  Relock object. We don't have to reverify
         *  its state since vm_object_collapse will
         *  do that for us as it starts at the
         *  top of the chain again.
         */

        vm_object_lock(object);
        vm_object_activity_end(object);
    }

    object_bypasses++;
}

/*
 *  vm_object_collapse:
 *
 *  Perform an object collapse or an object bypass if appropriate.
 *  The real work of collapsing and bypassing is performed in
 *  the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *  Requires that the object be locked and the page queues be unlocked.
 */
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;

__private_extern__ void
vm_object_collapse(
    vm_object_t object,
    vm_object_offset_t hint_offset,
    boolean_t can_bypass)
{
    vm_object_t backing_object;
    unsigned int rcount;
    unsigned int size;
    vm_object_t original_object;
    int object_lock_type;
    int backing_object_lock_type;

    vm_object_collapse_calls++;

    assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);

    if (!vm_object_collapse_allowed &&
        !(can_bypass && vm_object_bypass_allowed)) {
        return;
    }

    if (object == VM_OBJECT_NULL) {
        return;
    }

    original_object = object;

    /*
     * The top object was locked "exclusive" by the caller.
     * In the first pass, to determine if we can collapse the shadow chain,
     * take a "shared" lock on the shadow objects.  If we can collapse,
     * we'll have to go down the chain again with exclusive locks.
     */
    object_lock_type = OBJECT_LOCK_EXCLUSIVE;
    backing_object_lock_type = OBJECT_LOCK_SHARED;

retry:
    object = original_object;
    vm_object_lock_assert_exclusive(object);

    while (TRUE) {
        vm_object_collapse_objects++;
        /*
         *  Verify that the conditions are right for either
         *  collapse or bypass:
         */

        /*
         *  There is a backing object, and
         */

        backing_object = object->shadow;
        if (backing_object == VM_OBJECT_NULL) {
            if (object != original_object) {
                vm_object_unlock(object);
            }
            return;
        }
        if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
            vm_object_lock_shared(backing_object);
        } else {
            vm_object_lock(backing_object);
        }

        /*
         *  No pages in the object are currently
         *  being paged out, and
         */
        if (object->paging_in_progress != 0 ||
            object->activity_in_progress != 0) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  The backing object is not read_only,
         *  and no pages in the backing object are
         *  currently being paged out.
         *  The backing object is internal.
         */
        if (!backing_object->internal ||
            backing_object->paging_in_progress != 0 ||
            backing_object->activity_in_progress != 0) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         * Purgeable objects are not supposed to engage in
         * copy-on-write activities, so should not have
         * any shadow objects or be a shadow object to another
         * object.
         * Collapsing a purgeable object would require some
         * updates to the purgeable compressed ledgers.
         */
        if (object->purgable != VM_PURGABLE_DENY ||
            backing_object->purgable != VM_PURGABLE_DENY) {
            panic("vm_object_collapse() attempting to collapse "
                "purgeable object: %p(%d) %p(%d)\n",
                object, object->purgable,
                backing_object, backing_object->purgable);
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  The backing object can't be a copy-object:
         *  the shadow_offset for the copy-object must stay
         *  as 0.  Furthermore (for the 'we have all the
         *  pages' case), if we bypass backing_object and
         *  just shadow the next object in the chain, old
         *  pages from that object would then have to be copied
         *  BOTH into the (former) backing_object and into the
         *  parent object.
         */
        if (backing_object->shadow != VM_OBJECT_NULL &&
            backing_object->shadow->copy == backing_object) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  We can now try to either collapse the backing
         *  object (if the parent is the only reference to
         *  it) or (perhaps) remove the parent's reference
         *  to it.
         *
         *  If there is exactly one reference to the backing
         *  object, we may be able to collapse it into the
         *  parent.
         *
         *  As long as one of the objects is still not known
         *  to the pager, we can collapse them.
         */
        if (backing_object->ref_count == 1 &&
            (vm_object_collapse_compressor_allowed ||
            !object->pager_created
            || (!backing_object->pager_created)
            ) && vm_object_collapse_allowed) {
            /*
             * We need the exclusive lock on the VM objects.
             */
            if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
                /*
                 * We have an object and its shadow locked
                 * "shared". We can't just upgrade the locks
                 * to "exclusive", as some other thread might
                 * also have these objects locked "shared" and
                 * attempt to upgrade one or the other to
                 * "exclusive".  The upgrades would block
                 * forever waiting for the other "shared" locks
                 * to get released.
                 * So we have to release the locks and go
                 * down the shadow chain again (since it could
                 * have changed) with "exclusive" locking.
                 */
                vm_object_unlock(backing_object);
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                goto retry;
            }

            /*
             *  Collapse the object with its backing
             *  object, and try again with the object's
             *  new backing object.
             */

            vm_object_do_collapse(object, backing_object);
            vm_object_collapse_do_collapse++;
            continue;
        }

        /*
         *  Collapsing the backing object was not possible
         *  or permitted, so let's try bypassing it.
         */

        if (!(can_bypass && vm_object_bypass_allowed)) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  If the object doesn't have all its pages present,
         *  we have to make sure no pages in the backing object
         *  "show through" before bypassing it.
         */
        size = (unsigned int)atop(object->vo_size);
        rcount = object->resident_page_count;

        if (rcount != size) {
            vm_object_offset_t offset;
            vm_object_offset_t backing_offset;
            unsigned int backing_rcount;

            /*
             *  If the backing object has a pager but no pagemap,
             *  then we cannot bypass it, because we don't know
             *  what pages it has.
             */
            if (backing_object->pager_created) {
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            /*
             *  If the object has a pager but no pagemap,
             *  then we cannot bypass it, because we don't know
             *  what pages it has.
             */
            if (object->pager_created) {
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            backing_offset = object->vo_shadow_offset;
            backing_rcount = backing_object->resident_page_count;

            if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
                /*
                 * we have enough pages in the backing object to guarantee that
                 * at least 1 of them must be 'uncovered' by a resident page
                 * in the object we're evaluating, so move on and
                 * try to collapse the rest of the shadow chain
                 */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            /*
             *  If all of the pages in the backing object are
             *  shadowed by the parent object, the parent
             *  object no longer has to shadow the backing
             *  object; it can shadow the next one in the
             *  chain.
             *
             *  If the backing object has existence info,
             *  we must examine its existence info
             *  as well.
             */

#define EXISTS_IN_OBJECT(obj, off, rc)                  \
    ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))       \
      == VM_EXTERNAL_STATE_EXISTS) ||                   \
     ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
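
/*
 * Added note (descriptive comment, not in the original source):
 * EXISTS_IN_OBJECT() reports whether "obj" can supply the page at "off"
 * without falling through to its shadow: either the VM compressor holds a
 * copy of that page, or vm_page_lookup() finds a resident page.  The
 * "(rc)--" side effect is an optimization: "rc" starts out as the object's
 * resident page count, and once every resident page has been matched the
 * remaining lookups can be skipped.  The first scan below therefore passes a
 * scratch copy ("rc") so the real "rcount" is still intact if the
 * offset-based second pass has to run.
 */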

            /*
             * Check the hint location first
             * (since it is often the quickest way out of here).
             */
            if (object->cow_hint != ~(vm_offset_t)0) {
                hint_offset = (vm_object_offset_t)object->cow_hint;
            } else {
                hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
                    (hint_offset - 8 * PAGE_SIZE_64) : 0;
            }

            if (EXISTS_IN_OBJECT(backing_object, hint_offset +
                backing_offset, backing_rcount) &&
                !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
                /* dependency right at the hint */
                object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            /*
             * If the object's window onto the backing_object
             * is large compared to the number of resident
             * pages in the backing object, it makes sense to
             * walk the backing_object's resident pages first.
             *
             * NOTE: Pages may be in both the existence map and/or
             * resident, so if we don't find a dependency while
             * walking the backing object's resident page list
             * directly, and there is an existence map, we'll have
             * to run the offset based 2nd pass.  Because we may
             * have to run both passes, we need to be careful
             * not to decrement 'rcount' in the 1st pass
             */
            if (backing_rcount && backing_rcount < (size / 8)) {
                unsigned int rc = rcount;
                vm_page_t p;

                backing_rcount = backing_object->resident_page_count;
                p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
                do {
                    offset = (p->vmp_offset - backing_offset);

                    if (offset < object->vo_size &&
                        offset != hint_offset &&
                        !EXISTS_IN_OBJECT(object, offset, rc)) {
                        /* found a dependency */
                        object->cow_hint = (vm_offset_t) offset; /* atomic */

                        break;
                    }
                    p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
                } while (--backing_rcount);
                if (backing_rcount != 0) {
                    /* try and collapse the rest of the shadow chain */
                    if (object != original_object) {
                        vm_object_unlock(object);
                    }
                    object = backing_object;
                    object_lock_type = backing_object_lock_type;
                    continue;
                }
            }

            /*
             * Walk through the offsets looking for pages in the
             * backing object that show through to the object.
             */
            if (backing_rcount) {
                offset = hint_offset;

                while ((offset =
                    (offset + PAGE_SIZE_64 < object->vo_size) ?
                    (offset + PAGE_SIZE_64) : 0) != hint_offset) {
                    if (EXISTS_IN_OBJECT(backing_object, offset +
                        backing_offset, backing_rcount) &&
                        !EXISTS_IN_OBJECT(object, offset, rcount)) {
                        /* found a dependency */
                        object->cow_hint = (vm_offset_t) offset; /* atomic */
                        break;
                    }
                }
                if (offset != hint_offset) {
                    /* try and collapse the rest of the shadow chain */
                    if (object != original_object) {
                        vm_object_unlock(object);
                    }
                    object = backing_object;
                    object_lock_type = backing_object_lock_type;
                    continue;
                }
            }
        }

        /*
         * We need "exclusive" locks on the 2 VM objects.
         */
        if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
            vm_object_unlock(backing_object);
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object_lock_type = OBJECT_LOCK_EXCLUSIVE;
            backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
            goto retry;
        }

        /* reset the offset hint for any objects deeper in the chain */
        object->cow_hint = (vm_offset_t)0;

        /*
         *  All interesting pages in the backing object
         *  already live in the parent or its pager.
         *  Thus we can bypass the backing object.
         */

        vm_object_do_bypass(object, backing_object);
        vm_object_collapse_do_bypass++;

        /*
         *  Try again with this object's new backing object.
         */

        continue;
    }

    /*
     *  if (object != original_object) {
     *      vm_object_unlock(object);
     *  }
     */
}

/*
 *  Routine:    vm_object_page_remove: [internal]
 *  Purpose:
 *      Removes all physical pages in the specified
 *      object range from the object's list of pages.
 *
 *  In/out conditions:
 *      The object must be locked.
 *      The object must not have paging_in_progress, usually
 *      guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
    vm_object_t             object,
    vm_object_offset_t      start,
    vm_object_offset_t      end)
{
    vm_page_t       p, next;

    /*
     *  One and two page removals are most popular.
     *  The factor of 16 here is somewhat arbitrary.
     *  It balances vm_object_lookup vs iteration.
     */

    if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
        vm_object_page_remove_lookup++;

        for (; start < end; start += PAGE_SIZE_64) {
            p = vm_page_lookup(object, start);
            if (p != VM_PAGE_NULL) {
                assert(!p->vmp_cleaning && !p->vmp_laundry);
                if (!p->vmp_fictitious && p->vmp_pmapped) {
                    pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                }
                VM_PAGE_FREE(p);
            }
        }
    } else {
        vm_object_page_remove_iterate++;

        p = (vm_page_t) vm_page_queue_first(&object->memq);
        while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
            next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
            if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
                assert(!p->vmp_cleaning && !p->vmp_laundry);
                if (!p->vmp_fictitious && p->vmp_pmapped) {
                    pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                }
                VM_PAGE_FREE(p);
            }
            p = next;
        }
    }
}

/*
 *  Routine:    vm_object_coalesce
 *  Function:   Coalesces two objects backing up adjoining
 *              regions of memory into a single object.
 *
 *  returns TRUE if objects were combined.
 *
 *  NOTE:   Only works at the moment if the second object is NULL -
 *          if it's not, which object do we lock first?
 *
 *  Parameters:
 *      prev_object     First object to coalesce
 *      prev_offset     Offset into prev_object
 *      next_object     Second object into coalesce
 *      next_offset     Offset into next_object
 *
 *      prev_size       Size of reference to prev_object
 *      next_size       Size of reference to next_object
 *
 *  Conditions:
 *  The object(s) must *not* be locked. The map must be locked
 *  to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
    vm_object_t                 prev_object,
    vm_object_t                 next_object,
    vm_object_offset_t          prev_offset,
    __unused vm_object_offset_t next_offset,
    vm_object_size_t            prev_size,
    vm_object_size_t            next_size)
{
    vm_object_size_t    newsize;

    if (next_object != VM_OBJECT_NULL) {
        return FALSE;
    }

    if (prev_object == VM_OBJECT_NULL) {
        return TRUE;
    }

    vm_object_lock(prev_object);

    /*
     *  Try to collapse the object first
     */
    vm_object_collapse(prev_object, prev_offset, TRUE);

    /*
     *  Can't coalesce if pages not mapped to
     *  prev_entry may be in use any way:
     *  . more than one reference
     *  . paged out
     *  . shadows another object
     *  . has a copy elsewhere
     *  . is purgeable
     *  . paging references (pages might be in page-list)
     */

    if ((prev_object->ref_count > 1) ||
        prev_object->pager_created ||
        (prev_object->shadow != VM_OBJECT_NULL) ||
        (prev_object->copy != VM_OBJECT_NULL) ||
        (prev_object->true_share != FALSE) ||
        (prev_object->purgable != VM_PURGABLE_DENY) ||
        (prev_object->paging_in_progress != 0) ||
        (prev_object->activity_in_progress != 0)) {
        vm_object_unlock(prev_object);
        return FALSE;
    }

    vm_object_coalesce_count++;

    /*
     *  Remove any pages that may still be in the object from
     *  a previous deallocation.
     */
    vm_object_page_remove(prev_object,
        prev_offset + prev_size,
        prev_offset + prev_size + next_size);

    /*
     *  Extend the object if necessary.
     */
    newsize = prev_offset + prev_size + next_size;
    if (newsize > prev_object->vo_size) {
        assertf(page_aligned(newsize),
            "object %p size 0x%llx",
            prev_object, (uint64_t)newsize);
        prev_object->vo_size = newsize;
    }

    vm_object_unlock(prev_object);
    return TRUE;
}
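
/*
 * Illustrative example (added comment; the values are hypothetical):
 * vm_map uses vm_object_coalesce() when a new anonymous allocation lands
 * immediately after an existing entry.  With
 *
 *      prev_offset = 0x0, prev_size = 0x8000, next_size = 0x4000
 *
 * and a prev_object that passes the checks above, any stale pages in
 * [0x8000, 0xC000) left over from an earlier deallocation are removed and
 *
 *      newsize = 0x0 + 0x8000 + 0x4000 = 0xC000
 *
 * so prev_object->vo_size grows to 0xC000 and the new range can reuse the
 * same object instead of allocating a second one.
 */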

kern_return_t
vm_object_populate_with_private(
    vm_object_t             object,
    vm_object_offset_t      offset,
    ppnum_t                 phys_page,
    vm_size_t               size)
{
    ppnum_t                 base_page;
    vm_object_offset_t      base_offset;

    if (!object->private) {
        return KERN_FAILURE;
    }

    base_page = phys_page;

    vm_object_lock(object);

    if (!object->phys_contiguous) {
        vm_page_t       m;

        if ((base_offset = trunc_page_64(offset)) != offset) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        }
        base_offset += object->paging_offset;

        while (size) {
            m = vm_page_lookup(object, base_offset);

            if (m != VM_PAGE_NULL) {
                if (m->vmp_fictitious) {
                    if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
                        vm_page_lockspin_queues();
                        m->vmp_private = TRUE;
                        vm_page_unlock_queues();

                        m->vmp_fictitious = FALSE;
                        VM_PAGE_SET_PHYS_PAGE(m, base_page);
                    }
                } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
                    if (!m->vmp_private) {
                        /*
                         * we'd leak a real page... that can't be right
                         */
                        panic("vm_object_populate_with_private - %p not private", m);
                    }
                    if (m->vmp_pmapped) {
                        /*
                         * pmap call to clear old mapping
                         */
                        pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                    }
                    VM_PAGE_SET_PHYS_PAGE(m, base_page);
                }
            } else {
                m = vm_page_grab_fictitious(TRUE);

                /*
                 * private normally requires lock_queues but since we
                 * are initializing the page, its not necessary here
                 */
                m->vmp_private = TRUE;
                m->vmp_fictitious = FALSE;
                VM_PAGE_SET_PHYS_PAGE(m, base_page);
                m->vmp_unusual = TRUE;
                m->vmp_busy = FALSE;

                vm_page_insert(m, object, base_offset);
            }
            base_page++;            /* Go to the next physical page */
            base_offset += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
    } else {
        /* NOTE: we should check the original settings here */
        /* if we have a size > zero a pmap call should be made */
        /* to disable the range */

        /* shadows on contiguous memory are not allowed */
        /* we therefore can use the offset field */
        object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
        assertf(page_aligned(size),
            "object %p size 0x%llx",
            object, (uint64_t)size);
        object->vo_size = size;
    }
    vm_object_unlock(object);

    return KERN_SUCCESS;
}

kern_return_t
memory_object_create_named(
    memory_object_t         pager,
    memory_object_offset_t  size,
    memory_object_control_t *control)
{
    vm_object_t             object;

    *control = MEMORY_OBJECT_CONTROL_NULL;
    if (pager == MEMORY_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    object = vm_object_memory_object_associate(pager,
        VM_OBJECT_NULL,
        size,
        TRUE);
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_OBJECT;
    }

    /* wait for object (if any) to be ready */
    if (object != VM_OBJECT_NULL) {
        vm_object_lock(object);
        object->named = TRUE;
        while (!object->pager_ready) {
            vm_object_sleep(object,
                VM_OBJECT_EVENT_PAGER_READY,
                THREAD_UNINT);
        }
        *control = object->pager_control;
        vm_object_unlock(object);
    }
    return KERN_SUCCESS;
}

/*
 *  Routine:    memory_object_recover_named [user interface]
 *  Purpose:
 *      Attempt to recover a named reference for a VM object.
 *      VM will verify that the object has not already started
 *      down the termination path, and if it has, will optionally
 *      wait for that to finish.
 *  Returns:
 *      KERN_SUCCESS - we recovered a named reference on the object
 *      KERN_FAILURE - we could not recover a reference (object dead)
 *      KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
    memory_object_control_t control,
    boolean_t               wait_on_terminating)
{
    vm_object_t             object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
restart:
    vm_object_lock(object);

    if (object->terminating && wait_on_terminating) {
        vm_object_wait(object,
            VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
            THREAD_UNINT);
        goto restart;
    }

    if (!object->alive) {
        vm_object_unlock(object);
        return KERN_FAILURE;
    }

    if (object->named == TRUE) {
        vm_object_unlock(object);
        return KERN_SUCCESS;
    }
    object->named = TRUE;
    vm_object_lock_assert_exclusive(object);
    object->ref_count++;
    while (!object->pager_ready) {
        vm_object_sleep(object,
            VM_OBJECT_EVENT_PAGER_READY,
            THREAD_UNINT);
    }
    vm_object_unlock(object);
    return KERN_SUCCESS;
}

/*
 *  vm_object_release_name:
 *
 *  Enforces name semantic on memory_object reference count decrement
 *  This routine should not be called unless the caller holds a name
 *  reference gained through the memory_object_create_named.
 *
 *  If the TERMINATE_IDLE flag is set, the call will return if the
 *  reference count is not 1. i.e. idle with the only remaining reference
 *  being the name.
 *  If the decision is made to proceed the name field flag is set to
 *  false and the reference count is decremented.  If the RESPECT_CACHE
 *  flag is set and the reference count has gone to zero, the
 *  memory_object is checked to see if it is cacheable otherwise when
 *  the reference count is zero, it is simply terminated.
 */

__private_extern__ kern_return_t
vm_object_release_name(
    vm_object_t     object,
    int             flags)
{
    vm_object_t     shadow;
    boolean_t       original_object = TRUE;

    while (object != VM_OBJECT_NULL) {
        vm_object_lock(object);

        assert(object->alive);
        if (original_object) {
            assert(object->named);
        }
        assert(object->ref_count > 0);

        /*
         *  We have to wait for initialization before
         *  destroying or caching the object.
         */

        if (object->pager_created && !object->pager_initialized) {
            assert(!object->can_persist);
            vm_object_assert_wait(object,
                VM_OBJECT_EVENT_INITIALIZED,
                THREAD_UNINT);
            vm_object_unlock(object);
            thread_block(THREAD_CONTINUE_NULL);
            continue;
        }

        if (((object->ref_count > 1)
            && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
            || (object->terminating)) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        } else {
            if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
                vm_object_unlock(object);
                return KERN_SUCCESS;
            }
        }

        if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
            (object->ref_count == 1)) {
            if (original_object) {
                object->named = FALSE;
            }
            vm_object_unlock(object);
            /* let vm_object_deallocate push this thing into */
            /* the cache, if that it is where it is bound */
            vm_object_deallocate(object);
            return KERN_SUCCESS;
        }

        shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

        if (object->ref_count == 1) {
            if (vm_object_terminate(object) != KERN_SUCCESS) {
                if (original_object) {
                    return KERN_FAILURE;
                } else {
                    return KERN_SUCCESS;
                }
            }
            if (shadow != VM_OBJECT_NULL) {
                original_object = FALSE;
                object = shadow;
                continue;
            }
            return KERN_SUCCESS;
        } else {
            vm_object_lock_assert_exclusive(object);
            object->ref_count--;
            assert(object->ref_count > 0);
            if (original_object) {
                object->named = FALSE;
            }
            vm_object_unlock(object);
            return KERN_SUCCESS;
        }
    }
    /*NOTREACHED*/
    return KERN_FAILURE;
}

__private_extern__ kern_return_t
vm_object_lock_request(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    memory_object_return_t  should_return,
    int                     flags,
    vm_prot_t               prot)
{
    __unused boolean_t      should_flush;

    should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

    /*
     *  Check for bogus arguments.
     */

    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * extend range for conservative operations (copy-on-write, sync, ...)
     * truncate range for destructive operations (purge, ...)
     */
    size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
    offset = vm_object_trunc_page(offset);

    /*
     *  Lock the object, and acquire a paging reference to
     *  prevent the memory_object reference from being released.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    (void)vm_object_update(object,
        offset, size, NULL, NULL, should_return, flags, prot);

    vm_object_paging_end(object);
    vm_object_unlock(object);

    return KERN_SUCCESS;
}
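
/*
 * Illustrative arithmetic (added comment; the values are hypothetical): the
 * round/trunc step above widens an unaligned request to page boundaries.
 * Assuming 4KB pages, a request with offset = 0x1800 and size = 0x1000:
 *
 *      size   = vm_object_round_page(0x1800 + 0x1000) - vm_object_trunc_page(0x1800)
 *             = 0x3000 - 0x1000 = 0x2000
 *      offset = vm_object_trunc_page(0x1800) = 0x1000
 *
 * so vm_object_update() operates on the two pages [0x1000, 0x3000) that
 * overlap the caller's range.
 */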

/*
 * Empty a purgeable object by grabbing the physical pages assigned to it and
 * putting them on the free queue without writing them to backing store, etc.
 * When the pages are next touched they will be demand zero-fill pages.  We
 * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
 * skip referenced/dirty pages, pages on the active queue, etc.  We're more
 * than happy to grab these since this is a purgeable object.  We mark the
 * object as "empty" after reaping its pages.
 *
 * On entry the object must be locked and it must be
 * purgeable with no delayed copies pending.
 */
uint64_t
vm_object_purge(vm_object_t object, int flags)
{
    unsigned int    object_page_count = 0, pgcount = 0;
    uint64_t        total_purged_pgcount = 0;
    boolean_t       skipped_object = FALSE;

    vm_object_lock_assert_exclusive(object);

    if (object->purgable == VM_PURGABLE_DENY) {
        return 0;
    }

    assert(object->copy == VM_OBJECT_NULL);
    assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

    /*
     * We need to set the object's state to VM_PURGABLE_EMPTY *before*
     * reaping its pages.  We update vm_page_purgeable_count in bulk
     * and we don't want vm_page_remove() to update it again for each
     * page we reap later.
     *
     * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
     * are all accounted for in the "volatile" ledgers, so this does not
     * make any difference.
     * If we transitioned directly from NONVOLATILE to EMPTY,
     * vm_page_purgeable_count must have been updated when the object
     * was dequeued from its volatile queue and the purgeable ledgers
     * must have also been updated accordingly at that time (in
     * vm_object_purgable_control()).
     */
    if (object->purgable == VM_PURGABLE_VOLATILE) {
        unsigned int delta;

        assert(object->resident_page_count >=
            object->wired_page_count);
        delta = (object->resident_page_count -
            object->wired_page_count);
        if (delta != 0) {
            assert(vm_page_purgeable_count >=
                delta);
            OSAddAtomic(-delta,
                (SInt32 *)&vm_page_purgeable_count);
        }
        if (object->wired_page_count != 0) {
            assert(vm_page_purgeable_wired_count >=
                object->wired_page_count);
            OSAddAtomic(-object->wired_page_count,
                (SInt32 *)&vm_page_purgeable_wired_count);
        }
        object->purgable = VM_PURGABLE_EMPTY;
    }
    assert(object->purgable == VM_PURGABLE_EMPTY);

    object_page_count = object->resident_page_count;

    vm_object_reap_pages(object, REAP_PURGEABLE);

    if (object->resident_page_count >= object_page_count) {
        total_purged_pgcount = 0;
    } else {
        total_purged_pgcount = object_page_count - object->resident_page_count;
    }

    if (object->pager != NULL) {
        assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

        if (object->activity_in_progress == 0 &&
            object->paging_in_progress == 0) {
            /*
             * Also reap any memory coming from this object
             * in the VM compressor.
             *
             * There are no operations in progress on the VM object
             * and no operation can start while we're holding the
             * VM object lock, so it's safe to reap the compressed
             * pages and update the page counts.
             */
            pgcount = vm_compressor_pager_get_count(object->pager);
            if (pgcount) {
                pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
                vm_compressor_pager_count(object->pager,
                    -pgcount,
                    FALSE, /* shared */
                    object);
                vm_object_owner_compressed_update(object,
                    -pgcount);
            }
            if (!(flags & C_DONT_BLOCK)) {
                assert(vm_compressor_pager_get_count(object->pager)
                    == 0);
            }
        } else {
            /*
             * There's some kind of paging activity in progress
             * for this object, which could result in a page
             * being compressed or decompressed, possibly while
             * the VM object is not locked, so it could race
             * with us.
             *
             * We can't really synchronize this without possibly
             * causing a deadlock when the compressor needs to
             * allocate or free memory while compressing or
             * decompressing a page from a purgeable object
             * mapped in the kernel_map...
             *
             * So let's not attempt to purge the compressor
             * pager if there's any kind of operation in
             * progress on the VM object.
             */
            skipped_object = TRUE;
        }
    }

    vm_object_lock_assert_exclusive(object);

    total_purged_pgcount += pgcount;

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
        VM_KERNEL_UNSLIDE_OR_PERM(object),      /* purged object */
        object_page_count,
        total_purged_pgcount,
        skipped_object,
        0);

    return total_purged_pgcount;
}
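
/*
 * Added summary (descriptive comment, not in the original source): a purge
 * proceeds in two phases.  Resident pages are reaped first via
 * vm_object_reap_pages(..., REAP_PURGEABLE); then, if the object has a
 * compressor pager and no paging activity is in flight, its compressed pages
 * are reaped as well.  If there is paging activity, the compressor portion
 * is skipped ("skipped_object") rather than risking a deadlock against the
 * compressor.  The return value counts both resident and compressed pages
 * that were reclaimed.
 */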

/*
 * vm_object_purgeable_control() allows the caller to control and investigate the
 * state of a purgeable object.  A purgeable object is created via a call to
 * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
 * never be coalesced with any other object -- even other purgeable objects --
 * and will thus always remain a distinct object.  A purgeable object has
 * special semantics when its reference count is exactly 1.  If its reference
 * count is greater than 1, then a purgeable object will behave like a normal
 * object and attempts to use this interface will result in an error return
 * of KERN_INVALID_ARGUMENT.
 *
 * A purgeable object may be put into a "volatile" state which will make the
 * object's pages eligible for being reclaimed without paging to backing
 * store if the system runs low on memory.  If the pages in a volatile
 * purgeable object are reclaimed, the purgeable object is said to have been
 * "emptied."  When a purgeable object is emptied the system will reclaim as
 * many pages from the object as it can in a convenient manner (pages already
 * en route to backing store or busy for other reasons are left as is).  When
 * a purgeable object is made volatile, its pages will generally be reclaimed
 * before other pages in the application's working set.  This semantic is
 * generally used by applications which can recreate the data in the object
 * faster than it can be paged in.  One such example might be media assets
 * which can be reread from a much faster RAID volume.
 *
 * A purgeable object may be designated as "non-volatile" which means it will
 * behave like all other objects in the system with pages being written to and
 * read from backing store as needed to satisfy system memory needs.  If the
 * object was emptied before the object was made non-volatile, that fact will
 * be returned as the old state of the purgeable object (see
 * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
 * were reclaimed as part of emptying the object will be refaulted in as
 * zero-fill on demand.  It is up to the application to note that an object
 * was emptied and recreate the object's contents if necessary.  When a
 * purgeable object is made non-volatile, its pages will generally not be paged
 * out to backing store in the immediate future.  A purgeable object may also
 * be manually emptied.
 *
 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
 * volatile purgeable object may be queried at any time.  This information may
 * be used as a control input to let the application know when the system is
 * experiencing memory pressure and is reclaiming memory.
 *
 * The specified address may be any address within the purgeable object.  If
 * the specified address does not represent any object in the target task's
 * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
 * object containing the specified address is not a purgeable object, then
 * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
 * returned.
 *
 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
 * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
 * state is used to set the new state of the purgeable object and return its
 * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
 * object is returned in the parameter state.
 *
 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
 * the non-volatile, volatile and volatile/empty states described above.
 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
 * immediately reclaim as many pages in the object as can be conveniently
 * collected (some may have already been written to backing store or be
 * otherwise busy).
 *
 * The process of making a purgeable object non-volatile and determining its
 * previous state is atomic.  Thus, if a purgeable object is made
 * VM_PURGABLE_NONVOLATILE and the old state is returned as
 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
 * completely intact and will remain so until the object is made volatile
 * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
 * was reclaimed while it was in a volatile state and its previous contents
 * have been lost.
 *
 * The object must be locked.
 */
kern_return_t
vm_object_purgable_control(
    vm_object_t     object,
    vm_purgable_t   control,
    int             *state)
{
    int             old_state;
    int             new_state;

    if (object == VM_OBJECT_NULL) {
        /*
         * Object must already be present or it can't be purgeable.
         */
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock_assert_exclusive(object);

    /*
     * Get current state of the purgeable object.
     */
    old_state = object->purgable;
    if (old_state == VM_PURGABLE_DENY) {
        return KERN_INVALID_ARGUMENT;
    }

    /* purgeable can't have delayed copies - now or in the future */
    assert(object->copy == VM_OBJECT_NULL);
    assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

    /*
     * Execute the desired operation.
     */
    if (control == VM_PURGABLE_GET_STATE) {
        *state = old_state;
        return KERN_SUCCESS;
    }

    if (control == VM_PURGABLE_SET_STATE &&
        object->purgeable_only_by_kernel) {
        return KERN_PROTECTION_FAILURE;
    }

    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
        object->volatile_empty = TRUE;
    }
    if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
        object->volatile_fault = TRUE;
    }

    new_state = *state & VM_PURGABLE_STATE_MASK;
    if (new_state == VM_PURGABLE_VOLATILE) {
        if (old_state == VM_PURGABLE_EMPTY) {
            /* what's been emptied must stay empty */
            new_state = VM_PURGABLE_EMPTY;
        }
        if (object->volatile_empty) {
            /* debugging mode: go straight to empty */
            new_state = VM_PURGABLE_EMPTY;
        }
    }

    switch (new_state) {
    case VM_PURGABLE_DENY:
        /*
         * Attempting to convert purgeable memory to non-purgeable:
         * not supported.
         */
        return KERN_INVALID_ARGUMENT;
    case VM_PURGABLE_NONVOLATILE:
        object->purgable = new_state;

        if (old_state == VM_PURGABLE_VOLATILE) {
            unsigned int delta;

            assert(object->resident_page_count >=
                object->wired_page_count);
            delta = (object->resident_page_count -
                object->wired_page_count);

            assert(vm_page_purgeable_count >= delta);

            if (delta != 0) {
                OSAddAtomic(-delta,
                    (SInt32 *)&vm_page_purgeable_count);
            }
            if (object->wired_page_count != 0) {
                assert(vm_page_purgeable_wired_count >=
                    object->wired_page_count);
                OSAddAtomic(-object->wired_page_count,
                    (SInt32 *)&vm_page_purgeable_wired_count);
            }

            vm_page_lock_queues();

            /* object should be on a queue */
            assert(object->objq.next != NULL &&
                object->objq.prev != NULL);
            purgeable_q_t queue;

            /*
             * Move object from its volatile queue to the
             * non-volatile queue...
             */
            queue = vm_purgeable_object_remove(object);
            assert(queue);

            if (object->purgeable_when_ripe) {
                vm_purgeable_token_delete_last(queue);
            }
            assert(queue->debug_count_objects >= 0);

            vm_page_unlock_queues();
        }
        if (old_state == VM_PURGABLE_VOLATILE ||
            old_state == VM_PURGABLE_EMPTY) {
            /*
             * Transfer the object's pages from the volatile to
             * non-volatile ledgers.
             */
            vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
        }

        break;

    case VM_PURGABLE_VOLATILE:
        if (object->volatile_fault) {
            vm_page_t       p;
            int             refmod;

            vm_page_queue_iterate(&object->memq, p, vmp_listq) {
                if (p->vmp_busy ||
                    VM_PAGE_WIRED(p) ||
                    p->vmp_fictitious) {
                    continue;
                }
                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                if ((refmod & VM_MEM_MODIFIED) &&
                    !p->vmp_dirty) {
                    SET_PAGE_DIRTY(p, FALSE);
                }
            }
        }

        assert(old_state != VM_PURGABLE_EMPTY);

        purgeable_q_t queue;

        /* find the correct queue */
        if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
            queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
        } else {
            if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
                queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
            } else {
                queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
            }
        }

        if (old_state == VM_PURGABLE_NONVOLATILE ||
            old_state == VM_PURGABLE_EMPTY) {
            unsigned int delta;

            if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
                VM_PURGABLE_NO_AGING) {
                object->purgeable_when_ripe = FALSE;
            } else {
                object->purgeable_when_ripe = TRUE;
            }

            if (object->purgeable_when_ripe) {
                kern_return_t result;

                /* try to add token... this can fail */
                vm_page_lock_queues();

                result = vm_purgeable_token_add(queue);
                if (result != KERN_SUCCESS) {
                    vm_page_unlock_queues();
                    return result;
                }
                vm_page_unlock_queues();
            }

            assert(object->resident_page_count >=
                object->wired_page_count);
            delta = (object->resident_page_count -
                object->wired_page_count);

            if (delta != 0) {
                OSAddAtomic(delta,
                    &vm_page_purgeable_count);
            }
            if (object->wired_page_count != 0) {
                OSAddAtomic(object->wired_page_count,
                    &vm_page_purgeable_wired_count);
            }

            object->purgable = new_state;

            /* object should be on "non-volatile" queue */
            assert(object->objq.next != NULL);
            assert(object->objq.prev != NULL);
        } else if (old_state == VM_PURGABLE_VOLATILE) {
            purgeable_q_t   old_queue;
            boolean_t       purgeable_when_ripe;

            /*
             * if reassigning priorities / purgeable groups, we don't change the
             * token queue. So moving priorities will not make pages stay around longer.
             * Reasoning is that the algorithm gives most priority to the most important
             * object. If a new token is added, the most important object' priority is boosted.
             * This biases the system already for purgeable queues that move a lot.
             * It doesn't seem more biasing is necessary in this case, where no new object is added.
             */
            assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */

            old_queue = vm_purgeable_object_remove(object);
            assert(old_queue);

            if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
                VM_PURGABLE_NO_AGING) {
                purgeable_when_ripe = FALSE;
            } else {
                purgeable_when_ripe = TRUE;
            }

            if (old_queue != queue ||
                (purgeable_when_ripe !=
                object->purgeable_when_ripe)) {
                kern_return_t result;

                /* Changing queue. Have to move token. */
                vm_page_lock_queues();
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_delete_last(old_queue);
                }
                object->purgeable_when_ripe = purgeable_when_ripe;
                if (object->purgeable_when_ripe) {
                    result = vm_purgeable_token_add(queue);
                    assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
                }
                vm_page_unlock_queues();
            }
        }

        vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
        if (old_state == VM_PURGABLE_NONVOLATILE) {
            vm_purgeable_accounting(object,
                VM_PURGABLE_NONVOLATILE);
        }

        assert(queue->debug_count_objects >= 0);

        break;

    case VM_PURGABLE_EMPTY:
        if (object->volatile_fault) {
            vm_page_t       p;
            int             refmod;

            vm_page_queue_iterate(&object->memq, p, vmp_listq) {
                if (p->vmp_busy ||
                    VM_PAGE_WIRED(p) ||
                    p->vmp_fictitious) {
                    continue;
                }
                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                if ((refmod & VM_MEM_MODIFIED) &&
                    !p->vmp_dirty) {
                    SET_PAGE_DIRTY(p, FALSE);
                }
            }
        }

        if (old_state == VM_PURGABLE_VOLATILE) {
            purgeable_q_t   old_queue;

            /* object should be on a queue */
            assert(object->objq.next != NULL &&
                object->objq.prev != NULL);

            old_queue = vm_purgeable_object_remove(object);
            assert(old_queue);
            if (object->purgeable_when_ripe) {
                vm_page_lock_queues();
                vm_purgeable_token_delete_first(old_queue);
                vm_page_unlock_queues();
            }
        }

        if (old_state == VM_PURGABLE_NONVOLATILE) {
            /*
             * This object's pages were previously accounted as
             * "non-volatile" and now need to be accounted as
             * "volatile".
             */
            vm_purgeable_accounting(object,
                VM_PURGABLE_NONVOLATILE);
            /*
             * Set to VM_PURGABLE_EMPTY because the pages are no
             * longer accounted in the "non-volatile" ledger
             * and are also not accounted for in
             * "vm_page_purgeable_count".
             */
            object->purgable = VM_PURGABLE_EMPTY;
        }

        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);

        break;
    }

    *state = old_state;

    vm_object_lock_assert_exclusive(object);

    return KERN_SUCCESS;
}
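
/*
 * Usage sketch (added comment; illustrative user-space code, not part of this
 * file and not compiled here).  The purgeable-object interface documented
 * above is normally reached from user space through vm_allocate() and
 * vm_purgable_control(); error handling is omitted:
 *
 *      #include <mach/mach.h>
 *
 *      vm_address_t addr = 0;
 *      vm_size_t    size = 16 * 4096;
 *      int          state;
 *
 *      // create a purgeable region
 *      vm_allocate(mach_task_self(), &addr, size,
 *          VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *      // mark it volatile while its contents are cheap to recreate
 *      state = VM_PURGABLE_VOLATILE;
 *      vm_purgable_control(mach_task_self(), addr,
 *          VM_PURGABLE_SET_STATE, &state);
 *
 *      // before reusing the data, make it non-volatile and check whether
 *      // it was emptied in the meantime (old state is returned in "state")
 *      state = VM_PURGABLE_NONVOLATILE;
 *      vm_purgable_control(mach_task_self(), addr,
 *          VM_PURGABLE_SET_STATE, &state);
 *      if (state == VM_PURGABLE_EMPTY) {
 *              // contents were reclaimed: regenerate them
 *      }
 */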

kern_return_t
vm_object_get_page_counts(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    unsigned int            *resident_page_count,
    unsigned int            *dirty_page_count)
{
    kern_return_t           kr = KERN_SUCCESS;
    boolean_t               count_dirty_pages = FALSE;
    vm_page_t               p = VM_PAGE_NULL;
    unsigned int            local_resident_count = 0;
    unsigned int            local_dirty_count = 0;
    vm_object_offset_t      cur_offset = 0;
    vm_object_offset_t      end_offset = 0;

    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    cur_offset = offset;

    end_offset = offset + size;

    vm_object_lock_assert_exclusive(object);

    if (dirty_page_count != NULL) {
        count_dirty_pages = TRUE;
    }

    if (resident_page_count != NULL && count_dirty_pages == FALSE) {
        /*
         * Fast path when:
         * - we only want the resident page count, and,
         * - the entire object is exactly covered by the request.
         */
        if (offset == 0 && (object->vo_size == size)) {
            *resident_page_count = object->resident_page_count;
            goto out;
        }
    }

    if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
        vm_page_queue_iterate(&object->memq, p, vmp_listq) {
            if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
                local_resident_count++;

                if (count_dirty_pages) {
                    if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
                        local_dirty_count++;
                    }
                }
            }
        }
    } else {
        for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
            p = vm_page_lookup(object, cur_offset);

            if (p != VM_PAGE_NULL) {
                local_resident_count++;

                if (count_dirty_pages) {
                    if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
                        local_dirty_count++;
                    }
                }
            }
        }
    }

    if (resident_page_count != NULL) {
        *resident_page_count = local_resident_count;
    }

    if (dirty_page_count != NULL) {
        *dirty_page_count = local_dirty_count;
    }

out:
    return kr;
}

/*
 *  vm_object_reference:
 *
 *  Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
    vm_object_t     object)
{
    if (object == VM_OBJECT_NULL) {
        return;
    }

    vm_object_lock(object);
    assert(object->ref_count > 0);
    vm_object_reference_locked(object);
    vm_object_unlock(object);
}
/*
 * vm_object_transpose
 *
 * This routine takes two VM objects of the same size and exchanges
 * their backing store.
 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
 *
 * The VM objects must not be locked by caller.
 */
unsigned int vm_object_transpose_count = 0;
kern_return_t
vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size)
{
	vm_object_t             tmp_object;
	kern_return_t           retval;
	boolean_t               object1_locked, object2_locked;
	vm_page_t               page;
	vm_object_offset_t      page_offset;

	tmp_object = VM_OBJECT_NULL;
	object1_locked = FALSE; object2_locked = FALSE;

	if (object1 == object2 ||
	    object1 == VM_OBJECT_NULL ||
	    object2 == VM_OBJECT_NULL) {
		/*
		 * If the 2 VM objects are the same, there's
		 * no point in exchanging their backing store.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Since we need to lock both objects at the same time,
	 * make sure we always lock them in the same order to
	 * avoid deadlocks.
	 */
	if (object1 > object2) {
		tmp_object = object1;
		object1 = object2;
		object2 = tmp_object;
	}

	/*
	 * Allocate a temporary VM object to hold object1's contents
	 * while we copy object2 to object1.
	 */
	tmp_object = vm_object_allocate(transpose_size);
	vm_object_lock(tmp_object);
	tmp_object->can_persist = FALSE;

	/*
	 * Grab control of the 1st VM object.
	 */
	vm_object_lock(object1);
	object1_locked = TRUE;
	if (!object1->alive || object1->terminating ||
	    object1->copy || object1->shadow || object1->shadowed ||
	    object1->purgable != VM_PURGABLE_DENY) {
		/*
		 * We don't deal with copy or shadow objects (yet).
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}
	/*
	 * We're about to mess with the object's backing store and
	 * taking a "paging_in_progress" reference wouldn't be enough
	 * to prevent any paging activity on this object, so the caller should
	 * have "quiesced" the objects beforehand, via a UPL operation with
	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
	 *
	 * Wait for any paging operation to complete (but only paging, not
	 * other kind of activities not linked to the pager).  After we're
	 * satisfied that there's no more paging in progress, we keep the
	 * object locked, to guarantee that no one tries to access its pager.
	 */
	vm_object_paging_only_wait(object1, THREAD_UNINT);

	/*
	 * Same as above for the 2nd object...
	 */
	vm_object_lock(object2);
	object2_locked = TRUE;
	if (!object2->alive || object2->terminating ||
	    object2->copy || object2->shadow || object2->shadowed ||
	    object2->purgable != VM_PURGABLE_DENY) {
		retval = KERN_INVALID_VALUE;
		goto done;
	}
	vm_object_paging_only_wait(object2, THREAD_UNINT);

	if (object1->vo_size != object2->vo_size ||
	    object1->vo_size != transpose_size) {
		/*
		 * If the 2 objects don't have the same size, we can't
		 * exchange their backing stores or one would overflow.
		 * If their size doesn't match the caller's
		 * "transpose_size", we can't do it either because the
		 * transpose operation will affect the entire span of
		 * the objects.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Transpose the lists of resident pages.
	 * This also updates the resident_page_count and the memq_hint.
	 */
	if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
		/*
		 * No pages in object1, just transfer pages
		 * from object2 to object1.  No need to go through
		 * an intermediate object.
		 */
		while (!vm_page_queue_empty(&object2->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object2->memq);
			vm_page_rename(page, object1, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object2->memq));
	} else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
		/*
		 * No pages in object2, just transfer pages
		 * from object1 to object2.  No need to go through
		 * an intermediate object.
		 */
		while (!vm_page_queue_empty(&object1->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object1->memq);
			vm_page_rename(page, object2, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object1->memq));
	} else {
		/* transfer object1's pages to tmp_object */
		while (!vm_page_queue_empty(&object1->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object1->memq);
			page_offset = page->vmp_offset;
			vm_page_remove(page, TRUE);
			page->vmp_offset = page_offset;
			vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
		}
		assert(vm_page_queue_empty(&object1->memq));
		/* transfer object2's pages to object1 */
		while (!vm_page_queue_empty(&object2->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object2->memq);
			vm_page_rename(page, object1, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object2->memq));
		/* transfer tmp_object's pages to object2 */
		while (!vm_page_queue_empty(&tmp_object->memq)) {
			page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
			vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
			vm_page_insert(page, object2, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&tmp_object->memq));
	}

#define __TRANSPOSE_FIELD(field)                                \
MACRO_BEGIN                                                     \
	tmp_object->field = object1->field;                     \
	object1->field = object2->field;                        \
	object2->field = tmp_object->field;                     \
MACRO_END

	/* "Lock" refers to the object not its contents */
	/* "size" should be identical */
	assert(object1->vo_size == object2->vo_size);
	/* "memq_hint" was updated above when transposing pages */
	/* "ref_count" refers to the object not its contents */
	assert(object1->ref_count >= 1);
	assert(object2->ref_count >= 1);
	/* "resident_page_count" was updated above when transposing pages */
	/* "wired_page_count" was updated above when transposing pages */
#if !VM_TAG_ACTIVE_UPDATE
	/* "wired_objq" was dealt with along with "wired_page_count" */
#endif /* ! VM_TAG_ACTIVE_UPDATE */
	/* "reusable_page_count" was updated above when transposing pages */
	/* there should be no "copy" */
	assert(!object1->copy);
	assert(!object2->copy);
	/* there should be no "shadow" */
	assert(!object1->shadow);
	assert(!object2->shadow);
	__TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
	__TRANSPOSE_FIELD(pager);
	__TRANSPOSE_FIELD(paging_offset);
	__TRANSPOSE_FIELD(pager_control);
	/* update the memory_objects' pointers back to the VM objects */
	if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
		memory_object_control_collapse(&object1->pager_control,
		    object1);
	}
	if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
		memory_object_control_collapse(&object2->pager_control,
		    object2);
	}
	__TRANSPOSE_FIELD(copy_strategy);
	/* "paging_in_progress" refers to the object not its contents */
	assert(!object1->paging_in_progress);
	assert(!object2->paging_in_progress);
	assert(object1->activity_in_progress);
	assert(object2->activity_in_progress);
	/* "all_wanted" refers to the object not its contents */
	__TRANSPOSE_FIELD(pager_created);
	__TRANSPOSE_FIELD(pager_initialized);
	__TRANSPOSE_FIELD(pager_ready);
	__TRANSPOSE_FIELD(pager_trusted);
	__TRANSPOSE_FIELD(can_persist);
	__TRANSPOSE_FIELD(internal);
	__TRANSPOSE_FIELD(private);
	__TRANSPOSE_FIELD(pageout);
	/* "alive" should be set */
	assert(object1->alive);
	assert(object2->alive);
	/* "purgeable" should be non-purgeable */
	assert(object1->purgable == VM_PURGABLE_DENY);
	assert(object2->purgable == VM_PURGABLE_DENY);
	/* "shadowed" refers to the object not its contents */
	__TRANSPOSE_FIELD(purgeable_when_ripe);
	__TRANSPOSE_FIELD(true_share);
	/* "terminating" should not be set */
	assert(!object1->terminating);
	assert(!object2->terminating);
	/* transfer "named" reference if needed */
	if (object1->named && !object2->named) {
		assert(object1->ref_count >= 2);
		assert(object2->ref_count >= 1);
		object1->ref_count--;
		object2->ref_count++;
	} else if (!object1->named && object2->named) {
		assert(object1->ref_count >= 1);
		assert(object2->ref_count >= 2);
		object1->ref_count++;
		object2->ref_count--;
	}
	__TRANSPOSE_FIELD(named);
	/* "shadow_severed" refers to the object not its contents */
	__TRANSPOSE_FIELD(phys_contiguous);
	__TRANSPOSE_FIELD(nophyscache);
	/* "cached_list.next" points to transposed object */
	object1->cached_list.next = (queue_entry_t) object2;
	object2->cached_list.next = (queue_entry_t) object1;
	/* "cached_list.prev" should be NULL */
	assert(object1->cached_list.prev == NULL);
	assert(object2->cached_list.prev == NULL);
	__TRANSPOSE_FIELD(last_alloc);
	__TRANSPOSE_FIELD(sequential);
	__TRANSPOSE_FIELD(pages_created);
	__TRANSPOSE_FIELD(pages_used);
	__TRANSPOSE_FIELD(scan_collisions);
	__TRANSPOSE_FIELD(cow_hint);
	__TRANSPOSE_FIELD(wimg_bits);
	__TRANSPOSE_FIELD(set_cache_attr);
	__TRANSPOSE_FIELD(code_signed);
	object1->transposed = TRUE;
	object2->transposed = TRUE;
	__TRANSPOSE_FIELD(mapping_in_progress);
	__TRANSPOSE_FIELD(volatile_empty);
	__TRANSPOSE_FIELD(volatile_fault);
	__TRANSPOSE_FIELD(all_reusable);
	assert(object1->blocked_access);
	assert(object2->blocked_access);
	__TRANSPOSE_FIELD(set_cache_attr);
	assert(!object1->object_is_shared_cache);
	assert(!object2->object_is_shared_cache);
	/* ignore purgeable_queue_type and purgeable_queue_group */
	assert(!object1->io_tracking);
	assert(!object2->io_tracking);
#if VM_OBJECT_ACCESS_TRACKING
	assert(!object1->access_tracking);
	assert(!object2->access_tracking);
#endif /* VM_OBJECT_ACCESS_TRACKING */
	__TRANSPOSE_FIELD(no_tag_update);
#if CONFIG_SECLUDED_MEMORY
	assert(!object1->eligible_for_secluded);
	assert(!object2->eligible_for_secluded);
	assert(!object1->can_grab_secluded);
	assert(!object2->can_grab_secluded);
#else /* CONFIG_SECLUDED_MEMORY */
	assert(object1->__object3_unused_bits == 0);
	assert(object2->__object3_unused_bits == 0);
#endif /* CONFIG_SECLUDED_MEMORY */
	/* "uplq" refers to the object not its contents (see upl_transpose()) */
	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));

#undef __TRANSPOSE_FIELD

	retval = KERN_SUCCESS;

done:
	/*
	 * Cleanup.
	 */
	if (tmp_object != VM_OBJECT_NULL) {
		vm_object_unlock(tmp_object);
		/*
		 * Re-initialize the temporary object to avoid
		 * deallocating a real pager.
		 */
		_vm_object_allocate(transpose_size, tmp_object);
		vm_object_deallocate(tmp_object);
		tmp_object = VM_OBJECT_NULL;
	}

	if (object1_locked) {
		vm_object_unlock(object1);
		object1_locked = FALSE;
	}
	if (object2_locked) {
		vm_object_unlock(object2);
		object2_locked = FALSE;
	}

	vm_object_transpose_count++;

	return retval;
}
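/*
 * Note (illustrative, not from the original source): the expected caller of
 * vm_object_transpose() is the UPL layer (see upl_transpose(), referenced in
 * the "uplq" comment above), which is assumed to have quiesced both objects
 * beforehand with a UPL created using UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS and
 * to release that UPL, unblocking access, only after the transpose returns.
 */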
/*
 * vm_object_cluster_size
 *
 * Determine how big a cluster we should issue an I/O for...
 *
 * Inputs:   *start == offset of page needed
 *           *length == maximum cluster pager can handle
 *
 * Outputs:  *start == beginning offset of cluster
 *           *length == length of cluster to try
 *
 * The original *start will be encompassed by the cluster
 */
extern int speculative_reads_disabled;

/*
 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
 * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
 * always be page-aligned. The derivation could involve operations (e.g. division)
 * that could give us non-page-size aligned values if we start out with values that
 * are odd multiples of PAGE_SIZE.
 */
#if !XNU_TARGET_OS_OSX
unsigned int preheat_max_bytes = (1024 * 512);
#else /* !XNU_TARGET_OS_OSX */
unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
#endif /* !XNU_TARGET_OS_OSX */
unsigned int preheat_min_bytes = (1024 * 32);


__private_extern__ void
vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
    vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
{
	vm_size_t               pre_heat_size;
	vm_size_t               tail_size;
	vm_size_t               head_size;
	vm_size_t               max_length;
	vm_size_t               cluster_size;
	vm_object_offset_t      object_size;
	vm_object_offset_t      orig_start;
	vm_object_offset_t      target_start;
	vm_object_offset_t      offset;
	vm_behavior_t           behavior;
	boolean_t               look_behind = TRUE;
	boolean_t               look_ahead = TRUE;
	boolean_t               isSSD = FALSE;
	uint32_t                throttle_limit;
	int                     sequential_run;
	int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
	vm_size_t               max_ph_size;
	vm_size_t               min_ph_size;

	assert( !(*length & PAGE_MASK));
	assert( !(*start & PAGE_MASK_64));

	/*
	 * remember maximum length of run requested
	 */
	max_length = *length;
	/*
	 * we'll always return a cluster size of at least
	 * 1 page, since the original fault must always
	 * be processed
	 */
	*length = PAGE_SIZE;

	if (speculative_reads_disabled || fault_info == NULL) {
		/*
		 * no cluster... just fault the page in
		 */
		return;
	}
	orig_start = *start;
	target_start = orig_start;
	cluster_size = round_page(fault_info->cluster_size);
	behavior = fault_info->behavior;

	vm_object_lock(object);

	if (object->pager == MEMORY_OBJECT_NULL) {
		goto out;       /* pager is gone for this object, nothing more to do */
	}
	vnode_pager_get_isSSD(object->pager, &isSSD);

	min_ph_size = round_page(preheat_min_bytes);
	max_ph_size = round_page(preheat_max_bytes);

#if XNU_TARGET_OS_OSX
	if (isSSD) {
		min_ph_size /= 2;
		max_ph_size /= 8;

		if (min_ph_size & PAGE_MASK_64) {
			min_ph_size = trunc_page(min_ph_size);
		}

		if (max_ph_size & PAGE_MASK_64) {
			max_ph_size = trunc_page(max_ph_size);
		}
	}
#endif /* XNU_TARGET_OS_OSX */

	if (min_ph_size < PAGE_SIZE) {
		min_ph_size = PAGE_SIZE;
	}

	if (max_ph_size < PAGE_SIZE) {
		max_ph_size = PAGE_SIZE;
	} else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
		max_ph_size = MAX_UPL_TRANSFER_BYTES;
	}

	if (max_length > max_ph_size) {
		max_length = max_ph_size;
	}

	if (max_length <= PAGE_SIZE) {
		goto out;
	}

	if (object->internal) {
		object_size = object->vo_size;
	} else {
		vnode_pager_get_object_size(object->pager, &object_size);
	}

	object_size = round_page_64(object_size);

	if (orig_start >= object_size) {
		/*
		 * fault occurred beyond the EOF...
		 * we need to punt w/o changing the
		 * starting offset
		 */
		goto out;
	}
	if (object->pages_used > object->pages_created) {
		/*
		 * must have wrapped our 32 bit counters
		 * so reset
		 */
		object->pages_used = object->pages_created = 0;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {
	default:
		behavior = VM_BEHAVIOR_DEFAULT;
		/* fall through */

	case VM_BEHAVIOR_DEFAULT:
		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
			goto out;
		}

		if (sequential_run >= (3 * PAGE_SIZE)) {
			pre_heat_size = sequential_run + PAGE_SIZE;

			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
				look_behind = FALSE;
			} else {
				look_ahead = FALSE;
			}
		} else {
			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
				/*
				 * prime the pump
				 */
				pre_heat_size = min_ph_size;
			} else {
				/*
				 * Linear growth in PH size: The maximum size is max_length...
				 * this calculation will result in a size that is neither a
				 * power of 2 nor a multiple of PAGE_SIZE... so round
				 * it up to the nearest PAGE_SIZE boundary
				 */
				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;

				if (pre_heat_size < min_ph_size) {
					pre_heat_size = min_ph_size;
				} else {
					pre_heat_size = round_page(pre_heat_size);
				}
			}
		}
		break;

	case VM_BEHAVIOR_RANDOM:
		if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
			goto out;
		}
		break;

	case VM_BEHAVIOR_SEQUENTIAL:
		if ((pre_heat_size = cluster_size) == 0) {
			pre_heat_size = sequential_run + PAGE_SIZE;
		}
		look_behind = FALSE;
		*io_streaming = 1;

		break;

	case VM_BEHAVIOR_RSEQNTL:
		if ((pre_heat_size = cluster_size) == 0) {
			pre_heat_size = sequential_run + PAGE_SIZE;
		}
		look_ahead = FALSE;
		*io_streaming = 1;

		break;
	}
	throttle_limit = (uint32_t) max_length;
	assert(throttle_limit == max_length);

	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
		if (max_length > throttle_limit) {
			max_length = throttle_limit;
		}
	}
	if (pre_heat_size > max_length) {
		pre_heat_size = max_length;
	}

	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;

		if (consider_free < vm_page_throttle_limit) {
			pre_heat_size = trunc_page(pre_heat_size / 16);
		} else if (consider_free < vm_page_free_target) {
			pre_heat_size = trunc_page(pre_heat_size / 4);
		}

		if (pre_heat_size < min_ph_size) {
			pre_heat_size = min_ph_size;
		}
	}
	if (look_ahead == TRUE) {
		if (look_behind == TRUE) {
			/*
			 * if we get here it's due to a random access...
			 * so we want to center the original fault address
			 * within the cluster we will issue... make sure
			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
			 * necessarily an even number of pages so we need to truncate
			 * the result to a PAGE_SIZE boundary
			 */
			head_size = trunc_page(pre_heat_size / 2);

			if (target_start > head_size) {
				target_start -= head_size;
			} else {
				target_start = 0;
			}

			/*
			 * 'target_start' at this point represents the beginning offset
			 * of the cluster we are considering... 'orig_start' will be in
			 * the center of this cluster if we didn't have to clip the start
			 * due to running into the start of the file
			 */
		}
		if ((target_start + pre_heat_size) > object_size) {
			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
		}
		/*
		 * at this point calculate the number of pages beyond the original fault
		 * address that we want to consider... this is guaranteed not to extend beyond
		 * the current EOF...
		 */
		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
	} else {
		if (pre_heat_size > target_start) {
			/*
			 * since pre_heat_size is always smaller than 2^32,
			 * if it is larger than target_start (a 64 bit value)
			 * it is safe to clip target_start to 32 bits
			 */
			pre_heat_size = (vm_size_t) target_start;
		}
		tail_size = 0;
	}
	assert( !(target_start & PAGE_MASK_64));
	assert( !(pre_heat_size & PAGE_MASK_64));

	if (pre_heat_size <= PAGE_SIZE) {
		goto out;
	}

	if (look_behind == TRUE) {
		/*
		 * take a look at the pages before the original
		 * faulting offset... recalculate this in case
		 * we had to clip 'pre_heat_size' above to keep
		 * from running past the EOF.
		 */
		head_size = pre_heat_size - tail_size - PAGE_SIZE;

		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
			/*
			 * don't poke below the lowest offset
			 */
			if (offset < fault_info->lo_offset) {
				break;
			}
			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*start -= PAGE_SIZE_64;
			*length += PAGE_SIZE;
		}
	}
	if (look_ahead == TRUE) {
		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
			/*
			 * don't poke above the highest offset
			 */
			if (offset >= fault_info->hi_offset) {
				break;
			}
			assert(offset < object_size);

			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*length += PAGE_SIZE;
		}
	}
out:
	if (*length > max_length) {
		*length = max_length;
	}

	vm_object_unlock(object);

	DTRACE_VM1(clustersize, vm_size_t, *length);
}
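/*
 * Worked example of the VM_BEHAVIOR_DEFAULT sizing above (illustrative
 * numbers, not from the original source): with max_length clamped to 256KB
 * and an object that has faulted in one quarter of the pages it has created
 * (pages_used / pages_created == 1/4), the linear-growth term produces
 * 256KB * 1/4 = 64KB.  That value is then rounded to a PAGE_SIZE boundary,
 * clamped between min_ph_size and max_length, and possibly scaled down by
 * /16 or /4 when free memory is below the throttle or free-target levels.
 */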
/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the VM object rather than on a UPL.
 */

kern_return_t
vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags)
{
	vm_page_t               dst_page;

	vm_object_lock(object);

	if (ops & UPL_POP_PHYSICAL) {
		if (object->phys_contiguous) {
			if (phys_entry) {
				*phys_entry = (ppnum_t)
				    (object->vo_shadow_offset >> PAGE_SHIFT);
			}
			vm_object_unlock(object);
			return KERN_SUCCESS;
		} else {
			vm_object_unlock(object);
			return KERN_INVALID_OBJECT;
		}
	}
	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	while (TRUE) {
		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}

		/* Sync up on getting the busy bit */
		if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
		    (((ops & UPL_POP_SET) &&
		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
			/* someone else is playing with the page, we will */
			/* have to wait */
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
			continue;
		}

		if (ops & UPL_POP_DUMP) {
			if (dst_page->vmp_pmapped == TRUE) {
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
			}

			VM_PAGE_FREE(dst_page);
			break;
		}

		if (flags) {
			*flags = 0;

			/* Get the condition of flags before requested ops */
			/* are undertaken */

			if (dst_page->vmp_dirty) {
				*flags |= UPL_POP_DIRTY;
			}
			if (dst_page->vmp_free_when_done) {
				*flags |= UPL_POP_PAGEOUT;
			}
			if (dst_page->vmp_precious) {
				*flags |= UPL_POP_PRECIOUS;
			}
			if (dst_page->vmp_absent) {
				*flags |= UPL_POP_ABSENT;
			}
			if (dst_page->vmp_busy) {
				*flags |= UPL_POP_BUSY;
			}
		}

		/* The caller should have made a call either contingent with */
		/* or prior to this call to set UPL_POP_BUSY */
		if (ops & UPL_POP_SET) {
			/* The protection granted with this assert will */
			/* not be complete. If the caller violates the */
			/* convention and attempts to change page state */
			/* without first setting busy we may not see it */
			/* because the page may already be busy.  However */
			/* if such violations occur we will assert sooner */
			/* or later. */
			assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
			if (ops & UPL_POP_DIRTY) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			}
			if (ops & UPL_POP_PAGEOUT) {
				dst_page->vmp_free_when_done = TRUE;
			}
			if (ops & UPL_POP_PRECIOUS) {
				dst_page->vmp_precious = TRUE;
			}
			if (ops & UPL_POP_ABSENT) {
				dst_page->vmp_absent = TRUE;
			}
			if (ops & UPL_POP_BUSY) {
				dst_page->vmp_busy = TRUE;
			}
		}

		if (ops & UPL_POP_CLR) {
			assert(dst_page->vmp_busy);
			if (ops & UPL_POP_DIRTY) {
				dst_page->vmp_dirty = FALSE;
			}
			if (ops & UPL_POP_PAGEOUT) {
				dst_page->vmp_free_when_done = FALSE;
			}
			if (ops & UPL_POP_PRECIOUS) {
				dst_page->vmp_precious = FALSE;
			}
			if (ops & UPL_POP_ABSENT) {
				dst_page->vmp_absent = FALSE;
			}
			if (ops & UPL_POP_BUSY) {
				dst_page->vmp_busy = FALSE;
				PAGE_WAKEUP(dst_page);
			}
		}
		if (phys_entry) {
			/*
			 * The physical page number will remain valid
			 * only if the page is kept busy.
			 */
			assert(dst_page->vmp_busy);
			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
		}

		break;
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
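/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * source): a caller that needs a stable physical page number is expected to
 * set the busy bit in the same call that retrieves it, and clear it when done:
 *
 *	ppnum_t pn;
 *	vm_object_page_op(object, offset, UPL_POP_SET | UPL_POP_BUSY, &pn, NULL);
 *	// ... use pn while the page remains busy ...
 *	vm_object_page_op(object, offset, UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 *
 * since, as asserted above, the physical page number is only guaranteed to
 * stay valid while the page is kept busy.
 */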
/*
 * vm_object_range_op offers performance enhancement over
 * vm_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range)
{
	vm_object_offset_t      offset;
	vm_page_t               dst_page;

	if (offset_end - offset_beg > (uint32_t) -1) {
		/* range is too big and would overflow "*range" */
		return KERN_INVALID_ARGUMENT;
	}
	if (object->resident_page_count == 0) {
		if (range) {
			if (ops & UPL_ROP_PRESENT) {
				*range = 0;
			} else {
				*range = (uint32_t) (offset_end - offset_beg);
				assert(*range == (offset_end - offset_beg));
			}
		}
		return KERN_SUCCESS;
	}
	vm_object_lock(object);

	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	offset = offset_beg & ~PAGE_MASK_64;

	while (offset < offset_end) {
		dst_page = vm_page_lookup(object, offset);
		if (dst_page != VM_PAGE_NULL) {
			if (ops & UPL_ROP_DUMP) {
				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
					/*
					 * someone else is playing with the
					 * page, we will have to wait
					 */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
					/*
					 * need to relook the page up since it's
					 * state may have changed while we slept
					 * it might even belong to a different object
					 * at this point
					 */
					continue;
				}
				if (dst_page->vmp_laundry) {
					vm_pageout_steal_laundry(dst_page, FALSE);
				}
				if (dst_page->vmp_pmapped == TRUE) {
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}

				VM_PAGE_FREE(dst_page);
			} else if ((ops & UPL_ROP_ABSENT)
			    && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
				break;
			}
		} else if (ops & UPL_ROP_PRESENT) {
			break;
		}

		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);

	if (range) {
		if (offset > offset_end) {
			offset = offset_end;
		}
		if (offset > offset_beg) {
			*range = (uint32_t) (offset - offset_beg);
			assert(*range == (offset - offset_beg));
		} else {
			*range = 0;
		}
	}
	return KERN_SUCCESS;
}
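/*
 * Summary of the flag semantics implemented above: UPL_ROP_PRESENT stops the
 * scan at the first resident page, UPL_ROP_ABSENT stops at the first page
 * that is resident and usable (not absent or busy), and UPL_ROP_DUMP discards
 * every resident page in the range.  On return, "*range" reports how far the
 * scan progressed, in bytes, relative to offset_beg.
 */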
/*
 * Used to point a pager directly to a range of memory (when the pager may be associated
 * with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
 * expect that the virtual address will denote the start of a range that is physically contiguous.
 */
kern_return_t
pager_map_to_phys_contiguous(
	memory_object_control_t object,
	memory_object_offset_t  offset,
	addr64_t                base_vaddr,
	vm_size_t               size)
{
	ppnum_t         page_num;
	boolean_t       clobbered_private;
	kern_return_t   retval;
	vm_object_t     pager_object;

	page_num = pmap_find_phys(kernel_pmap, base_vaddr);

	if (!page_num) {
		retval = KERN_FAILURE;
		goto out;
	}

	pager_object = memory_object_control_to_vm_object(object);

	if (!pager_object) {
		retval = KERN_FAILURE;
		goto out;
	}

	clobbered_private = pager_object->private;
	if (pager_object->private != TRUE) {
		vm_object_lock(pager_object);
		pager_object->private = TRUE;
		vm_object_unlock(pager_object);
	}
	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);

	if (retval != KERN_SUCCESS) {
		if (pager_object->private != clobbered_private) {
			vm_object_lock(pager_object);
			pager_object->private = clobbered_private;
			vm_object_unlock(pager_object);
		}
	}

out:
	return retval;
}
uint32_t scan_object_collision = 0;

void
vm_object_lock(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		mutex_pause(2);
	}
	DTRACE_VM(vm_object_lock_w);
	lck_rw_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
}

boolean_t
vm_object_lock_avoid(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		return TRUE;
	}
	return FALSE;
}

boolean_t
_vm_object_lock_try(vm_object_t object)
{
	boolean_t       retval;

	retval = lck_rw_try_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE) {
		DTRACE_VM(vm_object_lock_w);
		object->Lock_owner = current_thread();
	}
#endif
	return retval;
}

boolean_t
vm_object_lock_try(vm_object_t object)
{
	/*
	 * Called from hibernate path so check before blocking.
	 */
	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
		mutex_pause(2);
	}
	return _vm_object_lock_try(object);
}

/*
 * Lock the object exclusive.
 *
 * Returns true iff the thread had to spin or block before
 * acquiring the lock.
 */
bool
vm_object_lock_check_contended(vm_object_t object)
{
	bool contended;

	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		mutex_pause(2);
	}
	DTRACE_VM(vm_object_lock_w);
	contended = lck_rw_lock_exclusive_check_contended(&object->Lock);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return contended;
}

void
vm_object_lock_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	DTRACE_VM(vm_object_lock_r);
	lck_rw_lock_shared(&object->Lock);
}

boolean_t
vm_object_lock_yield_shared(vm_object_t object)
{
	boolean_t retval = FALSE, force_yield = FALSE;

	vm_object_lock_assert_shared(object);

	force_yield = vm_object_lock_avoid(object);

	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
	if (retval) {
		DTRACE_VM(vm_object_lock_yield);
	}
	return retval;
}

boolean_t
vm_object_lock_try_shared(vm_object_t object)
{
	boolean_t retval;

	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	retval = lck_rw_try_lock_shared(&object->Lock);
	if (retval) {
		DTRACE_VM(vm_object_lock_r);
	}
	return retval;
}

boolean_t
vm_object_lock_upgrade(vm_object_t object)
{
	boolean_t retval;

	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE) {
		DTRACE_VM(vm_object_lock_w);
		object->Lock_owner = current_thread();
	}
#endif
	return retval;
}

void
vm_object_unlock(vm_object_t object)
{
#if DEVELOPMENT || DEBUG
	if (object->Lock_owner) {
		if (object->Lock_owner != current_thread()) {
			panic("vm_object_unlock: not owner - %p\n", object);
		}
		object->Lock_owner = 0;
		DTRACE_VM(vm_object_unlock);
	}
#endif
	lck_rw_done(&object->Lock);
}
= 0;
7427 * The object must be locked
7430 vm_object_change_wimg_mode(vm_object_t object
, unsigned int wimg_mode
)
7434 vm_object_lock_assert_exclusive(object
);
7436 vm_object_paging_wait(object
, THREAD_UNINT
);
7438 vm_page_queue_iterate(&object
->memq
, p
, vmp_listq
) {
7439 if (!p
->vmp_fictitious
) {
7440 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p
), wimg_mode
);
7443 if (wimg_mode
== VM_WIMG_USE_DEFAULT
) {
7444 object
->set_cache_attr
= FALSE
;
7446 object
->set_cache_attr
= TRUE
;
7449 object
->wimg_bits
= wimg_mode
;
7451 vm_object_change_wimg_mode_count
++;
#if CONFIG_FREEZE

extern struct freezer_context freezer_context_global;

/*
 * This routine does the "relocation" of previously
 * compressed pages belonging to this object that are
 * residing in a number of compressed segments into
 * a set of compressed segments dedicated to hold
 * compressed pages belonging to this object.
 */

extern AbsoluteTime c_freezer_last_yield_ts;

#define MAX_FREE_BATCH                  32
#define FREEZER_DUTY_CYCLE_ON_MS        5
#define FREEZER_DUTY_CYCLE_OFF_MS       5

static int c_freezer_should_yield(void);


static int
c_freezer_should_yield()
{
	AbsoluteTime    cur_time;
	uint64_t        nsecs;

	assert(c_freezer_last_yield_ts);
	clock_get_uptime(&cur_time);

	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
	absolutetime_to_nanoseconds(cur_time, &nsecs);

	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
		return 1;
	}
	return 0;
}


void
vm_object_compressed_freezer_done()
{
	vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
}


uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget)
{
	vm_page_t       p;
	vm_page_t       local_freeq = NULL;
	int             local_freed = 0;
	kern_return_t   retval = KERN_SUCCESS;
	int             obj_resident_page_count_snapshot = 0;
	uint32_t        paged_out_count = 0;

	assert(object != VM_OBJECT_NULL);
	assert(object->internal);

	vm_object_lock(object);

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized) {
				vm_object_compressor_pager_create(object);
			}
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return paged_out_count;
		}
	}

	/*
	 * We could be freezing a shared internal object that might
	 * be part of some other thread's current VM operations.
	 * We skip it if there's a paging-in-progress or activity-in-progress
	 * because we could be here a long time with the map lock held.
	 *
	 * Note: We are holding the map locked while we wait.
	 * This is fine in the freezer path because the task
	 * is suspended and so this latency is acceptable.
	 */
	if (object->paging_in_progress || object->activity_in_progress) {
		vm_object_unlock(object);
		return paged_out_count;
	}

	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		vm_object_offset_t      curr_offset = 0;

		/*
		 * Go through the object and make sure that any
		 * previously compressed pages are relocated into
		 * a compressed segment associated with our "freezer_chead".
		 */
		while (curr_offset < object->vo_size) {
			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);

			if (curr_offset == (vm_object_offset_t) -1) {
				break;
			}

			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));

			if (retval != KERN_SUCCESS) {
				break;
			}

			curr_offset += PAGE_SIZE_64;
		}
	}

	/*
	 * We can't hold the object lock while heading down into the compressed pager
	 * layer because we might need the kernel map lock down there to allocate new
	 * compressor data structures. And if this same object is mapped in the kernel
	 * and there's a fault on it, then that thread will want the object lock while
	 * holding the kernel map lock.
	 *
	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
	 * we won't be stuck in an infinite loop if the same page(s) keep getting
	 * decompressed. So we grab a snapshot of the number of pages in the object and
	 * we won't process any more than that number of pages.
	 */

	obj_resident_page_count_snapshot = object->resident_page_count;

	vm_object_activity_begin(object);

	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
		p = (vm_page_t)vm_page_queue_first(&object->memq);

		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);

		vm_page_lockspin_queues();

		if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) {
			vm_page_unlock_queues();

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);

			vm_page_queue_remove(&object->memq, p, vmp_listq);
			vm_page_queue_enter(&object->memq, p, vmp_listq);

			continue;
		}

		if (p->vmp_pmapped == TRUE) {
			int refmod_state, pmap_flags;

			if (p->vmp_dirty || p->vmp_precious) {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}

			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
			/*
			 * Clean and non-precious page.
			 */
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
			continue;
		}

		if (p->vmp_laundry) {
			vm_pageout_steal_laundry(p, TRUE);
		}

		vm_page_queues_remove(p, TRUE);

		vm_page_unlock_queues();


		/*
		 * In case the compressor fails to compress this page, we need it at
		 * the back of the object memq so that we don't keep trying to process it.
		 * Make the move here while we have the object lock held.
		 */

		vm_page_queue_remove(&object->memq, p, vmp_listq);
		vm_page_queue_enter(&object->memq, p, vmp_listq);

		/*
		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
		 *
		 * Mark the page busy so no one messes with it while we have the object lock dropped.
		 */
		p->vmp_busy = TRUE;

		vm_object_activity_begin(object);

		vm_object_unlock(object);

		if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
		    (freezer_context_global.freezer_ctx_compressor_scratch_buf),
		    p) == KERN_SUCCESS) {
			/*
			 * page has already been un-tabled from the object via 'vm_page_remove'
			 */
			p->vmp_snext = local_freeq;
			local_freeq = p;
			local_freed++;
			paged_out_count++;

			if (local_freed >= MAX_FREE_BATCH) {
				OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

				vm_page_free_list(local_freeq, TRUE);

				local_freeq = NULL;
				local_freed = 0;
			}
		} else {
			freezer_context_global.freezer_ctx_uncompressed_pages++;
		}
		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);

		if (local_freed == 0 && c_freezer_should_yield()) {
			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
			clock_get_uptime(&c_freezer_last_yield_ts);
		}

		vm_object_lock(object);
	}

	if (local_freeq) {
		OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

		vm_page_free_list(local_freeq, TRUE);

		local_freeq = NULL;
		local_freed = 0;
	}

	vm_object_activity_end(object);

	vm_object_unlock(object);

	if (c_freezer_should_yield()) {
		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
		clock_get_uptime(&c_freezer_last_yield_ts);
	}
	return paged_out_count;
}

#endif /* CONFIG_FREEZE */
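/*
 * Duty-cycle arithmetic for the freezer path above: c_freezer_should_yield()
 * reports true once more than FREEZER_DUTY_CYCLE_ON_MS (5 ms) of uptime has
 * elapsed since the last recorded yield, and the pageout loop then calls
 * thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS), giving up the CPU for 5 ms
 * after roughly every 5 ms of compression work, i.e. about a 50% duty cycle.
 */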
void
vm_object_pageout(
	vm_object_t object)
{
	vm_page_t                       p, next;
	struct  vm_pageout_queue        *iq;

	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		return;
	}

	iq = &vm_pageout_queue_internal;

	assert(object != VM_OBJECT_NULL);

	vm_object_lock(object);

	if (!object->internal ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return;
	}

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized) {
				vm_object_compressor_pager_create(object);
			}
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

ReScan:
	next = (vm_page_t)vm_page_queue_first(&object->memq);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
		p = next;
		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);

		assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);

		if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
		    p->vmp_cleaning ||
		    p->vmp_laundry ||
		    p->vmp_busy ||
		    p->vmp_absent ||
		    p->vmp_error ||
		    p->vmp_fictitious ||
		    VM_PAGE_WIRED(p)) {
			/*
			 * Page is already being cleaned or can't be cleaned.
			 */
			continue;
		}
		if (vm_compressor_low_on_space()) {
			break;
		}

		/* Throw to the pageout queue */

		vm_page_lockspin_queues();

		if (VM_PAGE_Q_THROTTLED(iq)) {
			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1),
			    THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);

			vm_object_lock(object);
			goto ReScan;
		}

		assert(!p->vmp_fictitious);
		assert(!p->vmp_busy);
		assert(!p->vmp_absent);
		assert(!p->vmp_unusual);
		assert(!p->vmp_error);
		assert(!VM_PAGE_WIRED(p));
		assert(!p->vmp_cleaning);

		if (p->vmp_pmapped == TRUE) {
			int refmod_state;
			int pmap_options;

			/*
			 * Tell pmap the page should be accounted
			 * for as "compressed" if it's been modified.
			 */
			pmap_options =
			    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			if (p->vmp_dirty || p->vmp_precious) {
				/*
				 * We already know it's been modified,
				 * so tell pmap to account for it
				 * as "compressed".
				 */
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			}
			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
			    pmap_options,
			    NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (!p->vmp_dirty && !p->vmp_precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);
			continue;
		}
		vm_page_queues_remove(p, TRUE);

		vm_pageout_cluster(p);

		vm_page_unlock_queues();
	}
	vm_object_unlock(object);
}
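/*
 * Note on the ReScan pattern above: when the internal pageout queue is
 * throttled, both the page queues lock and the object lock are dropped while
 * this thread blocks waiting for the queue to drain; since pages may have
 * moved or been freed in the meantime, the memq walk is restarted from the
 * beginning rather than resumed from the stale "next" pointer.
 */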
void
vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
{
	io_reprioritize_req_t   req;
	struct vnode            *devvp = NULL;

	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		return;
	}

	/*
	 * Create the request for I/O reprioritization.
	 * We use the noblock variant of zalloc because we're holding the object
	 * lock here and we could cause a deadlock in low memory conditions.
	 */
	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
	if (req == NULL) {
		return;
	}
	req->blkno = blkno;
	req->len = len;
	req->priority = prio;
	req->devvp = devvp;

	/* Insert request into the reprioritization list */
	IO_REPRIORITIZE_LIST_LOCK();
	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
	IO_REPRIORITIZE_LIST_UNLOCK();

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

	return;
}

void
vm_decmp_upl_reprioritize(upl_t upl, int prio)
{
	int                     offset;
	vm_object_t             object;
	io_reprioritize_req_t   req;
	struct vnode            *devvp = NULL;
	uint64_t                blkno;
	uint32_t                len;
	upl_t                   io_upl;
	uint64_t                *io_upl_reprio_info;
	int                     io_upl_size;

	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
		return;
	}

	/*
	 * We dont want to perform any allocations with the upl lock held since that might
	 * result in a deadlock. If the system is low on memory, the pageout thread would
	 * try to pageout stuff and might wait on this lock. If we are waiting for the memory to
	 * be freed up by the pageout thread, it would be a deadlock.
	 */


	/* First step is just to get the size of the upl to find out how big the reprio info is */
	if (!upl_try_lock(upl)) {
		return;
	}

	if (upl->decmp_io_upl == NULL) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		return;
	}

	io_upl = upl->decmp_io_upl;
	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
	assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
	    "upl %p offset 0x%llx size 0x%x\n",
	    io_upl, io_upl->u_offset, io_upl->u_size);
	io_upl_size = io_upl->u_size;
	upl_unlock(upl);

	/* Now perform the allocation */
	io_upl_reprio_info = (uint64_t *)kheap_alloc(KHEAP_TEMP,
	    sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
	if (io_upl_reprio_info == NULL) {
		return;
	}

	/* Now again take the lock, recheck the state and grab out the required info */
	if (!upl_try_lock(upl)) {
		goto out;
	}

	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		goto out;
	}
	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
	    sizeof(uint64_t) * atop(io_upl_size));

	/* Get the VM object for this UPL */
	if (io_upl->flags & UPL_SHADOWED) {
		object = io_upl->map_object->shadow;
	} else {
		object = io_upl->map_object;
	}

	/* Get the dev vnode ptr for this object */
	if (!object || !object->pager ||
	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		upl_unlock(upl);
		goto out;
	}

	upl_unlock(upl);

	/* Now we have all the information needed to do the expedite */

	offset = 0;
	while (offset < io_upl_size) {
		blkno   = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
		len     = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;

		/*
		 * This implementation may cause some spurious expedites due to the
		 * fact that we dont cleanup the blkno & len from the upl_reprio_info
		 * even after the I/O is complete.
		 */

		if (blkno != 0 && len != 0) {
			/* Create the request for I/O reprioritization */
			req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
			assert(req != NULL);
			req->blkno = blkno;
			req->len = len;
			req->priority = prio;
			req->devvp = devvp;

			/* Insert request into the reprioritization list */
			IO_REPRIORITIZE_LIST_LOCK();
			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
			IO_REPRIORITIZE_LIST_UNLOCK();

			offset += len;
		} else {
			offset += PAGE_SIZE;
		}
	}

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

out:
	kheap_free(KHEAP_TEMP, io_upl_reprio_info,
	    sizeof(uint64_t) * atop(io_upl_size));
}

void
vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
{
	upl_t           upl;
	upl_page_info_t *pl;
	unsigned int    i, num_pages;
	int             cur_tier;

	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);

	/*
	 * Scan through all UPLs associated with the object to find the
	 * UPL containing the contended page.
	 */
	queue_iterate(&o->uplq, upl, upl_t, uplq) {
		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
			continue;
		}
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
		    "upl %p offset 0x%llx size 0x%x\n",
		    upl, upl->u_offset, upl->u_size);
		num_pages = (upl->u_size / PAGE_SIZE);

		/*
		 * For each page in the UPL page list, see if it matches the contended
		 * page and was issued as a low prio I/O.
		 */
		for (i = 0; i < num_pages; i++) {
			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
					vm_decmp_upl_reprioritize(upl, cur_tier);
					break;
				}
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
				    upl->upl_reprio_info[i], upl->upl_priority, 0);
				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
				}
				break;
			}
		}
		/* Check if we found any hits */
		if (i != num_pages) {
			break;
		}
	}

	return;
}

wait_result_t
vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
{
	wait_result_t ret;

	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);

	if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
		/*
		 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
		 */
		vm_page_handle_prio_inversion(o, m);
	}
	m->vmp_wanted = TRUE;
	ret = thread_sleep_vm_object(o, m, interruptible);
	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
	return ret;
}

void
io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
{
	io_reprioritize_req_t   req = NULL;

	while (1) {
		IO_REPRIORITIZE_LIST_LOCK();
		if (queue_empty(&io_reprioritize_list)) {
			IO_REPRIORITIZE_LIST_UNLOCK();
			break;
		}

		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
		IO_REPRIORITIZE_LIST_UNLOCK();

		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
		zfree(io_reprioritize_req_zone, req);
	}

	IO_REPRIO_THREAD_CONTINUATION();
}
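/*
 * How the pieces above fit together: vm_page_sleep() notices that the page it
 * is about to wait on is tied up in I/O and calls
 * vm_page_handle_prio_inversion(), which scans the object's UPLs for the
 * contended page and, when the waiter's I/O tier is higher than the UPL's,
 * queues an io_reprioritize_req (directly, or via vm_decmp_upl_reprioritize()
 * for decompression UPLs).  io_reprioritize_thread() then drains that list
 * and asks the vnode pager to reissue the affected blocks at the higher
 * priority.
 */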
#if VM_OBJECT_ACCESS_TRACKING
void
vm_object_access_tracking(
	vm_object_t     object,
	int             *access_tracking_p,
	uint32_t        *access_tracking_reads_p,
	uint32_t        *access_tracking_writes_p)
{
	int     access_tracking;

	access_tracking = !!*access_tracking_p;

	vm_object_lock(object);
	*access_tracking_p = object->access_tracking;
	if (access_tracking_reads_p) {
		*access_tracking_reads_p = object->access_tracking_reads;
	}
	if (access_tracking_writes_p) {
		*access_tracking_writes_p = object->access_tracking_writes;
	}
	object->access_tracking = access_tracking;
	object->access_tracking_reads = 0;
	object->access_tracking_writes = 0;
	vm_object_unlock(object);

	if (access_tracking) {
		vm_object_pmap_protect_options(object,
		    0,
		    object->vo_size,
		    PMAP_NULL,
		    PAGE_SIZE,
		    0,
		    VM_PROT_NONE,
		    0);
	}
}
#endif /* VM_OBJECT_ACCESS_TRACKING */
void
vm_object_ledger_tag_ledgers(
	vm_object_t     object,
	int             *ledger_idx_volatile,
	int             *ledger_idx_nonvolatile,
	int             *ledger_idx_volatile_compressed,
	int             *ledger_idx_nonvolatile_compressed,
	boolean_t       *do_footprint)
{
	assert(object->shadow == VM_OBJECT_NULL);

	*do_footprint = !object->vo_no_footprint;

	switch (object->vo_ledger_tag) {
	case VM_LEDGER_TAG_NONE:
		/*
		 * Regular purgeable memory:
		 * counts in footprint only when nonvolatile.
		 */
		*do_footprint = TRUE;
		assert(object->purgable != VM_PURGABLE_DENY);
		*ledger_idx_volatile = task_ledgers.purgeable_volatile;
		*ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
		*ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
		*ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
		break;
	case VM_LEDGER_TAG_DEFAULT:
		/*
		 * "default" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no_footprint".
		 */
		*ledger_idx_volatile = task_ledgers.tagged_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_NETWORK:
		/*
		 * "network" tagged memory:
		 * never counts in footprint.
		 */
		*do_footprint = FALSE;
		*ledger_idx_volatile = task_ledgers.network_volatile;
		*ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
		*ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
		*ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
		break;
	case VM_LEDGER_TAG_MEDIA:
		/*
		 * "media" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.media_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.media_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_GRAPHICS:
		/*
		 * "graphics" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.graphics_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_NEURAL:
		/*
		 * "neural" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
		}
		break;
	default:
		panic("%s: object %p has unsupported ledger_tag %d\n",
		    __FUNCTION__, object, object->vo_ledger_tag);
	}
}
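/*
 * Example of the mapping above (illustrative): a VM_LEDGER_TAG_MEDIA object
 * without the "no footprint" flag reports media_nofootprint for its volatile
 * ledger indices and media_footprint for its nonvolatile ones, so only its
 * nonvolatile pages can contribute to phys_footprint; with the flag set, all
 * four indices point at the media_nofootprint ledgers and the object never
 * affects the owning task's footprint.
 */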
8246 vm_object_ownership_change(
8250 int new_ledger_flags
,
8251 boolean_t old_task_objq_locked
)
8255 int resident_count
, wired_count
;
8256 unsigned int compressed_count
;
8257 int ledger_idx_volatile
;
8258 int ledger_idx_nonvolatile
;
8259 int ledger_idx_volatile_compressed
;
8260 int ledger_idx_nonvolatile_compressed
;
8262 int ledger_idx_compressed
;
8263 boolean_t do_footprint
, old_no_footprint
, new_no_footprint
;
8264 boolean_t new_task_objq_locked
;
8266 vm_object_lock_assert_exclusive(object
);
8268 if (!object
->internal
) {
8269 return KERN_INVALID_ARGUMENT
;
8271 if (new_ledger_tag
== VM_LEDGER_TAG_NONE
&&
8272 object
->purgable
== VM_PURGABLE_DENY
) {
8273 /* non-purgeable memory must have a valid non-zero ledger tag */
8274 return KERN_INVALID_ARGUMENT
;
8276 if (new_ledger_tag
< 0 ||
8277 new_ledger_tag
> VM_LEDGER_TAG_MAX
) {
8278 return KERN_INVALID_ARGUMENT
;
8280 if (new_ledger_flags
& ~VM_LEDGER_FLAGS
) {
8281 return KERN_INVALID_ARGUMENT
;
8283 if (object
->vo_ledger_tag
== VM_LEDGER_TAG_NONE
&&
8284 object
->purgable
== VM_PURGABLE_DENY
) {
8286 * This VM object is neither ledger-tagged nor purgeable.
8287 * We can convert it to "ledger tag" ownership iff it
8288 * has not been used at all yet (no resident pages and
8289 * no pager) and it's going to be assigned to a valid task.
8291 if (object
->resident_page_count
!= 0 ||
8292 object
->pager
!= NULL
||
8293 object
->pager_created
||
8294 object
->ref_count
!= 1 ||
8295 object
->vo_owner
!= TASK_NULL
||
8296 object
->copy_strategy
!= MEMORY_OBJECT_COPY_NONE
||
8297 new_owner
== TASK_NULL
) {
8298 return KERN_FAILURE
;
8302 if (new_ledger_flags
& VM_LEDGER_FLAG_NO_FOOTPRINT
) {
8303 new_no_footprint
= TRUE
;
8305 new_no_footprint
= FALSE
;
8308 if (!new_no_footprint
&&
8309 object
->purgable
!= VM_PURGABLE_DENY
&&
8310 new_owner
!= TASK_NULL
&&
8311 new_owner
!= VM_OBJECT_OWNER_DISOWNED
&&
8312 new_owner
->task_legacy_footprint
) {
8314 * This task has been granted "legacy footprint" and should
8315 * not be charged for its IOKit purgeable memory. Since we
8316 * might now change the accounting of such memory to the
8317 * "graphics" ledger, for example, give it the "no footprint"
8320 new_no_footprint
= TRUE
;
8322 #endif /* __arm64__ */
8323 assert(object
->copy_strategy
== MEMORY_OBJECT_COPY_NONE
);
8324 assert(object
->shadow
== VM_OBJECT_NULL
);
8325 assert(object
->copy
== VM_OBJECT_NULL
);
8327 old_ledger_tag
= object
->vo_ledger_tag
;
8328 old_no_footprint
= object
->vo_no_footprint
;
8329 old_owner
= VM_OBJECT_OWNER(object
);
8331 DTRACE_VM8(object_ownership_change
,
8332 vm_object_t
, object
,
8334 int, old_ledger_tag
,
8335 int, old_no_footprint
,
8337 int, new_ledger_tag
,
8338 int, new_no_footprint
,
8339 int, VM_OBJECT_ID(object
));
8341 assert(object
->internal
);
8342 resident_count
= object
->resident_page_count
- object
->wired_page_count
;
8343 wired_count
= object
->wired_page_count
;
8344 compressed_count
= vm_compressor_pager_get_count(object
->pager
);
8347 * Deal with the old owner and/or ledger tag, if needed.
8349 if (old_owner
!= TASK_NULL
&&
8350 ((old_owner
!= new_owner
) /* new owner ... */
8352 (old_no_footprint
!= new_no_footprint
) /* new "no_footprint" */
8354 old_ledger_tag
!= new_ledger_tag
)) { /* ... new ledger */
8356 * Take this object off of the old owner's ledgers.
8358 vm_object_ledger_tag_ledgers(object
,
8359 &ledger_idx_volatile
,
8360 &ledger_idx_nonvolatile
,
8361 &ledger_idx_volatile_compressed
,
8362 &ledger_idx_nonvolatile_compressed
,
8364 if (object
->purgable
== VM_PURGABLE_VOLATILE
||
8365 object
->purgable
== VM_PURGABLE_EMPTY
) {
8366 ledger_idx
= ledger_idx_volatile
;
8367 ledger_idx_compressed
= ledger_idx_volatile_compressed
;
8369 ledger_idx
= ledger_idx_nonvolatile
;
8370 ledger_idx_compressed
= ledger_idx_nonvolatile_compressed
;
8372 if (resident_count
) {
8374 * Adjust the appropriate old owners's ledgers by the
8375 * number of resident pages.
8377 ledger_debit(old_owner
->ledger
,
8379 ptoa_64(resident_count
));
8380 /* adjust old owner's footprint */
8382 object
->purgable
!= VM_PURGABLE_VOLATILE
&&
8383 object
->purgable
!= VM_PURGABLE_EMPTY
) {
8384 ledger_debit(old_owner
->ledger
,
8385 task_ledgers
.phys_footprint
,
8386 ptoa_64(resident_count
));
8390 /* wired pages are always nonvolatile */
8391 ledger_debit(old_owner
->ledger
,
8392 ledger_idx_nonvolatile
,
8393 ptoa_64(wired_count
));
8395 ledger_debit(old_owner
->ledger
,
8396 task_ledgers
.phys_footprint
,
8397 ptoa_64(wired_count
));
8400 if (compressed_count
) {
8402 * Adjust the appropriate old owner's ledgers
8403 * by the number of compressed pages.
8405 ledger_debit(old_owner
->ledger
,
8406 ledger_idx_compressed
,
8407 ptoa_64(compressed_count
));
8409 object
->purgable
!= VM_PURGABLE_VOLATILE
&&
8410 object
->purgable
!= VM_PURGABLE_EMPTY
) {
8411 ledger_debit(old_owner
->ledger
,
8412 task_ledgers
.phys_footprint
,
8413 ptoa_64(compressed_count
));
8416 if (old_owner
!= new_owner
) {
8417 /* remove object from old_owner's list of owned objects */
8418 DTRACE_VM2(object_owner_remove
,
8419 vm_object_t
, object
,
8421 if (!old_task_objq_locked
) {
8422 task_objq_lock(old_owner
);
8424 old_owner
->task_owned_objects
--;
8425 queue_remove(&old_owner
->task_objq
, object
,
8426 vm_object_t
, task_objq
);
8427 switch (object
->purgable
) {
8428 case VM_PURGABLE_NONVOLATILE
:
8429 case VM_PURGABLE_EMPTY
:
8430 vm_purgeable_nonvolatile_owner_update(old_owner
,
8433 case VM_PURGABLE_VOLATILE
:
8434 vm_purgeable_volatile_owner_update(old_owner
,
8440 if (!old_task_objq_locked
) {
8441 task_objq_unlock(old_owner
);
8447 * Switch to new ledger tag and/or owner.
8450 new_task_objq_locked
= FALSE
;
8451 if (new_owner
!= old_owner
&&
8452 new_owner
!= TASK_NULL
&&
8453 new_owner
!= VM_OBJECT_OWNER_DISOWNED
) {
8455 * If the new owner is not accepting new objects ("disowning"),
8456 * the object becomes "disowned" and will be added to
8457 * the kernel's task_objq.
8459 * Check first without locking, to avoid blocking while the
8460 * task is disowning its objects.
8462 if (new_owner
->task_objects_disowning
) {
8463 new_owner
= VM_OBJECT_OWNER_DISOWNED
;
8465 task_objq_lock(new_owner
);
8466 /* check again now that we have the lock */
8467 if (new_owner
->task_objects_disowning
) {
8468 new_owner
= VM_OBJECT_OWNER_DISOWNED
;
8469 task_objq_unlock(new_owner
);
8471 new_task_objq_locked
= TRUE
;
8476 object
->vo_ledger_tag
= new_ledger_tag
;
8477 object
->vo_owner
= new_owner
;
8478 object
->vo_no_footprint
= new_no_footprint
;
8480 if (new_owner
== VM_OBJECT_OWNER_DISOWNED
) {
8482 * Disowned objects are added to the kernel's task_objq but
8483 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
8484 * differentiate them from objects intentionally owned by
8487 assert(old_owner
!= kernel_task
);
8488 new_owner
= kernel_task
;
8489 assert(!new_task_objq_locked
);
8490 task_objq_lock(new_owner
);
8491 new_task_objq_locked
= TRUE
;
	/*
	 * Deal with the new owner and/or ledger tag, if needed.
	 */
	if (new_owner != TASK_NULL &&
	    ((new_owner != old_owner)           /* new owner ... */
	    ||                                  /* ... or ... */
	    (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
	    ||                                  /* ... or ... */
	    new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
		/*
		 * Add this object to the new owner's ledgers.
		 */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &do_footprint);
		if (object->purgable == VM_PURGABLE_VOLATILE ||
		    object->purgable == VM_PURGABLE_EMPTY) {
			ledger_idx = ledger_idx_volatile;
			ledger_idx_compressed = ledger_idx_volatile_compressed;
		} else {
			ledger_idx = ledger_idx_nonvolatile;
			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
		}
		if (resident_count) {
			/*
			 * Adjust the appropriate new owner's ledgers by the
			 * number of resident pages.
			 */
			ledger_credit(new_owner->ledger,
			    ledger_idx,
			    ptoa_64(resident_count));
			/* adjust new owner's footprint */
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(resident_count));
			}
		}
		if (wired_count) {
			/* wired pages are always nonvolatile */
			ledger_credit(new_owner->ledger,
			    ledger_idx_nonvolatile,
			    ptoa_64(wired_count));
			if (do_footprint) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(wired_count));
			}
		}
		if (compressed_count) {
			/*
			 * Adjust the new owner's ledgers by the number of
			 * compressed pages.
			 */
			ledger_credit(new_owner->ledger,
			    ledger_idx_compressed,
			    ptoa_64(compressed_count));
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(compressed_count));
			}
		}
		if (new_owner != old_owner) {
			/* add object to new_owner's list of owned objects */
			DTRACE_VM2(object_owner_add,
			    vm_object_t, object,
			    task_t, new_owner);
			assert(new_task_objq_locked);
			new_owner->task_owned_objects++;
			queue_enter(&new_owner->task_objq, object,
			    vm_object_t, task_objq);
			switch (object->purgable) {
			case VM_PURGABLE_NONVOLATILE:
			case VM_PURGABLE_EMPTY:
				vm_purgeable_nonvolatile_owner_update(new_owner,
				    +1);
				break;
			case VM_PURGABLE_VOLATILE:
				vm_purgeable_volatile_owner_update(new_owner,
				    +1);
				break;
			default:
				break;
			}
		}
	}

	if (new_task_objq_locked) {
		task_objq_unlock(new_owner);
	}

	return KERN_SUCCESS;
}
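
/*
 * Usage sketch (hypothetical caller, for illustration only): a caller that
 * wants to re-tag an object and/or transfer its accounting to another task
 * would lock the object and invoke vm_object_ownership_change() with the
 * five arguments shown at the call site in vm_owned_objects_disown() below,
 * e.g.:
 *
 *	vm_object_lock(object);
 *	kr = vm_object_ownership_change(
 *		object,
 *		new_ledger_tag,     // or object->vo_ledger_tag to keep it
 *		new_owner_task,     // or VM_OBJECT_OWNER_DISOWNED
 *		0,                  // new_ledger_flags
 *		FALSE);             // old owner's task_objq not locked
 *	vm_object_unlock(object);
 *
 * "new_ledger_tag", "new_owner_task" and "kr" above are placeholder names,
 * not identifiers defined in this file.
 */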
void
vm_owned_objects_disown(
	task_t  task)
{
	vm_object_t     next_object;
	vm_object_t     object;
	kern_return_t   kr;
	int             collisions;

	if (task == NULL) {
		return;
	}

	collisions = 0;

again:
	if (task->task_objects_disowned) {
		/* task has already disowned its owned objects */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		assert(task->task_owned_objects == 0);
		return;
	}

	task_objq_lock(task);

	task->task_objects_disowning = TRUE;
	for (object = (vm_object_t) queue_first(&task->task_objq);
	    !queue_end(&task->task_objq, (queue_entry_t) object);
	    object = next_object) {
		if (task->task_nonvolatile_objects == 0 &&
		    task->task_volatile_objects == 0 &&
		    task->task_owned_objects == 0) {
			/* no more objects owned by "task" */
			break;
		}

		next_object = (vm_object_t) queue_next(&object->task_objq);

#if DEBUG
		assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
		assert(object->vo_owner == task);
		if (!vm_object_lock_try(object)) {
			task_objq_unlock(task);
			mutex_pause(collisions++);
			goto again;
		}
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		kr = vm_object_ownership_change(
			object,
			object->vo_ledger_tag,    /* unchanged */
			VM_OBJECT_OWNER_DISOWNED, /* new owner */
			0,                        /* new_ledger_flags */
			TRUE);                    /* old_owner->task_objq locked */
		assert(kr == KERN_SUCCESS);
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
		vm_object_unlock(object);
	}
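
	/*
	 * Informal note on the loop above: the object lock is only taken
	 * via vm_object_lock_try() while the task_objq lock is held; on
	 * contention the queue lock is dropped, mutex_pause() backs off
	 * with an increasing "collisions" count, and the scan restarts from
	 * "again:".  "next_object" is captured before the ownership change
	 * because vm_object_ownership_change() removes the object from this
	 * task_objq.
	 */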
	if (__improbable(task->task_owned_objects != 0)) {
		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
		    __FUNCTION__,
		    task,
		    task->task_volatile_objects,
		    task->task_nonvolatile_objects,
		    task->task_owned_objects,
		    &task->task_objq,
		    queue_first(&task->task_objq),
		    queue_last(&task->task_objq));
	}
	/* there shouldn't be any objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_owned_objects == 0);
	assert(task->task_objects_disowning);

	/* and we don't need to try and disown again */
	task->task_objects_disowned = TRUE;

	task_objq_unlock(task);
}
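
/*
 * Informal summary: once vm_owned_objects_disown() returns, every object the
 * task used to own has been moved to the kernel's task_objq with vo_owner set
 * to VM_OBJECT_OWNER_DISOWNED, its pages have been debited from the task's
 * ledgers and credited to the kernel's, and the "task_objects_disowning" flag
 * makes vm_object_ownership_change() redirect any later attempt to assign an
 * object to this task back to VM_OBJECT_OWNER_DISOWNED.
 */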