/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif
#if VM_OBJECT_ACCESS_TRACKING
uint64_t vm_object_access_tracking_reads = 0;
uint64_t vm_object_access_tracking_writes = 0;
#endif /* VM_OBJECT_ACCESS_TRACKING */

boolean_t vm_object_collapse_compressor_allowed = TRUE;

struct vm_counters vm_counters;

#if VM_OBJECT_TRACKING
boolean_t vm_object_tracking_inited = FALSE;
btlog_t *vm_object_tracking_btlog;

void
vm_object_tracking_init(void)
{
	int vm_object_tracking;

	vm_object_tracking = 1;
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
	    sizeof(vm_object_tracking));

	if (vm_object_tracking) {
		vm_object_tracking_btlog = btlog_create(
			VM_OBJECT_TRACKING_NUM_RECORDS,
			VM_OBJECT_TRACKING_BTDEPTH,
			TRUE /* caller_will_remove_entries_for_element? */);
		assert(vm_object_tracking_btlog);
		vm_object_tracking_inited = TRUE;
	}
}
#endif /* VM_OBJECT_TRACKING */
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	mapped.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait until the
 *	"pager_initialized" field is asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	property.
 *
 *	ZZZ Continue this comment.
 */
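
/*
 * Illustrative sketch (not part of the build): the copy-on-write
 * machinery described above resolves a page by walking the shadow
 * chain.  The hypothetical helper below is a simplified rendering of
 * that walk; the real lookup lives in the fault path and also handles
 * locking, busy pages and paged-out pages, none of which is shown here.
 */
#if 0
static vm_page_t
shadow_chain_lookup_sketch(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		/* is the page resident at this level of the chain? */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			return m;
		}
		/* not here: descend to the shadow, adjusting the offset */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif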
/* Forward declarations for internal functions. */
static kern_return_t	vm_object_terminate(
	vm_object_t		object);

static kern_return_t	vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object);

static void		vm_object_do_collapse(
	vm_object_t		object,
	vm_object_t		backing_object);

static void		vm_object_do_bypass(
	vm_object_t		object,
	vm_object_t		backing_object);

static void		vm_object_release_pager(
	memory_object_t		pager);

zone_t			vm_object_zone;	/* vm backing store zone */
/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object	kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t		kernel_object;

static struct vm_object	compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t		compressor_object = &compressor_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object	vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object	vm_object_template;

unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;

static queue_head_t	vm_object_cached_list;
static uint32_t		vm_object_cache_pages_freed = 0;
static uint32_t		vm_object_cache_pages_moved = 0;
static uint32_t		vm_object_cache_pages_skipped = 0;
static uint32_t		vm_object_cache_adds = 0;
static uint32_t		vm_object_cached_count = 0;
static lck_mtx_t	vm_object_cached_lock_data;
static lck_mtx_ext_t	vm_object_cached_lock_data_ext;

static uint32_t		vm_object_page_grab_failed = 0;
static uint32_t		vm_object_page_grab_skipped = 0;
static uint32_t		vm_object_page_grab_returned = 0;
static uint32_t		vm_object_page_grab_pmapped = 0;
static uint32_t		vm_object_page_grab_reactivations = 0;

#define vm_object_cache_lock_spin()		\
	lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()		\
	lck_mtx_unlock(&vm_object_cached_lock_data)
static void	vm_object_cache_remove_locked(vm_object_t);

static void	vm_object_reap(vm_object_t object);
static void	vm_object_reap_async(vm_object_t object);
static void	vm_object_reaper_thread(void);

static lck_mtx_t	vm_object_reaper_lock_data;
static lck_mtx_ext_t	vm_object_reaper_lock_data_ext;

static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;

#define vm_object_reaper_lock()		\
	lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin()	\
	lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock()	\
	lck_mtx_unlock(&vm_object_reaper_lock_data)
#if CONFIG_IOSCHED
/* I/O Re-prioritization request list */
queue_head_t	io_reprioritize_list;
lck_spin_t	io_reprioritize_list_lock;

#define IO_REPRIORITIZE_LIST_LOCK()	\
	lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
#define IO_REPRIORITIZE_LIST_UNLOCK()	\
	lck_spin_unlock(&io_reprioritize_list_lock)

#define MAX_IO_REPRIORITIZE_REQS	8192
zone_t		io_reprioritize_req_zone;

/* I/O Re-prioritization thread */
int io_reprioritize_wakeup = 0;
static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);

#define IO_REPRIO_THREAD_WAKEUP()	thread_wakeup((event_t)&io_reprioritize_wakeup)
#define IO_REPRIO_THREAD_CONTINUATION()				\
{								\
	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);	\
	thread_block(io_reprioritize_thread);			\
}

void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
void vm_decmp_upl_reprioritize(upl_t, int);
#endif /* CONFIG_IOSCHED */

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

__private_extern__ void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	*object = vm_object_template;
	vm_page_queue_init(&object->memq);
#if UPL_DEBUG || CONFIG_IOSCHED
	queue_init(&object->uplq);
#endif
	vm_object_lock_init(object);
	object->vo_size = size;

#if VM_OBJECT_TRACKING_OP_CREATED
	if (vm_object_tracking_inited) {
		void	*bt[VM_OBJECT_TRACKING_BTDEPTH];
		int	numsaved = 0;

		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
		btlog_add_entry(vm_object_tracking_btlog,
		    object,
		    VM_OBJECT_TRACKING_OP_CREATED,
		    bt,
		    numsaved);
	}
#endif /* VM_OBJECT_TRACKING_OP_CREATED */
}
__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL) {
		_vm_object_allocate(size, object);
	}

	return object;
}
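
/*
 * Usage sketch (not part of the build): a caller allocates an object
 * sized in bytes and drops its reference with vm_object_deallocate()
 * when done; the object is reaped once the last reference goes away.
 */
#if 0
static void
vm_object_allocate_example(void)
{
	vm_object_t object;

	/* a four-page, zero-filled, internal object */
	object = vm_object_allocate((vm_object_size_t)(4 * PAGE_SIZE));
	assert(object != VM_OBJECT_NULL);

	/* ... map or populate the object ... */

	/* drop our reference; storage may be relinquished */
	vm_object_deallocate(object);
}
#endif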
lck_grp_t		vm_object_lck_grp;
lck_grp_t		vm_object_cache_lck_grp;
lck_grp_attr_t		vm_object_lck_grp_attr;
lck_attr_t		vm_object_lck_attr;
lck_attr_t		kernel_object_lck_attr;
lck_attr_t		compressor_object_lck_attr;

extern void vm_named_entry_init(void);

int workaround_41447923 = 0;
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	vm_size_t	vm_object_size;

	assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));

	vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);

	vm_object_zone = zinit(vm_object_size,
	    round_page(512 * 1024),
	    round_page(12 * 1024),
	    "vm objects");
	zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
	zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE);

	vm_object_init_lck_grp();

	queue_init(&vm_object_cached_list);

	lck_mtx_init_ext(&vm_object_cached_lock_data,
	    &vm_object_cached_lock_data_ext,
	    &vm_object_cache_lck_grp,
	    &vm_object_lck_attr);

	queue_init(&vm_object_reaper_queue);

	lck_mtx_init_ext(&vm_object_reaper_lock_data,
	    &vm_object_reaper_lock_data_ext,
	    &vm_object_lck_grp,
	    &vm_object_lck_attr);
	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */

	vm_object_template.memq.prev = 0;
	vm_object_template.memq.next = 0;
#if 0
	/*
	 * We can't call vm_object_lock_init() here because that will
	 * allocate some memory and VM is not fully initialized yet.
	 * The lock will be initialized for each allocated object in
	 * _vm_object_allocate(), so we don't need to initialize it in
	 * the vm_object_template.
	 */
	vm_object_lock_init(&vm_object_template);
#endif
#if DEVELOPMENT || DEBUG
	vm_object_template.Lock_owner = 0;
#endif
	vm_object_template.vo_size = 0;
	vm_object_template.memq_hint = VM_PAGE_NULL;
	vm_object_template.ref_count = 1;
#if TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif /* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.wired_page_count = 0;
	vm_object_template.reusable_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.paging_in_progress = 0;
#if __LP64__
	vm_object_template.__object1_unused_bits = 0;
#endif /* __LP64__ */
	vm_object_template.activity_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.purgable = VM_PURGABLE_DENY;
	vm_object_template.purgeable_when_ripe = FALSE;
	vm_object_template.purgeable_only_by_kernel = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.true_share = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.named = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;
	/* End bitfields */

	vm_object_template.cached_list.prev = NULL;
	vm_object_template.cached_list.next = NULL;

	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.sequential = (vm_object_offset_t) 0;
	vm_object_template.pages_created = 0;
	vm_object_template.pages_used = 0;
	vm_object_template.scan_collisions = 0;
#if CONFIG_PHANTOM_CACHE
	vm_object_template.phantom_object_id = 0;
#endif
	vm_object_template.cow_hint = ~(vm_offset_t)0;

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
	vm_object_template.set_cache_attr = FALSE;
	vm_object_template.object_is_shared_cache = FALSE;
	vm_object_template.code_signed = FALSE;
	vm_object_template.transposed = FALSE;
	vm_object_template.mapping_in_progress = FALSE;
	vm_object_template.phantom_isssd = FALSE;
	vm_object_template.volatile_empty = FALSE;
	vm_object_template.volatile_fault = FALSE;
	vm_object_template.all_reusable = FALSE;
	vm_object_template.blocked_access = FALSE;
	vm_object_template.vo_ledger_tag = VM_LEDGER_TAG_NONE;
	vm_object_template.vo_no_footprint = FALSE;
#if CONFIG_IOSCHED || UPL_DEBUG
	vm_object_template.uplq.prev = NULL;
	vm_object_template.uplq.next = NULL;
#endif /* UPL_DEBUG */
#ifdef VM_PIP_DEBUG
	bzero(&vm_object_template.pip_holders,
	    sizeof(vm_object_template.pip_holders));
#endif /* VM_PIP_DEBUG */

	vm_object_template.objq.next = NULL;
	vm_object_template.objq.prev = NULL;
	vm_object_template.task_objq.next = NULL;
	vm_object_template.task_objq.prev = NULL;

	vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	vm_object_template.purgeable_queue_group = 0;

	vm_object_template.vo_cache_ts = 0;

	vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;
#if !VM_TAG_ACTIVE_UPDATE
	vm_object_template.wired_objq.next = NULL;
	vm_object_template.wired_objq.prev = NULL;
#endif /* ! VM_TAG_ACTIVE_UPDATE */

	vm_object_template.io_tracking = FALSE;

#if CONFIG_SECLUDED_MEMORY
	vm_object_template.eligible_for_secluded = FALSE;
	vm_object_template.can_grab_secluded = FALSE;
#else /* CONFIG_SECLUDED_MEMORY */
	vm_object_template.__object3_unused_bits = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

#if VM_OBJECT_ACCESS_TRACKING
	vm_object_template.access_tracking = FALSE;
	vm_object_template.access_tracking_reads = 0;
	vm_object_template.access_tracking_writes = 0;
#endif /* VM_OBJECT_ACCESS_TRACKING */

#if DEBUG
	bzero(&vm_object_template.purgeable_owner_bt[0],
	    sizeof(vm_object_template.purgeable_owner_bt));
	vm_object_template.vo_purgeable_volatilizer = NULL;
	bzero(&vm_object_template.purgeable_volatilizer_bt[0],
	    sizeof(vm_object_template.purgeable_volatilizer_bt));
#endif /* DEBUG */
	/*
	 *	Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

	/*
	 * Note that in the following size specifications, we need to add 1 because
	 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
	 */

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
	    kernel_object);

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
	    compressor_object);
	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	kernel_object->no_tag_update = TRUE;

	/*
	 *	Initialize the "submap object".  Make it as large as the
	 *	kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
	    vm_submap_object);
	vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called with
	 * non-zone memory.
	 */
	vm_object_reference(vm_submap_object);

	vm_named_entry_init();

	PE_parse_boot_argn("workaround_41447923", &workaround_41447923,
	    sizeof(workaround_41447923));
}
#if CONFIG_IOSCHED
void
vm_io_reprioritize_init(void)
{
	kern_return_t	result;
	thread_t	thread = THREAD_NULL;

	/* Initialize the I/O reprioritization subsystem */
	lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
	queue_init(&io_reprioritize_list);

	io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
	    MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
	    4096, "io_reprioritize_req");
	zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE);

	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
	if (result == KERN_SUCCESS) {
		thread_set_thread_name(thread, "VM_io_reprioritize_thread");
		thread_deallocate(thread);
	} else {
		panic("Could not create io_reprioritize_thread");
	}
}
#endif /* CONFIG_IOSCHED */
void
vm_object_reaper_init(void)
{
	kern_return_t	kr;
	thread_t	thread;

	kr = kernel_thread_start_priority(
		(thread_continue_t) vm_object_reaper_thread,
		NULL,
		BASEPRI_VM,
		&thread);
	if (kr != KERN_SUCCESS) {
		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
	}
	thread_set_thread_name(thread, "VM_object_reaper_thread");
	thread_deallocate(thread);
}
__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}
__private_extern__ void
vm_object_init_lck_grp(void)
{
	/*
	 * initialize the vm_object lock world
	 */
	lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
	lck_attr_setdefault(&vm_object_lck_attr);
	lck_attr_setdefault(&kernel_object_lck_attr);
	lck_attr_cleardebug(&kernel_object_lck_attr);
	lck_attr_setdefault(&compressor_object_lck_attr);
	lck_attr_cleardebug(&compressor_object_lck_attr);
}
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;

__private_extern__ void
vm_object_deallocate(
	vm_object_t	object)
{
	vm_object_t	shadow = VM_OBJECT_NULL;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */

	if (object == VM_OBJECT_NULL) {
		return;
	}

	if (object == kernel_object || object == compressor_object) {
		vm_object_lock_shared(object);

		OSAddAtomic(-1, &object->ref_count);

		if (object->ref_count == 0) {
			if (object == kernel_object) {
				panic("vm_object_deallocate: losing kernel_object\n");
			} else {
				panic("vm_object_deallocate: losing compressor_object\n");
			}
		}
		vm_object_unlock(object);
		return;
	}

	if (object->ref_count == 2 &&
	    object->named) {
		/*
		 * This "named" object's reference count is about to
		 * drop from 2 to 1:
		 * we'll need to call memory_object_last_unmap().
		 */
	} else if (object->ref_count == 2 &&
	    object->internal &&
	    object->shadow != VM_OBJECT_NULL) {
		/*
		 * This internal object's reference count is about to
		 * drop from 2 to 1 and it has a shadow object:
		 * we'll want to try and collapse this object with its
		 * shadow.
		 */
	} else if (object->ref_count >= 2) {
		UInt32		original_ref_count;
		volatile UInt32	*ref_count_p;
		Boolean		atomic_swap;

		/*
		 * The object currently looks like it is not being
		 * kept alive solely by the reference we're about to release.
		 * Let's try and release our reference without taking
		 * all the locks we would need if we had to terminate the
		 * object (cache lock + exclusive object lock).
		 * Lock the object "shared" to make sure we don't race with
		 * anyone holding it "exclusive".
		 */
		vm_object_lock_shared(object);
		ref_count_p = (volatile UInt32 *) &object->ref_count;
		original_ref_count = object->ref_count;
		/*
		 * Test again as "ref_count" could have changed.
		 * "named" shouldn't change.
		 */
		if (original_ref_count == 2 &&
		    object->named) {
			/* need to take slow path for m_o_last_unmap() */
			atomic_swap = FALSE;
		} else if (original_ref_count == 2 &&
		    object->internal &&
		    object->shadow != VM_OBJECT_NULL) {
			/* need to take slow path for vm_object_collapse() */
			atomic_swap = FALSE;
		} else if (original_ref_count < 2) {
			/* need to take slow path for vm_object_terminate() */
			atomic_swap = FALSE;
		} else {
			/* try an atomic update with the shared lock */
			atomic_swap = OSCompareAndSwap(
				original_ref_count,
				original_ref_count - 1,
				(UInt32 *) &object->ref_count);
			if (atomic_swap == FALSE) {
				vm_object_deallocate_shared_swap_failures++;
				/* fall back to the slow path... */
			}
		}

		vm_object_unlock(object);

		if (atomic_swap) {
			/*
			 * ref_count was updated atomically !
			 */
			vm_object_deallocate_shared_successes++;
			return;
		}

		/*
		 * Someone else updated the ref_count at the same
		 * time and we lost the race.  Fall back to the usual
		 * slow but safe path...
		 */
		vm_object_deallocate_shared_failures++;
	}

	while (object != VM_OBJECT_NULL) {
		vm_object_lock(object);

		assert(object->ref_count > 0);

		/*
		 *	If the object has a named reference, and only
		 *	that reference would remain, inform the pager
		 *	about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_mapping_wait(object, THREAD_UNINT);
				vm_object_mapping_begin(object);
				vm_object_unlock(object);

				memory_object_last_unmap(pager);

				vm_object_lock(object);
				vm_object_mapping_end(object);
			}
			assert(object->ref_count > 0);
		}

		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call      */
		/* terminate again.  */

		if ((object->ref_count > 1) || object->terminating) {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			vm_object_res_deallocate(object);

			if (object->ref_count == 1 &&
			    object->shadow != VM_OBJECT_NULL) {
				/*
				 * There's only one reference left on this
				 * VM object.  We can't tell if it's a valid
				 * one (from a mapping for example) or if this
				 * object is just part of a possibly stale and
				 * useless shadow chain.
				 * We would like to try and collapse it into
				 * its parent, but we don't have any pointers
				 * back to this parent object.
				 * But we can try and collapse this object with
				 * its own shadows, in case these are useless
				 * too...
				 * We can't bypass this object though, since we
				 * don't know if this last reference on it is
				 * meaningful or not.
				 */
				vm_object_collapse(object, 0, FALSE);
			}
			vm_object_unlock(object);
			return;
		}

		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
			    VM_OBJECT_EVENT_INITIALIZED,
			    THREAD_UNINT);
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		VM_OBJ_RES_DECR(object);	/* XXX ? */
		/*
		 *	Terminate this object. If it had a shadow,
		 *	then deallocate it; otherwise, if we need
		 *	to retry a cache trim, do so now; otherwise,
		 *	we are done. "pageout" objects have a shadow,
		 *	but maintain a "paging reference" rather than
		 *	a normal reference.
		 */
		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

		if (vm_object_terminate(object) != KERN_SUCCESS) {
			return;
		}
		if (shadow != VM_OBJECT_NULL) {
			object = shadow;
			continue;
		}
		return;
	}
}
vm_page_t
vm_object_page_grab(
	vm_object_t	object)
{
	vm_page_t	p, next_p;
	int		p_limit = 0;
	int		p_skipped = 0;

	vm_object_lock_assert_exclusive(object);

	next_p = (vm_page_t)vm_page_queue_first(&object->memq);
	p_limit = MIN(50, object->resident_page_count);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
		p = next_p;
		next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);

		if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
			goto move_page_in_obj;
		}

		if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
			vm_page_lockspin_queues();

			if (p->vmp_pmapped) {
				int refmod_state;

				vm_object_page_grab_pmapped++;

				if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));

					if (refmod_state & VM_MEM_REFERENCED) {
						p->vmp_reference = TRUE;
					}
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}
				}
				if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));

					if (refmod_state & VM_MEM_REFERENCED) {
						p->vmp_reference = TRUE;
					}
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}

					if (p->vmp_dirty == FALSE) {
						goto take_page;
					}
				}
			}
			if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
				vm_page_activate(p);

				VM_STAT_INCR(reactivations);
				vm_object_page_grab_reactivations++;
			}
			vm_page_unlock_queues();
move_page_in_obj:
			vm_page_queue_remove(&object->memq, p, vmp_listq);
			vm_page_queue_enter(&object->memq, p, vmp_listq);

			p_skipped++;
			continue;
		}
		vm_page_lockspin_queues();
take_page:
		vm_page_free_prepare_queues(p);
		vm_object_page_grab_returned++;
		vm_object_page_grab_skipped += p_skipped;

		vm_page_unlock_queues();

		vm_page_free_prepare_object(p, TRUE);

		return p;
	}
	vm_object_page_grab_skipped += p_skipped;
	vm_object_page_grab_failed++;

	return VM_PAGE_NULL;
}
#define EVICT_PREPARE_LIMIT	64
#define EVICT_AGE		10

static	clock_sec_t	vm_object_cache_aging_ts = 0;
static void
vm_object_cache_remove_locked(
	vm_object_t	object)
{
	assert(object->purgable == VM_PURGABLE_DENY);

	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
	object->cached_list.next = NULL;
	object->cached_list.prev = NULL;

	vm_object_cached_count--;
}
void
vm_object_cache_remove(
	vm_object_t	object)
{
	vm_object_cache_lock_spin();

	if (object->cached_list.next &&
	    object->cached_list.prev) {
		vm_object_cache_remove_locked(object);
	}

	vm_object_cache_unlock();
}
void
vm_object_cache_add(
	vm_object_t	object)
{
	clock_sec_t sec;
	clock_nsec_t nsec;

	assert(object->purgable == VM_PURGABLE_DENY);

	if (object->resident_page_count == 0) {
		return;
	}
	clock_get_system_nanotime(&sec, &nsec);

	vm_object_cache_lock_spin();

	if (object->cached_list.next == NULL &&
	    object->cached_list.prev == NULL) {
		queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
		object->vo_cache_ts = sec + EVICT_AGE;
		object->vo_cache_pages_to_scan = object->resident_page_count;

		vm_object_cached_count++;
		vm_object_cache_adds++;
	}
	vm_object_cache_unlock();
}
int
vm_object_cache_evict(
	int	num_to_evict,
	int	max_objects_to_examine)
{
	vm_object_t	object = VM_OBJECT_NULL;
	vm_object_t	next_obj = VM_OBJECT_NULL;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	vm_page_t	p;
	vm_page_t	next_p;
	int		object_cnt = 0;
	vm_page_t	ep_array[EVICT_PREPARE_LIMIT];
	int		ep_count;
	int		ep_limit;
	int		ep_index;
	int		ep_freed = 0;
	int		ep_moved = 0;
	uint32_t	ep_skipped = 0;
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/*
	 * do a couple of quick checks to see if it's
	 * worthwhile grabbing the lock
	 */
	if (queue_empty(&vm_object_cached_list)) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return 0;
	}
	clock_get_system_nanotime(&sec, &nsec);

	/*
	 * the object on the head of the queue has not
	 * yet sufficiently aged
	 */
	if (sec < vm_object_cache_aging_ts) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return 0;
	}
	/*
	 * don't need the queue lock to find
	 * and lock an object on the cached list
	 */
	vm_page_unlock_queues();

	vm_object_cache_lock_spin();

	while (1) {
		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);

		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
			object = next_obj;
			next_obj = (vm_object_t)queue_next(&next_obj->cached_list);

			assert(object->purgable == VM_PURGABLE_DENY);

			if (sec < object->vo_cache_ts) {
				KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);

				vm_object_cache_aging_ts = object->vo_cache_ts;
				object = VM_OBJECT_NULL;
				break;
			}
			if (!vm_object_lock_try_scan(object)) {
				/*
				 * just skip over this guy for now... if we find
				 * an object to steal pages from, we'll revist in a bit...
				 * hopefully, the lock will have cleared
				 */
				KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);

				object = VM_OBJECT_NULL;
				continue;
			}
			if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
				/*
				 * this case really shouldn't happen, but it's not fatal
				 * so deal with it... if we don't remove the object from
				 * the list, we'll never move past it.
				 */
				KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);

				vm_object_cache_remove_locked(object);
				vm_object_unlock(object);
				object = VM_OBJECT_NULL;
				continue;
			}
			/*
			 * we have a locked object with pages...
			 * time to start harvesting
			 */
			break;
		}
		vm_object_cache_unlock();

		if (object == VM_OBJECT_NULL) {
			break;
		}

		/*
		 * object is locked at this point and
		 * has resident pages
		 */
		next_p = (vm_page_t)vm_page_queue_first(&object->memq);

		/*
		 * break the page scan into 2 pieces to minimize the time spent
		 * behind the page queue lock...
		 * the list of pages on these unused objects is likely to be cold
		 * w/r to the cpu cache which increases the time to scan the list
		 * tenfold...  and we may have a 'run' of pages we can't utilize that
		 * needs to be skipped over...
		 */
		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
			ep_limit = EVICT_PREPARE_LIMIT;
		}
		ep_count = 0;

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
			p = next_p;
			next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);

			object->vo_cache_pages_to_scan--;

			if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
				vm_page_queue_remove(&object->memq, p, vmp_listq);
				vm_page_queue_enter(&object->memq, p, vmp_listq);

				ep_skipped++;
				continue;
			}
			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
				vm_page_queue_remove(&object->memq, p, vmp_listq);
				vm_page_queue_enter(&object->memq, p, vmp_listq);

				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
			}
			ep_array[ep_count++] = p;
		}
		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);

		vm_page_lockspin_queues();

		for (ep_index = 0; ep_index < ep_count; ep_index++) {
			p = ep_array[ep_index];

			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
				p->vmp_reference = FALSE;
				p->vmp_no_cache = FALSE;

				/*
				 * we've already filtered out pages that are in the laundry
				 * so if we get here, this page can't be on the pageout queue
				 */
				vm_page_queues_remove(p, FALSE);
				vm_page_enqueue_inactive(p, TRUE);

				ep_moved++;
			} else {
#if CONFIG_PHANTOM_CACHE
				vm_phantom_cache_add_ghost(p);
#endif
				vm_page_free_prepare_queues(p);

				assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
				/*
				 * Add this page to our list of reclaimed pages,
				 * to be freed later.
				 */
				p->vmp_snext = local_free_q;
				local_free_q = p;

				ep_freed++;
			}
		}
		vm_page_unlock_queues();

		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);

		if (local_free_q) {
			vm_page_free_list(local_free_q, TRUE);
			local_free_q = VM_PAGE_NULL;
		}
		if (object->vo_cache_pages_to_scan == 0) {
			KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);

			vm_object_cache_remove(object);

			KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
		}
		/*
		 * done with this object
		 */
		vm_object_unlock(object);
		object = VM_OBJECT_NULL;

		/*
		 * at this point, we are not holding any locks
		 */
		if ((ep_freed + ep_moved) >= num_to_evict) {
			/*
			 * we've reached our target for the
			 * number of pages to evict
			 */
			break;
		}
		vm_object_cache_lock_spin();
	}
	/*
	 * put the page queues lock back to the caller's
	 * idea of it
	 */
	vm_page_lock_queues();

	vm_object_cache_pages_freed += ep_freed;
	vm_object_cache_pages_moved += ep_moved;
	vm_object_cache_pages_skipped += ep_skipped;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
	return ep_freed;
}
/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if its found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	vm_object_lock_assert_exclusive(object);

	if (!object->pageout && (!object->internal && object->can_persist) &&
	    (object->pager != NULL || object->shadow_severed)) {
		/*
		 * Clear pager_trusted bit so that the pages get yanked
		 * out of the object instead of cleaned in place.  This
		 * prevents a deadlock in XMM and makes more sense anyway.
		 */
		object->pager_trusted = FALSE;

		vm_object_reap_pages(object, REAP_TERMINATE);
	}
	/*
	 *	Make sure the object isn't already being terminated
	 */
	if (object->terminating) {
		vm_object_lock_assert_exclusive(object);
		object->ref_count--;
		assert(object->ref_count > 0);
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Did somebody get a reference to the object while we were
	 *	cleaning it?
	 */
	if (object->ref_count != 1) {
		vm_object_lock_assert_exclusive(object);
		object->ref_count--;
		assert(object->ref_count > 0);
		vm_object_res_deallocate(object);
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Make sure no one can look us up now.
	 */

	object->terminating = TRUE;
	object->alive = FALSE;

	if (!object->internal &&
	    object->cached_list.next &&
	    object->cached_list.prev) {
		vm_object_cache_remove(object);
	}

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.  The reference we hold on the shadow must be dropped
	 *	by our caller.
	 */
	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
	    !(object->pageout)) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object) {
			shadow_object->copy = VM_OBJECT_NULL;
		}
		vm_object_unlock(shadow_object);
	}

	if (object->paging_in_progress != 0 ||
	    object->activity_in_progress != 0) {
		/*
		 * There are still some paging_in_progress references
		 * on this object, meaning that there are some paging
		 * or other I/O operations in progress for this VM object.
		 * Such operations take some paging_in_progress references
		 * up front to ensure that the object doesn't go away, but
		 * they may also need to acquire a reference on the VM object,
		 * to map it in kernel space, for example.  That means that
		 * they may end up releasing the last reference on the VM
		 * object, triggering its termination, while still holding
		 * paging_in_progress references.  Waiting for these
		 * pending paging_in_progress references to go away here would
		 * deadlock.
		 *
		 * To avoid deadlocking, we'll let the vm_object_reaper_thread
		 * complete the VM object termination if it still holds
		 * paging_in_progress references at this point.
		 *
		 * No new paging_in_progress should appear now that the
		 * VM object is "terminating" and not "alive".
		 */
		vm_object_reap_async(object);
		vm_object_unlock(object);
		/*
		 * Return KERN_FAILURE to let the caller know that we
		 * haven't completed the termination and it can't drop this
		 * object's reference on its shadow object yet.
		 * The reaper thread will take care of that once it has
		 * completed this object's termination.
		 */
		return KERN_FAILURE;
	}
	/*
	 * complete the VM object termination
	 */
	vm_object_reap(object);
	object = VM_OBJECT_NULL;

	/*
	 * the object lock was released by vm_object_reap()
	 *
	 * KERN_SUCCESS means that this object has been terminated
	 * and no longer needs its shadow object but still holds a
	 * reference on it.
	 * The caller is responsible for dropping that reference.
	 * We can't call vm_object_deallocate() here because that
	 * would create a recursion.
	 */
	return KERN_SUCCESS;
}
/*
 * vm_object_reap():
 *
 * Complete the termination of a VM object after it's been marked
 * as "terminating" and "!alive" by vm_object_terminate().
 *
 * The VM object must be locked by caller.
 * The lock will be released on return and the VM object is no longer valid.
 */
static void
vm_object_reap(
	vm_object_t object)
{
	memory_object_t		pager;

	vm_object_lock_assert_exclusive(object);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);

	vm_object_reap_count++;

	/*
	 * Disown this purgeable object to cleanup its owner's purgeable
	 * ledgers.  We need to do this before disconnecting the object
	 * from its pager, to properly account for compressed pages.
	 */
	if (object->internal &&
	    (object->purgable != VM_PURGABLE_DENY ||
	    object->vo_ledger_tag)) {
		int ledger_flags;
		kern_return_t kr;

		ledger_flags = 0;
		if (object->vo_no_footprint) {
			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
		}
		assert(!object->alive);
		assert(object->terminating);
		kr = vm_object_ownership_change(object,
		    object->vo_ledger_tag,	/* unchanged */
		    NULL,			/* no owner */
		    ledger_flags,
		    FALSE);			/* task_objq not locked */
		assert(kr == KERN_SUCCESS);
		assert(object->vo_owner == NULL);
	}

	pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;

	if (pager != MEMORY_OBJECT_NULL) {
		memory_object_control_disable(object->pager_control);
	}

	object->ref_count--;
#if TASK_SWAPPER
	assert(object->res_count == 0);
#endif /* TASK_SWAPPER */

	assert(object->ref_count == 0);

	/*
	 * remove from purgeable queue if it's on
	 */
	if (object->internal) {
		assert(VM_OBJECT_OWNER(object) == TASK_NULL);

		VM_OBJECT_UNWIRED(object);

		if (object->purgable == VM_PURGABLE_DENY) {
			/* not purgeable: nothing to do */
		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
			purgeable_q_t queue;

			queue = vm_purgeable_object_remove(object);
			assert(queue);

			if (object->purgeable_when_ripe) {
				/*
				 * Must take page lock for this -
				 * using it to protect token queue
				 */
				vm_page_lock_queues();
				vm_purgeable_token_delete_first(queue);

				assert(queue->debug_count_objects >= 0);
				vm_page_unlock_queues();
			}

			/*
			 * Update "vm_page_purgeable_count" in bulk and mark
			 * object as VM_PURGABLE_EMPTY to avoid updating
			 * "vm_page_purgeable_count" again in vm_page_remove()
			 * when reaping the pages.
			 */
			unsigned int delta;
			assert(object->resident_page_count >=
			    object->wired_page_count);
			delta = (object->resident_page_count -
			    object->wired_page_count);
			if (delta != 0) {
				assert(vm_page_purgeable_count >= delta);
				OSAddAtomic(-delta,
				    (SInt32 *)&vm_page_purgeable_count);
			}
			if (object->wired_page_count != 0) {
				assert(vm_page_purgeable_wired_count >=
				    object->wired_page_count);
				OSAddAtomic(-object->wired_page_count,
				    (SInt32 *)&vm_page_purgeable_wired_count);
			}
			object->purgable = VM_PURGABLE_EMPTY;
		} else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
		    object->purgable == VM_PURGABLE_EMPTY) {
			/* remove from nonvolatile queue */
			vm_purgeable_nonvolatile_dequeue(object);
		} else {
			panic("object %p in unexpected purgeable state 0x%x\n",
			    object, object->purgable);
		}
		if (object->transposed &&
		    object->cached_list.next != NULL &&
		    object->cached_list.prev == NULL) {
			/*
			 * object->cached_list.next "points" to the
			 * object that was transposed with this object.
			 */
		} else {
			assert(object->cached_list.next == NULL);
		}
		assert(object->cached_list.prev == NULL);
	}

	if (object->pageout) {
		/*
		 * free all remaining pages tabled on
		 * this object
		 * clean up its shadow
		 */
		assert(object->shadow != VM_OBJECT_NULL);

		vm_pageout_object_terminate(object);
	} else if (object->resident_page_count) {
		/*
		 * free all remaining pages tabled on
		 * this object
		 */
		vm_object_reap_pages(object, REAP_REAP);
	}
	assert(vm_page_queue_empty(&object->memq));
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->ref_count == 0);

	/*
	 * If the pager has not already been released by
	 * vm_object_destroy, we need to terminate it and
	 * release our reference to it here.
	 */
	if (pager != MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		vm_object_release_pager(pager);
		vm_object_lock(object);
	}

	/* kick off anyone waiting on terminating */
	object->terminating = FALSE;
	vm_object_paging_begin(object);
	vm_object_paging_end(object);
	vm_object_unlock(object);

	object->shadow = VM_OBJECT_NULL;

#if VM_OBJECT_TRACKING
	if (vm_object_tracking_inited) {
		btlog_remove_entries_for_element(vm_object_tracking_btlog,
		    object);
	}
#endif /* VM_OBJECT_TRACKING */

	vm_object_lock_destroy(object);
	/*
	 *	Free the space for the object.
	 */
	zfree(vm_object_zone, object);
	object = VM_OBJECT_NULL;
}
unsigned int vm_max_batch = 256;

#define V_O_R_MAX_BATCH 128

#define BATCH_LIMIT(max)	(vm_max_batch >= max ? max : vm_max_batch)


#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)		\
	MACRO_BEGIN							\
	if (_local_free_q) {						\
		if (do_disconnect) {					\
			vm_page_t m;					\
			for (m = _local_free_q;				\
			    m != VM_PAGE_NULL;				\
			    m = m->vmp_snext) {				\
				if (m->vmp_pmapped) {			\
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
				}					\
			}						\
		}							\
		vm_page_free_list(_local_free_q, TRUE);			\
		_local_free_q = VM_PAGE_NULL;				\
	}								\
	MACRO_END
void
vm_object_reap_pages(
	vm_object_t	object,
	int		reap_type)
{
	vm_page_t	p;
	vm_page_t	next;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	int		loop_count;
	boolean_t	disconnect_on_release;
	pmap_flush_context	pmap_flush_context_storage;

	if (reap_type == REAP_DATA_FLUSH) {
		/*
		 * We need to disconnect pages from all pmaps before
		 * releasing them to the free list
		 */
		disconnect_on_release = TRUE;
	} else {
		/*
		 * Either the caller has already disconnected the pages
		 * from all pmaps, or we disconnect them here as we add
		 * them to out local list of pages to be released.
		 * No need to re-disconnect them when we release the pages
		 * at the end.
		 */
		disconnect_on_release = FALSE;
	}

restart_after_sleep:
	if (vm_page_queue_empty(&object->memq)) {
		return;
	}
	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

	if (reap_type == REAP_PURGEABLE) {
		pmap_flush_context_init(&pmap_flush_context_storage);
	}

	vm_page_lockspin_queues();

	next = (vm_page_t)vm_page_queue_first(&object->memq);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
		p = next;
		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);

		if (--loop_count == 0) {
			vm_page_unlock_queues();

			if (local_free_q) {
				if (reap_type == REAP_PURGEABLE) {
					pmap_flush(&pmap_flush_context_storage);
					pmap_flush_context_init(&pmap_flush_context_storage);
				}
				/*
				 * Free the pages we reclaimed so far
				 * and take a little break to avoid
				 * hogging the page queue lock too long
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
				    disconnect_on_release);
			} else {
				mutex_pause(0);
			}
			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

			vm_page_lockspin_queues();
		}
		if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
			if (p->vmp_busy || p->vmp_cleaning) {
				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
				    disconnect_on_release);

				PAGE_SLEEP(object, p, THREAD_UNINT);

				goto restart_after_sleep;
			}
			if (p->vmp_laundry) {
				vm_pageout_steal_laundry(p, TRUE);
			}
		}
		switch (reap_type) {
		case REAP_DATA_FLUSH:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * this is an odd case... perhaps we should
				 * zero-fill this page since we're conceptually
				 * tossing its data at this point, but leaving
				 * it on the object to honor the 'wire' contract
				 */
				continue;
			}
			break;

		case REAP_PURGEABLE:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * can't purge a wired page
				 */
				vm_page_purged_wired++;
				continue;
			}
			if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
				vm_pageout_steal_laundry(p, TRUE);
			}

			if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
				/*
				 * page is being acted upon,
				 * so don't mess with it
				 */
				vm_page_purged_others++;
				continue;
			}
			if (p->vmp_busy) {
				/*
				 * We can't reclaim a busy page but we can
				 * make it more likely to be paged (it's not wired) to make
				 * sure that it gets considered by
				 * vm_pageout_scan() later.
				 */
				if (VM_PAGE_PAGEABLE(p)) {
					vm_page_deactivate(p);
				}
				vm_page_purged_busy++;
				continue;
			}

			assert(VM_PAGE_OBJECT(p) != kernel_object);

			/*
			 * we can discard this page...
			 */
			if (p->vmp_pmapped == TRUE) {
				/*
				 * unmap the page
				 */
				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
			}
			vm_page_purged_count++;

			break;

		case REAP_TERMINATE:
			if (p->vmp_absent || p->vmp_private) {
				/*
				 *	For private pages, VM_PAGE_FREE just
				 *	leaves the page structure around for
				 *	its owner to clean up.  For absent
				 *	pages, the structure is returned to
				 *	the appropriate pool.
				 */
				break;
			}
			if (p->vmp_fictitious) {
				assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
				break;
			}
			if (!p->vmp_dirty && p->vmp_wpmapped) {
				p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
			}

			if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) {
				assert(!object->internal);

				p->vmp_free_when_done = TRUE;

				if (!p->vmp_laundry) {
					vm_page_queues_remove(p, TRUE);
					/*
					 * flush page... page will be freed
					 * upon completion of I/O
					 */
					vm_pageout_cluster(p);
				}
				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
				    disconnect_on_release);

				vm_object_paging_wait(object, THREAD_UNINT);

				goto restart_after_sleep;
			}
			break;

		case REAP_REAP:
			break;
		}
		vm_page_free_prepare_queues(p);
		assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
		/*
		 * Add this page to our list of reclaimed pages,
		 * to be freed later.
		 */
		p->vmp_snext = local_free_q;
		local_free_q = p;
	}
	vm_page_unlock_queues();

	/*
	 * Free the remaining reclaimed pages
	 */
	if (reap_type == REAP_PURGEABLE) {
		pmap_flush(&pmap_flush_context_storage);
	}
	VM_OBJ_REAP_FREELIST(local_free_q,
	    disconnect_on_release);
}
static void
vm_object_reap_async(
	vm_object_t	object)
{
	vm_object_lock_assert_exclusive(object);

	vm_object_reaper_lock_spin();

	vm_object_reap_count_async++;

	/* enqueue the VM object... */
	queue_enter(&vm_object_reaper_queue, object,
	    vm_object_t, cached_list);

	vm_object_reaper_unlock();

	/* ... and wake up the reaper thread */
	thread_wakeup((event_t) &vm_object_reaper_queue);
}
static void
vm_object_reaper_thread(void)
{
	vm_object_t	object, shadow_object;

	vm_object_reaper_lock_spin();

	while (!queue_empty(&vm_object_reaper_queue)) {
		queue_remove_first(&vm_object_reaper_queue,
		    object,
		    vm_object_t,
		    cached_list);

		vm_object_reaper_unlock();
		vm_object_lock(object);

		assert(object->terminating);
		assert(!object->alive);

		/*
		 * The pageout daemon might be playing with our pages.
		 * Now that the object is dead, it won't touch any more
		 * pages, but some pages might already be on their way out.
		 * Hence, we wait until the active paging activities have
		 * ceased before we break the association with the pager
		 * itself.
		 */
		while (object->paging_in_progress != 0 ||
		    object->activity_in_progress != 0) {
			vm_object_wait(object,
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			    THREAD_UNINT);
			vm_object_lock(object);
		}

		shadow_object =
		    object->pageout ? VM_OBJECT_NULL : object->shadow;

		vm_object_reap(object);
		/* cache is unlocked and object is no longer valid */
		object = VM_OBJECT_NULL;

		if (shadow_object != VM_OBJECT_NULL) {
			/*
			 * Drop the reference "object" was holding on
			 * its shadow object.
			 */
			vm_object_deallocate(shadow_object);
			shadow_object = VM_OBJECT_NULL;
		}
		vm_object_reaper_lock_spin();
	}

	/* wait for more work... */
	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);

	vm_object_reaper_unlock();

	thread_block((thread_continue_t) vm_object_reaper_thread);
	/*NOTREACHED*/
}
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 */
static void
vm_object_release_pager(
	memory_object_t	pager)
{
	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	/*
	 *	Release reference to pager.
	 */
	memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	__unused kern_return_t	reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL) {
		return KERN_SUCCESS;
	}

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL) {
		memory_object_control_disable(object->pager_control);
	}

	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call)..
		 */
		vm_object_deallocate(object);
	}
	return KERN_SUCCESS;
}
/*
 * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
 * exist because of the need to handle shadow chains.  When deactivating pages, we only
 * want to deactivate the ones at the top most level in the object chain.  In order to do
 * this efficiently, the specified address range is divided up into "chunks" and we use
 * a bit map to keep track of which pages have already been processed as we descend down
 * the shadow chain.  These chunk macros hide the details of the bit map implementation
 * as much as we can.
 *
 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
 * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
 * order bit represents page 0 in the current range and highest order bit represents
 * page 63.
 *
 * For further convenience, we also use negative logic for the page state in the bit map.
 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
 * been processed.  This way we can simply test the 64-bit long word to see if it's zero
 * to easily tell if the whole range has been processed.  Therefore, the bit map starts
 * out with all the bits set.  The macros below hide all these details from the caller.
 */

#define PAGES_IN_A_CHUNK	64	/* The number of pages in the chunk must */
					/* be the same as the number of bits in  */
					/* the chunk_state_t type. We use 64     */
					/* just for convenience.		  */

#define CHUNK_SIZE	(PAGES_IN_A_CHUNK * PAGE_SIZE_64)	/* Size of a chunk in bytes */

typedef uint64_t	chunk_state_t;

/*
 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
 * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
 * then we mark pages beyond the len as having been "processed" so that we don't waste time
 * looking at pages in that range.  This can save us from unnecessarily chasing down the
 * shadow chain.
 */

#define CHUNK_INIT(c, len)						\
	MACRO_BEGIN							\
	uint64_t p;							\
									\
	(c) = 0xffffffffffffffffLL;					\
									\
	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)	\
	        MARK_PAGE_HANDLED(c, p);				\
	MACRO_END


/*
 * Return true if all pages in the chunk have not yet been processed.
 */

#define CHUNK_NOT_COMPLETE(c)	((c) != 0)

/*
 * Return true if the page at offset 'p' in the bit map has already been handled
 * while processing a higher level object in the shadow chain.
 */

#define PAGE_ALREADY_HANDLED(c, p)	(((c) & (1ULL << (p))) == 0)

/*
 * Mark the page at offset 'p' in the bit map as having been processed.
 */

#define MARK_PAGE_HANDLED(c, p) \
	MACRO_BEGIN \
	(c) = (c) & ~(1ULL << (p)); \
	MACRO_END
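
/*
 * Illustrative sketch (not part of the build): how the chunk bit-map
 * macros above compose.  Assumes a hypothetical 3-page range, so
 * CHUNK_INIT pre-marks pages 3..63 as "handled" and only the low three
 * bits remain set.  Compiled out deliberately.
 */
#if 0
static void
chunk_macros_example(void)
{
	chunk_state_t cs;

	CHUNK_INIT(cs, 3 * PAGE_SIZE_64);	/* low 3 bits set, rest clear */

	assert(CHUNK_NOT_COMPLETE(cs));
	assert(!PAGE_ALREADY_HANDLED(cs, 0));	/* page 0 still pending */
	assert(PAGE_ALREADY_HANDLED(cs, 3));	/* beyond len: pre-handled */

	MARK_PAGE_HANDLED(cs, 0);
	MARK_PAGE_HANDLED(cs, 1);
	MARK_PAGE_HANDLED(cs, 2);

	assert(!CHUNK_NOT_COMPLETE(cs));	/* whole range processed */
}
#endif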
/*
 * Return true if the page at the given offset has been paged out.  Object is
 * locked upon entry and returned locked.
 */

static boolean_t
page_is_paged_out(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	if (object->internal &&
	    !object->terminating &&
	    object->pager_ready) {
		if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
		    == VM_EXTERNAL_STATE_EXISTS) {
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * madvise_free_debug
 *
 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
 * simulate the loss of the page's contents as if the page had been
 * reclaimed and then re-faulted.
 */
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else /* DEBUG */
int madvise_free_debug = 0;
#endif /* DEBUG */
/*
 * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
 * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
 * a size that is less than or equal to the CHUNK_SIZE.
 */

static void
deactivate_pages_in_object(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	boolean_t               all_reusable,
	chunk_state_t           *chunk_state,
	pmap_flush_context      *pfc,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_page_t       m;
	int             p;
	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work     *dwp;
	int             dw_count;
	int             dw_limit;
	unsigned int    reusable = 0;

	/*
	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
	 * all the pages in the chunk.
	 */

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
		/*
		 * If this offset has already been found and handled in a higher level object, then don't
		 * do anything with it in the current shadow object.
		 */

		if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
			continue;
		}

		/*
		 * See if the page at this offset is around.  First check to see if the page is resident,
		 * then if not, check the existence map or with the pager.
		 */

		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			/*
			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
			 * so that we won't bother looking for a page at this offset again if there are more
			 * shadow objects.  Then deactivate the page.
			 */

			MARK_PAGE_HANDLED(*chunk_state, p);

			if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
			    (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
				int     clear_refmod;
				int     pmap_options;

				dwp->dw_mask = 0;

				pmap_options = 0;
				clear_refmod = VM_MEM_REFERENCED;
				dwp->dw_mask |= DW_clear_reference;

				if ((kill_page) && (object->internal)) {
					if (madvise_free_debug) {
						/*
						 * zero-fill the page now
						 * to simulate it being
						 * reclaimed and re-faulted.
						 */
						pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
					}
					m->vmp_precious = FALSE;
					m->vmp_dirty = FALSE;

					clear_refmod |= VM_MEM_MODIFIED;
					if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
						/*
						 * This page is now clean and
						 * reclaimable.  Move it out
						 * of the throttled queue, so
						 * that vm_pageout_scan() can
						 * find it.
						 */
						dwp->dw_mask |= DW_move_page;
					}

					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);

					if (reusable_page && !m->vmp_reusable) {
						assert(!all_reusable);
						assert(!object->all_reusable);
						m->vmp_reusable = TRUE;
						object->reusable_page_count++;
						assert(object->resident_page_count >= object->reusable_page_count);
						reusable++;
						/*
						 * Tell pmap this page is now
						 * "reusable" (to update pmap
						 * stats for all mappings).
						 */
						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
					}
				}
				pmap_options |= PMAP_OPTIONS_NOFLUSH;
				pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
				    clear_refmod,
				    pmap_options,
				    (void *)pfc);

				if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) {
					dwp->dw_mask |= DW_move_page;
				}

				VM_PAGE_ADD_DELAYED_WORK(dwp, m,
				    dw_count);

				if (dw_count >= dw_limit) {
					if (reusable) {
						OSAddAtomic(reusable,
						    &vm_page_stats_reusable.reusable_count);
						vm_page_stats_reusable.reusable += reusable;
						reusable = 0;
					}
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

					dwp = &dw_array[0];
					dw_count = 0;
				}
			}
		} else {
			/*
			 * The page at this offset isn't memory resident, check to see if it's
			 * been paged out.  If so, mark it as handled so we don't bother looking
			 * for it in the shadow chain.
			 */

			if (page_is_paged_out(object, offset)) {
				MARK_PAGE_HANDLED(*chunk_state, p);

				/*
				 * If we're killing a non-resident page, then clear the page in the existence
				 * map so we don't bother paging it back in if it's touched again in the future.
				 */

				if ((kill_page) && (object->internal)) {
					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);

					if (pmap != PMAP_NULL) {
						/*
						 * Tell pmap that this page
						 * is no longer mapped, to
						 * adjust the footprint ledger
						 * because this page is no
						 * longer compressed.
						 */
						pmap_remove_options(
							pmap,
							pmap_offset,
							(pmap_offset +
							PAGE_SIZE),
							PMAP_OPTIONS_REMOVE);
					}
				}
			}
		}
	}

	if (reusable) {
		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reusable += reusable;
		reusable = 0;
	}

	if (dw_count) {
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
	}
}
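
/*
 * Illustrative sketch (not part of the build): the batching pattern used
 * above.  Work items accumulate in a fixed-size array and are flushed
 * whenever the array fills, with one final flush for the remainder.
 * EXAMPLE_BATCH_LIMIT and flush_batch() are hypothetical stand-ins for
 * DELAYED_WORK_LIMIT() and vm_page_do_delayed_work().  Compiled out.
 */
#if 0
#define EXAMPLE_BATCH_LIMIT	8	/* hypothetical limit */

static void
batched_work_example(int *items, int nitems)
{
	int     batch[EXAMPLE_BATCH_LIMIT];
	int     count = 0;
	int     i;

	for (i = 0; i < nitems; i++) {
		batch[count++] = items[i];
		if (count >= EXAMPLE_BATCH_LIMIT) {
			flush_batch(batch, count);	/* hypothetical flush */
			count = 0;
		}
	}
	if (count) {
		flush_batch(batch, count);	/* flush the remainder */
	}
}
#endif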
/*
 * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
 * will always be less than or equal to the given size.  The total range is divided up
 * into chunks for efficiency and performance related to the locks and handling the shadow
 * chain.  This routine returns how much of the given "size" it actually processed.  It's
 * up to the caller to loop and keep calling this routine until the entire range they want
 * to process has been done.
 */

static vm_object_size_t
deactivate_a_chunk(
	vm_object_t             orig_object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	boolean_t               all_reusable,
	pmap_flush_context      *pfc,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_object_t             object;
	vm_object_t             tmp_object;
	vm_object_size_t        length;
	chunk_state_t           chunk_state;

	/*
	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
	 * remaining size the caller asked for.
	 */

	length = MIN(size, CHUNK_SIZE);

	/*
	 * The chunk_state keeps track of which pages we've already processed if there's
	 * a shadow chain on this object.  At this point, we haven't done anything with this
	 * range of pages yet, so initialize the state to indicate no pages processed yet.
	 */

	CHUNK_INIT(chunk_state, length);
	object = orig_object;

	/*
	 * Start at the top level object and iterate around the loop once for each object
	 * in the shadow chain.  We stop processing early if we've already found all the pages
	 * in the range.  Otherwise we stop when we run out of shadow objects.
	 */

	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
		vm_object_paging_begin(object);

		deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);

		vm_object_paging_end(object);

		/*
		 * We've finished with this object, see if there's a shadow object.  If
		 * there is, update the offset and lock the new object.  We also turn off
		 * kill_page at this point since we only kill pages in the top most object.
		 */

		tmp_object = object->shadow;

		if (tmp_object) {
			kill_page = FALSE;
			reusable_page = FALSE;
			all_reusable = FALSE;
			offset += object->vo_shadow_offset;
			vm_object_lock(tmp_object);
		}

		if (object != orig_object) {
			vm_object_unlock(object);
		}

		object = tmp_object;
	}

	if (object && object != orig_object) {
		vm_object_unlock(object);
	}

	return length;
}
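
/*
 * Illustrative sketch (not part of the build): the calling convention
 * for deactivate_a_chunk().  The routine reports how much of the range
 * it consumed, and the caller simply advances by that amount until the
 * range is exhausted -- the same loop shape used by
 * vm_object_deactivate_pages() below.  Compiled out.
 */
#if 0
	while (size) {
		length = deactivate_a_chunk(object, offset, size, kill_page,
		    reusable_page, all_reusable,
		    &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
#endif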
/*
 * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
 * we also clear the modified status of the page and "forget" any changes that have been made
 * to the page.
 */

__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
	vm_map_offset_t         pmap_offset)
{
	vm_object_size_t        length;
	boolean_t               all_reusable;
	pmap_flush_context      pmap_flush_context_storage;

	/*
	 * We break the range up into chunks and do one chunk at a time.  This is for
	 * efficiency and performance while handling the shadow chains and the locks.
	 * The deactivate_a_chunk() function returns how much of the range it processed.
	 * We keep calling this routine until the given size is exhausted.
	 */


	all_reusable = FALSE;
#if 11
	/*
	 * For the sake of accurate "reusable" pmap stats, we need
	 * to tell pmap about each page that is no longer "reusable",
	 * so we can't do the "all_reusable" optimization.
	 */
#else /* 11 */
	if (reusable_page &&
	    object->internal &&
	    object->vo_size != 0 &&
	    object->vo_size == size &&
	    object->reusable_page_count == 0) {
		all_reusable = TRUE;
		reusable_page = FALSE;
	}
#endif /* 11 */

	if ((reusable_page || all_reusable) && object->all_reusable) {
		/* This means MADV_FREE_REUSABLE has been called twice, which
		 * is probably illegal. */
		return;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);

	while (size) {
		length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
	pmap_flush(&pmap_flush_context_storage);

	if (all_reusable) {
		if (!object->all_reusable) {
			unsigned int reusable;

			object->all_reusable = TRUE;
			assert(object->reusable_page_count == 0);
			/* update global stats */
			reusable = object->resident_page_count;
			OSAddAtomic(reusable,
			    &vm_page_stats_reusable.reusable_count);
			vm_page_stats_reusable.reusable += reusable;
			vm_page_stats_reusable.all_reusable_calls++;
		}
	} else if (reusable_page) {
		vm_page_stats_reusable.partial_reusable_calls++;
	}
}
void
vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse)
{
	vm_object_offset_t      cur_offset;
	vm_page_t               m;
	unsigned int            reused, reusable;

#define VM_OBJECT_REUSE_PAGE(object, m, reused)				\
	MACRO_BEGIN							\
	        if ((m) != VM_PAGE_NULL &&				\
	            (m)->vmp_reusable) {				\
	                assert((object)->reusable_page_count <=	\
	                       (object)->resident_page_count);		\
	                assert((object)->reusable_page_count > 0);	\
	                (object)->reusable_page_count--;		\
	                (m)->vmp_reusable = FALSE;			\
	                (reused)++;					\
	                /*						\
	                 * Tell pmap that this page is no longer	\
	                 * "reusable", to update the "reusable" stats	\
	                 * for all the pmaps that have mapped this	\
	                 * page.					\
	                 */						\
	                pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
	                                          0, /* refmod */	\
	                                          (PMAP_OPTIONS_CLEAR_REUSABLE \
	                                           | PMAP_OPTIONS_NOFLUSH), \
	                                          NULL);		\
	        }							\
	MACRO_END

	reused = 0;
	reusable = 0;

	vm_object_lock_assert_exclusive(object);

	if (object->all_reusable) {
		panic("object %p all_reusable: can't update pmap stats\n",
		    object);
		assert(object->reusable_page_count == 0);
		object->all_reusable = FALSE;
		if (end_offset - start_offset == object->vo_size ||
		    !allow_partial_reuse) {
			vm_page_stats_reusable.all_reuse_calls++;
			reused = object->resident_page_count;
		} else {
			vm_page_stats_reusable.partial_reuse_calls++;
			vm_page_queue_iterate(&object->memq, m, vmp_listq) {
				if (m->vmp_offset < start_offset ||
				    m->vmp_offset >= end_offset) {
					m->vmp_reusable = TRUE;
					object->reusable_page_count++;
					assert(object->resident_page_count >= object->reusable_page_count);
					continue;
				} else {
					assert(!m->vmp_reusable);
					reused++;
				}
			}
		}
	} else if (object->resident_page_count >
	    ((end_offset - start_offset) >> PAGE_SHIFT)) {
		vm_page_stats_reusable.partial_reuse_calls++;
		for (cur_offset = start_offset;
		    cur_offset < end_offset;
		    cur_offset += PAGE_SIZE_64) {
			if (object->reusable_page_count == 0) {
				break;
			}
			m = vm_page_lookup(object, cur_offset);
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	} else {
		vm_page_stats_reusable.partial_reuse_calls++;
		vm_page_queue_iterate(&object->memq, m, vmp_listq) {
			if (object->reusable_page_count == 0) {
				break;
			}
			if (m->vmp_offset < start_offset ||
			    m->vmp_offset >= end_offset) {
				continue;
			}
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	}

	/* update global stats */
	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
	vm_page_stats_reusable.reused += reused;
	vm_page_stats_reusable.reusable += reusable;
}
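
/*
 * Illustrative sketch (not part of the build): the reusable-page
 * accounting invariant maintained above.  Marking a page "reusable"
 * increments the per-object count; reusing it decrements the count; the
 * count never exceeds the resident page count.  The struct and helpers
 * below are hypothetical stand-ins for the real object state.
 */
#if 0
struct example_object {
	unsigned int    resident_page_count;
	unsigned int    reusable_page_count;
};

static void
example_mark_reusable(struct example_object *eo)
{
	eo->reusable_page_count++;
	assert(eo->resident_page_count >= eo->reusable_page_count);
}

static void
example_reuse(struct example_object *eo)
{
	assert(eo->reusable_page_count > 0);
	eo->reusable_page_count--;
}
#endif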
/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */

__private_extern__ void
vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot)
{
	vm_object_pmap_protect_options(object, offset, size,
	    pmap, pmap_start, prot, 0);
}

__private_extern__ void
vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options)
{
	pmap_flush_context      pmap_flush_context_storage;
	boolean_t               delayed_pmap_flush = FALSE;

	if (object == VM_OBJECT_NULL) {
		return;
	}
	size = vm_object_round_page(size);
	offset = vm_object_trunc_page(offset);

	vm_object_lock(object);

	if (object->phys_contiguous) {
		if (pmap != NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap,
			    pmap_start,
			    pmap_start + size,
			    prot,
			    options & ~PMAP_OPTIONS_NOFLUSH,
			    NULL);
		} else {
			vm_object_offset_t phys_start, phys_end, phys_addr;

			phys_start = object->vo_shadow_offset + offset;
			phys_end = phys_start + size;
			assert(phys_start <= phys_end);
			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
			vm_object_unlock(object);

			pmap_flush_context_init(&pmap_flush_context_storage);
			delayed_pmap_flush = FALSE;

			for (phys_addr = phys_start;
			    phys_addr < phys_end;
			    phys_addr += PAGE_SIZE_64) {
				pmap_page_protect_options(
					(ppnum_t) (phys_addr >> PAGE_SHIFT),
					prot,
					options | PMAP_OPTIONS_NOFLUSH,
					(void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
			if (delayed_pmap_flush == TRUE) {
				pmap_flush(&pmap_flush_context_storage);
			}
		}
		return;
	}

	assert(object->internal);

	while (TRUE) {
		if (ptoa_64(object->resident_page_count) > size / 2 && pmap != PMAP_NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
			return;
		}

		pmap_flush_context_init(&pmap_flush_context_storage);
		delayed_pmap_flush = FALSE;

		/*
		 * if we are doing large ranges with respect to resident
		 * page count then we should iterate over pages otherwise
		 * inverse page look-up will be faster
		 */
		if (ptoa_64(object->resident_page_count / 4) < size) {
			vm_page_t               p;
			vm_object_offset_t      end;

			end = offset + size;

			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
				if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) {
					vm_map_offset_t start;

					start = pmap_start + p->vmp_offset - offset;

					if (pmap != PMAP_NULL) {
						pmap_protect_options(
							pmap,
							start,
							start + PAGE_SIZE_64,
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					} else {
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					}
					delayed_pmap_flush = TRUE;
				}
			}
		} else {
			vm_page_t               p;
			vm_object_offset_t      end;
			vm_object_offset_t      target_off;

			end = offset + size;

			for (target_off = offset;
			    target_off < end; target_off += PAGE_SIZE) {
				p = vm_page_lookup(object, target_off);

				if (p != VM_PAGE_NULL) {
					vm_object_offset_t start;

					start = pmap_start + (p->vmp_offset - offset);

					if (pmap != PMAP_NULL) {
						pmap_protect_options(
							pmap,
							start,
							start + PAGE_SIZE_64,
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					} else {
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					}
					delayed_pmap_flush = TRUE;
				}
			}
		}
		if (delayed_pmap_flush == TRUE) {
			pmap_flush(&pmap_flush_context_storage);
		}

		if (prot == VM_PROT_NONE) {
			/*
			 * Must follow shadow chain to remove access
			 * to pages in shadowed objects.
			 */
			vm_object_t     next_object;

			next_object = object->shadow;
			if (next_object != VM_OBJECT_NULL) {
				offset += object->vo_shadow_offset;
				vm_object_lock(next_object);
				vm_object_unlock(object);
				object = next_object;
			} else {
				/*
				 * End of chain - we are done.
				 */
				break;
			}
		} else {
			/*
			 * Pages in shadowed objects may never have
			 * write permission - we may stop here.
			 */
			break;
		}
	}

	vm_object_unlock(object);
}
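
/*
 * Illustrative sketch (not part of the build): the cost model behind the
 * two traversal strategies above.  When the range covers many more pages
 * than are resident, walking the object's resident-page list once wins;
 * when the object is densely populated, a per-offset lookup is cheaper.
 * Compiled out.
 */
#if 0
	if (ptoa_64(object->resident_page_count / 4) < size) {
		/* sparse: iterate the resident-page list once */
	} else {
		/* dense: vm_page_lookup() each page-sized offset */
	}
#endif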
uint32_t vm_page_busy_absent_skipped = 0;
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object)        /* OUT */
{
	vm_object_t             new_object;
	vm_object_offset_t      new_offset;

	struct vm_object_fault_info fault_info = {};

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	vm_object_reference_locked(src_object);
	vm_object_unlock(src_object);

	/*
	 *	Create a new object to hold the copied pages.
	 *	A few notes:
	 *		We fill the new object starting at offset 0,
	 *		 regardless of the input offset.
	 *		We don't bother to lock the new object within
	 *		 this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));    /* Will the loop terminate? */

	fault_info.interruptible = interruptible;
	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.lo_offset = src_offset;
	fault_info.hi_offset = src_offset + size;
	fault_info.stealth = TRUE;

	for (;
	    size != 0;
	    src_offset += PAGE_SIZE_64,
	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t       new_page;
		vm_fault_return_t result;

		vm_object_lock(new_object);

		while ((new_page = vm_page_alloc(new_object, new_offset))
		    == VM_PAGE_NULL) {
			vm_object_unlock(new_object);

			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return MACH_SEND_INTERRUPTED;
			}
			vm_object_lock(new_object);
		}
		vm_object_unlock(new_object);

		do {
			vm_prot_t       prot = VM_PROT_READ;
			vm_page_t       _result_page;
			vm_page_t       top_page;
			vm_page_t       result_page;
			kern_return_t   error_code;
			vm_object_t     result_page_object;


			vm_object_lock(src_object);

			if (src_object->internal &&
			    src_object->shadow == VM_OBJECT_NULL &&
			    (src_object->pager == NULL ||
			    (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
			    src_offset) ==
			    VM_EXTERNAL_STATE_ABSENT))) {
				boolean_t can_skip_page;

				_result_page = vm_page_lookup(src_object,
				    src_offset);
				if (_result_page == VM_PAGE_NULL) {
					/*
					 * This page is neither resident nor
					 * compressed and there's no shadow
					 * object below "src_object", so this
					 * page is really missing.
					 * There's no need to zero-fill it just
					 * to copy it: let's leave it missing
					 * in "new_object" and get zero-filled
					 * on demand.
					 */
					can_skip_page = TRUE;
				} else if (workaround_41447923 &&
				    src_object->pager == NULL &&
				    _result_page != VM_PAGE_NULL &&
				    _result_page->vmp_busy &&
				    _result_page->vmp_absent &&
				    src_object->purgable == VM_PURGABLE_DENY &&
				    !src_object->blocked_access) {
					/*
					 * This page is "busy" and "absent"
					 * but not because we're waiting for
					 * it to be decompressed.  It must
					 * be because it's a "no zero fill"
					 * page that is currently not
					 * accessible until it gets overwritten
					 * by a device driver.
					 * Since its initial state would have
					 * been "zero-filled", let's leave the
					 * copy page missing and get zero-filled
					 * on demand.
					 */
					assert(src_object->internal);
					assert(src_object->shadow == NULL);
					assert(src_object->pager == NULL);
					can_skip_page = TRUE;
					vm_page_busy_absent_skipped++;
				} else {
					can_skip_page = FALSE;
				}
				if (can_skip_page) {
					vm_object_unlock(src_object);
					/* free the unused "new_page"... */
					vm_object_lock(new_object);
					VM_PAGE_FREE(new_page);
					new_page = VM_PAGE_NULL;
					vm_object_unlock(new_object);
					/* ...and go to next page in "src_object" */
					result = VM_FAULT_SUCCESS;
					break;
				}
			}
			vm_object_paging_begin(src_object);

			/* cap size at maximum UPL size */
			upl_size_t cluster_size;
			if (os_convert_overflow(size, &cluster_size)) {
				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
			}
			fault_info.cluster_size = cluster_size;

			_result_page = VM_PAGE_NULL;
			result = vm_fault_page(src_object, src_offset,
			    VM_PROT_READ, FALSE,
			    FALSE, /* page not looked up */
			    &prot, &_result_page, &top_page,
			    (int *)0,
			    &error_code, FALSE, FALSE, &fault_info);

			switch (result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;
				result_page_object = VM_PAGE_OBJECT(result_page);

				/*
				 *	Copy the page to the new object.
				 *
				 *	POLICY DECISION:
				 *		If result_page is clean,
				 *		we could steal it instead
				 *		of copying.
				 */

				vm_page_copy(result_page, new_page);
				vm_object_unlock(result_page_object);

				/*
				 *	Let go of both pages (make them
				 *	not busy, perform wakeup, activate).
				 */
				vm_object_lock(new_object);
				SET_PAGE_DIRTY(new_page, FALSE);
				PAGE_WAKEUP_DONE(new_page);
				vm_object_unlock(new_object);

				vm_object_lock(result_page_object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lockspin_queues();
				if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
				    (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
					vm_page_activate(result_page);
				}
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 *	Release paging references and
				 *	top-level placeholder page, if any.
				 */

				vm_fault_cleanup(result_page_object,
				    top_page);

				break;

			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible)) {
					break;
				}
			/* fall thru */

			case VM_FAULT_INTERRUPTED:
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return MACH_SEND_INTERRUPTED;

			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(src_object);
				vm_object_unlock(src_object);
			/* fall thru */
			case VM_FAULT_MEMORY_ERROR:
				/*
				 * A policy choice:
				 *	(a) ignore pages that we can't
				 *	    copy
				 *	(b) return the null object if
				 *	    any page fails [chosen]
				 */

				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return error_code ? error_code :
				       KERN_MEMORY_ERROR;

			default:
				panic("vm_object_copy_slowly: unexpected error"
				    " 0x%x from vm_fault_page()\n", result);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 *	Lose the extra reference, and return our object.
	 */
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return KERN_SUCCESS;
}
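
/*
 * Illustrative sketch (not part of the build): the retry-loop shape used
 * by vm_object_copy_slowly() above.  Each page is faulted in and the
 * result code decides whether to copy, retry, wait for memory, or fail
 * the whole copy.  Arguments are elided; compiled out.
 */
#if 0
	do {
		result = vm_fault_page(src_object, src_offset, /* ... */);
		switch (result) {
		case VM_FAULT_SUCCESS:
			/* copy the page, then wake both pages */
			break;
		case VM_FAULT_RETRY:
			break;				/* fault again */
		case VM_FAULT_MEMORY_SHORTAGE:
			vm_page_wait(interruptible);
			break;				/* then fault again */
		case VM_FAULT_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;	/* abandon the copy */
		case VM_FAULT_MEMORY_ERROR:
			return error_code ? error_code : KERN_MEMORY_ERROR;
		}
	} while (result != VM_FAULT_SUCCESS);
#endif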
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */

__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t             *_object,               /* INOUT */
	__unused vm_object_offset_t     offset, /* IN */
	__unused vm_object_size_t       size,   /* IN */
	boolean_t               *_src_needs_copy,       /* OUT */
	boolean_t               *_dst_needs_copy)       /* OUT */
{
	vm_object_t     object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return TRUE;
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		vm_object_reference_locked(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return FALSE;

	default:
		vm_object_unlock(object);
		return FALSE;
	}
	return TRUE;
}
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;

/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *_result_object)        /* OUT */
{
	kern_return_t   kr;
	vm_object_t     copy;
	boolean_t       check_ready = FALSE;
	uint32_t        try_failed_count = 0;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptible, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends on how common this case is.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
		    THREAD_UNINT);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;      /* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
		    THREAD_UNINT);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);

		try_failed_count++;
		mutex_pause(try_failed_count);  /* wait a bit */

		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->vo_size < src_offset + size) {
		copy->vo_size = src_offset + size;
	}

	if (!copy->pager_ready) {
		check_ready = TRUE;
	}

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;

/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The src_object must be locked on entry.  It will be unlocked
 *		on exit - so the caller must also hold a reference to it.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared)
{
	vm_object_t             new_copy = VM_OBJECT_NULL;
	vm_object_t             old_copy;
	vm_page_t               p;
	vm_object_size_t        copy_size = src_offset + size;
	pmap_flush_context      pmap_flush_context_storage;
	boolean_t               delayed_pmap_flush = FALSE;


	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		 pages modified by the new mapping  (i.e., the copy,
	 *		 not the original mapping).
	 *		Record the original object as the backing object for
	 *		 the copy object.  If the original mapping does not
	 *		 change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		 When the original mapping causes a page to be modified,
	 *		 it must be copied to a new page that is "pushed" to
	 *		 the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		 This makes the copy object itself read-only, allowing
	 *		 it to be reused if the original mapping makes no
	 *		 changes, and simplifying the synchronization required
	 *		 in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be asymmetric because the original
	 *	object is *not* marked copy-on-write. A copied page is pushed
	 *	to the copy object, regardless which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done. If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused. Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot affect
	 *	the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */

	copy_size = vm_object_round_page(copy_size);
Retry:

	/*
	 * Wait for paging in progress.
	 */
	if (!src_object->true_share &&
	    (src_object->paging_in_progress != 0 ||
	    src_object->activity_in_progress != 0)) {
		if (src_object_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			src_object_shared = FALSE;
			goto Retry;
		}
		vm_object_paging_wait(src_object, THREAD_UNINT);
	}
	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		int lock_granted;

		/*
		 *	Try to get the locks (out of order)
		 */
		if (src_object_shared == TRUE) {
			lock_granted = vm_object_lock_try_shared(old_copy);
		} else {
			lock_granted = vm_object_lock_try(old_copy);
		}

		if (!lock_granted) {
			vm_object_unlock(src_object);

			if (collisions++ == 0) {
				copy_delayed_lock_contention++;
			}
			mutex_pause(collisions);

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;

			if (collisions > copy_delayed_max_collisions) {
				copy_delayed_max_collisions = collisions;
			}

			if (src_object_shared == TRUE) {
				vm_object_lock_shared(src_object);
			} else {
				vm_object_lock(src_object);
			}

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object if
			 *	we can safely grow it (if
			 *	needed).
			 */

			if (old_copy->vo_size < copy_size) {
				if (src_object_shared == TRUE) {
					vm_object_unlock(old_copy);
					vm_object_unlock(src_object);

					vm_object_lock(src_object);
					src_object_shared = FALSE;
					goto Retry;
				}
				/*
				 * We can't perform a delayed copy if any of the
				 * pages in the extended range are wired (because
				 * we can't safely take write permission away from
				 * wired pages).  If the pages aren't wired, then
				 * go ahead and protect them.
				 */
				copy_delayed_protect_iterate++;

				pmap_flush_context_init(&pmap_flush_context_storage);
				delayed_pmap_flush = FALSE;

				vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
					if (!p->vmp_fictitious &&
					    p->vmp_offset >= old_copy->vo_size &&
					    p->vmp_offset < copy_size) {
						if (VM_PAGE_WIRED(p)) {
							vm_object_unlock(old_copy);
							vm_object_unlock(src_object);

							if (new_copy != VM_OBJECT_NULL) {
								vm_object_unlock(new_copy);
								vm_object_deallocate(new_copy);
							}
							if (delayed_pmap_flush == TRUE) {
								pmap_flush(&pmap_flush_context_storage);
							}

							return VM_OBJECT_NULL;
						} else {
							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
							    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
							delayed_pmap_flush = TRUE;
						}
					}
				}
				if (delayed_pmap_flush == TRUE) {
					pmap_flush(&pmap_flush_context_storage);
				}

				old_copy->vo_size = copy_size;
			}
			if (src_object_shared == TRUE) {
				vm_object_reference_shared(old_copy);
			} else {
				vm_object_reference_locked(old_copy);
			}
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}

			return old_copy;
		}



		/*
		 * Adjust the size argument so that the newly-created
		 * copy object will be large enough to back either the
		 * old copy object or the new mapping.
		 */
		if (old_copy->vo_size > copy_size) {
			copy_size = old_copy->vo_size;
		}

		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(copy_size);
			vm_object_lock(src_object);
			vm_object_lock(new_copy);

			src_object_shared = FALSE;
			goto Retry;
		}
		new_copy->vo_size = copy_size;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(copy_size);
		vm_object_lock(src_object);
		vm_object_lock(new_copy);

		src_object_shared = FALSE;
		goto Retry;
	}

	/*
	 * We now have the src object locked, and the new copy object
	 * allocated and locked (and potentially the old copy locked).
	 * Before we go any further, make sure we can still perform
	 * a delayed copy, as the situation may have changed.
	 *
	 * Specifically, we can't perform a delayed copy if any of the
	 * pages in the range are wired (because we can't safely take
	 * write permission away from wired pages).  If the pages aren't
	 * wired, then go ahead and protect them.
	 */
	copy_delayed_protect_iterate++;

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
		if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
			if (VM_PAGE_WIRED(p)) {
				if (old_copy) {
					vm_object_unlock(old_copy);
				}
				vm_object_unlock(src_object);
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);

				if (delayed_pmap_flush == TRUE) {
					pmap_flush(&pmap_flush_context_storage);
				}

				return VM_OBJECT_NULL;
			} else {
				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
				    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}
	}
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		/* remove ref. from old_copy */
		vm_object_lock_assert_exclusive(src_object);
		src_object->ref_count--;
		assert(src_object->ref_count > 0);
		vm_object_lock_assert_exclusive(old_copy);
		old_copy->shadow = new_copy;
		vm_object_lock_assert_exclusive(new_copy);
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;          /* for old_copy->shadow ref. */

#if TASK_SWAPPER
		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}
#endif

		vm_object_unlock(old_copy);     /* done with old_copy */
	}

	/*
	 *	Point the new copy at the existing object.
	 */
	vm_object_lock_assert_exclusive(new_copy);
	new_copy->shadow = src_object;
	new_copy->vo_shadow_offset = 0;
	new_copy->shadowed = TRUE;      /* caller must set needs_copy */

	vm_object_lock_assert_exclusive(src_object);
	vm_object_reference_locked(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(src_object);
	vm_object_unlock(new_copy);

	return new_copy;
}
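
/*
 * Illustrative sketch (not part of the build): the object chain produced
 * by the asymmetric copy above.  A fresh copy object shadows the source,
 * and any previous copy object is re-pointed to shadow the new copy, so
 * older copies keep seeing the pages they were promised:
 *
 *	old_copy -> new_copy -> src_object
 *
 * Compiled out; these asserts restate the post-conditions of the routine.
 */
#if 0
	assert(new_copy->shadow == src_object);
	assert(src_object->copy == new_copy);
	if (old_copy != VM_OBJECT_NULL) {
		assert(old_copy->shadow == new_copy);
	}
#endif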
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,    /* OUT */
	vm_object_offset_t      *dst_offset,    /* OUT */
	boolean_t               *dst_needs_copy) /* OUT */
{
	kern_return_t   result;
	boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
	boolean_t       object_lock_shared = FALSE;
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	copy_strategy = src_object->copy_strategy;

	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
		vm_object_lock_shared(src_object);
		object_lock_shared = TRUE;
	} else {
		vm_object_lock(src_object);
	}

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready". Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {
		wait_result_t wait_result;

		if (object_lock_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			object_lock_shared = FALSE;
			continue;
		}
		wait_result = vm_object_sleep(  src_object,
		    VM_OBJECT_EVENT_PAGER_READY,
		    interruptible);
		if (wait_result != THREAD_AWAKENED) {
			vm_object_unlock(src_object);
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return MACH_SEND_INTERRUPTED;
		}
	}

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_DELAY:
		*dst_object = vm_object_copy_delayed(src_object,
		    src_offset, size, object_lock_shared);
		if (*dst_object != VM_OBJECT_NULL) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
			result = KERN_SUCCESS;
			break;
		}
		vm_object_lock(src_object);
	/* fall thru when delayed copy not allowed */

	case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
		    interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
		    dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	case MEMORY_OBJECT_COPY_SYMMETRIC:
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
	}
	return result;
}
/*
 *	Routine:	vm_object_shadow
 *
 *	Purpose:
 *		Create a new object which is backed by the
 *		specified existing object range.  The source
 *		object reference is deallocated.
 *
 *		The new object and offset into that object
 *		are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = TRUE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t             *object,        /* IN/OUT */
	vm_object_offset_t      *offset,        /* IN/OUT */
	vm_object_size_t        length)
{
	vm_object_t     source;
	vm_object_t     result;

	source = *object;
	assert(source != VM_OBJECT_NULL);
	if (source == VM_OBJECT_NULL) {
		return FALSE;
	}

#if 0
	/*
	 * This assertion is valid but it gets triggered by Rosetta for example
	 * due to a combination of vm_remap() that changes a VM object's
	 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
	 * that then sets "needs_copy" on its map entry.  This creates a
	 * mapping situation that VM should never see and doesn't know how to
	 * handle.
	 * It's not clear if this can create any real problem but we should
	 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
	 * do more than just set "needs_copy" to handle the copy-on-write...
	 * In the meantime, let's disable the assertion.
	 */
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
#endif

	/*
	 *	Determine if we really need a shadow.
	 *
	 *	If the source object is larger than what we are trying
	 *	to create, then force the shadow creation even if the
	 *	ref count is 1.  This will allow us to [potentially]
	 *	collapse the underlying object away in the future
	 *	(freeing up the extra data it might contain and that
	 *	we don't need).
	 */

	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */

	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1) {
		/*
		 * Lock the object and check again.
		 * We also check to see if there's
		 * a shadow or copy object involved.
		 * We can't do that earlier because
		 * without the object locked, there
		 * could be a collapse and the chain
		 * gets modified leaving us with an
		 * invalid pointer.
		 */
		vm_object_lock(source);
		if (source->vo_size == length &&
		    source->ref_count == 1 &&
		    (source->shadow == VM_OBJECT_NULL ||
		    source->shadow->copy == VM_OBJECT_NULL)) {
			source->shadowed = FALSE;
			vm_object_unlock(source);
			return FALSE;
		}
		/* things changed while we were locking "source"... */
		vm_object_unlock(source);
	}

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
		panic("vm_object_shadow: no object for shadowing");
	}

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->vo_shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
	return TRUE;
}
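
/*
 * Illustrative sketch (not part of the build): the shadow-optimization
 * decision above, reduced to its predicate.  A shadow can be skipped
 * only when the caller holds the sole reference, covers the whole
 * object, and no shadow/copy chain is involved.  The helper name is
 * hypothetical; compiled out.
 */
#if 0
static boolean_t
example_shadow_needed(vm_object_t source, vm_object_size_t length)
{
	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	    source->shadow->copy == VM_OBJECT_NULL)) {
		return FALSE;	/* reuse "source" directly */
	}
	return TRUE;		/* allocate a shadow object */
}
#endif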
/*
 *	The relationship between vm_object structures and
 *	the memory_object requires careful synchronization.
 *
 *	All associations are created by memory_object_create_named
 *	for external pagers and vm_object_compressor_pager_create for internal
 *	objects as follows:
 *
 *		pager:	the memory_object itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a port reference;
 *
 *		pager_request:
 *			the memory object control port,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to this
 *			port, but no other references.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish references to the memory
 *			object. [The memory manager may not want to
 *			destroy the memory object, but may wish to
 *			refuse or tear down existing memory mappings.]
 *
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the pager field and release the memory object references.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	Because the pager field may be cleared spontaneously, it
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the pager reference found in the
 *	vm_object must be valid.  To prevent this from being released,
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the pager values include the
 *	termination routines and vm_object_collapse.]
 */
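
/*
 * Illustrative sketch (not part of the build): the two-flag handshake
 * described above.  Waiters block on the object until the initializing
 * thread asserts "pager_initialized"; "pager_ready" is a separate
 * attribute owned by the memory manager and waited on the same way.
 * This is the loop shape used by vm_object_compressor_pager_create()
 * below; compiled out.
 */
#if 0
	while (!object->pager_initialized) {
		vm_object_sleep(object, VM_OBJECT_EVENT_INITIALIZED,
		    THREAD_UNINT);
	}
#endif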
/*
 *	Routine:	vm_object_memory_object_associate
 *	Purpose:
 *		Associate a VM object to the given pager.
 *		If a VM object is not provided, create one.
 *		Initialize the pager.
 */
vm_object_t
vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               named)
{
	memory_object_control_t control;

	assert(pager != MEMORY_OBJECT_NULL);

	if (object != VM_OBJECT_NULL) {
		assert(object->internal);
		assert(object->pager_created);
		assert(!object->pager_initialized);
		assert(!object->pager_ready);
		assert(object->pager_trusted);
	} else {
		object = vm_object_allocate(size);
		assert(object != VM_OBJECT_NULL);
		object->internal = FALSE;
		object->pager_trusted = FALSE;
		/* copy strategy invalid until set by memory manager */
		object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
	}

	/*
	 *	Allocate request port.
	 */

	control = memory_object_control_allocate(object);
	assert(control != MEMORY_OBJECT_CONTROL_NULL);

	vm_object_lock(object);

	assert(!object->pager_ready);
	assert(!object->pager_initialized);
	assert(object->pager == NULL);
	assert(object->pager_control == NULL);

	/*
	 *	Copy the reference we were given.
	 */

	memory_object_reference(pager);
	object->pager_created = TRUE;
	object->pager = pager;
	object->pager_control = control;
	object->pager_ready = FALSE;

	vm_object_unlock(object);

	/*
	 *	Let the pager know we're using it.
	 */

	(void) memory_object_init(pager,
	    object->pager_control,
	    PAGE_SIZE);

	vm_object_lock(object);
	if (named) {
		object->named = TRUE;
	}
	if (object->internal) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	object->pager_initialized = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);

	vm_object_unlock(object);

	return object;
}
/*
 *	Routine:	vm_object_compressor_pager_create
 *	Purpose:
 *		Create a memory object for an internal object.
 *	In/out conditions:
 *		The object is locked on entry and exit;
 *		it may be unlocked within this call.
 *	Limitations:
 *		Only one thread may be performing a
 *		vm_object_compressor_pager_create on an object at
 *		a time.  Presumably, only the pageout
 *		daemon will be using this routine.
 */

void
vm_object_compressor_pager_create(
	vm_object_t     object)
{
	memory_object_t pager;
	vm_object_t     pager_object = VM_OBJECT_NULL;

	assert(object != kernel_object);

	/*
	 *	Prevent collapse or termination by holding a paging reference
	 */

	vm_object_paging_begin(object);
	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing the ports
		 */
		while (!object->pager_initialized) {
			vm_object_sleep(object,
			    VM_OBJECT_EVENT_INITIALIZED,
			    THREAD_UNINT);
		}
		vm_object_paging_end(object);
		return;
	}

	if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
	    (object->vo_size / PAGE_SIZE)) {
#if DEVELOPMENT || DEBUG
		printf("vm_object_compressor_pager_create(%p): "
		    "object size 0x%llx >= 0x%llx\n",
		    object,
		    (uint64_t) object->vo_size,
		    0x0FFFFFFFFULL * PAGE_SIZE);
#endif /* DEVELOPMENT || DEBUG */
		vm_object_paging_end(object);
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */

	object->pager_created = TRUE;
	object->pager_trusted = TRUE;
	object->paging_offset = 0;

	vm_object_unlock(object);

	/*
	 *	Create the [internal] pager, and associate it with this object.
	 *
	 *	We make the association here so that vm_object_enter()
	 *	can look up the object to complete initializing it.  No
	 *	user will ever map this object.
	 */
	{
		/* create our new memory object */
		assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
		    (object->vo_size / PAGE_SIZE));
		(void) compressor_memory_object_create(
			(memory_object_size_t) object->vo_size,
			&pager);
		if (pager == NULL) {
			panic("vm_object_compressor_pager_create(): "
			    "no pager for object %p size 0x%llx\n",
			    object, (uint64_t) object->vo_size);
		}
	}

	/*
	 *	A reference was returned by
	 *	memory_object_create(), and it is
	 *	copied by vm_object_memory_object_associate().
	 */

	pager_object = vm_object_memory_object_associate(pager,
	    object,
	    object->vo_size,
	    FALSE);
	if (pager_object != object) {
		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
	}

	/*
	 *	Drop the reference we were passed.
	 */
	memory_object_deallocate(pager);

	vm_object_lock(object);

	/*
	 *	Release the paging reference
	 */
	vm_object_paging_end(object);
}
/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
static long     object_collapses = 0;
static long     object_bypasses = 0;

static boolean_t        vm_object_collapse_allowed = TRUE;
static boolean_t        vm_object_bypass_allowed = TRUE;

void vm_object_do_collapse_compressor(vm_object_t object,
    vm_object_t backing_object);
void
vm_object_do_collapse_compressor(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_counters.do_collapse_compressor++;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	size = object->vo_size;

	/*
	 * Move all compressed pages from backing_object
	 * to the parent.
	 */

	for (backing_offset = object->vo_shadow_offset;
	    backing_offset < object->vo_shadow_offset + object->vo_size;
	    backing_offset += PAGE_SIZE) {
		memory_object_offset_t backing_pager_offset;

		/* find the next compressed page at or after this offset */
		backing_pager_offset = (backing_offset +
		    backing_object->paging_offset);
		backing_pager_offset = vm_compressor_pager_next_compressed(
			backing_object->pager,
			backing_pager_offset);
		if (backing_pager_offset == (memory_object_offset_t) -1) {
			/* no more compressed pages */
			break;
		}
		backing_offset = (backing_pager_offset -
		    backing_object->paging_offset);

		new_offset = backing_offset - object->vo_shadow_offset;

		if (new_offset >= object->vo_size) {
			/* we're out of the scope of "object": done */
			break;
		}

		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
		    (vm_compressor_pager_state_get(object->pager,
		    (new_offset +
		    object->paging_offset)) ==
		    VM_EXTERNAL_STATE_EXISTS)) {
			/*
			 * This page already exists in object, resident or
			 * compressed.
			 * We don't need this compressed page in backing_object
			 * and it will be reclaimed when we release
			 * backing_object.
			 */
			continue;
		}

		/*
		 * backing_object has this page in the VM compressor and
		 * we need to transfer it to object.
		 */
		vm_counters.do_collapse_compressor_pages++;
		vm_compressor_pager_transfer(
			/* destination: */
			object->pager,
			(new_offset + object->paging_offset),
			/* source: */
			backing_object->pager,
			(backing_offset + backing_object->paging_offset));
	}
}
/*
 *	Routine:	vm_object_do_collapse
 *	Purpose:
 *		Collapse an object with the object backing it.
 *		Pages in the backing object are moved into the
 *		parent, and the backing object is deallocated.
 *
 *		Both objects and the cache are locked; the page
 *		queues are unlocked.
 */
static void
vm_object_do_collapse(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_page_t p, pp;
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	assert(object->purgable == VM_PURGABLE_DENY);
	assert(backing_object->purgable == VM_PURGABLE_DENY);

	backing_offset = object->vo_shadow_offset;
	size = object->vo_size;

	/*
	 *	Move all in-memory pages from backing_object
	 *	to the parent.  Pages that have been paged out
	 *	will be overwritten by any of the parent's
	 *	pages that shadow them.
	 */

	while (!vm_page_queue_empty(&backing_object->memq)) {
		p = (vm_page_t) vm_page_queue_first(&backing_object->memq);

		new_offset = (p->vmp_offset - backing_offset);

		assert(!p->vmp_busy || p->vmp_absent);

		/*
		 *	If the parent has a page here, or if
		 *	this page falls outside the parent,
		 *	dispose of it.
		 *
		 *	Otherwise, move it as planned.
		 */

		if (p->vmp_offset < backing_offset || new_offset >= size) {
			VM_PAGE_FREE(p);
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp == VM_PAGE_NULL) {
				if (VM_COMPRESSOR_PAGER_STATE_GET(object,
				    new_offset)
				    == VM_EXTERNAL_STATE_EXISTS) {
					/*
					 * Parent object has this page
					 * in the VM compressor.
					 * Throw away the backing
					 * object's page.
					 */
					VM_PAGE_FREE(p);
				} else {
					/*
					 *	Parent now has no page.
					 *	Move the backing object's page
					 *	up.
					 */
					vm_page_rename(p, object, new_offset);
				}
			} else {
				assert(!pp->vmp_absent);

				/*
				 *	Parent object has a real page.
				 *	Throw away the backing object's
				 *	page.
				 */
				VM_PAGE_FREE(p);
			}
		}
	}

	if (vm_object_collapse_compressor_allowed &&
	    object->pager != MEMORY_OBJECT_NULL &&
	    backing_object->pager != MEMORY_OBJECT_NULL) {
		/* move compressed pages from backing_object to object */
		vm_object_do_collapse_compressor(object, backing_object);
	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
		assert((!object->pager_created &&
		    (object->pager == MEMORY_OBJECT_NULL)) ||
		    (!backing_object->pager_created &&
		    (backing_object->pager == MEMORY_OBJECT_NULL)));
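		/*
		 * Per the assertion above, at most one of the two objects
		 * is known to the pager here, and this branch requires
		 * backing_object to have a pager, so "object" cannot:
		 * adopting backing_object's pager below cannot clobber a
		 * pager that "object" already set up.
		 */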
		/*
		 *	Move the pager from backing_object to object.
		 *
		 *	XXX We're only using part of the paging space
		 *	for keeps now... we ought to discard the
		 *	unused portion.
		 */

		assert(!object->paging_in_progress);
		assert(!object->activity_in_progress);
		assert(!object->pager_created);
		assert(object->pager == NULL);
		object->pager = backing_object->pager;

		object->pager_created = backing_object->pager_created;
		object->pager_control = backing_object->pager_control;
		object->pager_ready = backing_object->pager_ready;
		object->pager_initialized = backing_object->pager_initialized;
		object->paging_offset =
		    backing_object->paging_offset + backing_offset;
		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_collapse(object->pager_control,
			    object);
		}
		/* the backing_object has lost its pager: reset all fields */
		backing_object->pager_created = FALSE;
		backing_object->pager_control = NULL;
		backing_object->pager_ready = FALSE;
		backing_object->paging_offset = 0;
		backing_object->pager = NULL;
	}

	/*
	 *	Object now shadows whatever backing_object did.
	 *	Note that the reference to backing_object->shadow
	 *	moves from within backing_object to within object.
	 */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
		/* "backing_object" gave its shadow to "object" */
		backing_object->shadow = VM_OBJECT_NULL;
		backing_object->vo_shadow_offset = 0;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}
	assert((object->shadow == VM_OBJECT_NULL) ||
	    (object->shadow->copy != backing_object));

	/*
	 *	Discard backing_object.
	 *
	 *	Since the backing object has no pages, no
	 *	pager left, and no object references within it,
	 *	all that is necessary is to dispose of it.
	 */
	object_collapses++;

	assert(backing_object->ref_count == 1);
	assert(backing_object->resident_page_count == 0);
	assert(backing_object->paging_in_progress == 0);
	assert(backing_object->activity_in_progress == 0);
	assert(backing_object->shadow == VM_OBJECT_NULL);
	assert(backing_object->vo_shadow_offset == 0);

	if (backing_object->pager != MEMORY_OBJECT_NULL) {
		/* ... unless it has a pager; need to terminate pager too */
		vm_counters.do_collapse_terminate++;
		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
			vm_counters.do_collapse_terminate_failure++;
		}
		return;
	}

	assert(backing_object->pager == NULL);

	backing_object->alive = FALSE;
	vm_object_unlock(backing_object);

#if VM_OBJECT_TRACKING
	if (vm_object_tracking_inited) {
		btlog_remove_entries_for_element(vm_object_tracking_btlog,
		    backing_object);
	}
#endif /* VM_OBJECT_TRACKING */

	vm_object_lock_destroy(backing_object);

	zfree(vm_object_zone, backing_object);
}
static void
vm_object_do_bypass(
	vm_object_t object,
	vm_object_t backing_object)
{
	/*
	 *	Make the parent shadow the next object
	 *	in the chain.
	 */

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

#if TASK_SWAPPER
	/*
	 *	Do object reference in-line to
	 *	conditionally increment shadow's
	 *	residence count.  If object is not
	 *	resident, leave residence count
	 *	on shadow alone.
	 */
	if (backing_object->shadow != VM_OBJECT_NULL) {
		vm_object_lock(backing_object->shadow);
		vm_object_lock_assert_exclusive(backing_object->shadow);
		backing_object->shadow->ref_count++;
		if (object->res_count != 0) {
			vm_object_res_reference(backing_object->shadow);
		}
		vm_object_unlock(backing_object->shadow);
	}
#else /* TASK_SWAPPER */
	vm_object_reference(backing_object->shadow);
#endif /* TASK_SWAPPER */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}

	/*
	 *	Backing object might have had a copy pointer
	 *	to us.  If it did, clear it.
	 */
	if (backing_object->copy == object) {
		backing_object->copy = VM_OBJECT_NULL;
	}

	/*
	 *	Drop the reference count on backing_object.
	 *
	 *	Since its ref_count was at least 2, it
	 *	will not vanish; so we don't need to call
	 *	vm_object_deallocate.
	 *	[with a caveat for "named" objects]
	 *
	 *	The res_count on the backing object is
	 *	conditionally decremented.  It's possible
	 *	(via vm_pageout_scan) to get here with
	 *	a "swapped" object, which has a 0 res_count,
	 *	in which case, the backing object res_count
	 *	is already down by one.
	 *
	 *	Don't call vm_object_deallocate unless
	 *	ref_count drops to zero.
	 *
	 *	The ref_count can drop to zero here if the
	 *	backing object could be bypassed but not
	 *	collapsed, such as when the backing object
	 *	is temporary and cachable.
	 */
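	/*
	 * Note on the test below: a simple decrement is only safe while
	 * other references guarantee the object won't vanish.  A "named"
	 * backing object down to its name reference plus ours, or any
	 * backing object down to its last reference, must instead go
	 * through vm_object_deallocate() in the "else" branch.
	 */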
	if (backing_object->ref_count > 2 ||
	    (!backing_object->named && backing_object->ref_count > 1)) {
		vm_object_lock_assert_exclusive(backing_object);
		backing_object->ref_count--;
#if TASK_SWAPPER
		if (object->res_count != 0) {
			vm_object_res_deallocate(backing_object);
		}
		assert(backing_object->ref_count > 0);
#endif /* TASK_SWAPPER */
		vm_object_unlock(backing_object);
	} else {
		/*
		 *	Drop locks so that we can deallocate
		 *	the backing object.
		 */

#if TASK_SWAPPER
		if (object->res_count == 0) {
			/* XXX get a reference for the deallocate below */
			vm_object_res_reference(backing_object);
		}
#endif /* TASK_SWAPPER */
		/*
		 * vm_object_collapse (the caller of this function) is
		 * now called from contexts that may not guarantee that a
		 * valid reference is held on the object... w/o a valid
		 * reference, it is unsafe and unwise (you will definitely
		 * regret it) to unlock the object and then retake the lock
		 * since the object may be terminated and recycled in between.
		 * The "activity_in_progress" reference will keep the object
		 * 'stable'.
		 */
		vm_object_activity_begin(object);
		vm_object_unlock(object);

		vm_object_unlock(backing_object);
		vm_object_deallocate(backing_object);

		/*
		 *	Relock object. We don't have to reverify
		 *	its state since vm_object_collapse will
		 *	do that for us as it starts at the
		 *	top of its loop.
		 */

		vm_object_lock(object);
		vm_object_activity_end(object);
	}

	object_bypasses++;
}
/*
 *	vm_object_collapse:
 *
 *	Perform an object collapse or an object bypass if appropriate.
 *	The real work of collapsing and bypassing is performed in
 *	the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *	Requires that the object be locked and the page queues be unlocked.
 */
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;

__private_extern__ void
vm_object_collapse(
	vm_object_t object,
	vm_object_offset_t hint_offset,
	boolean_t can_bypass)
{
	vm_object_t backing_object;
	unsigned int rcount;
	unsigned int size;
	vm_object_t original_object;
	int object_lock_type;
	int backing_object_lock_type;

	vm_object_collapse_calls++;

	if (!vm_object_collapse_allowed &&
	    !(can_bypass && vm_object_bypass_allowed)) {
		return;
	}

	if (object == VM_OBJECT_NULL) {
		return;
	}

	original_object = object;

	/*
	 * The top object was locked "exclusive" by the caller.
	 * In the first pass, to determine if we can collapse the shadow chain,
	 * take a "shared" lock on the shadow objects.  If we can collapse,
	 * we'll have to go down the chain again with exclusive locks.
	 */
	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
	backing_object_lock_type = OBJECT_LOCK_SHARED;

retry:
	object = original_object;
	vm_object_lock_assert_exclusive(object);

	while (TRUE) {
		vm_object_collapse_objects++;

		/*
		 *	Verify that the conditions are right for either
		 *	collapse or bypass:
		 */

		/*
		 *	There is a backing object, and
		 */

		backing_object = object->shadow;
		if (backing_object == VM_OBJECT_NULL) {
			if (object != original_object) {
				vm_object_unlock(object);
			}
			return;
		}
		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
			vm_object_lock_shared(backing_object);
		} else {
			vm_object_lock(backing_object);
		}

		/*
		 *	No pages in the object are currently
		 *	being paged out, and
		 */
		if (object->paging_in_progress != 0 ||
		    object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */

		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0 ||
		    backing_object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 * Purgeable objects are not supposed to engage in
		 * copy-on-write activities, so should not have
		 * any shadow objects or be a shadow object to another
		 * object.
		 * Collapsing a purgeable object would require some
		 * updates to the purgeable compressed ledgers.
		 */
		if (object->purgable != VM_PURGABLE_DENY ||
		    backing_object->purgable != VM_PURGABLE_DENY) {
			panic("vm_object_collapse() attempting to collapse "
			    "purgeable object: %p(%d) %p(%d)\n",
			    object, object->purgable,
			    backing_object, backing_object->purgable);
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != VM_OBJECT_NULL &&
		    backing_object->shadow->copy == backing_object) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	We can now try to either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 *
		 *	If there is exactly one reference to the backing
		 *	object, we may be able to collapse it into the
		 *	parent.
		 *
		 *	As long as one of the objects is still not known
		 *	to the pager, we can collapse them.
		 */
		if (backing_object->ref_count == 1 &&
		    (vm_object_collapse_compressor_allowed ||
		    !object->pager_created
		    || (!backing_object->pager_created)
		    ) && vm_object_collapse_allowed) {
			/*
			 * We need the exclusive lock on the VM objects.
			 */
			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
				/*
				 * We have an object and its shadow locked
				 * "shared".  We can't just upgrade the locks
				 * to "exclusive", as some other thread might
				 * also have these objects locked "shared" and
				 * attempt to upgrade one or the other to
				 * "exclusive".  The upgrades would block
				 * forever waiting for the other "shared" locks
				 * to get released.
				 * So we have to release the locks and go
				 * down the shadow chain again (since it could
				 * have changed) with "exclusive" locking.
				 */
				vm_object_unlock(backing_object);
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				goto retry;
			}

			/*
			 *	Collapse the object with its backing
			 *	object, and try again with the object's
			 *	new backing object.
			 */

			vm_object_do_collapse(object, backing_object);
			vm_object_collapse_do_collapse++;
			continue;
		}

		/*
		 *	Collapsing the backing object was not possible
		 *	or permitted, so let's try bypassing it.
		 */

		if (!(can_bypass && vm_object_bypass_allowed)) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	If the object doesn't have all its pages present,
		 *	we have to make sure no pages in the backing object
		 *	"show through" before bypassing it.
		 */
		size = (unsigned int)atop(object->vo_size);
		rcount = object->resident_page_count;

		if (rcount != size) {
			vm_object_offset_t offset;
			vm_object_offset_t backing_offset;
			unsigned int backing_rcount;

			/*
			 *	If the backing object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (backing_object->pager_created) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If the object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (object->pager_created) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			backing_offset = object->vo_shadow_offset;
			backing_rcount = backing_object->resident_page_count;
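			/*
			 * Pigeonhole check: at most (atop(backing_object->vo_size) - size)
			 * of the backing object's resident pages can lie outside the
			 * window that "object" maps onto it, and "object" can cover at
			 * most "rcount" of the rest with its own resident pages.  So if
			 * backing_rcount exceeds the sum, some backing page is certain
			 * to show through, and the bypass attempt can be skipped.
			 */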
			if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
				/*
				 * we have enough pages in the backing object to guarantee that
				 * at least 1 of them must be 'uncovered' by a resident page
				 * in the object we're evaluating, so move on and
				 * try to collapse the rest of the shadow chain
				 */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	If the backing object has existence info,
			 *	we must examine its existence info
			 *	as well.
			 */

#define EXISTS_IN_OBJECT(obj, off, rc) \
	((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
	== VM_EXTERNAL_STATE_EXISTS) || \
	((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
			/*
			 * Check the hint location first
			 * (since it is often the quickest way out of here).
			 */
			if (object->cow_hint != ~(vm_offset_t)0) {
				hint_offset = (vm_object_offset_t)object->cow_hint;
			} else {
				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
				    (hint_offset - 8 * PAGE_SIZE_64) : 0;
			}

			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
			    backing_offset, backing_rcount) &&
			    !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
				/* dependency right at the hint */
				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 * If the object's window onto the backing_object
			 * is large compared to the number of resident
			 * pages in the backing object, it makes sense to
			 * walk the backing_object's resident pages first.
			 *
			 * NOTE: Pages may be in both the existence map and/or
			 * resident, so if we don't find a dependency while
			 * walking the backing object's resident page list
			 * directly, and there is an existence map, we'll have
			 * to run the offset based 2nd pass.  Because we may
			 * have to run both passes, we need to be careful
			 * not to decrement 'rcount' in the 1st pass.
			 */
			if (backing_rcount && backing_rcount < (size / 8)) {
				unsigned int rc = rcount;
				vm_page_t p;

				backing_rcount = backing_object->resident_page_count;
				p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
				do {
					offset = (p->vmp_offset - backing_offset);

					if (offset < object->vo_size &&
					    offset != hint_offset &&
					    !EXISTS_IN_OBJECT(object, offset, rc)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */

						break;
					}
					p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
				} while (--backing_rcount);
				if (backing_rcount != 0) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}

			/*
			 * Walk through the offsets looking for pages in the
			 * backing object that show through to the object.
			 */
			if (backing_rcount) {
				offset = hint_offset;

				while ((offset =
				    (offset + PAGE_SIZE_64 < object->vo_size) ?
				    (offset + PAGE_SIZE_64) : 0) != hint_offset) {
					if (EXISTS_IN_OBJECT(backing_object, offset +
					    backing_offset, backing_rcount) &&
					    !EXISTS_IN_OBJECT(object, offset, rcount)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */
						break;
					}
				}
				if (offset != hint_offset) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}
		}

		/*
		 * We need "exclusive" locks on the 2 VM objects.
		 */
		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
			vm_object_unlock(backing_object);
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			goto retry;
		}

		/* reset the offset hint for any objects deeper in the chain */
		object->cow_hint = (vm_offset_t)0;

		/*
		 *	All interesting pages in the backing object
		 *	already live in the parent or its pager.
		 *	Thus we can bypass the backing object.
		 */

		vm_object_do_bypass(object, backing_object);
		vm_object_collapse_do_bypass++;

		/*
		 *	Try again with this object's new backing object.
		 */

		continue;
	}

	/*
	 * NOT REACHED:
	 *
	 * if (object != original_object) {
	 *	vm_object_unlock(object);
	 * }
	 */
}
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 *		The object must not have paging_in_progress, usually
 *		guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
	vm_object_t object,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	vm_page_t p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */
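	/*
	 * For example, removing 2 pages from an object with 64 resident
	 * pages gives atop_64(2 pages) = 2 < 64 / 16 = 4, so the per-offset
	 * lookup path below is taken; removing 64 pages from the same
	 * object would instead walk the whole resident page list once.
	 */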
	if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->vmp_cleaning && !p->vmp_laundry);
				if (!p->vmp_fictitious && p->vmp_pmapped) {
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				}
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) vm_page_queue_first(&object->memq);
		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
			next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
			if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
				assert(!p->vmp_cleaning && !p->vmp_laundry);
				if (!p->vmp_fictitious && p->vmp_pmapped) {
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				}
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *		The object(s) must *not* be locked. The map must be locked
 *		to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
	vm_object_t prev_object,
	vm_object_t next_object,
	vm_object_offset_t prev_offset,
	__unused vm_object_offset_t next_offset,
	vm_object_size_t prev_size,
	vm_object_size_t next_size)
{
	vm_object_size_t newsize;

	if (next_object != VM_OBJECT_NULL) {
		return FALSE;
	}

	if (prev_object == VM_OBJECT_NULL) {
		return TRUE;
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object, prev_offset, TRUE);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use any way:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. is purgeable
	 *	. paging references (pages might be in page-list)
	 */

	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->true_share != FALSE) ||
	    (prev_object->purgable != VM_PURGABLE_DENY) ||
	    (prev_object->paging_in_progress != 0) ||
	    (prev_object->activity_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return FALSE;
	}

	vm_object_coalesce_count++;

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
	    prev_offset + prev_size,
	    prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->vo_size) {
		prev_object->vo_size = newsize;
	}

	vm_object_unlock(prev_object);
	return TRUE;
}
kern_return_t
vm_object_populate_with_private(
	vm_object_t object,
	vm_object_offset_t offset,
	ppnum_t phys_page,
	vm_size_t size)
{
	ppnum_t base_page;
	vm_object_offset_t base_offset;

	if (!object->private) {
		return KERN_FAILURE;
	}

	base_page = phys_page;

	vm_object_lock(object);

	if (!object->phys_contiguous) {
		vm_page_t m;

		if ((base_offset = trunc_page_64(offset)) != offset) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		base_offset += object->paging_offset;

		while (size) {
			m = vm_page_lookup(object, base_offset);

			if (m != VM_PAGE_NULL) {
				if (m->vmp_fictitious) {
					if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
						vm_page_lockspin_queues();
						m->vmp_private = TRUE;
						vm_page_unlock_queues();

						m->vmp_fictitious = FALSE;
						VM_PAGE_SET_PHYS_PAGE(m, base_page);
					}
				} else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
					if (!m->vmp_private) {
						/*
						 * we'd leak a real page... that can't be right
						 */
						panic("vm_object_populate_with_private - %p not private", m);
					}
					if (m->vmp_pmapped) {
						/*
						 * pmap call to clear old mapping
						 */
						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
					}
					VM_PAGE_SET_PHYS_PAGE(m, base_page);
				}
			} else {
				while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) {
					vm_page_more_fictitious();
				}

				/*
				 * private normally requires lock_queues but since we
				 * are initializing the page, it's not necessary here
				 */
				m->vmp_private = TRUE;
				m->vmp_fictitious = FALSE;
				VM_PAGE_SET_PHYS_PAGE(m, base_page);
				m->vmp_unusual = TRUE;
				m->vmp_busy = FALSE;

				vm_page_insert(m, object, base_offset);
			}
			base_page++; /* Go to the next physical page */
			base_offset += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		/* NOTE: we should check the original settings here */
		/* if we have a size > zero a pmap call should be made */
		/* to disable the range */

		/* shadows on contiguous memory are not allowed */
		/* we therefore can use the offset field */
		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
		object->vo_size = size;
	}
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
kern_return_t
memory_object_create_named(
	memory_object_t pager,
	memory_object_offset_t size,
	memory_object_control_t *control)
{
	vm_object_t object;

	*control = MEMORY_OBJECT_CONTROL_NULL;
	if (pager == MEMORY_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	object = vm_object_memory_object_associate(pager,
	    VM_OBJECT_NULL,
	    size,
	    TRUE);
	if (object == VM_OBJECT_NULL) {
		return KERN_INVALID_OBJECT;
	}

	/* wait for object (if any) to be ready */
	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->named = TRUE;
		while (!object->pager_ready) {
			vm_object_sleep(object,
			    VM_OBJECT_EVENT_PAGER_READY,
			    THREAD_UNINT);
		}
		*control = object->pager_control;
		vm_object_unlock(object);
	}
	return KERN_SUCCESS;
}
/*
 *	Routine:	memory_object_recover_named [user interface]
 *	Purpose:
 *		Attempt to recover a named reference for a VM object.
 *		VM will verify that the object has not already started
 *		down the termination path, and if it has, will optionally
 *		wait for that to finish.
 *	Returns:
 *		KERN_SUCCESS - we recovered a named reference on the object
 *		KERN_FAILURE - we could not recover a reference (object dead)
 *		KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
	memory_object_control_t control,
	boolean_t wait_on_terminating)
{
	vm_object_t object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
restart:
	vm_object_lock(object);

	if (object->terminating && wait_on_terminating) {
		vm_object_wait(object,
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
		    THREAD_UNINT);
		goto restart;
	}

	if (!object->alive) {
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	if (object->named == TRUE) {
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}
	object->named = TRUE;
	vm_object_lock_assert_exclusive(object);
	object->ref_count++;
	vm_object_res_reference(object);
	while (!object->pager_ready) {
		vm_object_sleep(object,
		    VM_OBJECT_EVENT_PAGER_READY,
		    THREAD_UNINT);
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
/*
 *	vm_object_release_name:
 *
 *	Enforces name semantic on memory_object reference count decrement.
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_create_named.
 *
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1, i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed, the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable; otherwise, when
 *	the reference count is zero, it is simply terminated.
 */
__private_extern__ kern_return_t
vm_object_release_name(
	vm_object_t object,
	int flags)
{
	vm_object_t shadow;
	boolean_t original_object = TRUE;

	while (object != VM_OBJECT_NULL) {
		vm_object_lock(object);

		assert(object->alive);
		if (original_object) {
			assert(object->named);
		}
		assert(object->ref_count > 0);

		/*
		 *	We have to wait for initialization before
		 *	destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
			    VM_OBJECT_EVENT_INITIALIZED,
			    THREAD_UNINT);
			vm_object_unlock(object);
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		if (((object->ref_count > 1)
		    && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
		    || (object->terminating)) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		} else {
			if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
				vm_object_unlock(object);
				return KERN_SUCCESS;
			}
		}

		if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
		    (object->ref_count == 1)) {
			if (original_object) {
				object->named = FALSE;
			}
			vm_object_unlock(object);
			/* let vm_object_deallocate push this thing into */
			/* the cache, if that it is where it is bound */
			vm_object_deallocate(object);
			return KERN_SUCCESS;
		}
		VM_OBJ_RES_DECR(object);
		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

		if (object->ref_count == 1) {
			if (vm_object_terminate(object) != KERN_SUCCESS) {
				if (original_object) {
					return KERN_FAILURE;
				}
				return KERN_SUCCESS;
			}
			if (shadow != VM_OBJECT_NULL) {
				original_object = FALSE;
				object = shadow;
				continue;
			}
			return KERN_SUCCESS;
		} else {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			assert(object->ref_count > 0);
			if (original_object) {
				object->named = FALSE;
			}
			vm_object_unlock(object);
			return KERN_SUCCESS;
		}
	}
	/*NOTREACHED*/
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
vm_object_lock_request(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot)
{
	__unused boolean_t should_flush;

	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	/*
	 *	Check for bogus arguments.
	 */
	if (object == VM_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	(void)vm_object_update(object,
	    offset, size, NULL, NULL, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
/*
 * Empty a purgeable object by grabbing the physical pages assigned to it and
 * putting them on the free queue without writing them to backing store, etc.
 * When the pages are next touched they will be demand zero-fill pages.  We
 * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
 * skip referenced/dirty pages, pages on the active queue, etc.  We're more
 * than happy to grab these since this is a purgeable object.  We mark the
 * object as "empty" after reaping its pages.
 *
 * On entry the object must be locked and it must be
 * purgeable with no delayed copies pending.
 */
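/*
 * Returns the total number of pages purged: the resident pages reaped
 * below plus, when it is safe to reap them, the pages reclaimed from
 * the VM compressor.
 */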
uint64_t
vm_object_purge(vm_object_t object, int flags)
{
	unsigned int object_page_count = 0, pgcount = 0;
	uint64_t total_purged_pgcount = 0;
	boolean_t skipped_object = FALSE;

	vm_object_lock_assert_exclusive(object);

	if (object->purgable == VM_PURGABLE_DENY) {
		return 0;
	}

	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

	/*
	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
	 * reaping its pages.  We update vm_page_purgeable_count in bulk
	 * and we don't want vm_page_remove() to update it again for each
	 * page we reap later.
	 *
	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
	 * are all accounted for in the "volatile" ledgers, so this does not
	 * make any difference.
	 * If we transitioned directly from NONVOLATILE to EMPTY,
	 * vm_page_purgeable_count must have been updated when the object
	 * was dequeued from its volatile queue and the purgeable ledgers
	 * must have also been updated accordingly at that time (in
	 * vm_object_purgable_control()).
	 */
	if (object->purgable == VM_PURGABLE_VOLATILE) {
		unsigned int delta;

		assert(object->resident_page_count >=
		    object->wired_page_count);
		delta = (object->resident_page_count -
		    object->wired_page_count);
		if (delta != 0) {
			assert(vm_page_purgeable_count >=
			    delta);
			OSAddAtomic(-delta,
			    (SInt32 *)&vm_page_purgeable_count);
		}
		if (object->wired_page_count != 0) {
			assert(vm_page_purgeable_wired_count >=
			    object->wired_page_count);
			OSAddAtomic(-object->wired_page_count,
			    (SInt32 *)&vm_page_purgeable_wired_count);
		}
		object->purgable = VM_PURGABLE_EMPTY;
	}
	assert(object->purgable == VM_PURGABLE_EMPTY);

	object_page_count = object->resident_page_count;

	vm_object_reap_pages(object, REAP_PURGEABLE);

	if (object->resident_page_count >= object_page_count) {
		total_purged_pgcount = 0;
	} else {
		total_purged_pgcount = object_page_count - object->resident_page_count;
	}

	if (object->pager != NULL) {
		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

		if (object->activity_in_progress == 0 &&
		    object->paging_in_progress == 0) {
			/*
			 * Also reap any memory coming from this object
			 * in the VM compressor.
			 *
			 * There are no operations in progress on the VM object
			 * and no operation can start while we're holding the
			 * VM object lock, so it's safe to reap the compressed
			 * pages and update the page counts.
			 */
			pgcount = vm_compressor_pager_get_count(object->pager);
			if (pgcount) {
				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
				vm_compressor_pager_count(object->pager,
				    -pgcount,
				    FALSE, /* shared */
				    object);
				vm_object_owner_compressed_update(object,
				    -pgcount);
			}
			if (!(flags & C_DONT_BLOCK)) {
				assert(vm_compressor_pager_get_count(object->pager)
				    == 0);
			}
		} else {
			/*
			 * There's some kind of paging activity in progress
			 * for this object, which could result in a page
			 * being compressed or decompressed, possibly while
			 * the VM object is not locked, so it could race
			 * with us.
			 *
			 * We can't really synchronize this without possibly
			 * causing a deadlock when the compressor needs to
			 * allocate or free memory while compressing or
			 * decompressing a page from a purgeable object
			 * mapped in the kernel_map...
			 *
			 * So let's not attempt to purge the compressor
			 * pager if there's any kind of operation in
			 * progress on the VM object.
			 */
			skipped_object = TRUE;
		}
	}

	vm_object_lock_assert_exclusive(object);

	total_purged_pgcount += pgcount;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
	    VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
	    object_page_count,
	    total_purged_pgcount,
	    skipped_object,
	    0);

	return total_purged_pgcount;
}
/*
 * vm_object_purgeable_control() allows the caller to control and investigate the
 * state of a purgeable object.  A purgeable object is created via a call to
 * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
 * never be coalesced with any other object -- even other purgeable objects --
 * and will thus always remain a distinct object.  A purgeable object has
 * special semantics when its reference count is exactly 1.  If its reference
 * count is greater than 1, then a purgeable object will behave like a normal
 * object and attempts to use this interface will result in an error return
 * of KERN_INVALID_ARGUMENT.
 *
 * A purgeable object may be put into a "volatile" state which will make the
 * object's pages eligible for being reclaimed without paging to backing
 * store if the system runs low on memory.  If the pages in a volatile
 * purgeable object are reclaimed, the purgeable object is said to have been
 * "emptied."  When a purgeable object is emptied the system will reclaim as
 * many pages from the object as it can in a convenient manner (pages already
 * en route to backing store or busy for other reasons are left as is).  When
 * a purgeable object is made volatile, its pages will generally be reclaimed
 * before other pages in the application's working set.  This semantic is
 * generally used by applications which can recreate the data in the object
 * faster than it can be paged in.  One such example might be media assets
 * which can be reread from a much faster RAID volume.
 *
 * A purgeable object may be designated as "non-volatile" which means it will
 * behave like all other objects in the system with pages being written to and
 * read from backing store as needed to satisfy system memory needs.  If the
 * object was emptied before the object was made non-volatile, that fact will
 * be returned as the old state of the purgeable object (see
 * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
 * were reclaimed as part of emptying the object will be refaulted in as
 * zero-fill on demand.  It is up to the application to note that an object
 * was emptied and recreate the object's contents if necessary.  When a
 * purgeable object is made non-volatile, its pages will generally not be paged
 * out to backing store in the immediate future.  A purgeable object may also
 * be manually emptied.
 *
 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
 * volatile purgeable object may be queried at any time.  This information may
 * be used as a control input to let the application know when the system is
 * experiencing memory pressure and is reclaiming memory.
 *
 * The specified address may be any address within the purgeable object.  If
 * the specified address does not represent any object in the target task's
 * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
 * object containing the specified address is not a purgeable object, then
 * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
 * returned.
 *
 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
 * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
 * state is used to set the new state of the purgeable object and return its
 * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
 * object is returned in the parameter state.
 *
 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
 * the non-volatile, volatile and volatile/empty states described above.
 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
 * immediately reclaim as many pages in the object as can be conveniently
 * collected (some may have already been written to backing store or be
 * otherwise busy).
 *
 * The process of making a purgeable object non-volatile and determining its
 * previous state is atomic.  Thus, if a purgeable object is made
 * VM_PURGABLE_NONVOLATILE and the old state is returned as
 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
 * completely intact and will remain so until the object is made volatile
 * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
 * was reclaimed while it was in a volatile state and its previous contents
 * are lost.
 *
 * The object must be locked.
 */
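/*
 * Illustrative sketch only (not from the original source): a caller
 * holding the object locked exclusively could cycle a purgeable object
 * through the states described above like this:
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
 *	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
 *		... the contents were reclaimed and must be recreated ...
 *	}
 */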
kern_return_t
vm_object_purgable_control(
	vm_object_t object,
	vm_purgable_t control,
	int *state)
{
	int old_state;
	int new_state;

	if (object == VM_OBJECT_NULL) {
		/*
		 * Object must already be present or it can't be purgeable.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock_assert_exclusive(object);

	/*
	 * Get current state of the purgeable object.
	 */
	old_state = object->purgable;
	if (old_state == VM_PURGABLE_DENY) {
		return KERN_INVALID_ARGUMENT;
	}

	/* purgeable can't have delayed copies - now or in the future */
	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

	/*
	 * Execute the desired operation.
	 */
	if (control == VM_PURGABLE_GET_STATE) {
		*state = old_state;
		return KERN_SUCCESS;
	}

	if (control == VM_PURGABLE_SET_STATE &&
	    object->purgeable_only_by_kernel) {
		return KERN_PROTECTION_FAILURE;
	}

	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
		object->volatile_empty = TRUE;
	}
	if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
		object->volatile_fault = TRUE;
	}

	new_state = *state & VM_PURGABLE_STATE_MASK;
	if (new_state == VM_PURGABLE_VOLATILE) {
		if (old_state == VM_PURGABLE_EMPTY) {
			/* what's been emptied must stay empty */
			new_state = VM_PURGABLE_EMPTY;
		}
		if (object->volatile_empty) {
			/* debugging mode: go straight to empty */
			new_state = VM_PURGABLE_EMPTY;
		}
	}

	switch (new_state) {
	case VM_PURGABLE_DENY:
		/*
		 * Attempting to convert purgeable memory to non-purgeable:
		 * not supported.
		 */
		return KERN_INVALID_ARGUMENT;
	case VM_PURGABLE_NONVOLATILE:
		object->purgable = new_state;

		if (old_state == VM_PURGABLE_VOLATILE) {
			unsigned int delta;

			assert(object->resident_page_count >=
			    object->wired_page_count);
			delta = (object->resident_page_count -
			    object->wired_page_count);

			assert(vm_page_purgeable_count >= delta);

			if (delta != 0) {
				OSAddAtomic(-delta,
				    (SInt32 *)&vm_page_purgeable_count);
			}
			if (object->wired_page_count != 0) {
				assert(vm_page_purgeable_wired_count >=
				    object->wired_page_count);
				OSAddAtomic(-object->wired_page_count,
				    (SInt32 *)&vm_page_purgeable_wired_count);
			}

			vm_page_lock_queues();

			/* object should be on a queue */
			assert(object->objq.next != NULL &&
			    object->objq.prev != NULL);
			purgeable_q_t queue;

			/*
			 * Move object from its volatile queue to the
			 * non-volatile queue...
			 */
			queue = vm_purgeable_object_remove(object);
			assert(queue);

			if (object->purgeable_when_ripe) {
				vm_purgeable_token_delete_last(queue);
			}
			assert(queue->debug_count_objects >= 0);

			vm_page_unlock_queues();
		}
		if (old_state == VM_PURGABLE_VOLATILE ||
		    old_state == VM_PURGABLE_EMPTY) {
			/*
			 * Transfer the object's pages from the volatile to
			 * non-volatile ledgers.
			 */
			vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
		}

		break;

	case VM_PURGABLE_VOLATILE:
		if (object->volatile_fault) {
			vm_page_t p;
			int refmod;

			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
				if (p->vmp_busy ||
				    VM_PAGE_WIRED(p) ||
				    p->vmp_fictitious) {
					continue;
				}
				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				if ((refmod & VM_MEM_MODIFIED) &&
				    !p->vmp_dirty) {
					SET_PAGE_DIRTY(p, FALSE);
				}
			}
		}

		assert(old_state != VM_PURGABLE_EMPTY);

		purgeable_q_t queue;

		/* find the correct queue */
		if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
			queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
		} else {
			if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
				queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
			} else {
				queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
			}
		}

		if (old_state == VM_PURGABLE_NONVOLATILE ||
		    old_state == VM_PURGABLE_EMPTY) {
			unsigned int delta;

			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
			    VM_PURGABLE_NO_AGING) {
				object->purgeable_when_ripe = FALSE;
			} else {
				object->purgeable_when_ripe = TRUE;
			}

			if (object->purgeable_when_ripe) {
				kern_return_t result;

				/* try to add token... this can fail */
				vm_page_lock_queues();

				result = vm_purgeable_token_add(queue);
				if (result != KERN_SUCCESS) {
					vm_page_unlock_queues();
					return result;
				}
				vm_page_unlock_queues();
			}

			assert(object->resident_page_count >=
			    object->wired_page_count);
			delta = (object->resident_page_count -
			    object->wired_page_count);

			if (delta != 0) {
				OSAddAtomic(delta,
				    &vm_page_purgeable_count);
			}
			if (object->wired_page_count != 0) {
				OSAddAtomic(object->wired_page_count,
				    &vm_page_purgeable_wired_count);
			}

			object->purgable = new_state;

			/* object should be on "non-volatile" queue */
			assert(object->objq.next != NULL);
			assert(object->objq.prev != NULL);
		} else if (old_state == VM_PURGABLE_VOLATILE) {
			purgeable_q_t old_queue;
			boolean_t purgeable_when_ripe;

			/*
			 * if reassigning priorities / purgeable groups, we don't change the
			 * token queue. So moving priorities will not make pages stay around longer.
			 * Reasoning is that the algorithm gives most priority to the most important
			 * object. If a new token is added, the most important object's priority is boosted.
			 * This biases the system already for purgeable queues that move a lot.
			 * It doesn't seem more biasing is necessary in this case, where no new object is added.
			 */
			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */

			old_queue = vm_purgeable_object_remove(object);
			assert(old_queue);

			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
			    VM_PURGABLE_NO_AGING) {
				purgeable_when_ripe = FALSE;
			} else {
				purgeable_when_ripe = TRUE;
			}

			if (old_queue != queue ||
			    (purgeable_when_ripe !=
			    object->purgeable_when_ripe)) {
				kern_return_t result;

				/* Changing queue. Have to move token. */
				vm_page_lock_queues();
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_delete_last(old_queue);
				}
				object->purgeable_when_ripe = purgeable_when_ripe;
				if (object->purgeable_when_ripe) {
					result = vm_purgeable_token_add(queue);
					assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */
				}
				vm_page_unlock_queues();
			}
		}
		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
		if (old_state == VM_PURGABLE_NONVOLATILE) {
			vm_purgeable_accounting(object,
			    VM_PURGABLE_NONVOLATILE);
		}

		assert(queue->debug_count_objects >= 0);

		break;

	case VM_PURGABLE_EMPTY:
		if (object->volatile_fault) {
			vm_page_t p;
			int refmod;

			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
				if (p->vmp_busy ||
				    VM_PAGE_WIRED(p) ||
				    p->vmp_fictitious) {
					continue;
				}
				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				if ((refmod & VM_MEM_MODIFIED) &&
				    !p->vmp_dirty) {
					SET_PAGE_DIRTY(p, FALSE);
				}
			}
		}

		if (old_state == VM_PURGABLE_VOLATILE) {
			purgeable_q_t old_queue;

			/* object should be on a queue */
			assert(object->objq.next != NULL &&
			    object->objq.prev != NULL);

			old_queue = vm_purgeable_object_remove(object);
			assert(old_queue);
			if (object->purgeable_when_ripe) {
				vm_page_lock_queues();
				vm_purgeable_token_delete_first(old_queue);
				vm_page_unlock_queues();
			}
		}

		if (old_state == VM_PURGABLE_NONVOLATILE) {
			/*
			 * This object's pages were previously accounted as
			 * "non-volatile" and now need to be accounted as
			 * "volatile".
			 */
			vm_purgeable_accounting(object,
			    VM_PURGABLE_NONVOLATILE);
			/*
			 * Set to VM_PURGABLE_EMPTY because the pages are no
			 * longer accounted in the "non-volatile" ledger
			 * and are also not accounted for in
			 * "vm_page_purgeable_count".
			 */
			object->purgable = VM_PURGABLE_EMPTY;
		}

		(void) vm_object_purge(object, 0);
		assert(object->purgable == VM_PURGABLE_EMPTY);

		break;
	}

	*state = old_state;

	vm_object_lock_assert_exclusive(object);

	return KERN_SUCCESS;
}
kern_return_t
vm_object_get_page_counts(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count)
{
	kern_return_t kr = KERN_SUCCESS;
	boolean_t count_dirty_pages = FALSE;
	vm_page_t p = VM_PAGE_NULL;
	unsigned int local_resident_count = 0;
	unsigned int local_dirty_count = 0;
	vm_object_offset_t cur_offset = 0;
	vm_object_offset_t end_offset = 0;

	if (object == VM_OBJECT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	cur_offset = offset;

	end_offset = offset + size;

	vm_object_lock_assert_exclusive(object);
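	/*
	 * Two counting strategies are used below: if the object has no more
	 * resident pages than the request spans, walk the resident page
	 * list once; otherwise probe each page-sized offset in the range
	 * with vm_page_lookup().
	 */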
	if (dirty_page_count != NULL) {
		count_dirty_pages = TRUE;
	}

	if (resident_page_count != NULL && count_dirty_pages == FALSE) {
		/*
		 * Fast path when:
		 * - we only want the resident page count, and,
		 * - the entire object is exactly covered by the request.
		 */
		if (offset == 0 && (object->vo_size == size)) {
			*resident_page_count = object->resident_page_count;
			goto out;
		}
	}

	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
		vm_page_queue_iterate(&object->memq, p, vmp_listq) {
			if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
				local_resident_count++;

				if (count_dirty_pages) {
					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
						local_dirty_count++;
					}
				}
			}
		}
	} else {
		for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
			p = vm_page_lookup(object, cur_offset);

			if (p != VM_PAGE_NULL) {
				local_resident_count++;

				if (count_dirty_pages) {
					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
						local_dirty_count++;
					}
				}
			}
		}
	}

	if (resident_page_count != NULL) {
		*resident_page_count = local_resident_count;
	}

	if (dirty_page_count != NULL) {
		*dirty_page_count = local_dirty_count;
	}

out:
	return kr;
}
#if TASK_SWAPPER
/*
 * vm_object_res_deallocate
 *
 * (recursively) decrement residence counts on vm objects and their shadows.
 * Called from vm_object_deallocate and when swapping out an object.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_deallocate(
	vm_object_t object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_all_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			vm_object_lock(tmp_object);
			if (object != orig_object) {
				vm_object_unlock(object);
			}
			object = tmp_object;
			assert(object->res_count > 0);
		} else {
			break;
		}
	}
	if (object != orig_object) {
		vm_object_unlock(object);
	}
}

/*
 * vm_object_res_reference
 *
 * Internal function to increment residence count on a vm object
 * and its shadows.  It is called only from vm_object_reference, and
 * when swapping in a vm object, via vm_map_swap.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_reference(
	vm_object_t object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	while ((++object->res_count == 1) &&
	    (object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object) {
			vm_object_unlock(object);
		}
		object = tmp_object;
	}
	if (object != orig_object) {
		vm_object_unlock(object);
	}
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif /* TASK_SWAPPER */
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
	vm_object_t object)
{
	if (object == VM_OBJECT_NULL) {
		return;
	}

	vm_object_lock(object);
	assert(object->ref_count > 0);
	vm_object_reference_locked(object);
	vm_object_unlock(object);
}
/*
 * vm_object_transpose
 *
 * This routine takes two VM objects of the same size and exchanges
 * their backing store.
 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
 *
 * The VM objects must not be locked by caller.
 */
unsigned int vm_object_transpose_count = 0;
kern_return_t
vm_object_transpose(
	vm_object_t object1,
	vm_object_t object2,
	vm_object_size_t transpose_size)
{
	vm_object_t tmp_object;
	kern_return_t retval;
	boolean_t object1_locked, object2_locked;
	vm_page_t page;
	vm_object_offset_t page_offset;

	tmp_object = VM_OBJECT_NULL;
	object1_locked = FALSE; object2_locked = FALSE;

	if (object1 == object2 ||
	    object1 == VM_OBJECT_NULL ||
	    object2 == VM_OBJECT_NULL) {
		/*
		 * If the 2 VM objects are the same, there's
		 * no point in exchanging their backing store.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Since we need to lock both objects at the same time,
	 * make sure we always lock them in the same order to
	 * avoid deadlocks.
	 */
	if (object1 > object2) {
		tmp_object = object1;
		object1 = object2;
		object2 = tmp_object;
	}

	/*
	 * Allocate a temporary VM object to hold object1's contents
	 * while we copy object2 to object1.
	 */
	tmp_object = vm_object_allocate(transpose_size);
	vm_object_lock(tmp_object);
	tmp_object->can_persist = FALSE;

	/*
	 * Grab control of the 1st VM object.
	 */
	vm_object_lock(object1);
	object1_locked = TRUE;
	if (!object1->alive || object1->terminating ||
	    object1->copy || object1->shadow || object1->shadowed ||
	    object1->purgable != VM_PURGABLE_DENY) {
		/*
		 * We don't deal with copy or shadow objects (yet).
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}
	/*
	 * We're about to mess with the object's backing store and
	 * taking a "paging_in_progress" reference wouldn't be enough
	 * to prevent any paging activity on this object, so the caller should
	 * have "quiesced" the objects beforehand, via a UPL operation with
	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
	 *
	 * Wait for any paging operation to complete (but only paging, not
	 * other kinds of activity not linked to the pager).  After we're
	 * satisfied that there's no more paging in progress, we keep the
	 * object locked, to guarantee that no one tries to access its pager.
	 */
	vm_object_paging_only_wait(object1, THREAD_UNINT);

	/*
	 * Same as above for the 2nd object...
	 */
	vm_object_lock(object2);
	object2_locked = TRUE;
	if (!object2->alive || object2->terminating ||
	    object2->copy || object2->shadow || object2->shadowed ||
	    object2->purgable != VM_PURGABLE_DENY) {
		retval = KERN_INVALID_VALUE;
		goto done;
	}
	vm_object_paging_only_wait(object2, THREAD_UNINT);

	if (object1->vo_size != object2->vo_size ||
	    object1->vo_size != transpose_size) {
		/*
		 * If the 2 objects don't have the same size, we can't
		 * exchange their backing stores or one would overflow.
		 * If their size doesn't match the caller's
		 * "transpose_size", we can't do it either because the
		 * transpose operation will affect the entire span of
		 * the objects.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Transpose the lists of resident pages.
	 * This also updates the resident_page_count and the memq_hint.
	 */
	if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
		/*
		 * No pages in object1, just transfer pages
		 * from object2 to object1.  No need to go through
		 * an intermediate object.
		 */
		while (!vm_page_queue_empty(&object2->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object2->memq);
			vm_page_rename(page, object1, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object2->memq));
	} else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
		/*
		 * No pages in object2, just transfer pages
		 * from object1 to object2.  No need to go through
		 * an intermediate object.
		 */
		while (!vm_page_queue_empty(&object1->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object1->memq);
			vm_page_rename(page, object2, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object1->memq));
	} else {
		/* transfer object1's pages to tmp_object */
		while (!vm_page_queue_empty(&object1->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object1->memq);
			page_offset = page->vmp_offset;
			vm_page_remove(page, TRUE);
			page->vmp_offset = page_offset;
			vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
		}
		assert(vm_page_queue_empty(&object1->memq));
		/* transfer object2's pages to object1 */
		while (!vm_page_queue_empty(&object2->memq)) {
			page = (vm_page_t) vm_page_queue_first(&object2->memq);
			vm_page_rename(page, object1, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&object2->memq));
		/* transfer tmp_object's pages to object2 */
		while (!vm_page_queue_empty(&tmp_object->memq)) {
			page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
			vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
			vm_page_insert(page, object2, page->vmp_offset);
		}
		assert(vm_page_queue_empty(&tmp_object->memq));
	}

#define __TRANSPOSE_FIELD(field) \
MACRO_BEGIN \
	tmp_object->field = object1->field; \
	object1->field = object2->field; \
	object2->field = tmp_object->field; \
MACRO_END
6428 /* "Lock" refers to the object not its contents */
6429 /* "size" should be identical */
6430 assert(object1
->vo_size
== object2
->vo_size
);
6431 /* "memq_hint" was updated above when transposing pages */
6432 /* "ref_count" refers to the object not its contents */
6433 assert(object1
->ref_count
>= 1);
6434 assert(object2
->ref_count
>= 1);
6436 /* "res_count" refers to the object not its contents */
6438 /* "resident_page_count" was updated above when transposing pages */
6439 /* "wired_page_count" was updated above when transposing pages */
6440 #if !VM_TAG_ACTIVE_UPDATE
6441 /* "wired_objq" was dealt with along with "wired_page_count" */
6442 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6443 /* "reusable_page_count" was updated above when transposing pages */
6444 /* there should be no "copy" */
6445 assert(!object1
->copy
);
6446 assert(!object2
->copy
);
6447 /* there should be no "shadow" */
6448 assert(!object1
->shadow
);
6449 assert(!object2
->shadow
);
6450 __TRANSPOSE_FIELD(vo_shadow_offset
); /* used by phys_contiguous objects */
6451 __TRANSPOSE_FIELD(pager
);
6452 __TRANSPOSE_FIELD(paging_offset
);
6453 __TRANSPOSE_FIELD(pager_control
);
6454 /* update the memory_objects' pointers back to the VM objects */
6455 if (object1
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
6456 memory_object_control_collapse(object1
->pager_control
,
6459 if (object2
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
6460 memory_object_control_collapse(object2
->pager_control
,
6463 __TRANSPOSE_FIELD(copy_strategy
);
6464 /* "paging_in_progress" refers to the object not its contents */
6465 assert(!object1
->paging_in_progress
);
6466 assert(!object2
->paging_in_progress
);
6467 assert(object1
->activity_in_progress
);
6468 assert(object2
->activity_in_progress
);
6469 /* "all_wanted" refers to the object not its contents */
6470 __TRANSPOSE_FIELD(pager_created
);
6471 __TRANSPOSE_FIELD(pager_initialized
);
6472 __TRANSPOSE_FIELD(pager_ready
);
6473 __TRANSPOSE_FIELD(pager_trusted
);
6474 __TRANSPOSE_FIELD(can_persist
);
6475 __TRANSPOSE_FIELD(internal
);
6476 __TRANSPOSE_FIELD(private);
6477 __TRANSPOSE_FIELD(pageout
);
6478 /* "alive" should be set */
6479 assert(object1
->alive
);
6480 assert(object2
->alive
);
6481 /* "purgeable" should be non-purgeable */
6482 assert(object1
->purgable
== VM_PURGABLE_DENY
);
6483 assert(object2
->purgable
== VM_PURGABLE_DENY
);
6484 /* "shadowed" refers to the the object not its contents */
6485 __TRANSPOSE_FIELD(purgeable_when_ripe
);
6486 __TRANSPOSE_FIELD(true_share
);
6487 /* "terminating" should not be set */
6488 assert(!object1
->terminating
);
6489 assert(!object2
->terminating
);
6490 /* transfer "named" reference if needed */
6491 if (object1
->named
&& !object2
->named
) {
6492 assert(object1
->ref_count
>= 2);
6493 assert(object2
->ref_count
>= 1);
6494 object1
->ref_count
--;
6495 object2
->ref_count
++;
6496 } else if (!object1
->named
&& object2
->named
) {
6497 assert(object1
->ref_count
>= 1);
6498 assert(object2
->ref_count
>= 2);
6499 object1
->ref_count
++;
6500 object2
->ref_count
--;
6502 __TRANSPOSE_FIELD(named
);
6503 /* "shadow_severed" refers to the object not its contents */
6504 __TRANSPOSE_FIELD(phys_contiguous
);
6505 __TRANSPOSE_FIELD(nophyscache
);
6506 /* "cached_list.next" points to transposed object */
6507 object1
->cached_list
.next
= (queue_entry_t
) object2
;
6508 object2
->cached_list
.next
= (queue_entry_t
) object1
;
6509 /* "cached_list.prev" should be NULL */
6510 assert(object1
->cached_list
.prev
== NULL
);
6511 assert(object2
->cached_list
.prev
== NULL
);
6512 __TRANSPOSE_FIELD(last_alloc
);
6513 __TRANSPOSE_FIELD(sequential
);
6514 __TRANSPOSE_FIELD(pages_created
);
6515 __TRANSPOSE_FIELD(pages_used
);
6516 __TRANSPOSE_FIELD(scan_collisions
);
6517 __TRANSPOSE_FIELD(cow_hint
);
6518 __TRANSPOSE_FIELD(wimg_bits
);
6519 __TRANSPOSE_FIELD(set_cache_attr
);
6520 __TRANSPOSE_FIELD(code_signed
);
6521 object1
->transposed
= TRUE
;
6522 object2
->transposed
= TRUE
;
6523 __TRANSPOSE_FIELD(mapping_in_progress
);
6524 __TRANSPOSE_FIELD(volatile_empty
);
6525 __TRANSPOSE_FIELD(volatile_fault
);
6526 __TRANSPOSE_FIELD(all_reusable
);
6527 assert(object1
->blocked_access
);
6528 assert(object2
->blocked_access
);
6529 __TRANSPOSE_FIELD(set_cache_attr
);
6530 assert(!object1
->object_is_shared_cache
);
6531 assert(!object2
->object_is_shared_cache
);
6532 /* ignore purgeable_queue_type and purgeable_queue_group */
6533 assert(!object1
->io_tracking
);
6534 assert(!object2
->io_tracking
);
6535 #if VM_OBJECT_ACCESS_TRACKING
6536 assert(!object1
->access_tracking
);
6537 assert(!object2
->access_tracking
);
6538 #endif /* VM_OBJECT_ACCESS_TRACKING */
6539 __TRANSPOSE_FIELD(no_tag_update
);
6540 #if CONFIG_SECLUDED_MEMORY
6541 assert(!object1
->eligible_for_secluded
);
6542 assert(!object2
->eligible_for_secluded
);
6543 assert(!object1
->can_grab_secluded
);
6544 assert(!object2
->can_grab_secluded
);
6545 #else /* CONFIG_SECLUDED_MEMORY */
6546 assert(object1
->__object3_unused_bits
== 0);
6547 assert(object2
->__object3_unused_bits
== 0);
6548 #endif /* CONFIG_SECLUDED_MEMORY */
6550 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6552 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.next
== NULL
));
6553 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.prev
== NULL
));
6554 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.next
== NULL
));
6555 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.prev
== NULL
));
6557 #undef __TRANSPOSE_FIELD
6559 retval
= KERN_SUCCESS
;
6565 if (tmp_object
!= VM_OBJECT_NULL
) {
6566 vm_object_unlock(tmp_object
);
6568 * Re-initialize the temporary object to avoid
6569 * deallocating a real pager.
6571 _vm_object_allocate(transpose_size
, tmp_object
);
6572 vm_object_deallocate(tmp_object
);
6573 tmp_object
= VM_OBJECT_NULL
;
6576 if (object1_locked
) {
6577 vm_object_unlock(object1
);
6578 object1_locked
= FALSE
;
6580 if (object2_locked
) {
6581 vm_object_unlock(object2
);
6582 object2_locked
= FALSE
;
6585 vm_object_transpose_count
++;
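/*
 * Illustrative sketch only (not part of the kernel sources): the expected
 * calling pattern around vm_object_transpose().  "quiesce_object_via_upl"
 * is a hypothetical helper standing in for whatever UPL_SET_IO_WIRE /
 * UPL_BLOCK_ACCESS setup the real caller (see upl_transpose()) performs;
 * the essential contract is that both objects are quiesced and unlocked
 * before the call.
 */
#if 0
static kern_return_t
transpose_backing_store_example(vm_object_t a, vm_object_t b,
    vm_object_size_t size)
{
	kern_return_t kr;

	quiesce_object_via_upl(a);	/* hypothetical helper */
	quiesce_object_via_upl(b);	/* hypothetical helper */

	kr = vm_object_transpose(a, b, size);
	/*
	 * KERN_INVALID_VALUE covers: identical or NULL objects,
	 * copy/shadow objects, purgeable objects, mismatched sizes.
	 */
	return kr;
}
#endif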
/*
 *	vm_object_cluster_size
 *
 *	Determine how big a cluster we should issue an I/O for...
 *
 *	Inputs:   *start == offset of page needed
 *		  *length == maximum cluster pager can handle
 *	Outputs:  *start == beginning offset of cluster
 *		  *length == length of cluster to try
 *
 *	The original *start will be encompassed by the cluster
 *
 */
extern int speculative_reads_disabled;

/*
 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
 * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
 * always be page-aligned. The derivation could involve operations (e.g. division)
 * that could give us non-page-size aligned values if we start out with values that
 * are odd multiples of PAGE_SIZE.
 */
#if CONFIG_EMBEDDED
unsigned int preheat_max_bytes = (1024 * 512);
#else /* CONFIG_EMBEDDED */
unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
#endif /* CONFIG_EMBEDDED */
unsigned int preheat_min_bytes = (1024 * 32);


__private_extern__ void
vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
    vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
{
	vm_size_t		pre_heat_size;
	vm_size_t		tail_size;
	vm_size_t		head_size;
	vm_size_t		max_length;
	vm_size_t		cluster_size;
	vm_object_offset_t	object_size;
	vm_object_offset_t	orig_start;
	vm_object_offset_t	target_start;
	vm_object_offset_t	offset;
	vm_behavior_t		behavior;
	boolean_t		look_behind = TRUE;
	boolean_t		look_ahead = TRUE;
	boolean_t		isSSD = FALSE;
	uint32_t		throttle_limit;
	int			sequential_run;
	int			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
	vm_size_t		max_ph_size;
	vm_size_t		min_ph_size;

	assert( !(*length & PAGE_MASK));
	assert( !(*start & PAGE_MASK_64));

	/*
	 * remember maximum length of run requested
	 */
	max_length = *length;
	/*
	 * we'll always return a cluster size of at least
	 * 1 page, since the original fault must always
	 * be processed
	 */
	*length = PAGE_SIZE;
	*io_streaming = 0;

	if (speculative_reads_disabled || fault_info == NULL) {
		/*
		 * no cluster... just fault the page in
		 */
		return;
	}
	orig_start = *start;
	target_start = orig_start;
	cluster_size = round_page(fault_info->cluster_size);
	behavior = fault_info->behavior;

	vm_object_lock(object);

	if (object->pager == MEMORY_OBJECT_NULL) {
		goto out;	/* pager is gone for this object, nothing more to do */
	}
	vnode_pager_get_isSSD(object->pager, &isSSD);

	min_ph_size = round_page(preheat_min_bytes);
	max_ph_size = round_page(preheat_max_bytes);

#if !CONFIG_EMBEDDED
	if (isSSD) {
		min_ph_size /= 2;
		max_ph_size /= 8;

		if (min_ph_size & PAGE_MASK_64) {
			min_ph_size = trunc_page(min_ph_size);
		}

		if (max_ph_size & PAGE_MASK_64) {
			max_ph_size = trunc_page(max_ph_size);
		}
	}
#endif /* !CONFIG_EMBEDDED */

	if (min_ph_size < PAGE_SIZE) {
		min_ph_size = PAGE_SIZE;
	}

	if (max_ph_size < PAGE_SIZE) {
		max_ph_size = PAGE_SIZE;
	} else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
		max_ph_size = MAX_UPL_TRANSFER_BYTES;
	}

	if (max_length > max_ph_size) {
		max_length = max_ph_size;
	}

	if (max_length <= PAGE_SIZE) {
		goto out;
	}

	if (object->internal) {
		object_size = object->vo_size;
	} else {
		vnode_pager_get_object_size(object->pager, &object_size);
	}

	object_size = round_page_64(object_size);

	if (orig_start >= object_size) {
		/*
		 * fault occurred beyond the EOF...
		 * we need to punt w/o changing the
		 * starting offset
		 */
		goto out;
	}
	if (object->pages_used > object->pages_created) {
		/*
		 * must have wrapped our 32 bit counters
		 * so reset
		 */
		object->pages_used = object->pages_created = 0;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {
	default:
		behavior = VM_BEHAVIOR_DEFAULT;
	/* fall through */

	case VM_BEHAVIOR_DEFAULT:
		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
			goto out;
		}

		if (sequential_run >= (3 * PAGE_SIZE)) {
			pre_heat_size = sequential_run + PAGE_SIZE;

			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
				look_behind = FALSE;
			} else {
				look_ahead = FALSE;
			}

			*io_streaming = 1;
		} else {
			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
				/*
				 * prime the pump
				 */
				pre_heat_size = min_ph_size;
			} else {
				/*
				 * Linear growth in PH size: The maximum size is max_length...
				 * this calculation will result in a size that is neither a
				 * power of 2 nor a multiple of PAGE_SIZE... so round
				 * it up to the nearest PAGE_SIZE boundary
				 */
				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;

				if (pre_heat_size < min_ph_size) {
					pre_heat_size = min_ph_size;
				} else {
					pre_heat_size = round_page(pre_heat_size);
				}
			}
		}
		break;

	case VM_BEHAVIOR_RANDOM:
		if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
			goto out;
		}
		break;

	case VM_BEHAVIOR_SEQUENTIAL:
		if ((pre_heat_size = cluster_size) == 0) {
			pre_heat_size = sequential_run + PAGE_SIZE;
		}
		look_behind = FALSE;
		*io_streaming = 1;

		break;

	case VM_BEHAVIOR_RSEQNTL:
		if ((pre_heat_size = cluster_size) == 0) {
			pre_heat_size = sequential_run + PAGE_SIZE;
		}
		look_ahead = FALSE;
		*io_streaming = 1;

		break;
	}
	throttle_limit = (uint32_t) max_length;
	assert(throttle_limit == max_length);

	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
		if (max_length > throttle_limit) {
			max_length = throttle_limit;
		}
	}
	if (pre_heat_size > max_length) {
		pre_heat_size = max_length;
	}

	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;

		if (consider_free < vm_page_throttle_limit) {
			pre_heat_size = trunc_page(pre_heat_size / 16);
		} else if (consider_free < vm_page_free_target) {
			pre_heat_size = trunc_page(pre_heat_size / 4);
		}

		if (pre_heat_size < min_ph_size) {
			pre_heat_size = min_ph_size;
		}
	}
	if (look_ahead == TRUE) {
		if (look_behind == TRUE) {
			/*
			 * if we get here it's due to a random access...
			 * so we want to center the original fault address
			 * within the cluster we will issue... make sure
			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
			 * necessarily an even number of pages so we need to truncate
			 * the result to a PAGE_SIZE boundary
			 */
			head_size = trunc_page(pre_heat_size / 2);

			if (target_start > head_size) {
				target_start -= head_size;
			} else {
				target_start = 0;
			}

			/*
			 * 'target_start' at this point represents the beginning offset
			 * of the cluster we are considering... 'orig_start' will be in
			 * the center of this cluster if we didn't have to clip the start
			 * due to running into the start of the file
			 */
		}
		if ((target_start + pre_heat_size) > object_size) {
			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
		}
		/*
		 * at this point calculate the number of pages beyond the original fault
		 * address that we want to consider... this is guaranteed not to extend beyond
		 * the current EOF...
		 */
		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
	} else {
		if (pre_heat_size > target_start) {
			/*
			 * since pre_heat_size is always smaller than 2^32,
			 * if it is larger than target_start (a 64 bit value)
			 * it is safe to clip target_start to 32 bits
			 */
			pre_heat_size = (vm_size_t) target_start;
		}
		tail_size = 0;
	}
	assert( !(target_start & PAGE_MASK_64));
	assert( !(pre_heat_size & PAGE_MASK_64));

	if (pre_heat_size <= PAGE_SIZE) {
		goto out;
	}

	if (look_behind == TRUE) {
		/*
		 * take a look at the pages before the original
		 * faulting offset... recalculate this in case
		 * we had to clip 'pre_heat_size' above to keep
		 * from running past the EOF.
		 */
		head_size = pre_heat_size - tail_size - PAGE_SIZE;

		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
			/*
			 * don't poke below the lowest offset
			 */
			if (offset < fault_info->lo_offset) {
				break;
			}
			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*start -= PAGE_SIZE_64;
			*length += PAGE_SIZE;
		}
	}
	if (look_ahead == TRUE) {
		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
			/*
			 * don't poke above the highest offset
			 */
			if (offset >= fault_info->hi_offset) {
				break;
			}
			assert(offset < object_size);

			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*length += PAGE_SIZE;
		}
	}
out:
	if (*length > max_length) {
		*length = max_length;
	}

	vm_object_unlock(object);

	DTRACE_VM1(clustersize, vm_size_t, *length);
}
/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the VM object rather than on a UPL
 */

kern_return_t
vm_object_page_op(
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_page_t		dst_page;

	vm_object_lock(object);

	if (ops & UPL_POP_PHYSICAL) {
		if (object->phys_contiguous) {
			if (phys_entry) {
				*phys_entry = (ppnum_t)
				    (object->vo_shadow_offset >> PAGE_SHIFT);
			}
			vm_object_unlock(object);
			return KERN_SUCCESS;
		} else {
			vm_object_unlock(object);
			return KERN_INVALID_OBJECT;
		}
	}
	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	while (TRUE) {
		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}

		/* Sync up on getting the busy bit */
		if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
		    (((ops & UPL_POP_SET) &&
		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
			/* someone else is playing with the page, we will */
			/* have to wait */
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
			continue;
		}

		if (ops & UPL_POP_DUMP) {
			if (dst_page->vmp_pmapped == TRUE) {
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
			}

			VM_PAGE_FREE(dst_page);
			break;
		}

		if (flags) {
			*flags = 0;

			/* Get the condition of flags before requested ops */
			/* are undertaken */

			if (dst_page->vmp_dirty) {
				*flags |= UPL_POP_DIRTY;
			}
			if (dst_page->vmp_free_when_done) {
				*flags |= UPL_POP_PAGEOUT;
			}
			if (dst_page->vmp_precious) {
				*flags |= UPL_POP_PRECIOUS;
			}
			if (dst_page->vmp_absent) {
				*flags |= UPL_POP_ABSENT;
			}
			if (dst_page->vmp_busy) {
				*flags |= UPL_POP_BUSY;
			}
		}

		/* The caller should have made a call either contingent with */
		/* or prior to this call to set UPL_POP_BUSY */
		if (ops & UPL_POP_SET) {
			/* The protection granted with this assert will */
			/* not be complete. If the caller violates the */
			/* convention and attempts to change page state */
			/* without first setting busy we may not see it */
			/* because the page may already be busy.  However */
			/* if such violations occur we will assert sooner */
			/* or later. */
			assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
			if (ops & UPL_POP_DIRTY) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			}
			if (ops & UPL_POP_PAGEOUT) {
				dst_page->vmp_free_when_done = TRUE;
			}
			if (ops & UPL_POP_PRECIOUS) {
				dst_page->vmp_precious = TRUE;
			}
			if (ops & UPL_POP_ABSENT) {
				dst_page->vmp_absent = TRUE;
			}
			if (ops & UPL_POP_BUSY) {
				dst_page->vmp_busy = TRUE;
			}
		}

		if (ops & UPL_POP_CLR) {
			assert(dst_page->vmp_busy);
			if (ops & UPL_POP_DIRTY) {
				dst_page->vmp_dirty = FALSE;
			}
			if (ops & UPL_POP_PAGEOUT) {
				dst_page->vmp_free_when_done = FALSE;
			}
			if (ops & UPL_POP_PRECIOUS) {
				dst_page->vmp_precious = FALSE;
			}
			if (ops & UPL_POP_ABSENT) {
				dst_page->vmp_absent = FALSE;
			}
			if (ops & UPL_POP_BUSY) {
				dst_page->vmp_busy = FALSE;
				PAGE_WAKEUP(dst_page);
			}
		}
		if (phys_entry) {
			/*
			 * The physical page number will remain valid
			 * only if the page is kept busy.
			 */
			assert(dst_page->vmp_busy);
			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
		}

		break;
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
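/*
 * Illustrative sketch only (not part of the kernel sources): a typical
 * page_op sequence pins a page's state and retrieves its physical page
 * number.  Per the assertion above, phys_entry stays valid only while the
 * page is kept busy, so the caller clears the busy bit when done.
 */
#if 0
static kern_return_t
page_op_example(vm_object_t object, vm_object_offset_t offset,
    ppnum_t *ppnum)
{
	kern_return_t	kr;
	int		flags;

	/* mark the page busy and get its physical page number */
	kr = vm_object_page_op(object, offset,
	    UPL_POP_SET | UPL_POP_BUSY, ppnum, &flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... use *ppnum while the page remains busy ... */

	/* clear the busy bit and wake any waiters */
	return vm_object_page_op(object, offset,
	    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
}
#endif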
/*
 * vm_object_range_op offers performance enhancement over
 * vm_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
vm_object_range_op(
	vm_object_t		object,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	uint32_t		*range)
{
	vm_object_offset_t	offset;
	vm_page_t		dst_page;

	if (offset_end - offset_beg > (uint32_t) -1) {
		/* range is too big and would overflow "*range" */
		return KERN_INVALID_ARGUMENT;
	}
	if (object->resident_page_count == 0) {
		if (range) {
			if (ops & UPL_ROP_PRESENT) {
				*range = 0;
			} else {
				*range = (uint32_t) (offset_end - offset_beg);
				assert(*range == (offset_end - offset_beg));
			}
		}
		return KERN_SUCCESS;
	}
	vm_object_lock(object);

	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	offset = offset_beg & ~PAGE_MASK_64;

	while (offset < offset_end) {
		dst_page = vm_page_lookup(object, offset);
		if (dst_page != VM_PAGE_NULL) {
			if (ops & UPL_ROP_DUMP) {
				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
					/*
					 * someone else is playing with the
					 * page, we will have to wait
					 */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
					/*
					 * need to relook the page up since its
					 * state may have changed while we slept
					 * it might even belong to a different object
					 * at this point
					 */
					continue;
				}
				if (dst_page->vmp_laundry) {
					vm_pageout_steal_laundry(dst_page, FALSE);
				}

				if (dst_page->vmp_pmapped == TRUE) {
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}

				VM_PAGE_FREE(dst_page);
			} else if ((ops & UPL_ROP_ABSENT)
			    && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
				break;
			}
		} else if (ops & UPL_ROP_PRESENT) {
			break;
		}
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);

	if (range) {
		if (offset > offset_end) {
			offset = offset_end;
		}
		if (offset > offset_beg) {
			*range = (uint32_t) (offset - offset_beg);
			assert(*range == (offset - offset_beg));
		} else {
			*range = 0;
		}
	}
	return KERN_SUCCESS;
}
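/*
 * Illustrative sketch only (not part of the kernel sources): using
 * range_op to measure how many bytes starting at "start" are absent,
 * without paying the per-page cost that repeated page_op calls (or a
 * UPL) would incur.  The scan stops at the first resident page that
 * isn't an "absent" placeholder (or that is busy).
 */
#if 0
static uint32_t
absent_run_length_example(vm_object_t object, vm_object_offset_t start,
    vm_object_offset_t end)
{
	uint32_t range = 0;

	(void) vm_object_range_op(object, start, end,
	    UPL_ROP_ABSENT, &range);
	return range;	/* length in bytes of the leading absent run */
}
#endif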
/*
 * Used to point a pager directly to a range of memory (when the pager may be associated
 *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
 *   expect that the virtual address will denote the start of a range that is physically contiguous.
 */
kern_return_t
pager_map_to_phys_contiguous(
	memory_object_control_t	object,
	memory_object_offset_t	offset,
	addr64_t		base_vaddr,
	vm_size_t		size)
{
	ppnum_t page_num;
	boolean_t clobbered_private;
	kern_return_t retval;
	vm_object_t pager_object;

	page_num = pmap_find_phys(kernel_pmap, base_vaddr);

	if (!page_num) {
		retval = KERN_FAILURE;
		goto out;
	}

	pager_object = memory_object_control_to_vm_object(object);

	if (!pager_object) {
		retval = KERN_FAILURE;
		goto out;
	}

	clobbered_private = pager_object->private;
	if (pager_object->private != TRUE) {
		vm_object_lock(pager_object);
		pager_object->private = TRUE;
		vm_object_unlock(pager_object);
	}
	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);

	if (retval != KERN_SUCCESS) {
		if (pager_object->private != clobbered_private) {
			vm_object_lock(pager_object);
			pager_object->private = clobbered_private;
			vm_object_unlock(pager_object);
		}
	}

out:
	return retval;
}
uint32_t scan_object_collision = 0;

void
vm_object_lock(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		mutex_pause(2);
	}
	DTRACE_VM(vm_object_lock_w);
	lck_rw_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
}

boolean_t
vm_object_lock_avoid(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		return TRUE;
	}
	return FALSE;
}

boolean_t
_vm_object_lock_try(vm_object_t object)
{
	boolean_t	retval;

	retval = lck_rw_try_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE) {
		DTRACE_VM(vm_object_lock_w);
		object->Lock_owner = current_thread();
	}
#endif
	return retval;
}

boolean_t
vm_object_lock_try(vm_object_t object)
{
	/*
	 * Called from hibernate path so check before blocking.
	 */
	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
		mutex_pause(2);
	}
	return _vm_object_lock_try(object);
}

void
vm_object_lock_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	DTRACE_VM(vm_object_lock_r);
	lck_rw_lock_shared(&object->Lock);
}

boolean_t
vm_object_lock_yield_shared(vm_object_t object)
{
	boolean_t retval = FALSE, force_yield = FALSE;

	vm_object_lock_assert_shared(object);

	force_yield = vm_object_lock_avoid(object);

	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
	if (retval) {
		DTRACE_VM(vm_object_lock_yield);
	}

	return retval;
}

boolean_t
vm_object_lock_try_shared(vm_object_t object)
{
	boolean_t retval;

	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	retval = lck_rw_try_lock_shared(&object->Lock);
	if (retval) {
		DTRACE_VM(vm_object_lock_r);
	}
	return retval;
}

boolean_t
vm_object_lock_upgrade(vm_object_t object)
{
	boolean_t	retval;

	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE) {
		DTRACE_VM(vm_object_lock_w);
		object->Lock_owner = current_thread();
	}
#endif
	return retval;
}

void
vm_object_unlock(vm_object_t object)
{
#if DEVELOPMENT || DEBUG
	if (object->Lock_owner) {
		if (object->Lock_owner != current_thread()) {
			panic("vm_object_unlock: not owner - %p\n", object);
		}
		object->Lock_owner = 0;
		DTRACE_VM(vm_object_unlock);
	}
#endif
	lck_rw_done(&object->Lock);
}
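/*
 * Illustrative sketch only (not part of the kernel sources): the
 * shared/upgrade variants above compose into the usual read-mostly
 * pattern.  Note that lck_rw_lock_shared_to_exclusive() drops the shared
 * hold when the upgrade fails, so the caller must re-take the lock from
 * scratch and revalidate anything it read.
 */
#if 0
static void
lock_discipline_example(vm_object_t object)
{
	vm_object_lock_shared(object);
	/* ... read-only inspection of the object ... */
	if (!vm_object_lock_upgrade(object)) {
		/*
		 * upgrade failed: the shared lock is already gone, so
		 * take the exclusive lock directly and re-check state
		 */
		vm_object_lock(object);
	}
	/* ... mutate the object ... */
	vm_object_unlock(object);
}
#endif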
unsigned int vm_object_change_wimg_mode_count = 0;

/*
 * The object must be locked
 */
void
vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
{
	vm_page_t p;

	vm_object_lock_assert_exclusive(object);

	vm_object_paging_wait(object, THREAD_UNINT);

	vm_page_queue_iterate(&object->memq, p, vmp_listq) {
		if (!p->vmp_fictitious) {
			pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
		}
	}
	if (wimg_mode == VM_WIMG_USE_DEFAULT) {
		object->set_cache_attr = FALSE;
	} else {
		object->set_cache_attr = TRUE;
	}

	object->wimg_bits = wimg_mode;

	vm_object_change_wimg_mode_count++;
}
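/*
 * Illustrative sketch only (not part of the kernel sources): callers must
 * already hold the object lock exclusively, e.g. when switching all of an
 * object's resident pages to an uncached mapping mode for device-style
 * access.
 */
#if 0
static void
wimg_example(vm_object_t object)
{
	vm_object_lock(object);
	vm_object_change_wimg_mode(object, VM_WIMG_IO);	/* uncached I/O mode */
	vm_object_unlock(object);
}
#endif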
#if CONFIG_FREEZE

/*
 * This routine does the "relocation" of previously
 * compressed pages belonging to this object that are
 * residing in a number of compressed segments into
 * a set of compressed segments dedicated to hold
 * compressed pages belonging to this object.
 */

extern void *freezer_chead;
extern char *freezer_compressor_scratch_buf;
extern int c_freezer_compression_count;
extern AbsoluteTime c_freezer_last_yield_ts;

#define MAX_FREE_BATCH			32
#define FREEZER_DUTY_CYCLE_ON_MS	5
#define FREEZER_DUTY_CYCLE_OFF_MS	5

static int c_freezer_should_yield(void);


static int
c_freezer_should_yield()
{
	AbsoluteTime	cur_time;
	uint64_t	nsecs;

	assert(c_freezer_last_yield_ts);
	clock_get_uptime(&cur_time);

	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
	absolutetime_to_nanoseconds(cur_time, &nsecs);

	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
		return 1;
	}
	return 0;
}


void
vm_object_compressed_freezer_done()
{
	vm_compressor_finished_filling(&freezer_chead);
}


uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget)
{
	vm_page_t	p;
	vm_page_t	local_freeq = NULL;
	int		local_freed = 0;
	kern_return_t	retval = KERN_SUCCESS;
	int		obj_resident_page_count_snapshot = 0;
	uint32_t	paged_out_count = 0;

	assert(object != VM_OBJECT_NULL);
	assert(object->internal);

	vm_object_lock(object);

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized) {
				vm_object_compressor_pager_create(object);
			}
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return paged_out_count;
		}
	}

	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		vm_object_offset_t	curr_offset = 0;

		/*
		 * Go through the object and make sure that any
		 * previously compressed pages are relocated into
		 * a compressed segment associated with our "freezer_chead".
		 */
		while (curr_offset < object->vo_size) {
			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);

			if (curr_offset == (vm_object_offset_t) -1) {
				break;
			}

			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);

			if (retval != KERN_SUCCESS) {
				break;
			}

			curr_offset += PAGE_SIZE_64;
		}
	}

	/*
	 * We can't hold the object lock while heading down into the compressed pager
	 * layer because we might need the kernel map lock down there to allocate new
	 * compressor data structures. And if this same object is mapped in the kernel
	 * and there's a fault on it, then that thread will want the object lock while
	 * holding the kernel map lock.
	 *
	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
	 * we won't be stuck in an infinite loop if the same page(s) keep getting
	 * decompressed. So we grab a snapshot of the number of pages in the object and
	 * we won't process any more than that number of pages.
	 */

	obj_resident_page_count_snapshot = object->resident_page_count;

	vm_object_activity_begin(object);

	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
		p = (vm_page_t)vm_page_queue_first(&object->memq);

		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);

		vm_page_lockspin_queues();

		if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) {
			vm_page_unlock_queues();

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);

			vm_page_queue_remove(&object->memq, p, vmp_listq);
			vm_page_queue_enter(&object->memq, p, vmp_listq);

			continue;
		}

		if (p->vmp_pmapped == TRUE) {
			int refmod_state, pmap_flags;

			if (p->vmp_dirty || p->vmp_precious) {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}

			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
			/*
			 * Clean and non-precious page.
			 */
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
			continue;
		}

		if (p->vmp_laundry) {
			vm_pageout_steal_laundry(p, TRUE);
		}

		vm_page_queues_remove(p, TRUE);

		vm_page_unlock_queues();


		/*
		 * In case the compressor fails to compress this page, we need it at
		 * the back of the object memq so that we don't keep trying to process it.
		 * Make the move here while we have the object lock held.
		 */

		vm_page_queue_remove(&object->memq, p, vmp_listq);
		vm_page_queue_enter(&object->memq, p, vmp_listq);

		/*
		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
		 *
		 * Mark the page busy so no one messes with it while we have the object lock dropped.
		 */

		p->vmp_busy = TRUE;

		vm_object_activity_begin(object);

		vm_object_unlock(object);

		if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p) == KERN_SUCCESS) {
			/*
			 * page has already been un-tabled from the object via 'vm_page_remove'
			 */
			p->vmp_snext = local_freeq;
			local_freeq = p;
			local_freed++;
			paged_out_count++;

			if (local_freed >= MAX_FREE_BATCH) {
				OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

				vm_page_free_list(local_freeq, TRUE);

				local_freeq = NULL;
				local_freed = 0;
			}
			c_freezer_compression_count++;
		}
		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);

		if (local_freed == 0 && c_freezer_should_yield()) {
			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
			clock_get_uptime(&c_freezer_last_yield_ts);
		}

		vm_object_lock(object);
	}

	if (local_freeq) {
		OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

		vm_page_free_list(local_freeq, TRUE);

		local_freeq = NULL;
		local_freed = 0;
	}

	vm_object_activity_end(object);

	vm_object_unlock(object);

	if (c_freezer_should_yield()) {
		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
		clock_get_uptime(&c_freezer_last_yield_ts);
	}
	return paged_out_count;
}

#endif /* CONFIG_FREEZE */
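/*
 * Illustrative note on the constants above: with FREEZER_DUTY_CYCLE_ON_MS
 * and FREEZER_DUTY_CYCLE_OFF_MS both set to 5, the freezer approximates a
 * 50% duty cycle.  Once c_freezer_should_yield() observes more than 5ms of
 * elapsed work since the last recorded yield timestamp, the pageout loop
 * sleeps for 5ms via thread_yield_internal() and then resets
 * c_freezer_last_yield_ts, bounding how long freezing can monopolize the
 * CPU.
 */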
void
vm_object_pageout(
	vm_object_t object)
{
	vm_page_t			p, next;
	struct vm_pageout_queue		*iq;

	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		return;
	}

	iq = &vm_pageout_queue_internal;

	assert(object != VM_OBJECT_NULL);

	vm_object_lock(object);

	if (!object->internal ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return;
	}

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized) {
				vm_object_compressor_pager_create(object);
			}
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

ReScan:
	next = (vm_page_t)vm_page_queue_first(&object->memq);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
		p = next;
		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);

		assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);

		if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
		    p->vmp_cleaning ||
		    p->vmp_laundry ||
		    p->vmp_busy ||
		    p->vmp_absent ||
		    p->vmp_error ||
		    p->vmp_fictitious ||
		    VM_PAGE_WIRED(p)) {
			/*
			 * Page is already being cleaned or can't be cleaned.
			 */
			continue;
		}
		if (vm_compressor_low_on_space()) {
			break;
		}

		/* Throw to the pageout queue */

		vm_page_lockspin_queues();

		if (VM_PAGE_Q_THROTTLED(iq)) {
			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1),
			    THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);

			vm_object_lock(object);
			goto ReScan;
		}

		assert(!p->vmp_fictitious);
		assert(!p->vmp_busy);
		assert(!p->vmp_absent);
		assert(!p->vmp_unusual);
		assert(!p->vmp_error);
		assert(!VM_PAGE_WIRED(p));
		assert(!p->vmp_cleaning);

		if (p->vmp_pmapped == TRUE) {
			int refmod_state;
			int pmap_options;

			/*
			 * Tell pmap the page should be accounted
			 * for as "compressed" if it's been modified.
			 */
			pmap_options =
			    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			if (p->vmp_dirty || p->vmp_precious) {
				/*
				 * We already know it's been modified,
				 * so tell pmap to account for it
				 * as "compressed".
				 */
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			}
			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
			    pmap_options,
			    NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (!p->vmp_dirty && !p->vmp_precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);
			continue;
		}
		vm_page_queues_remove(p, TRUE);

		vm_pageout_cluster(p);

		vm_page_unlock_queues();
	}
	vm_object_unlock(object);
}
#if CONFIG_IOSCHED
void
vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
{
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;

	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		return;
	}

	/*
	 * Create the request for I/O reprioritization.
	 * We use the noblock variant of zalloc because we're holding the object
	 * lock here and we could cause a deadlock in low memory conditions.
	 */
	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
	if (req == NULL) {
		return;
	}
	req->blkno = blkno;
	req->len = len;
	req->priority = prio;
	req->devvp = devvp;

	/* Insert request into the reprioritization list */
	IO_REPRIORITIZE_LIST_LOCK();
	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
	IO_REPRIORITIZE_LIST_UNLOCK();

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

	return;
}

void
vm_decmp_upl_reprioritize(upl_t upl, int prio)
{
	int offset;
	vm_object_t object;
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;
	uint64_t		blkno;
	uint32_t		len;
	upl_t			io_upl;
	uint64_t		*io_upl_reprio_info;
	int			io_upl_size;

	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
		return;
	}

	/*
	 * We don't want to perform any allocations with the upl lock held since that might
	 * result in a deadlock. If the system is low on memory, the pageout thread would
	 * try to pageout stuff and might wait on this lock. If we are waiting for the memory to
	 * be freed up by the pageout thread, it would be a deadlock.
	 */


	/* First step is just to get the size of the upl to find out how big the reprio info is */
	if (!upl_try_lock(upl)) {
		return;
	}

	if (upl->decmp_io_upl == NULL) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		return;
	}

	io_upl = upl->decmp_io_upl;
	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
	io_upl_size = io_upl->size;
	upl_unlock(upl);

	/* Now perform the allocation */
	io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
	if (io_upl_reprio_info == NULL) {
		return;
	}

	/* Now again take the lock, recheck the state and grab out the required info */
	if (!upl_try_lock(upl)) {
		goto out;
	}

	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		goto out;
	}
	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));

	/* Get the VM object for this UPL */
	if (io_upl->flags & UPL_SHADOWED) {
		object = io_upl->map_object->shadow;
	} else {
		object = io_upl->map_object;
	}

	/* Get the dev vnode ptr for this object */
	if (!object || !object->pager ||
	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		upl_unlock(upl);
		goto out;
	}

	upl_unlock(upl);

	/* Now we have all the information needed to do the expedite */

	offset = 0;
	while (offset < io_upl_size) {
		blkno	= io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
		len	= (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;

		/*
		 * This implementation may cause some spurious expedites due to the
		 * fact that we don't cleanup the blkno & len from the upl_reprio_info
		 * even after the I/O is complete.
		 */

		if (blkno != 0 && len != 0) {
			/* Create the request for I/O reprioritization */
			req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
			assert(req != NULL);
			req->blkno = blkno;
			req->len = len;
			req->priority = prio;
			req->devvp = devvp;

			/* Insert request into the reprioritization list */
			IO_REPRIORITIZE_LIST_LOCK();
			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
			IO_REPRIORITIZE_LIST_UNLOCK();

			offset += len;
		} else {
			offset += PAGE_SIZE;
		}
	}

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

out:
	kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
	return;
}

void
vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
{
	upl_t upl;
	upl_page_info_t *pl;
	unsigned int i, num_pages;
	int cur_tier;

	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);

	/*
	 * Scan through all UPLs associated with the object to find the
	 * UPL containing the contended page.
	 */
	queue_iterate(&o->uplq, upl, upl_t, uplq) {
		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
			continue;
		}
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		num_pages = (upl->size / PAGE_SIZE);

		/*
		 * For each page in the UPL page list, see if it matches the contended
		 * page and was issued as a low prio I/O.
		 */
		for (i = 0; i < num_pages; i++) {
			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
					vm_decmp_upl_reprioritize(upl, cur_tier);
					break;
				}
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
				    upl->upl_reprio_info[i], upl->upl_priority, 0);
				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
				}
				break;
			}
		}
		/* Check if we found any hits */
		if (i != num_pages) {
			break;
		}
	}

	return;
}

wait_result_t
vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
{
	wait_result_t ret;

	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);

	if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
		/*
		 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
		 */
		vm_page_handle_prio_inversion(o, m);
	}
	m->vmp_wanted = TRUE;
	ret = thread_sleep_vm_object(o, m, interruptible);
	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
	return ret;
}

static void
io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
{
	io_reprioritize_req_t   req = NULL;

	while (1) {
		IO_REPRIORITIZE_LIST_LOCK();
		if (queue_empty(&io_reprioritize_list)) {
			IO_REPRIORITIZE_LIST_UNLOCK();
			break;
		}

		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
		IO_REPRIORITIZE_LIST_UNLOCK();

		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
		zfree(io_reprioritize_req_zone, req);
	}

	IO_REPRIO_THREAD_CONTINUATION();
}
#endif /* CONFIG_IOSCHED */
#if VM_OBJECT_ACCESS_TRACKING
void
vm_object_access_tracking(
	vm_object_t	object,
	int		*access_tracking_p,
	uint32_t	*access_tracking_reads_p,
	uint32_t	*access_tracking_writes_p)
{
	int	access_tracking;

	access_tracking = !!*access_tracking_p;

	vm_object_lock(object);
	*access_tracking_p = object->access_tracking;
	if (access_tracking_reads_p) {
		*access_tracking_reads_p = object->access_tracking_reads;
	}
	if (access_tracking_writes_p) {
		*access_tracking_writes_p = object->access_tracking_writes;
	}
	object->access_tracking = access_tracking;
	object->access_tracking_reads = 0;
	object->access_tracking_writes = 0;
	vm_object_unlock(object);

	if (access_tracking) {
		vm_object_pmap_protect_options(object,
		    0,
		    object->vo_size,
		    PMAP_NULL,
		    0,
		    VM_PROT_NONE,
		    0);
	}
}
#endif /* VM_OBJECT_ACCESS_TRACKING */
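/*
 * Illustrative sketch only (not part of the kernel sources): the routine
 * above behaves as a get-and-set.  It returns the previous tracking state
 * and counters, installs the requested state, and resets the counters;
 * re-arming tracking removes all mappings (VM_PROT_NONE) so subsequent
 * accesses fault and get counted.
 */
#if 0
static void
access_tracking_sample_example(vm_object_t object)
{
	int enable = 1;		/* re-arm tracking for the next interval */
	uint32_t reads, writes;

	vm_object_access_tracking(object, &enable, &reads, &writes);
	/*
	 * "enable" now holds the previous tracking state, while "reads"
	 * and "writes" hold the counts accumulated since the last call.
	 */
}
#endif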
void
vm_object_ledger_tag_ledgers(
	vm_object_t	object,
	int		*ledger_idx_volatile,
	int		*ledger_idx_nonvolatile,
	int		*ledger_idx_volatile_compressed,
	int		*ledger_idx_nonvolatile_compressed,
	boolean_t	*do_footprint)
{
	assert(object->shadow == VM_OBJECT_NULL);

	*do_footprint = !object->vo_no_footprint;

	switch (object->vo_ledger_tag) {
	case VM_LEDGER_TAG_NONE:
		/*
		 * Regular purgeable memory:
		 * counts in footprint only when nonvolatile.
		 */
		*do_footprint = TRUE;
		assert(object->purgable != VM_PURGABLE_DENY);
		*ledger_idx_volatile = task_ledgers.purgeable_volatile;
		*ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
		*ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
		*ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
		break;
	case VM_LEDGER_TAG_DEFAULT:
		/*
		 * "default" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no_footprint".
		 */
		*ledger_idx_volatile = task_ledgers.tagged_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_NETWORK:
		/*
		 * "network" tagged memory:
		 * never counts in footprint.
		 */
		*do_footprint = FALSE;
		*ledger_idx_volatile = task_ledgers.network_volatile;
		*ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
		*ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
		*ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
		break;
	case VM_LEDGER_TAG_MEDIA:
		/*
		 * "media" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.media_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.media_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_GRAPHICS:
		/*
		 * "graphics" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.graphics_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
		}
		break;
	case VM_LEDGER_TAG_NEURAL:
		/*
		 * "neural" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint".
		 */
		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
		}
		break;
	default:
		panic("%s: object %p has unsupported ledger_tag %d\n",
		    __FUNCTION__, object, object->vo_ledger_tag);
	}
}
kern_return_t
vm_object_ownership_change(
	vm_object_t	object,
	int		new_ledger_tag,
	task_t		new_owner,
	int		new_ledger_flags,
	boolean_t	old_task_objq_locked)
{
	int		old_ledger_tag;
	task_t		old_owner;
	int		resident_count, wired_count;
	unsigned int	compressed_count;
	int		ledger_idx_volatile;
	int		ledger_idx_nonvolatile;
	int		ledger_idx_volatile_compressed;
	int		ledger_idx_nonvolatile_compressed;
	int		ledger_idx;
	int		ledger_idx_compressed;
	boolean_t	do_footprint, old_no_footprint, new_no_footprint;
	boolean_t	new_task_objq_locked;

	vm_object_lock_assert_exclusive(object);

	if (!object->internal) {
		return KERN_INVALID_ARGUMENT;
	}
	if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
	    object->purgable == VM_PURGABLE_DENY) {
		/* non-purgeable memory must have a valid non-zero ledger tag */
		return KERN_INVALID_ARGUMENT;
	}
	if (new_ledger_tag < 0 ||
	    new_ledger_tag > VM_LEDGER_TAG_MAX) {
		return KERN_INVALID_ARGUMENT;
	}
	if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}
	if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
	    object->purgable == VM_PURGABLE_DENY) {
		/*
		 * This VM object is neither ledger-tagged nor purgeable.
		 * We can convert it to "ledger tag" ownership iff it
		 * has not been used at all yet (no resident pages and
		 * no pager) and it's going to be assigned to a valid task.
		 */
		if (object->resident_page_count != 0 ||
		    object->pager != NULL ||
		    object->pager_created ||
		    object->ref_count != 1 ||
		    object->vo_owner != TASK_NULL ||
		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
		    new_owner == TASK_NULL) {
			return KERN_FAILURE;
		}
	}

	if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
		new_no_footprint = TRUE;
	} else {
		new_no_footprint = FALSE;
	}
#if __arm64__
	if (!new_no_footprint &&
	    object->purgable != VM_PURGABLE_DENY &&
	    new_owner != TASK_NULL &&
	    new_owner != VM_OBJECT_OWNER_DISOWNED &&
	    new_owner->task_legacy_footprint) {
		/*
		 * This task has been granted "legacy footprint" and should
		 * not be charged for its IOKit purgeable memory.  Since we
		 * might now change the accounting of such memory to the
		 * "graphics" ledger, for example, give it the "no footprint"
		 * option.
		 */
		new_no_footprint = TRUE;
	}
#endif /* __arm64__ */
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
	assert(object->shadow == VM_OBJECT_NULL);
	assert(object->copy == VM_OBJECT_NULL);

	old_ledger_tag = object->vo_ledger_tag;
	old_no_footprint = object->vo_no_footprint;
	old_owner = VM_OBJECT_OWNER(object);

	DTRACE_VM7(object_ownership_change,
	    vm_object_t, object,
	    task_t, old_owner,
	    int, old_ledger_tag,
	    int, old_no_footprint,
	    task_t, new_owner,
	    int, new_ledger_tag,
	    int, new_no_footprint);

	assert(object->internal);
	resident_count = object->resident_page_count - object->wired_page_count;
	wired_count = object->wired_page_count;
	compressed_count = vm_compressor_pager_get_count(object->pager);

	/*
	 * Deal with the old owner and/or ledger tag, if needed.
	 */
	if (old_owner != TASK_NULL &&
	    ((old_owner != new_owner)		/* new owner ... */
	    ||					/* ... or ... */
	    (old_no_footprint != new_no_footprint) /* new "no_footprint" */
	    ||					/* ... or ... */
	    old_ledger_tag != new_ledger_tag)) {	/* ... new ledger */
		/*
		 * Take this object off of the old owner's ledgers.
		 */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &do_footprint);
		if (object->purgable == VM_PURGABLE_VOLATILE ||
		    object->purgable == VM_PURGABLE_EMPTY) {
			ledger_idx = ledger_idx_volatile;
			ledger_idx_compressed = ledger_idx_volatile_compressed;
		} else {
			ledger_idx = ledger_idx_nonvolatile;
			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
		}
		if (resident_count) {
			/*
			 * Adjust the appropriate old owner's ledgers by the
			 * number of resident pages.
			 */
			ledger_debit(old_owner->ledger,
			    ledger_idx,
			    ptoa_64(resident_count));
			/* adjust old owner's footprint */
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_debit(old_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(resident_count));
			}
		}
		if (wired_count) {
			/* wired pages are always nonvolatile */
			ledger_debit(old_owner->ledger,
			    ledger_idx_nonvolatile,
			    ptoa_64(wired_count));
			if (do_footprint) {
				ledger_debit(old_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(wired_count));
			}
		}
		if (compressed_count) {
			/*
			 * Adjust the appropriate old owner's ledgers
			 * by the number of compressed pages.
			 */
			ledger_debit(old_owner->ledger,
			    ledger_idx_compressed,
			    ptoa_64(compressed_count));
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_debit(old_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(compressed_count));
			}
		}
		if (old_owner != new_owner) {
			/* remove object from old_owner's list of owned objects */
			DTRACE_VM2(object_owner_remove,
			    vm_object_t, object,
			    task_t, old_owner);
			if (!old_task_objq_locked) {
				task_objq_lock(old_owner);
			}
			old_owner->task_owned_objects--;
			queue_remove(&old_owner->task_objq, object,
			    vm_object_t, task_objq);
			switch (object->purgable) {
			case VM_PURGABLE_NONVOLATILE:
			case VM_PURGABLE_EMPTY:
				vm_purgeable_nonvolatile_owner_update(old_owner,
				    -1);
				break;
			case VM_PURGABLE_VOLATILE:
				vm_purgeable_volatile_owner_update(old_owner,
				    -1);
				break;
			default:
				break;
			}
			if (!old_task_objq_locked) {
				task_objq_unlock(old_owner);
			}
		}
	}

	/*
	 * Switch to new ledger tag and/or owner.
	 */

	new_task_objq_locked = FALSE;
	if (new_owner != old_owner &&
	    new_owner != TASK_NULL &&
	    new_owner != VM_OBJECT_OWNER_DISOWNED) {
		/*
		 * If the new owner is not accepting new objects ("disowning"),
		 * the object becomes "disowned" and will be added to
		 * the kernel's task_objq.
		 *
		 * Check first without locking, to avoid blocking while the
		 * task is disowning its objects.
		 */
		if (new_owner->task_objects_disowning) {
			new_owner = VM_OBJECT_OWNER_DISOWNED;
		} else {
			task_objq_lock(new_owner);
			/* check again now that we have the lock */
			if (new_owner->task_objects_disowning) {
				new_owner = VM_OBJECT_OWNER_DISOWNED;
				task_objq_unlock(new_owner);
			} else {
				new_task_objq_locked = TRUE;
			}
		}
	}

	object->vo_ledger_tag = new_ledger_tag;
	object->vo_owner = new_owner;
	object->vo_no_footprint = new_no_footprint;

	if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
		/*
		 * Disowned objects are added to the kernel's task_objq but
		 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
		 * differentiate them from objects intentionally owned by
		 * the kernel.
		 */
		assert(old_owner != kernel_task);
		new_owner = kernel_task;
		assert(!new_task_objq_locked);
		task_objq_lock(new_owner);
		new_task_objq_locked = TRUE;
	}

	/*
	 * Deal with the new owner and/or ledger tag, if needed.
	 */
	if (new_owner != TASK_NULL &&
	    ((new_owner != old_owner)		/* new owner ... */
	    ||					/* ... or ... */
	    (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
	    ||					/* ... or ... */
	    new_ledger_tag != old_ledger_tag)) {	/* ... new ledger */
		/*
		 * Add this object to the new owner's ledgers.
		 */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &do_footprint);
		if (object->purgable == VM_PURGABLE_VOLATILE ||
		    object->purgable == VM_PURGABLE_EMPTY) {
			ledger_idx = ledger_idx_volatile;
			ledger_idx_compressed = ledger_idx_volatile_compressed;
		} else {
			ledger_idx = ledger_idx_nonvolatile;
			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
		}
		if (resident_count) {
			/*
			 * Adjust the appropriate new owner's ledgers by the
			 * number of resident pages.
			 */
			ledger_credit(new_owner->ledger,
			    ledger_idx,
			    ptoa_64(resident_count));
			/* adjust new owner's footprint */
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(resident_count));
			}
		}
		if (wired_count) {
			/* wired pages are always nonvolatile */
			ledger_credit(new_owner->ledger,
			    ledger_idx_nonvolatile,
			    ptoa_64(wired_count));
			if (do_footprint) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(wired_count));
			}
		}
		if (compressed_count) {
			/*
			 * Adjust the new owner's ledgers by the number of
			 * compressed pages.
			 */
			ledger_credit(new_owner->ledger,
			    ledger_idx_compressed,
			    ptoa_64(compressed_count));
			if (do_footprint &&
			    object->purgable != VM_PURGABLE_VOLATILE &&
			    object->purgable != VM_PURGABLE_EMPTY) {
				ledger_credit(new_owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa_64(compressed_count));
			}
		}
		if (new_owner != old_owner) {
			/* add object to new_owner's list of owned objects */
			DTRACE_VM2(object_owner_add,
			    vm_object_t, object,
			    task_t, new_owner);
			assert(new_task_objq_locked);
			new_owner->task_owned_objects++;
			queue_enter(&new_owner->task_objq, object,
			    vm_object_t, task_objq);
			switch (object->purgable) {
			case VM_PURGABLE_NONVOLATILE:
			case VM_PURGABLE_EMPTY:
				vm_purgeable_nonvolatile_owner_update(new_owner,
				    +1);
				break;
			case VM_PURGABLE_VOLATILE:
				vm_purgeable_volatile_owner_update(new_owner,
				    +1);
				break;
			default:
				break;
			}
		}
	}

	if (new_task_objq_locked) {
		task_objq_unlock(new_owner);
	}

	return KERN_SUCCESS;
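}

/*
 * Illustrative sketch only (not part of the kernel sources): a typical
 * use of the routine above is re-tagging an object's memory accounting,
 * which debits the old owner's ledgers and credits the new owner's in one
 * shot.  The object must be internal and held exclusively.
 */
#if 0
static kern_return_t
retag_example(vm_object_t object, task_t owner)
{
	kern_return_t kr;

	vm_object_lock(object);
	kr = vm_object_ownership_change(object,
	    VM_LEDGER_TAG_NETWORK,	/* new ledger tag */
	    owner,			/* new owner */
	    0,				/* new_ledger_flags */
	    FALSE);			/* owner->task_objq not locked */
	vm_object_unlock(object);
	return kr;
}
#endif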
void
vm_owned_objects_disown(
	task_t	task)
{
	vm_object_t	next_object;
	vm_object_t	object;
	int		collisions;
	kern_return_t	kr;

	if (task == NULL) {
		return;
	}

	collisions = 0;

again:
	if (task->task_objects_disowned) {
		/* task has already disowned its owned objects */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		assert(task->task_owned_objects == 0);
		return;
	}

	task_objq_lock(task);

	task->task_objects_disowning = TRUE;

	for (object = (vm_object_t) queue_first(&task->task_objq);
	    !queue_end(&task->task_objq, (queue_entry_t) object);
	    object = next_object) {
		if (task->task_nonvolatile_objects == 0 &&
		    task->task_volatile_objects == 0 &&
		    task->task_owned_objects == 0) {
			/* no more objects owned by "task" */
			break;
		}

		next_object = (vm_object_t) queue_next(&object->task_objq);

#if DEBUG
		assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
		assert(object->vo_owner == task);
		if (!vm_object_lock_try(object)) {
			task_objq_unlock(task);
			mutex_pause(collisions++);
			goto again;
		}
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		kr = vm_object_ownership_change(
			object,
			object->vo_ledger_tag, /* unchanged */
			VM_OBJECT_OWNER_DISOWNED, /* new owner */
			0, /* new_ledger_flags */
			TRUE);  /* old_owner->task_objq locked */
		assert(kr == KERN_SUCCESS);
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
		vm_object_unlock(object);
	}

	if (__improbable(task->task_volatile_objects != 0 ||
	    task->task_nonvolatile_objects != 0 ||
	    task->task_owned_objects != 0)) {
		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
		    __FUNCTION__,
		    task,
		    task->task_volatile_objects,
		    task->task_nonvolatile_objects,
		    task->task_owned_objects,
		    &task->task_objq,
		    queue_first(&task->task_objq),
		    queue_last(&task->task_objq));
	}

	/* there shouldn't be any objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_owned_objects == 0);
	assert(task->task_objects_disowning);

	/* and we don't need to try and disown again */
	task->task_objects_disowned = TRUE;

	task_objq_unlock(task);
}
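/*
 * Illustrative note on the loop above: mutex_pause(collisions++) is the
 * usual XNU back-off idiom for lock-ordering conflicts.  Rather than
 * acquiring an object lock while holding the task_objq lock (which could
 * deadlock against a concurrent vm_object_ownership_change() that takes
 * the locks in the opposite order), the routine drops the queue lock,
 * pauses with increasing back-off, and restarts the scan from "again".
 */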