2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * Virtual memory object module.
66 #include <mach_pagemap.h>
67 #include <task_swapper.h>
69 #include <mach/mach_types.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/vm_param.h>
77 #include <ipc/ipc_types.h>
78 #include <ipc/ipc_port.h>
80 #include <kern/kern_types.h>
81 #include <kern/assert.h>
82 #include <kern/queue.h>
84 #include <kern/kalloc.h>
85 #include <kern/zalloc.h>
86 #include <kern/host.h>
87 #include <kern/host_statistics.h>
88 #include <kern/processor.h>
89 #include <kern/misc_protos.h>
90 #include <kern/policy_internal.h>
92 #include <vm/memory_object.h>
93 #include <vm/vm_compressor_pager.h>
94 #include <vm/vm_fault.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_protos.h>
100 #include <vm/vm_purgeable_internal.h>
102 #include <vm/vm_compressor.h>
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache.h>
108 boolean_t vm_object_collapse_compressor_allowed
= TRUE
;
110 struct vm_counters vm_counters
;
112 #if VM_OBJECT_TRACKING
113 boolean_t vm_object_tracking_inited
= FALSE
;
114 btlog_t
*vm_object_tracking_btlog
;
117 vm_object_tracking_init(void)
119 int vm_object_tracking
;
121 vm_object_tracking
= 1;
122 PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking
,
123 sizeof (vm_object_tracking
));
125 if (vm_object_tracking
) {
126 vm_object_tracking_btlog
= btlog_create(
127 VM_OBJECT_TRACKING_NUM_RECORDS
,
128 VM_OBJECT_TRACKING_BTDEPTH
,
129 TRUE
/* caller_will_remove_entries_for_element? */);
130 assert(vm_object_tracking_btlog
);
131 vm_object_tracking_inited
= TRUE
;
134 #endif /* VM_OBJECT_TRACKING */
137 * Virtual memory objects maintain the actual data
138 * associated with allocated virtual memory. A given
139 * page of memory exists within exactly one object.
141 * An object is only deallocated when all "references"
144 * Associated with each object is a list of all resident
145 * memory pages belonging to that object; this list is
146 * maintained by the "vm_page" module, but locked by the object's
149 * Each object also records the memory object reference
150 * that is used by the kernel to request and write
151 * back data (the memory object, field "pager"), etc...
153 * Virtual memory objects are allocated to provide
154 * zero-filled memory (vm_allocate) or map a user-defined
155 * memory object into a virtual address space (vm_map).
157 * Virtual memory objects that refer to a user-defined
158 * memory object are called "permanent", because all changes
159 * made in virtual memory are reflected back to the
160 * memory manager, which may then store it permanently.
161 * Other virtual memory objects are called "temporary",
162 * meaning that changes need be written back only when
163 * necessary to reclaim pages, and that storage associated
164 * with the object can be discarded once it is no longer
167 * A permanent memory object may be mapped into more
168 * than one virtual address space. Moreover, two threads
169 * may attempt to make the first mapping of a memory
170 * object concurrently. Only one thread is allowed to
171 * complete this mapping; all others wait for the
172 * "pager_initialized" field is asserted, indicating
173 * that the first thread has initialized all of the
174 * necessary fields in the virtual memory object structure.
176 * The kernel relies on a *default memory manager* to
177 * provide backing storage for the zero-filled virtual
178 * memory objects. The pager memory objects associated
179 * with these temporary virtual memory objects are only
180 * requested from the default memory manager when it
181 * becomes necessary. Virtual memory objects
182 * that depend on the default memory manager are called
183 * "internal". The "pager_created" field is provided to
184 * indicate whether these ports have ever been allocated.
186 * The kernel may also create virtual memory objects to
187 * hold changed pages after a copy-on-write operation.
188 * In this case, the virtual memory object (and its
189 * backing storage -- its memory object) only contain
190 * those pages that have been changed. The "shadow"
191 * field refers to the virtual memory object that contains
192 * the remainder of the contents. The "shadow_offset"
193 * field indicates where in the "shadow" these contents begin.
194 * The "copy" field refers to a virtual memory object
195 * to which changed pages must be copied before changing
196 * this object, in order to implement another form
197 * of copy-on-write optimization.
199 * The virtual memory object structure also records
200 * the attributes associated with its memory object.
201 * The "pager_ready", "can_persist" and "copy_strategy"
202 * fields represent those attributes. The "cached_list"
203 * field is used in the implementation of the persistence
206 * ZZZ Continue this comment.
209 /* Forward declarations for internal functions. */
210 static kern_return_t
vm_object_terminate(
213 extern void vm_object_remove(
216 static kern_return_t
vm_object_copy_call(
217 vm_object_t src_object
,
218 vm_object_offset_t src_offset
,
219 vm_object_size_t size
,
220 vm_object_t
*_result_object
);
222 static void vm_object_do_collapse(
224 vm_object_t backing_object
);
226 static void vm_object_do_bypass(
228 vm_object_t backing_object
);
230 static void vm_object_release_pager(
231 memory_object_t pager
,
234 static zone_t vm_object_zone
; /* vm backing store zone */
237 * All wired-down kernel memory belongs to a single virtual
238 * memory object (kernel_object) to avoid wasting data structures.
240 static struct vm_object kernel_object_store
__attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT
)));
241 vm_object_t kernel_object
;
243 static struct vm_object compressor_object_store
__attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT
)));
244 vm_object_t compressor_object
= &compressor_object_store
;
247 * The submap object is used as a placeholder for vm_map_submap
248 * operations. The object is declared in vm_map.c because it
249 * is exported by the vm_map module. The storage is declared
250 * here because it must be initialized here.
252 static struct vm_object vm_submap_object_store
__attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT
)));
255 * Virtual memory objects are initialized from
256 * a template (see vm_object_allocate).
258 * When adding a new field to the virtual memory
259 * object structure, be sure to add initialization
260 * (see _vm_object_allocate()).
262 static struct vm_object vm_object_template
;
264 unsigned int vm_page_purged_wired
= 0;
265 unsigned int vm_page_purged_busy
= 0;
266 unsigned int vm_page_purged_others
= 0;
270 * Virtual memory objects that are not referenced by
271 * any address maps, but that are allowed to persist
272 * (an attribute specified by the associated memory manager),
273 * are kept in a queue (vm_object_cached_list).
275 * When an object from this queue is referenced again,
276 * for example to make another address space mapping,
277 * it must be removed from the queue. That is, the
278 * queue contains *only* objects with zero references.
280 * The kernel may choose to terminate objects from this
281 * queue in order to reclaim storage. The current policy
282 * is to permit a fixed maximum number of unreferenced
283 * objects (vm_object_cached_max).
285 * A spin lock (accessed by routines
286 * vm_object_cache_{lock,lock_try,unlock}) governs the
287 * object cache. It must be held when objects are
288 * added to or removed from the cache (in vm_object_terminate).
289 * The routines that acquire a reference to a virtual
290 * memory object based on one of the memory object ports
291 * must also lock the cache.
293 * Ideally, the object cache should be more isolated
294 * from the reference mechanism, so that the lock need
295 * not be held to make simple references.
297 static vm_object_t
vm_object_cache_trim(
298 boolean_t called_from_vm_object_deallocate
);
300 static void vm_object_deactivate_all_pages(
303 static int vm_object_cached_high
; /* highest # cached objects */
304 static int vm_object_cached_max
= 512; /* may be patched*/
306 #define vm_object_cache_lock() \
307 lck_mtx_lock(&vm_object_cached_lock_data)
308 #define vm_object_cache_lock_try() \
309 lck_mtx_try_lock(&vm_object_cached_lock_data)
311 #endif /* VM_OBJECT_CACHE */
313 static queue_head_t vm_object_cached_list
;
314 static uint32_t vm_object_cache_pages_freed
= 0;
315 static uint32_t vm_object_cache_pages_moved
= 0;
316 static uint32_t vm_object_cache_pages_skipped
= 0;
317 static uint32_t vm_object_cache_adds
= 0;
318 static uint32_t vm_object_cached_count
= 0;
319 static lck_mtx_t vm_object_cached_lock_data
;
320 static lck_mtx_ext_t vm_object_cached_lock_data_ext
;
322 static uint32_t vm_object_page_grab_failed
= 0;
323 static uint32_t vm_object_page_grab_skipped
= 0;
324 static uint32_t vm_object_page_grab_returned
= 0;
325 static uint32_t vm_object_page_grab_pmapped
= 0;
326 static uint32_t vm_object_page_grab_reactivations
= 0;
328 #define vm_object_cache_lock_spin() \
329 lck_mtx_lock_spin(&vm_object_cached_lock_data)
330 #define vm_object_cache_unlock() \
331 lck_mtx_unlock(&vm_object_cached_lock_data)
333 static void vm_object_cache_remove_locked(vm_object_t
);
336 #define VM_OBJECT_HASH_COUNT 1024
337 #define VM_OBJECT_HASH_LOCK_COUNT 512
339 static lck_mtx_t vm_object_hashed_lock_data
[VM_OBJECT_HASH_LOCK_COUNT
];
340 static lck_mtx_ext_t vm_object_hashed_lock_data_ext
[VM_OBJECT_HASH_LOCK_COUNT
];
342 static queue_head_t vm_object_hashtable
[VM_OBJECT_HASH_COUNT
];
343 static struct zone
*vm_object_hash_zone
;
345 struct vm_object_hash_entry
{
346 queue_chain_t hash_link
; /* hash chain link */
347 memory_object_t pager
; /* pager we represent */
348 vm_object_t object
; /* corresponding object */
349 boolean_t waiting
; /* someone waiting for
353 typedef struct vm_object_hash_entry
*vm_object_hash_entry_t
;
354 #define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
356 #define VM_OBJECT_HASH_SHIFT 5
357 #define vm_object_hash(pager) \
358 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))
360 #define vm_object_lock_hash(pager) \
361 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
363 void vm_object_hash_entry_free(
364 vm_object_hash_entry_t entry
);
366 static void vm_object_reap(vm_object_t object
);
367 static void vm_object_reap_async(vm_object_t object
);
368 static void vm_object_reaper_thread(void);
370 static lck_mtx_t vm_object_reaper_lock_data
;
371 static lck_mtx_ext_t vm_object_reaper_lock_data_ext
;
373 static queue_head_t vm_object_reaper_queue
; /* protected by vm_object_reaper_lock() */
374 unsigned int vm_object_reap_count
= 0;
375 unsigned int vm_object_reap_count_async
= 0;
377 #define vm_object_reaper_lock() \
378 lck_mtx_lock(&vm_object_reaper_lock_data)
379 #define vm_object_reaper_lock_spin() \
380 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
381 #define vm_object_reaper_unlock() \
382 lck_mtx_unlock(&vm_object_reaper_lock_data)
385 /* I/O Re-prioritization request list */
386 queue_head_t io_reprioritize_list
;
387 lck_spin_t io_reprioritize_list_lock
;
389 #define IO_REPRIORITIZE_LIST_LOCK() \
390 lck_spin_lock(&io_reprioritize_list_lock)
391 #define IO_REPRIORITIZE_LIST_UNLOCK() \
392 lck_spin_unlock(&io_reprioritize_list_lock)
394 #define MAX_IO_REPRIORITIZE_REQS 8192
395 zone_t io_reprioritize_req_zone
;
397 /* I/O Re-prioritization thread */
398 int io_reprioritize_wakeup
= 0;
399 static void io_reprioritize_thread(void *param __unused
, wait_result_t wr __unused
);
401 #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
402 #define IO_REPRIO_THREAD_CONTINUATION() \
404 assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \
405 thread_block(io_reprioritize_thread); \
408 void vm_page_request_reprioritize(vm_object_t
, uint64_t, uint32_t, int);
409 void vm_page_handle_prio_inversion(vm_object_t
, vm_page_t
);
410 void vm_decmp_upl_reprioritize(upl_t
, int);
415 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
420 vm_object_hash_lock_spin(
421 memory_object_t pager
)
425 index
= vm_object_lock_hash(pager
);
427 lck_mtx_lock_spin(&vm_object_hashed_lock_data
[index
]);
429 return (&vm_object_hashed_lock_data
[index
]);
433 vm_object_hash_unlock(lck_mtx_t
*lck
)
440 * vm_object_hash_lookup looks up a pager in the hashtable
441 * and returns the corresponding entry, with optional removal.
443 static vm_object_hash_entry_t
444 vm_object_hash_lookup(
445 memory_object_t pager
,
446 boolean_t remove_entry
)
449 vm_object_hash_entry_t entry
;
451 bucket
= &vm_object_hashtable
[vm_object_hash(pager
)];
453 entry
= (vm_object_hash_entry_t
)queue_first(bucket
);
454 while (!queue_end(bucket
, (queue_entry_t
)entry
)) {
455 if (entry
->pager
== pager
) {
457 queue_remove(bucket
, entry
,
458 vm_object_hash_entry_t
, hash_link
);
462 entry
= (vm_object_hash_entry_t
)queue_next(&entry
->hash_link
);
464 return(VM_OBJECT_HASH_ENTRY_NULL
);
468 * vm_object_hash_enter enters the specified
469 * pager / cache object association in the hashtable.
473 vm_object_hash_insert(
474 vm_object_hash_entry_t entry
,
479 assert(vm_object_hash_lookup(entry
->pager
, FALSE
) == NULL
);
481 bucket
= &vm_object_hashtable
[vm_object_hash(entry
->pager
)];
483 queue_enter(bucket
, entry
, vm_object_hash_entry_t
, hash_link
);
485 if (object
->hashed
) {
487 * "hashed" was pre-set on this (new) object to avoid
488 * locking issues in vm_object_enter() (can't attempt to
489 * grab the object lock while holding the hash lock as
490 * a spinlock), so no need to set it here (and no need to
491 * hold the object's lock).
494 vm_object_lock_assert_exclusive(object
);
495 object
->hashed
= TRUE
;
498 entry
->object
= object
;
501 static vm_object_hash_entry_t
502 vm_object_hash_entry_alloc(
503 memory_object_t pager
)
505 vm_object_hash_entry_t entry
;
507 entry
= (vm_object_hash_entry_t
)zalloc(vm_object_hash_zone
);
508 entry
->pager
= pager
;
509 entry
->object
= VM_OBJECT_NULL
;
510 entry
->waiting
= FALSE
;
516 vm_object_hash_entry_free(
517 vm_object_hash_entry_t entry
)
519 zfree(vm_object_hash_zone
, entry
);
523 * vm_object_allocate:
525 * Returns a new object with the given size.
528 __private_extern__
void
530 vm_object_size_t size
,
534 "vm_object_allocate, object 0x%X size 0x%X\n",
535 object
, size
, 0,0,0);
537 *object
= vm_object_template
;
538 vm_page_queue_init(&object
->memq
);
539 queue_init(&object
->msr_q
);
540 #if UPL_DEBUG || CONFIG_IOSCHED
541 queue_init(&object
->uplq
);
543 vm_object_lock_init(object
);
544 object
->vo_size
= size
;
546 #if VM_OBJECT_TRACKING_OP_CREATED
547 if (vm_object_tracking_inited
) {
548 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
551 numsaved
= OSBacktrace(bt
, VM_OBJECT_TRACKING_BTDEPTH
);
552 btlog_add_entry(vm_object_tracking_btlog
,
554 VM_OBJECT_TRACKING_OP_CREATED
,
558 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
561 __private_extern__ vm_object_t
563 vm_object_size_t size
)
567 object
= (vm_object_t
) zalloc(vm_object_zone
);
569 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
571 if (object
!= VM_OBJECT_NULL
)
572 _vm_object_allocate(size
, object
);
578 lck_grp_t vm_object_lck_grp
;
579 lck_grp_t vm_object_cache_lck_grp
;
580 lck_grp_attr_t vm_object_lck_grp_attr
;
581 lck_attr_t vm_object_lck_attr
;
582 lck_attr_t kernel_object_lck_attr
;
583 lck_attr_t compressor_object_lck_attr
;
586 * vm_object_bootstrap:
588 * Initialize the VM objects module.
590 __private_extern__
void
591 vm_object_bootstrap(void)
594 vm_size_t vm_object_size
;
596 vm_object_size
= (sizeof(struct vm_object
) + (VM_PACKED_POINTER_ALIGNMENT
-1)) & ~(VM_PACKED_POINTER_ALIGNMENT
- 1);
598 vm_object_zone
= zinit(vm_object_size
,
599 round_page(512*1024),
602 zone_change(vm_object_zone
, Z_CALLERACCT
, FALSE
); /* don't charge caller */
603 zone_change(vm_object_zone
, Z_NOENCRYPT
, TRUE
);
605 vm_object_init_lck_grp();
607 queue_init(&vm_object_cached_list
);
609 lck_mtx_init_ext(&vm_object_cached_lock_data
,
610 &vm_object_cached_lock_data_ext
,
611 &vm_object_cache_lck_grp
,
612 &vm_object_lck_attr
);
614 queue_init(&vm_object_reaper_queue
);
616 for (i
= 0; i
< VM_OBJECT_HASH_LOCK_COUNT
; i
++) {
617 lck_mtx_init_ext(&vm_object_hashed_lock_data
[i
],
618 &vm_object_hashed_lock_data_ext
[i
],
620 &vm_object_lck_attr
);
622 lck_mtx_init_ext(&vm_object_reaper_lock_data
,
623 &vm_object_reaper_lock_data_ext
,
625 &vm_object_lck_attr
);
627 vm_object_hash_zone
=
628 zinit((vm_size_t
) sizeof (struct vm_object_hash_entry
),
629 round_page(512*1024),
631 "vm object hash entries");
632 zone_change(vm_object_hash_zone
, Z_CALLERACCT
, FALSE
);
633 zone_change(vm_object_hash_zone
, Z_NOENCRYPT
, TRUE
);
635 for (i
= 0; i
< VM_OBJECT_HASH_COUNT
; i
++)
636 queue_init(&vm_object_hashtable
[i
]);
640 * Fill in a template object, for quick initialization
643 /* memq; Lock; init after allocation */
646 vm_object_template
.memq
.prev
= 0;
647 vm_object_template
.memq
.next
= 0;
650 * We can't call vm_object_lock_init() here because that will
651 * allocate some memory and VM is not fully initialized yet.
652 * The lock will be initialized for each allocated object in
653 * _vm_object_allocate(), so we don't need to initialize it in
654 * the vm_object_template.
656 vm_object_lock_init(&vm_object_template
);
658 #if DEVELOPMENT || DEBUG
659 vm_object_template
.Lock_owner
= 0;
661 vm_object_template
.vo_size
= 0;
662 vm_object_template
.memq_hint
= VM_PAGE_NULL
;
663 vm_object_template
.ref_count
= 1;
665 vm_object_template
.res_count
= 1;
666 #endif /* TASK_SWAPPER */
667 vm_object_template
.resident_page_count
= 0;
668 vm_object_template
.wired_page_count
= 0;
669 vm_object_template
.reusable_page_count
= 0;
670 vm_object_template
.copy
= VM_OBJECT_NULL
;
671 vm_object_template
.shadow
= VM_OBJECT_NULL
;
672 vm_object_template
.vo_shadow_offset
= (vm_object_offset_t
) 0;
673 vm_object_template
.pager
= MEMORY_OBJECT_NULL
;
674 vm_object_template
.paging_offset
= 0;
675 vm_object_template
.pager_control
= MEMORY_OBJECT_CONTROL_NULL
;
676 vm_object_template
.copy_strategy
= MEMORY_OBJECT_COPY_SYMMETRIC
;
677 vm_object_template
.paging_in_progress
= 0;
679 vm_object_template
.__object1_unused_bits
= 0;
680 #endif /* __LP64__ */
681 vm_object_template
.activity_in_progress
= 0;
683 /* Begin bitfields */
684 vm_object_template
.all_wanted
= 0; /* all bits FALSE */
685 vm_object_template
.pager_created
= FALSE
;
686 vm_object_template
.pager_initialized
= FALSE
;
687 vm_object_template
.pager_ready
= FALSE
;
688 vm_object_template
.pager_trusted
= FALSE
;
689 vm_object_template
.can_persist
= FALSE
;
690 vm_object_template
.internal
= TRUE
;
691 vm_object_template
.temporary
= TRUE
;
692 vm_object_template
.private = FALSE
;
693 vm_object_template
.pageout
= FALSE
;
694 vm_object_template
.alive
= TRUE
;
695 vm_object_template
.purgable
= VM_PURGABLE_DENY
;
696 vm_object_template
.purgeable_when_ripe
= FALSE
;
697 vm_object_template
.shadowed
= FALSE
;
698 vm_object_template
.advisory_pageout
= FALSE
;
699 vm_object_template
.true_share
= FALSE
;
700 vm_object_template
.terminating
= FALSE
;
701 vm_object_template
.named
= FALSE
;
702 vm_object_template
.shadow_severed
= FALSE
;
703 vm_object_template
.phys_contiguous
= FALSE
;
704 vm_object_template
.nophyscache
= FALSE
;
707 vm_object_template
.cached_list
.prev
= NULL
;
708 vm_object_template
.cached_list
.next
= NULL
;
709 vm_object_template
.msr_q
.prev
= NULL
;
710 vm_object_template
.msr_q
.next
= NULL
;
712 vm_object_template
.last_alloc
= (vm_object_offset_t
) 0;
713 vm_object_template
.sequential
= (vm_object_offset_t
) 0;
714 vm_object_template
.pages_created
= 0;
715 vm_object_template
.pages_used
= 0;
716 vm_object_template
.scan_collisions
= 0;
717 #if CONFIG_PHANTOM_CACHE
718 vm_object_template
.phantom_object_id
= 0;
720 vm_object_template
.cow_hint
= ~(vm_offset_t
)0;
722 vm_object_template
.paging_object
= VM_OBJECT_NULL
;
723 #endif /* MACH_ASSERT */
725 /* cache bitfields */
726 vm_object_template
.wimg_bits
= VM_WIMG_USE_DEFAULT
;
727 vm_object_template
.set_cache_attr
= FALSE
;
728 vm_object_template
.object_slid
= FALSE
;
729 vm_object_template
.code_signed
= FALSE
;
730 vm_object_template
.hashed
= FALSE
;
731 vm_object_template
.transposed
= FALSE
;
732 vm_object_template
.mapping_in_progress
= FALSE
;
733 vm_object_template
.phantom_isssd
= FALSE
;
734 vm_object_template
.volatile_empty
= FALSE
;
735 vm_object_template
.volatile_fault
= FALSE
;
736 vm_object_template
.all_reusable
= FALSE
;
737 vm_object_template
.blocked_access
= FALSE
;
738 vm_object_template
.__object2_unused_bits
= 0;
739 #if CONFIG_IOSCHED || UPL_DEBUG
740 vm_object_template
.uplq
.prev
= NULL
;
741 vm_object_template
.uplq
.next
= NULL
;
742 #endif /* UPL_DEBUG */
744 bzero(&vm_object_template
.pip_holders
,
745 sizeof (vm_object_template
.pip_holders
));
746 #endif /* VM_PIP_DEBUG */
748 vm_object_template
.objq
.next
= NULL
;
749 vm_object_template
.objq
.prev
= NULL
;
751 vm_object_template
.purgeable_queue_type
= PURGEABLE_Q_TYPE_MAX
;
752 vm_object_template
.purgeable_queue_group
= 0;
754 vm_object_template
.vo_cache_ts
= 0;
756 vm_object_template
.wire_tag
= VM_KERN_MEMORY_NONE
;
758 vm_object_template
.io_tracking
= FALSE
;
760 #if CONFIG_SECLUDED_MEMORY
761 vm_object_template
.eligible_for_secluded
= FALSE
;
762 vm_object_template
.can_grab_secluded
= FALSE
;
763 #else /* CONFIG_SECLUDED_MEMORY */
764 vm_object_template
.__object3_unused_bits
= 0;
765 #endif /* CONFIG_SECLUDED_MEMORY */
768 bzero(&vm_object_template
.purgeable_owner_bt
[0],
769 sizeof (vm_object_template
.purgeable_owner_bt
));
770 vm_object_template
.vo_purgeable_volatilizer
= NULL
;
771 bzero(&vm_object_template
.purgeable_volatilizer_bt
[0],
772 sizeof (vm_object_template
.purgeable_volatilizer_bt
));
776 * Initialize the "kernel object"
779 kernel_object
= &kernel_object_store
;
782 * Note that in the following size specifications, we need to add 1 because
783 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
786 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
789 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
791 kernel_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
792 compressor_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
795 * Initialize the "submap object". Make it as large as the
796 * kernel object so that no limit is imposed on submap sizes.
799 vm_submap_object
= &vm_submap_object_store
;
800 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
802 vm_submap_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
805 * Create an "extra" reference to this object so that we never
806 * try to deallocate it; zfree doesn't like to be called with
809 vm_object_reference(vm_submap_object
);
814 vm_io_reprioritize_init(void)
816 kern_return_t result
;
817 thread_t thread
= THREAD_NULL
;
819 /* Initialze the I/O reprioritization subsystem */
820 lck_spin_init(&io_reprioritize_list_lock
, &vm_object_lck_grp
, &vm_object_lck_attr
);
821 queue_init(&io_reprioritize_list
);
823 io_reprioritize_req_zone
= zinit(sizeof(struct io_reprioritize_req
),
824 MAX_IO_REPRIORITIZE_REQS
* sizeof(struct io_reprioritize_req
),
825 4096, "io_reprioritize_req");
826 zone_change(io_reprioritize_req_zone
, Z_COLLECT
, FALSE
);
828 result
= kernel_thread_start_priority(io_reprioritize_thread
, NULL
, 95 /* MAXPRI_KERNEL */, &thread
);
829 if (result
== KERN_SUCCESS
) {
830 thread_deallocate(thread
);
832 panic("Could not create io_reprioritize_thread");
838 vm_object_reaper_init(void)
843 kr
= kernel_thread_start_priority(
844 (thread_continue_t
) vm_object_reaper_thread
,
848 if (kr
!= KERN_SUCCESS
) {
849 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr
);
851 thread_deallocate(thread
);
854 __private_extern__
void
858 * Finish initializing the kernel object.
863 __private_extern__
void
864 vm_object_init_lck_grp(void)
867 * initialze the vm_object lock world
869 lck_grp_attr_setdefault(&vm_object_lck_grp_attr
);
870 lck_grp_init(&vm_object_lck_grp
, "vm_object", &vm_object_lck_grp_attr
);
871 lck_grp_init(&vm_object_cache_lck_grp
, "vm_object_cache", &vm_object_lck_grp_attr
);
872 lck_attr_setdefault(&vm_object_lck_attr
);
873 lck_attr_setdefault(&kernel_object_lck_attr
);
874 lck_attr_cleardebug(&kernel_object_lck_attr
);
875 lck_attr_setdefault(&compressor_object_lck_attr
);
876 lck_attr_cleardebug(&compressor_object_lck_attr
);
880 #define MIGHT_NOT_CACHE_SHADOWS 1
881 #if MIGHT_NOT_CACHE_SHADOWS
882 static int cache_shadows
= TRUE
;
883 #endif /* MIGHT_NOT_CACHE_SHADOWS */
887 * vm_object_deallocate:
889 * Release a reference to the specified object,
890 * gained either through a vm_object_allocate
891 * or a vm_object_reference call. When all references
892 * are gone, storage associated with this object
893 * may be relinquished.
895 * No object may be locked.
897 unsigned long vm_object_deallocate_shared_successes
= 0;
898 unsigned long vm_object_deallocate_shared_failures
= 0;
899 unsigned long vm_object_deallocate_shared_swap_failures
= 0;
901 __private_extern__
void
902 vm_object_deallocate(
906 boolean_t retry_cache_trim
= FALSE
;
907 uint32_t try_failed_count
= 0;
909 vm_object_t shadow
= VM_OBJECT_NULL
;
911 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
912 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
914 if (object
== VM_OBJECT_NULL
)
917 if (object
== kernel_object
|| object
== compressor_object
) {
918 vm_object_lock_shared(object
);
920 OSAddAtomic(-1, &object
->ref_count
);
922 if (object
->ref_count
== 0) {
923 if (object
== kernel_object
)
924 panic("vm_object_deallocate: losing kernel_object\n");
926 panic("vm_object_deallocate: losing compressor_object\n");
928 vm_object_unlock(object
);
932 if (object
->ref_count
== 2 &&
935 * This "named" object's reference count is about to
937 * we'll need to call memory_object_last_unmap().
939 } else if (object
->ref_count
== 2 &&
941 object
->shadow
!= VM_OBJECT_NULL
) {
943 * This internal object's reference count is about to
944 * drop from 2 to 1 and it has a shadow object:
945 * we'll want to try and collapse this object with its
948 } else if (object
->ref_count
>= 2) {
949 UInt32 original_ref_count
;
950 volatile UInt32
*ref_count_p
;
954 * The object currently looks like it is not being
955 * kept alive solely by the reference we're about to release.
956 * Let's try and release our reference without taking
957 * all the locks we would need if we had to terminate the
958 * object (cache lock + exclusive object lock).
959 * Lock the object "shared" to make sure we don't race with
960 * anyone holding it "exclusive".
962 vm_object_lock_shared(object
);
963 ref_count_p
= (volatile UInt32
*) &object
->ref_count
;
964 original_ref_count
= object
->ref_count
;
966 * Test again as "ref_count" could have changed.
967 * "named" shouldn't change.
969 if (original_ref_count
== 2 &&
971 /* need to take slow path for m_o_last_unmap() */
973 } else if (original_ref_count
== 2 &&
975 object
->shadow
!= VM_OBJECT_NULL
) {
976 /* need to take slow path for vm_object_collapse() */
978 } else if (original_ref_count
< 2) {
979 /* need to take slow path for vm_object_terminate() */
982 /* try an atomic update with the shared lock */
983 atomic_swap
= OSCompareAndSwap(
985 original_ref_count
- 1,
986 (UInt32
*) &object
->ref_count
);
987 if (atomic_swap
== FALSE
) {
988 vm_object_deallocate_shared_swap_failures
++;
989 /* fall back to the slow path... */
993 vm_object_unlock(object
);
997 * ref_count was updated atomically !
999 vm_object_deallocate_shared_successes
++;
1004 * Someone else updated the ref_count at the same
1005 * time and we lost the race. Fall back to the usual
1006 * slow but safe path...
1008 vm_object_deallocate_shared_failures
++;
1011 while (object
!= VM_OBJECT_NULL
) {
1013 vm_object_lock(object
);
1015 assert(object
->ref_count
> 0);
1018 * If the object has a named reference, and only
1019 * that reference would remain, inform the pager
1020 * about the last "mapping" reference going away.
1022 if ((object
->ref_count
== 2) && (object
->named
)) {
1023 memory_object_t pager
= object
->pager
;
1025 /* Notify the Pager that there are no */
1026 /* more mappers for this object */
1028 if (pager
!= MEMORY_OBJECT_NULL
) {
1029 vm_object_mapping_wait(object
, THREAD_UNINT
);
1030 vm_object_mapping_begin(object
);
1031 vm_object_unlock(object
);
1033 memory_object_last_unmap(pager
);
1035 vm_object_lock(object
);
1036 vm_object_mapping_end(object
);
1038 assert(object
->ref_count
> 0);
1042 * Lose the reference. If other references
1043 * remain, then we are done, unless we need
1044 * to retry a cache trim.
1045 * If it is the last reference, then keep it
1046 * until any pending initialization is completed.
1049 /* if the object is terminating, it cannot go into */
1050 /* the cache and we obviously should not call */
1051 /* terminate again. */
1053 if ((object
->ref_count
> 1) || object
->terminating
) {
1054 vm_object_lock_assert_exclusive(object
);
1055 object
->ref_count
--;
1056 vm_object_res_deallocate(object
);
1058 if (object
->ref_count
== 1 &&
1059 object
->shadow
!= VM_OBJECT_NULL
) {
1061 * There's only one reference left on this
1062 * VM object. We can't tell if it's a valid
1063 * one (from a mapping for example) or if this
1064 * object is just part of a possibly stale and
1065 * useless shadow chain.
1066 * We would like to try and collapse it into
1067 * its parent, but we don't have any pointers
1068 * back to this parent object.
1069 * But we can try and collapse this object with
1070 * its own shadows, in case these are useless
1072 * We can't bypass this object though, since we
1073 * don't know if this last reference on it is
1074 * meaningful or not.
1076 vm_object_collapse(object
, 0, FALSE
);
1078 vm_object_unlock(object
);
1080 if (retry_cache_trim
&&
1081 ((object
= vm_object_cache_trim(TRUE
)) !=
1090 * We have to wait for initialization
1091 * before destroying or caching the object.
1094 if (object
->pager_created
&& ! object
->pager_initialized
) {
1095 assert(! object
->can_persist
);
1096 vm_object_assert_wait(object
,
1097 VM_OBJECT_EVENT_INITIALIZED
,
1099 vm_object_unlock(object
);
1101 thread_block(THREAD_CONTINUE_NULL
);
1107 * If this object can persist, then enter it in
1108 * the cache. Otherwise, terminate it.
1110 * NOTE: Only permanent objects are cached, and
1111 * permanent objects cannot have shadows. This
1112 * affects the residence counting logic in a minor
1113 * way (can do it in-line, mostly).
1116 if ((object
->can_persist
) && (object
->alive
)) {
1118 * Now it is safe to decrement reference count,
1119 * and to return if reference count is > 0.
1122 vm_object_lock_assert_exclusive(object
);
1123 if (--object
->ref_count
> 0) {
1124 vm_object_res_deallocate(object
);
1125 vm_object_unlock(object
);
1127 if (retry_cache_trim
&&
1128 ((object
= vm_object_cache_trim(TRUE
)) !=
1135 #if MIGHT_NOT_CACHE_SHADOWS
1137 * Remove shadow now if we don't
1138 * want to cache shadows.
1140 if (! cache_shadows
) {
1141 shadow
= object
->shadow
;
1142 object
->shadow
= VM_OBJECT_NULL
;
1144 #endif /* MIGHT_NOT_CACHE_SHADOWS */
1147 * Enter the object onto the queue of
1148 * cached objects, and deactivate
1151 assert(object
->shadow
== VM_OBJECT_NULL
);
1152 VM_OBJ_RES_DECR(object
);
1154 "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
1156 vm_object_cached_list
.next
,
1157 vm_object_cached_list
.prev
,0,0);
1160 vm_object_unlock(object
);
1162 try_failed_count
= 0;
1164 vm_object_cache_lock();
1167 * if we try to take a regular lock here
1168 * we risk deadlocking against someone
1169 * holding a lock on this object while
1170 * trying to vm_object_deallocate a different
1173 if (vm_object_lock_try(object
))
1175 vm_object_cache_unlock();
1178 mutex_pause(try_failed_count
); /* wait a bit */
1180 vm_object_cached_count
++;
1181 if (vm_object_cached_count
> vm_object_cached_high
)
1182 vm_object_cached_high
= vm_object_cached_count
;
1183 queue_enter(&vm_object_cached_list
, object
,
1184 vm_object_t
, cached_list
);
1185 vm_object_cache_unlock();
1187 vm_object_deactivate_all_pages(object
);
1188 vm_object_unlock(object
);
1190 #if MIGHT_NOT_CACHE_SHADOWS
1192 * If we have a shadow that we need
1193 * to deallocate, do so now, remembering
1194 * to trim the cache later.
1196 if (! cache_shadows
&& shadow
!= VM_OBJECT_NULL
) {
1198 retry_cache_trim
= TRUE
;
1201 #endif /* MIGHT_NOT_CACHE_SHADOWS */
1204 * Trim the cache. If the cache trim
1205 * returns with a shadow for us to deallocate,
1206 * then remember to retry the cache trim
1207 * when we are done deallocating the shadow.
1208 * Otherwise, we are done.
1211 object
= vm_object_cache_trim(TRUE
);
1212 if (object
== VM_OBJECT_NULL
) {
1215 retry_cache_trim
= TRUE
;
1217 #endif /* VM_OBJECT_CACHE */
1220 * This object is not cachable; terminate it.
1223 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
1224 object
, object
->resident_page_count
,
1225 object
->paging_in_progress
,
1226 (void *)current_thread(),object
->ref_count
);
1228 VM_OBJ_RES_DECR(object
); /* XXX ? */
1230 * Terminate this object. If it had a shadow,
1231 * then deallocate it; otherwise, if we need
1232 * to retry a cache trim, do so now; otherwise,
1233 * we are done. "pageout" objects have a shadow,
1234 * but maintain a "paging reference" rather than
1235 * a normal reference.
1237 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
1239 if (vm_object_terminate(object
) != KERN_SUCCESS
) {
1242 if (shadow
!= VM_OBJECT_NULL
) {
1247 if (retry_cache_trim
&&
1248 ((object
= vm_object_cache_trim(TRUE
)) !=
1257 assert(! retry_cache_trim
);
1264 vm_object_page_grab(
1267 vm_page_t p
, next_p
;
1271 vm_object_lock_assert_exclusive(object
);
1273 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1274 p_limit
= MIN(50, object
->resident_page_count
);
1276 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && --p_limit
> 0) {
1279 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->listq
);
1281 if (VM_PAGE_WIRED(p
) || p
->busy
|| p
->cleaning
|| p
->laundry
|| p
->fictitious
)
1282 goto move_page_in_obj
;
1284 if (p
->pmapped
|| p
->dirty
|| p
->precious
) {
1285 vm_page_lockspin_queues();
1290 vm_object_page_grab_pmapped
++;
1292 if (p
->reference
== FALSE
|| p
->dirty
== FALSE
) {
1294 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p
));
1296 if (refmod_state
& VM_MEM_REFERENCED
)
1297 p
->reference
= TRUE
;
1298 if (refmod_state
& VM_MEM_MODIFIED
) {
1299 SET_PAGE_DIRTY(p
, FALSE
);
1302 if (p
->dirty
== FALSE
&& p
->precious
== FALSE
) {
1304 refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p
));
1306 if (refmod_state
& VM_MEM_REFERENCED
)
1307 p
->reference
= TRUE
;
1308 if (refmod_state
& VM_MEM_MODIFIED
) {
1309 SET_PAGE_DIRTY(p
, FALSE
);
1312 if (p
->dirty
== FALSE
)
1316 if ((p
->vm_page_q_state
!= VM_PAGE_ON_ACTIVE_Q
) && p
->reference
== TRUE
) {
1317 vm_page_activate(p
);
1319 VM_STAT_INCR(reactivations
);
1320 vm_object_page_grab_reactivations
++;
1322 vm_page_unlock_queues();
1324 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
1325 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
1330 vm_page_lockspin_queues();
1332 vm_page_free_prepare_queues(p
);
1333 vm_object_page_grab_returned
++;
1334 vm_object_page_grab_skipped
+= p_skipped
;
1336 vm_page_unlock_queues();
1338 vm_page_free_prepare_object(p
, TRUE
);
1342 vm_object_page_grab_skipped
+= p_skipped
;
1343 vm_object_page_grab_failed
++;
1350 #define EVICT_PREPARE_LIMIT 64
1351 #define EVICT_AGE 10
1353 static clock_sec_t vm_object_cache_aging_ts
= 0;
1356 vm_object_cache_remove_locked(
1359 assert(object
->purgable
== VM_PURGABLE_DENY
);
1360 assert(object
->wired_page_count
== 0);
1362 queue_remove(&vm_object_cached_list
, object
, vm_object_t
, objq
);
1363 object
->objq
.next
= NULL
;
1364 object
->objq
.prev
= NULL
;
1366 vm_object_cached_count
--;
1370 vm_object_cache_remove(
1373 vm_object_cache_lock_spin();
1375 if (object
->objq
.next
|| object
->objq
.prev
)
1376 vm_object_cache_remove_locked(object
);
1378 vm_object_cache_unlock();
1382 vm_object_cache_add(
1388 assert(object
->purgable
== VM_PURGABLE_DENY
);
1389 assert(object
->wired_page_count
== 0);
1391 if (object
->resident_page_count
== 0)
1393 clock_get_system_nanotime(&sec
, &nsec
);
1395 vm_object_cache_lock_spin();
1397 if (object
->objq
.next
== NULL
&& object
->objq
.prev
== NULL
) {
1398 queue_enter(&vm_object_cached_list
, object
, vm_object_t
, objq
);
1399 object
->vo_cache_ts
= sec
+ EVICT_AGE
;
1400 object
->vo_cache_pages_to_scan
= object
->resident_page_count
;
1402 vm_object_cached_count
++;
1403 vm_object_cache_adds
++;
1405 vm_object_cache_unlock();
1409 vm_object_cache_evict(
1411 int max_objects_to_examine
)
1413 vm_object_t object
= VM_OBJECT_NULL
;
1414 vm_object_t next_obj
= VM_OBJECT_NULL
;
1415 vm_page_t local_free_q
= VM_PAGE_NULL
;
1419 vm_page_t ep_array
[EVICT_PREPARE_LIMIT
];
1425 uint32_t ep_skipped
= 0;
1429 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START
, 0, 0, 0, 0, 0);
1431 * do a couple of quick checks to see if it's
1432 * worthwhile grabbing the lock
1434 if (queue_empty(&vm_object_cached_list
)) {
1435 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1438 clock_get_system_nanotime(&sec
, &nsec
);
1441 * the object on the head of the queue has not
1442 * yet sufficiently aged
1444 if (sec
< vm_object_cache_aging_ts
) {
1445 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1449 * don't need the queue lock to find
1450 * and lock an object on the cached list
1452 vm_page_unlock_queues();
1454 vm_object_cache_lock_spin();
1457 next_obj
= (vm_object_t
)queue_first(&vm_object_cached_list
);
1459 while (!queue_end(&vm_object_cached_list
, (queue_entry_t
)next_obj
) && object_cnt
++ < max_objects_to_examine
) {
1462 next_obj
= (vm_object_t
)queue_next(&next_obj
->objq
);
1464 assert(object
->purgable
== VM_PURGABLE_DENY
);
1465 assert(object
->wired_page_count
== 0);
1467 if (sec
< object
->vo_cache_ts
) {
1468 KERNEL_DEBUG(0x130020c, object
, object
->resident_page_count
, object
->vo_cache_ts
, sec
, 0);
1470 vm_object_cache_aging_ts
= object
->vo_cache_ts
;
1471 object
= VM_OBJECT_NULL
;
1474 if (!vm_object_lock_try_scan(object
)) {
1476 * just skip over this guy for now... if we find
1477 * an object to steal pages from, we'll revist in a bit...
1478 * hopefully, the lock will have cleared
1480 KERNEL_DEBUG(0x13001f8, object
, object
->resident_page_count
, 0, 0, 0);
1482 object
= VM_OBJECT_NULL
;
1485 if (vm_page_queue_empty(&object
->memq
) || object
->vo_cache_pages_to_scan
== 0) {
1487 * this case really shouldn't happen, but it's not fatal
1488 * so deal with it... if we don't remove the object from
1489 * the list, we'll never move past it.
1491 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1493 vm_object_cache_remove_locked(object
);
1494 vm_object_unlock(object
);
1495 object
= VM_OBJECT_NULL
;
1499 * we have a locked object with pages...
1500 * time to start harvesting
1504 vm_object_cache_unlock();
1506 if (object
== VM_OBJECT_NULL
)
1510 * object is locked at this point and
1511 * has resident pages
1513 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1516 * break the page scan into 2 pieces to minimize the time spent
1517 * behind the page queue lock...
1518 * the list of pages on these unused objects is likely to be cold
1519 * w/r to the cpu cache which increases the time to scan the list
1520 * tenfold... and we may have a 'run' of pages we can't utilize that
1521 * needs to be skipped over...
1523 if ((ep_limit
= num_to_evict
- (ep_freed
+ ep_moved
)) > EVICT_PREPARE_LIMIT
)
1524 ep_limit
= EVICT_PREPARE_LIMIT
;
1527 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && object
->vo_cache_pages_to_scan
&& ep_count
< ep_limit
) {
1530 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->listq
);
1532 object
->vo_cache_pages_to_scan
--;
1534 if (VM_PAGE_WIRED(p
) || p
->busy
|| p
->cleaning
|| p
->laundry
) {
1535 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
1536 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
1541 if (p
->wpmapped
|| p
->dirty
|| p
->precious
) {
1542 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
1543 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
1545 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p
));
1547 ep_array
[ep_count
++] = p
;
1549 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1551 vm_page_lockspin_queues();
1553 for (ep_index
= 0; ep_index
< ep_count
; ep_index
++) {
1555 p
= ep_array
[ep_index
];
1557 if (p
->wpmapped
|| p
->dirty
|| p
->precious
) {
1558 p
->reference
= FALSE
;
1559 p
->no_cache
= FALSE
;
1562 * we've already filtered out pages that are in the laundry
1563 * so if we get here, this page can't be on the pageout queue
1565 vm_page_queues_remove(p
, FALSE
);
1566 vm_page_enqueue_inactive(p
, TRUE
);
1570 #if CONFIG_PHANTOM_CACHE
1571 vm_phantom_cache_add_ghost(p
);
1573 vm_page_free_prepare_queues(p
);
1575 assert(p
->pageq
.next
== 0 && p
->pageq
.prev
== 0);
1577 * Add this page to our list of reclaimed pages,
1578 * to be freed later.
1580 p
->snext
= local_free_q
;
1586 vm_page_unlock_queues();
1588 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1591 vm_page_free_list(local_free_q
, TRUE
);
1592 local_free_q
= VM_PAGE_NULL
;
1594 if (object
->vo_cache_pages_to_scan
== 0) {
1595 KERNEL_DEBUG(0x1300208, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1597 vm_object_cache_remove(object
);
1599 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1602 * done with this object
1604 vm_object_unlock(object
);
1605 object
= VM_OBJECT_NULL
;
1608 * at this point, we are not holding any locks
1610 if ((ep_freed
+ ep_moved
) >= num_to_evict
) {
1612 * we've reached our target for the
1613 * number of pages to evict
1617 vm_object_cache_lock_spin();
1620 * put the page queues lock back to the caller's
1623 vm_page_lock_queues();
1625 vm_object_cache_pages_freed
+= ep_freed
;
1626 vm_object_cache_pages_moved
+= ep_moved
;
1627 vm_object_cache_pages_skipped
+= ep_skipped
;
1629 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, ep_freed
, 0, 0, 0, 0);
1636 * Check to see whether we really need to trim
1637 * down the cache. If so, remove an object from
1638 * the cache, terminate it, and repeat.
1640 * Called with, and returns with, cache lock unlocked.
1643 vm_object_cache_trim(
1644 boolean_t called_from_vm_object_deallocate
)
1646 vm_object_t object
= VM_OBJECT_NULL
;
1652 * If we no longer need to trim the cache,
1655 if (vm_object_cached_count
<= vm_object_cached_max
)
1656 return VM_OBJECT_NULL
;
1658 vm_object_cache_lock();
1659 if (vm_object_cached_count
<= vm_object_cached_max
) {
1660 vm_object_cache_unlock();
1661 return VM_OBJECT_NULL
;
1665 * We must trim down the cache, so remove
1666 * the first object in the cache.
1669 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
1670 vm_object_cached_list
.next
,
1671 vm_object_cached_list
.prev
, 0, 0, 0);
1673 object
= (vm_object_t
) queue_first(&vm_object_cached_list
);
1674 if(object
== (vm_object_t
) &vm_object_cached_list
) {
1675 /* something's wrong with the calling parameter or */
1676 /* the value of vm_object_cached_count, just fix */
1678 if(vm_object_cached_max
< 0)
1679 vm_object_cached_max
= 0;
1680 vm_object_cached_count
= 0;
1681 vm_object_cache_unlock();
1682 return VM_OBJECT_NULL
;
1684 vm_object_lock(object
);
1685 queue_remove(&vm_object_cached_list
, object
, vm_object_t
,
1687 vm_object_cached_count
--;
1689 vm_object_cache_unlock();
1691 * Since this object is in the cache, we know
1692 * that it is initialized and has no references.
1693 * Take a reference to avoid recursive deallocations.
1696 assert(object
->pager_initialized
);
1697 assert(object
->ref_count
== 0);
1698 vm_object_lock_assert_exclusive(object
);
1699 object
->ref_count
++;
1702 * Terminate the object.
1703 * If the object had a shadow, we let vm_object_deallocate
1704 * deallocate it. "pageout" objects have a shadow, but
1705 * maintain a "paging reference" rather than a normal
1707 * (We are careful here to limit recursion.)
1709 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
1711 if(vm_object_terminate(object
) != KERN_SUCCESS
)
1714 if (shadow
!= VM_OBJECT_NULL
) {
1715 if (called_from_vm_object_deallocate
) {
1718 vm_object_deallocate(shadow
);
1727 * Routine: vm_object_terminate
1729 * Free all resources associated with a vm_object.
1730 * In/out conditions:
1731 * Upon entry, the object must be locked,
1732 * and the object must have exactly one reference.
1734 * The shadow object reference is left alone.
1736 * The object must be unlocked if its found that pages
1737 * must be flushed to a backing object. If someone
1738 * manages to map the object while it is being flushed
1739 * the object is returned unlocked and unchanged. Otherwise,
1740 * upon exit, the cache will be unlocked, and the
1741 * object will cease to exist.
1743 static kern_return_t
1744 vm_object_terminate(
1747 vm_object_t shadow_object
;
1749 XPR(XPR_VM_OBJECT
, "vm_object_terminate, object 0x%X ref %d\n",
1750 object
, object
->ref_count
, 0, 0, 0);
1752 vm_object_lock_assert_exclusive(object
);
1754 if (!object
->pageout
&& (!object
->temporary
|| object
->can_persist
) &&
1755 (object
->pager
!= NULL
|| object
->shadow_severed
)) {
1757 * Clear pager_trusted bit so that the pages get yanked
1758 * out of the object instead of cleaned in place. This
1759 * prevents a deadlock in XMM and makes more sense anyway.
1761 object
->pager_trusted
= FALSE
;
1763 vm_object_reap_pages(object
, REAP_TERMINATE
);
1766 * Make sure the object isn't already being terminated
1768 if (object
->terminating
) {
1769 vm_object_lock_assert_exclusive(object
);
1770 object
->ref_count
--;
1771 assert(object
->ref_count
> 0);
1772 vm_object_unlock(object
);
1773 return KERN_FAILURE
;
1777 * Did somebody get a reference to the object while we were
1780 if (object
->ref_count
!= 1) {
1781 vm_object_lock_assert_exclusive(object
);
1782 object
->ref_count
--;
1783 assert(object
->ref_count
> 0);
1784 vm_object_res_deallocate(object
);
1785 vm_object_unlock(object
);
1786 return KERN_FAILURE
;
1790 * Make sure no one can look us up now.
1793 object
->terminating
= TRUE
;
1794 object
->alive
= FALSE
;
1796 if ( !object
->internal
&& (object
->objq
.next
|| object
->objq
.prev
))
1797 vm_object_cache_remove(object
);
1799 if (object
->hashed
) {
1802 lck
= vm_object_hash_lock_spin(object
->pager
);
1803 vm_object_remove(object
);
1804 vm_object_hash_unlock(lck
);
1807 * Detach the object from its shadow if we are the shadow's
1808 * copy. The reference we hold on the shadow must be dropped
1811 if (((shadow_object
= object
->shadow
) != VM_OBJECT_NULL
) &&
1812 !(object
->pageout
)) {
1813 vm_object_lock(shadow_object
);
1814 if (shadow_object
->copy
== object
)
1815 shadow_object
->copy
= VM_OBJECT_NULL
;
1816 vm_object_unlock(shadow_object
);
1819 if (object
->paging_in_progress
!= 0 ||
1820 object
->activity_in_progress
!= 0) {
1822 * There are still some paging_in_progress references
1823 * on this object, meaning that there are some paging
1824 * or other I/O operations in progress for this VM object.
1825 * Such operations take some paging_in_progress references
1826 * up front to ensure that the object doesn't go away, but
1827 * they may also need to acquire a reference on the VM object,
1828 * to map it in kernel space, for example. That means that
1829 * they may end up releasing the last reference on the VM
1830 * object, triggering its termination, while still holding
1831 * paging_in_progress references. Waiting for these
1832 * pending paging_in_progress references to go away here would
1835 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1836 * complete the VM object termination if it still holds
1837 * paging_in_progress references at this point.
1839 * No new paging_in_progress should appear now that the
1840 * VM object is "terminating" and not "alive".
1842 vm_object_reap_async(object
);
1843 vm_object_unlock(object
);
1845 * Return KERN_FAILURE to let the caller know that we
1846 * haven't completed the termination and it can't drop this
1847 * object's reference on its shadow object yet.
1848 * The reaper thread will take care of that once it has
1849 * completed this object's termination.
1851 return KERN_FAILURE
;
1854 * complete the VM object termination
1856 vm_object_reap(object
);
1857 object
= VM_OBJECT_NULL
;
1860 * the object lock was released by vm_object_reap()
1862 * KERN_SUCCESS means that this object has been terminated
1863 * and no longer needs its shadow object but still holds a
1865 * The caller is responsible for dropping that reference.
1866 * We can't call vm_object_deallocate() here because that
1867 * would create a recursion.
1869 return KERN_SUCCESS
;
1876 * Complete the termination of a VM object after it's been marked
1877 * as "terminating" and "!alive" by vm_object_terminate().
1879 * The VM object must be locked by caller.
1880 * The lock will be released on return and the VM object is no longer valid.
1887 memory_object_t pager
;
1889 vm_object_lock_assert_exclusive(object
);
1890 assert(object
->paging_in_progress
== 0);
1891 assert(object
->activity_in_progress
== 0);
1893 vm_object_reap_count
++;
1896 * Disown this purgeable object to cleanup its owner's purgeable
1897 * ledgers. We need to do this before disconnecting the object
1898 * from its pager, to properly account for compressed pages.
1900 if (object
->internal
&&
1901 object
->purgable
!= VM_PURGABLE_DENY
) {
1902 vm_purgeable_accounting(object
,
1907 pager
= object
->pager
;
1908 object
->pager
= MEMORY_OBJECT_NULL
;
1910 if (pager
!= MEMORY_OBJECT_NULL
)
1911 memory_object_control_disable(object
->pager_control
);
1913 object
->ref_count
--;
1915 assert(object
->res_count
== 0);
1916 #endif /* TASK_SWAPPER */
1918 assert (object
->ref_count
== 0);
1921 * remove from purgeable queue if it's on
1923 if (object
->internal
) {
1926 owner
= object
->vo_purgeable_owner
;
1928 VM_OBJECT_UNWIRED(object
);
1930 if (object
->purgable
== VM_PURGABLE_DENY
) {
1931 /* not purgeable: nothing to do */
1932 } else if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1933 purgeable_q_t queue
;
1935 assert(object
->vo_purgeable_owner
== NULL
);
1937 queue
= vm_purgeable_object_remove(object
);
1940 if (object
->purgeable_when_ripe
) {
1942 * Must take page lock for this -
1943 * using it to protect token queue
1945 vm_page_lock_queues();
1946 vm_purgeable_token_delete_first(queue
);
1948 assert(queue
->debug_count_objects
>=0);
1949 vm_page_unlock_queues();
1953 * Update "vm_page_purgeable_count" in bulk and mark
1954 * object as VM_PURGABLE_EMPTY to avoid updating
1955 * "vm_page_purgeable_count" again in vm_page_remove()
1956 * when reaping the pages.
1959 assert(object
->resident_page_count
>=
1960 object
->wired_page_count
);
1961 delta
= (object
->resident_page_count
-
1962 object
->wired_page_count
);
1964 assert(vm_page_purgeable_count
>= delta
);
1966 (SInt32
*)&vm_page_purgeable_count
);
1968 if (object
->wired_page_count
!= 0) {
1969 assert(vm_page_purgeable_wired_count
>=
1970 object
->wired_page_count
);
1971 OSAddAtomic(-object
->wired_page_count
,
1972 (SInt32
*)&vm_page_purgeable_wired_count
);
1974 object
->purgable
= VM_PURGABLE_EMPTY
;
1976 else if (object
->purgable
== VM_PURGABLE_NONVOLATILE
||
1977 object
->purgable
== VM_PURGABLE_EMPTY
) {
1978 /* remove from nonvolatile queue */
1979 assert(object
->vo_purgeable_owner
== TASK_NULL
);
1980 vm_purgeable_nonvolatile_dequeue(object
);
1982 panic("object %p in unexpected purgeable state 0x%x\n",
1983 object
, object
->purgable
);
1985 assert(object
->objq
.next
== NULL
);
1986 assert(object
->objq
.prev
== NULL
);
1990 * Clean or free the pages, as appropriate.
1991 * It is possible for us to find busy/absent pages,
1992 * if some faults on this object were aborted.
1994 if (object
->pageout
) {
1995 assert(object
->shadow
!= VM_OBJECT_NULL
);
1997 vm_pageout_object_terminate(object
);
1999 } else if (((object
->temporary
&& !object
->can_persist
) || (pager
== MEMORY_OBJECT_NULL
))) {
2001 vm_object_reap_pages(object
, REAP_REAP
);
2003 assert(vm_page_queue_empty(&object
->memq
));
2004 assert(object
->paging_in_progress
== 0);
2005 assert(object
->activity_in_progress
== 0);
2006 assert(object
->ref_count
== 0);
2009 * If the pager has not already been released by
2010 * vm_object_destroy, we need to terminate it and
2011 * release our reference to it here.
2013 if (pager
!= MEMORY_OBJECT_NULL
) {
2014 vm_object_unlock(object
);
2015 vm_object_release_pager(pager
, object
->hashed
);
2016 vm_object_lock(object
);
2019 /* kick off anyone waiting on terminating */
2020 object
->terminating
= FALSE
;
2021 vm_object_paging_begin(object
);
2022 vm_object_paging_end(object
);
2023 vm_object_unlock(object
);
2025 object
->shadow
= VM_OBJECT_NULL
;
2027 #if VM_OBJECT_TRACKING
2028 if (vm_object_tracking_inited
) {
2029 btlog_remove_entries_for_element(vm_object_tracking_btlog
,
2032 #endif /* VM_OBJECT_TRACKING */
2034 vm_object_lock_destroy(object
);
2036 * Free the space for the object.
2038 zfree(vm_object_zone
, object
);
2039 object
= VM_OBJECT_NULL
;
2043 unsigned int vm_max_batch
= 256;
2045 #define V_O_R_MAX_BATCH 128
2047 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
2050 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
2052 if (_local_free_q) { \
2053 if (do_disconnect) { \
2055 for (m = _local_free_q; \
2056 m != VM_PAGE_NULL; \
2059 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
2063 vm_page_free_list(_local_free_q, TRUE); \
2064 _local_free_q = VM_PAGE_NULL; \
void
vm_object_reap_pages(
	vm_object_t 	object,
	int		reap_type)
{
	vm_page_t	p;
	vm_page_t	next;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	int		loop_count;
	boolean_t	disconnect_on_release;
	pmap_flush_context	pmap_flush_context_storage;

	if (reap_type == REAP_DATA_FLUSH) {
		/*
		 * We need to disconnect pages from all pmaps before
		 * releasing them to the free list
		 */
		disconnect_on_release = TRUE;
	} else {
		/*
		 * Either the caller has already disconnected the pages
		 * from all pmaps, or we disconnect them here as we add
		 * them to our local list of pages to be released.
		 * No need to re-disconnect them when we release the pages.
		 */
		disconnect_on_release = FALSE;
	}

restart_after_sleep:
	if (vm_page_queue_empty(&object->memq))
		return;
	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

	if (reap_type == REAP_PURGEABLE)
		pmap_flush_context_init(&pmap_flush_context_storage);

	vm_page_lockspin_queues();

	next = (vm_page_t)vm_page_queue_first(&object->memq);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {

		p = next;
		next = (vm_page_t)vm_page_queue_next(&next->listq);

		if (--loop_count == 0) {

			vm_page_unlock_queues();

			if (reap_type == REAP_PURGEABLE) {
				pmap_flush(&pmap_flush_context_storage);
				pmap_flush_context_init(&pmap_flush_context_storage);
			}
			/*
			 * Free the pages we reclaimed so far
			 * and take a little break to avoid
			 * hogging the page queue lock too long
			 */
			VM_OBJ_REAP_FREELIST(local_free_q,
					     disconnect_on_release);

			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

			vm_page_lockspin_queues();
		}
		if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {

			if (p->busy || p->cleaning) {

				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
						     disconnect_on_release);

				PAGE_SLEEP(object, p, THREAD_UNINT);

				goto restart_after_sleep;
			}
			if (p->laundry)
				vm_pageout_steal_laundry(p, TRUE);
		}
		switch (reap_type) {

		case REAP_DATA_FLUSH:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * this is an odd case... perhaps we should
				 * zero-fill this page since we're conceptually
				 * tossing its data at this point, but leaving
				 * it on the object to honor the 'wire' contract
				 */
				continue;
			}
			break;

		case REAP_PURGEABLE:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * can't purge a wired page
				 */
				vm_page_purged_wired++;
				continue;
			}
			if (p->laundry && !p->busy && !p->cleaning)
				vm_pageout_steal_laundry(p, TRUE);

			if (p->cleaning || p->laundry || p->absent) {
				/*
				 * page is being acted upon,
				 * so don't mess with it
				 */
				vm_page_purged_others++;
				continue;
			}
			if (p->busy) {
				/*
				 * We can't reclaim a busy page but we can
				 * make it more likely to be paged (it's not wired) to make
				 * sure that it gets considered by
				 * vm_pageout_scan() later.
				 */
				if (VM_PAGE_PAGEABLE(p))
					vm_page_deactivate(p);
				vm_page_purged_busy++;
				continue;
			}
			assert(VM_PAGE_OBJECT(p) != kernel_object);

			/*
			 * we can discard this page...
			 */
			if (p->pmapped == TRUE) {
				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
			}
			vm_page_purged_count++;

			break;

		case REAP_TERMINATE:
			if (p->absent || p->private) {
				/*
				 * For private pages, VM_PAGE_FREE just
				 * leaves the page structure around for
				 * its owner to clean up.  For absent
				 * pages, the structure is returned to
				 * the appropriate pool.
				 */
				break;
			}
			if (p->fictitious) {
				assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
				break;
			}
			if (!p->dirty && p->wpmapped)
				p->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));

			if ((p->dirty || p->precious) && !p->error && object->alive) {

				assert(!object->internal);

				p->free_when_done = TRUE;

				if (!p->laundry) {
					vm_page_queues_remove(p, TRUE);
					/*
					 * flush page... page will be freed
					 * upon completion of I/O
					 */
					(void)vm_pageout_cluster(p, FALSE, FALSE);
				}
				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
						     disconnect_on_release);

				vm_object_paging_wait(object, THREAD_UNINT);

				goto restart_after_sleep;
			}
			break;
		}
		vm_page_free_prepare_queues(p);
		assert(p->pageq.next == 0 && p->pageq.prev == 0);
		/*
		 * Add this page to our list of reclaimed pages,
		 * to be freed later.
		 */
		p->snext = local_free_q;
		local_free_q = p;
	}
	vm_page_unlock_queues();

	/*
	 * Free the remaining reclaimed pages
	 */
	if (reap_type == REAP_PURGEABLE)
		pmap_flush(&pmap_flush_context_storage);

	VM_OBJ_REAP_FREELIST(local_free_q,
			     disconnect_on_release);
}
void
vm_object_reap_async(
	vm_object_t	object)
{
	vm_object_lock_assert_exclusive(object);

	vm_object_reaper_lock_spin();

	vm_object_reap_count_async++;

	/* enqueue the VM object... */
	queue_enter(&vm_object_reaper_queue, object,
		    vm_object_t, cached_list);

	vm_object_reaper_unlock();

	/* ... and wake up the reaper thread */
	thread_wakeup((event_t) &vm_object_reaper_queue);
}
void
vm_object_reaper_thread(void)
{
	vm_object_t	object, shadow_object;

	vm_object_reaper_lock_spin();

	while (!queue_empty(&vm_object_reaper_queue)) {
		queue_remove_first(&vm_object_reaper_queue,
				   object,
				   vm_object_t,
				   cached_list);

		vm_object_reaper_unlock();
		vm_object_lock(object);

		assert(object->terminating);
		assert(!object->alive);

		/*
		 * The pageout daemon might be playing with our pages.
		 * Now that the object is dead, it won't touch any more
		 * pages, but some pages might already be on their way out.
		 * Hence, we wait until the active paging activities have
		 * ceased before we break the association with the pager.
		 */
		while (object->paging_in_progress != 0 ||
		       object->activity_in_progress != 0) {
			vm_object_wait(object,
				       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
				       THREAD_UNINT);
			vm_object_lock(object);
		}

		shadow_object =
			object->pageout ? VM_OBJECT_NULL : object->shadow;

		vm_object_reap(object);
		/* cache is unlocked and object is no longer valid */
		object = VM_OBJECT_NULL;

		if (shadow_object != VM_OBJECT_NULL) {
			/*
			 * Drop the reference "object" was holding on
			 * its shadow object.
			 */
			vm_object_deallocate(shadow_object);
			shadow_object = VM_OBJECT_NULL;
		}
		vm_object_reaper_lock_spin();
	}

	/* wait for more work... */
	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);

	vm_object_reaper_unlock();

	thread_block((thread_continue_t) vm_object_reaper_thread);
}
/*
 *	Routine:	vm_object_pager_wakeup
 *	Purpose:	Wake up anyone waiting for termination of a pager.
 */
static void
vm_object_pager_wakeup(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;
	boolean_t		waiting = FALSE;
	lck_mtx_t		*lck;

	/*
	 *	If anyone was waiting for the memory_object_terminate
	 *	to be queued, wake them up now.
	 */
	lck = vm_object_hash_lock_spin(pager);
	entry = vm_object_hash_lookup(pager, TRUE);
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		waiting = entry->waiting;
	vm_object_hash_unlock(lck);

	if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
		if (waiting)
			thread_wakeup((event_t) pager);
		vm_object_hash_entry_free(entry);
	}
}
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 *			Just like memory_object_terminate, except
 *			that we wake up anyone blocked in vm_object_enter
 *			waiting for termination message to be queued
 *			before calling memory_object_init.
 */
static void
vm_object_release_pager(
	memory_object_t	pager,
	boolean_t	hashed)
{
	/*
	 *	Terminate the pager.
	 */
	(void) memory_object_terminate(pager);

	if (hashed == TRUE) {
		/*
		 *	Wakeup anyone waiting for this terminate
		 *	and remove the entry from the hash
		 */
		vm_object_pager_wakeup(pager);
	}
	/*
	 *	Release reference to pager.
	 */
	memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	__unused kern_return_t	reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	if (object->hashed) {
		lck_mtx_t	*lck;
		/*
		 *	Rip out the pager from the vm_object now...
		 */
		lck = vm_object_hash_lock_spin(object->pager);
		vm_object_remove(object);
		vm_object_hash_unlock(lck);
	}
	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);

	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager, object->hashed);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call).
		 */
		vm_object_deallocate(object);
	}
	return(KERN_SUCCESS);
}
#define VM_OBJ_DEACT_ALL_STATS DEBUG
#if VM_OBJ_DEACT_ALL_STATS
uint32_t vm_object_deactivate_all_pages_batches = 0;
uint32_t vm_object_deactivate_all_pages_pages = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
/*
 *	vm_object_deactivate_all_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_all_pages(
	vm_object_t	object)
{
	vm_page_t	p;
	int		loop_count;
#if VM_OBJ_DEACT_ALL_STATS
	int		pages_count;
#endif /* VM_OBJ_DEACT_ALL_STATS */
#define V_O_D_A_P_MAX_BATCH	256
	loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
#if VM_OBJ_DEACT_ALL_STATS
	pages_count = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
	vm_page_lock_queues();
	vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
		if (--loop_count == 0) {
#if VM_OBJ_DEACT_ALL_STATS
			hw_atomic_add(&vm_object_deactivate_all_pages_batches,
				      1);
			hw_atomic_add(&vm_object_deactivate_all_pages_pages,
				      pages_count);
			pages_count = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
			lck_mtx_yield(&vm_page_queue_lock);
			loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
		}
		if (!p->busy && (p->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q)) {
#if VM_OBJ_DEACT_ALL_STATS
			pages_count++;
#endif /* VM_OBJ_DEACT_ALL_STATS */
			vm_page_deactivate(p);
		}
	}
#if VM_OBJ_DEACT_ALL_STATS
	hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
	hw_atomic_add(&vm_object_deactivate_all_pages_pages,
		      pages_count);
#endif /* VM_OBJ_DEACT_ALL_STATS */
	vm_page_unlock_queues();
}
#endif	/* VM_OBJECT_CACHE */
/*
 * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
 * exist because of the need to handle shadow chains.  When deactivating pages, we only
 * want to deactivate the ones at the top most level in the object chain.  In order to do
 * this efficiently, the specified address range is divided up into "chunks" and we use
 * a bit map to keep track of which pages have already been processed as we descend down
 * the shadow chain.  These chunk macros hide the details of the bit map implementation
 * as much as we can.
 *
 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
 * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
 * order bit represents page 0 in the current range and the highest order bit represents
 * page 63.
 *
 * For further convenience, we also use negative logic for the page state in the bit map.
 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
 * been processed.  This way we can simply test the 64-bit long word to see if it's zero
 * to easily tell if the whole range has been processed.  Therefore, the bit map starts
 * out with all the bits set.  The macros below hide all these details from the caller.
 */

#define PAGES_IN_A_CHUNK	64	/* The number of pages in the chunk must */
					/* be the same as the number of bits in  */
					/* the chunk_state_t type. We use 64	  */
					/* just for convenience.		  */

#define CHUNK_SIZE	(PAGES_IN_A_CHUNK * PAGE_SIZE_64)	/* Size of a chunk in bytes */

typedef uint64_t	chunk_state_t;

/*
 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
 * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
 * then we mark pages beyond the len as having been "processed" so that we don't waste time
 * looking at pages in that range.  This can save us from unnecessarily chasing down the
 * shadow chain.
 */

#define CHUNK_INIT(c, len)						\
	MACRO_BEGIN							\
	uint64_t p;							\
									\
	(c) = 0xffffffffffffffffLL; 					\
									\
	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)	\
		MARK_PAGE_HANDLED(c, p);				\
	MACRO_END

/*
 * Return true if all pages in the chunk have not yet been processed.
 */

#define CHUNK_NOT_COMPLETE(c)	((c) != 0)

/*
 * Return true if the page at offset 'p' in the bit map has already been handled
 * while processing a higher level object in the shadow chain.
 */

#define PAGE_ALREADY_HANDLED(c, p)	(((c) & (1LL << (p))) == 0)

/*
 * Mark the page at offset 'p' in the bit map as having been processed.
 */

#define MARK_PAGE_HANDLED(c, p) \
	MACRO_BEGIN \
	(c) = (c) & ~(1LL << (p)); \
	MACRO_END
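/*
 * Worked example of the chunk bit map (illustrative only): for a 3-page
 * range, CHUNK_INIT(c, 3 * PAGE_SIZE_64) first sets all 64 bits and then
 * marks pages 3..63 as handled, leaving c == 0x7 (pages 0, 1 and 2 still
 * to do).  As each of those pages is found at some level of the shadow
 * chain, MARK_PAGE_HANDLED() clears its bit; once c reaches 0,
 * CHUNK_NOT_COMPLETE(c) is false and the walk down the chain stops early.
 */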
/*
 * Return true if the page at the given offset has been paged out.  Object is
 * locked upon entry and returned locked.
 */

static boolean_t
page_is_paged_out(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	if (object->internal &&
	    !object->terminating &&
	    object->pager_ready) {

		if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
		    == VM_EXTERNAL_STATE_EXISTS) {
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * madvise_free_debug
 *
 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
 * simulate the loss of the page's contents as if the page had been
 * reclaimed and then re-faulted.
 */
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else /* DEBUG */
int madvise_free_debug = 0;
#endif /* DEBUG */
/*
 * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
 * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
 * a size that is less than or equal to the CHUNK_SIZE.
 */

static void
deactivate_pages_in_object(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	chunk_state_t		*chunk_state,
	pmap_flush_context	*pfc,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_page_t	m;
	int		p;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int		dw_count;
	int		dw_limit;
	unsigned int	reusable = 0;

	/*
	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
	 * all the pages in the chunk.
	 */

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {

		/*
		 * If this offset has already been found and handled in a higher level object, then don't
		 * do anything with it in the current shadow object.
		 */

		if (PAGE_ALREADY_HANDLED(*chunk_state, p))
			continue;

		/*
		 * See if the page at this offset is around.  First check to see if the page is resident,
		 * then if not, check the existence map or with the pager.
		 */

		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			/*
			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
			 * so that we won't bother looking for a page at this offset again if there are more
			 * shadow objects.  Then deactivate the page.
			 */

			MARK_PAGE_HANDLED(*chunk_state, p);

			if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
				int	clear_refmod;
				int	pmap_options;

				dwp->dw_mask = 0;
				pmap_options = 0;
				clear_refmod = VM_MEM_REFERENCED;
				dwp->dw_mask |= DW_clear_reference;

				if ((kill_page) && (object->internal)) {
					if (madvise_free_debug) {
						/*
						 * zero-fill the page now
						 * to simulate it being
						 * reclaimed and re-faulted.
						 */
						pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
					}
					m->precious = FALSE;
					m->dirty = FALSE;

					clear_refmod |= VM_MEM_MODIFIED;
					if (m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
						/*
						 * This page is now clean and
						 * reclaimable.  Move it out
						 * of the throttled queue, so
						 * that vm_pageout_scan() can
						 * find it.
						 */
						dwp->dw_mask |= DW_move_page;
					}

					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);

					if (reusable_page && !m->reusable) {
						assert(!all_reusable);
						assert(!object->all_reusable);
						m->reusable = TRUE;
						object->reusable_page_count++;
						assert(object->resident_page_count >= object->reusable_page_count);
						reusable++;
						/*
						 * Tell pmap this page is now
						 * "reusable" (to update pmap
						 * stats for all mappings).
						 */
						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
					}
				}
				pmap_options |= PMAP_OPTIONS_NOFLUSH;
				pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
							  clear_refmod,
							  pmap_options,
							  (void *)pfc);

				if ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable))
					dwp->dw_mask |= DW_move_page;

				VM_PAGE_ADD_DELAYED_WORK(dwp, m,
							 dw_count);

				if (dw_count >= dw_limit) {
					if (reusable) {
						OSAddAtomic(reusable,
							    &vm_page_stats_reusable.reusable_count);
						vm_page_stats_reusable.reusable += reusable;
						reusable = 0;
					}
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

					dwp = &dw_array[0];
					dw_count = 0;
				}
			}
		} else {
			/*
			 * The page at this offset isn't memory resident, check to see if it's
			 * been paged out.  If so, mark it as handled so we don't bother looking
			 * for it in the shadow chain.
			 */

			if (page_is_paged_out(object, offset)) {
				MARK_PAGE_HANDLED(*chunk_state, p);

				/*
				 * If we're killing a non-resident page, then clear the page in the existence
				 * map so we don't bother paging it back in if it's touched again in the future.
				 */

				if ((kill_page) && (object->internal)) {

					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);

					if (pmap != PMAP_NULL) {
						/*
						 * Tell pmap that this page
						 * is no longer mapped, to
						 * adjust the footprint ledger
						 * because this page is no
						 * longer compressed.
						 */
						pmap_remove_options(
							pmap,
							pmap_offset,
							(pmap_offset +
							 PAGE_SIZE),
							PMAP_OPTIONS_REMOVE);
					}
				}
			}
		}
	}

	if (reusable) {
		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reusable += reusable;
		reusable = 0;
	}

	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
}
/*
 * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
 * will always be less than or equal to the given size.  The total range is divided up
 * into chunks for efficiency and performance related to the locks and handling the shadow
 * chain.  This routine returns how much of the given "size" it actually processed.  It's
 * up to the caller to loop and keep calling this routine until the entire range they want
 * to process has been done.
 */

static vm_object_size_t
deactivate_a_chunk(
	vm_object_t		orig_object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	pmap_flush_context	*pfc,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_t		object;
	vm_object_t		tmp_object;
	vm_object_size_t	length;
	chunk_state_t		chunk_state;

	/*
	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
	 * remaining size the caller asked for.
	 */

	length = MIN(size, CHUNK_SIZE);

	/*
	 * The chunk_state keeps track of which pages we've already processed if there's
	 * a shadow chain on this object.  At this point, we haven't done anything with this
	 * range of pages yet, so initialize the state to indicate no pages processed yet.
	 */

	CHUNK_INIT(chunk_state, length);
	object = orig_object;

	/*
	 * Start at the top level object and iterate around the loop once for each object
	 * in the shadow chain.  We stop processing early if we've already found all the pages
	 * in the range.  Otherwise we stop when we run out of shadow objects.
	 */

	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
		vm_object_paging_begin(object);

		deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);

		vm_object_paging_end(object);

		/*
		 * We've finished with this object, see if there's a shadow object.  If
		 * there is, update the offset and lock the new object.  We also turn off
		 * kill_page at this point since we only kill pages in the top most object.
		 */

		tmp_object = object->shadow;

		if (tmp_object) {
			kill_page = FALSE;
			reusable_page = FALSE;
			all_reusable = FALSE;
			offset += object->vo_shadow_offset;
			vm_object_lock(tmp_object);
		}

		if (object != orig_object)
			vm_object_unlock(object);

		object = tmp_object;
	}

	if (object && object != orig_object)
		vm_object_unlock(object);

	return length;
}
/*
 * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
 * we also clear the modified status of the page and "forget" any changes that have been made
 * to the page.
 */

__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_size_t	length;
	boolean_t		all_reusable;
	pmap_flush_context	pmap_flush_context_storage;

	/*
	 * We break the range up into chunks and do one chunk at a time.  This is for
	 * efficiency and performance while handling the shadow chains and the locks.
	 * The deactivate_a_chunk() function returns how much of the range it processed.
	 * We keep calling this routine until the given size is exhausted.
	 */
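	/*
	 * For example (illustration only): with 4 KB pages a chunk covers
	 * 64 * 4 KB = 256 KB, so deactivating a 1 MB range takes four calls
	 * to deactivate_a_chunk(), each walking the shadow chain at most once.
	 */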
	all_reusable = FALSE;
	/*
	 * For the sake of accurate "reusable" pmap stats, we need
	 * to tell pmap about each page that is no longer "reusable",
	 * so we can't do the "all_reusable" optimization.
	 */
	if (reusable_page &&
	    object->vo_size != 0 &&
	    object->vo_size == size &&
	    object->reusable_page_count == 0) {
		all_reusable = TRUE;
		reusable_page = FALSE;
	}

	if ((reusable_page || all_reusable) && object->all_reusable) {
		/* This means MADV_FREE_REUSABLE has been called twice, which
		 * is probably illegal. */
		return;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);

	while (size) {
		length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
	pmap_flush(&pmap_flush_context_storage);

	if (all_reusable) {
		if (!object->all_reusable) {
			unsigned int reusable;

			object->all_reusable = TRUE;
			assert(object->reusable_page_count == 0);
			/* update global stats */
			reusable = object->resident_page_count;
			OSAddAtomic(reusable,
				    &vm_page_stats_reusable.reusable_count);
			vm_page_stats_reusable.reusable += reusable;
			vm_page_stats_reusable.all_reusable_calls++;
		}
	} else if (reusable_page) {
		vm_page_stats_reusable.partial_reusable_calls++;
	}
}
void
vm_object_reuse_pages(
	vm_object_t		object,
	vm_object_offset_t	start_offset,
	vm_object_offset_t	end_offset,
	boolean_t		allow_partial_reuse)
{
	vm_object_offset_t	cur_offset;
	vm_page_t		m;
	unsigned int		reused, reusable;

#define VM_OBJECT_REUSE_PAGE(object, m, reused)				\
	MACRO_BEGIN							\
		if ((m) != VM_PAGE_NULL &&				\
		    (m)->reusable) {					\
			assert((object)->reusable_page_count <=		\
			       (object)->resident_page_count);		\
			assert((object)->reusable_page_count > 0);	\
			(object)->reusable_page_count--;		\
			(m)->reusable = FALSE;				\
			(reused)++;					\
			/*						\
			 * Tell pmap that this page is no longer	\
			 * "reusable", to update the "reusable" stats	\
			 * for all the pmaps that have mapped this	\
			 * page.					\
			 */						\
			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
						  0, /* refmod */	\
						  (PMAP_OPTIONS_CLEAR_REUSABLE \
						   | PMAP_OPTIONS_NOFLUSH), \
						  NULL);		\
		}							\
	MACRO_END

	reused = 0;
	reusable = 0;

	vm_object_lock_assert_exclusive(object);

	if (object->all_reusable) {
		panic("object %p all_reusable: can't update pmap stats\n",
		      object);
		assert(object->reusable_page_count == 0);
		object->all_reusable = FALSE;
		if (end_offset - start_offset == object->vo_size ||
		    !allow_partial_reuse) {
			vm_page_stats_reusable.all_reuse_calls++;
			reused = object->resident_page_count;
		} else {
			vm_page_stats_reusable.partial_reuse_calls++;
			vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
				if (m->offset < start_offset ||
				    m->offset >= end_offset) {
					m->reusable = TRUE;
					object->reusable_page_count++;
					assert(object->resident_page_count >= object->reusable_page_count);
					continue;
				} else {
					assert(!m->reusable);
					reused++;
				}
			}
		}
	} else if (object->resident_page_count >
		   ((end_offset - start_offset) >> PAGE_SHIFT)) {
		vm_page_stats_reusable.partial_reuse_calls++;
		for (cur_offset = start_offset;
		     cur_offset < end_offset;
		     cur_offset += PAGE_SIZE_64) {
			if (object->reusable_page_count == 0) {
				break;
			}
			m = vm_page_lookup(object, cur_offset);
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	} else {
		vm_page_stats_reusable.partial_reuse_calls++;
		vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
			if (object->reusable_page_count == 0) {
				break;
			}
			if (m->offset < start_offset ||
			    m->offset >= end_offset) {
				continue;
			}
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	}

	/* update global stats */
	OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
	vm_page_stats_reusable.reused += reused;
	vm_page_stats_reusable.reusable += reusable;
}
/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be temporary/internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */

__private_extern__ void
vm_object_pmap_protect(
	vm_object_t			object,
	vm_object_offset_t		offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_map_offset_t			pmap_start,
	vm_prot_t			prot)
{
	vm_object_pmap_protect_options(object, offset, size,
				       pmap, pmap_start, prot, 0);
}
__private_extern__ void
vm_object_pmap_protect_options(
	vm_object_t			object,
	vm_object_offset_t		offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_map_offset_t			pmap_start,
	vm_prot_t			prot,
	int				options)
{
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		delayed_pmap_flush = FALSE;

	if (object == VM_OBJECT_NULL)
		return;
	size = vm_object_round_page(size);
	offset = vm_object_trunc_page(offset);

	vm_object_lock(object);

	if (object->phys_contiguous) {
		if (pmap != NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap,
					     pmap_start,
					     pmap_start + size,
					     prot,
					     options & ~PMAP_OPTIONS_NOFLUSH,
					     NULL);
		} else {
			vm_object_offset_t phys_start, phys_end, phys_addr;

			phys_start = object->vo_shadow_offset + offset;
			phys_end = phys_start + size;
			assert(phys_start <= phys_end);
			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
			vm_object_unlock(object);

			pmap_flush_context_init(&pmap_flush_context_storage);
			delayed_pmap_flush = FALSE;

			for (phys_addr = phys_start;
			     phys_addr < phys_end;
			     phys_addr += PAGE_SIZE_64) {
				pmap_page_protect_options(
					(ppnum_t) (phys_addr >> PAGE_SHIFT),
					prot,
					options | PMAP_OPTIONS_NOFLUSH,
					(void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
			if (delayed_pmap_flush == TRUE)
				pmap_flush(&pmap_flush_context_storage);
		}
		return;
	}

	assert(object->internal);

	while (TRUE) {
		if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
					     options & ~PMAP_OPTIONS_NOFLUSH, NULL);
			return;
		}

		pmap_flush_context_init(&pmap_flush_context_storage);
		delayed_pmap_flush = FALSE;

		/*
		 * if we are doing large ranges with respect to resident
		 * page count then we should iterate over pages otherwise
		 * inverse page look-up will be faster
		 */
		if (ptoa_64(object->resident_page_count / 4) < size) {
			vm_page_t		p;
			vm_object_offset_t	end;

			end = offset + size;

			vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
					vm_map_offset_t start;

					start = pmap_start + p->offset - offset;

					if (pmap != PMAP_NULL)
						pmap_protect_options(
							pmap,
							start,
							start + PAGE_SIZE_64,
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					else
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					delayed_pmap_flush = TRUE;
				}
			}

		} else {
			vm_page_t		p;
			vm_object_offset_t	end;
			vm_object_offset_t	target_off;

			end = offset + size;

			for (target_off = offset;
			     target_off < end; target_off += PAGE_SIZE) {

				p = vm_page_lookup(object, target_off);

				if (p != VM_PAGE_NULL) {
					vm_object_offset_t start;

					start = pmap_start + (p->offset - offset);

					if (pmap != PMAP_NULL)
						pmap_protect_options(
							pmap,
							start,
							start + PAGE_SIZE_64,
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					else
						pmap_page_protect_options(
							VM_PAGE_GET_PHYS_PAGE(p),
							prot,
							options | PMAP_OPTIONS_NOFLUSH,
							&pmap_flush_context_storage);
					delayed_pmap_flush = TRUE;
				}
			}
		}
		if (delayed_pmap_flush == TRUE)
			pmap_flush(&pmap_flush_context_storage);

		if (prot == VM_PROT_NONE) {
			/*
			 * Must follow shadow chain to remove access
			 * to pages in shadowed objects.
			 */
			vm_object_t	next_object;

			next_object = object->shadow;
			if (next_object != VM_OBJECT_NULL) {
				offset += object->vo_shadow_offset;
				vm_object_lock(next_object);
				vm_object_unlock(object);
				object = next_object;
			} else {
				/*
				 * End of chain - we are done.
				 */
				break;
			}
		} else {
			/*
			 * Pages in shadowed objects may never have
			 * write permission - we may stop here.
			 */
			break;
		}
	}

	vm_object_unlock(object);
}
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	struct vm_object_fault_info fault_info;

	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	vm_object_reference_locked(src_object);
	vm_object_unlock(src_object);

	/*
	 *	Create a new object to hold the copied pages.
	 *	We fill the new object starting at offset 0,
	 *	regardless of the input offset.
	 *	We don't bother to lock the new object within
	 *	this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */

	fault_info.interruptible = interruptible;
	fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.user_tag  = 0;
	fault_info.pmap_options = 0;
	fault_info.lo_offset = src_offset;
	fault_info.hi_offset = src_offset + size;
	fault_info.no_cache  = FALSE;
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;

	for ( ;
	      size != 0 ;
	      src_offset += PAGE_SIZE_64,
	      new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t	new_page;
		vm_fault_return_t result;

		vm_object_lock(new_object);

		while ((new_page = vm_page_alloc(new_object, new_offset))
		       == VM_PAGE_NULL) {

			vm_object_unlock(new_object);

			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			}
			vm_object_lock(new_object);
		}
		vm_object_unlock(new_object);

		do {
			vm_prot_t	prot = VM_PROT_READ;
			vm_page_t	_result_page;
			vm_page_t	top_page;
			vm_page_t	result_page;
			kern_return_t	error_code;
			vm_object_t	result_page_object;

			vm_object_lock(src_object);

			if (src_object->internal &&
			    src_object->shadow == VM_OBJECT_NULL &&
			    (vm_page_lookup(src_object,
					    src_offset) == VM_PAGE_NULL) &&
			    (src_object->pager == NULL ||
			     (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
							    src_offset) ==
			      VM_EXTERNAL_STATE_ABSENT))) {
				/*
				 * This page is neither resident nor compressed
				 * and there's no shadow object below
				 * "src_object", so this page is really missing.
				 * There's no need to zero-fill it just to copy
				 * it: let's leave it missing in "new_object"
				 * and get zero-filled on demand.
				 */
				vm_object_unlock(src_object);
				/* free the unused "new_page"... */
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				new_page = VM_PAGE_NULL;
				vm_object_unlock(new_object);
				/* ...and go to next page in "src_object" */
				result = VM_FAULT_SUCCESS;
				break;
			}

			vm_object_paging_begin(src_object);

			if (size > (vm_size_t) -1) {
				/* 32-bit overflow */
				fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
			} else {
				fault_info.cluster_size = (vm_size_t) size;
				assert(fault_info.cluster_size == size);
			}

			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
			_result_page = VM_PAGE_NULL;
			result = vm_fault_page(src_object, src_offset,
					       VM_PROT_READ, FALSE,
					       FALSE, /* page not looked up */
					       &prot, &_result_page, &top_page,
					       (int *)0,
					       &error_code, FALSE, FALSE, &fault_info);

			switch(result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;
				result_page_object = VM_PAGE_OBJECT(result_page);

				/*
				 *	Copy the page to the new object.
				 *
				 *	If result_page is clean,
				 *	we could steal it instead
				 *	of copying.
				 */

				vm_page_copy(result_page, new_page);
				vm_object_unlock(result_page_object);

				/*
				 *	Let go of both pages (make them
				 *	not busy, perform wakeup, activate).
				 */
				vm_object_lock(new_object);
				SET_PAGE_DIRTY(new_page, FALSE);
				PAGE_WAKEUP_DONE(new_page);
				vm_object_unlock(new_object);

				vm_object_lock(result_page_object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lockspin_queues();
				if ((result_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
				    (result_page->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
					vm_page_activate(result_page);
				}
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 *	Release paging references and
				 *	top-level placeholder page, if any.
				 */

				vm_fault_cleanup(result_page_object,
						 top_page);

				break;

			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible))
					break;
				/* fall thru */

			case VM_FAULT_INTERRUPTED:
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);

			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(src_object);
				vm_object_unlock(src_object);
				/*FALLTHROUGH*/
			case VM_FAULT_MEMORY_ERROR:
				/*
				 * A policy choice:
				 *	(a) ignore pages that we can't
				 *	    copy
				 *	(b) return the null object if
				 *	    any page fails [chosen]
				 */

				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(error_code ? error_code :
				       KERN_MEMORY_ERROR);

			default:
				panic("vm_object_copy_slowly: unexpected error"
				      " 0x%x from vm_fault_page()\n", result);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 *	Lose the extra reference, and return our object.
	 */
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */

__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t		*_object,		/* INOUT */
	__unused vm_object_offset_t	offset,		/* IN */
	__unused vm_object_size_t	size,		/* IN */
	boolean_t		*_src_needs_copy,	/* OUT */
	boolean_t		*_dst_needs_copy)	/* OUT */
{
	vm_object_t	object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
	    *_object, offset, size, 0, 0);
	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return(TRUE);
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		vm_object_reference_locked(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return(FALSE);

	default:
		vm_object_unlock(object);
		return(FALSE);
	}
	return(TRUE);
}
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;

/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;
	uint32_t	try_failed_count = 0;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptible, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case is.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);

		try_failed_count++;
		mutex_pause(try_failed_count);	/* wait a bit */

		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->vo_size < src_offset+size)
		copy->vo_size = src_offset+size;

	if (!copy->pager_ready)
		check_ready = TRUE;

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;

/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The src_object must be locked on entry.  It will be unlocked
 *		on exit - so the caller must also hold a reference to it.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		src_object_shared)
{
	vm_object_t		new_copy = VM_OBJECT_NULL;
	vm_object_t		old_copy;
	vm_page_t		p;
	vm_object_size_t	copy_size = src_offset + size;
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		delayed_pmap_flush = FALSE;

	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		 pages modified by the new mapping  (i.e., the copy,
	 *		 not the original mapping).
	 *		Record the original object as the backing object for
	 *		 the copy object.  If the original mapping does not
	 *		 change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		 When the original mapping causes a page to be modified,
	 *		 it must be copied to a new page that is "pushed" to
	 *		 the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		 This makes the copy object itself read-only, allowing
	 *		 it to be reused if the original mapping makes no
	 *		 changes, and simplifying the synchronization required
	 *		 in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be asymmetric because the original
	 *	object is *not* marked copy-on-write. A copied page is pushed
	 *	to the copy object, regardless which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done. If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused. Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot
	 *	affect the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */
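	/*
	 * Sketch of the resulting object chains (illustrative, derived from
	 * the code below): after one delayed copy,
	 *	new_copy->shadow == src_object  and  src_object->copy == new_copy.
	 * If src_object is later modified and copied again, the old copy
	 * object is re-pointed at the new one (old_copy->shadow = new_copy),
	 * so reads through the old copy keep seeing the pages it already had,
	 * while src_object->copy again names the newest copy object.
	 */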
	copy_size = vm_object_round_page(copy_size);
 Retry:

	/*
	 * Wait for paging in progress.
	 */
	if (!src_object->true_share &&
	    (src_object->paging_in_progress != 0 ||
	     src_object->activity_in_progress != 0)) {
		if (src_object_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			src_object_shared = FALSE;
			goto Retry;
		}
		vm_object_paging_wait(src_object, THREAD_UNINT);
	}
	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		int	lock_granted;

		/*
		 *	Try to get the locks (out of order)
		 */
		if (src_object_shared == TRUE)
			lock_granted = vm_object_lock_try_shared(old_copy);
		else
			lock_granted = vm_object_lock_try(old_copy);

		if (!lock_granted) {
			vm_object_unlock(src_object);

			if (collisions++ == 0)
				copy_delayed_lock_contention++;
			mutex_pause(collisions);

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;

			if (collisions > copy_delayed_max_collisions)
				copy_delayed_max_collisions = collisions;

			if (src_object_shared == TRUE)
				vm_object_lock_shared(src_object);
			else
				vm_object_lock(src_object);

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object if
			 *	we can safely grow it (if
			 *	needed).
			 */

			if (old_copy->vo_size < copy_size) {
				if (src_object_shared == TRUE) {
					vm_object_unlock(old_copy);
					vm_object_unlock(src_object);

					vm_object_lock(src_object);
					src_object_shared = FALSE;
					goto Retry;
				}
				/*
				 * We can't perform a delayed copy if any of the
				 * pages in the extended range are wired (because
				 * we can't safely take write permission away from
				 * wired pages).  If the pages aren't wired, then
				 * go ahead and protect them.
				 */
				copy_delayed_protect_iterate++;

				pmap_flush_context_init(&pmap_flush_context_storage);
				delayed_pmap_flush = FALSE;

				vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
					if (!p->fictitious &&
					    p->offset >= old_copy->vo_size &&
					    p->offset < copy_size) {
						if (VM_PAGE_WIRED(p)) {
							vm_object_unlock(old_copy);
							vm_object_unlock(src_object);

							if (new_copy != VM_OBJECT_NULL) {
								vm_object_unlock(new_copy);
								vm_object_deallocate(new_copy);
							}
							if (delayed_pmap_flush == TRUE)
								pmap_flush(&pmap_flush_context_storage);

							return VM_OBJECT_NULL;
						} else {
							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
										  PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
							delayed_pmap_flush = TRUE;
						}
					}
				}
				if (delayed_pmap_flush == TRUE)
					pmap_flush(&pmap_flush_context_storage);

				old_copy->vo_size = copy_size;
			}
			if (src_object_shared == TRUE)
				vm_object_reference_shared(old_copy);
			else
				vm_object_reference_locked(old_copy);
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}
			return(old_copy);
		}

		/*
		 * Adjust the size argument so that the newly-created
		 * copy object will be large enough to back either the
		 * old copy object or the new mapping.
		 */
		if (old_copy->vo_size > copy_size)
			copy_size = old_copy->vo_size;

		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(copy_size);
			vm_object_lock(src_object);
			vm_object_lock(new_copy);

			src_object_shared = FALSE;
			goto Retry;
		}
		new_copy->vo_size = copy_size;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		       (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));

	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(copy_size);
		vm_object_lock(src_object);
		vm_object_lock(new_copy);

		src_object_shared = FALSE;
		goto Retry;
	}

	/*
	 * We now have the src object locked, and the new copy object
	 * allocated and locked (and potentially the old copy locked).
	 * Before we go any further, make sure we can still perform
	 * a delayed copy, as the situation may have changed.
	 *
	 * Specifically, we can't perform a delayed copy if any of the
	 * pages in the range are wired (because we can't safely take
	 * write permission away from wired pages).  If the pages aren't
	 * wired, then go ahead and protect them.
	 */
	copy_delayed_protect_iterate++;

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
		if (!p->fictitious && p->offset < copy_size) {
			if (VM_PAGE_WIRED(p)) {
				if (old_copy)
					vm_object_unlock(old_copy);
				vm_object_unlock(src_object);
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);

				if (delayed_pmap_flush == TRUE)
					pmap_flush(&pmap_flush_context_storage);

				return VM_OBJECT_NULL;
			} else {
				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
							  PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}
	}
	if (delayed_pmap_flush == TRUE)
		pmap_flush(&pmap_flush_context_storage);

	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		/* remove ref. from old_copy */
		vm_object_lock_assert_exclusive(src_object);
		src_object->ref_count--;
		assert(src_object->ref_count > 0);
		vm_object_lock_assert_exclusive(old_copy);
		old_copy->shadow = new_copy;
		vm_object_lock_assert_exclusive(new_copy);
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;		/* for old_copy->shadow ref. */

		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}

		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	/*
	 *	Point the new copy at the existing object.
	 */
	vm_object_lock_assert_exclusive(new_copy);
	new_copy->shadow = src_object;
	new_copy->vo_shadow_offset = 0;
	new_copy->shadowed = TRUE;	/* caller must set needs_copy */

	vm_object_lock_assert_exclusive(src_object);
	vm_object_reference_locked(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(src_object);
	vm_object_unlock(new_copy);

	XPR(XPR_VM_OBJECT,
	    "vm_object_copy_delayed: used copy object %X for source %X\n",
	    new_copy, src_object, 0, 0, 0);

	return new_copy;
}
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*dst_object,	/* OUT */
	vm_object_offset_t	*dst_offset,	/* OUT */
	boolean_t		*dst_needs_copy) /* OUT */
{
	kern_return_t	result;
	boolean_t	interruptible = THREAD_ABORTSAFE; /* XXX */
	boolean_t	object_lock_shared = FALSE;
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	copy_strategy = src_object->copy_strategy;

	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
		vm_object_lock_shared(src_object);
		object_lock_shared = TRUE;
	} else
		vm_object_lock(src_object);

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready". Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {
		wait_result_t wait_result;

		if (object_lock_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			object_lock_shared = FALSE;
			continue;
		}
		wait_result = vm_object_sleep(	src_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
		if (wait_result != THREAD_AWAKENED) {
			vm_object_unlock(src_object);
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return(MACH_SEND_INTERRUPTED);
		}
	}

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_DELAY:
		*dst_object = vm_object_copy_delayed(src_object,
						     src_offset, size, object_lock_shared);
		if (*dst_object != VM_OBJECT_NULL) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
			result = KERN_SUCCESS;
			break;
		}
		vm_object_lock(src_object);
		/* fall thru when delayed copy not allowed */

	case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
					       interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
					     dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	case MEMORY_OBJECT_COPY_SYMMETRIC:
		XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
	}
	return(result);
}
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = TRUE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t		*object,	/* IN/OUT */
	vm_object_offset_t	*offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	vm_object_t	source;
	vm_object_t	result;

	source = *object;
	assert(source != VM_OBJECT_NULL);
	if (source == VM_OBJECT_NULL)
		return FALSE;

#if 0
	/*
	 * This assertion is valid but it gets triggered by Rosetta for example
	 * due to a combination of vm_remap() that changes a VM object's
	 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
	 * that then sets "needs_copy" on its map entry.  This creates a
	 * mapping situation that VM should never see and doesn't know how to
	 * handle.
	 * It's not clear if this can create any real problem but we should
	 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
	 * do more than just set "needs_copy" to handle the copy-on-write...
	 * In the meantime, let's disable the assertion.
	 */
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
#endif

	/*
	 *	Determine if we really need a shadow.
	 *
	 *	If the source object is larger than what we are trying
	 *	to create, then force the shadow creation even if the
	 *	ref count is 1.  This will allow us to [potentially]
	 *	collapse the underlying object away in the future
	 *	(freeing up the extra data it might contain and that
	 *	we don't need).
	 */

	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */

	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	     source->shadow->copy == VM_OBJECT_NULL)) {
		/* lock the object and check again */
		vm_object_lock(source);
		if (source->vo_size == length &&
		    source->ref_count == 1 &&
		    (source->shadow == VM_OBJECT_NULL ||
		     source->shadow->copy == VM_OBJECT_NULL)) {
			source->shadowed = FALSE;
			vm_object_unlock(source);
			return FALSE;
		}
		/* things changed while we were locking "source"... */
		vm_object_unlock(source);
	}

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->vo_shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
	return TRUE;
}
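
/*
 * Informal usage sketch (not from the original source): a copy-on-write
 * path that wants a private copy of a mapped range would do roughly the
 * following, assuming "entry" is a locked map entry and the VME_* accessor
 * macros name its object and offset:
 *
 *	vm_object_t        obj = VME_OBJECT(entry);
 *	vm_object_offset_t off = VME_OFFSET(entry);
 *
 *	if (vm_object_shadow(&obj, &off,
 *			     entry->vme_end - entry->vme_start)) {
 *		... install "obj"/"off" back into the map entry ...
 *	}
 *
 * On TRUE, "obj"/"off" name the new shadow object; the caller's old
 * reference to the source object has effectively moved into the new
 * object's "shadow" pointer.
 */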
/*
 *	The relationship between vm_object structures and
 *	the memory_object requires careful synchronization.
 *
 *	All associations are created by memory_object_create_named
 *	for external pagers and vm_object_compressor_pager_create for internal
 *	objects as follows:
 *
 *		pager:	the memory_object itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a port reference;
 *
 *		pager_request:
 *			the memory object control port,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to this
 *			port, but no other references.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish references to the memory
 *			object.  [The memory manager may not want to
 *			destroy the memory object, but may wish to
 *			refuse or tear down existing memory mappings.]
 *
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the pager field and release the memory object references.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	In addition to the lock on the object, the vm_object_hash_lock
 *	governs the associations.  References gained through the
 *	association require use of the hash lock.
 *
 *	Because the pager field may be cleared spontaneously, it
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the pager reference found in the
 *	vm_object must be valid.  To prevent this from being released,
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the pager values include the
 *	termination routines and vm_object_collapse.]
 */
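
/*
 * Informal illustration (not part of the original source) of the wait
 * protocol described above: a thread that reaches a vm_object through its
 * pager association blocks until initialization has finished, e.g.
 *
 *	vm_object_lock(object);
 *	while (!object->pager_initialized) {
 *		vm_object_sleep(object,
 *				VM_OBJECT_EVENT_INITIALIZED,
 *				THREAD_UNINT);
 *	}
 *	vm_object_unlock(object);
 *
 * The same pattern, with VM_OBJECT_EVENT_PAGER_READY, is used to wait for
 * the "ready" attribute (see memory_object_create_named below).
 */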
/*
 *	Routine:	vm_object_enter
 *	Purpose:
 *		Find a VM object corresponding to the given
 *		pager; if no such object exists, create one,
 *		and initialize the pager.
 */
vm_object_t
vm_object_enter(
	memory_object_t		pager,
	vm_object_size_t	size,
	boolean_t		internal,
	boolean_t		init,
	boolean_t		named)
{
	vm_object_t		object;
	vm_object_t		new_object;
	boolean_t		must_init;
	vm_object_hash_entry_t	entry, new_entry;
	uint32_t		try_failed_count = 0;
	lck_mtx_t		*lck;

	if (pager == MEMORY_OBJECT_NULL)
		return(vm_object_allocate(size));

	new_object = VM_OBJECT_NULL;
	new_entry = VM_OBJECT_HASH_ENTRY_NULL;
	must_init = init;

	/*
	 *	Look for an object associated with this port.
	 */
Retry:
	lck = vm_object_hash_lock_spin(pager);
	do {
		entry = vm_object_hash_lookup(pager, FALSE);

		if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
			if (new_object == VM_OBJECT_NULL) {
				/*
				 *	We must unlock to create a new object;
				 *	if we do so, we must try the lookup again.
				 */
				vm_object_hash_unlock(lck);
				assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
				new_entry = vm_object_hash_entry_alloc(pager);
				new_object = vm_object_allocate(size);
				/*
				 * Set new_object->hashed now, while no one
				 * knows about this object yet and we
				 * don't need to lock it.  Once it's in
				 * the hash table, we would have to lock
				 * the object to set its "hashed" bit and
				 * we can't lock the object while holding
				 * the hash lock as a spinlock...
				 */
				new_object->hashed = TRUE;
				lck = vm_object_hash_lock_spin(pager);
			} else {
				/*
				 *	Lookup failed twice, and we have something
				 *	to insert; set the object.
				 */
				/*
				 * We can't lock the object here since we're
				 * holding the hash lock as a spin lock.
				 * We've already pre-set "new_object->hashed"
				 * when we created "new_object" above, so we
				 * won't need to modify the object in
				 * vm_object_hash_insert().
				 */
				assert(new_object->hashed);
				vm_object_hash_insert(new_entry, new_object);
				entry = new_entry;
				new_entry = VM_OBJECT_HASH_ENTRY_NULL;
				new_object = VM_OBJECT_NULL;
				must_init = TRUE;
			}
		} else if (entry->object == VM_OBJECT_NULL) {
			/*
			 *	If a previous object is being terminated,
			 *	we must wait for the termination message
			 *	to be queued (and lookup the entry again).
			 */
			entry->waiting = TRUE;
			entry = VM_OBJECT_HASH_ENTRY_NULL;
			assert_wait((event_t) pager, THREAD_UNINT);
			vm_object_hash_unlock(lck);

			thread_block(THREAD_CONTINUE_NULL);
			lck = vm_object_hash_lock_spin(pager);
		}
	} while (entry == VM_OBJECT_HASH_ENTRY_NULL);

	object = entry->object;
	assert(object != VM_OBJECT_NULL);

	if (!must_init) {
		if ( !vm_object_lock_try(object)) {

			vm_object_hash_unlock(lck);

			try_failed_count++;
			mutex_pause(try_failed_count);	/* wait a bit */
			goto Retry;
		}
		assert(!internal || object->internal);

		if (object->ref_count == 0) {
			if ( !vm_object_cache_lock_try()) {

				vm_object_hash_unlock(lck);
				vm_object_unlock(object);

				try_failed_count++;
				mutex_pause(try_failed_count);	/* wait a bit */
				goto Retry;
			}
			XPR(XPR_VM_OBJECT_CACHE,
			    "vm_object_enter: removing %x from cache, head (%x, %x)\n",
			    object,
			    vm_object_cached_list.next,
			    vm_object_cached_list.prev, 0,0);
			queue_remove(&vm_object_cached_list, object,
				     vm_object_t, cached_list);
			vm_object_cached_count--;

			vm_object_cache_unlock();
		}

		if (named) {
			assert(!object->named);
			object->named = TRUE;
		}
		vm_object_lock_assert_exclusive(object);
		object->ref_count++;
		vm_object_res_reference(object);

		vm_object_hash_unlock(lck);
		vm_object_unlock(object);

		VM_STAT_INCR(hits);
	} else
		vm_object_hash_unlock(lck);

	assert(object->ref_count > 0);

	VM_STAT_INCR(lookups);

	XPR(XPR_VM_OBJECT,
	    "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
	    pager, object, must_init, 0, 0);

	/*
	 *	If we raced to create a vm_object but lost, let's
	 *	throw away ours.
	 */

	if (new_object != VM_OBJECT_NULL) {
		/*
		 * Undo the pre-setting of "new_object->hashed" before
		 * deallocating "new_object", since we did not insert it
		 * into the hash table after all.
		 */
		assert(new_object->hashed);
		new_object->hashed = FALSE;
		vm_object_deallocate(new_object);
	}

	if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
		vm_object_hash_entry_free(new_entry);

	if (must_init) {
		memory_object_control_t control;

		/*
		 *	Allocate request port.
		 */

		control = memory_object_control_allocate(object);
		assert (control != MEMORY_OBJECT_CONTROL_NULL);

		vm_object_lock(object);
		assert(object != kernel_object);

		/*
		 *	Copy the reference we were given.
		 */

		memory_object_reference(pager);
		object->pager_created = TRUE;
		object->pager = pager;
		object->internal = internal;
		object->pager_trusted = internal;
		if (!internal) {
			/* copy strategy invalid until set by memory manager */
			object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
		}
		object->pager_control = control;
		object->pager_ready = FALSE;

		vm_object_unlock(object);

		/*
		 *	Let the pager know we're using it.
		 */

		(void) memory_object_init(pager,
					  object->pager_control,
					  PAGE_SIZE);

		vm_object_lock(object);
		if (named)
			object->named = TRUE;
		if (internal) {
			vm_object_lock_assert_exclusive(object);
			object->pager_ready = TRUE;
			vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
		}

		object->pager_initialized = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
	} else {
		vm_object_lock(object);
	}

	/*
	 *	[At this point, the object must be locked]
	 */

	/*
	 *	Wait for the work above to be done by the first
	 *	thread to map this object.
	 */

	while (!object->pager_initialized) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_INITIALIZED,
				THREAD_UNINT);
	}
	vm_object_unlock(object);

	XPR(XPR_VM_OBJECT,
	    "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
	    object, object->pager, internal, 0,0);
	return(object);
}
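
/*
 * Informal note (not part of the original source) on the two callers seen
 * in this file; the names of the boolean parameters are an interpretation:
 *
 *	memory_object_create_named():
 *		object = vm_object_enter(pager, size, FALSE, FALSE, TRUE);
 *		(external pager, no forced init, takes a named reference)
 *
 *	vm_object_compressor_pager_create():
 *		pager_object = vm_object_enter(pager, object->vo_size,
 *					       TRUE, TRUE, FALSE);
 *		(internal object, initialize the pager, no named reference)
 */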
/*
 *	Routine:	vm_object_compressor_pager_create
 *	Purpose:
 *		Create a memory object for an internal object.
 *	In/out conditions:
 *		The object is locked on entry and exit;
 *		it may be unlocked within this call.
 *	Limitations:
 *		Only one thread may be performing a
 *		vm_object_compressor_pager_create on an object at
 *		a time.  Presumably, only the pageout
 *		daemon will be using this routine.
 */
void
vm_object_compressor_pager_create(
	vm_object_t	object)
{
	memory_object_t		pager;
	vm_object_hash_entry_t	entry;
	lck_mtx_t		*lck;
	vm_object_t		pager_object = VM_OBJECT_NULL;

	assert(object != kernel_object);

	/*
	 *	Prevent collapse or termination by holding a paging reference
	 */

	vm_object_paging_begin(object);
	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing the ports
		 */
		while (!object->pager_initialized) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
		}
		vm_object_paging_end(object);
		return;
	}

	if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
	    (object->vo_size/PAGE_SIZE)) {
#if DEVELOPMENT || DEBUG
		printf("vm_object_compressor_pager_create(%p): "
		       "object size 0x%llx >= 0x%llx\n",
		       object,
		       (uint64_t) object->vo_size,
		       0x0FFFFFFFFULL*PAGE_SIZE);
#endif /* DEVELOPMENT || DEBUG */
		vm_object_paging_end(object);
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */

	object->pager_created = TRUE;
	object->paging_offset = 0;

	vm_object_unlock(object);

	/*
	 *	Create the [internal] pager, and associate it with this object.
	 *
	 *	We make the association here so that vm_object_enter()
	 *	can look up the object to complete initializing it.  No
	 *	user will ever map this object.
	 */
	assert(object->temporary);

	/* create our new memory object */
	assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
	       (object->vo_size/PAGE_SIZE));
	(void) compressor_memory_object_create(
		(memory_object_size_t) object->vo_size,
		&pager);
	if (pager == NULL) {
		panic("vm_object_compressor_pager_create(): "
		      "no pager for object %p size 0x%llx\n",
		      object, (uint64_t) object->vo_size);
	}

	entry = vm_object_hash_entry_alloc(pager);

	vm_object_lock(object);
	lck = vm_object_hash_lock_spin(pager);
	vm_object_hash_insert(entry, object);
	vm_object_hash_unlock(lck);
	vm_object_unlock(object);

	/*
	 *	A reference was returned by
	 *	memory_object_create(), and it is
	 *	copied by vm_object_enter().
	 */

	pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);

	if (pager_object != object) {
		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
	}

	/*
	 *	Drop the reference we were passed.
	 */
	memory_object_deallocate(pager);

	vm_object_lock(object);

	/*
	 *	Release the paging reference
	 */
	vm_object_paging_end(object);
}
/*
 *	Routine:	vm_object_remove
 *	Purpose:
 *		Eliminate the pager/object association
 *		for this pager.
 *	Conditions:
 *		The object cache must be locked.
 */
__private_extern__ void
vm_object_remove(
	vm_object_t	object)
{
	memory_object_t pager;

	if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t	entry;

		entry = vm_object_hash_lookup(pager, FALSE);
		if (entry != VM_OBJECT_HASH_ENTRY_NULL)
			entry->object = VM_OBJECT_NULL;
	}
}
/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
static long	object_collapses = 0;
static long	object_bypasses  = 0;

static boolean_t	vm_object_collapse_allowed = TRUE;
static boolean_t	vm_object_bypass_allowed = TRUE;

unsigned long vm_object_collapse_encrypted = 0;

void vm_object_do_collapse_compressor(vm_object_t object,
				      vm_object_t backing_object);
void
vm_object_do_collapse_compressor(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_counters.do_collapse_compressor++;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	size = object->vo_size;

	/*
	 * Move all compressed pages from backing_object
	 * to the parent.
	 */

	for (backing_offset = object->vo_shadow_offset;
	     backing_offset < object->vo_shadow_offset + object->vo_size;
	     backing_offset += PAGE_SIZE) {
		memory_object_offset_t backing_pager_offset;

		/* find the next compressed page at or after this offset */
		backing_pager_offset = (backing_offset +
					backing_object->paging_offset);
		backing_pager_offset = vm_compressor_pager_next_compressed(
			backing_object->pager,
			backing_pager_offset);
		if (backing_pager_offset == (memory_object_offset_t) -1) {
			/* no more compressed pages */
			break;
		}
		backing_offset = (backing_pager_offset -
				  backing_object->paging_offset);

		new_offset = backing_offset - object->vo_shadow_offset;

		if (new_offset >= object->vo_size) {
			/* we're out of the scope of "object": done */
			break;
		}

		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
		    (vm_compressor_pager_state_get(object->pager,
						   (new_offset +
						    object->paging_offset)) ==
		     VM_EXTERNAL_STATE_EXISTS)) {
			/*
			 * This page already exists in object, resident or
			 * compressed.
			 * We don't need this compressed page in backing_object
			 * and it will be reclaimed when we release
			 * backing_object.
			 */
			continue;
		}

		/*
		 * backing_object has this page in the VM compressor and
		 * we need to transfer it to object.
		 */
		vm_counters.do_collapse_compressor_pages++;
		vm_compressor_pager_transfer(
			/* destination: */
			object->pager,
			(new_offset + object->paging_offset),
			/* source: */
			backing_object->pager,
			(backing_offset + backing_object->paging_offset));
	}
}
/*
 *	Routine:	vm_object_do_collapse
 *	Purpose:
 *		Collapse an object with the object backing it.
 *		Pages in the backing object are moved into the
 *		parent, and the backing object is deallocated.
 *	Conditions:
 *		Both objects and the cache are locked; the page
 *		queues are unlocked.
 */
static void
vm_object_do_collapse(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_page_t p, pp;
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	assert(object->purgable == VM_PURGABLE_DENY);
	assert(backing_object->purgable == VM_PURGABLE_DENY);

	backing_offset = object->vo_shadow_offset;
	size = object->vo_size;

	/*
	 *	Move all in-memory pages from backing_object
	 *	to the parent.  Pages that have been paged out
	 *	will be overwritten by any of the parent's
	 *	pages that shadow them.
	 */

	while (!vm_page_queue_empty(&backing_object->memq)) {

		p = (vm_page_t) vm_page_queue_first(&backing_object->memq);

		new_offset = (p->offset - backing_offset);

		assert(!p->busy || p->absent);

		/*
		 *	If the parent has a page here, or if
		 *	this page falls outside the parent,
		 *	dispose of it.
		 *
		 *	Otherwise, move it as planned.
		 */

		if (p->offset < backing_offset || new_offset >= size) {
			VM_PAGE_FREE(p);
		} else {
			/*
			 * The encryption key includes the "pager" and the
			 * "paging_offset".  These will not change during the
			 * object collapse, so we can just move an encrypted
			 * page from one object to the other in this case.
			 * We can't decrypt the page here, since we can't drop
			 * the object lock.
			 */
			if (p->encrypted) {
				vm_object_collapse_encrypted++;
			}
			pp = vm_page_lookup(object, new_offset);
			if (pp == VM_PAGE_NULL) {

				if (VM_COMPRESSOR_PAGER_STATE_GET(object,
								  new_offset)
				    == VM_EXTERNAL_STATE_EXISTS) {
					/*
					 * Parent object has this page
					 * in the VM compressor.
					 * Throw away the backing
					 * object's page.
					 */
					VM_PAGE_FREE(p);
				} else {
					/*
					 *	Parent now has no page.
					 *	Move the backing object's page
					 *	up.
					 */
					vm_page_rename(p, object, new_offset,
						       TRUE);
				}
			} else {
				assert(! pp->absent);

				/*
				 *	Parent object has a real page.
				 *	Throw away the backing object's
				 *	page.
				 */
				VM_PAGE_FREE(p);
			}
		}
	}

	if (vm_object_collapse_compressor_allowed &&
	    object->pager != MEMORY_OBJECT_NULL &&
	    backing_object->pager != MEMORY_OBJECT_NULL) {

		/* move compressed pages from backing_object to object */
		vm_object_do_collapse_compressor(object, backing_object);

	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t entry;

		assert((!object->pager_created &&
			(object->pager == MEMORY_OBJECT_NULL)) ||
		       (!backing_object->pager_created &&
			(backing_object->pager == MEMORY_OBJECT_NULL)));
		/*
		 *	Move the pager from backing_object to object.
		 *
		 *	XXX We're only using part of the paging space
		 *	for keeps now... we ought to discard the
		 *	unused portion.
		 */

		assert(!object->paging_in_progress);
		assert(!object->activity_in_progress);
		assert(!object->pager_created);
		assert(object->pager == NULL);
		object->pager = backing_object->pager;

		if (backing_object->hashed) {
			lck_mtx_t	*lck;

			lck = vm_object_hash_lock_spin(backing_object->pager);
			entry = vm_object_hash_lookup(object->pager, FALSE);
			assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
			entry->object = object;
			vm_object_hash_unlock(lck);

			object->hashed = TRUE;
		}
		object->pager_created = backing_object->pager_created;
		object->pager_control = backing_object->pager_control;
		object->pager_ready = backing_object->pager_ready;
		object->pager_initialized = backing_object->pager_initialized;
		object->paging_offset =
		    backing_object->paging_offset + backing_offset;
		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_collapse(object->pager_control,
						       object);
		}
		/* the backing_object has lost its pager: reset all fields */
		backing_object->pager_created = FALSE;
		backing_object->pager_control = NULL;
		backing_object->pager_ready = FALSE;
		backing_object->paging_offset = 0;
		backing_object->pager = NULL;
	}

	/*
	 *	Object now shadows whatever backing_object did.
	 *	Note that the reference to backing_object->shadow
	 *	moves from within backing_object to within object.
	 */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
		/* "backing_object" gave its shadow to "object" */
		backing_object->shadow = VM_OBJECT_NULL;
		backing_object->vo_shadow_offset = 0;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}
	assert((object->shadow == VM_OBJECT_NULL) ||
	       (object->shadow->copy != backing_object));

	/*
	 *	Discard backing_object.
	 *
	 *	Since the backing object has no pages, no
	 *	pager left, and no object references within it,
	 *	all that is necessary is to dispose of it.
	 */
	object_collapses++;

	assert(backing_object->ref_count == 1);
	assert(backing_object->resident_page_count == 0);
	assert(backing_object->paging_in_progress == 0);
	assert(backing_object->activity_in_progress == 0);
	assert(backing_object->shadow == VM_OBJECT_NULL);
	assert(backing_object->vo_shadow_offset == 0);

	if (backing_object->pager != MEMORY_OBJECT_NULL) {
		/* ... unless it has a pager; need to terminate pager too */
		vm_counters.do_collapse_terminate++;
		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
			vm_counters.do_collapse_terminate_failure++;
		}
		return;
	}

	assert(backing_object->pager == NULL);

	backing_object->alive = FALSE;
	vm_object_unlock(backing_object);

	XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
	    backing_object, 0,0,0,0);

#if VM_OBJECT_TRACKING
	if (vm_object_tracking_inited) {
		btlog_remove_entries_for_element(vm_object_tracking_btlog,
						 backing_object);
	}
#endif /* VM_OBJECT_TRACKING */

	vm_object_lock_destroy(backing_object);

	zfree(vm_object_zone, backing_object);
}
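
/*
 * Informal picture (not part of the original source) of what
 * vm_object_do_collapse() achieves when "object" holds the only reference
 * to "backing_object":
 *
 *	before:   object --shadow--> backing_object --shadow--> next
 *	after:    object --shadow--> next
 *
 * Resident pages (and, when both objects have pagers, compressed pages)
 * of backing_object that are not already covered by object are moved up
 * into object, and backing_object is then freed.
 */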
static void
vm_object_do_bypass(
	vm_object_t object,
	vm_object_t backing_object)
{
	/*
	 *	Make the parent shadow the next object
	 *	in the chain.
	 */

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

#if	TASK_SWAPPER
	/*
	 *	Do object reference in-line to
	 *	conditionally increment shadow's
	 *	residence count.  If object is not
	 *	resident, leave residence count
	 *	on shadow alone.
	 */
	if (backing_object->shadow != VM_OBJECT_NULL) {
		vm_object_lock(backing_object->shadow);
		vm_object_lock_assert_exclusive(backing_object->shadow);
		backing_object->shadow->ref_count++;
		if (object->res_count != 0)
			vm_object_res_reference(backing_object->shadow);
		vm_object_unlock(backing_object->shadow);
	}
#else	/* TASK_SWAPPER */
	vm_object_reference(backing_object->shadow);
#endif	/* TASK_SWAPPER */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}

	/*
	 *	Backing object might have had a copy pointer
	 *	to us.  If it did, clear it.
	 */
	if (backing_object->copy == object) {
		backing_object->copy = VM_OBJECT_NULL;
	}

	/*
	 *	Drop the reference count on backing_object.
	 *
	 *	With TASK_SWAPPER:
	 *	Since its ref_count was at least 2, it
	 *	will not vanish; so we don't need to call
	 *	vm_object_deallocate.
	 *	[with a caveat for "named" objects]
	 *
	 *	The res_count on the backing object is
	 *	conditionally decremented.  It's possible
	 *	(via vm_pageout_scan) to get here with
	 *	a "swapped" object, which has a 0 res_count,
	 *	in which case, the backing object res_count
	 *	is already down by one.
	 *
	 *	Without TASK_SWAPPER:
	 *	Don't call vm_object_deallocate unless
	 *	ref_count drops to zero.
	 *
	 *	The ref_count can drop to zero here if the
	 *	backing object could be bypassed but not
	 *	collapsed, such as when the backing object
	 *	is temporary and cachable.
	 */
	if (backing_object->ref_count > 2 ||
	    (!backing_object->named && backing_object->ref_count > 1)) {
		vm_object_lock_assert_exclusive(backing_object);
		backing_object->ref_count--;
#if	TASK_SWAPPER
		if (object->res_count != 0)
			vm_object_res_deallocate(backing_object);
		assert(backing_object->ref_count > 0);
#endif	/* TASK_SWAPPER */
		vm_object_unlock(backing_object);
	} else {

		/*
		 *	Drop locks so that we can deallocate
		 *	the backing object.
		 */

#if	TASK_SWAPPER
		if (object->res_count == 0) {
			/* XXX get a reference for the deallocate below */
			vm_object_res_reference(backing_object);
		}
#endif	/* TASK_SWAPPER */
		/*
		 * vm_object_collapse (the caller of this function) is
		 * now called from contexts that may not guarantee that a
		 * valid reference is held on the object... w/o a valid
		 * reference, it is unsafe and unwise (you will definitely
		 * regret it) to unlock the object and then retake the lock
		 * since the object may be terminated and recycled in between.
		 * The "activity_in_progress" reference will keep the object
		 * from being terminated.
		 */
		vm_object_activity_begin(object);
		vm_object_unlock(object);

		vm_object_unlock(backing_object);
		vm_object_deallocate(backing_object);

		/*
		 *	Relock object. We don't have to reverify
		 *	its state since vm_object_collapse will
		 *	do that for us as it starts at the
		 *	top of its loop.
		 */

		vm_object_lock(object);
		vm_object_activity_end(object);
	}

	object_bypasses++;
}
/*
 *	vm_object_collapse:
 *
 *	Perform an object collapse or an object bypass if appropriate.
 *	The real work of collapsing and bypassing is performed in
 *	the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *	Requires that the object be locked and the page queues be unlocked.
 */
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;

__private_extern__ void
vm_object_collapse(
	vm_object_t		object,
	vm_object_offset_t	hint_offset,
	boolean_t		can_bypass)
{
	vm_object_t		backing_object;
	unsigned int		rcount;
	unsigned int		size;
	vm_object_t		original_object;
	int			object_lock_type;
	int			backing_object_lock_type;

	vm_object_collapse_calls++;

	if (! vm_object_collapse_allowed &&
	    ! (can_bypass && vm_object_bypass_allowed)) {
		return;
	}

	XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
	    object, 0,0,0,0);

	if (object == VM_OBJECT_NULL)
		return;

	original_object = object;

	/*
	 * The top object was locked "exclusive" by the caller.
	 * In the first pass, to determine if we can collapse the shadow chain,
	 * take a "shared" lock on the shadow objects.  If we can collapse,
	 * we'll have to go down the chain again with exclusive locks.
	 */
	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
	backing_object_lock_type = OBJECT_LOCK_SHARED;

retry:
	object = original_object;
	vm_object_lock_assert_exclusive(object);

	while (TRUE) {
		vm_object_collapse_objects++;
		/*
		 *	Verify that the conditions are right for either
		 *	collapse or bypass:
		 */

		/*
		 *	There is a backing object, and
		 */

		backing_object = object->shadow;
		if (backing_object == VM_OBJECT_NULL) {
			if (object != original_object) {
				vm_object_unlock(object);
			}
			return;
		}
		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
			vm_object_lock_shared(backing_object);
		} else {
			vm_object_lock(backing_object);
		}

		/*
		 *	No pages in the object are currently
		 *	being paged out, and
		 */
		if (object->paging_in_progress != 0 ||
		    object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */

		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0 ||
		    backing_object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 * Purgeable objects are not supposed to engage in
		 * copy-on-write activities, so should not have
		 * any shadow objects or be a shadow object to another
		 * object.
		 * Collapsing a purgeable object would require some
		 * updates to the purgeable compressed ledgers.
		 */
		if (object->purgable != VM_PURGABLE_DENY ||
		    backing_object->purgable != VM_PURGABLE_DENY) {
			panic("vm_object_collapse() attempting to collapse "
			      "purgeable object: %p(%d) %p(%d)\n",
			      object, object->purgable,
			      backing_object, backing_object->purgable);
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != VM_OBJECT_NULL &&
		    backing_object->shadow->copy == backing_object) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	We can now try to either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 *
		 *	If there is exactly one reference to the backing
		 *	object, we may be able to collapse it into the
		 *	parent.
		 *
		 *	As long as one of the objects is still not known
		 *	to the pager, we can collapse them.
		 */
		if (backing_object->ref_count == 1 &&
		    (vm_object_collapse_compressor_allowed ||
		     !object->pager_created
		     || (!backing_object->pager_created)
		     ) && vm_object_collapse_allowed) {

			/*
			 * We need the exclusive lock on the VM objects.
			 */
			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
				/*
				 * We have an object and its shadow locked
				 * "shared".  We can't just upgrade the locks
				 * to "exclusive", as some other thread might
				 * also have these objects locked "shared" and
				 * attempt to upgrade one or the other to
				 * "exclusive".  The upgrades would block
				 * forever waiting for the other "shared" locks
				 * to get released.
				 * So we have to release the locks and go
				 * down the shadow chain again (since it could
				 * have changed) with "exclusive" locking.
				 */
				vm_object_unlock(backing_object);
				if (object != original_object)
					vm_object_unlock(object);
				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				goto retry;
			}

			XPR(XPR_VM_OBJECT,
			    "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
			    backing_object, object,
			    backing_object->pager,
			    backing_object->pager_control, 0);

			/*
			 *	Collapse the object with its backing
			 *	object, and try again with the object's
			 *	new backing object.
			 */

			vm_object_do_collapse(object, backing_object);
			vm_object_collapse_do_collapse++;
			continue;
		}

		/*
		 *	Collapsing the backing object was not possible
		 *	or permitted, so let's try bypassing it.
		 */

		if (! (can_bypass && vm_object_bypass_allowed)) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	If the object doesn't have all its pages present,
		 *	we have to make sure no pages in the backing object
		 *	"show through" before bypassing it.
		 */
		size = (unsigned int)atop(object->vo_size);
		rcount = object->resident_page_count;

		if (rcount != size) {
			vm_object_offset_t	offset;
			vm_object_offset_t	backing_offset;
			unsigned int		backing_rcount;

			/*
			 *	If the backing object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (backing_object->pager_created) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If the object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (object->pager_created) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			backing_offset = object->vo_shadow_offset;
			backing_rcount = backing_object->resident_page_count;

			if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
				/*
				 * we have enough pages in the backing object to guarantee that
				 * at least 1 of them must be 'uncovered' by a resident page
				 * in the object we're evaluating, so move on and
				 * try to collapse the rest of the shadow chain
				 */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	If the backing object has existence info,
			 *	we must check examine its existence info
			 *	as well.
			 */

#define EXISTS_IN_OBJECT(obj, off, rc)			\
	((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||		\
	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))

			/*
			 * Check the hint location first
			 * (since it is often the quickest way out of here).
			 */
			if (object->cow_hint != ~(vm_offset_t)0)
				hint_offset = (vm_object_offset_t)object->cow_hint;
			else
				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
					      (hint_offset - 8 * PAGE_SIZE_64) : 0;

			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
					     backing_offset, backing_rcount) &&
			    !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
				/* dependency right at the hint */
				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 * If the object's window onto the backing_object
			 * is large compared to the number of resident
			 * pages in the backing object, it makes sense to
			 * walk the backing_object's resident pages first.
			 *
			 * NOTE: Pages may be in both the existence map and/or
			 * resident, so if we don't find a dependency while
			 * walking the backing object's resident page list
			 * directly, and there is an existence map, we'll have
			 * to run the offset based 2nd pass.  Because we may
			 * have to run both passes, we need to be careful
			 * not to decrement 'rcount' in the 1st pass
			 */
			if (backing_rcount && backing_rcount < (size / 8)) {
				unsigned int rc = rcount;
				vm_page_t p;

				backing_rcount = backing_object->resident_page_count;
				p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
				do {
					offset = (p->offset - backing_offset);

					if (offset < object->vo_size &&
					    offset != hint_offset &&
					    !EXISTS_IN_OBJECT(object, offset, rc)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */

						break;
					}
					p = (vm_page_t) vm_page_queue_next(&p->listq);

				} while (--backing_rcount);
				if (backing_rcount != 0 ) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}

			/*
			 * Walk through the offsets looking for pages in the
			 * backing object that show through to the object.
			 */
			if (backing_rcount) {
				offset = hint_offset;

				while((offset =
				       (offset + PAGE_SIZE_64 < object->vo_size) ?
				       (offset + PAGE_SIZE_64) : 0) != hint_offset) {

					if (EXISTS_IN_OBJECT(backing_object, offset +
							     backing_offset, backing_rcount) &&
					    !EXISTS_IN_OBJECT(object, offset, rcount)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */
						break;
					}
				}
				if (offset != hint_offset) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}
		}

		/*
		 * We need "exclusive" locks on the 2 VM objects.
		 */
		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
			vm_object_unlock(backing_object);
			if (object != original_object)
				vm_object_unlock(object);
			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			goto retry;
		}

		/* reset the offset hint for any objects deeper in the chain */
		object->cow_hint = (vm_offset_t)0;

		/*
		 *	All interesting pages in the backing object
		 *	already live in the parent or its pager.
		 *	Thus we can bypass the backing object.
		 */

		vm_object_do_bypass(object, backing_object);
		vm_object_collapse_do_bypass++;

		/*
		 *	Try again with this object's new backing object.
		 */

		continue;
	}

	if (object != original_object) {
		vm_object_unlock(object);
	}
}
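
/*
 * Informal usage note (not part of the original source): callers hold the
 * top object locked exclusive and pass TRUE for "can_bypass" when bypassing
 * the backing object is acceptable, e.g.
 *
 *	vm_object_lock(prev_object);
 *	vm_object_collapse(prev_object, prev_offset, TRUE);
 *
 * as vm_object_coalesce() does below.
 */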
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 *		The object must not have paging_in_progress, usually
 *		guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
	vm_object_t		object,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	vm_page_t	p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */

	if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->cleaning && !p->laundry);
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) vm_page_queue_first(&object->memq);
		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
			next = (vm_page_t) vm_page_queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				assert(!p->cleaning && !p->laundry);
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object into coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object(s) must *not* be locked. The map must be locked
 *	to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
	vm_object_t			prev_object,
	vm_object_t			next_object,
	vm_object_offset_t		prev_offset,
	__unused vm_object_offset_t	next_offset,
	vm_object_size_t		prev_size,
	vm_object_size_t		next_size)
{
	vm_object_size_t	newsize;

	if (next_object != VM_OBJECT_NULL) {
		return(FALSE);
	}

	if (prev_object == VM_OBJECT_NULL) {
		return(TRUE);
	}

	XPR(XPR_VM_OBJECT,
	    "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
	    prev_object, prev_offset, prev_size, next_size, 0);

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object, prev_offset, TRUE);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use any way:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. is purgeable
	 *	. paging references (pages might be in page-list)
	 */

	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->true_share != FALSE) ||
	    (prev_object->purgable != VM_PURGABLE_DENY) ||
	    (prev_object->paging_in_progress != 0) ||
	    (prev_object->activity_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	vm_object_coalesce_count++;

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
			      prev_offset + prev_size,
			      prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->vo_size) {
		prev_object->vo_size = newsize;
	}

	vm_object_unlock(prev_object);
	return(TRUE);
}
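
/*
 * Informal example (not from the original source): map-entry allocation
 * code can extend the previous entry's anonymous object when a new,
 * adjacent allocation is made, roughly:
 *
 *	if (vm_object_coalesce(VME_OBJECT(prev_entry), VM_OBJECT_NULL,
 *			       VME_OFFSET(prev_entry),
 *			       (vm_object_offset_t) 0,
 *			       prev_entry->vme_end - prev_entry->vme_start,
 *			       new_size)) {
 *		... reuse prev_entry's object for the new range ...
 *	}
 *
 * The accessor names above are illustrative; the key point is that only
 * the first object is supplied and the second must be VM_OBJECT_NULL.
 */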
kern_return_t
vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size)
{
	ppnum_t			base_page;
	vm_object_offset_t	base_offset;

	if (!object->private)
		return KERN_FAILURE;

	base_page = phys_page;

	vm_object_lock(object);

	if (!object->phys_contiguous) {
		vm_page_t	m;

		if ((base_offset = trunc_page_64(offset)) != offset) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		base_offset += object->paging_offset;

		while (size) {
			m = vm_page_lookup(object, base_offset);

			if (m != VM_PAGE_NULL) {
				if (m->fictitious) {
					if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {

						vm_page_lockspin_queues();
						m->private = TRUE;
						vm_page_unlock_queues();

						m->fictitious = FALSE;
						VM_PAGE_SET_PHYS_PAGE(m, base_page);
					}
				} else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {

					if ( !m->private) {
						/*
						 * we'd leak a real page... that can't be right
						 */
						panic("vm_object_populate_with_private - %p not private", m);
					}
					if (m->pmapped) {
						/*
						 * pmap call to clear old mapping
						 */
						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
					}
					VM_PAGE_SET_PHYS_PAGE(m, base_page);
				}
				if (m->encrypted) {
					/*
					 * we should never see this on a fictitious or private page
					 */
					panic("vm_object_populate_with_private - %p encrypted", m);
				}

			} else {
				while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
					vm_page_more_fictitious();

				/*
				 * private normally requires lock_queues but since we
				 * are initializing the page, its not necessary here
				 */
				m->private = TRUE;
				m->fictitious = FALSE;
				VM_PAGE_SET_PHYS_PAGE(m, base_page);

				vm_page_insert(m, object, base_offset);
			}
			base_page++;			/* Go to the next physical page */
			base_offset += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		/* NOTE: we should check the original settings here */
		/* if we have a size > zero a pmap call should be made */
		/* to disable the range */

		/* shadows on contiguous memory are not allowed */
		/* we therefore can use the offset field */
		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
		object->vo_size = size;
	}
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
/*
 *	memory_object_free_from_cache:
 *
 *	Walk the vm_object cache list, removing and freeing vm_objects
 *	which are backed by the pager identified by the caller, (pager_ops).
 *	Remove up to "count" objects, if there are that many available
 *	in the cache.
 *
 *	Walk the list at most once, return the number of vm_objects
 *	actually freed.
 */

__private_extern__ kern_return_t
memory_object_free_from_cache(
	__unused host_t			host,
	__unused memory_object_pager_ops_t pager_ops,
	int				*count)
{
	int		object_released = 0;

	vm_object_t	object = VM_OBJECT_NULL;
	vm_object_t	shadow;

/*
	if(host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);
*/

 try_again:
	vm_object_cache_lock();

	queue_iterate(&vm_object_cached_list, object,
		      vm_object_t, cached_list) {
		if (object->pager &&
		    (pager_ops == object->pager->mo_pager_ops)) {
			vm_object_lock(object);
			queue_remove(&vm_object_cached_list, object,
				     vm_object_t, cached_list);
			vm_object_cached_count--;

			vm_object_cache_unlock();
			/*
			 *	Since this object is in the cache, we know
			 *	that it is initialized and has only a pager's
			 *	(implicit) reference. Take a reference to avoid
			 *	recursive deallocations.
			 */

			assert(object->pager_initialized);
			assert(object->ref_count == 0);
			vm_object_lock_assert_exclusive(object);
			object->ref_count++;

			/*
			 *	Terminate the object.
			 *	If the object had a shadow, we let
			 *	vm_object_deallocate deallocate it.
			 *	"pageout" objects have a shadow, but
			 *	maintain a "paging reference" rather
			 *	than a normal reference.
			 *	(We are careful here to limit recursion.)
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

			if ((vm_object_terminate(object) == KERN_SUCCESS)
			    && (shadow != VM_OBJECT_NULL)) {
				vm_object_deallocate(shadow);
			}

			if(object_released++ == *count)
				return KERN_SUCCESS;
			goto try_again;
		}
	}
	vm_object_cache_unlock();
	*count = object_released;

	return KERN_SUCCESS;
}
kern_return_t
memory_object_create_named(
	memory_object_t		pager,
	memory_object_offset_t	size,
	memory_object_control_t	*control)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;
	lck_mtx_t		*lck;

	*control = MEMORY_OBJECT_CONTROL_NULL;
	if (pager == MEMORY_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	lck = vm_object_hash_lock_spin(pager);
	entry = vm_object_hash_lookup(pager, FALSE);

	if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
	    (entry->object != VM_OBJECT_NULL)) {
		if (entry->object->named == TRUE)
			panic("memory_object_create_named: caller already holds the right");
	}
	vm_object_hash_unlock(lck);

	if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
		return(KERN_INVALID_OBJECT);
	}

	/* wait for object (if any) to be ready */
	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->named = TRUE;
		while (!object->pager_ready) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
		}
		*control = object->pager_control;
		vm_object_unlock(object);
	}
	return (KERN_SUCCESS);
}
/*
 *	Routine:	memory_object_recover_named [user interface]
 *	Purpose:
 *		Attempt to recover a named reference for a VM object.
 *		VM will verify that the object has not already started
 *		down the termination path, and if it has, will optionally
 *		wait for that to finish.
 *	Returns:
 *		KERN_SUCCESS - we recovered a named reference on the object
 *		KERN_FAILURE - we could not recover a reference (object dead)
 *		KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
	memory_object_control_t	control,
	boolean_t		wait_on_terminating)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}
restart:
	vm_object_lock(object);

	if (object->terminating && wait_on_terminating) {
		vm_object_wait(object,
			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			       THREAD_UNINT);
		goto restart;
	}

	if (!object->alive) {
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	if (object->named == TRUE) {
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}

	if ((object->ref_count == 0) && (!object->terminating)) {
		if (!vm_object_cache_lock_try()) {
			vm_object_unlock(object);
			goto restart;
		}
		queue_remove(&vm_object_cached_list, object,
			     vm_object_t, cached_list);
		vm_object_cached_count--;
		XPR(XPR_VM_OBJECT_CACHE,
		    "memory_object_recover_named: removing %X, head (%X, %X)\n",
		    object,
		    vm_object_cached_list.next,
		    vm_object_cached_list.prev, 0,0);

		vm_object_cache_unlock();
	}

	object->named = TRUE;
	vm_object_lock_assert_exclusive(object);
	object->ref_count++;
	vm_object_res_reference(object);
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
	return (KERN_SUCCESS);
}
/*
 *	vm_object_release_name:
 *
 *	Enforces name semantic on memory_object reference count decrement
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_create_named.
 *
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1. i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable otherwise when
 *	the reference count is zero, it is simply terminated.
 */

__private_extern__ kern_return_t
vm_object_release_name(
	vm_object_t	object,
	int		flags)
{
	vm_object_t	shadow;
	boolean_t	original_object = TRUE;

	while (object != VM_OBJECT_NULL) {

		vm_object_lock(object);

		assert(object->alive);
		if (original_object)
			assert(object->named);
		assert(object->ref_count > 0);

		/*
		 *	We have to wait for initialization before
		 *	destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		if (((object->ref_count > 1)
		     && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
		    || (object->terminating)) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		} else {
			if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
				vm_object_unlock(object);
				return KERN_SUCCESS;
			}
		}

		if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
		    (object->ref_count == 1)) {
			if (original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			/* let vm_object_deallocate push this thing into */
			/* the cache, if that it is where it is bound */
			vm_object_deallocate(object);
			return KERN_SUCCESS;
		}
		VM_OBJ_RES_DECR(object);
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

		if (object->ref_count == 1) {
			if (vm_object_terminate(object) != KERN_SUCCESS) {
				if (original_object) {
					return KERN_FAILURE;
				} else {
					return KERN_SUCCESS;
				}
			}
			if (shadow != VM_OBJECT_NULL) {
				original_object = FALSE;
				object = shadow;
				continue;
			}
			return KERN_SUCCESS;
		} else {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			assert(object->ref_count > 0);
			if (original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			return KERN_SUCCESS;
		}
	}
	/*NOTREACHED*/
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
vm_object_lock_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		prot)
{
	__unused boolean_t	should_flush;

	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    object, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	(void)vm_object_update(object,
			       offset, size, NULL, NULL, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
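
/*
 * Informal example (not from the original source): a caller that wants the
 * kernel to flush and return dirty pages in a range could issue, assuming
 * it holds a reference on "object":
 *
 *	(void) vm_object_lock_request(object,
 *				      offset,
 *				      size,
 *				      MEMORY_OBJECT_RETURN_DIRTY,
 *				      MEMORY_OBJECT_DATA_FLUSH,
 *				      VM_PROT_NO_CHANGE);
 */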
/*
 * Empty a purgeable object by grabbing the physical pages assigned to it and
 * putting them on the free queue without writing them to backing store, etc.
 * When the pages are next touched they will be demand zero-fill pages.  We
 * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
 * skip referenced/dirty pages, pages on the active queue, etc.  We're more
 * than happy to grab these since this is a purgeable object.  We mark the
 * object as "empty" after reaping its pages.
 *
 * On entry the object must be locked and it must be
 * purgeable with no delayed copies pending.
 */
void
vm_object_purge(vm_object_t object, int flags)
{
	unsigned int	object_page_count = 0;
	unsigned int	pgcount = 0;
	boolean_t	skipped_object = FALSE;

	vm_object_lock_assert_exclusive(object);

	if (object->purgable == VM_PURGABLE_DENY)
		return;

	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

	/*
	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
	 * reaping its pages.  We update vm_page_purgeable_count in bulk
	 * and we don't want vm_page_remove() to update it again for each
	 * page we reap later.
	 *
	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
	 * are all accounted for in the "volatile" ledgers, so this does not
	 * make any difference.
	 * If we transitioned directly from NONVOLATILE to EMPTY,
	 * vm_page_purgeable_count must have been updated when the object
	 * was dequeued from its volatile queue and the purgeable ledgers
	 * must have also been updated accordingly at that time (in
	 * vm_object_purgable_control()).
	 */
	if (object->purgable == VM_PURGABLE_VOLATILE) {
		unsigned int delta;

		assert(object->resident_page_count >=
		       object->wired_page_count);
		delta = (object->resident_page_count -
			 object->wired_page_count);
		if (delta != 0) {
			assert(vm_page_purgeable_count >=
			       delta);
			OSAddAtomic(-delta,
				    (SInt32 *)&vm_page_purgeable_count);
		}
		if (object->wired_page_count != 0) {
			assert(vm_page_purgeable_wired_count >=
			       object->wired_page_count);
			OSAddAtomic(-object->wired_page_count,
				    (SInt32 *)&vm_page_purgeable_wired_count);
		}
		object->purgable = VM_PURGABLE_EMPTY;
	}
	assert(object->purgable == VM_PURGABLE_EMPTY);

	object_page_count = object->resident_page_count;

	vm_object_reap_pages(object, REAP_PURGEABLE);

	if (object->pager != NULL) {

		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

		if (object->activity_in_progress == 0 &&
		    object->paging_in_progress == 0) {
			/*
			 * Also reap any memory coming from this object
			 * in the VM compressor.
			 *
			 * There are no operations in progress on the VM object
			 * and no operation can start while we're holding the
			 * VM object lock, so it's safe to reap the compressed
			 * pages and update the page counts.
			 */
			pgcount = vm_compressor_pager_get_count(object->pager);
			if (pgcount) {
				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
				vm_compressor_pager_count(object->pager,
							  -pgcount,
							  FALSE, /* shared */
							  object);
				vm_purgeable_compressed_update(object,
							       -pgcount);
			}
			if ( !(flags & C_DONT_BLOCK)) {
				assert(vm_compressor_pager_get_count(object->pager)
				       == 0);
			}
		} else {
			/*
			 * There's some kind of paging activity in progress
			 * for this object, which could result in a page
			 * being compressed or decompressed, possibly while
			 * the VM object is not locked, so it could race
			 * with us.
			 *
			 * We can't really synchronize this without possibly
			 * causing a deadlock when the compressor needs to
			 * allocate or free memory while compressing or
			 * decompressing a page from a purgeable object
			 * mapped in the kernel_map...
			 *
			 * So let's not attempt to purge the compressor
			 * pager if there's any kind of operation in
			 * progress on the VM object.
			 */
			skipped_object = TRUE;
		}
	}

	vm_object_lock_assert_exclusive(object);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
				  VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
				  object_page_count,
				  pgcount,
				  skipped_object,
				  0);
}
/*
 * vm_object_purgeable_control() allows the caller to control and investigate the
 * state of a purgeable object.  A purgeable object is created via a call to
 * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
 * never be coalesced with any other object -- even other purgeable objects --
 * and will thus always remain a distinct object.  A purgeable object has
 * special semantics when its reference count is exactly 1.  If its reference
 * count is greater than 1, then a purgeable object will behave like a normal
 * object and attempts to use this interface will result in an error return
 * of KERN_INVALID_ARGUMENT.
 *
 * A purgeable object may be put into a "volatile" state which will make the
 * object's pages eligible for being reclaimed without paging to backing
 * store if the system runs low on memory.  If the pages in a volatile
 * purgeable object are reclaimed, the purgeable object is said to have been
 * "emptied."  When a purgeable object is emptied the system will reclaim as
 * many pages from the object as it can in a convenient manner (pages already
 * en route to backing store or busy for other reasons are left as is).  When
 * a purgeable object is made volatile, its pages will generally be reclaimed
 * before other pages in the application's working set.  This semantic is
 * generally used by applications which can recreate the data in the object
 * faster than it can be paged in.  One such example might be media assets
 * which can be reread from a much faster RAID volume.
 *
 * A purgeable object may be designated as "non-volatile" which means it will
 * behave like all other objects in the system with pages being written to and
 * read from backing store as needed to satisfy system memory needs.  If the
 * object was emptied before the object was made non-volatile, that fact will
 * be returned as the old state of the purgeable object (see
 * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
 * were reclaimed as part of emptying the object will be refaulted in as
 * zero-fill on demand.  It is up to the application to note that an object
 * was emptied and recreate the object's contents if necessary.  When a
 * purgeable object is made non-volatile, its pages will generally not be paged
 * out to backing store in the immediate future.  A purgeable object may also
 * be manually emptied.
 *
 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
 * volatile purgeable object may be queried at any time.  This information may
 * be used as a control input to let the application know when the system is
 * experiencing memory pressure and is reclaiming memory.
 *
 * The specified address may be any address within the purgeable object.  If
 * the specified address does not represent any object in the target task's
 * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
 * object containing the specified address is not a purgeable object, then
 * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
 * returned.
 *
 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
 * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
 * state is used to set the new state of the purgeable object and return its
 * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
 * object is returned in the parameter state.
 *
 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
 * the non-volatile, volatile and volatile/empty states described above.
 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
 * immediately reclaim as many pages in the object as can be conveniently
 * collected (some may have already been written to backing store or be
 * busy).
 *
 * The process of making a purgeable object non-volatile and determining its
 * previous state is atomic.  Thus, if a purgeable object is made
 * VM_PURGABLE_NONVOLATILE and the old state is returned as
 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
 * completely intact and will remain so until the object is made volatile
 * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
 * was reclaimed while it was in a volatile state and its previous contents
 * have been lost.
 *
 * The object must be locked.
 */
6571 vm_object_purgable_control(
6573 vm_purgable_t control
,
6579 if (object
== VM_OBJECT_NULL
) {
6581 * Object must already be present or it can't be purgeable.
6583 return KERN_INVALID_ARGUMENT
;
6586 vm_object_lock_assert_exclusive(object
);
6589 * Get current state of the purgeable object.
6591 old_state
= object
->purgable
;
6592 if (old_state
== VM_PURGABLE_DENY
)
6593 return KERN_INVALID_ARGUMENT
;
	/* purgeable can't have delayed copies - now or in the future */
	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6600 * Execute the desired operation.
6602 if (control
== VM_PURGABLE_GET_STATE
) {
6604 return KERN_SUCCESS
;
6607 if ((*state
) & VM_PURGABLE_DEBUG_EMPTY
) {
6608 object
->volatile_empty
= TRUE
;
6610 if ((*state
) & VM_PURGABLE_DEBUG_FAULT
) {
6611 object
->volatile_fault
= TRUE
;
6614 new_state
= *state
& VM_PURGABLE_STATE_MASK
;
6615 if (new_state
== VM_PURGABLE_VOLATILE
) {
6616 if (old_state
== VM_PURGABLE_EMPTY
) {
6617 /* what's been emptied must stay empty */
6618 new_state
= VM_PURGABLE_EMPTY
;
6620 if (object
->volatile_empty
) {
6621 /* debugging mode: go straight to empty */
6622 new_state
= VM_PURGABLE_EMPTY
;
6626 switch (new_state
) {
6627 case VM_PURGABLE_DENY
:
6628 case VM_PURGABLE_NONVOLATILE
:
6629 object
->purgable
= new_state
;
6631 if (old_state
== VM_PURGABLE_VOLATILE
) {
6634 assert(object
->resident_page_count
>=
6635 object
->wired_page_count
);
6636 delta
= (object
->resident_page_count
-
6637 object
->wired_page_count
);
6639 assert(vm_page_purgeable_count
>= delta
);
6643 (SInt32
*)&vm_page_purgeable_count
);
6645 if (object
->wired_page_count
!= 0) {
6646 assert(vm_page_purgeable_wired_count
>=
6647 object
->wired_page_count
);
6648 OSAddAtomic(-object
->wired_page_count
,
6649 (SInt32
*)&vm_page_purgeable_wired_count
);
6652 vm_page_lock_queues();
6654 /* object should be on a queue */
6655 assert(object
->objq
.next
!= NULL
&&
6656 object
->objq
.prev
!= NULL
);
6657 purgeable_q_t queue
;
6660 * Move object from its volatile queue to the
6661 * non-volatile queue...
6663 queue
= vm_purgeable_object_remove(object
);
6666 if (object
->purgeable_when_ripe
) {
6667 vm_purgeable_token_delete_last(queue
);
6669 assert(queue
->debug_count_objects
>=0);
6671 vm_page_unlock_queues();
6673 if (old_state
== VM_PURGABLE_VOLATILE
||
6674 old_state
== VM_PURGABLE_EMPTY
) {
6676 * Transfer the object's pages from the volatile to
6677 * non-volatile ledgers.
6679 vm_purgeable_accounting(object
, VM_PURGABLE_VOLATILE
,
6685 case VM_PURGABLE_VOLATILE
:
6686 if (object
->volatile_fault
) {
6690 vm_page_queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
6696 refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p
));
6697 if ((refmod
& VM_MEM_MODIFIED
) &&
6699 SET_PAGE_DIRTY(p
, FALSE
);
6704 assert(old_state
!= VM_PURGABLE_EMPTY
);
6706 purgeable_q_t queue
;
6708 /* find the correct queue */
6709 if ((*state
&VM_PURGABLE_ORDERING_MASK
) == VM_PURGABLE_ORDERING_OBSOLETE
)
6710 queue
= &purgeable_queues
[PURGEABLE_Q_TYPE_OBSOLETE
];
6712 if ((*state
&VM_PURGABLE_BEHAVIOR_MASK
) == VM_PURGABLE_BEHAVIOR_FIFO
)
6713 queue
= &purgeable_queues
[PURGEABLE_Q_TYPE_FIFO
];
6715 queue
= &purgeable_queues
[PURGEABLE_Q_TYPE_LIFO
];
6718 if (old_state
== VM_PURGABLE_NONVOLATILE
||
6719 old_state
== VM_PURGABLE_EMPTY
) {
6722 if ((*state
& VM_PURGABLE_NO_AGING_MASK
) ==
6723 VM_PURGABLE_NO_AGING
) {
6724 object
->purgeable_when_ripe
= FALSE
;
6726 object
->purgeable_when_ripe
= TRUE
;
6729 if (object
->purgeable_when_ripe
) {
6730 kern_return_t result
;
6732 /* try to add token... this can fail */
6733 vm_page_lock_queues();
6735 result
= vm_purgeable_token_add(queue
);
6736 if (result
!= KERN_SUCCESS
) {
6737 vm_page_unlock_queues();
6740 vm_page_unlock_queues();
6743 assert(object
->resident_page_count
>=
6744 object
->wired_page_count
);
6745 delta
= (object
->resident_page_count
-
6746 object
->wired_page_count
);
6750 &vm_page_purgeable_count
);
6752 if (object
->wired_page_count
!= 0) {
6753 OSAddAtomic(object
->wired_page_count
,
6754 &vm_page_purgeable_wired_count
);
6757 object
->purgable
= new_state
;
6759 /* object should be on "non-volatile" queue */
6760 assert(object
->objq
.next
!= NULL
);
6761 assert(object
->objq
.prev
!= NULL
);
6763 else if (old_state
== VM_PURGABLE_VOLATILE
) {
6764 purgeable_q_t old_queue
;
6765 boolean_t purgeable_when_ripe
;
			/*
			 * if reassigning priorities / purgeable groups, we don't change the
			 * token queue.  So moving priorities will not make pages stay around longer.
			 * Reasoning is that the algorithm gives most priority to the most important
			 * object.  If a new token is added, the most important object's priority is boosted.
			 * This biases the system already for purgeable queues that move a lot.
			 * It doesn't seem more biasing is necessary in this case, where no new object is added.
			 */
			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6777 old_queue
= vm_purgeable_object_remove(object
);
6780 if ((*state
& VM_PURGABLE_NO_AGING_MASK
) ==
6781 VM_PURGABLE_NO_AGING
) {
6782 purgeable_when_ripe
= FALSE
;
6784 purgeable_when_ripe
= TRUE
;
6787 if (old_queue
!= queue
||
6788 (purgeable_when_ripe
!=
6789 object
->purgeable_when_ripe
)) {
6790 kern_return_t result
;
6792 /* Changing queue. Have to move token. */
6793 vm_page_lock_queues();
6794 if (object
->purgeable_when_ripe
) {
6795 vm_purgeable_token_delete_last(old_queue
);
6797 object
->purgeable_when_ripe
= purgeable_when_ripe
;
6798 if (object
->purgeable_when_ripe
) {
6799 result
= vm_purgeable_token_add(queue
);
6800 assert(result
==KERN_SUCCESS
); /* this should never fail since we just freed a token */
6802 vm_page_unlock_queues();
6806 vm_purgeable_object_add(object
, queue
, (*state
&VM_VOLATILE_GROUP_MASK
)>>VM_VOLATILE_GROUP_SHIFT
);
6807 if (old_state
== VM_PURGABLE_NONVOLATILE
) {
6808 vm_purgeable_accounting(object
, VM_PURGABLE_NONVOLATILE
,
6812 assert(queue
->debug_count_objects
>=0);
6817 case VM_PURGABLE_EMPTY
:
6818 if (object
->volatile_fault
) {
6822 vm_page_queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
6828 refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p
));
6829 if ((refmod
& VM_MEM_MODIFIED
) &&
6831 SET_PAGE_DIRTY(p
, FALSE
);
6836 if (old_state
== VM_PURGABLE_VOLATILE
) {
6837 purgeable_q_t old_queue
;
6839 /* object should be on a queue */
6840 assert(object
->objq
.next
!= NULL
&&
6841 object
->objq
.prev
!= NULL
);
6843 old_queue
= vm_purgeable_object_remove(object
);
6845 if (object
->purgeable_when_ripe
) {
6846 vm_page_lock_queues();
6847 vm_purgeable_token_delete_first(old_queue
);
6848 vm_page_unlock_queues();
6852 if (old_state
== VM_PURGABLE_NONVOLATILE
) {
6854 * This object's pages were previously accounted as
6855 * "non-volatile" and now need to be accounted as
6858 vm_purgeable_accounting(object
, VM_PURGABLE_NONVOLATILE
,
6861 * Set to VM_PURGABLE_EMPTY because the pages are no
6862 * longer accounted in the "non-volatile" ledger
6863 * and are also not accounted for in
6864 * "vm_page_purgeable_count".
6866 object
->purgable
= VM_PURGABLE_EMPTY
;
6869 (void) vm_object_purge(object
, 0);
6870 assert(object
->purgable
== VM_PURGABLE_EMPTY
);
6877 vm_object_lock_assert_exclusive(object
);
6879 return KERN_SUCCESS
;
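
/*
 * The semantics described above are reachable from user space through the
 * public Mach interface.  A minimal standalone sketch (a separate user-space
 * program, not part of this file) is shown below; it assumes only the public
 * mach_vm_allocate()/mach_vm_purgable_control() calls and the
 * VM_FLAGS_PURGABLE / VM_PURGABLE_* constants, everything else is illustrative.
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_purgable.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t    size = 4 * 4096;
	int               state;
	kern_return_t     kr;

	/* A purgeable region: never coalesced, starts out non-volatile. */
	kr = mach_vm_allocate(mach_task_self(), &addr, size,
	                      VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS)
		return 1;
	memset((void *)addr, 0xAB, (size_t)size);

	/* Let the kernel reclaim these pages under memory pressure. */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	                              VM_PURGABLE_SET_STATE, &state);

	/* Take it back; the returned old state tells us whether it survived. */
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	                              VM_PURGABLE_SET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY)
		printf("contents were reclaimed; must be regenerated\n");

	mach_vm_deallocate(mach_task_self(), addr, size);
	return 0;
}
#endif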
kern_return_t
vm_object_get_page_counts(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	unsigned int		*resident_page_count,
	unsigned int		*dirty_page_count)
{
	kern_return_t		kr = KERN_SUCCESS;
	boolean_t		count_dirty_pages = FALSE;
	vm_page_t		p = VM_PAGE_NULL;
	unsigned int		local_resident_count = 0;
	unsigned int		local_dirty_count = 0;
	vm_object_offset_t	cur_offset = 0;
	vm_object_offset_t	end_offset = 0;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	cur_offset = offset;
	end_offset = offset + size;

	vm_object_lock_assert_exclusive(object);

	if (dirty_page_count != NULL) {
		count_dirty_pages = TRUE;
	}

	if (resident_page_count != NULL && count_dirty_pages == FALSE) {
		/*
		 * Fast path when:
		 * - we only want the resident page count, and,
		 * - the entire object is exactly covered by the request.
		 */
		if (offset == 0 && (object->vo_size == size)) {
			*resident_page_count = object->resident_page_count;
			goto out;
		}
	}

	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {

		vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {

			if (p->offset >= cur_offset && p->offset < end_offset) {

				local_resident_count++;

				if (count_dirty_pages) {
					if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
						local_dirty_count++;
					}
				}
			}
		}
	} else {

		for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {

			p = vm_page_lookup(object, cur_offset);

			if (p != VM_PAGE_NULL) {

				local_resident_count++;

				if (count_dirty_pages) {
					if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
						local_dirty_count++;
					}
				}
			}
		}
	}

	if (resident_page_count != NULL) {
		*resident_page_count = local_resident_count;
	}

	if (dirty_page_count != NULL) {
		*dirty_page_count = local_dirty_count;
	}

out:
	return kr;
}
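
/*
 * The routine above picks between two scan strategies: iterate the resident
 * page list when it is small, or probe each page-aligned offset when the
 * resident set is larger than the requested range.  The sketch below restates
 * that decision outside the VM with made-up types (struct ex_object,
 * struct ex_page and ex_page_lookup are all hypothetical); it is illustrative only.
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <stdint.h>
#include <stddef.h>

#define EX_PG_SHIFT 12
#define EX_PG_SIZE  (1ULL << EX_PG_SHIFT)

struct ex_page {
	uint64_t        offset;
	struct ex_page *next;
};

struct ex_object {
	unsigned int    resident_pages; /* length of the 'pages' list */
	struct ex_page *pages;          /* unsorted list of resident pages */
};

/* Linear lookup stands in for the kernel's hashed vm_page_lookup(). */
static struct ex_page *
ex_page_lookup(struct ex_object *o, uint64_t off)
{
	for (struct ex_page *p = o->pages; p != NULL; p = p->next)
		if (p->offset == off)
			return p;
	return NULL;
}

/*
 * Same "pick the cheaper scan" decision as above: when the resident set is
 * no bigger than the number of pages in the range, walk the resident list;
 * otherwise probe the range one page-aligned offset at a time.
 */
static unsigned int
ex_count_resident_in_range(struct ex_object *o, uint64_t start, uint64_t len)
{
	unsigned int count = 0;

	if (o->resident_pages <= (len >> EX_PG_SHIFT)) {
		for (struct ex_page *p = o->pages; p != NULL; p = p->next)
			if (p->offset >= start && p->offset < start + len)
				count++;
	} else {
		for (uint64_t off = start; off < start + len; off += EX_PG_SIZE)
			if (ex_page_lookup(o, off) != NULL)
				count++;
	}
	return count;
}
#endif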
/*
 * vm_object_res_deallocate
 *
 * (recursively) decrement residence counts on vm objects and their shadows.
 * Called from vm_object_deallocate and when swapping out an object.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */
__private_extern__ void
vm_object_res_deallocate(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_all_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			vm_object_lock(tmp_object);
			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
			assert(object->res_count > 0);
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}
/*
 * vm_object_res_reference
 *
 * Internal function to increment residence count on a vm object
 * and its shadows.  It is called only from vm_object_reference, and
 * when swapping in a vm object, via vm_map_swap.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */
__private_extern__ void
vm_object_res_reference(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	while ((++object->res_count == 1) &&
	       (object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object)
			vm_object_unlock(object);
		object = tmp_object;
	}
	if (object != orig_object)
		vm_object_unlock(object);
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif	/* TASK_SWAPPER */
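
/*
 * Both residence-count routines above walk the shadow chain with
 * "hand-over-hand" locking: take the next object's lock before releasing the
 * current one, and never unlock the original object.  A standalone sketch of
 * the same pattern, using pthread mutexes in place of the kernel's object
 * locks (struct node and chain_walk are hypothetical names):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t lock;
	struct node    *shadow;   /* next object in the shadow chain, or NULL */
};

/*
 * 'head' is assumed to be locked by the caller and stays locked, matching
 * the convention described in the comments above.
 */
static void
chain_walk(struct node *head)
{
	struct node *cur = head;

	while (cur->shadow != NULL) {
		struct node *next = cur->shadow;

		pthread_mutex_lock(&next->lock);     /* take the next link first... */
		if (cur != head)
			pthread_mutex_unlock(&cur->lock); /* ...then drop the previous one */
		cur = next;
	}
	if (cur != head)
		pthread_mutex_unlock(&cur->lock);
}
#endif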
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
	vm_object_t	object)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	assert(object->ref_count > 0);
	vm_object_reference_locked(object);
	vm_object_unlock(object);
}
#ifdef MACH_BSD
/*
 * Scale the vm_object_cache
 * This is required to make sure that the vm_object_cache is big
 * enough to effectively cache the mapped file.
 * This is really important with UBC as all the regular file vnodes
 * have memory object associated with them.  Having this cache too
 * small results in rapid reclaim of vnodes and hurts performance a LOT!
 *
 * This is also needed as number of vnodes can be dynamically scaled.
 */
kern_return_t
adjust_vm_object_cache(
	__unused vm_size_t oval,
	__unused vm_size_t nval)
{
#if VM_OBJECT_CACHE
	vm_object_cached_max = nval;
	vm_object_cache_trim(FALSE);
#endif
	return (KERN_SUCCESS);
}
#endif /* MACH_BSD */
/*
 * vm_object_transpose
 *
 * This routine takes two VM objects of the same size and exchanges
 * their backing store.
 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
 *
 * The VM objects must not be locked by caller.
 */
unsigned int vm_object_transpose_count = 0;

kern_return_t
vm_object_transpose(
7120 vm_object_t object1
,
7121 vm_object_t object2
,
7122 vm_object_size_t transpose_size
)
7124 vm_object_t tmp_object
;
7125 kern_return_t retval
;
7126 boolean_t object1_locked
, object2_locked
;
7128 vm_object_offset_t page_offset
;
7129 lck_mtx_t
*hash_lck
;
7130 vm_object_hash_entry_t hash_entry
;
7132 tmp_object
= VM_OBJECT_NULL
;
7133 object1_locked
= FALSE
; object2_locked
= FALSE
;
7135 if (object1
== object2
||
7136 object1
== VM_OBJECT_NULL
||
7137 object2
== VM_OBJECT_NULL
) {
7139 * If the 2 VM objects are the same, there's
7140 * no point in exchanging their backing store.
7142 retval
= KERN_INVALID_VALUE
;
7147 * Since we need to lock both objects at the same time,
7148 * make sure we always lock them in the same order to
7151 if (object1
> object2
) {
7152 tmp_object
= object1
;
7154 object2
= tmp_object
;
7158 * Allocate a temporary VM object to hold object1's contents
7159 * while we copy object2 to object1.
7161 tmp_object
= vm_object_allocate(transpose_size
);
7162 vm_object_lock(tmp_object
);
7163 tmp_object
->can_persist
= FALSE
;
7167 * Grab control of the 1st VM object.
7169 vm_object_lock(object1
);
7170 object1_locked
= TRUE
;
7171 if (!object1
->alive
|| object1
->terminating
||
7172 object1
->copy
|| object1
->shadow
|| object1
->shadowed
||
7173 object1
->purgable
!= VM_PURGABLE_DENY
) {
7175 * We don't deal with copy or shadow objects (yet).
7177 retval
= KERN_INVALID_VALUE
;
	/*
	 * We're about to mess with the object's backing store and
	 * taking a "paging_in_progress" reference wouldn't be enough
	 * to prevent any paging activity on this object, so the caller should
	 * have "quiesced" the objects beforehand, via a UPL operation with
	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
	 *
	 * Wait for any paging operation to complete (but only paging, not
	 * other kind of activities not linked to the pager).  After we're
	 * satisfied that there's no more paging in progress, we keep the
	 * object locked, to guarantee that no one tries to access its pager.
	 */
	vm_object_paging_only_wait(object1, THREAD_UNINT);
7196 * Same as above for the 2nd object...
7198 vm_object_lock(object2
);
7199 object2_locked
= TRUE
;
7200 if (! object2
->alive
|| object2
->terminating
||
7201 object2
->copy
|| object2
->shadow
|| object2
->shadowed
||
7202 object2
->purgable
!= VM_PURGABLE_DENY
) {
7203 retval
= KERN_INVALID_VALUE
;
7206 vm_object_paging_only_wait(object2
, THREAD_UNINT
);
7209 if (object1
->vo_size
!= object2
->vo_size
||
7210 object1
->vo_size
!= transpose_size
) {
7212 * If the 2 objects don't have the same size, we can't
7213 * exchange their backing stores or one would overflow.
7214 * If their size doesn't match the caller's
7215 * "transpose_size", we can't do it either because the
7216 * transpose operation will affect the entire span of
7219 retval
= KERN_INVALID_VALUE
;
7225 * Transpose the lists of resident pages.
7226 * This also updates the resident_page_count and the memq_hint.
7228 if (object1
->phys_contiguous
|| vm_page_queue_empty(&object1
->memq
)) {
7230 * No pages in object1, just transfer pages
7231 * from object2 to object1. No need to go through
7232 * an intermediate object.
7234 while (!vm_page_queue_empty(&object2
->memq
)) {
7235 page
= (vm_page_t
) vm_page_queue_first(&object2
->memq
);
7236 vm_page_rename(page
, object1
, page
->offset
, FALSE
);
7238 assert(vm_page_queue_empty(&object2
->memq
));
7239 } else if (object2
->phys_contiguous
|| vm_page_queue_empty(&object2
->memq
)) {
7241 * No pages in object2, just transfer pages
7242 * from object1 to object2. No need to go through
7243 * an intermediate object.
7245 while (!vm_page_queue_empty(&object1
->memq
)) {
7246 page
= (vm_page_t
) vm_page_queue_first(&object1
->memq
);
7247 vm_page_rename(page
, object2
, page
->offset
, FALSE
);
7249 assert(vm_page_queue_empty(&object1
->memq
));
7251 /* transfer object1's pages to tmp_object */
7252 while (!vm_page_queue_empty(&object1
->memq
)) {
7253 page
= (vm_page_t
) vm_page_queue_first(&object1
->memq
);
7254 page_offset
= page
->offset
;
7255 vm_page_remove(page
, TRUE
);
7256 page
->offset
= page_offset
;
7257 vm_page_queue_enter(&tmp_object
->memq
, page
, vm_page_t
, listq
);
7259 assert(vm_page_queue_empty(&object1
->memq
));
7260 /* transfer object2's pages to object1 */
7261 while (!vm_page_queue_empty(&object2
->memq
)) {
7262 page
= (vm_page_t
) vm_page_queue_first(&object2
->memq
);
7263 vm_page_rename(page
, object1
, page
->offset
, FALSE
);
7265 assert(vm_page_queue_empty(&object2
->memq
));
7266 /* transfer tmp_object's pages to object2 */
7267 while (!vm_page_queue_empty(&tmp_object
->memq
)) {
7268 page
= (vm_page_t
) vm_page_queue_first(&tmp_object
->memq
);
7269 vm_page_queue_remove(&tmp_object
->memq
, page
,
7271 vm_page_insert(page
, object2
, page
->offset
);
7273 assert(vm_page_queue_empty(&tmp_object
->memq
));
7276 #define __TRANSPOSE_FIELD(field) \
7278 tmp_object->field = object1->field; \
7279 object1->field = object2->field; \
7280 object2->field = tmp_object->field; \
7283 /* "Lock" refers to the object not its contents */
7284 /* "size" should be identical */
7285 assert(object1
->vo_size
== object2
->vo_size
);
7286 /* "memq_hint" was updated above when transposing pages */
7287 /* "ref_count" refers to the object not its contents */
7289 /* "res_count" refers to the object not its contents */
7291 /* "resident_page_count" was updated above when transposing pages */
7292 /* "wired_page_count" was updated above when transposing pages */
7293 /* "reusable_page_count" was updated above when transposing pages */
7294 /* there should be no "copy" */
7295 assert(!object1
->copy
);
7296 assert(!object2
->copy
);
7297 /* there should be no "shadow" */
7298 assert(!object1
->shadow
);
7299 assert(!object2
->shadow
);
7300 __TRANSPOSE_FIELD(vo_shadow_offset
); /* used by phys_contiguous objects */
7301 __TRANSPOSE_FIELD(pager
);
7302 __TRANSPOSE_FIELD(paging_offset
);
7303 __TRANSPOSE_FIELD(pager_control
);
7304 /* update the memory_objects' pointers back to the VM objects */
7305 if (object1
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
7306 memory_object_control_collapse(object1
->pager_control
,
7309 if (object2
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
7310 memory_object_control_collapse(object2
->pager_control
,
7313 __TRANSPOSE_FIELD(copy_strategy
);
7314 /* "paging_in_progress" refers to the object not its contents */
7315 assert(!object1
->paging_in_progress
);
7316 assert(!object2
->paging_in_progress
);
7317 assert(object1
->activity_in_progress
);
7318 assert(object2
->activity_in_progress
);
7319 /* "all_wanted" refers to the object not its contents */
7320 __TRANSPOSE_FIELD(pager_created
);
7321 __TRANSPOSE_FIELD(pager_initialized
);
7322 __TRANSPOSE_FIELD(pager_ready
);
7323 __TRANSPOSE_FIELD(pager_trusted
);
7324 __TRANSPOSE_FIELD(can_persist
);
7325 __TRANSPOSE_FIELD(internal
);
7326 __TRANSPOSE_FIELD(temporary
);
7327 __TRANSPOSE_FIELD(private);
7328 __TRANSPOSE_FIELD(pageout
);
7329 /* "alive" should be set */
7330 assert(object1
->alive
);
7331 assert(object2
->alive
);
7332 /* "purgeable" should be non-purgeable */
7333 assert(object1
->purgable
== VM_PURGABLE_DENY
);
7334 assert(object2
->purgable
== VM_PURGABLE_DENY
);
	/* "shadowed" refers to the object not its contents */
7336 __TRANSPOSE_FIELD(purgeable_when_ripe
);
7337 __TRANSPOSE_FIELD(advisory_pageout
);
7338 __TRANSPOSE_FIELD(true_share
);
7339 /* "terminating" should not be set */
7340 assert(!object1
->terminating
);
7341 assert(!object2
->terminating
);
7342 __TRANSPOSE_FIELD(named
);
7343 /* "shadow_severed" refers to the object not its contents */
7344 __TRANSPOSE_FIELD(phys_contiguous
);
7345 __TRANSPOSE_FIELD(nophyscache
);
7346 /* "cached_list.next" points to transposed object */
7347 object1
->cached_list
.next
= (queue_entry_t
) object2
;
7348 object2
->cached_list
.next
= (queue_entry_t
) object1
;
7349 /* "cached_list.prev" should be NULL */
7350 assert(object1
->cached_list
.prev
== NULL
);
7351 assert(object2
->cached_list
.prev
== NULL
);
7352 /* "msr_q" is linked to the object not its contents */
7353 assert(queue_empty(&object1
->msr_q
));
7354 assert(queue_empty(&object2
->msr_q
));
7355 __TRANSPOSE_FIELD(last_alloc
);
7356 __TRANSPOSE_FIELD(sequential
);
7357 __TRANSPOSE_FIELD(pages_created
);
7358 __TRANSPOSE_FIELD(pages_used
);
7359 __TRANSPOSE_FIELD(scan_collisions
);
7360 __TRANSPOSE_FIELD(cow_hint
);
7362 __TRANSPOSE_FIELD(paging_object
);
7364 __TRANSPOSE_FIELD(wimg_bits
);
7365 __TRANSPOSE_FIELD(set_cache_attr
);
7366 __TRANSPOSE_FIELD(code_signed
);
7367 if (object1
->hashed
) {
7368 hash_lck
= vm_object_hash_lock_spin(object2
->pager
);
7369 hash_entry
= vm_object_hash_lookup(object2
->pager
, FALSE
);
7370 assert(hash_entry
!= VM_OBJECT_HASH_ENTRY_NULL
);
7371 hash_entry
->object
= object2
;
7372 vm_object_hash_unlock(hash_lck
);
7374 if (object2
->hashed
) {
7375 hash_lck
= vm_object_hash_lock_spin(object1
->pager
);
7376 hash_entry
= vm_object_hash_lookup(object1
->pager
, FALSE
);
7377 assert(hash_entry
!= VM_OBJECT_HASH_ENTRY_NULL
);
7378 hash_entry
->object
= object1
;
7379 vm_object_hash_unlock(hash_lck
);
7381 __TRANSPOSE_FIELD(hashed
);
7382 object1
->transposed
= TRUE
;
7383 object2
->transposed
= TRUE
;
7384 __TRANSPOSE_FIELD(mapping_in_progress
);
7385 __TRANSPOSE_FIELD(volatile_empty
);
7386 __TRANSPOSE_FIELD(volatile_fault
);
7387 __TRANSPOSE_FIELD(all_reusable
);
7388 assert(object1
->blocked_access
);
7389 assert(object2
->blocked_access
);
7390 assert(object1
->__object2_unused_bits
== 0);
7391 assert(object2
->__object2_unused_bits
== 0);
7393 /* "uplq" refers to the object not its contents (see upl_transpose()) */
7395 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.next
== NULL
));
7396 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.prev
== NULL
));
7397 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.next
== NULL
));
7398 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.prev
== NULL
));
7400 #undef __TRANSPOSE_FIELD
7402 retval
= KERN_SUCCESS
;
7408 if (tmp_object
!= VM_OBJECT_NULL
) {
7409 vm_object_unlock(tmp_object
);
7411 * Re-initialize the temporary object to avoid
7412 * deallocating a real pager.
7414 _vm_object_allocate(transpose_size
, tmp_object
);
7415 vm_object_deallocate(tmp_object
);
7416 tmp_object
= VM_OBJECT_NULL
;
7419 if (object1_locked
) {
7420 vm_object_unlock(object1
);
7421 object1_locked
= FALSE
;
7423 if (object2_locked
) {
7424 vm_object_unlock(object2
);
7425 object2_locked
= FALSE
;
7428 vm_object_transpose_count
++;
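
/*
 * The __TRANSPOSE_FIELD() macro used above swaps each field between the two
 * objects through a scratch object.  A self-contained sketch of that
 * three-way swap idiom (struct obj, SWAP_FIELD and transpose_fields are
 * hypothetical, illustrative names):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <assert.h>

struct obj {
	unsigned long size;
	void         *pager;
	int           wimg_bits;
};

/* Swap one field between two structs, using a third struct as scratch space. */
#define SWAP_FIELD(tmp, a, b, field)        \
	do {                                \
		(tmp)->field = (a)->field;  \
		(a)->field   = (b)->field;  \
		(b)->field   = (tmp)->field;\
	} while (0)

static void
transpose_fields(struct obj *o1, struct obj *o2)
{
	struct obj scratch;

	assert(o1->size == o2->size);   /* sizes must match, as in the real routine */
	SWAP_FIELD(&scratch, o1, o2, pager);
	SWAP_FIELD(&scratch, o1, o2, wimg_bits);
}
#endif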
/*
 * vm_object_cluster_size
 *
 * Determine how big a cluster we should issue an I/O for...
 *
 * Inputs:   *start == offset of page needed
 *           *length == maximum cluster pager can handle
 * Outputs:  *start == beginning offset of cluster
 *           *length == length of cluster to try
 *
 * The original *start will be encompassed by the cluster
 */
extern int speculative_reads_disabled;
extern int ignore_is_ssd;

/*
 * Try to always keep these values an even multiple of PAGE_SIZE.  We use these values
 * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
 * always be page-aligned.  The derivation could involve operations (e.g. division)
 * that could give us non-page-size aligned values if we start out with values that
 * are odd multiples of PAGE_SIZE.
 */
unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
unsigned int preheat_min_bytes = (1024 * 32);
7461 __private_extern__
void
7462 vm_object_cluster_size(vm_object_t object
, vm_object_offset_t
*start
,
7463 vm_size_t
*length
, vm_object_fault_info_t fault_info
, uint32_t *io_streaming
)
7465 vm_size_t pre_heat_size
;
7466 vm_size_t tail_size
;
7467 vm_size_t head_size
;
7468 vm_size_t max_length
;
7469 vm_size_t cluster_size
;
7470 vm_object_offset_t object_size
;
7471 vm_object_offset_t orig_start
;
7472 vm_object_offset_t target_start
;
7473 vm_object_offset_t offset
;
7474 vm_behavior_t behavior
;
7475 boolean_t look_behind
= TRUE
;
7476 boolean_t look_ahead
= TRUE
;
7477 boolean_t isSSD
= FALSE
;
7478 uint32_t throttle_limit
;
7480 int sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
7481 vm_size_t max_ph_size
;
7482 vm_size_t min_ph_size
;
7484 assert( !(*length
& PAGE_MASK
));
7485 assert( !(*start
& PAGE_MASK_64
));
	/*
	 * remember maximum length of run requested
	 */
	max_length = *length;
7492 * we'll always return a cluster size of at least
7493 * 1 page, since the original fault must always
7496 *length
= PAGE_SIZE
;
7499 if (speculative_reads_disabled
|| fault_info
== NULL
) {
7501 * no cluster... just fault the page in
7505 orig_start
= *start
;
7506 target_start
= orig_start
;
7507 cluster_size
= round_page(fault_info
->cluster_size
);
7508 behavior
= fault_info
->behavior
;
7510 vm_object_lock(object
);
7512 if (object
->pager
== MEMORY_OBJECT_NULL
)
7513 goto out
; /* pager is gone for this object, nothing more to do */
7516 vnode_pager_get_isSSD(object
->pager
, &isSSD
);
7518 min_ph_size
= round_page(preheat_min_bytes
);
7519 max_ph_size
= round_page(preheat_max_bytes
);
7525 if (min_ph_size
& PAGE_MASK_64
) {
7526 min_ph_size
= trunc_page(min_ph_size
);
7529 if (max_ph_size
& PAGE_MASK_64
) {
7530 max_ph_size
= trunc_page(max_ph_size
);
7534 if (min_ph_size
< PAGE_SIZE
)
7535 min_ph_size
= PAGE_SIZE
;
7537 if (max_ph_size
< PAGE_SIZE
)
7538 max_ph_size
= PAGE_SIZE
;
7539 else if (max_ph_size
> MAX_UPL_TRANSFER_BYTES
)
7540 max_ph_size
= MAX_UPL_TRANSFER_BYTES
;
7542 if (max_length
> max_ph_size
)
7543 max_length
= max_ph_size
;
7545 if (max_length
<= PAGE_SIZE
)
7548 if (object
->internal
)
7549 object_size
= object
->vo_size
;
7551 vnode_pager_get_object_size(object
->pager
, &object_size
);
7553 object_size
= round_page_64(object_size
);
7555 if (orig_start
>= object_size
) {
7557 * fault occurred beyond the EOF...
7558 * we need to punt w/o changing the
7563 if (object
->pages_used
> object
->pages_created
) {
7565 * must have wrapped our 32 bit counters
7568 object
->pages_used
= object
->pages_created
= 0;
7570 if ((sequential_run
= object
->sequential
)) {
7571 if (sequential_run
< 0) {
7572 sequential_behavior
= VM_BEHAVIOR_RSEQNTL
;
7573 sequential_run
= 0 - sequential_run
;
7575 sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
7582 behavior
= VM_BEHAVIOR_DEFAULT
;
7584 case VM_BEHAVIOR_DEFAULT
:
7585 if (object
->internal
&& fault_info
->user_tag
== VM_MEMORY_STACK
)
7588 if (sequential_run
>= (3 * PAGE_SIZE
)) {
7589 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7591 if (sequential_behavior
== VM_BEHAVIOR_SEQUENTIAL
)
7592 look_behind
= FALSE
;
7599 if (object
->pages_created
< (20 * (min_ph_size
>> PAGE_SHIFT
))) {
7603 pre_heat_size
= min_ph_size
;
			/*
			 * Linear growth in PH size: The maximum size is max_length...
			 * this calculation will result in a size that is neither a
			 * power of 2 nor a multiple of PAGE_SIZE... so round
			 * it up to the nearest PAGE_SIZE boundary
			 */
7611 pre_heat_size
= (max_length
* (uint64_t)object
->pages_used
) / object
->pages_created
;
7613 if (pre_heat_size
< min_ph_size
)
7614 pre_heat_size
= min_ph_size
;
7616 pre_heat_size
= round_page(pre_heat_size
);
7621 case VM_BEHAVIOR_RANDOM
:
7622 if ((pre_heat_size
= cluster_size
) <= PAGE_SIZE
)
7626 case VM_BEHAVIOR_SEQUENTIAL
:
7627 if ((pre_heat_size
= cluster_size
) == 0)
7628 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7629 look_behind
= FALSE
;
7634 case VM_BEHAVIOR_RSEQNTL
:
7635 if ((pre_heat_size
= cluster_size
) == 0)
7636 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7643 throttle_limit
= (uint32_t) max_length
;
7644 assert(throttle_limit
== max_length
);
7646 if (vnode_pager_get_throttle_io_limit(object
->pager
, &throttle_limit
) == KERN_SUCCESS
) {
7647 if (max_length
> throttle_limit
)
7648 max_length
= throttle_limit
;
7650 if (pre_heat_size
> max_length
)
7651 pre_heat_size
= max_length
;
7653 if (behavior
== VM_BEHAVIOR_DEFAULT
&& (pre_heat_size
> min_ph_size
)) {
7655 unsigned int consider_free
= vm_page_free_count
+ vm_page_cleaned_count
;
7657 if (consider_free
< vm_page_throttle_limit
) {
7658 pre_heat_size
= trunc_page(pre_heat_size
/ 16);
7659 } else if (consider_free
< vm_page_free_target
) {
7660 pre_heat_size
= trunc_page(pre_heat_size
/ 4);
7663 if (pre_heat_size
< min_ph_size
)
7664 pre_heat_size
= min_ph_size
;
7666 if (look_ahead
== TRUE
) {
7667 if (look_behind
== TRUE
) {
7669 * if we get here its due to a random access...
7670 * so we want to center the original fault address
7671 * within the cluster we will issue... make sure
7672 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7673 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7674 * necessarily an even number of pages so we need to truncate
7675 * the result to a PAGE_SIZE boundary
7677 head_size
= trunc_page(pre_heat_size
/ 2);
7679 if (target_start
> head_size
)
7680 target_start
-= head_size
;
7685 * 'target_start' at this point represents the beginning offset
7686 * of the cluster we are considering... 'orig_start' will be in
7687 * the center of this cluster if we didn't have to clip the start
7688 * due to running into the start of the file
7691 if ((target_start
+ pre_heat_size
) > object_size
)
7692 pre_heat_size
= (vm_size_t
)(round_page_64(object_size
- target_start
));
	/*
	 * at this point calculate the number of pages beyond the original fault
	 * address that we want to consider... this is guaranteed not to extend beyond
	 * the current EOF...
	 */
7698 assert((vm_size_t
)(orig_start
- target_start
) == (orig_start
- target_start
));
7699 tail_size
= pre_heat_size
- (vm_size_t
)(orig_start
- target_start
) - PAGE_SIZE
;
7701 if (pre_heat_size
> target_start
) {
7703 * since pre_heat_size is always smaller then 2^32,
7704 * if it is larger then target_start (a 64 bit value)
7705 * it is safe to clip target_start to 32 bits
7707 pre_heat_size
= (vm_size_t
) target_start
;
7711 assert( !(target_start
& PAGE_MASK_64
));
7712 assert( !(pre_heat_size
& PAGE_MASK_64
));
7714 if (pre_heat_size
<= PAGE_SIZE
)
7717 if (look_behind
== TRUE
) {
7719 * take a look at the pages before the original
7720 * faulting offset... recalculate this in case
7721 * we had to clip 'pre_heat_size' above to keep
7722 * from running past the EOF.
7724 head_size
= pre_heat_size
- tail_size
- PAGE_SIZE
;
7726 for (offset
= orig_start
- PAGE_SIZE_64
; head_size
; offset
-= PAGE_SIZE_64
, head_size
-= PAGE_SIZE
) {
7728 * don't poke below the lowest offset
7730 if (offset
< fault_info
->lo_offset
)
7733 * for external objects or internal objects w/o a pager,
7734 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
7736 if (VM_COMPRESSOR_PAGER_STATE_GET(object
, offset
) == VM_EXTERNAL_STATE_ABSENT
) {
7739 if (vm_page_lookup(object
, offset
) != VM_PAGE_NULL
) {
7741 * don't bridge resident pages
7746 *length
+= PAGE_SIZE
;
7749 if (look_ahead
== TRUE
) {
7750 for (offset
= orig_start
+ PAGE_SIZE_64
; tail_size
; offset
+= PAGE_SIZE_64
, tail_size
-= PAGE_SIZE
) {
7752 * don't poke above the highest offset
7754 if (offset
>= fault_info
->hi_offset
)
7756 assert(offset
< object_size
);
7759 * for external objects or internal objects w/o a pager,
7760 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
7762 if (VM_COMPRESSOR_PAGER_STATE_GET(object
, offset
) == VM_EXTERNAL_STATE_ABSENT
) {
7765 if (vm_page_lookup(object
, offset
) != VM_PAGE_NULL
) {
7767 * don't bridge resident pages
7771 *length
+= PAGE_SIZE
;
7775 if (*length
> max_length
)
7776 *length
= max_length
;
7778 vm_object_unlock(object
);
7780 DTRACE_VM1(clustersize
, vm_size_t
, *length
);
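
/*
 * For the random-access case above, the cluster is centered on the faulting
 * offset and then clipped so it never starts before offset 0 or runs past
 * EOF.  A standalone arithmetic sketch of that clipping (center_cluster and
 * PAGE_SZ are hypothetical; the kernel uses trunc_page()/round_page_64()):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <stdint.h>

#define PAGE_SZ 4096ULL

/*
 * Given a page-aligned faulting offset, a desired preheat size and the object
 * size, compute a page-aligned [start, start+len) window that contains the
 * fault, is roughly centered on it, and never extends past EOF.
 */
static void
center_cluster(uint64_t fault_off, uint64_t preheat, uint64_t obj_size,
	       uint64_t *start, uint64_t *len)
{
	uint64_t head  = (preheat / 2) & ~(PAGE_SZ - 1);          /* truncate to a page boundary */
	uint64_t begin = (fault_off > head) ? fault_off - head : 0;

	if (begin + preheat > obj_size)                           /* clip the tail at EOF */
		preheat = ((obj_size - begin) + PAGE_SZ - 1) & ~(PAGE_SZ - 1);

	*start = begin;
	*len   = preheat;
}
#endif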
7785 * Allow manipulation of individual page state. This is actually part of
7786 * the UPL regimen but takes place on the VM object rather than on a UPL
7792 vm_object_offset_t offset
,
7794 ppnum_t
*phys_entry
,
7799 vm_object_lock(object
);
7801 if(ops
& UPL_POP_PHYSICAL
) {
7802 if(object
->phys_contiguous
) {
7804 *phys_entry
= (ppnum_t
)
7805 (object
->vo_shadow_offset
>> PAGE_SHIFT
);
7807 vm_object_unlock(object
);
7808 return KERN_SUCCESS
;
7810 vm_object_unlock(object
);
7811 return KERN_INVALID_OBJECT
;
7814 if(object
->phys_contiguous
) {
7815 vm_object_unlock(object
);
7816 return KERN_INVALID_OBJECT
;
7820 if((dst_page
= vm_page_lookup(object
,offset
)) == VM_PAGE_NULL
) {
7821 vm_object_unlock(object
);
7822 return KERN_FAILURE
;
7825 /* Sync up on getting the busy bit */
7826 if((dst_page
->busy
|| dst_page
->cleaning
) &&
7827 (((ops
& UPL_POP_SET
) &&
7828 (ops
& UPL_POP_BUSY
)) || (ops
& UPL_POP_DUMP
))) {
7829 /* someone else is playing with the page, we will */
7831 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
7835 if (ops
& UPL_POP_DUMP
) {
7836 if (dst_page
->pmapped
== TRUE
)
7837 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page
));
7839 VM_PAGE_FREE(dst_page
);
7846 /* Get the condition of flags before requested ops */
7847 /* are undertaken */
7849 if(dst_page
->dirty
) *flags
|= UPL_POP_DIRTY
;
7850 if(dst_page
->free_when_done
) *flags
|= UPL_POP_PAGEOUT
;
7851 if(dst_page
->precious
) *flags
|= UPL_POP_PRECIOUS
;
7852 if(dst_page
->absent
) *flags
|= UPL_POP_ABSENT
;
7853 if(dst_page
->busy
) *flags
|= UPL_POP_BUSY
;
7856 /* The caller should have made a call either contingent with */
7857 /* or prior to this call to set UPL_POP_BUSY */
7858 if(ops
& UPL_POP_SET
) {
7859 /* The protection granted with this assert will */
7860 /* not be complete. If the caller violates the */
7861 /* convention and attempts to change page state */
7862 /* without first setting busy we may not see it */
7863 /* because the page may already be busy. However */
7864 /* if such violations occur we will assert sooner */
7866 assert(dst_page
->busy
|| (ops
& UPL_POP_BUSY
));
7867 if (ops
& UPL_POP_DIRTY
) {
7868 SET_PAGE_DIRTY(dst_page
, FALSE
);
7870 if (ops
& UPL_POP_PAGEOUT
) dst_page
->free_when_done
= TRUE
;
7871 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= TRUE
;
7872 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= TRUE
;
7873 if (ops
& UPL_POP_BUSY
) dst_page
->busy
= TRUE
;
7876 if(ops
& UPL_POP_CLR
) {
7877 assert(dst_page
->busy
);
7878 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= FALSE
;
7879 if (ops
& UPL_POP_PAGEOUT
) dst_page
->free_when_done
= FALSE
;
7880 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= FALSE
;
7881 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= FALSE
;
7882 if (ops
& UPL_POP_BUSY
) {
7883 dst_page
->busy
= FALSE
;
7884 PAGE_WAKEUP(dst_page
);
7888 if (dst_page
->encrypted
) {
7891 * We need to decrypt this encrypted page before the
7892 * caller can access its contents.
7893 * But if the caller really wants to access the page's
7894 * contents, they have to keep the page "busy".
7895 * Otherwise, the page could get recycled or re-encrypted
7898 if ((ops
& UPL_POP_SET
) && (ops
& UPL_POP_BUSY
) &&
7901 * The page is stable enough to be accessed by
7902 * the caller, so make sure its contents are
7905 vm_page_decrypt(dst_page
, 0);
7908 * The page is not busy, so don't bother
7909 * decrypting it, since anything could
7910 * happen to it between now and when the
7911 * caller wants to access it.
7912 * We should not give the caller access
7915 assert(!phys_entry
);
7921 * The physical page number will remain valid
7922 * only if the page is kept busy.
7923 * ENCRYPTED SWAP: make sure we don't let the
7924 * caller access an encrypted page.
7926 assert(dst_page
->busy
);
7927 assert(!dst_page
->encrypted
);
7928 *phys_entry
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
7934 vm_object_unlock(object
);
7935 return KERN_SUCCESS
;
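
/*
 * vm_object_page_op() reports the page's current flags and then applies the
 * requested UPL_POP_SET / UPL_POP_CLR bits.  The sketch below shows the same
 * report-then-set/clear bitmask pattern on a toy page structure (all names
 * here are hypothetical):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <stdbool.h>
#include <stdint.h>

enum {
	POP_DIRTY = 1 << 0,
	POP_BUSY  = 1 << 1,
	POP_SET   = 1 << 2,
	POP_CLR   = 1 << 3,
};

struct page_state {
	bool dirty;
	bool busy;
};

/* Report the flags as they were before the call, then apply set/clear ops. */
static uint32_t
toy_page_op(struct page_state *p, uint32_t ops)
{
	uint32_t before = 0;

	if (p->dirty) before |= POP_DIRTY;
	if (p->busy)  before |= POP_BUSY;

	if (ops & POP_SET) {
		if (ops & POP_DIRTY) p->dirty = true;
		if (ops & POP_BUSY)  p->busy  = true;
	}
	if (ops & POP_CLR) {
		if (ops & POP_DIRTY) p->dirty = false;
		if (ops & POP_BUSY)  p->busy  = false;
	}
	return before;
}
#endif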
/*
 * vm_object_range_op offers performance enhancement over
 * vm_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */
7952 vm_object_offset_t offset_beg
,
7953 vm_object_offset_t offset_end
,
7957 vm_object_offset_t offset
;
7960 if (offset_end
- offset_beg
> (uint32_t) -1) {
7961 /* range is too big and would overflow "*range" */
7962 return KERN_INVALID_ARGUMENT
;
7964 if (object
->resident_page_count
== 0) {
7966 if (ops
& UPL_ROP_PRESENT
) {
7969 *range
= (uint32_t) (offset_end
- offset_beg
);
7970 assert(*range
== (offset_end
- offset_beg
));
7973 return KERN_SUCCESS
;
7975 vm_object_lock(object
);
7977 if (object
->phys_contiguous
) {
7978 vm_object_unlock(object
);
7979 return KERN_INVALID_OBJECT
;
7982 offset
= offset_beg
& ~PAGE_MASK_64
;
7984 while (offset
< offset_end
) {
7985 dst_page
= vm_page_lookup(object
, offset
);
7986 if (dst_page
!= VM_PAGE_NULL
) {
7987 if (ops
& UPL_ROP_DUMP
) {
7988 if (dst_page
->busy
|| dst_page
->cleaning
) {
7990 * someone else is playing with the
7991 * page, we will have to wait
7993 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
				 * need to look the page up again since its
				 * state may have changed while we slept;
				 * it might even belong to a different object
8002 if (dst_page
->laundry
)
8003 vm_pageout_steal_laundry(dst_page
, FALSE
);
8005 if (dst_page
->pmapped
== TRUE
)
8006 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page
));
8008 VM_PAGE_FREE(dst_page
);
8010 } else if ((ops
& UPL_ROP_ABSENT
)
8011 && (!dst_page
->absent
|| dst_page
->busy
)) {
8014 } else if (ops
& UPL_ROP_PRESENT
)
8017 offset
+= PAGE_SIZE
;
8019 vm_object_unlock(object
);
8022 if (offset
> offset_end
)
8023 offset
= offset_end
;
8024 if(offset
> offset_beg
) {
8025 *range
= (uint32_t) (offset
- offset_beg
);
8026 assert(*range
== (offset
- offset_beg
));
8031 return KERN_SUCCESS
;
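
/*
 * vm_object_range_op() above scans a range page by page, stops at the first
 * page that fails the requested test, and reports how much of the range was
 * covered.  A standalone sketch of that scan-until-failure pattern
 * (range_scan and page_ok are hypothetical names):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <stdint.h>

#define PG 4096ULL

/*
 * Scan [beg, end) a page at a time and stop at the first page the predicate
 * rejects; report how many bytes were covered before stopping.
 */
static uint32_t
range_scan(uint64_t beg, uint64_t end,
	   int (*page_ok)(uint64_t off, void *ctx), void *ctx)
{
	uint64_t off;

	for (off = beg & ~(PG - 1); off < end; off += PG) {
		if (!page_ok(off, ctx))
			break;                  /* stop at the first page that fails */
	}
	if (off > end)
		off = end;                      /* don't report past the requested range */
	return (off > beg) ? (uint32_t)(off - beg) : 0;
}
#endif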
/*
 * Used to point a pager directly to a range of memory (when the pager may be associated
 * with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
 * expect that the virtual address will denote the start of a range that is physically contiguous.
 */
kern_return_t
pager_map_to_phys_contiguous(
8040 memory_object_control_t object
,
8041 memory_object_offset_t offset
,
8042 addr64_t base_vaddr
,
8046 boolean_t clobbered_private
;
8047 kern_return_t retval
;
8048 vm_object_t pager_object
;
8050 page_num
= pmap_find_phys(kernel_pmap
, base_vaddr
);
8053 retval
= KERN_FAILURE
;
8057 pager_object
= memory_object_control_to_vm_object(object
);
8059 if (!pager_object
) {
8060 retval
= KERN_FAILURE
;
8064 clobbered_private
= pager_object
->private;
8065 if (pager_object
->private != TRUE
) {
8066 vm_object_lock(pager_object
);
8067 pager_object
->private = TRUE
;
8068 vm_object_unlock(pager_object
);
8070 retval
= vm_object_populate_with_private(pager_object
, offset
, page_num
, size
);
8072 if (retval
!= KERN_SUCCESS
) {
8073 if (pager_object
->private != clobbered_private
) {
8074 vm_object_lock(pager_object
);
8075 pager_object
->private = clobbered_private
;
8076 vm_object_unlock(pager_object
);
8084 uint32_t scan_object_collision
= 0;
8087 vm_object_lock(vm_object_t object
)
8089 if (object
== vm_pageout_scan_wants_object
) {
8090 scan_object_collision
++;
8093 lck_rw_lock_exclusive(&object
->Lock
);
8094 #if DEVELOPMENT || DEBUG
8095 object
->Lock_owner
= current_thread();
8100 vm_object_lock_avoid(vm_object_t object
)
8102 if (object
== vm_pageout_scan_wants_object
) {
8103 scan_object_collision
++;
8110 _vm_object_lock_try(vm_object_t object
)
8114 retval
= lck_rw_try_lock_exclusive(&object
->Lock
);
8115 #if DEVELOPMENT || DEBUG
8117 object
->Lock_owner
= current_thread();
8123 vm_object_lock_try(vm_object_t object
)
8126 * Called from hibernate path so check before blocking.
8128 if (vm_object_lock_avoid(object
) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
8131 return _vm_object_lock_try(object
);
8135 vm_object_lock_shared(vm_object_t object
)
8137 if (vm_object_lock_avoid(object
)) {
8140 lck_rw_lock_shared(&object
->Lock
);
8144 vm_object_lock_try_shared(vm_object_t object
)
8146 if (vm_object_lock_avoid(object
)) {
8149 return (lck_rw_try_lock_shared(&object
->Lock
));
8153 vm_object_lock_upgrade(vm_object_t object
)
8156 retval
= lck_rw_lock_shared_to_exclusive(&object
->Lock
);
8157 #if DEVELOPMENT || DEBUG
8159 object
->Lock_owner
= current_thread();
8165 vm_object_unlock(vm_object_t object
)
8167 #if DEVELOPMENT || DEBUG
8168 if (object
->Lock_owner
) {
8169 if (object
->Lock_owner
!= current_thread())
8170 panic("vm_object_unlock: not owner - %p\n", object
);
8171 object
->Lock_owner
= 0;
8174 lck_rw_done(&object
->Lock
);
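
/*
 * On DEVELOPMENT/DEBUG kernels the lock routines above record the owning
 * thread so vm_object_unlock() can panic on a mismatched unlock.  A
 * standalone sketch of that owner-tracking idea using POSIX primitives
 * (struct dbg_lock and its helpers are hypothetical):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <pthread.h>
#include <assert.h>

struct dbg_lock {
	pthread_rwlock_t rw;
	pthread_t        owner;   /* valid only while held exclusively */
	int              owned;
};

static void
dbg_lock_exclusive(struct dbg_lock *l)
{
	pthread_rwlock_wrlock(&l->rw);
	l->owner = pthread_self();    /* record the writer for later sanity checks */
	l->owned = 1;
}

static void
dbg_unlock(struct dbg_lock *l)
{
	if (l->owned) {
		/* only the recorded owner may release an exclusive hold */
		assert(pthread_equal(l->owner, pthread_self()));
		l->owned = 0;
	}
	pthread_rwlock_unlock(&l->rw);
}
#endif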
8178 unsigned int vm_object_change_wimg_mode_count
= 0;
8181 * The object must be locked
8184 vm_object_change_wimg_mode(vm_object_t object
, unsigned int wimg_mode
)
8188 vm_object_lock_assert_exclusive(object
);
8190 vm_object_paging_wait(object
, THREAD_UNINT
);
8192 vm_page_queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
8195 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p
), wimg_mode
);
8197 if (wimg_mode
== VM_WIMG_USE_DEFAULT
)
8198 object
->set_cache_attr
= FALSE
;
8200 object
->set_cache_attr
= TRUE
;
8202 object
->wimg_bits
= wimg_mode
;
8204 vm_object_change_wimg_mode_count
++;
/*
 * This routine does the "relocation" of previously
 * compressed pages belonging to this object that are
 * residing in a number of compressed segments into
 * a set of compressed segments dedicated to hold
 * compressed pages belonging to this object.
 */
8217 extern void *freezer_chead
;
8218 extern char *freezer_compressor_scratch_buf
;
8219 extern int c_freezer_compression_count
;
8220 extern AbsoluteTime c_freezer_last_yield_ts
;
8222 #define MAX_FREE_BATCH 32
8223 #define FREEZER_DUTY_CYCLE_ON_MS 5
8224 #define FREEZER_DUTY_CYCLE_OFF_MS 5
8226 static int c_freezer_should_yield(void);
8230 c_freezer_should_yield()
8232 AbsoluteTime cur_time
;
8235 assert(c_freezer_last_yield_ts
);
8236 clock_get_uptime(&cur_time
);
8238 SUB_ABSOLUTETIME(&cur_time
, &c_freezer_last_yield_ts
);
8239 absolutetime_to_nanoseconds(cur_time
, &nsecs
);
8241 if (nsecs
> 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS
)
8248 vm_object_compressed_freezer_done()
8250 vm_compressor_finished_filling(&freezer_chead
);
8255 vm_object_compressed_freezer_pageout(
8259 vm_page_t local_freeq
= NULL
;
8260 int local_freed
= 0;
8261 kern_return_t retval
= KERN_SUCCESS
;
8262 int obj_resident_page_count_snapshot
= 0;
8264 assert(object
!= VM_OBJECT_NULL
);
8265 assert(object
->internal
);
8267 vm_object_lock(object
);
8269 if (!object
->pager_initialized
|| object
->pager
== MEMORY_OBJECT_NULL
) {
8271 if (!object
->pager_initialized
) {
8273 vm_object_collapse(object
, (vm_object_offset_t
) 0, TRUE
);
8275 if (!object
->pager_initialized
)
8276 vm_object_compressor_pager_create(object
);
8279 if (!object
->pager_initialized
|| object
->pager
== MEMORY_OBJECT_NULL
) {
8280 vm_object_unlock(object
);
8285 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
) {
8286 vm_object_offset_t curr_offset
= 0;
8289 * Go through the object and make sure that any
8290 * previously compressed pages are relocated into
8291 * a compressed segment associated with our "freezer_chead".
8293 while (curr_offset
< object
->vo_size
) {
8295 curr_offset
= vm_compressor_pager_next_compressed(object
->pager
, curr_offset
);
8297 if (curr_offset
== (vm_object_offset_t
) -1)
8300 retval
= vm_compressor_pager_relocate(object
->pager
, curr_offset
, &freezer_chead
);
8302 if (retval
!= KERN_SUCCESS
)
8305 curr_offset
+= PAGE_SIZE_64
;
	/*
	 * We can't hold the object lock while heading down into the compressed pager
	 * layer because we might need the kernel map lock down there to allocate new
	 * compressor data structures.  And if this same object is mapped in the kernel
	 * and there's a fault on it, then that thread will want the object lock while
	 * holding the kernel map lock.
	 *
	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
	 * we won't be stuck in an infinite loop if the same page(s) keep getting
	 * decompressed.  So we grab a snapshot of the number of pages in the object and
	 * we won't process any more than that number of pages.
	 */
8322 obj_resident_page_count_snapshot
= object
->resident_page_count
;
8324 vm_object_activity_begin(object
);
8326 while ((obj_resident_page_count_snapshot
--) && !vm_page_queue_empty(&object
->memq
)) {
8328 p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
8330 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START
, object
, local_freed
, 0, 0, 0);
8332 vm_page_lockspin_queues();
8334 if (p
->cleaning
|| p
->fictitious
|| p
->busy
|| p
->absent
|| p
->unusual
|| p
->error
|| VM_PAGE_WIRED(p
)) {
8336 vm_page_unlock_queues();
8338 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END
, object
, local_freed
, 1, 0, 0);
8340 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
8341 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
8346 if (p
->pmapped
== TRUE
) {
8347 int refmod_state
, pmap_flags
;
8349 if (p
->dirty
|| p
->precious
) {
8350 pmap_flags
= PMAP_OPTIONS_COMPRESSOR
;
8352 pmap_flags
= PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
;
8355 refmod_state
= pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p
), pmap_flags
, NULL
);
8356 if (refmod_state
& VM_MEM_MODIFIED
) {
8357 SET_PAGE_DIRTY(p
, FALSE
);
8361 if (p
->dirty
== FALSE
&& p
->precious
== FALSE
) {
8363 * Clean and non-precious page.
8365 vm_page_unlock_queues();
8368 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END
, object
, local_freed
, 2, 0, 0);
8373 vm_pageout_steal_laundry(p
, TRUE
);
8375 vm_page_queues_remove(p
, TRUE
);
8377 vm_page_unlock_queues();
8381 * In case the compressor fails to compress this page, we need it at
8382 * the back of the object memq so that we don't keep trying to process it.
8383 * Make the move here while we have the object lock held.
8386 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
8387 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
8390 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
8392 * Mark the page busy so no one messes with it while we have the object lock dropped.
8397 vm_object_activity_begin(object
);
8399 vm_object_unlock(object
);
8402 * arg3 == FALSE tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
8404 if (vm_pageout_compress_page(&freezer_chead
, freezer_compressor_scratch_buf
, p
, FALSE
) == KERN_SUCCESS
) {
8406 * page has already been un-tabled from the object via 'vm_page_remove'
8408 p
->snext
= local_freeq
;
8412 if (local_freed
>= MAX_FREE_BATCH
) {
8414 vm_page_free_list(local_freeq
, TRUE
);
8419 c_freezer_compression_count
++;
8421 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END
, object
, local_freed
, 0, 0, 0);
8423 if (local_freed
== 0 && c_freezer_should_yield()) {
8425 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS
);
8426 clock_get_uptime(&c_freezer_last_yield_ts
);
8429 vm_object_lock(object
);
8433 vm_page_free_list(local_freeq
, TRUE
);
8439 vm_object_activity_end(object
);
8441 vm_object_unlock(object
);
8443 if (c_freezer_should_yield()) {
8445 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS
);
8446 clock_get_uptime(&c_freezer_last_yield_ts
);
8450 #endif /* CONFIG_FREEZE */
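
/*
 * The freezer path above runs on a duty cycle: compress for roughly
 * FREEZER_DUTY_CYCLE_ON_MS, then yield for FREEZER_DUTY_CYCLE_OFF_MS.  A
 * standalone user-space sketch of the same pacing idea (the names and
 * constants below are illustrative, not the kernel's):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <stdint.h>
#include <time.h>

#define DUTY_ON_NS  (5ULL * 1000 * 1000)   /* run for ~5 ms ... */
#define DUTY_OFF_MS 5                      /* ... then nap for ~5 ms */

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Returns nonzero when the worker has used up its time slice and should nap. */
static int
should_yield(uint64_t last_yield_ns)
{
	return (now_ns() - last_yield_ns) > DUTY_ON_NS;
}

static void
duty_cycle_nap(uint64_t *last_yield_ns)
{
	struct timespec nap = { 0, DUTY_OFF_MS * 1000000L };

	nanosleep(&nap, NULL);
	*last_yield_ns = now_ns();     /* restart the "on" portion of the cycle */
}
#endif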
8458 struct vm_pageout_queue
*iq
;
8459 boolean_t need_unlock
= TRUE
;
8461 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT
)
8464 iq
= &vm_pageout_queue_internal
;
8466 assert(object
!= VM_OBJECT_NULL
);
8468 vm_object_lock(object
);
8470 if (!object
->internal
||
8471 object
->terminating
||
8473 vm_object_unlock(object
);
8477 if (!object
->pager_initialized
|| object
->pager
== MEMORY_OBJECT_NULL
) {
8479 if (!object
->pager_initialized
) {
8481 vm_object_collapse(object
, (vm_object_offset_t
) 0, TRUE
);
8483 if (!object
->pager_initialized
)
8484 vm_object_compressor_pager_create(object
);
8487 if (!object
->pager_initialized
|| object
->pager
== MEMORY_OBJECT_NULL
) {
8488 vm_object_unlock(object
);
8494 next
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
8496 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next
)) {
8498 next
= (vm_page_t
)vm_page_queue_next(&next
->listq
);
8500 assert(p
->vm_page_q_state
!= VM_PAGE_ON_FREE_Q
);
8502 if ((p
->vm_page_q_state
== VM_PAGE_ON_THROTTLED_Q
) ||
8503 p
->encrypted_cleaning
||
8512 * Page is already being cleaned or can't be cleaned.
8517 /* Throw to the pageout queue */
8519 vm_page_lockspin_queues();
8522 if (vm_compressor_low_on_space()) {
8523 vm_page_unlock_queues();
8527 if (VM_PAGE_Q_THROTTLED(iq
)) {
8529 iq
->pgo_draining
= TRUE
;
8531 assert_wait((event_t
) (&iq
->pgo_laundry
+ 1),
8532 THREAD_INTERRUPTIBLE
);
8533 vm_page_unlock_queues();
8534 vm_object_unlock(object
);
8536 thread_block(THREAD_CONTINUE_NULL
);
8538 vm_object_lock(object
);
8542 assert(!p
->fictitious
);
8545 assert(!p
->unusual
);
8547 assert(!VM_PAGE_WIRED(p
));
8548 assert(!p
->cleaning
);
8550 if (p
->pmapped
== TRUE
) {
8555 * Tell pmap the page should be accounted
8556 * for as "compressed" if it's been modified.
8559 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
;
8560 if (p
->dirty
|| p
->precious
) {
8562 * We already know it's been modified,
8563 * so tell pmap to account for it
8566 pmap_options
= PMAP_OPTIONS_COMPRESSOR
;
8568 refmod_state
= pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p
),
8571 if (refmod_state
& VM_MEM_MODIFIED
) {
8572 SET_PAGE_DIRTY(p
, FALSE
);
8576 if (!p
->dirty
&& !p
->precious
) {
8577 vm_page_unlock_queues();
8582 vm_page_queues_remove(p
, TRUE
);
8584 if (vm_pageout_cluster(p
, FALSE
, TRUE
))
8585 need_unlock
= FALSE
;
8587 if (need_unlock
== TRUE
)
8588 vm_page_unlock_queues();
8591 vm_object_unlock(object
);
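
/*
 * When the internal pageout queue is throttled, the routine above drops its
 * locks, blocks until the queue drains, and then starts over.  A standalone
 * sketch of that producer-side throttling pattern with a condition variable
 * (struct work_queue and enqueue_throttled are hypothetical names):
 */
#if 0 /* illustrative sketch only -- not part of this file's build */
#include <pthread.h>

struct work_queue {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	unsigned int    depth;
	unsigned int    limit;
};

/*
 * Producer-side throttling: if the queue is over its limit, wait until the
 * consumer signals that it has drained, then re-evaluate from the top.  In
 * the routine above, the object lock is dropped before blocking and re-taken
 * afterwards.
 */
static void
enqueue_throttled(struct work_queue *q, void (*submit)(void))
{
	pthread_mutex_lock(&q->lock);
	while (q->depth >= q->limit)
		pthread_cond_wait(&q->drained, &q->lock); /* lock released while waiting */
	q->depth++;
	pthread_mutex_unlock(&q->lock);

	submit();
}
#endif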
8597 vm_page_request_reprioritize(vm_object_t o
, uint64_t blkno
, uint32_t len
, int prio
)
8599 io_reprioritize_req_t req
;
8600 struct vnode
*devvp
= NULL
;
8602 if(vnode_pager_get_object_devvp(o
->pager
, (uintptr_t *)&devvp
) != KERN_SUCCESS
)
8606 * Create the request for I/O reprioritization.
8607 * We use the noblock variant of zalloc because we're holding the object
8608 * lock here and we could cause a deadlock in low memory conditions.
8610 req
= (io_reprioritize_req_t
)zalloc_noblock(io_reprioritize_req_zone
);
8615 req
->priority
= prio
;
8618 /* Insert request into the reprioritization list */
8619 IO_REPRIORITIZE_LIST_LOCK();
8620 queue_enter(&io_reprioritize_list
, req
, io_reprioritize_req_t
, io_reprioritize_list
);
8621 IO_REPRIORITIZE_LIST_UNLOCK();
8623 /* Wakeup reprioritize thread */
8624 IO_REPRIO_THREAD_WAKEUP();
8630 vm_decmp_upl_reprioritize(upl_t upl
, int prio
)
8634 io_reprioritize_req_t req
;
8635 struct vnode
*devvp
= NULL
;
8639 uint64_t *io_upl_reprio_info
;
8642 if ((upl
->flags
& UPL_TRACKED_BY_OBJECT
) == 0 || (upl
->flags
& UPL_EXPEDITE_SUPPORTED
) == 0)
	/*
	 * We don't want to perform any allocations with the upl lock held since that might
	 * result in a deadlock.  If the system is low on memory, the pageout thread would
	 * try to pageout stuff and might wait on this lock.  If we are waiting for the memory to
	 * be freed up by the pageout thread, it would be a deadlock.
	 */
8653 /* First step is just to get the size of the upl to find out how big the reprio info is */
8654 if(!upl_try_lock(upl
))
8657 if (upl
->decmp_io_upl
== NULL
) {
8658 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8663 io_upl
= upl
->decmp_io_upl
;
8664 assert((io_upl
->flags
& UPL_DECMP_REAL_IO
) != 0);
8665 io_upl_size
= io_upl
->size
;
8668 /* Now perform the allocation */
8669 io_upl_reprio_info
= (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size
/ PAGE_SIZE
));
8670 if (io_upl_reprio_info
== NULL
)
8673 /* Now again take the lock, recheck the state and grab out the required info */
8674 if(!upl_try_lock(upl
))
8677 if (upl
->decmp_io_upl
== NULL
|| upl
->decmp_io_upl
!= io_upl
) {
8678 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8682 memcpy(io_upl_reprio_info
, io_upl
->upl_reprio_info
, sizeof(uint64_t) * (io_upl_size
/ PAGE_SIZE
));
8684 /* Get the VM object for this UPL */
8685 if (io_upl
->flags
& UPL_SHADOWED
) {
8686 object
= io_upl
->map_object
->shadow
;
8688 object
= io_upl
->map_object
;
8691 /* Get the dev vnode ptr for this object */
8692 if(!object
|| !object
->pager
||
8693 vnode_pager_get_object_devvp(object
->pager
, (uintptr_t *)&devvp
) != KERN_SUCCESS
) {
8700 /* Now we have all the information needed to do the expedite */
8703 while (offset
< io_upl_size
) {
8704 blkno
= io_upl_reprio_info
[(offset
/ PAGE_SIZE
)] & UPL_REPRIO_INFO_MASK
;
8705 len
= (io_upl_reprio_info
[(offset
/ PAGE_SIZE
)] >> UPL_REPRIO_INFO_SHIFT
) & UPL_REPRIO_INFO_MASK
;
8708 * This implementation may cause some spurious expedites due to the
8709 * fact that we dont cleanup the blkno & len from the upl_reprio_info
8710 * even after the I/O is complete.
8713 if (blkno
!= 0 && len
!= 0) {
8714 /* Create the request for I/O reprioritization */
8715 req
= (io_reprioritize_req_t
)zalloc(io_reprioritize_req_zone
);
8716 assert(req
!= NULL
);
8719 req
->priority
= prio
;
8722 /* Insert request into the reprioritization list */
8723 IO_REPRIORITIZE_LIST_LOCK();
8724 queue_enter(&io_reprioritize_list
, req
, io_reprioritize_req_t
, io_reprioritize_list
);
8725 IO_REPRIORITIZE_LIST_UNLOCK();
8729 offset
+= PAGE_SIZE
;
8733 /* Wakeup reprioritize thread */
8734 IO_REPRIO_THREAD_WAKEUP();
8737 kfree(io_upl_reprio_info
, sizeof(uint64_t) * (io_upl_size
/ PAGE_SIZE
));
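
/*
 * The routine above deliberately allocates io_upl_reprio_info with the UPL
 * lock dropped, then revalidates upl->decmp_io_upl after reacquiring the
 * lock, because allocating while holding the lock could deadlock against
 * the pageout thread under memory pressure.  The general shape of that
 * pattern, stripped of the details above, is:
 *
 *	if (!upl_try_lock(upl)) return;
 *	... snapshot the I/O UPL and its size ...; upl_unlock(upl);
 *	buf = kalloc(size);			// may block safely here
 *	if (!upl_try_lock(upl)) goto out;
 *	... recheck that the I/O UPL is still the one we snapshotted ...
 *
 * Each upl_reprio_info entry packs a block number and length for one page of
 * the decompression I/O; an entry of zero simply means there is nothing to
 * expedite for that page.
 */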
void
vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	unsigned int	i, num_pages;
	int		cur_tier;

	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);

	/*
	Scan through all UPLs associated with the object to find the
	UPL containing the contended page.
	*/
	queue_iterate(&o->uplq, upl, upl_t, uplq) {
		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
			continue;
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		num_pages = (upl->size / PAGE_SIZE);

		/*
		For each page in the UPL page list, see if it matches the contended
		page and was issued as a low prio I/O.
		*/
		for (i = 0; i < num_pages; i++) {
			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
					vm_decmp_upl_reprioritize(upl, cur_tier);
					break;
				}
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
				    upl->upl_reprio_info[i], upl->upl_priority, 0);
				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
				break;
			}
		}
		/* Check if we found any hits */
		if (i != num_pages)
			break;
	}
}
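
/*
 * vm_page_handle_prio_inversion() is invoked from vm_page_sleep() below when
 * a thread is about to block on a busy page of an io_tracking object: if the
 * page belongs to an outstanding UPL issued at a lower I/O tier than the
 * waiter's, the pending I/O is re-issued at the waiter's tier, either through
 * vm_decmp_upl_reprioritize() for decompression UPLs or
 * vm_page_request_reprioritize() for ordinary ones.
 */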
wait_result_t
vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
{
	wait_result_t	ret;

	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);

	if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
		/*
		Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
		*/
		vm_page_handle_prio_inversion(o, m);
	}
	m->wanted = TRUE;
	ret = thread_sleep_vm_object(o, m, interruptible);
	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
	return ret;
}
void
io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
{
	io_reprioritize_req_t	req = NULL;

	while (1) {

		IO_REPRIORITIZE_LIST_LOCK();
		if (queue_empty(&io_reprioritize_list)) {
			IO_REPRIORITIZE_LIST_UNLOCK();
			break;
		}

		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
		IO_REPRIORITIZE_LIST_UNLOCK();

		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
		zfree(io_reprioritize_req_zone, req);
	}

	IO_REPRIO_THREAD_CONTINUATION();
}
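
/*
 * io_reprioritize_thread() drains the io_reprioritize_list built up by the
 * request producers above, issuing one reprioritized I/O per queued request
 * and freeing the request back to io_reprioritize_req_zone.  When the list is
 * empty it falls through to IO_REPRIO_THREAD_CONTINUATION(); that macro is
 * defined earlier in this file and is expected to park the thread on the
 * wakeup event and re-enter this function when IO_REPRIO_THREAD_WAKEUP() is
 * next called by a producer.
 */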