/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

boolean_t vm_object_collapse_compressor_allowed = TRUE;

struct vm_counters vm_counters;

#if VM_OBJECT_TRACKING
boolean_t vm_object_tracking_inited = FALSE;
decl_simple_lock_data(static,vm_object_tracking_lock_data);
btlog_t *vm_object_tracking_btlog;

static void
vm_object_tracking_lock(void *context)
{
	simple_lock((simple_lock_t)context);
}

static void
vm_object_tracking_unlock(void *context)
{
	simple_unlock((simple_lock_t)context);
}

void
vm_object_tracking_init(void)
{
	int vm_object_tracking;

	vm_object_tracking = 1;
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
			   sizeof (vm_object_tracking));

	if (vm_object_tracking) {
		simple_lock_init(&vm_object_tracking_lock_data, 0);
		vm_object_tracking_btlog = btlog_create(
			VM_OBJECT_TRACKING_BTDEPTH,
			vm_object_tracking_lock,
			vm_object_tracking_unlock,
			&vm_object_tracking_lock_data);
		assert(vm_object_tracking_btlog);
		vm_object_tracking_inited = TRUE;
	}
}
#endif /* VM_OBJECT_TRACKING */

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	referenced.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait for the
 *	"pager_initialized" field to be asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	attribute.
 *
 * ZZZ Continue this comment.
 */
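
/*
 * Illustrative sketch (not part of the build): how a page lookup
 * conceptually walks a shadow chain.  Names and locking are simplified;
 * the real logic lives in vm_fault_page() and vm_object_collapse().
 */
#if 0
static vm_page_t
shadow_chain_lookup_example(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL)
			return m;	/* page is resident in this object */
		/* not resident here: descend to the shadow, adjusting the offset */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;	/* bottom of the chain: zero-fill or ask the pager */
}
#endif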

/* Forward declarations for internal functions. */
static kern_return_t	vm_object_terminate(
				vm_object_t	object);

extern void		vm_object_remove(
				vm_object_t	object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_do_bypass(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_release_pager(
				memory_object_t	pager,
				boolean_t	hashed);

static zone_t		vm_object_zone;		/* vm backing store zone */

/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object			kernel_object_store;
vm_object_t				kernel_object;

static struct vm_object			compressor_object_store;
vm_object_t				compressor_object = &compressor_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object			vm_submap_object_store;

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object			vm_object_template;

unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;

#if VM_OBJECT_CACHE
/*
 *	Virtual memory objects that are not referenced by
 *	any address maps, but that are allowed to persist
 *	(an attribute specified by the associated memory manager),
 *	are kept in a queue (vm_object_cached_list).
 *
 *	When an object from this queue is referenced again,
 *	for example to make another address space mapping,
 *	it must be removed from the queue.  That is, the
 *	queue contains *only* objects with zero references.
 *
 *	The kernel may choose to terminate objects from this
 *	queue in order to reclaim storage.  The current policy
 *	is to permit a fixed maximum number of unreferenced
 *	objects (vm_object_cached_max).
 *
 *	A spin lock (accessed by routines
 *	vm_object_cache_{lock,lock_try,unlock}) governs the
 *	object cache.  It must be held when objects are
 *	added to or removed from the cache (in vm_object_terminate).
 *	The routines that acquire a reference to a virtual
 *	memory object based on one of the memory object ports
 *	must also lock the cache.
 *
 *	Ideally, the object cache should be more isolated
 *	from the reference mechanism, so that the lock need
 *	not be held to make simple references.
 */
static vm_object_t	vm_object_cache_trim(
				boolean_t called_from_vm_object_deallocate);

static void		vm_object_deactivate_all_pages(
				vm_object_t	object);

static int		vm_object_cached_high;	/* highest # cached objects */
static int		vm_object_cached_max = 512;	/* may be patched*/

#define vm_object_cache_lock()		\
		lck_mtx_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try()	\
		lck_mtx_try_lock(&vm_object_cached_lock_data)

#endif	/* VM_OBJECT_CACHE */

static queue_head_t	vm_object_cached_list;
static uint32_t		vm_object_cache_pages_freed = 0;
static uint32_t		vm_object_cache_pages_moved = 0;
static uint32_t		vm_object_cache_pages_skipped = 0;
static uint32_t		vm_object_cache_adds = 0;
static uint32_t		vm_object_cached_count = 0;
static lck_mtx_t	vm_object_cached_lock_data;
static lck_mtx_ext_t	vm_object_cached_lock_data_ext;

static uint32_t		vm_object_page_grab_failed = 0;
static uint32_t		vm_object_page_grab_skipped = 0;
static uint32_t		vm_object_page_grab_returned = 0;
static uint32_t		vm_object_page_grab_pmapped = 0;
static uint32_t		vm_object_page_grab_reactivations = 0;

#define vm_object_cache_lock_spin()	\
		lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()	\
		lck_mtx_unlock(&vm_object_cached_lock_data)

static void	vm_object_cache_remove_locked(vm_object_t);

#define	VM_OBJECT_HASH_COUNT		1024
#define	VM_OBJECT_HASH_LOCK_COUNT	512

static lck_mtx_t	vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
static lck_mtx_ext_t	vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];

static queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone	*vm_object_hash_zone;

struct vm_object_hash_entry {
	queue_chain_t		hash_link;	/* hash chain link */
	memory_object_t		pager;		/* pager we represent */
	vm_object_t		object;		/* corresponding object */
	boolean_t		waiting;	/* someone waiting for
						 * termination */
};

typedef struct vm_object_hash_entry	*vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL	((vm_object_hash_entry_t) 0)

#define VM_OBJECT_HASH_SHIFT	5
#define vm_object_hash(pager) \
	((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))

#define vm_object_lock_hash(pager) \
	((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
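
/*
 * Illustrative example (not compiled): both macros must be handed the
 * same pager reference; the shift drops low-order alignment bits before
 * the modulo selects a bucket / lock index, so the lock returned by
 * vm_object_hash_lock_spin() covers the bucket that
 * vm_object_hash_lookup() will walk.
 */
#if 0
	memory_object_t	pager;					/* hypothetical pager */
	int		bucket = vm_object_hash(pager);		/* 0 .. VM_OBJECT_HASH_COUNT - 1 */
	int		lockidx = vm_object_lock_hash(pager);	/* 0 .. VM_OBJECT_HASH_LOCK_COUNT - 1 */
#endif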

void vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry);

static void	vm_object_reap(vm_object_t object);
static void	vm_object_reap_async(vm_object_t object);
static void	vm_object_reaper_thread(void);

static lck_mtx_t	vm_object_reaper_lock_data;
static lck_mtx_ext_t	vm_object_reaper_lock_data_ext;

static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;

#define vm_object_reaper_lock()		\
		lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin()	\
		lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock()	\
		lck_mtx_unlock(&vm_object_reaper_lock_data)

#if CONFIG_IOSCHED
/* I/O Re-prioritization request list */
queue_head_t	io_reprioritize_list;
lck_spin_t	io_reprioritize_list_lock;

#define IO_REPRIORITIZE_LIST_LOCK()	\
		lck_spin_lock(&io_reprioritize_list_lock)
#define IO_REPRIORITIZE_LIST_UNLOCK()	\
		lck_spin_unlock(&io_reprioritize_list_lock)

#define MAX_IO_REPRIORITIZE_REQS	8192
zone_t		io_reprioritize_req_zone;

/* I/O Re-prioritization thread */
int io_reprioritize_wakeup = 0;
static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);

#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
#define IO_REPRIO_THREAD_CONTINUATION()					\
{									\
	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);		\
	thread_block(io_reprioritize_thread);				\
}

void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
void vm_decmp_upl_reprioritize(upl_t, int);
#endif

#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#endif

static lck_mtx_t *
vm_object_hash_lock_spin(
	memory_object_t	pager)
{
	int	index;

	index = vm_object_lock_hash(pager);

	lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);

	return (&vm_object_hashed_lock_data[index]);
}

static void
vm_object_hash_unlock(lck_mtx_t *lck)
{
	lck_mtx_unlock(lck);
}

/*
 *	vm_object_hash_lookup looks up a pager in the hashtable
 *	and returns the corresponding entry, with optional removal.
 */
static vm_object_hash_entry_t
vm_object_hash_lookup(
	memory_object_t	pager,
	boolean_t	remove_entry)
{
	queue_t			bucket;
	vm_object_hash_entry_t	entry;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t)queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t)entry)) {
		if (entry->pager == pager) {
			if (remove_entry) {
				queue_remove(bucket, entry,
					     vm_object_hash_entry_t, hash_link);
			}
			return(entry);
		}
		entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
	}
	return(VM_OBJECT_HASH_ENTRY_NULL);
}

/*
 *	vm_object_hash_enter enters the specified
 *	pager / cache object association in the hashtable.
 */
static void
vm_object_hash_insert(
	vm_object_hash_entry_t	entry,
	vm_object_t		object)
{
	queue_t		bucket;

	assert(vm_object_hash_lookup(entry->pager, FALSE) == NULL);

	bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];

	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);

	if (object->hashed) {
		/*
		 * "hashed" was pre-set on this (new) object to avoid
		 * locking issues in vm_object_enter() (can't attempt to
		 * grab the object lock while holding the hash lock as
		 * a spinlock), so no need to set it here (and no need to
		 * hold the object's lock).
		 */
	} else {
		vm_object_lock_assert_exclusive(object);
		object->hashed = TRUE;
	}

	entry->object = object;
}

static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;

	entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
	entry->pager = pager;
	entry->object = VM_OBJECT_NULL;
	entry->waiting = FALSE;

	return(entry);
}

void
vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry)
{
	zfree(vm_object_hash_zone, entry);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

__private_extern__ void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
		"vm_object_allocate, object 0x%X size 0x%X\n",
		object, size, 0,0,0);

	*object = vm_object_template;
	queue_init(&object->memq);
	queue_init(&object->msr_q);
#if UPL_DEBUG || CONFIG_IOSCHED
	queue_init(&object->uplq);
#endif
	vm_object_lock_init(object);
	object->vo_size = size;

#if VM_OBJECT_TRACKING_OP_CREATED
	if (vm_object_tracking_inited) {
		void	*bt[VM_OBJECT_TRACKING_BTDEPTH];
		int	numsaved = 0;

		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
		btlog_add_entry(vm_object_tracking_btlog,
				object,
				VM_OBJECT_TRACKING_OP_CREATED,
				bt,
				numsaved);
	}
#endif /* VM_OBJECT_TRACKING_OP_CREATED */
}

__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	register vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}

lck_grp_t		vm_object_lck_grp;
lck_grp_t		vm_object_cache_lck_grp;
lck_grp_attr_t		vm_object_lck_grp_attr;
lck_attr_t		vm_object_lck_attr;
lck_attr_t		kernel_object_lck_attr;
lck_attr_t		compressor_object_lck_attr;

/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	register int	i;

	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
			       round_page(512*1024),
			       round_page(12*1024),
			       "vm objects");
	zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);

	vm_object_init_lck_grp();

	queue_init(&vm_object_cached_list);

	lck_mtx_init_ext(&vm_object_cached_lock_data,
		&vm_object_cached_lock_data_ext,
		&vm_object_cache_lck_grp,
		&vm_object_lck_attr);

	queue_init(&vm_object_reaper_queue);

	for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
		lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
				 &vm_object_hashed_lock_data_ext[i],
				 &vm_object_lck_grp,
				 &vm_object_lck_attr);
	}
	lck_mtx_init_ext(&vm_object_reaper_lock_data,
		&vm_object_reaper_lock_data_ext,
		&vm_object_lck_grp,
		&vm_object_lck_attr);

	vm_object_hash_zone =
			zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
			      round_page(512*1024),
			      round_page(12*1024),
			      "vm object hash entries");
	zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
	zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */
	vm_object_template.memq.prev = NULL;
	vm_object_template.memq.next = NULL;
#if 0
	/*
	 * We can't call vm_object_lock_init() here because that will
	 * allocate some memory and VM is not fully initialized yet.
	 * The lock will be initialized for each allocated object in
	 * _vm_object_allocate(), so we don't need to initialize it in
	 * the vm_object_template.
	 */
	vm_object_lock_init(&vm_object_template);
#endif
	vm_object_template.vo_size = 0;
	vm_object_template.memq_hint = VM_PAGE_NULL;
	vm_object_template.ref_count = 1;
#if	TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif	/* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.wired_page_count = 0;
	vm_object_template.reusable_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.paging_in_progress = 0;
#if __LP64__
	vm_object_template.__object1_unused_bits = 0;
#endif /* __LP64__ */
	vm_object_template.activity_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.temporary = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.purgable = VM_PURGABLE_DENY;
	vm_object_template.purgeable_when_ripe = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.advisory_pageout = FALSE;
	vm_object_template.true_share = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.named = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;
	/* End bitfields */

	vm_object_template.cached_list.prev = NULL;
	vm_object_template.cached_list.next = NULL;
	vm_object_template.msr_q.prev = NULL;
	vm_object_template.msr_q.next = NULL;

	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.sequential = (vm_object_offset_t) 0;
	vm_object_template.pages_created = 0;
	vm_object_template.pages_used = 0;
	vm_object_template.scan_collisions = 0;
#if CONFIG_PHANTOM_CACHE
	vm_object_template.phantom_object_id = 0;
#endif
#if	MACH_PAGEMAP
	vm_object_template.existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */
	vm_object_template.cow_hint = ~(vm_offset_t)0;
#if	MACH_ASSERT
	vm_object_template.paging_object = VM_OBJECT_NULL;
#endif	/* MACH_ASSERT */

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
	vm_object_template.set_cache_attr = FALSE;
	vm_object_template.object_slid = FALSE;
	vm_object_template.code_signed = FALSE;
	vm_object_template.hashed = FALSE;
	vm_object_template.transposed = FALSE;
	vm_object_template.mapping_in_progress = FALSE;
	vm_object_template.phantom_isssd = FALSE;
	vm_object_template.volatile_empty = FALSE;
	vm_object_template.volatile_fault = FALSE;
	vm_object_template.all_reusable = FALSE;
	vm_object_template.blocked_access = FALSE;
	vm_object_template.__object2_unused_bits = 0;
#if CONFIG_IOSCHED || UPL_DEBUG
	vm_object_template.uplq.prev = NULL;
	vm_object_template.uplq.next = NULL;
#endif /* UPL_DEBUG */
#if	VM_PIP_DEBUG
	bzero(&vm_object_template.pip_holders,
	      sizeof (vm_object_template.pip_holders));
#endif /* VM_PIP_DEBUG */

	vm_object_template.objq.next = NULL;
	vm_object_template.objq.prev = NULL;

	vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	vm_object_template.purgeable_queue_group = 0;

	vm_object_template.vo_cache_ts = 0;

	vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;

#if DEBUG
	bzero(&vm_object_template.purgeable_owner_bt[0],
	      sizeof (vm_object_template.purgeable_owner_bt));
	vm_object_template.vo_purgeable_volatilizer = NULL;
	bzero(&vm_object_template.purgeable_volatilizer_bt[0],
	      sizeof (vm_object_template.purgeable_volatilizer_bt));
#endif /* DEBUG */
774 * Initialize the "kernel object"
777 kernel_object
= &kernel_object_store
;
780 * Note that in the following size specifications, we need to add 1 because
781 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
784 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
787 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
789 kernel_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
790 compressor_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
793 * Initialize the "submap object". Make it as large as the
794 * kernel object so that no limit is imposed on submap sizes.
797 vm_submap_object
= &vm_submap_object_store
;
798 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS
+ 1,
800 vm_submap_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
803 * Create an "extra" reference to this object so that we never
804 * try to deallocate it; zfree doesn't like to be called with
807 vm_object_reference(vm_submap_object
);
810 vm_external_module_initialize();
811 #endif /* MACH_PAGEMAP */

#if CONFIG_IOSCHED
void
vm_io_reprioritize_init(void)
{
	kern_return_t	result;
	thread_t	thread = THREAD_NULL;

	/* Initialize the I/O reprioritization subsystem */
	lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
	queue_init(&io_reprioritize_list);

	io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
					 MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
					 4096, "io_reprioritize_req");

	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create io_reprioritize_thread");
	}
}
#endif

void
vm_object_reaper_init(void)
{
	kern_return_t	kr;
	thread_t	thread;

	kr = kernel_thread_start_priority(
		(thread_continue_t) vm_object_reaper_thread,
		NULL,
		BASEPRI_PREEMPT - 1,
		&thread);
	if (kr != KERN_SUCCESS) {
		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
	}
	thread_deallocate(thread);
}

__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}

__private_extern__ void
vm_object_init_lck_grp(void)
{
	/*
	 * initialize the vm_object lock world
	 */
	lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
	lck_attr_setdefault(&vm_object_lck_attr);
	lck_attr_setdefault(&kernel_object_lck_attr);
	lck_attr_cleardebug(&kernel_object_lck_attr);
	lck_attr_setdefault(&compressor_object_lck_attr);
	lck_attr_cleardebug(&compressor_object_lck_attr);
}

#define	MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;

__private_extern__ void
vm_object_deallocate(
	register vm_object_t	object)
{
	boolean_t	retry_cache_trim = FALSE;
	uint32_t	try_failed_count = 0;
	vm_object_t	shadow = VM_OBJECT_NULL;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */

	if (object == VM_OBJECT_NULL)
		return;

	if (object == kernel_object || object == compressor_object) {
		vm_object_lock_shared(object);

		OSAddAtomic(-1, &object->ref_count);

		if (object->ref_count == 0) {
			if (object == kernel_object)
				panic("vm_object_deallocate: losing kernel_object\n");
			else
				panic("vm_object_deallocate: losing compressor_object\n");
		}
		vm_object_unlock(object);
		return;
	}

	if (object->ref_count == 2 &&
	    object->named) {
		/*
		 * This "named" object's reference count is about to
		 * drop from 2 to 1:
		 * we'll need to call memory_object_last_unmap().
		 */
	} else if (object->ref_count == 2 &&
		   object->internal &&
		   object->shadow != VM_OBJECT_NULL) {
		/*
		 * This internal object's reference count is about to
		 * drop from 2 to 1 and it has a shadow object:
		 * we'll want to try and collapse this object with its
		 * shadow.
		 */
	} else if (object->ref_count >= 2) {
		UInt32		original_ref_count;
		volatile UInt32	*ref_count_p;
		Boolean		atomic_swap;

		/*
		 * The object currently looks like it is not being
		 * kept alive solely by the reference we're about to release.
		 * Let's try and release our reference without taking
		 * all the locks we would need if we had to terminate the
		 * object (cache lock + exclusive object lock).
		 * Lock the object "shared" to make sure we don't race with
		 * anyone holding it "exclusive".
		 */
		vm_object_lock_shared(object);
		ref_count_p = (volatile UInt32 *) &object->ref_count;
		original_ref_count = object->ref_count;
		/*
		 * Test again as "ref_count" could have changed.
		 * "named" shouldn't change.
		 */
		if (original_ref_count == 2 &&
		    object->named) {
			/* need to take slow path for m_o_last_unmap() */
			atomic_swap = FALSE;
		} else if (original_ref_count == 2 &&
			   object->internal &&
			   object->shadow != VM_OBJECT_NULL) {
			/* need to take slow path for vm_object_collapse() */
			atomic_swap = FALSE;
		} else if (original_ref_count < 2) {
			/* need to take slow path for vm_object_terminate() */
			atomic_swap = FALSE;
		} else {
			/* try an atomic update with the shared lock */
			atomic_swap = OSCompareAndSwap(
				original_ref_count,
				original_ref_count - 1,
				(UInt32 *) &object->ref_count);
			if (atomic_swap == FALSE) {
				vm_object_deallocate_shared_swap_failures++;
				/* fall back to the slow path... */
			}
		}

		vm_object_unlock(object);

		if (atomic_swap) {
			/*
			 * ref_count was updated atomically !
			 */
			vm_object_deallocate_shared_successes++;
			return;
		}

		/*
		 * Someone else updated the ref_count at the same
		 * time and we lost the race.  Fall back to the usual
		 * slow but safe path...
		 */
		vm_object_deallocate_shared_failures++;
	}

	while (object != VM_OBJECT_NULL) {

		vm_object_lock(object);

		assert(object->ref_count > 0);

		/*
		 *	If the object has a named reference, and only
		 *	that reference would remain, inform the pager
		 *	about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_mapping_wait(object, THREAD_UNINT);
				vm_object_mapping_begin(object);
				vm_object_unlock(object);

				memory_object_last_unmap(pager);

				vm_object_lock(object);
				vm_object_mapping_end(object);
			}
			assert(object->ref_count > 0);
		}

		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call      */
		/* terminate again.  */

		if ((object->ref_count > 1) || object->terminating) {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			vm_object_res_deallocate(object);

			if (object->ref_count == 1 &&
			    object->shadow != VM_OBJECT_NULL) {
				/*
				 * There's only one reference left on this
				 * VM object.  We can't tell if it's a valid
				 * one (from a mapping for example) or if this
				 * object is just part of a possibly stale and
				 * useless shadow chain.
				 * We would like to try and collapse it into
				 * its parent, but we don't have any pointers
				 * back to this parent object.
				 * But we can try and collapse this object with
				 * its own shadows, in case these are useless
				 * too...
				 * We can't bypass this object though, since we
				 * don't know if this last reference on it is
				 * meaningful or not.
				 */
				vm_object_collapse(object, 0, FALSE);
			}
			vm_object_unlock(object);
#if VM_OBJECT_CACHE
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
#endif
			return;
		}

		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

#if VM_OBJECT_CACHE
		/*
		 *	If this object can persist, then enter it in
		 *	the cache. Otherwise, terminate it.
		 *
		 *	NOTE:  Only permanent objects are cached, and
		 *	permanent objects cannot have shadows.  This
		 *	affects the residence counting logic in a minor
		 *	way (can do it in-line, mostly).
		 */

		if ((object->can_persist) && (object->alive)) {
			/*
			 *	Now it is safe to decrement reference count,
			 *	and to return if reference count is > 0.
			 */

			vm_object_lock_assert_exclusive(object);
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);

				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				     VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	Remove shadow now if we don't
			 *	want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Enter the object onto the queue of
			 *	cached objects, and deactivate
			 *	all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
		      "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
				object,
				vm_object_cached_list.next,
				vm_object_cached_list.prev,0,0);

			vm_object_unlock(object);

			try_failed_count = 0;
			for (;;) {
				vm_object_cache_lock();

				/*
				 * if we try to take a regular lock here
				 * we risk deadlocking against someone
				 * holding a lock on this object while
				 * trying to vm_object_deallocate a different
				 * object
				 */
				if (vm_object_lock_try(object))
					break;
				vm_object_cache_unlock();
				try_failed_count++;

				mutex_pause(try_failed_count);  /* wait a bit */
			}
			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cache_unlock();

			vm_object_deactivate_all_pages(object);
			vm_object_unlock(object);

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	If we have a shadow that we need
			 *	to deallocate, do so now, remembering
			 *	to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Trim the cache. If the cache trim
			 *	returns with a shadow for us to deallocate,
			 *	then remember to retry the cache trim
			 *	when we are done deallocating the shadow.
			 *	Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;
		} else
#endif	/* VM_OBJECT_CACHE */
		{
			/*
			 *	This object is not cachable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
	 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
			    object, object->resident_page_count,
			    object->paging_in_progress,
			    (void *)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */
			/*
			 *	Terminate this object. If it had a shadow,
			 *	then deallocate it; otherwise, if we need
			 *	to retry a cache trim, do so now; otherwise,
			 *	we are done. "pageout" objects have a shadow,
			 *	but maintain a "paging reference" rather
			 *	than a normal reference.
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

			if (vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
#if VM_OBJECT_CACHE
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
#endif
			return;
		}
	}
	assert(! retry_cache_trim);
}

vm_page_t
vm_object_page_grab(
	vm_object_t	object)
{
	vm_page_t	p, next_p;
	int		p_limit = 0;
	int		p_skipped = 0;

	vm_object_lock_assert_exclusive(object);

	next_p = (vm_page_t)queue_first(&object->memq);
	p_limit = MIN(50, object->resident_page_count);

	while (!queue_end(&object->memq, (queue_entry_t)next_p) && --p_limit > 0) {

		p = next_p;
		next_p = (vm_page_t)queue_next(&next_p->listq);

		if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
			goto move_page_in_obj;

		if (p->pmapped || p->dirty || p->precious) {
			vm_page_lockspin_queues();

			if (p->pmapped) {
				int refmod_state;

				vm_object_page_grab_pmapped++;

				if (p->reference == FALSE || p->dirty == FALSE) {
					refmod_state = pmap_get_refmod(p->phys_page);

					if (refmod_state & VM_MEM_REFERENCED)
						p->reference = TRUE;
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}
				}
				if (p->dirty == FALSE && p->precious == FALSE) {
					refmod_state = pmap_disconnect(p->phys_page);

					if (refmod_state & VM_MEM_REFERENCED)
						p->reference = TRUE;
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}

					if (p->dirty == FALSE)
						goto take_page;
				}
			}
			if (p->inactive && p->reference == TRUE) {
				vm_page_activate(p);

				VM_STAT_INCR(reactivations);
				vm_object_page_grab_reactivations++;
			}
			vm_page_unlock_queues();
move_page_in_obj:
			queue_remove(&object->memq, p, vm_page_t, listq);
			queue_enter(&object->memq, p, vm_page_t, listq);

			p_skipped++;
			continue;
		}
		vm_page_lockspin_queues();
take_page:
		vm_page_free_prepare_queues(p);
		vm_object_page_grab_returned++;
		vm_object_page_grab_skipped += p_skipped;

		vm_page_unlock_queues();

		vm_page_free_prepare_object(p, TRUE);

		return (p);
	}
	vm_object_page_grab_skipped += p_skipped;
	vm_object_page_grab_failed++;

	return (VM_PAGE_NULL);
}

#define EVICT_PREPARE_LIMIT	64
#define EVICT_AGE		10

static	clock_sec_t	vm_object_cache_aging_ts = 0;

static void
vm_object_cache_remove_locked(
	vm_object_t	object)
{
	queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;

	vm_object_cached_count--;
}

void
vm_object_cache_remove(
	vm_object_t	object)
{
	vm_object_cache_lock_spin();

	if (object->objq.next || object->objq.prev)
		vm_object_cache_remove_locked(object);

	vm_object_cache_unlock();
}

void
vm_object_cache_add(
	vm_object_t	object)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	if (object->resident_page_count == 0)
		return;
	clock_get_system_nanotime(&sec, &nsec);

	vm_object_cache_lock_spin();

	if (object->objq.next == NULL && object->objq.prev == NULL) {
		queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
		object->vo_cache_ts = sec + EVICT_AGE;
		object->vo_cache_pages_to_scan = object->resident_page_count;

		vm_object_cached_count++;
		vm_object_cache_adds++;
	}
	vm_object_cache_unlock();
}

int
vm_object_cache_evict(
	int	num_to_evict,
	int	max_objects_to_examine)
{
	vm_object_t	object = VM_OBJECT_NULL;
	vm_object_t	next_obj = VM_OBJECT_NULL;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	vm_page_t	p;
	vm_page_t	next_p;
	int		object_cnt = 0;
	vm_page_t	ep_array[EVICT_PREPARE_LIMIT];
	int		ep_count;
	int		ep_limit;
	int		ep_index;
	int		ep_freed = 0;
	int		ep_moved = 0;
	uint32_t	ep_skipped = 0;
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/*
	 * do a couple of quick checks to see if it's
	 * worthwhile grabbing the lock
	 */
	if (queue_empty(&vm_object_cached_list)) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return (0);
	}
	clock_get_system_nanotime(&sec, &nsec);

	/*
	 * the object on the head of the queue has not
	 * yet sufficiently aged
	 */
	if (sec < vm_object_cache_aging_ts) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return (0);
	}
	/*
	 * don't need the queue lock to find
	 * and lock an object on the cached list
	 */
	vm_page_unlock_queues();

	vm_object_cache_lock_spin();

	for (;;) {
		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);

		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {

			object = next_obj;
			next_obj = (vm_object_t)queue_next(&next_obj->objq);

			if (sec < object->vo_cache_ts) {
				KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);

				vm_object_cache_aging_ts = object->vo_cache_ts;
				object = VM_OBJECT_NULL;
				break;
			}
			if (!vm_object_lock_try_scan(object)) {
				/*
				 * just skip over this guy for now... if we find
				 * an object to steal pages from, we'll revist in a bit...
				 * hopefully, the lock will have cleared
				 */
				KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);

				object = VM_OBJECT_NULL;
				continue;
			}
			if (queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
				/*
				 * this case really shouldn't happen, but it's not fatal
				 * so deal with it... if we don't remove the object from
				 * the list, we'll never move past it.
				 */
				KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);

				vm_object_cache_remove_locked(object);
				vm_object_unlock(object);
				object = VM_OBJECT_NULL;
				continue;
			}
			/*
			 * we have a locked object with pages...
			 * time to start harvesting
			 */
			break;
		}
		vm_object_cache_unlock();

		if (object == VM_OBJECT_NULL)
			break;

		/*
		 * object is locked at this point and
		 * has resident pages
		 */
		next_p = (vm_page_t)queue_first(&object->memq);

		/*
		 * break the page scan into 2 pieces to minimize the time spent
		 * behind the page queue lock...
		 * the list of pages on these unused objects is likely to be cold
		 * w/r to the cpu cache which increases the time to scan the list
		 * tenfold...  and we may have a 'run' of pages we can't utilize that
		 * needs to be skipped over...
		 */
		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
			ep_limit = EVICT_PREPARE_LIMIT;
		ep_count = 0;

		while (!queue_end(&object->memq, (queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {

			p = next_p;
			next_p = (vm_page_t)queue_next(&next_p->listq);

			object->vo_cache_pages_to_scan--;

			if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
				queue_remove(&object->memq, p, vm_page_t, listq);
				queue_enter(&object->memq, p, vm_page_t, listq);

				ep_skipped++;
				continue;
			}
			if (p->wpmapped || p->dirty || p->precious) {
				queue_remove(&object->memq, p, vm_page_t, listq);
				queue_enter(&object->memq, p, vm_page_t, listq);

				pmap_clear_reference(p->phys_page);
			}
			ep_array[ep_count++] = p;
		}
		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);

		vm_page_lockspin_queues();

		for (ep_index = 0; ep_index < ep_count; ep_index++) {

			p = ep_array[ep_index];

			if (p->wpmapped || p->dirty || p->precious) {
				p->reference = FALSE;
				p->no_cache = FALSE;

				/*
				 * we've already filtered out pages that are in the laundry
				 * so if we get here, this page can't be on the pageout queue
				 */
				assert(!p->pageout_queue);

				vm_page_queues_remove(p);
				vm_page_enqueue_inactive(p, TRUE);

				ep_moved++;
			} else {
#if CONFIG_PHANTOM_CACHE
				vm_phantom_cache_add_ghost(p);
#endif
				vm_page_free_prepare_queues(p);

				assert(p->pageq.next == NULL && p->pageq.prev == NULL);
				/*
				 * Add this page to our list of reclaimed pages,
				 * to be freed later.
				 */
				p->pageq.next = (queue_entry_t) local_free_q;
				local_free_q = p;

				ep_freed++;
			}
		}
		vm_page_unlock_queues();

		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);

		if (local_free_q) {
			vm_page_free_list(local_free_q, TRUE);
			local_free_q = VM_PAGE_NULL;
		}
		if (object->vo_cache_pages_to_scan == 0) {
			KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);

			vm_object_cache_remove(object);

			KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
		}
		/*
		 * done with this object
		 */
		vm_object_unlock(object);
		object = VM_OBJECT_NULL;

		/*
		 * at this point, we are not holding any locks
		 */
		if ((ep_freed + ep_moved) >= num_to_evict) {
			/*
			 * we've reached our target for the
			 * number of pages to evict
			 */
			break;
		}
		vm_object_cache_lock_spin();
	}
	/*
	 * put the page queues lock back to the caller's
	 * idea of it
	 */
	vm_page_lock_queues();

	vm_object_cache_pages_freed += ep_freed;
	vm_object_cache_pages_moved += ep_moved;
	vm_object_cache_pages_skipped += ep_skipped;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
	return (ep_freed);
}

/*
 *	Check to see whether we really need to trim
 *	down the cache. If so, remove an object from
 *	the cache, terminate it, and repeat.
 *
 *	Called with, and returns with, cache lock unlocked.
 */
static vm_object_t
vm_object_cache_trim(
	boolean_t called_from_vm_object_deallocate)
{
	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	for (;;) {

		/*
		 *	If we no longer need to trim the cache,
		 *	then we are done.
		 */
		if (vm_object_cached_count <= vm_object_cached_max)
			return VM_OBJECT_NULL;

		vm_object_cache_lock();
		if (vm_object_cached_count <= vm_object_cached_max) {
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}

		/*
		 *	We must trim down the cache, so remove
		 *	the first object in the cache.
		 */
		XPR(XPR_VM_OBJECT,
		"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
			vm_object_cached_list.next,
			vm_object_cached_list.prev, 0, 0, 0);

		object = (vm_object_t) queue_first(&vm_object_cached_list);
		if(object == (vm_object_t) &vm_object_cached_list) {
			/* something's wrong with the calling parameter or */
			/* the value of vm_object_cached_count, just fix   */
			/* and return */
			if(vm_object_cached_max < 0)
				vm_object_cached_max = 0;
			vm_object_cached_count = 0;
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}
		vm_object_lock(object);
		queue_remove(&vm_object_cached_list, object, vm_object_t,
			     cached_list);
		vm_object_cached_count--;

		vm_object_cache_unlock();
		/*
		 *	Since this object is in the cache, we know
		 *	that it is initialized and has no references.
		 *	Take a reference to avoid recursive deallocations.
		 */

		assert(object->pager_initialized);
		assert(object->ref_count == 0);
		vm_object_lock_assert_exclusive(object);
		object->ref_count++;

		/*
		 *	Terminate the object.
		 *	If the object had a shadow, we let vm_object_deallocate
		 *	deallocate it. "pageout" objects have a shadow, but
		 *	maintain a "paging reference" rather than a normal
		 *	reference.
		 *	(We are careful here to limit recursion.)
		 */
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

		if(vm_object_terminate(object) != KERN_SUCCESS)
			continue;

		if (shadow != VM_OBJECT_NULL) {
			if (called_from_vm_object_deallocate) {
				return shadow;
			} else {
				vm_object_deallocate(shadow);
			}
		}
	}
}

/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if its found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
		object, object->ref_count, 0, 0, 0);

	if (!object->pageout && (!object->temporary || object->can_persist) &&
	    (object->pager != NULL || object->shadow_severed)) {
		/*
		 * Clear pager_trusted bit so that the pages get yanked
		 * out of the object instead of cleaned in place.  This
		 * prevents a deadlock in XMM and makes more sense anyway.
		 */
		object->pager_trusted = FALSE;

		vm_object_reap_pages(object, REAP_TERMINATE);
	}
	/*
	 *	Make sure the object isn't already being terminated
	 */
	if (object->terminating) {
		vm_object_lock_assert_exclusive(object);
		object->ref_count--;
		assert(object->ref_count > 0);
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Did somebody get a reference to the object while we were
	 *	cleaning it?
	 */
	if (object->ref_count != 1) {
		vm_object_lock_assert_exclusive(object);
		object->ref_count--;
		assert(object->ref_count > 0);
		vm_object_res_deallocate(object);
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Make sure no one can look us up now.
	 */

	object->terminating = TRUE;
	object->alive = FALSE;

	if ( !object->internal && (object->objq.next || object->objq.prev))
		vm_object_cache_remove(object);

	if (object->hashed) {
		lck_mtx_t	*lck;

		lck = vm_object_hash_lock_spin(object->pager);
		vm_object_remove(object);
		vm_object_hash_unlock(lck);
	}
	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy. The reference we hold on the shadow must be dropped
	 *	by our caller.
	 */
	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
	    !(object->pageout)) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = VM_OBJECT_NULL;
		vm_object_unlock(shadow_object);
	}

	if (object->paging_in_progress != 0 ||
	    object->activity_in_progress != 0) {
		/*
		 * There are still some paging_in_progress references
		 * on this object, meaning that there are some paging
		 * or other I/O operations in progress for this VM object.
		 * Such operations take some paging_in_progress references
		 * up front to ensure that the object doesn't go away, but
		 * they may also need to acquire a reference on the VM object,
		 * to map it in kernel space, for example.  That means that
		 * they may end up releasing the last reference on the VM
		 * object, triggering its termination, while still holding
		 * paging_in_progress references.  Waiting for these
		 * pending paging_in_progress references to go away here would
		 * deadlock.
		 *
		 * To avoid deadlocking, we'll let the vm_object_reaper_thread
		 * complete the VM object termination if it still holds
		 * paging_in_progress references at this point.
		 *
		 * No new paging_in_progress should appear now that the
		 * VM object is "terminating" and not "alive".
		 */
		vm_object_reap_async(object);
		vm_object_unlock(object);
		/*
		 * Return KERN_FAILURE to let the caller know that we
		 * haven't completed the termination and it can't drop this
		 * object's reference on its shadow object yet.
		 * The reaper thread will take care of that once it has
		 * completed this object's termination.
		 */
		return KERN_FAILURE;
	}
	/*
	 * complete the VM object termination
	 */
	vm_object_reap(object);
	object = VM_OBJECT_NULL;

	/*
	 * the object lock was released by vm_object_reap()
	 *
	 * KERN_SUCCESS means that this object has been terminated
	 * and no longer needs its shadow object but still holds a
	 * reference on it.
	 * The caller is responsible for dropping that reference.
	 * We can't call vm_object_deallocate() here because that
	 * would create a recursion.
	 */
	return KERN_SUCCESS;
}

/*
 * vm_object_reap():
 *
 * Complete the termination of a VM object after it's been marked
 * as "terminating" and "!alive" by vm_object_terminate().
 *
 * The VM object must be locked by caller.
 * The lock will be released on return and the VM object is no longer valid.
 */
static void
vm_object_reap(
	vm_object_t	object)
{
	memory_object_t		pager;

	vm_object_lock_assert_exclusive(object);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);

	vm_object_reap_count++;

	/*
	 * Disown this purgeable object to cleanup its owner's purgeable
	 * ledgers.  We need to do this before disconnecting the object
	 * from its pager, to properly account for compressed pages.
	 */
	if (object->internal &&
	    object->purgable != VM_PURGABLE_DENY) {
		vm_purgeable_accounting(object,
					object->purgable,
					TRUE); /* disown */
	}

	pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;

	if (pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);

	object->ref_count--;
#if	TASK_SWAPPER
	assert(object->res_count == 0);
#endif	/* TASK_SWAPPER */

	assert (object->ref_count == 0);

	/*
	 * remove from purgeable queue if it's on
	 */
	if (object->internal) {
		task_t owner;

		owner = object->vo_purgeable_owner;

		VM_OBJECT_UNWIRED(object);

		if (object->purgable == VM_PURGABLE_DENY) {
			/* not purgeable: nothing to do */
		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
			purgeable_q_t queue;

			assert(object->vo_purgeable_owner == NULL);

			queue = vm_purgeable_object_remove(object);
			assert(queue);

			if (object->purgeable_when_ripe) {
				/*
				 * Must take page lock for this -
				 * using it to protect token queue
				 */
				vm_page_lock_queues();
				vm_purgeable_token_delete_first(queue);

				assert(queue->debug_count_objects>=0);
				vm_page_unlock_queues();
			}

			/*
			 * Update "vm_page_purgeable_count" in bulk and mark
			 * object as VM_PURGABLE_EMPTY to avoid updating
			 * "vm_page_purgeable_count" again in vm_page_remove()
			 * when reaping the pages.
			 */
			unsigned int delta;
			assert(object->resident_page_count >=
			       object->wired_page_count);
			delta = (object->resident_page_count -
				 object->wired_page_count);

			assert(vm_page_purgeable_count >= delta);
			OSAddAtomic(-delta,
				    (SInt32 *)&vm_page_purgeable_count);

			if (object->wired_page_count != 0) {
				assert(vm_page_purgeable_wired_count >=
				       object->wired_page_count);
				OSAddAtomic(-object->wired_page_count,
					    (SInt32 *)&vm_page_purgeable_wired_count);
			}
			object->purgable = VM_PURGABLE_EMPTY;
		}
		else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
			 object->purgable == VM_PURGABLE_EMPTY) {
			/* remove from nonvolatile queue */
			assert(object->vo_purgeable_owner == TASK_NULL);
			vm_purgeable_nonvolatile_dequeue(object);
		} else {
			panic("object %p in unexpected purgeable state 0x%x\n",
			      object, object->purgable);
		}
		assert(object->objq.next == NULL);
		assert(object->objq.prev == NULL);
	}

	/*
	 *	Clean or free the pages, as appropriate.
	 *	It is possible for us to find busy/absent pages,
	 *	if some faults on this object were aborted.
	 */
	if (object->pageout) {
		assert(object->shadow != VM_OBJECT_NULL);

		vm_pageout_object_terminate(object);

	} else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {

		vm_object_reap_pages(object, REAP_REAP);
	}
	assert(queue_empty(&object->memq));
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->ref_count == 0);

	/*
	 * If the pager has not already been released by
	 * vm_object_destroy, we need to terminate it and
	 * release our reference to it here.
	 */
	if (pager != MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		vm_object_release_pager(pager, object->hashed);
		vm_object_lock(object);
	}

	/* kick off anyone waiting on terminating */
	object->terminating = FALSE;
	vm_object_paging_begin(object);
	vm_object_paging_end(object);
	vm_object_unlock(object);

#if	MACH_PAGEMAP
	vm_external_destroy(object->existence_map, object->vo_size);
#endif	/* MACH_PAGEMAP */

	object->shadow = VM_OBJECT_NULL;

#if VM_OBJECT_TRACKING
	if (vm_object_tracking_inited) {
		btlog_remove_entries_for_element(vm_object_tracking_btlog,
						 object);
	}
#endif /* VM_OBJECT_TRACKING */

	vm_object_lock_destroy(object);
	/*
	 *	Free the space for the object.
	 */
	zfree(vm_object_zone, object);
	object = VM_OBJECT_NULL;
}

unsigned int vm_max_batch = 256;

#define V_O_R_MAX_BATCH 128

#define BATCH_LIMIT(max)	(vm_max_batch >= max ? max : vm_max_batch)


#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)		\
	MACRO_BEGIN							\
	if (_local_free_q) {						\
		if (do_disconnect) {					\
			vm_page_t m;					\
			for (m = _local_free_q;				\
			     m != VM_PAGE_NULL;				\
			     m = (vm_page_t) m->pageq.next) {		\
				if (m->pmapped) {			\
					pmap_disconnect(m->phys_page);	\
				}					\
			}						\
		}							\
		vm_page_free_list(_local_free_q, TRUE);			\
		_local_free_q = VM_PAGE_NULL;				\
	}								\
	MACRO_END
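
/*
 * Illustrative usage (mirrors the calls in vm_object_reap_pages() below):
 * pages are chained through pageq.next onto a local list while the page
 * queues lock is held, then handed to vm_page_free_list() in one batch;
 * "do_disconnect" forces a pmap_disconnect() of each mapped page first.
 */
#if 0
	vm_page_t	local_free_q = VM_PAGE_NULL;

	/* ... unlink pages from the object and push them onto local_free_q ... */

	VM_OBJ_REAP_FREELIST(local_free_q, disconnect_on_release);
#endif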
void
vm_object_reap_pages(
	vm_object_t	object,
	int		reap_type)
{
	vm_page_t	p;
	vm_page_t	next;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	int		loop_count;
	boolean_t	disconnect_on_release;
	pmap_flush_context	pmap_flush_context_storage;

	if (reap_type == REAP_DATA_FLUSH) {
		/*
		 * We need to disconnect pages from all pmaps before
		 * releasing them to the free list
		 */
		disconnect_on_release = TRUE;
	} else {
		/*
		 * Either the caller has already disconnected the pages
		 * from all pmaps, or we disconnect them here as we add
		 * them to out local list of pages to be released.
		 * No need to re-disconnect them when we release the pages
		 * to the free list.
		 */
		disconnect_on_release = FALSE;
	}

restart_after_sleep:
	if (queue_empty(&object->memq))
		return;
	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

	if (reap_type == REAP_PURGEABLE)
		pmap_flush_context_init(&pmap_flush_context_storage);

	vm_page_lockspin_queues();

	next = (vm_page_t)queue_first(&object->memq);

	while (!queue_end(&object->memq, (queue_entry_t)next)) {

		p = next;
		next = (vm_page_t)queue_next(&next->listq);

		if (--loop_count == 0) {

			vm_page_unlock_queues();

			if (local_free_q) {

				if (reap_type == REAP_PURGEABLE) {
					pmap_flush(&pmap_flush_context_storage);
					pmap_flush_context_init(&pmap_flush_context_storage);
				}
				/*
				 * Free the pages we reclaimed so far
				 * and take a little break to avoid
				 * hogging the page queue lock too long
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
						     disconnect_on_release);
			} else
				mutex_pause(0);

			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);

			vm_page_lockspin_queues();
		}
		if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {

			if (p->busy || p->cleaning) {

				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
						     disconnect_on_release);

				PAGE_SLEEP(object, p, THREAD_UNINT);

				goto restart_after_sleep;
			}
			if (p->laundry)
				vm_pageout_steal_laundry(p, TRUE);
		}
		switch (reap_type) {

		case REAP_DATA_FLUSH:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * this is an odd case... perhaps we should
				 * zero-fill this page since we're conceptually
				 * tossing its data at this point, but leaving
				 * it on the object to honor the 'wire' contract
				 */
				continue;
			}
			break;

		case REAP_PURGEABLE:
			if (VM_PAGE_WIRED(p)) {
				/*
				 * can't purge a wired page
				 */
				vm_page_purged_wired++;
				continue;
			}
			if (p->laundry && !p->busy && !p->cleaning) {
				vm_pageout_steal_laundry(p, TRUE);
			}
			if (p->cleaning || p->laundry || p->absent) {
				/*
				 * page is being acted upon,
				 * so don't mess with it
				 */
				vm_page_purged_others++;
				continue;
			}
			if (p->busy) {
				/*
				 * We can't reclaim a busy page but we can
				 * make it more likely to be paged (it's not wired) to make
				 * sure that it gets considered by
				 * vm_pageout_scan() later.
				 */
				vm_page_deactivate(p);
				vm_page_purged_busy++;
				continue;
			}

			assert(p->object != kernel_object);

			/*
			 * we can discard this page...
			 */
			if (p->pmapped == TRUE) {
				pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
			}
			vm_page_purged_count++;

			break;

		case REAP_TERMINATE:
			if (p->absent || p->private) {
				/*
				 * For private pages, VM_PAGE_FREE just
				 * leaves the page structure around for
				 * its owner to clean up.  For absent
				 * pages, the structure is returned to
				 * the appropriate pool.
				 */
				break;
			}
			if (p->fictitious) {
				assert (p->phys_page == vm_page_guard_addr);
				break;
			}
			if (!p->dirty && p->wpmapped)
				p->dirty = pmap_is_modified(p->phys_page);

			if ((p->dirty || p->precious) && !p->error && object->alive) {

				assert(!object->internal);

				vm_page_queues_remove(p);
				/*
				 * flush page... page will be freed
				 * upon completion of I/O
				 */
				(void)vm_pageout_cluster(p, TRUE, FALSE, FALSE);

				vm_page_unlock_queues();
				/*
				 * free the pages reclaimed so far
				 */
				VM_OBJ_REAP_FREELIST(local_free_q,
						     disconnect_on_release);

				vm_object_paging_wait(object, THREAD_UNINT);

				goto restart_after_sleep;
			}
			break;
		}
		vm_page_free_prepare_queues(p);
		assert(p->pageq.next == NULL && p->pageq.prev == NULL);
		/*
		 * Add this page to our list of reclaimed pages,
		 * to be freed later.
		 */
		p->pageq.next = (queue_entry_t) local_free_q;
		local_free_q = p;
	}
	vm_page_unlock_queues();

	/*
	 * Free the remaining reclaimed pages
	 */
	if (reap_type == REAP_PURGEABLE)
		pmap_flush(&pmap_flush_context_storage);

	VM_OBJ_REAP_FREELIST(local_free_q,
			     disconnect_on_release);
}
void
vm_object_reap_async(
	vm_object_t	object)
{
	vm_object_lock_assert_exclusive(object);

	vm_object_reaper_lock_spin();

	vm_object_reap_count_async++;

	/* enqueue the VM object... */
	queue_enter(&vm_object_reaper_queue, object,
		    vm_object_t, cached_list);

	vm_object_reaper_unlock();

	/* ... and wake up the reaper thread */
	thread_wakeup((event_t) &vm_object_reaper_queue);
}
void
vm_object_reaper_thread(void)
{
	vm_object_t	object, shadow_object;

	vm_object_reaper_lock_spin();

	while (!queue_empty(&vm_object_reaper_queue)) {
		queue_remove_first(&vm_object_reaper_queue,
				   object,
				   vm_object_t,
				   cached_list);

		vm_object_reaper_unlock();
		vm_object_lock(object);

		assert(object->terminating);
		assert(!object->alive);

		/*
		 * The pageout daemon might be playing with our pages.
		 * Now that the object is dead, it won't touch any more
		 * pages, but some pages might already be on their way out.
		 * Hence, we wait until the active paging activities have
		 * ceased before we break the association with the pager
		 * itself.
		 */
		while (object->paging_in_progress != 0 ||
		       object->activity_in_progress != 0) {
			vm_object_wait(object,
				       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
				       THREAD_UNINT);
			vm_object_lock(object);
		}

		shadow_object =
			object->pageout ? VM_OBJECT_NULL : object->shadow;

		vm_object_reap(object);
		/* cache is unlocked and object is no longer valid */
		object = VM_OBJECT_NULL;

		if (shadow_object != VM_OBJECT_NULL) {
			/*
			 * Drop the reference "object" was holding on
			 * its shadow object.
			 */
			vm_object_deallocate(shadow_object);
			shadow_object = VM_OBJECT_NULL;
		}
		vm_object_reaper_lock_spin();
	}

	/* wait for more work... */
	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);

	vm_object_reaper_unlock();

	thread_block((thread_continue_t) vm_object_reaper_thread);
	/*NOTREACHED*/
}
/*
 *	Routine:	vm_object_pager_wakeup
 *	Purpose:	Wake up anyone waiting for termination of a pager.
 */

static void
vm_object_pager_wakeup(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;
	boolean_t		waiting = FALSE;
	lck_mtx_t		*lck;

	/*
	 *	If anyone was waiting for the memory_object_terminate
	 *	to be queued, wake them up now.
	 */
	lck = vm_object_hash_lock_spin(pager);
	entry = vm_object_hash_lookup(pager, TRUE);
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		waiting = entry->waiting;
	vm_object_hash_unlock(lck);

	if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
		if (waiting)
			thread_wakeup((event_t) pager);
		vm_object_hash_entry_free(entry);
	}
}
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 *			just like memory_object_terminate, except
 *			that we wake up anyone blocked in vm_object_enter
 *			waiting for termination message to be queued
 *			before calling memory_object_init.
 */
static void
vm_object_release_pager(
	memory_object_t	pager,
	boolean_t	hashed)
{

	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	if (hashed == TRUE) {
		/*
		 *	Wakeup anyone waiting for this terminate
		 *	and remove the entry from the hash
		 */
		vm_object_pager_wakeup(pager);
	}
	/*
	 *	Release reference to pager.
	 */
	memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	__unused kern_return_t	reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	if (object->hashed) {
		lck_mtx_t	*lck;
		/*
		 *	Rip out the pager from the vm_object now...
		 */
		lck = vm_object_hash_lock_spin(object->pager);
		vm_object_remove(object);
		vm_object_hash_unlock(lck);
	}
	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);

	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager, object->hashed);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call)..
		 */
		vm_object_deallocate(object);

	}
	return(KERN_SUCCESS);
}
#define VM_OBJ_DEACT_ALL_STATS DEBUG
#if VM_OBJ_DEACT_ALL_STATS
uint32_t vm_object_deactivate_all_pages_batches = 0;
uint32_t vm_object_deactivate_all_pages_pages = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
/*
 *	vm_object_deactivate_all_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_all_pages(
	register vm_object_t	object)
{
	register vm_page_t	p;
	int			loop_count;
#if VM_OBJ_DEACT_ALL_STATS
	unsigned int		pages_count;
#endif /* VM_OBJ_DEACT_ALL_STATS */
#define V_O_D_A_P_MAX_BATCH	256

	loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
#if VM_OBJ_DEACT_ALL_STATS
	pages_count = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
	vm_page_lock_queues();
	queue_iterate(&object->memq, p, vm_page_t, listq) {
		if (--loop_count == 0) {
#if VM_OBJ_DEACT_ALL_STATS
			hw_atomic_add(&vm_object_deactivate_all_pages_batches,
				      1);
			hw_atomic_add(&vm_object_deactivate_all_pages_pages,
				      pages_count);
			pages_count = 0;
#endif /* VM_OBJ_DEACT_ALL_STATS */
			lck_mtx_yield(&vm_page_queue_lock);
			loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
		}
		if (!p->busy && !p->throttled) {
#if VM_OBJ_DEACT_ALL_STATS
			pages_count++;
#endif /* VM_OBJ_DEACT_ALL_STATS */
			vm_page_deactivate(p);
		}
	}
#if VM_OBJ_DEACT_ALL_STATS
	if (pages_count) {
		hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
		hw_atomic_add(&vm_object_deactivate_all_pages_pages,
			      pages_count);
	}
#endif /* VM_OBJ_DEACT_ALL_STATS */
	vm_page_unlock_queues();
}
#endif	/* VM_OBJECT_CACHE */
/*
 * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
 * exist because of the need to handle shadow chains.  When deactivating pages, we only
 * want to deactive the ones at the top most level in the object chain.  In order to do
 * this efficiently, the specified address range is divided up into "chunks" and we use
 * a bit map to keep track of which pages have already been processed as we descend down
 * the shadow chain.  These chunk macros hide the details of the bit map implementation
 * as much as we can.
 *
 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
 * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
 * order bit represents page 0 in the current range and highest order bit represents
 * page 63.
 *
 * For further convenience, we also use negative logic for the page state in the bit map.
 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
 * been processed.  This way we can simply test the 64-bit long word to see if it's zero
 * to easily tell if the whole range has been processed.  Therefore, the bit map starts
 * out with all the bits set.  The macros below hide all these details from the caller.
 */

#define PAGES_IN_A_CHUNK	64	/* The number of pages in the chunk must */
					/* be the same as the number of bits in  */
					/* the chunk_state_t type. We use 64     */
					/* just for convenience.		  */

#define CHUNK_SIZE	(PAGES_IN_A_CHUNK * PAGE_SIZE_64)	/* Size of a chunk in bytes */

typedef uint64_t	chunk_state_t;

/*
 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
 * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
 * then we mark pages beyond the len as having been "processed" so that we don't waste time
 * looking at pages in that range.  This can save us from unnecessarily chasing down the
 * shadow chain.
 */

#define CHUNK_INIT(c, len)						\
	MACRO_BEGIN							\
	uint64_t p;							\
									\
	(c) = 0xffffffffffffffffLL;					\
									\
	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)	\
		MARK_PAGE_HANDLED(c, p);				\
	MACRO_END


/*
 * Return true if all pages in the chunk have not yet been processed.
 */

#define CHUNK_NOT_COMPLETE(c)	((c) != 0)

/*
 * Return true if the page at offset 'p' in the bit map has already been handled
 * while processing a higher level object in the shadow chain.
 */

#define PAGE_ALREADY_HANDLED(c, p)	(((c) & (1LL << (p))) == 0)

/*
 * Mark the page at offset 'p' in the bit map as having been processed.
 */

#define MARK_PAGE_HANDLED(c, p)						\
	MACRO_BEGIN							\
	(c) = (c) & ~(1LL << (p));					\
	MACRO_END
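/*
 * Illustrative sketch (hypothetical, not compiled into the kernel): a
 * self-contained demonstration of the negative-logic chunk bit map used by
 * the macros above.  A short length leaves the tail bits pre-cleared,
 * handling a page clears its bit, and the chunk is complete once the whole
 * word reaches zero.  The toy constants stand in for PAGE_SIZE_64 and
 * PAGES_IN_A_CHUNK and are assumptions made for this example only.
 */
#if 0 /* example only */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE		4096ULL
#define TOY_PAGES_IN_CHUNK	64

int
main(void)
{
	uint64_t c = 0xffffffffffffffffULL;	/* like CHUNK_INIT: nothing handled yet */
	uint64_t len = 3 * TOY_PAGE_SIZE;	/* only 3 pages in this chunk */
	uint64_t p;

	/* mark pages beyond "len" as already handled, as CHUNK_INIT does */
	for (p = len / TOY_PAGE_SIZE; p < TOY_PAGES_IN_CHUNK; p++)
		c &= ~(1ULL << p);

	/* PAGE_ALREADY_HANDLED(c, 1) is still false: bit 1 is set */
	printf("page 1 already handled? %d\n", (c & (1ULL << 1)) == 0);

	for (p = 0; p < len / TOY_PAGE_SIZE; p++)
		c &= ~(1ULL << p);		/* MARK_PAGE_HANDLED for each page */

	/* CHUNK_NOT_COMPLETE(c) is now false: every bit has been cleared */
	printf("chunk complete? %d\n", c == 0);
	return 0;
}
#endif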
/*
 * Return true if the page at the given offset has been paged out.  Object is
 * locked upon entry and returned locked.
 */

static boolean_t
page_is_paged_out(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	kern_return_t	kr;
	memory_object_t	pager;

	/*
	 * Check the existence map for the page if we have one, otherwise
	 * ask the pager about this page.
	 */

#if MACH_PAGEMAP
	if (object->existence_map) {
		if (vm_external_state_get(object->existence_map, offset)
		    == VM_EXTERNAL_STATE_EXISTS) {
			/*
			 * We found the page
			 */

			return TRUE;
		}
	} else
#endif /* MACH_PAGEMAP */
	if (object->internal &&
	    object->alive &&
	    !object->terminating &&
	    object->pager_ready) {

		if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
			    == VM_EXTERNAL_STATE_EXISTS) {
				return TRUE;
			} else {
				return FALSE;
			}
		}

		/*
		 * We're already holding a "paging in progress" reference
		 * so the object can't disappear when we release the lock.
		 */

		assert(object->paging_in_progress);
		pager = object->pager;
		vm_object_unlock(object);

		kr = memory_object_data_request(
			pager,
			offset + object->paging_offset,
			0,	/* just poke the pager */
			VM_PROT_READ,
			NULL);

		vm_object_lock(object);

		if (kr == KERN_SUCCESS) {
			/*
			 * We found the page
			 */

			return TRUE;
		}
	}
	return FALSE;
}
/*
 * madvise_free_debug
 *
 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
 * simulate the loss of the page's contents as if the page had been
 * reclaimed and then re-faulted.
 */
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else /* DEBUG */
int madvise_free_debug = 0;
#endif /* DEBUG */
/*
 * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
 * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
 * a size that is less than or equal to the CHUNK_SIZE.
 */

static void
deactivate_pages_in_object(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	chunk_state_t		*chunk_state,
	pmap_flush_context	*pfc,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_page_t	m;
	int		p;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int		dw_count;
	int		dw_limit;
	unsigned int	reusable = 0;

	/*
	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
	 * all the pages in the chunk.
	 */

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {

		/*
		 * If this offset has already been found and handled in a higher level object, then don't
		 * do anything with it in the current shadow object.
		 */

		if (PAGE_ALREADY_HANDLED(*chunk_state, p))
			continue;

		/*
		 * See if the page at this offset is around.  First check to see if the page is resident,
		 * then if not, check the existence map or with the pager.
		 */

		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			/*
			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
			 * so that we won't bother looking for a page at this offset again if there are more
			 * shadow objects.  Then deactivate the page.
			 */

			MARK_PAGE_HANDLED(*chunk_state, p);

			if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
				int	clear_refmod;
				int	pmap_options;

				dwp->dw_mask = 0;

				pmap_options = 0;
				clear_refmod = VM_MEM_REFERENCED;
				dwp->dw_mask |= DW_clear_reference;

				if ((kill_page) && (object->internal)) {
					if (madvise_free_debug) {
						/*
						 * zero-fill the page now
						 * to simulate it being
						 * reclaimed and re-faulted.
						 */
						pmap_zero_page(m->phys_page);
					}
					m->precious = FALSE;
					m->dirty = FALSE;

					clear_refmod |= VM_MEM_MODIFIED;
					if (m->throttled) {
						/*
						 * This page is now clean and
						 * reclaimable.  Move it out
						 * of the throttled queue, so
						 * that vm_pageout_scan() can
						 * find it.
						 */
						dwp->dw_mask |= DW_move_page;
					}
#if MACH_PAGEMAP
					vm_external_state_clr(object->existence_map, offset);
#endif	/* MACH_PAGEMAP */
					VM_COMPRESSOR_PAGER_STATE_CLR(object,
								      offset);

					if (reusable_page && !m->reusable) {
						assert(!all_reusable);
						assert(!object->all_reusable);
						m->reusable = TRUE;
						object->reusable_page_count++;
						assert(object->resident_page_count >= object->reusable_page_count);
						reusable++;
						/*
						 * Tell pmap this page is now
						 * "reusable" (to update pmap
						 * stats for all mappings).
						 */
						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
					}
				}
				pmap_options |= PMAP_OPTIONS_NOFLUSH;
				pmap_clear_refmod_options(m->phys_page,
							  clear_refmod,
							  pmap_options,
							  (void *)pfc);

				if (!m->throttled && !(reusable_page || all_reusable))
					dwp->dw_mask |= DW_move_page;

				VM_PAGE_ADD_DELAYED_WORK(dwp, m,
							 dw_count);

				if (dw_count >= dw_limit) {
					if (reusable) {
						OSAddAtomic(reusable,
							    &vm_page_stats_reusable.reusable_count);
						vm_page_stats_reusable.reusable += reusable;
						reusable = 0;
					}
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

					dwp = &dw_array[0];
					dw_count = 0;
				}
			}

		} else {

			/*
			 * The page at this offset isn't memory resident, check to see if it's
			 * been paged out.  If so, mark it as handled so we don't bother looking
			 * for it in the shadow chain.
			 */

			if (page_is_paged_out(object, offset)) {
				MARK_PAGE_HANDLED(*chunk_state, p);

				/*
				 * If we're killing a non-resident page, then clear the page in the existence
				 * map so we don't bother paging it back in if it's touched again in the future.
				 */

				if ((kill_page) && (object->internal)) {
#if MACH_PAGEMAP
					vm_external_state_clr(object->existence_map, offset);
#endif	/* MACH_PAGEMAP */
					VM_COMPRESSOR_PAGER_STATE_CLR(object,
								      offset);

					if (pmap != PMAP_NULL &&
					    (COMPRESSED_PAGER_IS_ACTIVE ||
					     DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)) {
						/*
						 * Tell pmap that this page
						 * is no longer mapped, to
						 * adjust the footprint ledger
						 * because this page is no
						 * longer compressed.
						 */
						pmap_remove_options(
							pmap,
							pmap_offset,
							(pmap_offset +
							 PAGE_SIZE),
							PMAP_OPTIONS_REMOVE);
					}
				}
			}
		}
	}

	if (reusable) {
		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reusable += reusable;
		reusable = 0;
	}

	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
}
/*
 * Deactive a "chunk" of the given range of the object starting at offset.  A "chunk"
 * will always be less than or equal to the given size.  The total range is divided up
 * into chunks for efficiency and performance related to the locks and handling the shadow
 * chain.  This routine returns how much of the given "size" it actually processed.  It's
 * up to the caler to loop and keep calling this routine until the entire range they want
 * to process has been done.
 */

static vm_object_size_t
deactivate_a_chunk(
	vm_object_t		orig_object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	pmap_flush_context	*pfc,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_t		object;
	vm_object_t		tmp_object;
	vm_object_size_t	length;
	chunk_state_t		chunk_state;


	/*
	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
	 * remaining size the caller asked for.
	 */

	length = MIN(size, CHUNK_SIZE);

	/*
	 * The chunk_state keeps track of which pages we've already processed if there's
	 * a shadow chain on this object.  At this point, we haven't done anything with this
	 * range of pages yet, so initialize the state to indicate no pages processed yet.
	 */

	CHUNK_INIT(chunk_state, length);
	object = orig_object;

	/*
	 * Start at the top level object and iterate around the loop once for each object
	 * in the shadow chain.  We stop processing early if we've already found all the pages
	 * in the range.  Otherwise we stop when we run out of shadow objects.
	 */

	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
		vm_object_paging_begin(object);

		deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);

		vm_object_paging_end(object);

		/*
		 * We've finished with this object, see if there's a shadow object.  If
		 * there is, update the offset and lock the new object.  We also turn off
		 * kill_page at this point since we only kill pages in the top most object.
		 */

		tmp_object = object->shadow;

		if (tmp_object) {
			kill_page = FALSE;
			reusable_page = FALSE;
			all_reusable = FALSE;
			offset += object->vo_shadow_offset;
			vm_object_lock(tmp_object);
		}

		if (object != orig_object)
			vm_object_unlock(object);

		object = tmp_object;
	}

	if (object && object != orig_object)
		vm_object_unlock(object);

	return length;
}
/*
 * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
 * we also clear the modified status of the page and "forget" any changes that have been made
 * to the page.
 */

__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	struct pmap		*pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_size_t	length;
	boolean_t		all_reusable;
	pmap_flush_context	pmap_flush_context_storage;

	/*
	 * We break the range up into chunks and do one chunk at a time.  This is for
	 * efficiency and performance while handling the shadow chains and the locks.
	 * The deactivate_a_chunk() function returns how much of the range it processed.
	 * We keep calling this routine until the given size is exhausted.
	 */


	all_reusable = FALSE;
#if 11
	/*
	 * For the sake of accurate "reusable" pmap stats, we need
	 * to tell pmap about each page that is no longer "reusable",
	 * so we can't do the "all_reusable" optimization.
	 */
#else /* 11 */
	if (reusable_page &&
	    object->internal &&
	    object->vo_size != 0 &&
	    object->vo_size == size &&
	    object->reusable_page_count == 0) {
		all_reusable = TRUE;
		reusable_page = FALSE;
	}
#endif /* 11 */

	if ((reusable_page || all_reusable) && object->all_reusable) {
		/* This means MADV_FREE_REUSABLE has been called twice, which
		 * is probably illegal. */
		return;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);

	while (size) {
		length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
	pmap_flush(&pmap_flush_context_storage);

	if (all_reusable) {
		if (!object->all_reusable) {
			unsigned int reusable;

			object->all_reusable = TRUE;
			assert(object->reusable_page_count == 0);
			/* update global stats */
			reusable = object->resident_page_count;
			OSAddAtomic(reusable,
				    &vm_page_stats_reusable.reusable_count);
			vm_page_stats_reusable.reusable += reusable;
			vm_page_stats_reusable.all_reusable_calls++;
		}
	} else if (reusable_page) {
		vm_page_stats_reusable.partial_reusable_calls++;
	}
}
void
vm_object_reuse_pages(
	vm_object_t		object,
	vm_object_offset_t	start_offset,
	vm_object_offset_t	end_offset,
	boolean_t		allow_partial_reuse)
{
	vm_object_offset_t	cur_offset;
	vm_page_t		m;
	unsigned int		reused, reusable;

#define VM_OBJECT_REUSE_PAGE(object, m, reused)				\
	MACRO_BEGIN							\
		if ((m) != VM_PAGE_NULL &&				\
		    (m)->reusable) {					\
			assert((object)->reusable_page_count <=		\
			       (object)->resident_page_count);		\
			assert((object)->reusable_page_count > 0);	\
			(object)->reusable_page_count--;		\
			(m)->reusable = FALSE;				\
			(reused)++;					\
			/*						\
			 * Tell pmap that this page is no longer	\
			 * "reusable", to update the "reusable" stats	\
			 * for all the pmaps that have mapped this	\
			 * page.					\
			 */						\
			pmap_clear_refmod_options((m)->phys_page,	\
						  0, /* refmod */	\
						  (PMAP_OPTIONS_CLEAR_REUSABLE \
						   | PMAP_OPTIONS_NOFLUSH), \
						  NULL);		\
		}							\
	MACRO_END

	reused = 0;
	reusable = 0;

	vm_object_lock_assert_exclusive(object);

	if (object->all_reusable) {
		panic("object %p all_reusable: can't update pmap stats\n",
		      object);
		assert(object->reusable_page_count == 0);
		object->all_reusable = FALSE;
		if (end_offset - start_offset == object->vo_size ||
		    !allow_partial_reuse) {
			vm_page_stats_reusable.all_reuse_calls++;
			reused = object->resident_page_count;
		} else {
			vm_page_stats_reusable.partial_reuse_calls++;
			queue_iterate(&object->memq, m, vm_page_t, listq) {
				if (m->offset < start_offset ||
				    m->offset >= end_offset) {
					m->reusable = TRUE;
					object->reusable_page_count++;
					assert(object->resident_page_count >= object->reusable_page_count);
					continue;
				} else {
					assert(!m->reusable);
					reused++;
				}
			}
		}
	} else if (object->resident_page_count >
		   ((end_offset - start_offset) >> PAGE_SHIFT)) {
		vm_page_stats_reusable.partial_reuse_calls++;
		for (cur_offset = start_offset;
		     cur_offset < end_offset;
		     cur_offset += PAGE_SIZE_64) {
			if (object->reusable_page_count == 0) {
				break;
			}
			m = vm_page_lookup(object, cur_offset);
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	} else {
		vm_page_stats_reusable.partial_reuse_calls++;
		queue_iterate(&object->memq, m, vm_page_t, listq) {
			if (object->reusable_page_count == 0) {
				break;
			}
			if (m->offset < start_offset ||
			    m->offset >= end_offset) {
				continue;
			}
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	}

	/* update global stats */
	OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
	vm_page_stats_reusable.reused += reused;
	vm_page_stats_reusable.reusable += reusable;
}
/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be temporary/internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */

__private_extern__ void
vm_object_pmap_protect(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_map_offset_t			pmap_start,
	vm_prot_t			prot)
{
	vm_object_pmap_protect_options(object, offset, size,
				       pmap, pmap_start, prot, 0);
}

__private_extern__ void
vm_object_pmap_protect_options(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_map_offset_t			pmap_start,
	vm_prot_t			prot,
	int				options)
{
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		delayed_pmap_flush = FALSE;

	if (object == VM_OBJECT_NULL)
		return;
	size = vm_object_round_page(size);
	offset = vm_object_trunc_page(offset);

	vm_object_lock(object);

	if (object->phys_contiguous) {
		if (pmap != NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap,
					     pmap_start,
					     pmap_start + size,
					     prot,
					     options & ~PMAP_OPTIONS_NOFLUSH,
					     NULL);
		} else {
			vm_object_offset_t phys_start, phys_end, phys_addr;

			phys_start = object->vo_shadow_offset + offset;
			phys_end = phys_start + size;
			assert(phys_start <= phys_end);
			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
			vm_object_unlock(object);

			pmap_flush_context_init(&pmap_flush_context_storage);
			delayed_pmap_flush = FALSE;

			for (phys_addr = phys_start;
			     phys_addr < phys_end;
			     phys_addr += PAGE_SIZE_64) {
				pmap_page_protect_options(
					(ppnum_t) (phys_addr >> PAGE_SHIFT),
					prot,
					options | PMAP_OPTIONS_NOFLUSH,
					(void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
			if (delayed_pmap_flush == TRUE)
				pmap_flush(&pmap_flush_context_storage);
		}
		return;
	}

	assert(object->internal);

	while (TRUE) {
	   if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
		vm_object_unlock(object);
		pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
				     options & ~PMAP_OPTIONS_NOFLUSH, NULL);
		return;
	    }

	   pmap_flush_context_init(&pmap_flush_context_storage);
	   delayed_pmap_flush = FALSE;

	    /*
	     * if we are doing large ranges with respect to resident
	     * page count then we should interate over pages otherwise
	     * inverse page look-up will be faster
	     */
	    if (ptoa_64(object->resident_page_count / 4) < size) {
		vm_page_t		p;
		vm_object_offset_t	end;

		end = offset + size;

		queue_iterate(&object->memq, p, vm_page_t, listq) {
			if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
				vm_map_offset_t start;

				start = pmap_start + p->offset - offset;

				if (pmap != PMAP_NULL)
					pmap_protect_options(
						pmap,
						start,
						start + PAGE_SIZE_64,
						prot,
						options | PMAP_OPTIONS_NOFLUSH,
						&pmap_flush_context_storage);
				else
					pmap_page_protect_options(
						p->phys_page,
						prot,
						options | PMAP_OPTIONS_NOFLUSH,
						&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}

	    } else {
		vm_page_t		p;
		vm_object_offset_t	end;
		vm_object_offset_t	target_off;

		end = offset + size;

		for (target_off = offset;
		     target_off < end; target_off += PAGE_SIZE) {

			p = vm_page_lookup(object, target_off);

			if (p != VM_PAGE_NULL) {
				vm_object_offset_t start;

				start = pmap_start + (p->offset - offset);

				if (pmap != PMAP_NULL)
					pmap_protect_options(
						pmap,
						start,
						start + PAGE_SIZE_64,
						prot,
						options | PMAP_OPTIONS_NOFLUSH,
						&pmap_flush_context_storage);
				else
					pmap_page_protect_options(
						p->phys_page,
						prot,
						options | PMAP_OPTIONS_NOFLUSH,
						&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}
	    }
	    if (delayed_pmap_flush == TRUE)
		    pmap_flush(&pmap_flush_context_storage);

	    if (prot == VM_PROT_NONE) {
		/*
		 * Must follow shadow chain to remove access
		 * to pages in shadowed objects.
		 */
		register vm_object_t	next_object;

		next_object = object->shadow;
		if (next_object != VM_OBJECT_NULL) {
		    offset += object->vo_shadow_offset;
		    vm_object_lock(next_object);
		    vm_object_unlock(object);
		    object = next_object;
		}
		else {
		    /*
		     * End of chain - we are done.
		     */
		    break;
		}
	    }
	    else {
		/*
		 * Pages in shadowed objects may never have
		 * write permission - we may stop here.
		 */
		break;
	    }
	}

	vm_object_unlock(object);
}
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	struct vm_object_fault_info fault_info;

	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	vm_object_reference_locked(src_object);
	vm_object_unlock(src_object);

	/*
	 *	Create a new object to hold the copied pages.
	 *	A few notes:
	 *		We fill the new object starting at offset 0,
	 *		 regardless of the input offset.
	 *		We don't bother to lock the new object within
	 *		 this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */

	fault_info.interruptible = interruptible;
	fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.user_tag  = 0;
	fault_info.pmap_options = 0;
	fault_info.lo_offset = src_offset;
	fault_info.hi_offset = src_offset + size;
	fault_info.no_cache  = FALSE;
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;

	for ( ;
	    size != 0 ;
	    src_offset += PAGE_SIZE_64,
			new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t	new_page;
		vm_fault_return_t result;

		vm_object_lock(new_object);

		while ((new_page = vm_page_alloc(new_object, new_offset))
				== VM_PAGE_NULL) {

			vm_object_unlock(new_object);

			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			}
			vm_object_lock(new_object);
		}
		vm_object_unlock(new_object);

		do {
			vm_prot_t	prot = VM_PROT_READ;
			vm_page_t	_result_page;
			vm_page_t	top_page;
			register
			vm_page_t	result_page;
			kern_return_t	error_code;

			vm_object_lock(src_object);

			if (src_object->internal &&
			    src_object->shadow == VM_OBJECT_NULL &&
			    (vm_page_lookup(src_object,
					    src_offset) == VM_PAGE_NULL) &&
			    (src_object->pager == NULL ||
			     (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
							    src_offset) ==
			      VM_EXTERNAL_STATE_ABSENT))) {
				/*
				 * This page is neither resident nor compressed
				 * and there's no shadow object below
				 * "src_object", so this page is really missing.
				 * There's no need to zero-fill it just to copy
				 * it:  let's leave it missing in "new_object"
				 * and get zero-filled on demand.
				 */
				vm_object_unlock(src_object);
				/* free the unused "new_page"... */
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				new_page = VM_PAGE_NULL;
				vm_object_unlock(new_object);
				/* ...and go to next page in "src_object" */
				result = VM_FAULT_SUCCESS;
				break;
			}

			vm_object_paging_begin(src_object);

			if (size > (vm_size_t) -1) {
				/* 32-bit overflow */
				fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
			} else {
				fault_info.cluster_size = (vm_size_t) size;
				assert(fault_info.cluster_size == size);
			}

			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
			_result_page = VM_PAGE_NULL;
			result = vm_fault_page(src_object, src_offset,
				VM_PROT_READ, FALSE,
				FALSE, /* page not looked up */
				&prot, &_result_page, &top_page,
				(int *)0,
				&error_code, FALSE, FALSE, &fault_info);

			switch(result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;

				/*
				 *	Copy the page to the new object.
				 *
				 *	POLICY DECISION:
				 *		If result_page is clean,
				 *		we could steal it instead
				 *		of copying.
				 */

				vm_page_copy(result_page, new_page);
				vm_object_unlock(result_page->object);

				/*
				 *	Let go of both pages (make them
				 *	not busy, perform wakeup, activate).
				 */
				vm_object_lock(new_object);
				SET_PAGE_DIRTY(new_page, FALSE);
				PAGE_WAKEUP_DONE(new_page);
				vm_object_unlock(new_object);

				vm_object_lock(result_page->object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lockspin_queues();
				if (!result_page->active &&
				    !result_page->inactive &&
				    !result_page->throttled)
					vm_page_activate(result_page);
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 *	Release paging references and
				 *	top-level placeholder page, if any.
				 */

				vm_fault_cleanup(result_page->object,
						 top_page);

				break;

			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible))
					break;
				/* fall thru */

			case VM_FAULT_INTERRUPTED:
				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);

			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(src_object);
				vm_object_unlock(src_object);
				/*FALLTHROUGH*/
			case VM_FAULT_MEMORY_ERROR:
				/*
				 * A policy choice:
				 *	(a) ignore pages that we can't
				 *	    copy
				 *	(b) return the null object if
				 *	    any page fails [chosen]
				 */

				vm_object_lock(new_object);
				VM_PAGE_FREE(new_page);
				vm_object_unlock(new_object);

				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(error_code ? error_code:
				       KERN_MEMORY_ERROR);

			default:
				panic("vm_object_copy_slowly: unexpected error"
				      " 0x%x from vm_fault_page()\n", result);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 *	Lose the extra reference, and return our object.
	 */
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */

__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t		*_object,		/* INOUT */
	__unused vm_object_offset_t	offset,		/* IN */
	__unused vm_object_size_t	size,		/* IN */
	boolean_t		*_src_needs_copy,	/* OUT */
	boolean_t		*_dst_needs_copy)	/* OUT */
{
	vm_object_t	object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
	    *_object, offset, size, 0, 0);
	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return(TRUE);
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		vm_object_reference_locked(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return(FALSE);

	default:
		vm_object_unlock(object);
		return(FALSE);
	}
	return(TRUE);
}
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;

/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;
	uint32_t	try_failed_count = 0;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptable, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case it.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);

		try_failed_count++;
		mutex_pause(try_failed_count);	/* wait a bit */

		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->vo_size < src_offset+size)
		copy->vo_size = src_offset+size;

	if (!copy->pager_ready)
		check_ready = TRUE;

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;

/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The src_object must be locked on entry.  It will be unlocked
 *		on exit - so the caller must also hold a reference to it.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		src_object_shared)
{
	vm_object_t		new_copy = VM_OBJECT_NULL;
	vm_object_t		old_copy;
	vm_page_t		p;
	vm_object_size_t	copy_size = src_offset + size;
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		delayed_pmap_flush = FALSE;

	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		 pages modified by the new mapping  (i.e., the copy,
	 *		 not the original mapping).
	 *		Record the original object as the backing object for
	 *		 the copy object.  If the original mapping does not
	 *		 change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		 When the original mapping causes a page to be modified,
	 *		 it must be copied to a new page that is "pushed" to
	 *		 the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		 This makes the copy object itself read-only, allowing
	 *		 it to be reused if the original mapping makes no
	 *		 changes, and simplifying the synchronization required
	 *		 in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be assymetric because the original
	 *	object is *not* marked copy-on-write. A copied page is pushed
	 *	to the copy object, regardless which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done. If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused. Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot affect
	 *	affect the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */

	copy_size = vm_object_round_page(copy_size);
 Retry:

	/*
	 * Wait for paging in progress.
	 */
	if (!src_object->true_share &&
	    (src_object->paging_in_progress != 0 ||
	     src_object->activity_in_progress != 0)) {
		if (src_object_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			src_object_shared = FALSE;
			goto Retry;
		}
		vm_object_paging_wait(src_object, THREAD_UNINT);
	}
	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		int lock_granted;

		/*
		 *	Try to get the locks (out of order)
		 */
		if (src_object_shared == TRUE)
			lock_granted = vm_object_lock_try_shared(old_copy);
		else
			lock_granted = vm_object_lock_try(old_copy);

		if (!lock_granted) {
			vm_object_unlock(src_object);

			if (collisions++ == 0)
				copy_delayed_lock_contention++;
			mutex_pause(collisions);

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;

			if (collisions > copy_delayed_max_collisions)
				copy_delayed_max_collisions = collisions;

			if (src_object_shared == TRUE)
				vm_object_lock_shared(src_object);
			else
				vm_object_lock(src_object);

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object if
			 *	we can safely grow it (if
			 *	needed).
			 */

			if (old_copy->vo_size < copy_size) {
				if (src_object_shared == TRUE) {
					vm_object_unlock(old_copy);
					vm_object_unlock(src_object);

					vm_object_lock(src_object);
					src_object_shared = FALSE;
					goto Retry;
				}
				/*
				 * We can't perform a delayed copy if any of the
				 * pages in the extended range are wired (because
				 * we can't safely take write permission away from
				 * wired pages).  If the pages aren't wired, then
				 * go ahead and protect them.
				 */
				copy_delayed_protect_iterate++;

				pmap_flush_context_init(&pmap_flush_context_storage);
				delayed_pmap_flush = FALSE;

				queue_iterate(&src_object->memq, p, vm_page_t, listq) {
					if (!p->fictitious &&
					    p->offset >= old_copy->vo_size &&
					    p->offset < copy_size) {
						if (VM_PAGE_WIRED(p)) {
							vm_object_unlock(old_copy);
							vm_object_unlock(src_object);

							if (new_copy != VM_OBJECT_NULL) {
								vm_object_unlock(new_copy);
								vm_object_deallocate(new_copy);
							}
							if (delayed_pmap_flush == TRUE)
								pmap_flush(&pmap_flush_context_storage);

							return VM_OBJECT_NULL;
						} else {
							pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
										  PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
							delayed_pmap_flush = TRUE;
						}
					}
				}
				if (delayed_pmap_flush == TRUE)
					pmap_flush(&pmap_flush_context_storage);

				old_copy->vo_size = copy_size;
			}
			if (src_object_shared == TRUE)
				vm_object_reference_shared(old_copy);
			else
				vm_object_reference_locked(old_copy);
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}
			return(old_copy);
		}

		/*
		 * Adjust the size argument so that the newly-created
		 * copy object will be large enough to back either the
		 * old copy object or the new mapping.
		 */
		if (old_copy->vo_size > copy_size)
			copy_size = old_copy->vo_size;

		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(copy_size);
			vm_object_lock(src_object);
			vm_object_lock(new_copy);

			src_object_shared = FALSE;
			goto Retry;
		}
		new_copy->vo_size = copy_size;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));

	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(copy_size);
		vm_object_lock(src_object);
		vm_object_lock(new_copy);

		src_object_shared = FALSE;
		goto Retry;
	}

	/*
	 * We now have the src object locked, and the new copy object
	 * allocated and locked (and potentially the old copy locked).
	 * Before we go any further, make sure we can still perform
	 * a delayed copy, as the situation may have changed.
	 *
	 * Specifically, we can't perform a delayed copy if any of the
	 * pages in the range are wired (because we can't safely take
	 * write permission away from wired pages).  If the pages aren't
	 * wired, then go ahead and protect them.
	 */
	copy_delayed_protect_iterate++;

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	queue_iterate(&src_object->memq, p, vm_page_t, listq) {
		if (!p->fictitious && p->offset < copy_size) {
			if (VM_PAGE_WIRED(p)) {
				if (old_copy)
					vm_object_unlock(old_copy);
				vm_object_unlock(src_object);
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);

				if (delayed_pmap_flush == TRUE)
					pmap_flush(&pmap_flush_context_storage);

				return VM_OBJECT_NULL;
			} else {
				pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE),
							  PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
		}
	}
	if (delayed_pmap_flush == TRUE)
		pmap_flush(&pmap_flush_context_storage);

	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		/* remove ref. from old_copy */
		vm_object_lock_assert_exclusive(src_object);
		src_object->ref_count--;
		assert(src_object->ref_count > 0);
		vm_object_lock_assert_exclusive(old_copy);
		old_copy->shadow = new_copy;
		vm_object_lock_assert_exclusive(new_copy);
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;		/* for old_copy->shadow ref. */

#if TASK_SWAPPER
		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}
#endif

		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	/*
	 *	Point the new copy at the existing object.
	 */
	vm_object_lock_assert_exclusive(new_copy);
	new_copy->shadow = src_object;
	new_copy->vo_shadow_offset = 0;
	new_copy->shadowed = TRUE;	/* caller must set needs_copy */

	vm_object_lock_assert_exclusive(src_object);
	vm_object_reference_locked(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(src_object);
	vm_object_unlock(new_copy);

	XPR(XPR_VM_OBJECT,
	    "vm_object_copy_delayed: used copy object %X for source %X\n",
	    new_copy, src_object, 0, 0, 0);

	return new_copy;
}
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*dst_object,	/* OUT */
	vm_object_offset_t	*dst_offset,	/* OUT */
	boolean_t		*dst_needs_copy) /* OUT */
{
	kern_return_t	result;
	boolean_t	interruptible = THREAD_ABORTSAFE; /* XXX */
	boolean_t	object_lock_shared = FALSE;
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	copy_strategy = src_object->copy_strategy;

	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
		vm_object_lock_shared(src_object);
		object_lock_shared = TRUE;
	} else
		vm_object_lock(src_object);

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready". Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {
		wait_result_t wait_result;

		if (object_lock_shared == TRUE) {
			vm_object_unlock(src_object);
			vm_object_lock(src_object);
			object_lock_shared = FALSE;
			continue;
		}
		wait_result = vm_object_sleep(	src_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
		if (wait_result != THREAD_AWAKENED) {
			vm_object_unlock(src_object);
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return(MACH_SEND_INTERRUPTED);
		}
	}

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	    case MEMORY_OBJECT_COPY_DELAY:
		*dst_object = vm_object_copy_delayed(src_object,
						     src_offset, size, object_lock_shared);
		if (*dst_object != VM_OBJECT_NULL) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
			result = KERN_SUCCESS;
			break;
		}
		vm_object_lock(src_object);
		/* fall thru when delayed copy not allowed */

	    case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
					       interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	    case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
					     dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	    case MEMORY_OBJECT_COPY_SYMMETRIC:
		XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	    default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
	}
	return(result);
}
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = TRUE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t		*object,	/* IN/OUT */
	vm_object_offset_t	*offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;
	assert(source != VM_OBJECT_NULL);
	if (source == VM_OBJECT_NULL)
		return FALSE;

#if 0
	/*
	 * This assertion is valid but it gets triggered by Rosetta for example
	 * due to a combination of vm_remap() that changes a VM object's
	 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
	 * that then sets "needs_copy" on its map entry.  This creates a
	 * mapping situation that VM should never see and doesn't know how to
	 * handle.
	 * It's not clear if this can create any real problem but we should
	 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
	 * do more than just set "needs_copy" to handle the copy-on-write...
	 * In the meantime, let's disable the assertion.
	 */
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
#endif

	/*
	 *	Determine if we really need a shadow.
	 *
	 *	If the source object is larger than what we are trying
	 *	to create, then force the shadow creation even if the
	 *	ref count is 1.  This will allow us to [potentially]
	 *	collapse the underlying object away in the future
	 *	(freeing up the extra data it might contain and that
	 *	we don't need).
	 */
	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	     source->shadow->copy == VM_OBJECT_NULL) )
	{
		source->shadowed = FALSE;
		return FALSE;
	}

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->vo_shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
	return TRUE;
}
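/*
 * Illustrative caller pattern (sketch, not part of the original source):
 * a copy-on-write setup typically replaces its own reference when a shadow
 * is created, e.g.:
 *
 *	vm_object_t        obj = entry_object;
 *	vm_object_offset_t off = entry_offset;
 *
 *	if (vm_object_shadow(&obj, &off, entry_size)) {
 *		entry_object = obj;	(now points at the new shadow object)
 *		entry_offset = off;	(rebased to 0 within the shadow)
 *	}
 *
 * "entry_object", "entry_offset" and "entry_size" are hypothetical names
 * used only for this sketch.
 */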
/*
 *	The relationship between vm_object structures and
 *	the memory_object requires careful synchronization.
 *
 *	All associations are created by memory_object_create_named
 *	for external pagers and vm_object_pager_create for internal
 *	objects as follows:
 *
 *		pager:	the memory_object itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a port reference;
 *
 *		pager_request:
 *			the memory object control port,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to this
 *			port, but no other references.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish references to the memory
 *			object. [The memory manager may not want to
 *			destroy the memory object, but may wish to
 *			refuse or tear down existing memory mappings.]
 *
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the pager field and release the memory object references.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	In addition to the lock on the object, the vm_object_hash_lock
 *	governs the associations.  References gained through the
 *	association require use of the hash lock.
 *
 *	Because the pager field may be cleared spontaneously, it
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the pager reference found in the
 *	vm_object must be valid.  To prevent this from being released,
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the pager values include the
 *	termination routines and vm_object_collapse.]
 */
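/*
 * Illustrative sketch (not part of the original source): once an
 * association exists, a thread that needs the pager typically waits for
 * the "ready" attribute under the object lock, using the same pattern
 * that appears in memory_object_create_named() later in this file:
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object,
 *				VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *	... use object->pager / object->pager_control ...
 *	vm_object_unlock(object);
 */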
/*
 *	Routine:	vm_object_enter
 *	Purpose:
 *		Find a VM object corresponding to the given
 *		pager; if no such object exists, create one,
 *		and initialize the pager.
 */
vm_object_t
vm_object_enter(
	memory_object_t		pager,
	vm_object_size_t	size,
	boolean_t		internal,
	boolean_t		init,
	boolean_t		named)
{
	register vm_object_t	object;
	vm_object_t		new_object;
	boolean_t		must_init;
	vm_object_hash_entry_t	entry, new_entry;
	uint32_t		try_failed_count = 0;
	lck_mtx_t		*lck;

	if (pager == MEMORY_OBJECT_NULL)
		return(vm_object_allocate(size));

	new_object = VM_OBJECT_NULL;
	new_entry = VM_OBJECT_HASH_ENTRY_NULL;
	must_init = init;

	/*
	 *	Look for an object associated with this port.
	 */
Retry:
	lck = vm_object_hash_lock_spin(pager);
	do {
		entry = vm_object_hash_lookup(pager, FALSE);

		if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
			if (new_object == VM_OBJECT_NULL) {
				/*
				 *	We must unlock to create a new object;
				 *	if we do so, we must try the lookup again.
				 */
				vm_object_hash_unlock(lck);
				assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
				new_entry = vm_object_hash_entry_alloc(pager);
				new_object = vm_object_allocate(size);
				/*
				 * Set new_object->hashed now, while noone
				 * knows about this object yet and we
				 * don't need to lock it.  Once it's in
				 * the hash table, we would have to lock
				 * the object to set its "hashed" bit and
				 * we can't lock the object while holding
				 * the hash lock as a spinlock...
				 */
				new_object->hashed = TRUE;
				lck = vm_object_hash_lock_spin(pager);
			} else {
				/*
				 *	Lookup failed twice, and we have something
				 *	to insert; set the object.
				 */
				/*
				 * We can't lock the object here since we're
				 * holding the hash lock as a spin lock.
				 * We've already pre-set "new_object->hashed"
				 * when we created "new_object" above, so we
				 * won't need to modify the object in
				 * vm_object_hash_insert().
				 */
				assert(new_object->hashed);
				vm_object_hash_insert(new_entry, new_object);
				entry = new_entry;
				new_entry = VM_OBJECT_HASH_ENTRY_NULL;
				new_object = VM_OBJECT_NULL;
				must_init = TRUE;
			}
		} else if (entry->object == VM_OBJECT_NULL) {
			/*
			 *	If a previous object is being terminated,
			 *	we must wait for the termination message
			 *	to be queued (and lookup the entry again).
			 */
			entry->waiting = TRUE;
			entry = VM_OBJECT_HASH_ENTRY_NULL;
			assert_wait((event_t) pager, THREAD_UNINT);
			vm_object_hash_unlock(lck);

			thread_block(THREAD_CONTINUE_NULL);
			lck = vm_object_hash_lock_spin(pager);
		}
	} while (entry == VM_OBJECT_HASH_ENTRY_NULL);

	object = entry->object;
	assert(object != VM_OBJECT_NULL);

	if (!must_init) {
		if ( !vm_object_lock_try(object)) {

			vm_object_hash_unlock(lck);

			try_failed_count++;
			mutex_pause(try_failed_count);  /* wait a bit */
			goto Retry;
		}
		assert(!internal || object->internal);
#if VM_OBJECT_CACHE
		if (object->ref_count == 0) {
			if ( !vm_object_cache_lock_try()) {

				vm_object_hash_unlock(lck);
				vm_object_unlock(object);

				try_failed_count++;
				mutex_pause(try_failed_count);  /* wait a bit */
				goto Retry;
			}
			XPR(XPR_VM_OBJECT_CACHE,
			    "vm_object_enter: removing %x from cache, head (%x, %x)\n",
			    object,
			    vm_object_cached_list.next,
			    vm_object_cached_list.prev, 0,0);
			queue_remove(&vm_object_cached_list, object,
				     vm_object_t, cached_list);
			vm_object_cached_count--;

			vm_object_cache_unlock();
		}
#endif
		if (named) {
			assert(!object->named);
			object->named = TRUE;
		}
		vm_object_lock_assert_exclusive(object);
		object->ref_count++;
		vm_object_res_reference(object);

		vm_object_hash_unlock(lck);
		vm_object_unlock(object);

		VM_STAT_INCR(hits);
	} else
		vm_object_hash_unlock(lck);

	assert(object->ref_count > 0);

	VM_STAT_INCR(lookups);

	XPR(XPR_VM_OBJECT,
	    "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
	    pager, object, must_init, 0, 0);

	/*
	 *	If we raced to create a vm_object but lost, let's
	 *	throw away ours.
	 */

	if (new_object != VM_OBJECT_NULL) {
		/*
		 * Undo the pre-setting of "new_object->hashed" before
		 * deallocating "new_object", since we did not insert it
		 * into the hash table after all.
		 */
		assert(new_object->hashed);
		new_object->hashed = FALSE;
		vm_object_deallocate(new_object);
	}

	if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
		vm_object_hash_entry_free(new_entry);

	if (must_init) {
		memory_object_control_t control;

		/*
		 *	Allocate request port.
		 */

		control = memory_object_control_allocate(object);
		assert (control != MEMORY_OBJECT_CONTROL_NULL);

		vm_object_lock(object);
		assert(object != kernel_object);

		/*
		 *	Copy the reference we were given.
		 */

		memory_object_reference(pager);
		object->pager_created = TRUE;
		object->pager = pager;
		object->internal = internal;
		object->pager_trusted = internal;
		if (!internal) {
			/* copy strategy invalid until set by memory manager */
			object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
		}
		object->pager_control = control;
		object->pager_ready = FALSE;

		vm_object_unlock(object);

		/*
		 *	Let the pager know we're using it.
		 */

		(void) memory_object_init(pager,
					  object->pager_control,
					  PAGE_SIZE);

		vm_object_lock(object);
		if (named)
			object->named = TRUE;
		if (internal) {
			object->pager_ready = TRUE;
			vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
		}

		object->pager_initialized = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
	} else {
		vm_object_lock(object);
	}

	/*
	 *	[At this point, the object must be locked]
	 */

	/*
	 *	Wait for the work above to be done by the first
	 *	thread to map this object.
	 */

	while (!object->pager_initialized) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_INITIALIZED,
				THREAD_UNINT);
	}
	vm_object_unlock(object);

	XPR(XPR_VM_OBJECT,
	    "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
	    object, object->pager, internal, 0,0);
	return(object);
}
/*
 *	Routine:	vm_object_pager_create
 *	Purpose:
 *		Create a memory object for an internal object.
 *	In/out conditions:
 *		The object is locked on entry and exit;
 *		it may be unlocked within this call.
 *	Limitations:
 *		Only one thread may be performing a
 *		vm_object_pager_create on an object at
 *		a time.  Presumably, only the pageout
 *		daemon will be using this routine.
 */

void
vm_object_pager_create(
	register vm_object_t	object)
{
	memory_object_t		pager;
	vm_object_hash_entry_t	entry;
	lck_mtx_t		*lck;
#if	MACH_PAGEMAP
	vm_object_size_t	size;
	vm_external_map_t	map;
#endif	/* MACH_PAGEMAP */

	XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
	    object, 0,0,0,0);

	assert(object != kernel_object);

	if (memory_manager_default_check() != KERN_SUCCESS)
		return;

	/*
	 *	Prevent collapse or termination by holding a paging reference
	 */

	vm_object_paging_begin(object);
	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing the ports
		 */
		while (!object->pager_initialized) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
		}
		vm_object_paging_end(object);
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */

	object->pager_created = TRUE;
	object->paging_offset = 0;

#if	MACH_PAGEMAP
	size = object->vo_size;
#endif	/* MACH_PAGEMAP */
	vm_object_unlock(object);

#if	MACH_PAGEMAP
	if (DEFAULT_PAGER_IS_ACTIVE) {
		map = vm_external_create(size);
		vm_object_lock(object);
		assert(object->vo_size == size);
		object->existence_map = map;
		vm_object_unlock(object);
	}
#endif	/* MACH_PAGEMAP */

	if ((uint32_t) object->vo_size != object->vo_size) {
		panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
		      (uint64_t) object->vo_size);
	}

	/*
	 *	Create the [internal] pager, and associate it with this object.
	 *
	 *	We make the association here so that vm_object_enter()
	 *	can look up the object to complete initializing it.  No
	 *	user will ever map this object.
	 */
	{
		memory_object_default_t		dmm;

		/* acquire a reference for the default memory manager */
		dmm = memory_manager_default_reference();

		assert(object->temporary);

		/* create our new memory object */
		assert((vm_size_t) object->vo_size == object->vo_size);
		(void) memory_object_create(dmm, (vm_size_t) object->vo_size,
					    &pager);

		memory_object_default_deallocate(dmm);
	}

	entry = vm_object_hash_entry_alloc(pager);

	vm_object_lock(object);
	lck = vm_object_hash_lock_spin(pager);
	vm_object_hash_insert(entry, object);
	vm_object_hash_unlock(lck);
	vm_object_unlock(object);

	/*
	 *	A reference was returned by
	 *	memory_object_create(), and it is
	 *	copied by vm_object_enter().
	 */

	if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
		panic("vm_object_pager_create: mismatch");

	/*
	 *	Drop the reference we were passed.
	 */
	memory_object_deallocate(pager);

	vm_object_lock(object);

	/*
	 *	Release the paging reference
	 */
	vm_object_paging_end(object);
}
void
vm_object_compressor_pager_create(
	register vm_object_t	object)
{
	memory_object_t		pager;
	vm_object_hash_entry_t	entry;
	lck_mtx_t		*lck;
	vm_object_t		pager_object = VM_OBJECT_NULL;

	assert(object != kernel_object);

	/*
	 *	Prevent collapse or termination by holding a paging reference
	 */

	vm_object_paging_begin(object);
	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing the ports
		 */
		while (!object->pager_initialized) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
		}
		vm_object_paging_end(object);
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */

	object->pager_created = TRUE;
	object->paging_offset = 0;

	vm_object_unlock(object);

	if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
	    (object->vo_size/PAGE_SIZE)) {
		panic("vm_object_compressor_pager_create(%p): "
		      "object size 0x%llx >= 0x%llx\n",
		      object,
		      (uint64_t) object->vo_size,
		      0x0FFFFFFFFULL*PAGE_SIZE);
	}

	/*
	 *	Create the [internal] pager, and associate it with this object.
	 *
	 *	We make the association here so that vm_object_enter()
	 *	can look up the object to complete initializing it.  No
	 *	user will ever map this object.
	 */
	{
		assert(object->temporary);

		/* create our new memory object */
		assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
		       (object->vo_size/PAGE_SIZE));
		(void) compressor_memory_object_create(
			(memory_object_size_t) object->vo_size,
			&pager);
		if (pager == NULL) {
			panic("vm_object_compressor_pager_create(): "
			      "no pager for object %p size 0x%llx\n",
			      object, (uint64_t) object->vo_size);
		}
	}

	entry = vm_object_hash_entry_alloc(pager);

	vm_object_lock(object);
	lck = vm_object_hash_lock_spin(pager);
	vm_object_hash_insert(entry, object);
	vm_object_hash_unlock(lck);
	vm_object_unlock(object);

	/*
	 *	A reference was returned by
	 *	memory_object_create(), and it is
	 *	copied by vm_object_enter().
	 */

	pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);

	if (pager_object != object) {
		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
	}

	/*
	 *	Drop the reference we were passed.
	 */
	memory_object_deallocate(pager);

	vm_object_lock(object);

	/*
	 *	Release the paging reference
	 */
	vm_object_paging_end(object);
}
/*
 *	Routine:	vm_object_remove
 *	Purpose:
 *		Eliminate the pager/object association
 *		for this pager.
 *	Conditions:
 *		The object cache must be locked.
 */
__private_extern__ void
vm_object_remove(
	vm_object_t	object)
{
	memory_object_t pager;

	if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t	entry;

		entry = vm_object_hash_lookup(pager, FALSE);
		if (entry != VM_OBJECT_HASH_ENTRY_NULL)
			entry->object = VM_OBJECT_NULL;
	}
}
/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
static long	object_collapses = 0;
static long	object_bypasses  = 0;

static boolean_t	vm_object_collapse_allowed = TRUE;
static boolean_t	vm_object_bypass_allowed = TRUE;

#if MACH_PAGEMAP
static int	vm_external_discarded;
static int	vm_external_collapsed;
#endif

unsigned long vm_object_collapse_encrypted = 0;
void vm_object_do_collapse_compressor(vm_object_t object,
				      vm_object_t backing_object);
void
vm_object_do_collapse_compressor(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_counters.do_collapse_compressor++;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	size = object->vo_size;

	/*
	 *	Move all compressed pages from backing_object
	 *	to the parent.
	 */

	for (backing_offset = object->vo_shadow_offset;
	     backing_offset < object->vo_shadow_offset + object->vo_size;
	     backing_offset += PAGE_SIZE) {
		memory_object_offset_t backing_pager_offset;

		/* find the next compressed page at or after this offset */
		backing_pager_offset = (backing_offset +
					backing_object->paging_offset);
		backing_pager_offset = vm_compressor_pager_next_compressed(
			backing_object->pager,
			backing_pager_offset);
		if (backing_pager_offset == (memory_object_offset_t) -1) {
			/* no more compressed pages */
			break;
		}
		backing_offset = (backing_pager_offset -
				  backing_object->paging_offset);

		new_offset = backing_offset - object->vo_shadow_offset;

		if (new_offset >= object->vo_size) {
			/* we're out of the scope of "object": done */
			break;
		}

		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
		    (vm_compressor_pager_state_get(object->pager,
						   (new_offset +
						    object->paging_offset)) ==
		     VM_EXTERNAL_STATE_EXISTS)) {
			/*
			 * This page already exists in object, resident or
			 * in the compressor.
			 * We don't need this compressed page in backing_object
			 * and it will be reclaimed when we release
			 * backing_object.
			 */
			continue;
		}

		/*
		 * backing_object has this page in the VM compressor and
		 * we need to transfer it to object.
		 */
		vm_counters.do_collapse_compressor_pages++;
		vm_compressor_pager_transfer(
			/* destination: */
			object->pager,
			(new_offset + object->paging_offset),
			/* source: */
			backing_object->pager,
			(backing_offset + backing_object->paging_offset));
	}
}
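/*
 * Worked example for the offset arithmetic above (sketch, not part of the
 * original source): if object->vo_shadow_offset is 0x4000 and a compressed
 * page lives at backing_offset 0x6000 in backing_object, it corresponds to
 * new_offset 0x2000 in object.  The transfer uses pager-relative offsets,
 * i.e. (0x2000 + object->paging_offset) in the destination pager and
 * (0x6000 + backing_object->paging_offset) in the source pager.
 */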
/*
 *	Routine:	vm_object_do_collapse
 *	Purpose:
 *		Collapse an object with the object backing it.
 *		Pages in the backing object are moved into the
 *		parent, and the backing object is deallocated.
 *	Conditions:
 *		Both objects and the cache are locked; the page
 *		queues are unlocked.
 */
static void
vm_object_do_collapse(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_page_t p, pp;
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

	assert(object->purgable == VM_PURGABLE_DENY);
	assert(backing_object->purgable == VM_PURGABLE_DENY);

	backing_offset = object->vo_shadow_offset;
	size = object->vo_size;

	/*
	 *	Move all in-memory pages from backing_object
	 *	to the parent.  Pages that have been paged out
	 *	will be overwritten by any of the parent's
	 *	pages that shadow them.
	 */

	while (!queue_empty(&backing_object->memq)) {

		p = (vm_page_t) queue_first(&backing_object->memq);

		new_offset = (p->offset - backing_offset);

		assert(!p->busy || p->absent);

		/*
		 *	If the parent has a page here, or if
		 *	this page falls outside the parent,
		 *	dispose of it.
		 *
		 *	Otherwise, move it as planned.
		 */

		if (p->offset < backing_offset || new_offset >= size) {
			VM_PAGE_FREE(p);
		} else {
			/*
			 * The encryption key includes the "pager" and the
			 * "paging_offset".  These will not change during the
			 * object collapse, so we can just move an encrypted
			 * page from one object to the other in this case.
			 * We can't decrypt the page here, since we can't drop
			 * the object lock.
			 */
			if (p->encrypted) {
				vm_object_collapse_encrypted++;
			}
			pp = vm_page_lookup(object, new_offset);
			if (pp == VM_PAGE_NULL) {

				if (VM_COMPRESSOR_PAGER_STATE_GET(object,
								  new_offset)
				    == VM_EXTERNAL_STATE_EXISTS) {
					/*
					 * Parent object has this page
					 * in the VM compressor.
					 * Throw away the backing
					 * object's page.
					 */
					VM_PAGE_FREE(p);
				} else {
					/*
					 *	Parent now has no page.
					 *	Move the backing object's page
					 *	up.
					 */
					vm_page_rename(p, object, new_offset,
						       TRUE);
				}

#if	MACH_PAGEMAP
			} else if (pp->absent) {

				/*
				 *	Parent has an absent page...
				 *	it's not being paged in, so
				 *	it must really be missing from
				 *	the parent.
				 *
				 *	Throw out the absent page...
				 *	any faults looking for that
				 *	page will restart with the new
				 *	one.
				 */

				VM_PAGE_FREE(pp);
				vm_page_rename(p, object, new_offset, TRUE);
#endif	/* MACH_PAGEMAP */
			} else {
				assert(! pp->absent);

				/*
				 *	Parent object has a real page.
				 *	Throw away the backing object's
				 *	page.
				 */
				VM_PAGE_FREE(p);
			}
		}
	}

	if (vm_object_collapse_compressor_allowed &&
	    object->pager != MEMORY_OBJECT_NULL &&
	    backing_object->pager != MEMORY_OBJECT_NULL) {

		/* move compressed pages from backing_object to object */
		vm_object_do_collapse_compressor(object, backing_object);

	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t entry;

#if	!MACH_PAGEMAP
		assert((!object->pager_created &&
			(object->pager == MEMORY_OBJECT_NULL)) ||
		       (!backing_object->pager_created &&
			(backing_object->pager == MEMORY_OBJECT_NULL)));
#else
		assert(!object->pager_created &&
		       object->pager == MEMORY_OBJECT_NULL);
#endif	/* !MACH_PAGEMAP */

		/*
		 *	Move the pager from backing_object to object.
		 *
		 *	XXX We're only using part of the paging space
		 *	for keeps now... we ought to discard the
		 *	unused portion.
		 */

		assert(!object->paging_in_progress);
		assert(!object->activity_in_progress);
		assert(!object->pager_created);
		assert(object->pager == NULL);
		object->pager = backing_object->pager;

		if (backing_object->hashed) {
			lck_mtx_t	*lck;

			lck = vm_object_hash_lock_spin(backing_object->pager);
			entry = vm_object_hash_lookup(object->pager, FALSE);
			assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
			entry->object = object;
			vm_object_hash_unlock(lck);

			object->hashed = TRUE;
		}
		object->pager_created = backing_object->pager_created;
		object->pager_control = backing_object->pager_control;
		object->pager_ready = backing_object->pager_ready;
		object->pager_initialized = backing_object->pager_initialized;
		object->paging_offset =
			backing_object->paging_offset + backing_offset;
		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_collapse(object->pager_control,
						       object);
		}
		/* the backing_object has lost its pager: reset all fields */
		backing_object->pager_created = FALSE;
		backing_object->pager_control = NULL;
		backing_object->pager_ready = FALSE;
		backing_object->paging_offset = 0;
		backing_object->pager = NULL;
	}

#if	MACH_PAGEMAP
	/*
	 *	If the shadow offset is 0, the use the existence map from
	 *	the backing object if there is one. If the shadow offset is
	 *	not zero, toss it.
	 *
	 *	XXX - If the shadow offset is not 0 then a bit copy is needed
	 *	if the map is to be salvaged.  For now, we just toss the
	 *	old map, giving the collapsed object no map. This means that
	 *	the pager is invoked for zero fill pages.  If analysis shows
	 *	that this happens frequently and is a performance hit, then
	 *	this code should be fixed to salvage the map.
	 */
	assert(object->existence_map == VM_EXTERNAL_NULL);
	if (backing_offset || (size != backing_object->vo_size)) {
		vm_external_discarded++;
		vm_external_destroy(backing_object->existence_map,
				    backing_object->vo_size);
	} else {
		vm_external_collapsed++;
		object->existence_map = backing_object->existence_map;
	}
	backing_object->existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */

	/*
	 *	Object now shadows whatever backing_object did.
	 *	Note that the reference to backing_object->shadow
	 *	moves from within backing_object to within object.
	 */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
		/* "backing_object" gave its shadow to "object" */
		backing_object->shadow = VM_OBJECT_NULL;
		backing_object->vo_shadow_offset = 0;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}
	assert((object->shadow == VM_OBJECT_NULL) ||
	       (object->shadow->copy != backing_object));

	/*
	 *	Discard backing_object.
	 *
	 *	Since the backing object has no pages, no
	 *	pager left, and no object references within it,
	 *	all that is necessary is to dispose of it.
	 */
	object_collapses++;

	assert(backing_object->ref_count == 1);
	assert(backing_object->resident_page_count == 0);
	assert(backing_object->paging_in_progress == 0);
	assert(backing_object->activity_in_progress == 0);
	assert(backing_object->shadow == VM_OBJECT_NULL);
	assert(backing_object->vo_shadow_offset == 0);

	if (backing_object->pager != MEMORY_OBJECT_NULL) {
		/* ... unless it has a pager; need to terminate pager too */
		vm_counters.do_collapse_terminate++;
		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
			vm_counters.do_collapse_terminate_failure++;
		}
		return;
	}

	assert(backing_object->pager == NULL);

	backing_object->alive = FALSE;
	vm_object_unlock(backing_object);

	XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
	    backing_object, 0,0,0,0);

#if VM_OBJECT_TRACKING
	if (vm_object_tracking_inited) {
		btlog_remove_entries_for_element(vm_object_tracking_btlog,
						 backing_object);
	}
#endif /* VM_OBJECT_TRACKING */

	vm_object_lock_destroy(backing_object);

	zfree(vm_object_zone, backing_object);
}
static void
vm_object_do_bypass(
	vm_object_t object,
	vm_object_t backing_object)
{
	/*
	 *	Make the parent shadow the next object
	 *	in the chain.
	 */

	vm_object_lock_assert_exclusive(object);
	vm_object_lock_assert_exclusive(backing_object);

#if	TASK_SWAPPER
	/*
	 *	Do object reference in-line to
	 *	conditionally increment shadow's
	 *	residence count.  If object is not
	 *	resident, leave residence count
	 *	on shadow alone.
	 */
	if (backing_object->shadow != VM_OBJECT_NULL) {
		vm_object_lock(backing_object->shadow);
		vm_object_lock_assert_exclusive(backing_object->shadow);
		backing_object->shadow->ref_count++;
		if (object->res_count != 0)
			vm_object_res_reference(backing_object->shadow);
		vm_object_unlock(backing_object->shadow);
	}
#else	/* TASK_SWAPPER */
	vm_object_reference(backing_object->shadow);
#endif	/* TASK_SWAPPER */

	assert(!object->phys_contiguous);
	assert(!backing_object->phys_contiguous);
	object->shadow = backing_object->shadow;
	if (object->shadow) {
		object->vo_shadow_offset += backing_object->vo_shadow_offset;
	} else {
		/* no shadow, therefore no shadow offset... */
		object->vo_shadow_offset = 0;
	}

	/*
	 *	Backing object might have had a copy pointer
	 *	to us.  If it did, clear it.
	 */
	if (backing_object->copy == object) {
		backing_object->copy = VM_OBJECT_NULL;
	}

	/*
	 *	Drop the reference count on backing_object.
	 *
	 *	[TASK_SWAPPER]
	 *	Since its ref_count was at least 2, it
	 *	will not vanish; so we don't need to call
	 *	vm_object_deallocate.
	 *	[with a caveat for "named" objects]
	 *
	 *	The res_count on the backing object is
	 *	conditionally decremented.  It's possible
	 *	(via vm_pageout_scan) to get here with
	 *	a "swapped" object, which has a 0 res_count,
	 *	in which case, the backing object res_count
	 *	is already down by one.
	 *
	 *	[otherwise]
	 *	Don't call vm_object_deallocate unless
	 *	ref_count drops to zero.
	 *
	 *	The ref_count can drop to zero here if the
	 *	backing object could be bypassed but not
	 *	collapsed, such as when the backing object
	 *	is temporary and cachable.
	 */
	if (backing_object->ref_count > 2 ||
	    (!backing_object->named && backing_object->ref_count > 1)) {
		vm_object_lock_assert_exclusive(backing_object);
		backing_object->ref_count--;
#if	TASK_SWAPPER
		if (object->res_count != 0)
			vm_object_res_deallocate(backing_object);
		assert(backing_object->ref_count > 0);
#endif	/* TASK_SWAPPER */
		vm_object_unlock(backing_object);
	} else {

		/*
		 *	Drop locks so that we can deallocate
		 *	the backing object.
		 */

#if	TASK_SWAPPER
		if (object->res_count == 0) {
			/* XXX get a reference for the deallocate below */
			vm_object_res_reference(backing_object);
		}
#endif	/* TASK_SWAPPER */
		/*
		 * vm_object_collapse (the caller of this function) is
		 * now called from contexts that may not guarantee that a
		 * valid reference is held on the object... w/o a valid
		 * reference, it is unsafe and unwise (you will definitely
		 * regret it) to unlock the object and then retake the lock
		 * since the object may be terminated and recycled in between.
		 * The "activity_in_progress" reference will keep the object
		 * from being terminated.
		 */
		vm_object_activity_begin(object);
		vm_object_unlock(object);

		vm_object_unlock(backing_object);
		vm_object_deallocate(backing_object);

		/*
		 *	Relock object. We don't have to reverify
		 *	its state since vm_object_collapse will
		 *	do that for us as it starts at the
		 *	top of its loop.
		 */

		vm_object_lock(object);
		vm_object_activity_end(object);
	}

	object_bypasses++;
}
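/*
 * Note (sketch, not part of the original source): the two branches above
 * split on backing_object's reference count.  An unnamed backing object
 * with ref_count 2 or more just gets the count decremented in place.  When
 * the count could reach zero here (e.g. ref_count 1, or a "named" object at
 * 2), the code instead drops both object locks and lets
 * vm_object_deallocate() do the work, taking an "activity_in_progress"
 * reference on "object" so it cannot be terminated and recycled while
 * unlocked.
 */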
/*
 *	vm_object_collapse:
 *
 *	Perform an object collapse or an object bypass if appropriate.
 *	The real work of collapsing and bypassing is performed in
 *	the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *	Requires that the object be locked and the page queues be unlocked.
 */
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;

__private_extern__ void
vm_object_collapse(
	register vm_object_t		object,
	register vm_object_offset_t	hint_offset,
	boolean_t			can_bypass)
{
	register vm_object_t		backing_object;
	register unsigned int		rcount;
	register unsigned int		size;
	vm_object_t			original_object;
	int				object_lock_type;
	int				backing_object_lock_type;

	vm_object_collapse_calls++;

	if (! vm_object_collapse_allowed &&
	    ! (can_bypass && vm_object_bypass_allowed)) {
		return;
	}

	XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
	    object, 0,0,0,0);

	if (object == VM_OBJECT_NULL)
		return;

	original_object = object;

	/*
	 * The top object was locked "exclusive" by the caller.
	 * In the first pass, to determine if we can collapse the shadow chain,
	 * take a "shared" lock on the shadow objects.  If we can collapse,
	 * we'll have to go down the chain again with exclusive locks.
	 */
	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
	backing_object_lock_type = OBJECT_LOCK_SHARED;

retry:
	object = original_object;
	vm_object_lock_assert_exclusive(object);

	while (TRUE) {
		vm_object_collapse_objects++;
		/*
		 *	Verify that the conditions are right for either
		 *	collapse or bypass:
		 */

		/*
		 *	There is a backing object, and
		 */
		backing_object = object->shadow;
		if (backing_object == VM_OBJECT_NULL) {
			if (object != original_object) {
				vm_object_unlock(object);
			}
			return;
		}
		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
			vm_object_lock_shared(backing_object);
		} else {
			vm_object_lock(backing_object);
		}

		/*
		 *	No pages in the object are currently
		 *	being paged out, and
		 */
		if (object->paging_in_progress != 0 ||
		    object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */
		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0 ||
		    backing_object->activity_in_progress != 0) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 * Purgeable objects are not supposed to engage in
		 * copy-on-write activities, so should not have
		 * any shadow objects or be a shadow object to another
		 * object.
		 * Collapsing a purgeable object would require some
		 * updates to the purgeable compressed ledgers.
		 */
		if (object->purgable != VM_PURGABLE_DENY ||
		    backing_object->purgable != VM_PURGABLE_DENY) {
			panic("vm_object_collapse() attempting to collapse "
			      "purgeable object: %p(%d) %p(%d)\n",
			      object, object->purgable,
			      backing_object, backing_object->purgable);
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != VM_OBJECT_NULL &&
		    backing_object->shadow->copy == backing_object) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	We can now try to either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 *
		 *	If there is exactly one reference to the backing
		 *	object, we may be able to collapse it into the
		 *	parent.
		 *
		 *	If MACH_PAGEMAP is defined:
		 *	The parent must not have a pager created for it,
		 *	since collapsing a backing_object dumps new pages
		 *	into the parent that its pager doesn't know about
		 *	(and the collapse code can't merge the existence
		 *	maps).
		 *	Otherwise:
		 *	As long as one of the objects is still not known
		 *	to the pager, we can collapse them.
		 */
		if (backing_object->ref_count == 1 &&
		    (vm_object_collapse_compressor_allowed ||
		     !object->pager_created
#if	!MACH_PAGEMAP
		     || (!backing_object->pager_created)
#endif	/*!MACH_PAGEMAP */
		    ) && vm_object_collapse_allowed) {

			/*
			 * We need the exclusive lock on the VM objects.
			 */
			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
				/*
				 * We have an object and its shadow locked
				 * "shared".  We can't just upgrade the locks
				 * to "exclusive", as some other thread might
				 * also have these objects locked "shared" and
				 * attempt to upgrade one or the other to
				 * "exclusive".  The upgrades would block
				 * forever waiting for the other "shared" locks
				 * to get released.
				 * So we have to release the locks and go
				 * down the shadow chain again (since it could
				 * have changed) with "exclusive" locking.
				 */
				vm_object_unlock(backing_object);
				if (object != original_object)
					vm_object_unlock(object);
				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
				goto retry;
			}

			XPR(XPR_VM_OBJECT,
			    "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
			    backing_object, object,
			    backing_object->pager,
			    backing_object->pager_control, 0);

			/*
			 *	Collapse the object with its backing
			 *	object, and try again with the object's
			 *	new backing object.
			 */

			vm_object_do_collapse(object, backing_object);
			vm_object_collapse_do_collapse++;
			continue;
		}

		/*
		 *	Collapsing the backing object was not possible
		 *	or permitted, so let's try bypassing it.
		 */

		if (! (can_bypass && vm_object_bypass_allowed)) {
			/* try and collapse the rest of the shadow chain */
			if (object != original_object) {
				vm_object_unlock(object);
			}
			object = backing_object;
			object_lock_type = backing_object_lock_type;
			continue;
		}

		/*
		 *	If the object doesn't have all its pages present,
		 *	we have to make sure no pages in the backing object
		 *	"show through" before bypassing it.
		 */
		size = (unsigned int)atop(object->vo_size);
		rcount = object->resident_page_count;

		if (rcount != size) {
			vm_object_offset_t	offset;
			vm_object_offset_t	backing_offset;
			unsigned int		backing_rcount;

			/*
			 *	If the backing object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (backing_object->pager_created
#if	MACH_PAGEMAP
			    && (backing_object->existence_map == VM_EXTERNAL_NULL)
#endif	/* MACH_PAGEMAP */
				) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If the object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (object->pager_created
#if	MACH_PAGEMAP
			    && (object->existence_map == VM_EXTERNAL_NULL)
#endif	/* MACH_PAGEMAP */
				) {
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			backing_offset = object->vo_shadow_offset;
			backing_rcount = backing_object->resident_page_count;

			if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
				/*
				 * we have enough pages in the backing object to guarantee that
				 * at least 1 of them must be 'uncovered' by a resident page
				 * in the object we're evaluating, so move on and
				 * try to collapse the rest of the shadow chain
				 */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	If the backing object has existence info,
			 *	we must check examine its existence info
			 *	as well.
			 */

#if	MACH_PAGEMAP
#define EXISTS_IN_OBJECT(obj, off, rc)			\
	((vm_external_state_get((obj)->existence_map,	\
				(vm_offset_t)(off))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||		\
	 (VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||		\
	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#else	/* MACH_PAGEMAP */
#define EXISTS_IN_OBJECT(obj, off, rc)			\
	((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))	\
	  == VM_EXTERNAL_STATE_EXISTS) ||		\
	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#endif	/* MACH_PAGEMAP */
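/*
 * Note (sketch, not part of the original source): EXISTS_IN_OBJECT() checks
 * the compressor (and, with MACH_PAGEMAP, the existence map) before the
 * resident-page lookup, and it only performs the vm_page_lookup() while its
 * "rc" argument is non-zero, decrementing "rc" on every resident hit.  Once
 * "rc" resident pages have been accounted for, further hash lookups are
 * skipped entirely; e.g. a caller passing rc == object->resident_page_count
 * stops paying for lookups as soon as every resident page has been seen.
 * That is also why the first (resident-walk) pass below uses a scratch
 * copy "rc" of rcount instead of decrementing rcount itself.
 */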
			/*
			 * Check the hint location first
			 * (since it is often the quickest way out of here).
			 */
			if (object->cow_hint != ~(vm_offset_t)0)
				hint_offset = (vm_object_offset_t)object->cow_hint;
			else
				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
					(hint_offset - 8 * PAGE_SIZE_64) : 0;

			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
					     backing_offset, backing_rcount) &&
			    !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
				/* dependency right at the hint */
				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
				/* try and collapse the rest of the shadow chain */
				if (object != original_object) {
					vm_object_unlock(object);
				}
				object = backing_object;
				object_lock_type = backing_object_lock_type;
				continue;
			}

			/*
			 * If the object's window onto the backing_object
			 * is large compared to the number of resident
			 * pages in the backing object, it makes sense to
			 * walk the backing_object's resident pages first.
			 *
			 * NOTE: Pages may be in both the existence map and/or
			 * resident, so if we don't find a dependency while
			 * walking the backing object's resident page list
			 * directly, and there is an existence map, we'll have
			 * to run the offset based 2nd pass.  Because we may
			 * have to run both passes, we need to be careful
			 * not to decrement 'rcount' in the 1st pass
			 */
			if (backing_rcount && backing_rcount < (size / 8)) {
				unsigned int rc = rcount;
				vm_page_t p;

				backing_rcount = backing_object->resident_page_count;
				p = (vm_page_t)queue_first(&backing_object->memq);
				do {
					offset = (p->offset - backing_offset);

					if (offset < object->vo_size &&
					    offset != hint_offset &&
					    !EXISTS_IN_OBJECT(object, offset, rc)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */
						break;
					}
					p = (vm_page_t) queue_next(&p->listq);

				} while (--backing_rcount);
				if (backing_rcount != 0 ) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}

			/*
			 * Walk through the offsets looking for pages in the
			 * backing object that show through to the object.
			 */
			if (backing_rcount
#if MACH_PAGEMAP
			    || backing_object->existence_map
#endif	/* MACH_PAGEMAP */
				) {
				offset = hint_offset;

				while((offset =
				       (offset + PAGE_SIZE_64 < object->vo_size) ?
				       (offset + PAGE_SIZE_64) : 0) != hint_offset) {

					if (EXISTS_IN_OBJECT(backing_object, offset +
							     backing_offset, backing_rcount) &&
					    !EXISTS_IN_OBJECT(object, offset, rcount)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t) offset; /* atomic */
						break;
					}
				}
				if (offset != hint_offset) {
					/* try and collapse the rest of the shadow chain */
					if (object != original_object) {
						vm_object_unlock(object);
					}
					object = backing_object;
					object_lock_type = backing_object_lock_type;
					continue;
				}
			}
		}

		/*
		 * We need "exclusive" locks on the 2 VM objects.
		 */
		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
			vm_object_unlock(backing_object);
			if (object != original_object)
				vm_object_unlock(object);
			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
			goto retry;
		}

		/* reset the offset hint for any objects deeper in the chain */
		object->cow_hint = (vm_offset_t)0;

		/*
		 *	All interesting pages in the backing object
		 *	already live in the parent or its pager.
		 *	Thus we can bypass the backing object.
		 */

		vm_object_do_bypass(object, backing_object);
		vm_object_collapse_do_bypass++;

		/*
		 *	Try again with this object's new backing object.
		 */

		continue;
	}

	if (object != original_object) {
		vm_object_unlock(object);
	}
}
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 *		The object must not have paging_in_progress, usually
 *		guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
	register vm_object_t		object,
	register vm_object_offset_t	start,
	register vm_object_offset_t	end)
{
	register vm_page_t	p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */

	if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->cleaning && !p->pageout && !p->laundry);
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(p->phys_page);
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				assert(!p->cleaning && !p->pageout && !p->laundry);
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(p->phys_page);
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object into coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object(s) must *not* be locked. The map must be locked
 *	to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
	register vm_object_t		prev_object,
	vm_object_t			next_object,
	vm_object_offset_t		prev_offset,
	__unused vm_object_offset_t	next_offset,
	vm_object_size_t		prev_size,
	vm_object_size_t		next_size)
{
	vm_object_size_t	newsize;

	if (next_object != VM_OBJECT_NULL) {
		return(FALSE);
	}

	if (prev_object == VM_OBJECT_NULL) {
		return(TRUE);
	}

	XPR(XPR_VM_OBJECT,
	    "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
	    prev_object, prev_offset, prev_size, next_size, 0);

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object, prev_offset, TRUE);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use any way:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. is purgeable
	 *	. paging references (pages might be in page-list)
	 */

	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->true_share != FALSE) ||
	    (prev_object->purgable != VM_PURGABLE_DENY) ||
	    (prev_object->paging_in_progress != 0) ||
	    (prev_object->activity_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	vm_object_coalesce_count++;

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
			      prev_offset + prev_size,
			      prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->vo_size) {
#if	MACH_PAGEMAP
		/*
		 *	We cannot extend an object that has existence info,
		 *	since the existence info might then fail to cover
		 *	the entire object.
		 *
		 *	This assertion must be true because the object
		 *	has no pager, and we only create existence info
		 *	for objects with pagers.
		 */
		assert(prev_object->existence_map == VM_EXTERNAL_NULL);
#endif	/* MACH_PAGEMAP */
		prev_object->vo_size = newsize;
	}

	vm_object_unlock(prev_object);
	return(TRUE);
}
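/*
 * Worked example (sketch, not part of the original source): coalescing a
 * 3-page region at prev_offset 0x1000 with a following 2-page region gives
 * newsize = 0x1000 + 3*PAGE_SIZE + 2*PAGE_SIZE.  If that exceeds
 * prev_object->vo_size the object simply grows, after any stale pages in
 * [prev_offset + prev_size, prev_offset + prev_size + next_size) have been
 * removed above.
 */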
kern_return_t
vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size)
{
	ppnum_t			base_page;
	vm_object_offset_t	base_offset;

	if (!object->private)
		return KERN_FAILURE;

	base_page = phys_page;

	vm_object_lock(object);

	if (!object->phys_contiguous) {
		vm_page_t	m;

		if ((base_offset = trunc_page_64(offset)) != offset) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		base_offset += object->paging_offset;

		while (size) {
			m = vm_page_lookup(object, base_offset);

			if (m != VM_PAGE_NULL) {
				if (m->fictitious) {
					if (m->phys_page != vm_page_guard_addr) {

						vm_page_lockspin_queues();
						m->private = TRUE;
						vm_page_unlock_queues();

						m->fictitious = FALSE;
						m->phys_page = base_page;
					}
				} else if (m->phys_page != base_page) {

					if ( !m->private) {
						/*
						 * we'd leak a real page... that can't be right
						 */
						panic("vm_object_populate_with_private - %p not private", m);
					}
					if (m->pmapped) {
						/*
						 * pmap call to clear old mapping
						 */
						pmap_disconnect(m->phys_page);
					}
					m->phys_page = base_page;
				}
				if (m->encrypted) {
					/*
					 * we should never see this on a ficticious or private page
					 */
					panic("vm_object_populate_with_private - %p encrypted", m);
				}

			} else {
				while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
					vm_page_more_fictitious();

				/*
				 * private normally requires lock_queues but since we
				 * are initializing the page, its not necessary here
				 */
				m->private = TRUE;
				m->fictitious = FALSE;
				m->phys_page = base_page;

				vm_page_insert(m, object, base_offset);
			}
			base_page++;			/* Go to the next physical page */
			base_offset += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		/* NOTE: we should check the original settings here */
		/* if we have a size > zero a pmap call should be made */
		/* to disable the range */

		/* shadows on contiguous memory are not allowed */
		/* we therefore can use the offset field */
		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
		object->vo_size = size;
	}
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
/*
 *	memory_object_free_from_cache:
 *
 *	Walk the vm_object cache list, removing and freeing vm_objects
 *	which are backed by the pager identified by the caller, (pager_ops).
 *	Remove up to "count" objects, if there are that many available
 *	in the cache.
 *
 *	Walk the list at most once, return the number of vm_objects
 *	actually freed.
 */

__private_extern__ kern_return_t
memory_object_free_from_cache(
	__unused host_t		host,
	__unused memory_object_pager_ops_t pager_ops,
	int		*count)
{
#if VM_OBJECT_CACHE
	int	object_released = 0;

	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

/*
	if(host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);
*/

 try_again:
	vm_object_cache_lock();

	queue_iterate(&vm_object_cached_list, object,
		      vm_object_t, cached_list) {
		if (object->pager &&
		    (pager_ops == object->pager->mo_pager_ops)) {
			vm_object_lock(object);
			queue_remove(&vm_object_cached_list, object,
				     vm_object_t, cached_list);
			vm_object_cached_count--;

			vm_object_cache_unlock();
			/*
			 *	Since this object is in the cache, we know
			 *	that it is initialized and has only a pager's
			 *	(implicit) reference. Take a reference to avoid
			 *	recursive deallocations.
			 */

			assert(object->pager_initialized);
			assert(object->ref_count == 0);
			vm_object_lock_assert_exclusive(object);
			object->ref_count++;

			/*
			 *	Terminate the object.
			 *	If the object had a shadow, we let
			 *	vm_object_deallocate deallocate it.
			 *	"pageout" objects have a shadow, but
			 *	maintain a "paging reference" rather
			 *	than a normal reference.
			 *	(We are careful here to limit recursion.)
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

			if ((vm_object_terminate(object) == KERN_SUCCESS)
			    && (shadow != VM_OBJECT_NULL)) {
				vm_object_deallocate(shadow);
			}

			if (object_released++ == *count)
				return KERN_SUCCESS;
			goto try_again;
		}
	}
	vm_object_cache_unlock();
	*count = object_released;
#else
	*count = 0;
#endif
	return KERN_SUCCESS;
}
kern_return_t
memory_object_create_named(
	memory_object_t		pager,
	memory_object_offset_t	size,
	memory_object_control_t	*control)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;
	lck_mtx_t		*lck;

	*control = MEMORY_OBJECT_CONTROL_NULL;
	if (pager == MEMORY_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	lck = vm_object_hash_lock_spin(pager);
	entry = vm_object_hash_lookup(pager, FALSE);

	if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
	    (entry->object != VM_OBJECT_NULL)) {
		if (entry->object->named == TRUE)
			panic("memory_object_create_named: caller already holds the right");
	}
	vm_object_hash_unlock(lck);

	if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
		return(KERN_INVALID_OBJECT);
	}

	/* wait for object (if any) to be ready */
	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->named = TRUE;
		while (!object->pager_ready) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
		}
		*control = object->pager_control;
		vm_object_unlock(object);
	}
	return (KERN_SUCCESS);
}
/*
 *	Routine:	memory_object_recover_named [user interface]
 *	Purpose:
 *		Attempt to recover a named reference for a VM object.
 *		VM will verify that the object has not already started
 *		down the termination path, and if it has, will optionally
 *		wait for that to finish.
 *	Returns:
 *		KERN_SUCCESS - we recovered a named reference on the object
 *		KERN_FAILURE - we could not recover a reference (object dead)
 *		KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
	memory_object_control_t	control,
	boolean_t		wait_on_terminating)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}
restart:
	vm_object_lock(object);

	if (object->terminating && wait_on_terminating) {
		vm_object_wait(object,
			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			       THREAD_UNINT);
		goto restart;
	}

	if (!object->alive) {
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	if (object->named == TRUE) {
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}
#if VM_OBJECT_CACHE
	if ((object->ref_count == 0) && (!object->terminating)) {
		if (!vm_object_cache_lock_try()) {
			vm_object_unlock(object);
			goto restart;
		}
		queue_remove(&vm_object_cached_list, object,
			     vm_object_t, cached_list);
		vm_object_cached_count--;
		XPR(XPR_VM_OBJECT_CACHE,
		    "memory_object_recover_named: removing %X, head (%X, %X)\n",
		    object,
		    vm_object_cached_list.next,
		    vm_object_cached_list.prev, 0,0);

		vm_object_cache_unlock();
	}
#endif
	object->named = TRUE;
	vm_object_lock_assert_exclusive(object);
	object->ref_count++;
	vm_object_res_reference(object);
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
	return (KERN_SUCCESS);
}
/*
 *	vm_object_release_name:
 *
 *	Enforces name semantic on memory_object reference count decrement
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_create_named.
 *
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1. i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable otherwise when
 *	the reference count is zero, it is simply terminated.
 */

__private_extern__ kern_return_t
vm_object_release_name(
	vm_object_t	object,
	int		flags)
{
	vm_object_t	shadow;
	boolean_t	original_object = TRUE;

	while (object != VM_OBJECT_NULL) {

		vm_object_lock(object);

		assert(object->alive);
		if (original_object)
			assert(object->named);
		assert(object->ref_count > 0);

		/*
		 *	We have to wait for initialization before
		 *	destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		if (((object->ref_count > 1)
		     && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
		    || (object->terminating)) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
			vm_object_unlock(object);
			return KERN_SUCCESS;
		}

		if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
		    (object->ref_count == 1)) {
			if (original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			/* let vm_object_deallocate push this thing into */
			/* the cache, if that it is where it is bound */
			vm_object_deallocate(object);
			return KERN_SUCCESS;
		}
		VM_OBJ_RES_DECR(object);
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

		if (object->ref_count == 1) {
			if (vm_object_terminate(object) != KERN_SUCCESS) {
				if (original_object) {
					return KERN_FAILURE;
				}
				return KERN_SUCCESS;
			}
			if (shadow != VM_OBJECT_NULL) {
				original_object = FALSE;
				object = shadow;
				continue;
			}
			return KERN_SUCCESS;
		}
		vm_object_lock_assert_exclusive(object);
		object->ref_count--;
		assert(object->ref_count > 0);
		if (original_object)
			object->named = FALSE;
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}
	/*NOTREACHED*/
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
vm_object_lock_request(
	vm_object_t			object,
	vm_object_offset_t		offset,
	vm_object_size_t		size,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			prot)
{
	__unused boolean_t	should_flush;

	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    object, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	(void)vm_object_update(object,
			       offset, size, NULL, NULL, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
/*
 * Empty a purgeable object by grabbing the physical pages assigned to it and
 * putting them on the free queue without writing them to backing store, etc.
 * When the pages are next touched they will be demand zero-fill pages.  We
 * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
 * skip referenced/dirty pages, pages on the active queue, etc.  We're more
 * than happy to grab these since this is a purgeable object.  We mark the
 * object as "empty" after reaping its pages.
 *
 * On entry the object must be locked and it must be
 * purgeable with no delayed copies pending.
 */
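/*
 * A minimal sketch of the expected calling convention (illustrative only;
 * it mirrors the VM_PURGABLE_EMPTY path of vm_object_purgable_control()
 * further below):
 *
 *	vm_object_lock(object);
 *	assert(object->purgable != VM_PURGABLE_DENY);
 *	assert(object->copy == VM_OBJECT_NULL);
 *	(void) vm_object_purge(object, 0);
 *	assert(object->purgable == VM_PURGABLE_EMPTY);
 *	vm_object_unlock(object);
 */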
void
vm_object_purge(vm_object_t object, int flags)
{
	vm_object_lock_assert_exclusive(object);

	if (object->purgable == VM_PURGABLE_DENY)
		return;

	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

	/*
	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
	 * reaping its pages.  We update vm_page_purgeable_count in bulk
	 * and we don't want vm_page_remove() to update it again for each
	 * page we reap later.
	 *
	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
	 * are all accounted for in the "volatile" ledgers, so this does not
	 * make any difference.
	 * If we transitioned directly from NONVOLATILE to EMPTY,
	 * vm_page_purgeable_count must have been updated when the object
	 * was dequeued from its volatile queue and the purgeable ledgers
	 * must have also been updated accordingly at that time (in
	 * vm_object_purgable_control()).
	 */
	if (object->purgable == VM_PURGABLE_VOLATILE) {
		unsigned int delta;

		assert(object->resident_page_count >=
		       object->wired_page_count);
		delta = (object->resident_page_count -
			 object->wired_page_count);
		if (delta != 0) {
			assert(vm_page_purgeable_count >= delta);
			OSAddAtomic(-delta,
				    (SInt32 *)&vm_page_purgeable_count);
		}
		if (object->wired_page_count != 0) {
			assert(vm_page_purgeable_wired_count >=
			       object->wired_page_count);
			OSAddAtomic(-object->wired_page_count,
				    (SInt32 *)&vm_page_purgeable_wired_count);
		}
		object->purgable = VM_PURGABLE_EMPTY;
	}
	assert(object->purgable == VM_PURGABLE_EMPTY);

	vm_object_reap_pages(object, REAP_PURGEABLE);

	if (object->pager != NULL &&
	    COMPRESSED_PAGER_IS_ACTIVE) {
		unsigned int pgcount;

		if (object->activity_in_progress == 0 &&
		    object->paging_in_progress == 0) {
			/*
			 * Also reap any memory coming from this object
			 * in the VM compressor.
			 *
			 * There are no operations in progress on the VM object
			 * and no operation can start while we're holding the
			 * VM object lock, so it's safe to reap the compressed
			 * pages and update the page counts.
			 */
			pgcount = vm_compressor_pager_get_count(object->pager);
			if (pgcount) {
				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
				vm_compressor_pager_count(object->pager,
							  -pgcount,
							  FALSE, /* shared */
							  object);
				vm_purgeable_compressed_update(object,
							       -pgcount);
			}
			if ( !(flags & C_DONT_BLOCK)) {
				assert(vm_compressor_pager_get_count(object->pager)
				       == 0);
			}
		} else {
			/*
			 * There's some kind of paging activity in progress
			 * for this object, which could result in a page
			 * being compressed or decompressed, possibly while
			 * the VM object is not locked, so it could race
			 * with us.
			 *
			 * We can't really synchronize this without possibly
			 * causing a deadlock when the compressor needs to
			 * allocate or free memory while compressing or
			 * decompressing a page from a purgeable object
			 * mapped in the kernel_map...
			 *
			 * So let's not attempt to purge the compressor
			 * pager if there's any kind of operation in
			 * progress on the VM object.
			 */
		}
	}

	vm_object_lock_assert_exclusive(object);
}
/*
 * vm_object_purgeable_control() allows the caller to control and investigate the
 * state of a purgeable object.  A purgeable object is created via a call to
 * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
 * never be coalesced with any other object -- even other purgeable objects --
 * and will thus always remain a distinct object.  A purgeable object has
 * special semantics when its reference count is exactly 1.  If its reference
 * count is greater than 1, then a purgeable object will behave like a normal
 * object and attempts to use this interface will result in an error return
 * of KERN_INVALID_ARGUMENT.
 *
 * A purgeable object may be put into a "volatile" state which will make the
 * object's pages eligible for being reclaimed without paging to backing
 * store if the system runs low on memory.  If the pages in a volatile
 * purgeable object are reclaimed, the purgeable object is said to have been
 * "emptied."  When a purgeable object is emptied the system will reclaim as
 * many pages from the object as it can in a convenient manner (pages already
 * en route to backing store or busy for other reasons are left as is).  When
 * a purgeable object is made volatile, its pages will generally be reclaimed
 * before other pages in the application's working set.  This semantic is
 * generally used by applications which can recreate the data in the object
 * faster than it can be paged in.  One such example might be media assets
 * which can be reread from a much faster RAID volume.
 *
 * A purgeable object may be designated as "non-volatile" which means it will
 * behave like all other objects in the system with pages being written to and
 * read from backing store as needed to satisfy system memory needs.  If the
 * object was emptied before the object was made non-volatile, that fact will
 * be returned as the old state of the purgeable object (see
 * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
 * were reclaimed as part of emptying the object will be refaulted in as
 * zero-fill on demand.  It is up to the application to note that an object
 * was emptied and recreate the object's contents if necessary.  When a
 * purgeable object is made non-volatile, its pages will generally not be paged
 * out to backing store in the immediate future.  A purgeable object may also
 * be manually emptied.
 *
 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
 * volatile purgeable object may be queried at any time.  This information may
 * be used as a control input to let the application know when the system is
 * experiencing memory pressure and is reclaiming memory.
 *
 * The specified address may be any address within the purgeable object.  If
 * the specified address does not represent any object in the target task's
 * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
 * object containing the specified address is not a purgeable object, then
 * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
 * returned.
 *
 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
 * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
 * state is used to set the new state of the purgeable object and return its
 * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
 * object is returned in the parameter state.
 *
 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
 * the non-volatile, volatile and volatile/empty states described above.
 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
 * immediately reclaim as many pages in the object as can be conveniently
 * collected (some may have already been written to backing store or be
 * otherwise busy).
 *
 * The process of making a purgeable object non-volatile and determining its
 * previous state is atomic.  Thus, if a purgeable object is made
 * VM_PURGABLE_NONVOLATILE and the old state is returned as
 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
 * completely intact and will remain so until the object is made volatile
 * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
 * was reclaimed while it was in a volatile state and its previous contents
 * have been lost.
 *
 * The object must be locked.
 */
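/*
 * For reference, a minimal user-level sketch of the interface described
 * above (illustrative only; error handling elided).  It assumes the
 * standard Mach routines mach_vm_allocate() and mach_vm_purgable_control()
 * exported through <mach/mach_vm.h>:
 *
 *	mach_vm_address_t addr = 0;
 *	int state;
 *
 *	// Create a purgeable region.
 *	mach_vm_allocate(mach_task_self(), &addr, 1024 * 1024,
 *			 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *	// Mark it volatile while the cached data is not needed.
 *	state = VM_PURGABLE_VOLATILE;
 *	mach_vm_purgable_control(mach_task_self(), addr,
 *				 VM_PURGABLE_SET_STATE, &state);
 *
 *	// Later: make it non-volatile again and check whether it was purged.
 *	state = VM_PURGABLE_NONVOLATILE;
 *	mach_vm_purgable_control(mach_task_self(), addr,
 *				 VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		// contents were reclaimed; recreate the data
 *	}
 */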
kern_return_t
vm_object_purgable_control(
	vm_object_t	object,
	vm_purgable_t	control,
	int		*state)
{
	int	old_state;
	int	new_state;

	if (object == VM_OBJECT_NULL) {
		/*
		 * Object must already be present or it can't be purgeable.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock_assert_exclusive(object);

	/*
	 * Get current state of the purgeable object.
	 */
	old_state = object->purgable;
	if (old_state == VM_PURGABLE_DENY)
		return KERN_INVALID_ARGUMENT;

	/* purgeable cant have delayed copies - now or in the future */
	assert(object->copy == VM_OBJECT_NULL);
	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

	/*
	 * Execute the desired operation.
	 */
	if (control == VM_PURGABLE_GET_STATE) {
		*state = old_state;
		return KERN_SUCCESS;
	}

	if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
		object->volatile_empty = TRUE;
	}
	if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
		object->volatile_fault = TRUE;
	}

	new_state = *state & VM_PURGABLE_STATE_MASK;
	if (new_state == VM_PURGABLE_VOLATILE &&
	    object->volatile_empty) {
		new_state = VM_PURGABLE_EMPTY;
	}

	switch (new_state) {
	case VM_PURGABLE_DENY:
	case VM_PURGABLE_NONVOLATILE:
		object->purgable = new_state;

		if (old_state == VM_PURGABLE_VOLATILE) {
			unsigned int delta;

			assert(object->resident_page_count >=
			       object->wired_page_count);
			delta = (object->resident_page_count -
				 object->wired_page_count);

			assert(vm_page_purgeable_count >= delta);

			if (delta != 0) {
				OSAddAtomic(-delta,
					    (SInt32 *)&vm_page_purgeable_count);
			}
			if (object->wired_page_count != 0) {
				assert(vm_page_purgeable_wired_count >=
				       object->wired_page_count);
				OSAddAtomic(-object->wired_page_count,
					    (SInt32 *)&vm_page_purgeable_wired_count);
			}

			vm_page_lock_queues();

			/* object should be on a queue */
			assert(object->objq.next != NULL &&
			       object->objq.prev != NULL);
			purgeable_q_t queue;

			/*
			 * Move object from its volatile queue to the
			 * non-volatile queue...
			 */
			queue = vm_purgeable_object_remove(object);
			assert(queue);

			if (object->purgeable_when_ripe) {
				vm_purgeable_token_delete_last(queue);
			}
			assert(queue->debug_count_objects >= 0);

			vm_page_unlock_queues();
		}
		if (old_state == VM_PURGABLE_VOLATILE ||
		    old_state == VM_PURGABLE_EMPTY) {
			/*
			 * Transfer the object's pages from the volatile to
			 * non-volatile ledgers.
			 */
			vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
						FALSE);
		}

		break;

	case VM_PURGABLE_VOLATILE:
		if (object->volatile_fault) {
			vm_page_t	p;
			int		refmod;

			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (p->busy ||
				    VM_PAGE_WIRED(p) ||
				    p->fictitious) {
					continue;
				}
				refmod = pmap_disconnect(p->phys_page);
				if ((refmod & VM_MEM_MODIFIED) &&
				    !p->dirty) {
					SET_PAGE_DIRTY(p, FALSE);
				}
			}
		}

		if (old_state == VM_PURGABLE_EMPTY &&
		    object->resident_page_count == 0 &&
		    object->pager == NULL)
			break;

		purgeable_q_t queue;

		/* find the correct queue */
		if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
			queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
		else {
			if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
				queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
			else
				queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
		}

		if (old_state == VM_PURGABLE_NONVOLATILE ||
		    old_state == VM_PURGABLE_EMPTY) {
			unsigned int delta;

			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
			    VM_PURGABLE_NO_AGING) {
				object->purgeable_when_ripe = FALSE;
			} else {
				object->purgeable_when_ripe = TRUE;
			}

			if (object->purgeable_when_ripe) {
				kern_return_t result;

				/* try to add token... this can fail */
				vm_page_lock_queues();

				result = vm_purgeable_token_add(queue);
				if (result != KERN_SUCCESS) {
					vm_page_unlock_queues();
					return result;
				}
				vm_page_unlock_queues();
			}

			assert(object->resident_page_count >=
			       object->wired_page_count);
			delta = (object->resident_page_count -
				 object->wired_page_count);

			if (delta != 0) {
				OSAddAtomic(delta,
					    &vm_page_purgeable_count);
			}
			if (object->wired_page_count != 0) {
				OSAddAtomic(object->wired_page_count,
					    &vm_page_purgeable_wired_count);
			}

			object->purgable = new_state;

			/* object should be on "non-volatile" queue */
			assert(object->objq.next != NULL);
			assert(object->objq.prev != NULL);
		}
		else if (old_state == VM_PURGABLE_VOLATILE) {
			purgeable_q_t	old_queue;
			boolean_t	purgeable_when_ripe;

			/*
			 * if reassigning priorities / purgeable groups, we don't change the
			 * token queue. So moving priorities will not make pages stay around longer.
			 * Reasoning is that the algorithm gives most priority to the most important
			 * object. If a new token is added, the most important object's priority is boosted.
			 * This biases the system already for purgeable queues that move a lot.
			 * It doesn't seem more biasing is necessary in this case, where no new object is added.
			 */
			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */

			old_queue = vm_purgeable_object_remove(object);
			assert(old_queue);

			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
			    VM_PURGABLE_NO_AGING) {
				purgeable_when_ripe = FALSE;
			} else {
				purgeable_when_ripe = TRUE;
			}

			if (old_queue != queue ||
			    (purgeable_when_ripe !=
			     object->purgeable_when_ripe)) {
				kern_return_t result;

				/* Changing queue. Have to move token. */
				vm_page_lock_queues();
				if (object->purgeable_when_ripe) {
					vm_purgeable_token_delete_last(old_queue);
				}
				object->purgeable_when_ripe = purgeable_when_ripe;
				if (object->purgeable_when_ripe) {
					result = vm_purgeable_token_add(queue);
					assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
				}
				vm_page_unlock_queues();
			}
		}

		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
		if (old_state == VM_PURGABLE_NONVOLATILE) {
			vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
						FALSE);
		}

		assert(queue->debug_count_objects >= 0);

		break;

	case VM_PURGABLE_EMPTY:
		if (object->volatile_fault) {
			vm_page_t	p;
			int		refmod;

			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (p->busy ||
				    VM_PAGE_WIRED(p) ||
				    p->fictitious) {
					continue;
				}
				refmod = pmap_disconnect(p->phys_page);
				if ((refmod & VM_MEM_MODIFIED) &&
				    !p->dirty) {
					SET_PAGE_DIRTY(p, FALSE);
				}
			}
		}

		if (old_state == new_state) {
			/* nothing changes */
			break;
		}

		assert(old_state == VM_PURGABLE_NONVOLATILE ||
		       old_state == VM_PURGABLE_VOLATILE);
		if (old_state == VM_PURGABLE_VOLATILE) {
			purgeable_q_t old_queue;

			/* object should be on a queue */
			assert(object->objq.next != NULL &&
			       object->objq.prev != NULL);

			old_queue = vm_purgeable_object_remove(object);
			assert(old_queue);
			if (object->purgeable_when_ripe) {
				vm_page_lock_queues();
				vm_purgeable_token_delete_first(old_queue);
				vm_page_unlock_queues();
			}
		}

		if (old_state == VM_PURGABLE_NONVOLATILE) {
			/*
			 * This object's pages were previously accounted as
			 * "non-volatile" and now need to be accounted as
			 * "volatile".
			 */
			vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
						FALSE);
			/*
			 * Set to VM_PURGABLE_EMPTY because the pages are no
			 * longer accounted in the "non-volatile" ledger
			 * and are also not accounted for in
			 * "vm_page_purgeable_count".
			 */
			object->purgable = VM_PURGABLE_EMPTY;
		}

		(void) vm_object_purge(object, 0);
		assert(object->purgable == VM_PURGABLE_EMPTY);

		break;
	}

	*state = old_state;

	vm_object_lock_assert_exclusive(object);

	return KERN_SUCCESS;
}
kern_return_t
vm_object_get_page_counts(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	unsigned int		*resident_page_count,
	unsigned int		*dirty_page_count)
{
	kern_return_t		kr = KERN_SUCCESS;
	boolean_t		count_dirty_pages = FALSE;
	vm_page_t		p = VM_PAGE_NULL;
	unsigned int		local_resident_count = 0;
	unsigned int		local_dirty_count = 0;
	vm_object_offset_t	cur_offset = 0;
	vm_object_offset_t	end_offset = 0;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	cur_offset = offset;

	end_offset = offset + size;

	vm_object_lock_assert_exclusive(object);

	if (dirty_page_count != NULL) {

		count_dirty_pages = TRUE;
	}

	if (resident_page_count != NULL && count_dirty_pages == FALSE) {
		/*
		 * Fast path when:
		 * - we only want the resident page count, and,
		 * - the entire object is exactly covered by the request.
		 */
		if (offset == 0 && (object->vo_size == size)) {

			*resident_page_count = object->resident_page_count;
			goto out;
		}
	}

	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {

		queue_iterate(&object->memq, p, vm_page_t, listq) {

			if (p->offset >= cur_offset && p->offset < end_offset) {

				local_resident_count++;

				if (count_dirty_pages) {

					if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {

						local_dirty_count++;
					}
				}
			}
		}
	} else {

		for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {

			p = vm_page_lookup(object, cur_offset);

			if (p != VM_PAGE_NULL) {

				local_resident_count++;

				if (count_dirty_pages) {

					if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) {

						local_dirty_count++;
					}
				}
			}
		}
	}

	if (resident_page_count != NULL) {
		*resident_page_count = local_resident_count;
	}

	if (dirty_page_count != NULL) {
		*dirty_page_count = local_dirty_count;
	}

out:
	return kr;
}
/*
 *	vm_object_res_deallocate
 *
 *	(recursively) decrement residence counts on vm objects and their shadows.
 *	Called from vm_object_deallocate and when swapping out an object.
 *
 *	The object is locked, and remains locked throughout the function,
 *	even as we iterate down the shadow chain.  Locks on intermediate objects
 *	will be dropped, but not the original object.
 *
 *	NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__
void
vm_object_res_deallocate(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_all_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			vm_object_lock(tmp_object);
			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
			assert(object->res_count > 0);
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/*
 *	vm_object_res_reference
 *
 *	Internal function to increment residence count on a vm object
 *	and its shadows.  It is called only from vm_object_reference, and
 *	when swapping in a vm object, via vm_map_swapin.
 *
 *	The object is locked, and remains locked throughout the function,
 *	even as we iterate down the shadow chain.  Locks on intermediate objects
 *	will be dropped, but not the original object.
 *
 *	NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__
void
vm_object_res_reference(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	while ((++object->res_count == 1) &&
	       (object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object)
			vm_object_unlock(object);
		object = tmp_object;
	}
	if (object != orig_object)
		vm_object_unlock(object);
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif	/* TASK_SWAPPER */
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__
void
vm_object_reference(
	register vm_object_t	object)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	assert(object->ref_count > 0);
	vm_object_reference_locked(object);
	vm_object_unlock(object);
}

#ifdef MACH_BSD
/*
 * Scale the vm_object_cache
 * This is required to make sure that the vm_object_cache is big
 * enough to effectively cache the mapped file.
 * This is really important with UBC as all the regular file vnodes
 * have a memory object associated with them. Having this cache too
 * small results in rapid reclaim of vnodes and hurts performance a LOT!
 *
 * This is also needed as the number of vnodes can be dynamically scaled.
 */
kern_return_t
adjust_vm_object_cache(
	__unused vm_size_t oval,
	__unused vm_size_t nval)
{
#if VM_OBJECT_CACHE
	vm_object_cached_max = nval;
	vm_object_cache_trim(FALSE);
#endif
	return (KERN_SUCCESS);
}
#endif /* MACH_BSD */
/*
 * vm_object_transpose
 *
 * This routine takes two VM objects of the same size and exchanges
 * their backing store.
 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
 *
 * The VM objects must not be locked by caller.
 */
unsigned int vm_object_transpose_count = 0;
7364 vm_object_transpose(
7365 vm_object_t object1
,
7366 vm_object_t object2
,
7367 vm_object_size_t transpose_size
)
7369 vm_object_t tmp_object
;
7370 kern_return_t retval
;
7371 boolean_t object1_locked
, object2_locked
;
7373 vm_object_offset_t page_offset
;
7374 lck_mtx_t
*hash_lck
;
7375 vm_object_hash_entry_t hash_entry
;
7377 tmp_object
= VM_OBJECT_NULL
;
7378 object1_locked
= FALSE
; object2_locked
= FALSE
;
7380 if (object1
== object2
||
7381 object1
== VM_OBJECT_NULL
||
7382 object2
== VM_OBJECT_NULL
) {
7384 * If the 2 VM objects are the same, there's
7385 * no point in exchanging their backing store.
7387 retval
= KERN_INVALID_VALUE
;
7392 * Since we need to lock both objects at the same time,
7393 * make sure we always lock them in the same order to
7396 if (object1
> object2
) {
7397 tmp_object
= object1
;
7399 object2
= tmp_object
;
7403 * Allocate a temporary VM object to hold object1's contents
7404 * while we copy object2 to object1.
7406 tmp_object
= vm_object_allocate(transpose_size
);
7407 vm_object_lock(tmp_object
);
7408 tmp_object
->can_persist
= FALSE
;
7412 * Grab control of the 1st VM object.
7414 vm_object_lock(object1
);
7415 object1_locked
= TRUE
;
7416 if (!object1
->alive
|| object1
->terminating
||
7417 object1
->copy
|| object1
->shadow
|| object1
->shadowed
||
7418 object1
->purgable
!= VM_PURGABLE_DENY
) {
7420 * We don't deal with copy or shadow objects (yet).
7422 retval
= KERN_INVALID_VALUE
;
7426 * We're about to mess with the object's backing store and
7427 * taking a "paging_in_progress" reference wouldn't be enough
7428 * to prevent any paging activity on this object, so the caller should
7429 * have "quiesced" the objects beforehand, via a UPL operation with
7430 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
7431 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
7433 * Wait for any paging operation to complete (but only paging, not
7434 * other kind of activities not linked to the pager). After we're
7435 * statisfied that there's no more paging in progress, we keep the
7436 * object locked, to guarantee that no one tries to access its pager.
7438 vm_object_paging_only_wait(object1
, THREAD_UNINT
);
7441 * Same as above for the 2nd object...
7443 vm_object_lock(object2
);
7444 object2_locked
= TRUE
;
7445 if (! object2
->alive
|| object2
->terminating
||
7446 object2
->copy
|| object2
->shadow
|| object2
->shadowed
||
7447 object2
->purgable
!= VM_PURGABLE_DENY
) {
7448 retval
= KERN_INVALID_VALUE
;
7451 vm_object_paging_only_wait(object2
, THREAD_UNINT
);
7454 if (object1
->vo_size
!= object2
->vo_size
||
7455 object1
->vo_size
!= transpose_size
) {
7457 * If the 2 objects don't have the same size, we can't
7458 * exchange their backing stores or one would overflow.
7459 * If their size doesn't match the caller's
7460 * "transpose_size", we can't do it either because the
7461 * transpose operation will affect the entire span of
7464 retval
= KERN_INVALID_VALUE
;
7470 * Transpose the lists of resident pages.
7471 * This also updates the resident_page_count and the memq_hint.
7473 if (object1
->phys_contiguous
|| queue_empty(&object1
->memq
)) {
7475 * No pages in object1, just transfer pages
7476 * from object2 to object1. No need to go through
7477 * an intermediate object.
7479 while (!queue_empty(&object2
->memq
)) {
7480 page
= (vm_page_t
) queue_first(&object2
->memq
);
7481 vm_page_rename(page
, object1
, page
->offset
, FALSE
);
7483 assert(queue_empty(&object2
->memq
));
7484 } else if (object2
->phys_contiguous
|| queue_empty(&object2
->memq
)) {
7486 * No pages in object2, just transfer pages
7487 * from object1 to object2. No need to go through
7488 * an intermediate object.
7490 while (!queue_empty(&object1
->memq
)) {
7491 page
= (vm_page_t
) queue_first(&object1
->memq
);
7492 vm_page_rename(page
, object2
, page
->offset
, FALSE
);
7494 assert(queue_empty(&object1
->memq
));
7496 /* transfer object1's pages to tmp_object */
7497 while (!queue_empty(&object1
->memq
)) {
7498 page
= (vm_page_t
) queue_first(&object1
->memq
);
7499 page_offset
= page
->offset
;
7500 vm_page_remove(page
, TRUE
);
7501 page
->offset
= page_offset
;
7502 queue_enter(&tmp_object
->memq
, page
, vm_page_t
, listq
);
7504 assert(queue_empty(&object1
->memq
));
7505 /* transfer object2's pages to object1 */
7506 while (!queue_empty(&object2
->memq
)) {
7507 page
= (vm_page_t
) queue_first(&object2
->memq
);
7508 vm_page_rename(page
, object1
, page
->offset
, FALSE
);
7510 assert(queue_empty(&object2
->memq
));
7511 /* transfer tmp_object's pages to object2 */
7512 while (!queue_empty(&tmp_object
->memq
)) {
7513 page
= (vm_page_t
) queue_first(&tmp_object
->memq
);
7514 queue_remove(&tmp_object
->memq
, page
,
7516 vm_page_insert(page
, object2
, page
->offset
);
7518 assert(queue_empty(&tmp_object
->memq
));
7521 #define __TRANSPOSE_FIELD(field) \
7523 tmp_object->field = object1->field; \
7524 object1->field = object2->field; \
7525 object2->field = tmp_object->field; \
7528 /* "Lock" refers to the object not its contents */
7529 /* "size" should be identical */
7530 assert(object1
->vo_size
== object2
->vo_size
);
7531 /* "memq_hint" was updated above when transposing pages */
7532 /* "ref_count" refers to the object not its contents */
7534 /* "res_count" refers to the object not its contents */
7536 /* "resident_page_count" was updated above when transposing pages */
7537 /* "wired_page_count" was updated above when transposing pages */
7538 /* "reusable_page_count" was updated above when transposing pages */
7539 /* there should be no "copy" */
7540 assert(!object1
->copy
);
7541 assert(!object2
->copy
);
7542 /* there should be no "shadow" */
7543 assert(!object1
->shadow
);
7544 assert(!object2
->shadow
);
7545 __TRANSPOSE_FIELD(vo_shadow_offset
); /* used by phys_contiguous objects */
7546 __TRANSPOSE_FIELD(pager
);
7547 __TRANSPOSE_FIELD(paging_offset
);
7548 __TRANSPOSE_FIELD(pager_control
);
7549 /* update the memory_objects' pointers back to the VM objects */
7550 if (object1
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
7551 memory_object_control_collapse(object1
->pager_control
,
7554 if (object2
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
7555 memory_object_control_collapse(object2
->pager_control
,
7558 __TRANSPOSE_FIELD(copy_strategy
);
7559 /* "paging_in_progress" refers to the object not its contents */
7560 assert(!object1
->paging_in_progress
);
7561 assert(!object2
->paging_in_progress
);
7562 assert(object1
->activity_in_progress
);
7563 assert(object2
->activity_in_progress
);
7564 /* "all_wanted" refers to the object not its contents */
7565 __TRANSPOSE_FIELD(pager_created
);
7566 __TRANSPOSE_FIELD(pager_initialized
);
7567 __TRANSPOSE_FIELD(pager_ready
);
7568 __TRANSPOSE_FIELD(pager_trusted
);
7569 __TRANSPOSE_FIELD(can_persist
);
7570 __TRANSPOSE_FIELD(internal
);
7571 __TRANSPOSE_FIELD(temporary
);
7572 __TRANSPOSE_FIELD(private);
7573 __TRANSPOSE_FIELD(pageout
);
7574 /* "alive" should be set */
7575 assert(object1
->alive
);
7576 assert(object2
->alive
);
7577 /* "purgeable" should be non-purgeable */
7578 assert(object1
->purgable
== VM_PURGABLE_DENY
);
7579 assert(object2
->purgable
== VM_PURGABLE_DENY
);
7580 /* "shadowed" refers to the the object not its contents */
7581 __TRANSPOSE_FIELD(purgeable_when_ripe
);
7582 __TRANSPOSE_FIELD(advisory_pageout
);
7583 __TRANSPOSE_FIELD(true_share
);
7584 /* "terminating" should not be set */
7585 assert(!object1
->terminating
);
7586 assert(!object2
->terminating
);
7587 __TRANSPOSE_FIELD(named
);
7588 /* "shadow_severed" refers to the object not its contents */
7589 __TRANSPOSE_FIELD(phys_contiguous
);
7590 __TRANSPOSE_FIELD(nophyscache
);
7591 /* "cached_list.next" points to transposed object */
7592 object1
->cached_list
.next
= (queue_entry_t
) object2
;
7593 object2
->cached_list
.next
= (queue_entry_t
) object1
;
7594 /* "cached_list.prev" should be NULL */
7595 assert(object1
->cached_list
.prev
== NULL
);
7596 assert(object2
->cached_list
.prev
== NULL
);
7597 /* "msr_q" is linked to the object not its contents */
7598 assert(queue_empty(&object1
->msr_q
));
7599 assert(queue_empty(&object2
->msr_q
));
7600 __TRANSPOSE_FIELD(last_alloc
);
7601 __TRANSPOSE_FIELD(sequential
);
7602 __TRANSPOSE_FIELD(pages_created
);
7603 __TRANSPOSE_FIELD(pages_used
);
7604 __TRANSPOSE_FIELD(scan_collisions
);
7606 __TRANSPOSE_FIELD(existence_map
);
7608 __TRANSPOSE_FIELD(cow_hint
);
7610 __TRANSPOSE_FIELD(paging_object
);
7612 __TRANSPOSE_FIELD(wimg_bits
);
7613 __TRANSPOSE_FIELD(set_cache_attr
);
7614 __TRANSPOSE_FIELD(code_signed
);
7615 if (object1
->hashed
) {
7616 hash_lck
= vm_object_hash_lock_spin(object2
->pager
);
7617 hash_entry
= vm_object_hash_lookup(object2
->pager
, FALSE
);
7618 assert(hash_entry
!= VM_OBJECT_HASH_ENTRY_NULL
);
7619 hash_entry
->object
= object2
;
7620 vm_object_hash_unlock(hash_lck
);
7622 if (object2
->hashed
) {
7623 hash_lck
= vm_object_hash_lock_spin(object1
->pager
);
7624 hash_entry
= vm_object_hash_lookup(object1
->pager
, FALSE
);
7625 assert(hash_entry
!= VM_OBJECT_HASH_ENTRY_NULL
);
7626 hash_entry
->object
= object1
;
7627 vm_object_hash_unlock(hash_lck
);
7629 __TRANSPOSE_FIELD(hashed
);
7630 object1
->transposed
= TRUE
;
7631 object2
->transposed
= TRUE
;
7632 __TRANSPOSE_FIELD(mapping_in_progress
);
7633 __TRANSPOSE_FIELD(volatile_empty
);
7634 __TRANSPOSE_FIELD(volatile_fault
);
7635 __TRANSPOSE_FIELD(all_reusable
);
7636 assert(object1
->blocked_access
);
7637 assert(object2
->blocked_access
);
7638 assert(object1
->__object2_unused_bits
== 0);
7639 assert(object2
->__object2_unused_bits
== 0);
7641 /* "uplq" refers to the object not its contents (see upl_transpose()) */
7643 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.next
== NULL
));
7644 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.prev
== NULL
));
7645 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.next
== NULL
));
7646 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.prev
== NULL
));
7648 #undef __TRANSPOSE_FIELD
7650 retval
= KERN_SUCCESS
;
7656 if (tmp_object
!= VM_OBJECT_NULL
) {
7657 vm_object_unlock(tmp_object
);
7659 * Re-initialize the temporary object to avoid
7660 * deallocating a real pager.
7662 _vm_object_allocate(transpose_size
, tmp_object
);
7663 vm_object_deallocate(tmp_object
);
7664 tmp_object
= VM_OBJECT_NULL
;
7667 if (object1_locked
) {
7668 vm_object_unlock(object1
);
7669 object1_locked
= FALSE
;
7671 if (object2_locked
) {
7672 vm_object_unlock(object2
);
7673 object2_locked
= FALSE
;
7676 vm_object_transpose_count
++;
/*
 * vm_object_cluster_size
 *
 * Determine how big a cluster we should issue an I/O for...
 *
 * Inputs:   *start == offset of page needed
 *           *length == maximum cluster pager can handle
 * Outputs:  *start == beginning offset of cluster
 *           *length == length of cluster to try
 *
 * The original *start will be encompassed by the cluster
 */
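/*
 * Worked example (illustrative numbers only): with *start == 0x40000 and a
 * computed pre_heat_size of 64KB under random-access behavior, half of the
 * cluster is placed before the faulting page and half after:
 *
 *	head_size    = trunc_page(64KB / 2) = 0x8000
 *	target_start = 0x40000 - 0x8000     = 0x38000
 *
 * so the candidate cluster spans [0x38000, 0x48000) and the original fault
 * offset sits at its center, subject to the EOF and lo/hi offset clipping
 * performed in the body below.
 */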
extern int speculative_reads_disabled;
extern int ignore_is_ssd;

unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
unsigned int preheat_min_bytes = (1024 * 32);
7702 __private_extern__
void
7703 vm_object_cluster_size(vm_object_t object
, vm_object_offset_t
*start
,
7704 vm_size_t
*length
, vm_object_fault_info_t fault_info
, uint32_t *io_streaming
)
7706 vm_size_t pre_heat_size
;
7707 vm_size_t tail_size
;
7708 vm_size_t head_size
;
7709 vm_size_t max_length
;
7710 vm_size_t cluster_size
;
7711 vm_object_offset_t object_size
;
7712 vm_object_offset_t orig_start
;
7713 vm_object_offset_t target_start
;
7714 vm_object_offset_t offset
;
7715 vm_behavior_t behavior
;
7716 boolean_t look_behind
= TRUE
;
7717 boolean_t look_ahead
= TRUE
;
7718 boolean_t isSSD
= FALSE
;
7719 uint32_t throttle_limit
;
7721 int sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
7722 vm_size_t max_ph_size
;
7723 vm_size_t min_ph_size
;
7725 assert( !(*length
& PAGE_MASK
));
7726 assert( !(*start
& PAGE_MASK_64
));
7729 * remember maxiumum length of run requested
7731 max_length
= *length
;
7733 * we'll always return a cluster size of at least
7734 * 1 page, since the original fault must always
7737 *length
= PAGE_SIZE
;
7740 if (speculative_reads_disabled
|| fault_info
== NULL
) {
7742 * no cluster... just fault the page in
7746 orig_start
= *start
;
7747 target_start
= orig_start
;
7748 cluster_size
= round_page(fault_info
->cluster_size
);
7749 behavior
= fault_info
->behavior
;
7751 vm_object_lock(object
);
7753 if (object
->pager
== MEMORY_OBJECT_NULL
)
7754 goto out
; /* pager is gone for this object, nothing more to do */
7757 vnode_pager_get_isSSD(object
->pager
, &isSSD
);
7759 min_ph_size
= round_page(preheat_min_bytes
);
7760 max_ph_size
= round_page(preheat_max_bytes
);
7766 if (min_ph_size
< PAGE_SIZE
)
7767 min_ph_size
= PAGE_SIZE
;
7769 if (max_ph_size
< PAGE_SIZE
)
7770 max_ph_size
= PAGE_SIZE
;
7771 else if (max_ph_size
> MAX_UPL_TRANSFER_BYTES
)
7772 max_ph_size
= MAX_UPL_TRANSFER_BYTES
;
7774 if (max_length
> max_ph_size
)
7775 max_length
= max_ph_size
;
7777 if (max_length
<= PAGE_SIZE
)
7780 if (object
->internal
)
7781 object_size
= object
->vo_size
;
7783 vnode_pager_get_object_size(object
->pager
, &object_size
);
7785 object_size
= round_page_64(object_size
);
7787 if (orig_start
>= object_size
) {
7789 * fault occurred beyond the EOF...
7790 * we need to punt w/o changing the
7795 if (object
->pages_used
> object
->pages_created
) {
7797 * must have wrapped our 32 bit counters
7800 object
->pages_used
= object
->pages_created
= 0;
7802 if ((sequential_run
= object
->sequential
)) {
7803 if (sequential_run
< 0) {
7804 sequential_behavior
= VM_BEHAVIOR_RSEQNTL
;
7805 sequential_run
= 0 - sequential_run
;
7807 sequential_behavior
= VM_BEHAVIOR_SEQUENTIAL
;
7814 behavior
= VM_BEHAVIOR_DEFAULT
;
7816 case VM_BEHAVIOR_DEFAULT
:
7817 if (object
->internal
&& fault_info
->user_tag
== VM_MEMORY_STACK
)
7820 if (sequential_run
>= (3 * PAGE_SIZE
)) {
7821 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7823 if (sequential_behavior
== VM_BEHAVIOR_SEQUENTIAL
)
7824 look_behind
= FALSE
;
7831 if (object
->pages_created
< (20 * (min_ph_size
>> PAGE_SHIFT
))) {
7835 pre_heat_size
= min_ph_size
;
7838 * Linear growth in PH size: The maximum size is max_length...
7839 * this cacluation will result in a size that is neither a
7840 * power of 2 nor a multiple of PAGE_SIZE... so round
7841 * it up to the nearest PAGE_SIZE boundary
7843 pre_heat_size
= (max_length
* (uint64_t)object
->pages_used
) / object
->pages_created
;
7845 if (pre_heat_size
< min_ph_size
)
7846 pre_heat_size
= min_ph_size
;
7848 pre_heat_size
= round_page(pre_heat_size
);
7853 case VM_BEHAVIOR_RANDOM
:
7854 if ((pre_heat_size
= cluster_size
) <= PAGE_SIZE
)
7858 case VM_BEHAVIOR_SEQUENTIAL
:
7859 if ((pre_heat_size
= cluster_size
) == 0)
7860 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7861 look_behind
= FALSE
;
7866 case VM_BEHAVIOR_RSEQNTL
:
7867 if ((pre_heat_size
= cluster_size
) == 0)
7868 pre_heat_size
= sequential_run
+ PAGE_SIZE
;
7875 throttle_limit
= (uint32_t) max_length
;
7876 assert(throttle_limit
== max_length
);
7878 if (vnode_pager_get_throttle_io_limit(object
->pager
, &throttle_limit
) == KERN_SUCCESS
) {
7879 if (max_length
> throttle_limit
)
7880 max_length
= throttle_limit
;
7882 if (pre_heat_size
> max_length
)
7883 pre_heat_size
= max_length
;
7885 if (behavior
== VM_BEHAVIOR_DEFAULT
&& (pre_heat_size
> min_ph_size
)) {
7887 unsigned int consider_free
= vm_page_free_count
+ vm_page_cleaned_count
;
7889 if (consider_free
< vm_page_throttle_limit
) {
7890 pre_heat_size
= trunc_page(pre_heat_size
/ 16);
7891 } else if (consider_free
< vm_page_free_target
) {
7892 pre_heat_size
= trunc_page(pre_heat_size
/ 4);
7895 if (pre_heat_size
< min_ph_size
)
7896 pre_heat_size
= min_ph_size
;
7898 if (look_ahead
== TRUE
) {
7899 if (look_behind
== TRUE
) {
7901 * if we get here its due to a random access...
7902 * so we want to center the original fault address
7903 * within the cluster we will issue... make sure
7904 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7905 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7906 * necessarily an even number of pages so we need to truncate
7907 * the result to a PAGE_SIZE boundary
7909 head_size
= trunc_page(pre_heat_size
/ 2);
7911 if (target_start
> head_size
)
7912 target_start
-= head_size
;
7917 * 'target_start' at this point represents the beginning offset
7918 * of the cluster we are considering... 'orig_start' will be in
7919 * the center of this cluster if we didn't have to clip the start
7920 * due to running into the start of the file
7923 if ((target_start
+ pre_heat_size
) > object_size
)
7924 pre_heat_size
= (vm_size_t
)(round_page_64(object_size
- target_start
));
7926 * at this point caclulate the number of pages beyond the original fault
7927 * address that we want to consider... this is guaranteed not to extend beyond
7928 * the current EOF...
7930 assert((vm_size_t
)(orig_start
- target_start
) == (orig_start
- target_start
));
7931 tail_size
= pre_heat_size
- (vm_size_t
)(orig_start
- target_start
) - PAGE_SIZE
;
7933 if (pre_heat_size
> target_start
) {
7935 * since pre_heat_size is always smaller then 2^32,
7936 * if it is larger then target_start (a 64 bit value)
7937 * it is safe to clip target_start to 32 bits
7939 pre_heat_size
= (vm_size_t
) target_start
;
7943 assert( !(target_start
& PAGE_MASK_64
));
7944 assert( !(pre_heat_size
& PAGE_MASK
));
7946 if (pre_heat_size
<= PAGE_SIZE
)
7949 if (look_behind
== TRUE
) {
7951 * take a look at the pages before the original
7952 * faulting offset... recalculate this in case
7953 * we had to clip 'pre_heat_size' above to keep
7954 * from running past the EOF.
7956 head_size
= pre_heat_size
- tail_size
- PAGE_SIZE
;
7958 for (offset
= orig_start
- PAGE_SIZE_64
; head_size
; offset
-= PAGE_SIZE_64
, head_size
-= PAGE_SIZE
) {
7960 * don't poke below the lowest offset
7962 if (offset
< fault_info
->lo_offset
)
7965 * for external objects and internal objects w/o an existence map
7966 * vm_externl_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7969 if (vm_external_state_get(object
->existence_map
, offset
) == VM_EXTERNAL_STATE_ABSENT
) {
7971 * we know for a fact that the pager can't provide the page
7972 * so don't include it or any pages beyond it in this cluster
7976 #endif /* MACH_PAGEMAP */
7977 if (VM_COMPRESSOR_PAGER_STATE_GET(object
, offset
)
7978 == VM_EXTERNAL_STATE_ABSENT
) {
7981 if (vm_page_lookup(object
, offset
) != VM_PAGE_NULL
) {
7983 * don't bridge resident pages
7988 *length
+= PAGE_SIZE
;
7991 if (look_ahead
== TRUE
) {
7992 for (offset
= orig_start
+ PAGE_SIZE_64
; tail_size
; offset
+= PAGE_SIZE_64
, tail_size
-= PAGE_SIZE
) {
7994 * don't poke above the highest offset
7996 if (offset
>= fault_info
->hi_offset
)
7998 assert(offset
< object_size
);
8001 * for external objects and internal objects w/o an existence map
8002 * vm_externl_state_get will return VM_EXTERNAL_STATE_UNKNOWN
8005 if (vm_external_state_get(object
->existence_map
, offset
) == VM_EXTERNAL_STATE_ABSENT
) {
8007 * we know for a fact that the pager can't provide the page
8008 * so don't include it or any pages beyond it in this cluster
8012 #endif /* MACH_PAGEMAP */
8013 if (VM_COMPRESSOR_PAGER_STATE_GET(object
, offset
) == VM_EXTERNAL_STATE_ABSENT
) {
8016 if (vm_page_lookup(object
, offset
) != VM_PAGE_NULL
) {
8018 * don't bridge resident pages
8022 *length
+= PAGE_SIZE
;
8026 if (*length
> max_length
)
8027 *length
= max_length
;
8029 vm_object_unlock(object
);
8031 DTRACE_VM1(clustersize
, vm_size_t
, *length
);
/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the VM object rather than on a UPL.
 */
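/*
 * Hedged usage sketch (not taken from a specific call site), assuming the
 * vm_object_page_op(object, offset, ops, phys_entry, flags) declaration in
 * vm_object.h: pin down and dirty a single page, then release it.
 *
 *	kern_return_t	kr;
 *	ppnum_t		phys;
 *	int		page_flags;
 *
 *	kr = vm_object_page_op(object, offset,
 *			       UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DIRTY,
 *			       &phys, &page_flags);
 *	// ... use the physical page; it stays valid while marked busy ...
 *	kr = vm_object_page_op(object, offset,
 *			       UPL_POP_CLR | UPL_POP_BUSY,
 *			       &phys, &page_flags);
 */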
8043 vm_object_offset_t offset
,
8045 ppnum_t
*phys_entry
,
8050 vm_object_lock(object
);
8052 if(ops
& UPL_POP_PHYSICAL
) {
8053 if(object
->phys_contiguous
) {
8055 *phys_entry
= (ppnum_t
)
8056 (object
->vo_shadow_offset
>> PAGE_SHIFT
);
8058 vm_object_unlock(object
);
8059 return KERN_SUCCESS
;
8061 vm_object_unlock(object
);
8062 return KERN_INVALID_OBJECT
;
8065 if(object
->phys_contiguous
) {
8066 vm_object_unlock(object
);
8067 return KERN_INVALID_OBJECT
;
8071 if((dst_page
= vm_page_lookup(object
,offset
)) == VM_PAGE_NULL
) {
8072 vm_object_unlock(object
);
8073 return KERN_FAILURE
;
8076 /* Sync up on getting the busy bit */
8077 if((dst_page
->busy
|| dst_page
->cleaning
) &&
8078 (((ops
& UPL_POP_SET
) &&
8079 (ops
& UPL_POP_BUSY
)) || (ops
& UPL_POP_DUMP
))) {
8080 /* someone else is playing with the page, we will */
8082 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
8086 if (ops
& UPL_POP_DUMP
) {
8087 if (dst_page
->pmapped
== TRUE
)
8088 pmap_disconnect(dst_page
->phys_page
);
8090 VM_PAGE_FREE(dst_page
);
8097 /* Get the condition of flags before requested ops */
8098 /* are undertaken */
8100 if(dst_page
->dirty
) *flags
|= UPL_POP_DIRTY
;
8101 if(dst_page
->pageout
) *flags
|= UPL_POP_PAGEOUT
;
8102 if(dst_page
->precious
) *flags
|= UPL_POP_PRECIOUS
;
8103 if(dst_page
->absent
) *flags
|= UPL_POP_ABSENT
;
8104 if(dst_page
->busy
) *flags
|= UPL_POP_BUSY
;
8107 /* The caller should have made a call either contingent with */
8108 /* or prior to this call to set UPL_POP_BUSY */
8109 if(ops
& UPL_POP_SET
) {
8110 /* The protection granted with this assert will */
8111 /* not be complete. If the caller violates the */
8112 /* convention and attempts to change page state */
8113 /* without first setting busy we may not see it */
8114 /* because the page may already be busy. However */
8115 /* if such violations occur we will assert sooner */
8117 assert(dst_page
->busy
|| (ops
& UPL_POP_BUSY
));
8118 if (ops
& UPL_POP_DIRTY
) {
8119 SET_PAGE_DIRTY(dst_page
, FALSE
);
8121 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= TRUE
;
8122 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= TRUE
;
8123 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= TRUE
;
8124 if (ops
& UPL_POP_BUSY
) dst_page
->busy
= TRUE
;
8127 if(ops
& UPL_POP_CLR
) {
8128 assert(dst_page
->busy
);
8129 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= FALSE
;
8130 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= FALSE
;
8131 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= FALSE
;
8132 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= FALSE
;
8133 if (ops
& UPL_POP_BUSY
) {
8134 dst_page
->busy
= FALSE
;
8135 PAGE_WAKEUP(dst_page
);
8139 if (dst_page
->encrypted
) {
8142 * We need to decrypt this encrypted page before the
8143 * caller can access its contents.
8144 * But if the caller really wants to access the page's
8145 * contents, they have to keep the page "busy".
8146 * Otherwise, the page could get recycled or re-encrypted
8149 if ((ops
& UPL_POP_SET
) && (ops
& UPL_POP_BUSY
) &&
8152 * The page is stable enough to be accessed by
8153 * the caller, so make sure its contents are
8156 vm_page_decrypt(dst_page
, 0);
8159 * The page is not busy, so don't bother
8160 * decrypting it, since anything could
8161 * happen to it between now and when the
8162 * caller wants to access it.
8163 * We should not give the caller access
8166 assert(!phys_entry
);
8172 * The physical page number will remain valid
8173 * only if the page is kept busy.
8174 * ENCRYPTED SWAP: make sure we don't let the
8175 * caller access an encrypted page.
8177 assert(dst_page
->busy
);
8178 assert(!dst_page
->encrypted
);
8179 *phys_entry
= dst_page
->phys_page
;
8185 vm_object_unlock(object
);
8186 return KERN_SUCCESS
;
/*
 * vm_object_range_op offers performance enhancement over
 * vm_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */
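/*
 * Hedged usage sketch (illustrative only), assuming the
 * vm_object_range_op(object, offset_beg, offset_end, ops, range) declaration
 * in vm_object.h: measure how much of a range is resident before deciding
 * whether a cheaper copy path can be used.
 *
 *	kern_return_t	kr;
 *	uint32_t	range = 0;
 *
 *	kr = vm_object_range_op(object,
 *				start_offset,
 *				start_offset + len,
 *				UPL_ROP_PRESENT,
 *				&range);
 *	// on success, "range" holds the number of bytes, starting at
 *	// start_offset, backed by resident pages before the first gap.
 */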
8203 vm_object_offset_t offset_beg
,
8204 vm_object_offset_t offset_end
,
8208 vm_object_offset_t offset
;
8211 if (offset_end
- offset_beg
> (uint32_t) -1) {
8212 /* range is too big and would overflow "*range" */
8213 return KERN_INVALID_ARGUMENT
;
8215 if (object
->resident_page_count
== 0) {
8217 if (ops
& UPL_ROP_PRESENT
) {
8220 *range
= (uint32_t) (offset_end
- offset_beg
);
8221 assert(*range
== (offset_end
- offset_beg
));
8224 return KERN_SUCCESS
;
8226 vm_object_lock(object
);
8228 if (object
->phys_contiguous
) {
8229 vm_object_unlock(object
);
8230 return KERN_INVALID_OBJECT
;
8233 offset
= offset_beg
& ~PAGE_MASK_64
;
8235 while (offset
< offset_end
) {
8236 dst_page
= vm_page_lookup(object
, offset
);
8237 if (dst_page
!= VM_PAGE_NULL
) {
8238 if (ops
& UPL_ROP_DUMP
) {
8239 if (dst_page
->busy
|| dst_page
->cleaning
) {
8241 * someone else is playing with the
8242 * page, we will have to wait
8244 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
8246 * need to relook the page up since it's
8247 * state may have changed while we slept
8248 * it might even belong to a different object
8253 if (dst_page
->laundry
) {
8254 dst_page
->pageout
= FALSE
;
8256 vm_pageout_steal_laundry(dst_page
, FALSE
);
8258 if (dst_page
->pmapped
== TRUE
)
8259 pmap_disconnect(dst_page
->phys_page
);
8261 VM_PAGE_FREE(dst_page
);
8263 } else if ((ops
& UPL_ROP_ABSENT
)
8264 && (!dst_page
->absent
|| dst_page
->busy
)) {
8267 } else if (ops
& UPL_ROP_PRESENT
)
8270 offset
+= PAGE_SIZE
;
8272 vm_object_unlock(object
);
8275 if (offset
> offset_end
)
8276 offset
= offset_end
;
8277 if(offset
> offset_beg
) {
8278 *range
= (uint32_t) (offset
- offset_beg
);
8279 assert(*range
== (offset
- offset_beg
));
8284 return KERN_SUCCESS
;
/*
 * Used to point a pager directly to a range of memory (when the pager may be associated
 * with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
 * expect that the virtual address will denote the start of a range that is physically contiguous.
 */
kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t	object,
	memory_object_offset_t	offset,
	addr64_t		base_vaddr,
	vm_size_t		size)
{
	ppnum_t page_num;
	boolean_t clobbered_private;
	kern_return_t retval;
	vm_object_t pager_object;

	page_num = pmap_find_phys(kernel_pmap, base_vaddr);

	if (!page_num) {
		retval = KERN_FAILURE;
		goto out;
	}

	pager_object = memory_object_control_to_vm_object(object);

	if (!pager_object) {
		retval = KERN_FAILURE;
		goto out;
	}

	clobbered_private = pager_object->private;
	pager_object->private = TRUE;
	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);

	if (retval != KERN_SUCCESS)
		pager_object->private = clobbered_private;

out:
	return retval;
}
uint32_t scan_object_collision = 0;

void
vm_object_lock(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		mutex_pause(2);
	}
	lck_rw_lock_exclusive(&object->Lock);
}

boolean_t
vm_object_lock_avoid(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		return TRUE;
	}
	return FALSE;
}

boolean_t
_vm_object_lock_try(vm_object_t object)
{
	return (lck_rw_try_lock_exclusive(&object->Lock));
}

boolean_t
vm_object_lock_try(vm_object_t object)
{
	/*
	 * Called from hibernate path so check before blocking.
	 */
	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
		mutex_pause(2);
	}
	return _vm_object_lock_try(object);
}

void
vm_object_lock_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	lck_rw_lock_shared(&object->Lock);
}

boolean_t
vm_object_lock_try_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	return (lck_rw_try_lock_shared(&object->Lock));
}
unsigned int vm_object_change_wimg_mode_count = 0;

/*
 * The object must be locked
 */
void
vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
{
	vm_page_t p;

	vm_object_lock_assert_exclusive(object);

	vm_object_paging_wait(object, THREAD_UNINT);

	queue_iterate(&object->memq, p, vm_page_t, listq) {

		if (!p->fictitious)
			pmap_set_cache_attributes(p->phys_page, wimg_mode);
	}
	if (wimg_mode == VM_WIMG_USE_DEFAULT)
		object->set_cache_attr = FALSE;
	else
		object->set_cache_attr = TRUE;

	object->wimg_bits = wimg_mode;

	vm_object_change_wimg_mode_count++;
}
8418 kern_return_t
vm_object_pack(
8419 unsigned int *purgeable_count
,
8420 unsigned int *wired_count
,
8421 unsigned int *clean_count
,
8422 unsigned int *dirty_count
,
8423 unsigned int dirty_budget
,
8425 vm_object_t src_object
,
8426 struct default_freezer_handle
*df_handle
)
8428 kern_return_t kr
= KERN_SUCCESS
;
8430 vm_object_lock(src_object
);
8432 *purgeable_count
= *wired_count
= *clean_count
= *dirty_count
= 0;
8435 if (!src_object
->alive
|| src_object
->terminating
){
8440 if (src_object
->purgable
== VM_PURGABLE_VOLATILE
) {
8441 *purgeable_count
= src_object
->resident_page_count
;
8443 /* If the default freezer handle is null, we're just walking the pages to discover how many can be hibernated */
8444 if (df_handle
!= NULL
) {
8445 purgeable_q_t queue
;
8446 /* object should be on a queue */
8447 assert(src_object
->objq
.next
!= NULL
&&
8448 src_object
->objq
.prev
!= NULL
);
8450 queue
= vm_purgeable_object_remove(src_object
);
8452 if (src_object
->purgeable_when_ripe
) {
8453 vm_page_lock_queues();
8454 vm_purgeable_token_delete_first(queue
);
8455 vm_page_unlock_queues();
8458 vm_object_purge(src_object
, 0);
8459 assert(src_object
->purgable
== VM_PURGABLE_EMPTY
);
8462 * This object was "volatile" so its pages must have
8463 * already been accounted as "volatile": no change
8464 * in accounting now that it's "empty".
8470 if (src_object
->ref_count
== 1) {
8471 vm_object_pack_pages(wired_count
, clean_count
, dirty_count
, dirty_budget
, src_object
, df_handle
);
8473 if (src_object
->internal
) {
8478 vm_object_unlock(src_object
);
8485 vm_object_pack_pages(
8486 unsigned int *wired_count
,
8487 unsigned int *clean_count
,
8488 unsigned int *dirty_count
,
8489 unsigned int dirty_budget
,
8490 vm_object_t src_object
,
8491 struct default_freezer_handle
*df_handle
)
8495 next
= (vm_page_t
)queue_first(&src_object
->memq
);
8497 while (!queue_end(&src_object
->memq
, (queue_entry_t
)next
)) {
8499 next
= (vm_page_t
)queue_next(&next
->listq
);
8501 /* Finish up if we've hit our pageout limit */
8502 if (dirty_budget
&& (dirty_budget
== *dirty_count
)) {
8505 assert(!p
->laundry
);
8507 if (p
->fictitious
|| p
->busy
)
8510 if (p
->absent
|| p
->unusual
|| p
->error
)
8513 if (VM_PAGE_WIRED(p
)) {
8518 if (df_handle
== NULL
) {
8519 if (p
->dirty
|| pmap_is_modified(p
->phys_page
)) {
8532 if (p
->pmapped
== TRUE
) {
8534 refmod_state
= pmap_disconnect(p
->phys_page
);
8535 if (refmod_state
& VM_MEM_MODIFIED
) {
8536 SET_PAGE_DIRTY(p
, FALSE
);
8541 default_freezer_pack_page(p
, df_handle
);
/*
 * This routine does the "relocation" of previously
 * compressed pages belonging to this object that are
 * residing in a number of compressed segments into
 * a set of compressed segments dedicated to hold
 * compressed pages belonging to this object.
 */

extern void *freezer_chead;
extern char *freezer_compressor_scratch_buf;
extern int c_freezer_compression_count;
extern AbsoluteTime c_freezer_last_yield_ts;

#define MAX_FREE_BATCH			32
#define FREEZER_DUTY_CYCLE_ON_MS	5
#define FREEZER_DUTY_CYCLE_OFF_MS	5

static int c_freezer_should_yield(void);

static int
c_freezer_should_yield()
{
	AbsoluteTime	cur_time;
	uint64_t	nsecs;

	assert(c_freezer_last_yield_ts);
	clock_get_uptime(&cur_time);

	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
	absolutetime_to_nanoseconds(cur_time, &nsecs);

	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS)
		return (1);
	return (0);
}
8591 vm_object_compressed_freezer_done()
8593 vm_compressor_finished_filling(&freezer_chead
);
void
vm_object_compressed_freezer_pageout(
		vm_object_t object)
{
	vm_page_t	p;
	vm_page_t	local_freeq = NULL;
	int		local_freed = 0;
	kern_return_t	retval = KERN_SUCCESS;
	int		obj_resident_page_count_snapshot = 0;

	assert(object != VM_OBJECT_NULL);

	vm_object_lock(object);

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {

		if (!object->pager_initialized) {

			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized)
				vm_object_compressor_pager_create(object);
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

	if (DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) {
		vm_object_offset_t	curr_offset = 0;

		/*
		 * Go through the object and make sure that any
		 * previously compressed pages are relocated into
		 * a compressed segment associated with our "freezer_chead".
		 */
		while (curr_offset < object->vo_size) {

			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);

			if (curr_offset == (vm_object_offset_t) -1)
				break;

			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);

			if (retval != KERN_SUCCESS)
				break;

			curr_offset += PAGE_SIZE_64;
		}
	}

	/*
	 * We can't hold the object lock while heading down into the compressed pager
	 * layer because we might need the kernel map lock down there to allocate new
	 * compressor data structures. And if this same object is mapped in the kernel
	 * and there's a fault on it, then that thread will want the object lock while
	 * holding the kernel map lock.
	 *
	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
	 * we won't be stuck in an infinite loop if the same page(s) keep getting
	 * decompressed. So we grab a snapshot of the number of pages in the object and
	 * we won't process any more than that number of pages.
	 */

	obj_resident_page_count_snapshot = object->resident_page_count;

	vm_object_activity_begin(object);

	while ((obj_resident_page_count_snapshot--) && !queue_empty(&object->memq)) {

		p = (vm_page_t)queue_first(&object->memq);

		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);

		vm_page_lockspin_queues();

		if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
			/*
			 * Page is in a transient state or otherwise not
			 * compressible right now: rotate it to the back of
			 * the memq so we don't keep reconsidering it.
			 */
			vm_page_unlock_queues();

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);

			queue_remove(&object->memq, p, vm_page_t, listq);
			queue_enter(&object->memq, p, vm_page_t, listq);

			continue;
		}

		if (p->pmapped == TRUE) {
			int refmod_state, pmap_flags;

			if (p->dirty || p->precious) {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}

			refmod_state = pmap_disconnect_options(p->phys_page, pmap_flags, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (p->dirty == FALSE && p->precious == FALSE) {
			/*
			 * Clean and non-precious page.
			 */
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
			continue;
		}

		if (p->laundry)
			vm_pageout_steal_laundry(p, TRUE);

		vm_page_queues_remove(p);
		vm_page_unlock_queues();

		/*
		 * In case the compressor fails to compress this page, we need it at
		 * the back of the object memq so that we don't keep trying to process it.
		 * Make the move here while we have the object lock held.
		 */

		queue_remove(&object->memq, p, vm_page_t, listq);
		queue_enter(&object->memq, p, vm_page_t, listq);

		/*
		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
		 *
		 * Mark the page busy so no one messes with it while we have the object lock dropped.
		 */
		p->busy = TRUE;

		vm_object_activity_begin(object);

		vm_object_unlock(object);

		/*
		 * arg3 == FALSE tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
		 */
		if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) {
			/*
			 * page has already been un-tabled from the object via 'vm_page_remove'
			 */
			p->pageq.next = (queue_entry_t)local_freeq;
			local_freeq = p;
			local_freed++;

			if (local_freed >= MAX_FREE_BATCH) {

				vm_page_free_list(local_freeq, TRUE);

				local_freeq = NULL;
				local_freed = 0;
			}
			c_freezer_compression_count++;
		}
		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);

		if (local_freed == 0 && c_freezer_should_yield()) {

			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
			clock_get_uptime(&c_freezer_last_yield_ts);
		}

		vm_object_lock(object);
	}

	if (local_freeq) {
		vm_page_free_list(local_freeq, TRUE);

		local_freeq = NULL;
		local_freed = 0;
	}

	vm_object_activity_end(object);

	vm_object_unlock(object);

	if (c_freezer_should_yield()) {

		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
		clock_get_uptime(&c_freezer_last_yield_ts);
	}
}
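/*
 * Illustrative sketch (not from the original source): the calling pattern
 * this freezer path is assumed to follow.  A freeze operation walks the
 * task's VM objects, invoking vm_object_compressed_freezer_pageout() on
 * each, and calls vm_object_compressed_freezer_done() once at the end so
 * the partially filled freezer_chead segment is handed back to the
 * compressor.  The map/entry walk shown here is simplified and hypothetical.
 */
#if 0	/* sketch only, not compiled */
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		if (VME_OBJECT(entry) != VM_OBJECT_NULL)
			vm_object_compressed_freezer_pageout(VME_OBJECT(entry));
	}
	vm_object_compressed_freezer_done();
#endif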
kern_return_t
vm_object_pagein(
	vm_object_t object)
{
	memory_object_t	pager;
	kern_return_t	kr;
	vm_object_lock(object);

	pager = object->pager;

	if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_paging_begin(object);

	object->blocked_access = TRUE;
	vm_object_unlock(object);

	kr = memory_object_data_reclaim(pager, TRUE);

	vm_object_lock(object);

	object->blocked_access = FALSE;
	vm_object_paging_end(object);

	vm_object_unlock(object);

	return kr;
}
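/*
 * Illustrative sketch (not from the original source): while blocked_access
 * is set above, a faulting thread is assumed to wait for the object to be
 * unblocked rather than touching pages the pager is reclaiming.  The exact
 * call site is an assumption; vm_object_sleep() and
 * VM_OBJECT_EVENT_UNBLOCKED are the existing primitives this sketch leans on.
 */
#if 0	/* sketch only, not compiled */
	while (object->blocked_access) {
		/* caller holds the object lock; it is dropped while sleeping */
		vm_object_sleep(object, VM_OBJECT_EVENT_UNBLOCKED, THREAD_UNINT);
	}
#endif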
#endif /* CONFIG_FREEZE */
void
vm_object_pageout(
	vm_object_t object)
{
	vm_page_t			p, next;
	struct vm_pageout_queue		*iq;
	boolean_t			need_unlock = TRUE;

	iq = &vm_pageout_queue_internal;

	assert(object != VM_OBJECT_NULL);
	assert(!DEFAULT_PAGER_IS_ACTIVE && !DEFAULT_FREEZER_IS_ACTIVE);

	vm_object_lock(object);

	if (!object->internal ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return;
	}

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {

		if (!object->pager_initialized) {

			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized)
				vm_object_compressor_pager_create(object);
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

ReScan:
	next = (vm_page_t)queue_first(&object->memq);

	while (!queue_end(&object->memq, (queue_entry_t)next)) {
		p = next;
		next = (vm_page_t)queue_next(&next->listq);

		if (!(p->active || p->inactive || p->speculative) ||
		    p->encrypted_cleaning ||
		    p->cleaning ||
		    p->laundry ||
		    p->busy ||
		    p->absent ||
		    p->error ||
		    p->fictitious ||
		    VM_PAGE_WIRED(p)) {
			/*
			 * Page is already being cleaned or can't be cleaned.
			 */
			continue;
		}

		/* Throw to the pageout queue */

		vm_page_lockspin_queues();

		if (vm_compressor_low_on_space()) {
			/*
			 * No room left in the compressor; pushing more pages
			 * at it is pointless, so stop scanning this object.
			 */
			vm_page_unlock_queues();
			break;
		}

		if (VM_PAGE_Q_THROTTLED(iq)) {

			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1),
				    THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);

			vm_object_lock(object);
			goto ReScan;
		}

		assert(!p->fictitious);
		assert(!p->busy);
		assert(!p->absent);
		assert(!p->unusual);
		assert(!p->error);
		assert(!VM_PAGE_WIRED(p));
		assert(!p->cleaning);

		if (p->pmapped == TRUE) {
			int refmod_state;
			int pmap_options;

			if (COMPRESSED_PAGER_IS_ACTIVE ||
			    DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
				/*
				 * Tell pmap the page should be accounted
				 * for as "compressed" if it's been modified.
				 */
				pmap_options =
					PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
				if (p->dirty || p->precious) {
					/*
					 * We already know it's been modified,
					 * so tell pmap to account for it
					 * as "compressed" now.
					 */
					pmap_options = PMAP_OPTIONS_COMPRESSOR;
				}
			}
			refmod_state = pmap_disconnect_options(p->phys_page,
							       pmap_options,
							       NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (!p->dirty && !p->precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);
			continue;
		}

		vm_page_queues_remove(p);
		if (vm_pageout_cluster(p, TRUE, FALSE, TRUE))
			need_unlock = FALSE;

		if (need_unlock == TRUE)
			vm_page_unlock_queues();
	}

	vm_object_unlock(object);
}
void
vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
{
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;

	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS)
		return;

	/*
	 * Create the request for I/O reprioritization.
	 * We use the noblock variant of zalloc because we're holding the object
	 * lock here and we could cause a deadlock in low memory conditions.
	 */
	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
	if (req == NULL)
		return;

	req->blkno = blkno;
	req->len = len;
	req->priority = prio;
	req->devvp = devvp;

	/* Insert request into the reprioritization list */
	IO_REPRIORITIZE_LIST_LOCK();
	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
	IO_REPRIORITIZE_LIST_UNLOCK();

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

	return;
}
void
vm_decmp_upl_reprioritize(upl_t upl, int prio)
{
	int			offset;
	vm_object_t		object;
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;
	uint64_t		blkno;
	uint32_t		len;
	upl_t			io_upl;
	uint64_t		*io_upl_reprio_info;
	int			io_upl_size;

	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
		return;

	/*
	 * We dont want to perform any allocations with the upl lock held since that might
	 * result in a deadlock. If the system is low on memory, the pageout thread would
	 * try to pageout stuff and might wait on this lock. If we are waiting for the memory to
	 * be freed up by the pageout thread, it would be a deadlock.
	 */

	/* First step is just to get the size of the upl to find out how big the reprio info is */
	if (!upl_try_lock(upl))
		return;

	if (upl->decmp_io_upl == NULL) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		return;
	}

	io_upl = upl->decmp_io_upl;
	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
	io_upl_size = io_upl->size;
	upl_unlock(upl);

	/* Now perform the allocation */
	io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
	if (io_upl_reprio_info == NULL)
		return;

	/* Now again take the lock, recheck the state and grab out the required info */
	if (!upl_try_lock(upl))
		goto out;

	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		goto out;
	}
	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));

	/* Get the VM object for this UPL */
	if (io_upl->flags & UPL_SHADOWED) {
		object = io_upl->map_object->shadow;
	} else {
		object = io_upl->map_object;
	}

	/* Get the dev vnode ptr for this object */
	if (!object || !object->pager ||
	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		upl_unlock(upl);
		goto out;
	}

	upl_unlock(upl);

	/* Now we have all the information needed to do the expedite */

	offset = 0;
	while (offset < io_upl_size) {
		blkno	= io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
		len	= (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;

		/*
		 * This implementation may cause some spurious expedites due to the
		 * fact that we dont cleanup the blkno & len from the upl_reprio_info
		 * even after the I/O is complete.
		 */

		if (blkno != 0 && len != 0) {
			/* Create the request for I/O reprioritization */
			req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
			assert(req != NULL);
			req->blkno = blkno;
			req->len = len;
			req->priority = prio;
			req->devvp = devvp;

			/* Insert request into the reprioritization list */
			IO_REPRIORITIZE_LIST_LOCK();
			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
			IO_REPRIORITIZE_LIST_UNLOCK();

			offset += len;
		} else {
			offset += PAGE_SIZE;
		}
	}

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

out:
	kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
	return;
}
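/*
 * Illustrative sketch (not from the original source): the per-page
 * upl_reprio_info encoding that the loop above decodes.  Each 64-bit slot
 * carries the block number in the low bits and the I/O length above
 * UPL_REPRIO_INFO_SHIFT; the pack helper below is hypothetical and simply
 * inverts the unpacking done in vm_decmp_upl_reprioritize().
 */
#if 0	/* sketch only, not compiled */
static inline uint64_t
upl_reprio_info_pack(uint64_t blkno, uint64_t len)
{
	return ((blkno & UPL_REPRIO_INFO_MASK) |
		((len & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT));
}
#endif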
void
vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
{
	upl_t			upl;
	upl_page_info_t		*pl;
	unsigned int		i, num_pages;
	int			cur_tier;

	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);

	/*
	 * Scan through all UPLs associated with the object to find the
	 * UPL containing the contended page.
	 */
	queue_iterate(&o->uplq, upl, upl_t, uplq) {
		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
			continue;
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		num_pages = (upl->size / PAGE_SIZE);

		/*
		 * For each page in the UPL page list, see if it matches the contended
		 * page and was issued as a low prio I/O.
		 */
		for (i = 0; i < num_pages; i++) {
			if (UPL_PAGE_PRESENT(pl, i) && m->phys_page == pl[i].phys_addr) {
				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0);
					vm_decmp_upl_reprioritize(upl, cur_tier);
				} else {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0);
					if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
						vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
				}
				break;
			}
		}
		/* Check if we found any hits */
		if (i != num_pages)
			break;
	}

	return;
}
wait_result_t
vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
{
	wait_result_t ret;

	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);

	if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
		/*
		 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
		 */
		vm_page_handle_prio_inversion(o, m);
	}
	m->wanted = TRUE;
	ret = thread_sleep_vm_object(o, m, interruptible);
	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);

	return ret;
}
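/*
 * Illustrative sketch (not from the original source): how a caller is
 * expected to use vm_page_sleep().  The caller holds the object lock, which
 * thread_sleep_vm_object() drops for the duration of the wait, so the page
 * state must be rechecked after each wakeup.  The surrounding loop is
 * hypothetical.
 */
#if 0	/* sketch only, not compiled */
	while (m->busy) {
		wait_result_t wr;

		wr = vm_page_sleep(object, m, THREAD_UNINT);
		if (wr != THREAD_AWAKENED)
			break;
	}
#endif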
void
io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
{
	io_reprioritize_req_t req = NULL;

	while (1) {

		IO_REPRIORITIZE_LIST_LOCK();
		if (queue_empty(&io_reprioritize_list)) {
			IO_REPRIORITIZE_LIST_UNLOCK();
			break;
		}

		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
		IO_REPRIORITIZE_LIST_UNLOCK();

		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
		zfree(io_reprioritize_req_zone, req);
	}

	IO_REPRIO_THREAD_CONTINUATION();
}
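/*
 * Illustrative sketch (not from the original source): the continuation idiom
 * IO_REPRIO_THREAD_CONTINUATION() is assumed to implement.  When the request
 * list is drained, the thread parks on a wakeup event and names itself as the
 * continuation, so IO_REPRIO_THREAD_WAKEUP() from the producers above simply
 * restarts io_reprioritize_thread() from the top.  The event name and exact
 * macro body are assumptions.
 */
#if 0	/* sketch only, not compiled */
	/* "io_reprioritize_wakeup" is a hypothetical event name for illustration */
	assert_wait((event_t)&io_reprioritize_wakeup, THREAD_UNINT);
	thread_block_parameter((thread_continue_t)io_reprioritize_thread, NULL);
	/* not reached: a wakeup restarts the thread at io_reprioritize_thread() */
#endif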