/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif
boolean_t vm_object_collapse_compressor_allowed = TRUE;

struct vm_counters vm_counters;

#if VM_OBJECT_TRACKING
boolean_t vm_object_tracking_inited = FALSE;
btlog_t *vm_object_tracking_btlog;

void
vm_object_tracking_init(void)
{
	int vm_object_tracking;

	vm_object_tracking = 1;
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
			   sizeof (vm_object_tracking));

	if (vm_object_tracking) {
		vm_object_tracking_btlog = btlog_create(
			VM_OBJECT_TRACKING_NUM_RECORDS,
			VM_OBJECT_TRACKING_BTDEPTH,
			TRUE /* caller_will_remove_entries_for_element? */);
		assert(vm_object_tracking_btlog);
		vm_object_tracking_inited = TRUE;
	}
}
#endif /* VM_OBJECT_TRACKING */
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	needed.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait for the
 *	"pager_initialized" field to be asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	property.
 *
 *	ZZZ Continue this comment.
 */
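
/*
 * Example (illustrative sketch only, not taken from any real caller): the
 * typical lifecycle of a temporary object using the routines defined below.
 * The size shown is arbitrary.
 *
 *	vm_object_t object;
 *
 *	object = vm_object_allocate((vm_object_size_t) PAGE_SIZE);
 *	if (object != VM_OBJECT_NULL) {
 *		vm_object_reference(object);	// e.g. an extra reference for a mapping
 *		// ... use the object: enter pages, map it, etc.
 *		vm_object_deallocate(object);	// drop the mapping's reference
 *		vm_object_deallocate(object);	// last reference: object may be terminated
 *	}
 */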
/* Forward declarations for internal functions. */
static kern_return_t	vm_object_terminate(
				vm_object_t	object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_do_bypass(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_release_pager(
				memory_object_t	pager);

zone_t			vm_object_zone;		/* vm backing store zone */
/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object		kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t			kernel_object;

static struct vm_object		compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t			compressor_object = &compressor_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object		vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object		vm_object_template;
unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;

static queue_head_t	vm_object_cached_list;
static uint32_t		vm_object_cache_pages_freed = 0;
static uint32_t		vm_object_cache_pages_moved = 0;
static uint32_t		vm_object_cache_pages_skipped = 0;
static uint32_t		vm_object_cache_adds = 0;
static uint32_t		vm_object_cached_count = 0;
static lck_mtx_t	vm_object_cached_lock_data;
static lck_mtx_ext_t	vm_object_cached_lock_data_ext;

static uint32_t		vm_object_page_grab_failed = 0;
static uint32_t		vm_object_page_grab_skipped = 0;
static uint32_t		vm_object_page_grab_returned = 0;
static uint32_t		vm_object_page_grab_pmapped = 0;
static uint32_t		vm_object_page_grab_reactivations = 0;

#define vm_object_cache_lock_spin()		\
		lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()		\
		lck_mtx_unlock(&vm_object_cached_lock_data)
static void	vm_object_cache_remove_locked(vm_object_t);

static void	vm_object_reap(vm_object_t object);
static void	vm_object_reap_async(vm_object_t object);
static void	vm_object_reaper_thread(void);

static lck_mtx_t	vm_object_reaper_lock_data;
static lck_mtx_ext_t	vm_object_reaper_lock_data_ext;

static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;

#define vm_object_reaper_lock()			\
		lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin()		\
		lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock()		\
		lck_mtx_unlock(&vm_object_reaper_lock_data)
/* I/O Re-prioritization request list */
queue_head_t	io_reprioritize_list;
lck_spin_t	io_reprioritize_list_lock;

#define IO_REPRIORITIZE_LIST_LOCK()	\
		lck_spin_lock(&io_reprioritize_list_lock)
#define IO_REPRIORITIZE_LIST_UNLOCK()	\
		lck_spin_unlock(&io_reprioritize_list_lock)

#define MAX_IO_REPRIORITIZE_REQS	8192
zone_t		io_reprioritize_req_zone;

/* I/O Re-prioritization thread */
int io_reprioritize_wakeup = 0;
static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);

#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
#define IO_REPRIO_THREAD_CONTINUATION()					\
{									\
	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);		\
	thread_block(io_reprioritize_thread);				\
}

void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
void vm_decmp_upl_reprioritize(upl_t, int);

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

__private_extern__ void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
		"vm_object_allocate, object 0x%X size 0x%X\n",
		object, size, 0, 0, 0);

	*object = vm_object_template;
	vm_page_queue_init(&object->memq);
#if UPL_DEBUG || CONFIG_IOSCHED
	queue_init(&object->uplq);
#endif
	vm_object_lock_init(object);
	object->vo_size = size;

#if VM_OBJECT_TRACKING_OP_CREATED
	if (vm_object_tracking_inited) {
		void	*bt[VM_OBJECT_TRACKING_BTDEPTH];
		int	numsaved = 0;

		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
		btlog_add_entry(vm_object_tracking_btlog,
				object,
				VM_OBJECT_TRACKING_OP_CREATED,
				bt,
				numsaved);
	}
#endif /* VM_OBJECT_TRACKING_OP_CREATED */
}

__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);			/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}
lck_grp_t	vm_object_lck_grp;
lck_grp_t	vm_object_cache_lck_grp;
lck_grp_attr_t	vm_object_lck_grp_attr;
lck_attr_t	vm_object_lck_attr;
lck_attr_t	kernel_object_lck_attr;
lck_attr_t	compressor_object_lck_attr;
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	vm_size_t	vm_object_size;

	assert(sizeof (mo_ipc_object_bits_t) == sizeof (ipc_object_bits_t));

	vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);

	vm_object_zone = zinit(vm_object_size,
			       round_page(512*1024),
			       round_page(12*1024),
			       "vm objects");
	zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
	zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE);

	vm_object_init_lck_grp();

	queue_init(&vm_object_cached_list);

	lck_mtx_init_ext(&vm_object_cached_lock_data,
		&vm_object_cached_lock_data_ext,
		&vm_object_cache_lck_grp,
		&vm_object_lck_attr);

	queue_init(&vm_object_reaper_queue);

	lck_mtx_init_ext(&vm_object_reaper_lock_data,
		&vm_object_reaper_lock_data_ext,
		&vm_object_lck_grp,
		&vm_object_lck_attr);
	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */

	vm_object_template.memq.prev = 0;
	vm_object_template.memq.next = 0;
	/*
	 * We can't call vm_object_lock_init() here because that will
	 * allocate some memory and VM is not fully initialized yet.
	 * The lock will be initialized for each allocated object in
	 * _vm_object_allocate(), so we don't need to initialize it in
	 * the vm_object_template.
	 */
	vm_object_lock_init(&vm_object_template);
#if DEVELOPMENT || DEBUG
	vm_object_template.Lock_owner = 0;
#endif
	vm_object_template.vo_size = 0;
	vm_object_template.memq_hint = VM_PAGE_NULL;
	vm_object_template.ref_count = 1;
#if TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif /* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	// static vm_object_template is zeroed
	// vm_object_template.wired_page_count = 0;
	vm_object_template.reusable_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.paging_in_progress = 0;
#if __LP64__
	vm_object_template.__object1_unused_bits = 0;
#endif /* __LP64__ */
	vm_object_template.activity_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.purgable = VM_PURGABLE_DENY;
	vm_object_template.purgeable_when_ripe = FALSE;
	vm_object_template.purgeable_only_by_kernel = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.true_share = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.named = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;

	vm_object_template.cached_list.prev = NULL;
	vm_object_template.cached_list.next = NULL;

	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.sequential = (vm_object_offset_t) 0;
	vm_object_template.pages_created = 0;
	vm_object_template.pages_used = 0;
	vm_object_template.scan_collisions = 0;
#if CONFIG_PHANTOM_CACHE
	vm_object_template.phantom_object_id = 0;
#endif
	vm_object_template.cow_hint = ~(vm_offset_t)0;

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
	vm_object_template.set_cache_attr = FALSE;
	vm_object_template.object_slid = FALSE;
	vm_object_template.code_signed = FALSE;
	vm_object_template.transposed = FALSE;
	vm_object_template.mapping_in_progress = FALSE;
	vm_object_template.phantom_isssd = FALSE;
	vm_object_template.volatile_empty = FALSE;
	vm_object_template.volatile_fault = FALSE;
	vm_object_template.all_reusable = FALSE;
	vm_object_template.blocked_access = FALSE;
	vm_object_template.__object2_unused_bits = 0;
#if CONFIG_IOSCHED || UPL_DEBUG
	vm_object_template.uplq.prev = NULL;
	vm_object_template.uplq.next = NULL;
#endif /* UPL_DEBUG */
#if VM_PIP_DEBUG
	bzero(&vm_object_template.pip_holders,
	      sizeof (vm_object_template.pip_holders));
#endif /* VM_PIP_DEBUG */

	vm_object_template.objq.next = NULL;
	vm_object_template.objq.prev = NULL;
	vm_object_template.task_objq.next = NULL;
	vm_object_template.task_objq.prev = NULL;

	vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	vm_object_template.purgeable_queue_group = 0;

	vm_object_template.vo_cache_ts = 0;

	vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;

	vm_object_template.io_tracking = FALSE;

#if CONFIG_SECLUDED_MEMORY
	vm_object_template.eligible_for_secluded = FALSE;
	vm_object_template.can_grab_secluded = FALSE;
#else /* CONFIG_SECLUDED_MEMORY */
	vm_object_template.__object3_unused_bits = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

	bzero(&vm_object_template.purgeable_owner_bt[0],
	      sizeof (vm_object_template.purgeable_owner_bt));
	vm_object_template.vo_purgeable_volatilizer = NULL;
	bzero(&vm_object_template.purgeable_volatilizer_bt[0],
	      sizeof (vm_object_template.purgeable_volatilizer_bt));
	/*
	 *	Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

/*
 * Note that in the following size specifications, we need to add 1 because
 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
 */

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    kernel_object);

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    compressor_object);
	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	kernel_object->no_tag_update = TRUE;

	/*
	 *	Initialize the "submap object".  Make it as large as the
	 *	kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    vm_submap_object);
	vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called with
	 * "zone" locks held.
	 */
	vm_object_reference(vm_submap_object);
}
void
vm_io_reprioritize_init(void)
{
	kern_return_t	result;
	thread_t	thread = THREAD_NULL;

	/* Initialize the I/O reprioritization subsystem */
	lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
	queue_init(&io_reprioritize_list);

	io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
					 MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
					 4096, "io_reprioritize_req");
	zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE);

	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create io_reprioritize_thread");
	}
}
void
vm_object_reaper_init(void)
{
	kern_return_t	kr;
	thread_t	thread;

	kr = kernel_thread_start_priority(
		(thread_continue_t) vm_object_reaper_thread,
		NULL,
		BASEPRI_VM,
		&thread);
	if (kr != KERN_SUCCESS) {
		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
	}
	thread_deallocate(thread);
}
__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}
__private_extern__ void
vm_object_init_lck_grp(void)
{
	/*
	 * initialize the vm_object lock world
	 */
	lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
	lck_attr_setdefault(&vm_object_lck_attr);
	lck_attr_setdefault(&kernel_object_lck_attr);
	lck_attr_cleardebug(&kernel_object_lck_attr);
	lck_attr_setdefault(&compressor_object_lck_attr);
	lck_attr_cleardebug(&compressor_object_lck_attr);
}
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;
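
/*
 * Fast-path sketch (a summary of the code in vm_object_deallocate() below,
 * not a separate implementation): when an object holds more than one
 * reference and no pager work is required, the reference is dropped under
 * the *shared* object lock with a compare-and-swap instead of taking the
 * exclusive lock:
 *
 *	vm_object_lock_shared(object);
 *	original_ref_count = object->ref_count;
 *	if (OSCompareAndSwap(original_ref_count,
 *	                     original_ref_count - 1,
 *	                     (UInt32 *) &object->ref_count)) {
 *		// reference dropped without exclusive locking
 *	} else {
 *		// lost the race: fall back to the slow, fully locked path
 *	}
 *	vm_object_unlock(object);
 */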
693 __private_extern__
void
694 vm_object_deallocate(
697 vm_object_t shadow
= VM_OBJECT_NULL
;
699 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
700 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
702 if (object
== VM_OBJECT_NULL
)
705 if (object
== kernel_object
|| object
== compressor_object
) {
706 vm_object_lock_shared(object
);
708 OSAddAtomic(-1, &object
->ref_count
);
710 if (object
->ref_count
== 0) {
711 if (object
== kernel_object
)
712 panic("vm_object_deallocate: losing kernel_object\n");
714 panic("vm_object_deallocate: losing compressor_object\n");
716 vm_object_unlock(object
);
720 if (object
->ref_count
== 2 &&
723 * This "named" object's reference count is about to
725 * we'll need to call memory_object_last_unmap().
727 } else if (object
->ref_count
== 2 &&
729 object
->shadow
!= VM_OBJECT_NULL
) {
731 * This internal object's reference count is about to
732 * drop from 2 to 1 and it has a shadow object:
733 * we'll want to try and collapse this object with its
736 } else if (object
->ref_count
>= 2) {
737 UInt32 original_ref_count
;
738 volatile UInt32
*ref_count_p
;
742 * The object currently looks like it is not being
743 * kept alive solely by the reference we're about to release.
744 * Let's try and release our reference without taking
745 * all the locks we would need if we had to terminate the
746 * object (cache lock + exclusive object lock).
747 * Lock the object "shared" to make sure we don't race with
748 * anyone holding it "exclusive".
750 vm_object_lock_shared(object
);
751 ref_count_p
= (volatile UInt32
*) &object
->ref_count
;
752 original_ref_count
= object
->ref_count
;
754 * Test again as "ref_count" could have changed.
755 * "named" shouldn't change.
757 if (original_ref_count
== 2 &&
759 /* need to take slow path for m_o_last_unmap() */
761 } else if (original_ref_count
== 2 &&
763 object
->shadow
!= VM_OBJECT_NULL
) {
764 /* need to take slow path for vm_object_collapse() */
766 } else if (original_ref_count
< 2) {
767 /* need to take slow path for vm_object_terminate() */
770 /* try an atomic update with the shared lock */
771 atomic_swap
= OSCompareAndSwap(
773 original_ref_count
- 1,
774 (UInt32
*) &object
->ref_count
);
775 if (atomic_swap
== FALSE
) {
776 vm_object_deallocate_shared_swap_failures
++;
777 /* fall back to the slow path... */
781 vm_object_unlock(object
);
785 * ref_count was updated atomically !
787 vm_object_deallocate_shared_successes
++;
792 * Someone else updated the ref_count at the same
793 * time and we lost the race. Fall back to the usual
794 * slow but safe path...
796 vm_object_deallocate_shared_failures
++;
799 while (object
!= VM_OBJECT_NULL
) {
801 vm_object_lock(object
);
803 assert(object
->ref_count
> 0);
806 * If the object has a named reference, and only
807 * that reference would remain, inform the pager
808 * about the last "mapping" reference going away.
810 if ((object
->ref_count
== 2) && (object
->named
)) {
811 memory_object_t pager
= object
->pager
;
813 /* Notify the Pager that there are no */
814 /* more mappers for this object */
816 if (pager
!= MEMORY_OBJECT_NULL
) {
817 vm_object_mapping_wait(object
, THREAD_UNINT
);
818 vm_object_mapping_begin(object
);
819 vm_object_unlock(object
);
821 memory_object_last_unmap(pager
);
823 vm_object_lock(object
);
824 vm_object_mapping_end(object
);
826 assert(object
->ref_count
> 0);
830 * Lose the reference. If other references
831 * remain, then we are done, unless we need
832 * to retry a cache trim.
833 * If it is the last reference, then keep it
834 * until any pending initialization is completed.
837 /* if the object is terminating, it cannot go into */
838 /* the cache and we obviously should not call */
839 /* terminate again. */
841 if ((object
->ref_count
> 1) || object
->terminating
) {
842 vm_object_lock_assert_exclusive(object
);
844 vm_object_res_deallocate(object
);
846 if (object
->ref_count
== 1 &&
847 object
->shadow
!= VM_OBJECT_NULL
) {
849 * There's only one reference left on this
850 * VM object. We can't tell if it's a valid
851 * one (from a mapping for example) or if this
852 * object is just part of a possibly stale and
853 * useless shadow chain.
854 * We would like to try and collapse it into
855 * its parent, but we don't have any pointers
856 * back to this parent object.
857 * But we can try and collapse this object with
858 * its own shadows, in case these are useless
860 * We can't bypass this object though, since we
861 * don't know if this last reference on it is
864 vm_object_collapse(object
, 0, FALSE
);
866 vm_object_unlock(object
);
871 * We have to wait for initialization
872 * before destroying or caching the object.
875 if (object
->pager_created
&& ! object
->pager_initialized
) {
876 assert(! object
->can_persist
);
877 vm_object_assert_wait(object
,
878 VM_OBJECT_EVENT_INITIALIZED
,
880 vm_object_unlock(object
);
882 thread_block(THREAD_CONTINUE_NULL
);
887 "vm_o_deallocate: 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
888 object
, object
->resident_page_count
,
889 object
->paging_in_progress
,
890 (void *)current_thread(),object
->ref_count
);
892 VM_OBJ_RES_DECR(object
); /* XXX ? */
894 * Terminate this object. If it had a shadow,
895 * then deallocate it; otherwise, if we need
896 * to retry a cache trim, do so now; otherwise,
897 * we are done. "pageout" objects have a shadow,
898 * but maintain a "paging reference" rather than
899 * a normal reference.
901 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
903 if (vm_object_terminate(object
) != KERN_SUCCESS
) {
906 if (shadow
!= VM_OBJECT_NULL
) {
924 vm_object_lock_assert_exclusive(object
);
926 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
927 p_limit
= MIN(50, object
->resident_page_count
);
929 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && --p_limit
> 0) {
932 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->listq
);
934 if (VM_PAGE_WIRED(p
) || p
->busy
|| p
->cleaning
|| p
->laundry
|| p
->fictitious
)
935 goto move_page_in_obj
;
937 if (p
->pmapped
|| p
->dirty
|| p
->precious
) {
938 vm_page_lockspin_queues();
943 vm_object_page_grab_pmapped
++;
945 if (p
->reference
== FALSE
|| p
->dirty
== FALSE
) {
947 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p
));
949 if (refmod_state
& VM_MEM_REFERENCED
)
951 if (refmod_state
& VM_MEM_MODIFIED
) {
952 SET_PAGE_DIRTY(p
, FALSE
);
955 if (p
->dirty
== FALSE
&& p
->precious
== FALSE
) {
957 refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p
));
959 if (refmod_state
& VM_MEM_REFERENCED
)
961 if (refmod_state
& VM_MEM_MODIFIED
) {
962 SET_PAGE_DIRTY(p
, FALSE
);
965 if (p
->dirty
== FALSE
)
969 if ((p
->vm_page_q_state
!= VM_PAGE_ON_ACTIVE_Q
) && p
->reference
== TRUE
) {
972 VM_STAT_INCR(reactivations
);
973 vm_object_page_grab_reactivations
++;
975 vm_page_unlock_queues();
977 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
978 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
983 vm_page_lockspin_queues();
985 vm_page_free_prepare_queues(p
);
986 vm_object_page_grab_returned
++;
987 vm_object_page_grab_skipped
+= p_skipped
;
989 vm_page_unlock_queues();
991 vm_page_free_prepare_object(p
, TRUE
);
995 vm_object_page_grab_skipped
+= p_skipped
;
996 vm_object_page_grab_failed
++;
1003 #define EVICT_PREPARE_LIMIT 64
1004 #define EVICT_AGE 10
1006 static clock_sec_t vm_object_cache_aging_ts
= 0;
1009 vm_object_cache_remove_locked(
1012 assert(object
->purgable
== VM_PURGABLE_DENY
);
1014 queue_remove(&vm_object_cached_list
, object
, vm_object_t
, cached_list
);
1015 object
->cached_list
.next
= NULL
;
1016 object
->cached_list
.prev
= NULL
;
1018 vm_object_cached_count
--;
1022 vm_object_cache_remove(
1025 vm_object_cache_lock_spin();
1027 if (object
->cached_list
.next
&&
1028 object
->cached_list
.prev
)
1029 vm_object_cache_remove_locked(object
);
1031 vm_object_cache_unlock();
1035 vm_object_cache_add(
1041 assert(object
->purgable
== VM_PURGABLE_DENY
);
1043 if (object
->resident_page_count
== 0)
1045 clock_get_system_nanotime(&sec
, &nsec
);
1047 vm_object_cache_lock_spin();
1049 if (object
->cached_list
.next
== NULL
&&
1050 object
->cached_list
.prev
== NULL
) {
1051 queue_enter(&vm_object_cached_list
, object
, vm_object_t
, cached_list
);
1052 object
->vo_cache_ts
= sec
+ EVICT_AGE
;
1053 object
->vo_cache_pages_to_scan
= object
->resident_page_count
;
1055 vm_object_cached_count
++;
1056 vm_object_cache_adds
++;
1058 vm_object_cache_unlock();
1062 vm_object_cache_evict(
1064 int max_objects_to_examine
)
1066 vm_object_t object
= VM_OBJECT_NULL
;
1067 vm_object_t next_obj
= VM_OBJECT_NULL
;
1068 vm_page_t local_free_q
= VM_PAGE_NULL
;
1072 vm_page_t ep_array
[EVICT_PREPARE_LIMIT
];
1078 uint32_t ep_skipped
= 0;
1082 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START
, 0, 0, 0, 0, 0);
1084 * do a couple of quick checks to see if it's
1085 * worthwhile grabbing the lock
1087 if (queue_empty(&vm_object_cached_list
)) {
1088 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1091 clock_get_system_nanotime(&sec
, &nsec
);
1094 * the object on the head of the queue has not
1095 * yet sufficiently aged
1097 if (sec
< vm_object_cache_aging_ts
) {
1098 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, 0, 0, 0, 0, 0);
1102 * don't need the queue lock to find
1103 * and lock an object on the cached list
1105 vm_page_unlock_queues();
1107 vm_object_cache_lock_spin();
1110 next_obj
= (vm_object_t
)queue_first(&vm_object_cached_list
);
1112 while (!queue_end(&vm_object_cached_list
, (queue_entry_t
)next_obj
) && object_cnt
++ < max_objects_to_examine
) {
1115 next_obj
= (vm_object_t
)queue_next(&next_obj
->cached_list
);
1117 assert(object
->purgable
== VM_PURGABLE_DENY
);
1118 assert(object
->wired_page_count
== 0);
1120 if (sec
< object
->vo_cache_ts
) {
1121 KERNEL_DEBUG(0x130020c, object
, object
->resident_page_count
, object
->vo_cache_ts
, sec
, 0);
1123 vm_object_cache_aging_ts
= object
->vo_cache_ts
;
1124 object
= VM_OBJECT_NULL
;
1127 if (!vm_object_lock_try_scan(object
)) {
1129 * just skip over this guy for now... if we find
1130 * an object to steal pages from, we'll revist in a bit...
1131 * hopefully, the lock will have cleared
1133 KERNEL_DEBUG(0x13001f8, object
, object
->resident_page_count
, 0, 0, 0);
1135 object
= VM_OBJECT_NULL
;
1138 if (vm_page_queue_empty(&object
->memq
) || object
->vo_cache_pages_to_scan
== 0) {
1140 * this case really shouldn't happen, but it's not fatal
1141 * so deal with it... if we don't remove the object from
1142 * the list, we'll never move past it.
1144 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1146 vm_object_cache_remove_locked(object
);
1147 vm_object_unlock(object
);
1148 object
= VM_OBJECT_NULL
;
1152 * we have a locked object with pages...
1153 * time to start harvesting
1157 vm_object_cache_unlock();
1159 if (object
== VM_OBJECT_NULL
)
1163 * object is locked at this point and
1164 * has resident pages
1166 next_p
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1169 * break the page scan into 2 pieces to minimize the time spent
1170 * behind the page queue lock...
1171 * the list of pages on these unused objects is likely to be cold
1172 * w/r to the cpu cache which increases the time to scan the list
1173 * tenfold... and we may have a 'run' of pages we can't utilize that
1174 * needs to be skipped over...
1176 if ((ep_limit
= num_to_evict
- (ep_freed
+ ep_moved
)) > EVICT_PREPARE_LIMIT
)
1177 ep_limit
= EVICT_PREPARE_LIMIT
;
1180 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next_p
) && object
->vo_cache_pages_to_scan
&& ep_count
< ep_limit
) {
1183 next_p
= (vm_page_t
)vm_page_queue_next(&next_p
->listq
);
1185 object
->vo_cache_pages_to_scan
--;
1187 if (VM_PAGE_WIRED(p
) || p
->busy
|| p
->cleaning
|| p
->laundry
) {
1188 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
1189 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
1194 if (p
->wpmapped
|| p
->dirty
|| p
->precious
) {
1195 vm_page_queue_remove(&object
->memq
, p
, vm_page_t
, listq
);
1196 vm_page_queue_enter(&object
->memq
, p
, vm_page_t
, listq
);
1198 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p
));
1200 ep_array
[ep_count
++] = p
;
1202 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1204 vm_page_lockspin_queues();
1206 for (ep_index
= 0; ep_index
< ep_count
; ep_index
++) {
1208 p
= ep_array
[ep_index
];
1210 if (p
->wpmapped
|| p
->dirty
|| p
->precious
) {
1211 p
->reference
= FALSE
;
1212 p
->no_cache
= FALSE
;
1215 * we've already filtered out pages that are in the laundry
1216 * so if we get here, this page can't be on the pageout queue
1218 vm_page_queues_remove(p
, FALSE
);
1219 vm_page_enqueue_inactive(p
, TRUE
);
1223 #if CONFIG_PHANTOM_CACHE
1224 vm_phantom_cache_add_ghost(p
);
1226 vm_page_free_prepare_queues(p
);
1228 assert(p
->pageq
.next
== 0 && p
->pageq
.prev
== 0);
1230 * Add this page to our list of reclaimed pages,
1231 * to be freed later.
1233 p
->snext
= local_free_q
;
1239 vm_page_unlock_queues();
1241 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END
, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1244 vm_page_free_list(local_free_q
, TRUE
);
1245 local_free_q
= VM_PAGE_NULL
;
1247 if (object
->vo_cache_pages_to_scan
== 0) {
1248 KERNEL_DEBUG(0x1300208, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1250 vm_object_cache_remove(object
);
1252 KERNEL_DEBUG(0x13001fc, object
, object
->resident_page_count
, ep_freed
, ep_moved
, 0);
1255 * done with this object
1257 vm_object_unlock(object
);
1258 object
= VM_OBJECT_NULL
;
1261 * at this point, we are not holding any locks
1263 if ((ep_freed
+ ep_moved
) >= num_to_evict
) {
1265 * we've reached our target for the
1266 * number of pages to evict
1270 vm_object_cache_lock_spin();
1273 * put the page queues lock back to the caller's
1276 vm_page_lock_queues();
1278 vm_object_cache_pages_freed
+= ep_freed
;
1279 vm_object_cache_pages_moved
+= ep_moved
;
1280 vm_object_cache_pages_skipped
+= ep_skipped
;
1282 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END
, ep_freed
, 0, 0, 0, 0);
/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if it's found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;
1309 XPR(XPR_VM_OBJECT
, "vm_object_terminate, object 0x%X ref %d\n",
1310 object
, object
->ref_count
, 0, 0, 0);
1312 vm_object_lock_assert_exclusive(object
);
1314 if (!object
->pageout
&& (!object
->internal
&& object
->can_persist
) &&
1315 (object
->pager
!= NULL
|| object
->shadow_severed
)) {
1317 * Clear pager_trusted bit so that the pages get yanked
1318 * out of the object instead of cleaned in place. This
1319 * prevents a deadlock in XMM and makes more sense anyway.
1321 object
->pager_trusted
= FALSE
;
1323 vm_object_reap_pages(object
, REAP_TERMINATE
);
1326 * Make sure the object isn't already being terminated
1328 if (object
->terminating
) {
1329 vm_object_lock_assert_exclusive(object
);
1330 object
->ref_count
--;
1331 assert(object
->ref_count
> 0);
1332 vm_object_unlock(object
);
1333 return KERN_FAILURE
;
1337 * Did somebody get a reference to the object while we were
1340 if (object
->ref_count
!= 1) {
1341 vm_object_lock_assert_exclusive(object
);
1342 object
->ref_count
--;
1343 assert(object
->ref_count
> 0);
1344 vm_object_res_deallocate(object
);
1345 vm_object_unlock(object
);
1346 return KERN_FAILURE
;
1350 * Make sure no one can look us up now.
1353 object
->terminating
= TRUE
;
1354 object
->alive
= FALSE
;
1356 if (!object
->internal
&&
1357 object
->cached_list
.next
&&
1358 object
->cached_list
.prev
)
1359 vm_object_cache_remove(object
);
1362 * Detach the object from its shadow if we are the shadow's
1363 * copy. The reference we hold on the shadow must be dropped
1366 if (((shadow_object
= object
->shadow
) != VM_OBJECT_NULL
) &&
1367 !(object
->pageout
)) {
1368 vm_object_lock(shadow_object
);
1369 if (shadow_object
->copy
== object
)
1370 shadow_object
->copy
= VM_OBJECT_NULL
;
1371 vm_object_unlock(shadow_object
);
1374 if (object
->paging_in_progress
!= 0 ||
1375 object
->activity_in_progress
!= 0) {
1377 * There are still some paging_in_progress references
1378 * on this object, meaning that there are some paging
1379 * or other I/O operations in progress for this VM object.
1380 * Such operations take some paging_in_progress references
1381 * up front to ensure that the object doesn't go away, but
1382 * they may also need to acquire a reference on the VM object,
1383 * to map it in kernel space, for example. That means that
1384 * they may end up releasing the last reference on the VM
1385 * object, triggering its termination, while still holding
1386 * paging_in_progress references. Waiting for these
1387 * pending paging_in_progress references to go away here would
1390 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1391 * complete the VM object termination if it still holds
1392 * paging_in_progress references at this point.
1394 * No new paging_in_progress should appear now that the
1395 * VM object is "terminating" and not "alive".
1397 vm_object_reap_async(object
);
1398 vm_object_unlock(object
);
1400 * Return KERN_FAILURE to let the caller know that we
1401 * haven't completed the termination and it can't drop this
1402 * object's reference on its shadow object yet.
1403 * The reaper thread will take care of that once it has
1404 * completed this object's termination.
1406 return KERN_FAILURE
;
1409 * complete the VM object termination
1411 vm_object_reap(object
);
1412 object
= VM_OBJECT_NULL
;
1415 * the object lock was released by vm_object_reap()
1417 * KERN_SUCCESS means that this object has been terminated
1418 * and no longer needs its shadow object but still holds a
1420 * The caller is responsible for dropping that reference.
1421 * We can't call vm_object_deallocate() here because that
1422 * would create a recursion.
1424 return KERN_SUCCESS
;
/*
 * Complete the termination of a VM object after it's been marked
 * as "terminating" and "!alive" by vm_object_terminate().
 *
 * The VM object must be locked by caller.
 * The lock will be released on return and the VM object is no longer valid.
 */
void
vm_object_reap(
	vm_object_t object)
{
	memory_object_t		pager;
1444 vm_object_lock_assert_exclusive(object
);
1445 assert(object
->paging_in_progress
== 0);
1446 assert(object
->activity_in_progress
== 0);
1448 vm_object_reap_count
++;
1451 * Disown this purgeable object to cleanup its owner's purgeable
1452 * ledgers. We need to do this before disconnecting the object
1453 * from its pager, to properly account for compressed pages.
1455 if (object
->internal
&&
1456 object
->purgable
!= VM_PURGABLE_DENY
) {
1457 vm_purgeable_accounting(object
,
1460 FALSE
); /* task_objq locked? */
1463 pager
= object
->pager
;
1464 object
->pager
= MEMORY_OBJECT_NULL
;
1466 if (pager
!= MEMORY_OBJECT_NULL
)
1467 memory_object_control_disable(object
->pager_control
);
1469 object
->ref_count
--;
1471 assert(object
->res_count
== 0);
1472 #endif /* TASK_SWAPPER */
1474 assert (object
->ref_count
== 0);
1477 * remove from purgeable queue if it's on
1479 if (object
->internal
) {
1482 owner
= object
->vo_purgeable_owner
;
1484 VM_OBJECT_UNWIRED(object
);
1486 if (object
->purgable
== VM_PURGABLE_DENY
) {
1487 /* not purgeable: nothing to do */
1488 } else if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1489 purgeable_q_t queue
;
1491 assert(object
->vo_purgeable_owner
== NULL
);
1493 queue
= vm_purgeable_object_remove(object
);
1496 if (object
->purgeable_when_ripe
) {
1498 * Must take page lock for this -
1499 * using it to protect token queue
1501 vm_page_lock_queues();
1502 vm_purgeable_token_delete_first(queue
);
1504 assert(queue
->debug_count_objects
>=0);
1505 vm_page_unlock_queues();
1509 * Update "vm_page_purgeable_count" in bulk and mark
1510 * object as VM_PURGABLE_EMPTY to avoid updating
1511 * "vm_page_purgeable_count" again in vm_page_remove()
1512 * when reaping the pages.
1515 assert(object
->resident_page_count
>=
1516 object
->wired_page_count
);
1517 delta
= (object
->resident_page_count
-
1518 object
->wired_page_count
);
1520 assert(vm_page_purgeable_count
>= delta
);
1522 (SInt32
*)&vm_page_purgeable_count
);
1524 if (object
->wired_page_count
!= 0) {
1525 assert(vm_page_purgeable_wired_count
>=
1526 object
->wired_page_count
);
1527 OSAddAtomic(-object
->wired_page_count
,
1528 (SInt32
*)&vm_page_purgeable_wired_count
);
1530 object
->purgable
= VM_PURGABLE_EMPTY
;
1532 else if (object
->purgable
== VM_PURGABLE_NONVOLATILE
||
1533 object
->purgable
== VM_PURGABLE_EMPTY
) {
1534 /* remove from nonvolatile queue */
1535 assert(object
->vo_purgeable_owner
== TASK_NULL
);
1536 vm_purgeable_nonvolatile_dequeue(object
);
1538 panic("object %p in unexpected purgeable state 0x%x\n",
1539 object
, object
->purgable
);
1541 if (object
->transposed
&&
1542 object
->cached_list
.next
!= NULL
&&
1543 object
->cached_list
.prev
== NULL
) {
1545 * object->cached_list.next "points" to the
1546 * object that was transposed with this object.
1549 assert(object
->cached_list
.next
== NULL
);
1551 assert(object
->cached_list
.prev
== NULL
);
1554 if (object
->pageout
) {
1556 * free all remaining pages tabled on
1558 * clean up it's shadow
1560 assert(object
->shadow
!= VM_OBJECT_NULL
);
1562 vm_pageout_object_terminate(object
);
1564 } else if (object
->resident_page_count
) {
1566 * free all remaining pages tabled on
1569 vm_object_reap_pages(object
, REAP_REAP
);
1571 assert(vm_page_queue_empty(&object
->memq
));
1572 assert(object
->paging_in_progress
== 0);
1573 assert(object
->activity_in_progress
== 0);
1574 assert(object
->ref_count
== 0);
1577 * If the pager has not already been released by
1578 * vm_object_destroy, we need to terminate it and
1579 * release our reference to it here.
1581 if (pager
!= MEMORY_OBJECT_NULL
) {
1582 vm_object_unlock(object
);
1583 vm_object_release_pager(pager
);
1584 vm_object_lock(object
);
1587 /* kick off anyone waiting on terminating */
1588 object
->terminating
= FALSE
;
1589 vm_object_paging_begin(object
);
1590 vm_object_paging_end(object
);
1591 vm_object_unlock(object
);
1593 object
->shadow
= VM_OBJECT_NULL
;
1595 #if VM_OBJECT_TRACKING
1596 if (vm_object_tracking_inited
) {
1597 btlog_remove_entries_for_element(vm_object_tracking_btlog
,
1600 #endif /* VM_OBJECT_TRACKING */
1602 vm_object_lock_destroy(object
);
1604 * Free the space for the object.
1606 zfree(vm_object_zone
, object
);
1607 object
= VM_OBJECT_NULL
;
1611 unsigned int vm_max_batch
= 256;
1613 #define V_O_R_MAX_BATCH 128
1615 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
1618 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
1620 if (_local_free_q) { \
1621 if (do_disconnect) { \
1623 for (m = _local_free_q; \
1624 m != VM_PAGE_NULL; \
1627 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
1631 vm_page_free_list(_local_free_q, TRUE); \
1632 _local_free_q = VM_PAGE_NULL; \
1638 vm_object_reap_pages(
1644 vm_page_t local_free_q
= VM_PAGE_NULL
;
1646 boolean_t disconnect_on_release
;
1647 pmap_flush_context pmap_flush_context_storage
;
1649 if (reap_type
== REAP_DATA_FLUSH
) {
1651 * We need to disconnect pages from all pmaps before
1652 * releasing them to the free list
1654 disconnect_on_release
= TRUE
;
1657 * Either the caller has already disconnected the pages
1658 * from all pmaps, or we disconnect them here as we add
1659 * them to out local list of pages to be released.
1660 * No need to re-disconnect them when we release the pages
1663 disconnect_on_release
= FALSE
;
1666 restart_after_sleep
:
1667 if (vm_page_queue_empty(&object
->memq
))
1669 loop_count
= BATCH_LIMIT(V_O_R_MAX_BATCH
);
1671 if (reap_type
== REAP_PURGEABLE
)
1672 pmap_flush_context_init(&pmap_flush_context_storage
);
1674 vm_page_lockspin_queues();
1676 next
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
1678 while (!vm_page_queue_end(&object
->memq
, (vm_page_queue_entry_t
)next
)) {
1681 next
= (vm_page_t
)vm_page_queue_next(&next
->listq
);
1683 if (--loop_count
== 0) {
1685 vm_page_unlock_queues();
1689 if (reap_type
== REAP_PURGEABLE
) {
1690 pmap_flush(&pmap_flush_context_storage
);
1691 pmap_flush_context_init(&pmap_flush_context_storage
);
1694 * Free the pages we reclaimed so far
1695 * and take a little break to avoid
1696 * hogging the page queue lock too long
1698 VM_OBJ_REAP_FREELIST(local_free_q
,
1699 disconnect_on_release
);
1703 loop_count
= BATCH_LIMIT(V_O_R_MAX_BATCH
);
1705 vm_page_lockspin_queues();
1707 if (reap_type
== REAP_DATA_FLUSH
|| reap_type
== REAP_TERMINATE
) {
1709 if (p
->busy
|| p
->cleaning
) {
1711 vm_page_unlock_queues();
1713 * free the pages reclaimed so far
1715 VM_OBJ_REAP_FREELIST(local_free_q
,
1716 disconnect_on_release
);
1718 PAGE_SLEEP(object
, p
, THREAD_UNINT
);
1720 goto restart_after_sleep
;
1723 vm_pageout_steal_laundry(p
, TRUE
);
1725 switch (reap_type
) {
1727 case REAP_DATA_FLUSH
:
1728 if (VM_PAGE_WIRED(p
)) {
1730 * this is an odd case... perhaps we should
1731 * zero-fill this page since we're conceptually
1732 * tossing its data at this point, but leaving
1733 * it on the object to honor the 'wire' contract
1739 case REAP_PURGEABLE
:
1740 if (VM_PAGE_WIRED(p
)) {
1742 * can't purge a wired page
1744 vm_page_purged_wired
++;
1747 if (p
->laundry
&& !p
->busy
&& !p
->cleaning
)
1748 vm_pageout_steal_laundry(p
, TRUE
);
1750 if (p
->cleaning
|| p
->laundry
|| p
->absent
) {
1752 * page is being acted upon,
1753 * so don't mess with it
1755 vm_page_purged_others
++;
1760 * We can't reclaim a busy page but we can
1761 * make it more likely to be paged (it's not wired) to make
1762 * sure that it gets considered by
1763 * vm_pageout_scan() later.
1765 if (VM_PAGE_PAGEABLE(p
))
1766 vm_page_deactivate(p
);
1767 vm_page_purged_busy
++;
1771 assert(VM_PAGE_OBJECT(p
) != kernel_object
);
1774 * we can discard this page...
1776 if (p
->pmapped
== TRUE
) {
1780 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p
), PMAP_OPTIONS_NOFLUSH
| PMAP_OPTIONS_NOREFMOD
, (void *)&pmap_flush_context_storage
);
1782 vm_page_purged_count
++;
1786 case REAP_TERMINATE
:
1787 if (p
->absent
|| p
->private) {
1789 * For private pages, VM_PAGE_FREE just
1790 * leaves the page structure around for
1791 * its owner to clean up. For absent
1792 * pages, the structure is returned to
1793 * the appropriate pool.
1797 if (p
->fictitious
) {
1798 assert (VM_PAGE_GET_PHYS_PAGE(p
) == vm_page_guard_addr
);
1801 if (!p
->dirty
&& p
->wpmapped
)
1802 p
->dirty
= pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p
));
1804 if ((p
->dirty
|| p
->precious
) && !p
->error
&& object
->alive
) {
1806 assert(!object
->internal
);
1808 p
->free_when_done
= TRUE
;
1811 vm_page_queues_remove(p
, TRUE
);
1813 * flush page... page will be freed
1814 * upon completion of I/O
1816 vm_pageout_cluster(p
);
1818 vm_page_unlock_queues();
1820 * free the pages reclaimed so far
1822 VM_OBJ_REAP_FREELIST(local_free_q
,
1823 disconnect_on_release
);
1825 vm_object_paging_wait(object
, THREAD_UNINT
);
1827 goto restart_after_sleep
;
1834 vm_page_free_prepare_queues(p
);
1835 assert(p
->pageq
.next
== 0 && p
->pageq
.prev
== 0);
1837 * Add this page to our list of reclaimed pages,
1838 * to be freed later.
1840 p
->snext
= local_free_q
;
1843 vm_page_unlock_queues();
1846 * Free the remaining reclaimed pages
1848 if (reap_type
== REAP_PURGEABLE
)
1849 pmap_flush(&pmap_flush_context_storage
);
1851 VM_OBJ_REAP_FREELIST(local_free_q
,
1852 disconnect_on_release
);
1857 vm_object_reap_async(
1860 vm_object_lock_assert_exclusive(object
);
1862 vm_object_reaper_lock_spin();
1864 vm_object_reap_count_async
++;
1866 /* enqueue the VM object... */
1867 queue_enter(&vm_object_reaper_queue
, object
,
1868 vm_object_t
, cached_list
);
1870 vm_object_reaper_unlock();
1872 /* ... and wake up the reaper thread */
1873 thread_wakeup((event_t
) &vm_object_reaper_queue
);
1878 vm_object_reaper_thread(void)
1880 vm_object_t object
, shadow_object
;
1882 vm_object_reaper_lock_spin();
1884 while (!queue_empty(&vm_object_reaper_queue
)) {
1885 queue_remove_first(&vm_object_reaper_queue
,
1890 vm_object_reaper_unlock();
1891 vm_object_lock(object
);
1893 assert(object
->terminating
);
1894 assert(!object
->alive
);
1897 * The pageout daemon might be playing with our pages.
1898 * Now that the object is dead, it won't touch any more
1899 * pages, but some pages might already be on their way out.
1900 * Hence, we wait until the active paging activities have
1901 * ceased before we break the association with the pager
1904 while (object
->paging_in_progress
!= 0 ||
1905 object
->activity_in_progress
!= 0) {
1906 vm_object_wait(object
,
1907 VM_OBJECT_EVENT_PAGING_IN_PROGRESS
,
1909 vm_object_lock(object
);
1913 object
->pageout
? VM_OBJECT_NULL
: object
->shadow
;
1915 vm_object_reap(object
);
1916 /* cache is unlocked and object is no longer valid */
1917 object
= VM_OBJECT_NULL
;
1919 if (shadow_object
!= VM_OBJECT_NULL
) {
1921 * Drop the reference "object" was holding on
1922 * its shadow object.
1924 vm_object_deallocate(shadow_object
);
1925 shadow_object
= VM_OBJECT_NULL
;
1927 vm_object_reaper_lock_spin();
1930 /* wait for more work... */
1931 assert_wait((event_t
) &vm_object_reaper_queue
, THREAD_UNINT
);
1933 vm_object_reaper_unlock();
1935 thread_block((thread_continue_t
) vm_object_reaper_thread
);
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 */
static void
vm_object_release_pager(
	memory_object_t	pager)
{
	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	/*
	 *	Release reference to pager.
	 */
	memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	__unused kern_return_t	reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);

	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call).
		 */
		vm_object_deallocate(object);
	}
	return(KERN_SUCCESS);
}
2025 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2026 * exist because of the need to handle shadow chains. When deactivating pages, we only
2027 * want to deactive the ones at the top most level in the object chain. In order to do
2028 * this efficiently, the specified address range is divided up into "chunks" and we use
2029 * a bit map to keep track of which pages have already been processed as we descend down
2030 * the shadow chain. These chunk macros hide the details of the bit map implementation
2031 * as much as we can.
2033 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2034 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2035 * order bit represents page 0 in the current range and highest order bit represents
2038 * For further convenience, we also use negative logic for the page state in the bit map.
2039 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2040 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2041 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2042 * out with all the bits set. The macros below hide all these details from the caller.
2045 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2046 /* be the same as the number of bits in */
2047 /* the chunk_state_t type. We use 64 */
2048 /* just for convenience. */
2050 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2052 typedef uint64_t chunk_state_t
;
2055 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2056 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2057 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2058 * looking at pages in that range. This can save us from unnecessarily chasing down the
2062 #define CHUNK_INIT(c, len) \
2066 (c) = 0xffffffffffffffffLL; \
2068 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2069 MARK_PAGE_HANDLED(c, p); \
2074 * Return true if all pages in the chunk have not yet been processed.
2077 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2080 * Return true if the page at offset 'p' in the bit map has already been handled
2081 * while processing a higher level object in the shadow chain.
2084 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0)
2087 * Mark the page at offset 'p' in the bit map as having been processed.
2090 #define MARK_PAGE_HANDLED(c, p) \
2092 (c) = (c) & ~(1LL << (p)); \
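
/*
 * Usage sketch (illustrative only; the real loop lives in
 * deactivate_pages_in_object() below):
 *
 *	chunk_state_t	chunk_state;
 *	unsigned int	p;
 *
 *	CHUNK_INIT(chunk_state, len);	// 'len' is the byte length covered, <= CHUNK_SIZE
 *	for (p = 0; CHUNK_NOT_COMPLETE(chunk_state) && p < PAGES_IN_A_CHUNK; p++) {
 *		if (PAGE_ALREADY_HANDLED(chunk_state, p))
 *			continue;	// a higher-level object already covered this page
 *		// ... look up and deactivate the page at this offset ...
 *		MARK_PAGE_HANDLED(chunk_state, p);
 *	}
 */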
/*
 * Return true if the page at the given offset has been paged out.  Object is
 * locked upon entry and returned locked.
 */

static boolean_t
page_is_paged_out(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	if (object->internal &&
	    object->alive &&
	    !object->terminating &&
	    object->pager_ready) {

		if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
		    == VM_EXTERNAL_STATE_EXISTS) {
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * madvise_free_debug
 *
 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
 * simulate the loss of the page's contents as if the page had been
 * reclaimed and then re-faulted.
 */
#if DEVELOPMENT || DEBUG
int madvise_free_debug = 1;
#else /* DEBUG */
int madvise_free_debug = 0;
#endif /* DEBUG */
/*
 * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
 * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
 * a size that is less than or equal to the CHUNK_SIZE.
 */

static void
deactivate_pages_in_object(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	chunk_state_t		*chunk_state,
	pmap_flush_context	*pfc,
	pmap_t			pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_page_t			m;
	int				p;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int				dw_count;
	int				dw_limit;
	unsigned int			reusable = 0;
	/*
	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
	 * all the pages in the chunk.
	 */

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
		/*
		 * If this offset has already been found and handled in a higher level object, then don't
		 * do anything with it in the current shadow object.
		 */

		if (PAGE_ALREADY_HANDLED(*chunk_state, p))
			continue;
		/*
		 * See if the page at this offset is around.  First check to see if the page is resident,
		 * then if not, check the existence map or with the pager.
		 */

		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			/*
			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
			 * so that we won't bother looking for a page at this offset again if there are more
			 * shadow objects.  Then deactivate the page.
			 */

			MARK_PAGE_HANDLED(*chunk_state, p);
			if ((!VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) &&
			    (!m->laundry) && (!m->cleaning) && !(m->free_when_done)) {
				clear_refmod = VM_MEM_REFERENCED;
				dwp->dw_mask |= DW_clear_reference;
				if ((kill_page) && (object->internal)) {
					if (madvise_free_debug) {
						/*
						 * zero-fill the page now
						 * to simulate it being
						 * reclaimed and re-faulted.
						 */
						pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
					}
					m->precious = FALSE;
					m->dirty = FALSE;
					clear_refmod |= VM_MEM_MODIFIED;
					if (m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
						/*
						 * This page is now clean and
						 * reclaimable.  Move it out
						 * of the throttled queue, so
						 * that vm_pageout_scan() can
						 * find it.
						 */
						dwp->dw_mask |= DW_move_page;
					}

					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
					if (reusable_page && !m->reusable) {
						assert(!all_reusable);
						assert(!object->all_reusable);
						m->reusable = TRUE;
						object->reusable_page_count++;
						assert(object->resident_page_count >= object->reusable_page_count);
						reusable++;
						/*
						 * Tell pmap this page is now
						 * "reusable" (to update pmap
						 * stats for all mappings).
						 */
						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
					}
				}
				pmap_options |= PMAP_OPTIONS_NOFLUSH;
				pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
							  clear_refmod,
							  pmap_options,
							  (void *)pfc);

				if ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable))
					dwp->dw_mask |= DW_move_page;
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					if (reusable) {
						OSAddAtomic(reusable,
							    &vm_page_stats_reusable.reusable_count);
						vm_page_stats_reusable.reusable += reusable;
						reusable = 0;
					}
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

					dwp = &dw_array[0];
					dw_count = 0;
				}
			}
		} else {
			/*
			 * The page at this offset isn't memory resident, check to see if it's
			 * been paged out.  If so, mark it as handled so we don't bother looking
			 * for it in the shadow chain.
			 */

			if (page_is_paged_out(object, offset)) {
				MARK_PAGE_HANDLED(*chunk_state, p);
				/*
				 * If we're killing a non-resident page, then clear the page in the existence
				 * map so we don't bother paging it back in if it's touched again in the future.
				 */

				if ((kill_page) && (object->internal)) {
					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
					if (pmap != PMAP_NULL) {
						/*
						 * Tell pmap that this page
						 * is no longer mapped, to
						 * adjust the footprint ledger
						 * because this page is no
						 * longer compressed.
						 */
						pmap_remove_options(
							pmap,
							pmap_offset,
							pmap_offset + PAGE_SIZE,
							PMAP_OPTIONS_REMOVE);
					}
				}
			}
		}
	}
	if (reusable) {
		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reusable += reusable;
		reusable = 0;
	}

	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
}
/*
 * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
 * will always be less than or equal to the given size.  The total range is divided up
 * into chunks for efficiency and performance related to the locks and handling the shadow
 * chain.  This routine returns how much of the given "size" it actually processed.  It's
 * up to the caller to loop and keep calling this routine until the entire range they want
 * to process has been done (see the illustrative caller sketch after this function).
 */
static vm_object_size_t
deactivate_a_chunk(
	vm_object_t		orig_object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	boolean_t		all_reusable,
	pmap_flush_context	*pfc,
	pmap_t			pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_t		object;
	vm_object_t		tmp_object;
	vm_object_size_t	length;
	chunk_state_t		chunk_state;
	/*
	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
	 * remaining size the caller asked for.
	 */

	length = MIN(size, CHUNK_SIZE);

	/*
	 * The chunk_state keeps track of which pages we've already processed if there's
	 * a shadow chain on this object.  At this point, we haven't done anything with this
	 * range of pages yet, so initialize the state to indicate no pages processed yet.
	 */

	CHUNK_INIT(chunk_state, length);
	object = orig_object;
	/*
	 * Start at the top level object and iterate around the loop once for each object
	 * in the shadow chain.  We stop processing early if we've already found all the pages
	 * in the range.  Otherwise we stop when we run out of shadow objects.
	 */

	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
		vm_object_paging_begin(object);

		deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);

		vm_object_paging_end(object);
		/*
		 * We've finished with this object, see if there's a shadow object.  If
		 * there is, update the offset and lock the new object.  We also turn off
		 * kill_page at this point since we only kill pages in the top most object.
		 */

		tmp_object = object->shadow;

		if (tmp_object != VM_OBJECT_NULL) {
			kill_page = FALSE;
			reusable_page = FALSE;
			all_reusable = FALSE;
			offset += object->vo_shadow_offset;
			vm_object_lock(tmp_object);
		}

		if (object != orig_object)
			vm_object_unlock(object);

		object = tmp_object;
	}
	if (object && object != orig_object)
		vm_object_unlock(object);

	return length;
}
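/*
 * Illustrative sketch (not part of the original source): the caller-side loop
 * implied by the deactivate_a_chunk() contract documented above.  This helper is
 * purely hypothetical; vm_object_deactivate_pages() below is the real caller.
 * It simply keeps calling deactivate_a_chunk() until the requested size has been
 * consumed, advancing the offsets by however much each call actually processed.
 */
static __unused void
vm_object_deactivate_range_sketch(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	pmap_flush_context	*pfc,
	pmap_t			pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_size_t	length;

	while (size) {
		/* at most one chunk (CHUNK_SIZE bytes) is processed per call */
		length = deactivate_a_chunk(object, offset, size,
					    FALSE,	/* kill_page */
					    FALSE,	/* reusable_page */
					    FALSE,	/* all_reusable */
					    pfc, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}
}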
/*
 * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
 * we also clear the modified status of the page and "forget" any changes that have been made
 * to the page.
 */
__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page,
	boolean_t		reusable_page,
	pmap_t			pmap,
	vm_map_offset_t		pmap_offset)
{
	vm_object_size_t	length;
	boolean_t		all_reusable;
	pmap_flush_context	pmap_flush_context_storage;
	/*
	 * We break the range up into chunks and do one chunk at a time.  This is for
	 * efficiency and performance while handling the shadow chains and the locks.
	 * The deactivate_a_chunk() function returns how much of the range it processed.
	 * We keep calling this routine until the given size is exhausted.
	 */

	all_reusable = FALSE;

	/*
	 * For the sake of accurate "reusable" pmap stats, we need
	 * to tell pmap about each page that is no longer "reusable",
	 * so we can't do the "all_reusable" optimization.
	 */
	if (reusable_page &&
	    object->internal &&
	    object->vo_size != 0 &&
	    object->vo_size == size &&
	    object->reusable_page_count == 0) {
		all_reusable = TRUE;
		reusable_page = FALSE;
	}
	if ((reusable_page || all_reusable) && object->all_reusable) {
		/* This means MADV_FREE_REUSABLE has been called twice, which
		 * is probably illegal. */
		return;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);
	while (size) {
		length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);

		size -= length;
		offset += length;
		pmap_offset += length;
	}

	pmap_flush(&pmap_flush_context_storage);
	if (all_reusable) {
		if (!object->all_reusable) {
			unsigned int reusable;

			object->all_reusable = TRUE;
			assert(object->reusable_page_count == 0);
			/* update global stats */
			reusable = object->resident_page_count;
			OSAddAtomic(reusable,
				    &vm_page_stats_reusable.reusable_count);
			vm_page_stats_reusable.reusable += reusable;
			vm_page_stats_reusable.all_reusable_calls++;
		}
	} else if (reusable_page) {
		vm_page_stats_reusable.partial_reusable_calls++;
	}
}
void
vm_object_reuse_pages(
	vm_object_t		object,
	vm_object_offset_t	start_offset,
	vm_object_offset_t	end_offset,
	boolean_t		allow_partial_reuse)
{
	vm_object_offset_t	cur_offset;
	vm_page_t		m;
	unsigned int		reused, reusable;
#define VM_OBJECT_REUSE_PAGE(object, m, reused)				\
	MACRO_BEGIN							\
		if ((m) != VM_PAGE_NULL &&				\
		    (m)->reusable) {					\
			assert((object)->reusable_page_count <=		\
			       (object)->resident_page_count);		\
			assert((object)->reusable_page_count > 0);	\
			(object)->reusable_page_count--;		\
			(m)->reusable = FALSE;				\
			(reused)++;					\
			/*						\
			 * Tell pmap that this page is no longer	\
			 * "reusable", to update the "reusable" stats	\
			 * for all the pmaps that have mapped this	\
			 * page.					\
			 */						\
			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
						  0,			\
						  (PMAP_OPTIONS_CLEAR_REUSABLE \
						   | PMAP_OPTIONS_NOFLUSH), \
						  NULL);		\
		}							\
	MACRO_END
	reused = 0;
	reusable = 0;

	vm_object_lock_assert_exclusive(object);

	if (object->all_reusable) {
		panic("object %p all_reusable: can't update pmap stats\n",
		      object);
		assert(object->reusable_page_count == 0);
		object->all_reusable = FALSE;
		if (end_offset - start_offset == object->vo_size ||
		    !allow_partial_reuse) {
			vm_page_stats_reusable.all_reuse_calls++;
			reused = object->resident_page_count;
		} else {
			vm_page_stats_reusable.partial_reuse_calls++;
			vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
				if (m->offset < start_offset ||
				    m->offset >= end_offset) {
					m->reusable = TRUE;
					object->reusable_page_count++;
					assert(object->resident_page_count >= object->reusable_page_count);
					continue;
				}
				assert(!m->reusable);
				reused++;
			}
		}
	} else if (object->resident_page_count >
		   ((end_offset - start_offset) >> PAGE_SHIFT)) {
		vm_page_stats_reusable.partial_reuse_calls++;
		for (cur_offset = start_offset;
		     cur_offset < end_offset;
		     cur_offset += PAGE_SIZE_64) {
			if (object->reusable_page_count == 0) {
				break;
			}
			m = vm_page_lookup(object, cur_offset);
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	} else {
		vm_page_stats_reusable.partial_reuse_calls++;
		vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
			if (object->reusable_page_count == 0) {
				break;
			}
			if (m->offset < start_offset ||
			    m->offset >= end_offset) {
				continue;
			}
			VM_OBJECT_REUSE_PAGE(object, m, reused);
		}
	}
	/* update global stats */
	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
	vm_page_stats_reusable.reused += reused;
	vm_page_stats_reusable.reusable += reusable;
}
/*
 * Routine:	vm_object_pmap_protect
 *
 * Purpose:
 *	Reduces the permission for all physical
 *	pages in the specified object range.
 *
 *	If removing write permission only, it is
 *	sufficient to protect only the pages in
 *	the top-level object; only those pages may
 *	have write permission.
 *
 *	If removing all access, we must follow the
 *	shadow chain from the top-level object to
 *	remove access to all pages in shadowed objects.
 *
 *	The object must *not* be locked.  The object must
 *	be internal.
 *
 *	If pmap is not NULL, this routine assumes that
 *	the only mappings for the pages are in that
 *	pmap.
 */
__private_extern__ void
vm_object_pmap_protect(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	pmap_t			pmap,
	vm_map_offset_t		pmap_start,
	vm_prot_t		prot)
{
	vm_object_pmap_protect_options(object, offset, size,
				       pmap, pmap_start, prot, 0);
}
__private_extern__ void
vm_object_pmap_protect_options(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	pmap_t			pmap,
	vm_map_offset_t		pmap_start,
	vm_prot_t		prot,
	int			options)
{
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		delayed_pmap_flush = FALSE;
	if (object == VM_OBJECT_NULL)
		return;
	size = vm_object_round_page(size);
	offset = vm_object_trunc_page(offset);

	vm_object_lock(object);
	if (object->phys_contiguous) {
		if (pmap != NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap,
					     pmap_start,
					     pmap_start + size,
					     prot,
					     options & ~PMAP_OPTIONS_NOFLUSH,
					     NULL);
		} else {
			vm_object_offset_t phys_start, phys_end, phys_addr;

			phys_start = object->vo_shadow_offset + offset;
			phys_end = phys_start + size;
			assert(phys_start <= phys_end);
			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
			vm_object_unlock(object);

			pmap_flush_context_init(&pmap_flush_context_storage);
			delayed_pmap_flush = FALSE;

			for (phys_addr = phys_start;
			     phys_addr < phys_end;
			     phys_addr += PAGE_SIZE_64) {
				pmap_page_protect_options(
					(ppnum_t) (phys_addr >> PAGE_SHIFT),
					prot,
					options | PMAP_OPTIONS_NOFLUSH,
					(void *)&pmap_flush_context_storage);
				delayed_pmap_flush = TRUE;
			}
			if (delayed_pmap_flush == TRUE)
				pmap_flush(&pmap_flush_context_storage);
		}
		return;
	}
	assert(object->internal);

	while (TRUE) {
		if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
			vm_object_unlock(object);
			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
					     options & ~PMAP_OPTIONS_NOFLUSH, NULL);
			return;
		}
		pmap_flush_context_init(&pmap_flush_context_storage);
		delayed_pmap_flush = FALSE;

		/*
		 * if we are doing large ranges with respect to resident
		 * page count then we should iterate over pages otherwise
		 * inverse page look-up will be faster
		 */
		if (ptoa_64(object->resident_page_count / 4) < size) {
2705 vm_object_offset_t end
;
2707 end
= offset
+ size
;
2709 vm_page_queue_iterate(&object
->memq
, p
, vm_page_t
, listq
) {
2710 if (!p
->fictitious
&& (offset
<= p
->offset
) && (p
->offset
< end
)) {
2711 vm_map_offset_t start
;
2713 start
= pmap_start
+ p
->offset
- offset
;
2715 if (pmap
!= PMAP_NULL
)
2716 pmap_protect_options(
2719 start
+ PAGE_SIZE_64
,
2721 options
| PMAP_OPTIONS_NOFLUSH
,
2722 &pmap_flush_context_storage
);
2724 pmap_page_protect_options(
2725 VM_PAGE_GET_PHYS_PAGE(p
),
2727 options
| PMAP_OPTIONS_NOFLUSH
,
2728 &pmap_flush_context_storage
);
2729 delayed_pmap_flush
= TRUE
;
2735 vm_object_offset_t end
;
2736 vm_object_offset_t target_off
;
2738 end
= offset
+ size
;
2740 for (target_off
= offset
;
2741 target_off
< end
; target_off
+= PAGE_SIZE
) {
2743 p
= vm_page_lookup(object
, target_off
);
2745 if (p
!= VM_PAGE_NULL
) {
2746 vm_object_offset_t start
;
2748 start
= pmap_start
+ (p
->offset
- offset
);
2750 if (pmap
!= PMAP_NULL
)
2751 pmap_protect_options(
2754 start
+ PAGE_SIZE_64
,
2756 options
| PMAP_OPTIONS_NOFLUSH
,
2757 &pmap_flush_context_storage
);
2759 pmap_page_protect_options(
2760 VM_PAGE_GET_PHYS_PAGE(p
),
2762 options
| PMAP_OPTIONS_NOFLUSH
,
2763 &pmap_flush_context_storage
);
2764 delayed_pmap_flush
= TRUE
;
2768 if (delayed_pmap_flush
== TRUE
)
2769 pmap_flush(&pmap_flush_context_storage
);
		if (prot == VM_PROT_NONE) {
			/*
			 * Must follow shadow chain to remove access
			 * to pages in shadowed objects.
			 */
			vm_object_t	next_object;

			next_object = object->shadow;
			if (next_object != VM_OBJECT_NULL) {
				offset += object->vo_shadow_offset;
				vm_object_lock(next_object);
				vm_object_unlock(object);
				object = next_object;
			} else {
				/*
				 * End of chain - we are done.
				 */
				break;
			}
		} else {
			/*
			 * Pages in shadowed objects may never have
			 * write permission - we may stop here.
			 */
			break;
		}
	}

	vm_object_unlock(object);
}
/*
 * Routine:	vm_object_copy_slowly
 *
 * Description:
 *	Copy the specified range of the source
 *	virtual memory object without using
 *	protection-based optimizations (such
 *	as copy-on-write).  The pages in the
 *	region are actually copied.
 *
 * In/out conditions:
 *	The caller must hold a reference and a lock
 *	for the source virtual memory object.  The source
 *	object will be returned *unlocked*.
 *
 * Results:
 *	If the copy is completed successfully, KERN_SUCCESS is
 *	returned.  If the caller asserted the interruptible
 *	argument, and an interruption occurred while waiting
 *	for a user-generated event, MACH_SEND_INTERRUPTED is
 *	returned.  Other values may be returned to indicate
 *	hard errors during the copy operation.
 *
 *	A new virtual memory object is returned in a
 *	parameter (_result_object).  The contents of this
 *	new object, starting at a zero offset, are a copy
 *	of the source memory region.  In the event of
 *	an error, this parameter will contain the value
 *	VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	struct vm_object_fault_info fault_info;
	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}
	/*
	 * Prevent destruction of the source object while we copy.
	 */

	vm_object_reference_locked(src_object);
	vm_object_unlock(src_object);

	/*
	 * Create a new object to hold the copied pages.
	 * We fill the new object starting at offset 0,
	 * regardless of the input offset.
	 * We don't bother to lock the new object within
	 * this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */
	fault_info.interruptible = interruptible;
	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.user_tag = 0;
	fault_info.pmap_options = 0;
	fault_info.lo_offset = src_offset;
	fault_info.hi_offset = src_offset + size;
	fault_info.no_cache = FALSE;
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	for ( ;
	     size != 0 ;
	     src_offset += PAGE_SIZE_64,
	     new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64) {
		vm_page_t		new_page;
		vm_fault_return_t	result;
2898 vm_object_lock(new_object
);
2900 while ((new_page
= vm_page_alloc(new_object
, new_offset
))
2903 vm_object_unlock(new_object
);
2905 if (!vm_page_wait(interruptible
)) {
2906 vm_object_deallocate(new_object
);
2907 vm_object_deallocate(src_object
);
2908 *_result_object
= VM_OBJECT_NULL
;
2909 return(MACH_SEND_INTERRUPTED
);
2911 vm_object_lock(new_object
);
2913 vm_object_unlock(new_object
);
2916 vm_prot_t prot
= VM_PROT_READ
;
2917 vm_page_t _result_page
;
2919 vm_page_t result_page
;
2920 kern_return_t error_code
;
2921 vm_object_t result_page_object
;
2924 vm_object_lock(src_object
);
2926 if (src_object
->internal
&&
2927 src_object
->shadow
== VM_OBJECT_NULL
&&
2928 (vm_page_lookup(src_object
,
2929 src_offset
) == VM_PAGE_NULL
) &&
2930 (src_object
->pager
== NULL
||
2931 (VM_COMPRESSOR_PAGER_STATE_GET(src_object
,
2933 VM_EXTERNAL_STATE_ABSENT
))) {
				/*
				 * This page is neither resident nor compressed
				 * and there's no shadow object below
				 * "src_object", so this page is really missing.
				 * There's no need to zero-fill it just to copy
				 * it: let's leave it missing in "new_object"
				 * and get zero-filled on demand.
				 */
				vm_object_unlock(src_object);
2943 /* free the unused "new_page"... */
2944 vm_object_lock(new_object
);
2945 VM_PAGE_FREE(new_page
);
2946 new_page
= VM_PAGE_NULL
;
2947 vm_object_unlock(new_object
);
2948 /* ...and go to next page in "src_object" */
2949 result
= VM_FAULT_SUCCESS
;
2953 vm_object_paging_begin(src_object
);
2955 if (size
> (vm_size_t
) -1) {
2956 /* 32-bit overflow */
2957 fault_info
.cluster_size
= (vm_size_t
) (0 - PAGE_SIZE
);
2959 fault_info
.cluster_size
= (vm_size_t
) size
;
2960 assert(fault_info
.cluster_size
== size
);
2963 XPR(XPR_VM_FAULT
,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
2964 _result_page
= VM_PAGE_NULL
;
2965 result
= vm_fault_page(src_object
, src_offset
,
2966 VM_PROT_READ
, FALSE
,
2967 FALSE
, /* page not looked up */
2968 &prot
, &_result_page
, &top_page
,
2970 &error_code
, FALSE
, FALSE
, &fault_info
);
2973 case VM_FAULT_SUCCESS
:
2974 result_page
= _result_page
;
2975 result_page_object
= VM_PAGE_OBJECT(result_page
);
2978 * Copy the page to the new object.
2981 * If result_page is clean,
2982 * we could steal it instead
2986 vm_page_copy(result_page
, new_page
);
2987 vm_object_unlock(result_page_object
);
2990 * Let go of both pages (make them
2991 * not busy, perform wakeup, activate).
2993 vm_object_lock(new_object
);
2994 SET_PAGE_DIRTY(new_page
, FALSE
);
2995 PAGE_WAKEUP_DONE(new_page
);
2996 vm_object_unlock(new_object
);
2998 vm_object_lock(result_page_object
);
2999 PAGE_WAKEUP_DONE(result_page
);
3001 vm_page_lockspin_queues();
3002 if ((result_page
->vm_page_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ||
3003 (result_page
->vm_page_q_state
== VM_PAGE_NOT_ON_Q
)) {
3004 vm_page_activate(result_page
);
3006 vm_page_activate(new_page
);
3007 vm_page_unlock_queues();
3010 * Release paging references and
3011 * top-level placeholder page, if any.
3014 vm_fault_cleanup(result_page_object
,
3019 case VM_FAULT_RETRY
:
3022 case VM_FAULT_MEMORY_SHORTAGE
:
3023 if (vm_page_wait(interruptible
))
3027 case VM_FAULT_INTERRUPTED
:
3028 vm_object_lock(new_object
);
3029 VM_PAGE_FREE(new_page
);
3030 vm_object_unlock(new_object
);
3032 vm_object_deallocate(new_object
);
3033 vm_object_deallocate(src_object
);
3034 *_result_object
= VM_OBJECT_NULL
;
3035 return(MACH_SEND_INTERRUPTED
);
3037 case VM_FAULT_SUCCESS_NO_VM_PAGE
:
3038 /* success but no VM page: fail */
3039 vm_object_paging_end(src_object
);
3040 vm_object_unlock(src_object
);
3042 case VM_FAULT_MEMORY_ERROR
:
3045 * (a) ignore pages that we can't
3047 * (b) return the null object if
3048 * any page fails [chosen]
3051 vm_object_lock(new_object
);
3052 VM_PAGE_FREE(new_page
);
3053 vm_object_unlock(new_object
);
3055 vm_object_deallocate(new_object
);
3056 vm_object_deallocate(src_object
);
3057 *_result_object
= VM_OBJECT_NULL
;
3058 return(error_code
? error_code
:
3062 panic("vm_object_copy_slowly: unexpected error"
3063 " 0x%x from vm_fault_page()\n", result
);
3065 } while (result
!= VM_FAULT_SUCCESS
);
	/*
	 * Lose the extra reference, and return our object.
	 */
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}
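/*
 * Illustrative sketch (not part of the original source): a hypothetical caller of
 * vm_object_copy_slowly(), following the In/out conditions documented above.  The
 * caller holds a reference and the lock on the source object; the routine itself
 * drops the lock (and takes/releases its own reference), and hands back a brand
 * new object whose contents start at offset 0.
 */
static __unused kern_return_t
vm_object_copy_slowly_usage_sketch(
	vm_object_t		src_object,	/* caller already holds a reference */
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*copy_object)	/* OUT */
{
	kern_return_t	kr;

	vm_object_lock(src_object);		/* locked on entry, returned unlocked */
	kr = vm_object_copy_slowly(src_object, src_offset, size,
				   FALSE,	/* not interruptible */
				   copy_object);
	if (kr != KERN_SUCCESS) {
		/* *copy_object is VM_OBJECT_NULL on failure */
		return kr;
	}
	/* the copy starts at offset 0 in *copy_object, regardless of src_offset */
	return KERN_SUCCESS;
}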
/*
 * Routine:	vm_object_copy_quickly
 *
 * Purpose:
 *	Copy the specified range of the source virtual
 *	memory object, if it can be done without waiting
 *	for user-generated events.
 *
 * Results:
 *	If the copy is successful, the copy is returned in
 *	the arguments; otherwise, the arguments are not
 *	touched.
 *
 * In/out conditions:
 *	The object should be unlocked on entry and exit.
 */
3094 __private_extern__ boolean_t
3095 vm_object_copy_quickly(
3096 vm_object_t
*_object
, /* INOUT */
3097 __unused vm_object_offset_t offset
, /* IN */
3098 __unused vm_object_size_t size
, /* IN */
3099 boolean_t
*_src_needs_copy
, /* OUT */
3100 boolean_t
*_dst_needs_copy
) /* OUT */
3102 vm_object_t object
= *_object
;
3103 memory_object_copy_strategy_t copy_strategy
;
3105 XPR(XPR_VM_OBJECT
, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
3106 *_object
, offset
, size
, 0, 0);
3107 if (object
== VM_OBJECT_NULL
) {
3108 *_src_needs_copy
= FALSE
;
3109 *_dst_needs_copy
= FALSE
;
3113 vm_object_lock(object
);
3115 copy_strategy
= object
->copy_strategy
;
3117 switch (copy_strategy
) {
3118 case MEMORY_OBJECT_COPY_SYMMETRIC
:
3121 * Symmetric copy strategy.
3122 * Make another reference to the object.
3123 * Leave object/offset unchanged.
3126 vm_object_reference_locked(object
);
3127 object
->shadowed
= TRUE
;
3128 vm_object_unlock(object
);
3131 * Both source and destination must make
3132 * shadows, and the source must be made
3133 * read-only if not already.
3136 *_src_needs_copy
= TRUE
;
3137 *_dst_needs_copy
= TRUE
;
3141 case MEMORY_OBJECT_COPY_DELAY
:
3142 vm_object_unlock(object
);
3146 vm_object_unlock(object
);
3152 static int copy_call_count
= 0;
3153 static int copy_call_sleep_count
= 0;
3154 static int copy_call_restart_count
= 0;
/*
 * Routine:	vm_object_copy_call [internal]
 *
 * Description:
 *	Copy the source object (src_object), using the
 *	user-managed copy algorithm.
 *
 * In/out conditions:
 *	The source object must be locked on entry.  It
 *	will be *unlocked* on exit.
 *
 * Results:
 *	If the copy is successful, KERN_SUCCESS is returned.
 *	A new object that represents the copied virtual
 *	memory is returned in a parameter (*_result_object).
 *	If the return value indicates an error, this parameter
 *	is not valid.
 */
3174 static kern_return_t
3175 vm_object_copy_call(
3176 vm_object_t src_object
,
3177 vm_object_offset_t src_offset
,
3178 vm_object_size_t size
,
3179 vm_object_t
*_result_object
) /* OUT */
3183 boolean_t check_ready
= FALSE
;
3184 uint32_t try_failed_count
= 0;
3187 * If a copy is already in progress, wait and retry.
3190 * Consider making this call interruptable, as Mike
3191 * intended it to be.
3194 * Need a counter or version or something to allow
3195 * us to use the copy that the currently requesting
3196 * thread is obtaining -- is it worth adding to the
3197 * vm object structure? Depends how common this case it.
3200 while (vm_object_wanted(src_object
, VM_OBJECT_EVENT_COPY_CALL
)) {
3201 vm_object_sleep(src_object
, VM_OBJECT_EVENT_COPY_CALL
,
3203 copy_call_restart_count
++;
3207 * Indicate (for the benefit of memory_object_create_copy)
3208 * that we want a copy for src_object. (Note that we cannot
3209 * do a real assert_wait before calling memory_object_copy,
3210 * so we simply set the flag.)
3213 vm_object_set_wanted(src_object
, VM_OBJECT_EVENT_COPY_CALL
);
3214 vm_object_unlock(src_object
);
3217 * Ask the memory manager to give us a memory object
3218 * which represents a copy of the src object.
3219 * The memory manager may give us a memory object
3220 * which we already have, or it may give us a
3221 * new memory object. This memory object will arrive
3222 * via memory_object_create_copy.
3225 kr
= KERN_FAILURE
; /* XXX need to change memory_object.defs */
3226 if (kr
!= KERN_SUCCESS
) {
3231 * Wait for the copy to arrive.
3233 vm_object_lock(src_object
);
3234 while (vm_object_wanted(src_object
, VM_OBJECT_EVENT_COPY_CALL
)) {
3235 vm_object_sleep(src_object
, VM_OBJECT_EVENT_COPY_CALL
,
3237 copy_call_sleep_count
++;
3240 assert(src_object
->copy
!= VM_OBJECT_NULL
);
3241 copy
= src_object
->copy
;
3242 if (!vm_object_lock_try(copy
)) {
3243 vm_object_unlock(src_object
);
3246 mutex_pause(try_failed_count
); /* wait a bit */
3248 vm_object_lock(src_object
);
3251 if (copy
->vo_size
< src_offset
+size
)
3252 copy
->vo_size
= src_offset
+size
;
3254 if (!copy
->pager_ready
)
3260 *_result_object
= copy
;
3261 vm_object_unlock(copy
);
3262 vm_object_unlock(src_object
);
3264 /* Wait for the copy to be ready. */
3265 if (check_ready
== TRUE
) {
3266 vm_object_lock(copy
);
3267 while (!copy
->pager_ready
) {
3268 vm_object_sleep(copy
, VM_OBJECT_EVENT_PAGER_READY
, THREAD_UNINT
);
3270 vm_object_unlock(copy
);
3273 return KERN_SUCCESS
;
3276 static int copy_delayed_lock_collisions
= 0;
3277 static int copy_delayed_max_collisions
= 0;
3278 static int copy_delayed_lock_contention
= 0;
3279 static int copy_delayed_protect_iterate
= 0;
/*
 * Routine:	vm_object_copy_delayed [internal]
 *
 * Description:
 *	Copy the specified virtual memory object, using
 *	the asymmetric copy-on-write algorithm.
 *
 * In/out conditions:
 *	The src_object must be locked on entry.  It will be unlocked
 *	on exit - so the caller must also hold a reference to it.
 *
 *	This routine will not block waiting for user-generated
 *	events.  It is not interruptible.
 */
3295 __private_extern__ vm_object_t
3296 vm_object_copy_delayed(
3297 vm_object_t src_object
,
3298 vm_object_offset_t src_offset
,
3299 vm_object_size_t size
,
3300 boolean_t src_object_shared
)
3302 vm_object_t new_copy
= VM_OBJECT_NULL
;
3303 vm_object_t old_copy
;
3305 vm_object_size_t copy_size
= src_offset
+ size
;
3306 pmap_flush_context pmap_flush_context_storage
;
3307 boolean_t delayed_pmap_flush
= FALSE
;
	/*
	 * The user-level memory manager wants to see all of the changes
	 * to this object, but it has promised not to make any changes on
	 * its own.
	 *
	 * Perform an asymmetric copy-on-write, as follows:
	 *	Create a new object, called a "copy object" to hold
	 *	 pages modified by the new mapping (i.e., the copy,
	 *	 not the original mapping).
	 *	Record the original object as the backing object for
	 *	 the copy object.  If the original mapping does not
	 *	 change a page, it may be used read-only by the copy.
	 *	Record the copy object in the original object.
	 *	 When the original mapping causes a page to be modified,
	 *	 it must be copied to a new page that is "pushed" to
	 *	 the copy object.
	 *	Mark the new mapping (the copy object) copy-on-write.
	 *	 This makes the copy object itself read-only, allowing
	 *	 it to be reused if the original mapping makes no
	 *	 changes, and simplifying the synchronization required
	 *	 in the "push" operation described above.
	 *
	 * The copy-on-write is said to be asymmetric because the original
	 * object is *not* marked copy-on-write.  A copied page is pushed
	 * to the copy object, regardless which party attempted to modify
	 * the page.
	 *
	 * Repeated asymmetric copy operations may be done.  If the
	 * original object has not been changed since the last copy, its
	 * copy object can be reused.  Otherwise, a new copy object can be
	 * inserted between the original object and its previous copy
	 * object.  Since any copy object is read-only, this cannot
	 * affect the contents of the previous copy object.
	 *
	 * Note that a copy object is higher in the object tree than the
	 * original object; therefore, use of the copy object recorded in
	 * the original object must be done carefully, to avoid deadlock.
	 */
3350 copy_size
= vm_object_round_page(copy_size
);
3354 * Wait for paging in progress.
3356 if (!src_object
->true_share
&&
3357 (src_object
->paging_in_progress
!= 0 ||
3358 src_object
->activity_in_progress
!= 0)) {
3359 if (src_object_shared
== TRUE
) {
3360 vm_object_unlock(src_object
);
3361 vm_object_lock(src_object
);
3362 src_object_shared
= FALSE
;
3365 vm_object_paging_wait(src_object
, THREAD_UNINT
);
3368 * See whether we can reuse the result of a previous
3372 old_copy
= src_object
->copy
;
3373 if (old_copy
!= VM_OBJECT_NULL
) {
3377 * Try to get the locks (out of order)
3379 if (src_object_shared
== TRUE
)
3380 lock_granted
= vm_object_lock_try_shared(old_copy
);
3382 lock_granted
= vm_object_lock_try(old_copy
);
3384 if (!lock_granted
) {
3385 vm_object_unlock(src_object
);
3387 if (collisions
++ == 0)
3388 copy_delayed_lock_contention
++;
3389 mutex_pause(collisions
);
3391 /* Heisenberg Rules */
3392 copy_delayed_lock_collisions
++;
3394 if (collisions
> copy_delayed_max_collisions
)
3395 copy_delayed_max_collisions
= collisions
;
3397 if (src_object_shared
== TRUE
)
3398 vm_object_lock_shared(src_object
);
3400 vm_object_lock(src_object
);
3406 * Determine whether the old copy object has
3410 if (old_copy
->resident_page_count
== 0 &&
3411 !old_copy
->pager_created
) {
3413 * It has not been modified.
3415 * Return another reference to
3416 * the existing copy-object if
3417 * we can safely grow it (if
3421 if (old_copy
->vo_size
< copy_size
) {
3422 if (src_object_shared
== TRUE
) {
3423 vm_object_unlock(old_copy
);
3424 vm_object_unlock(src_object
);
3426 vm_object_lock(src_object
);
3427 src_object_shared
= FALSE
;
3431 * We can't perform a delayed copy if any of the
3432 * pages in the extended range are wired (because
3433 * we can't safely take write permission away from
3434 * wired pages). If the pages aren't wired, then
3435 * go ahead and protect them.
3437 copy_delayed_protect_iterate
++;
3439 pmap_flush_context_init(&pmap_flush_context_storage
);
3440 delayed_pmap_flush
= FALSE
;
3442 vm_page_queue_iterate(&src_object
->memq
, p
, vm_page_t
, listq
) {
3443 if (!p
->fictitious
&&
3444 p
->offset
>= old_copy
->vo_size
&&
3445 p
->offset
< copy_size
) {
3446 if (VM_PAGE_WIRED(p
)) {
3447 vm_object_unlock(old_copy
);
3448 vm_object_unlock(src_object
);
3450 if (new_copy
!= VM_OBJECT_NULL
) {
3451 vm_object_unlock(new_copy
);
3452 vm_object_deallocate(new_copy
);
3454 if (delayed_pmap_flush
== TRUE
)
3455 pmap_flush(&pmap_flush_context_storage
);
3457 return VM_OBJECT_NULL
;
3459 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p
), (VM_PROT_ALL
& ~VM_PROT_WRITE
),
3460 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
3461 delayed_pmap_flush
= TRUE
;
3465 if (delayed_pmap_flush
== TRUE
)
3466 pmap_flush(&pmap_flush_context_storage
);
3468 old_copy
->vo_size
= copy_size
;
3470 if (src_object_shared
== TRUE
)
3471 vm_object_reference_shared(old_copy
);
3473 vm_object_reference_locked(old_copy
);
3474 vm_object_unlock(old_copy
);
3475 vm_object_unlock(src_object
);
3477 if (new_copy
!= VM_OBJECT_NULL
) {
3478 vm_object_unlock(new_copy
);
3479 vm_object_deallocate(new_copy
);
3487 * Adjust the size argument so that the newly-created
3488 * copy object will be large enough to back either the
3489 * old copy object or the new mapping.
3491 if (old_copy
->vo_size
> copy_size
)
3492 copy_size
= old_copy
->vo_size
;
3494 if (new_copy
== VM_OBJECT_NULL
) {
3495 vm_object_unlock(old_copy
);
3496 vm_object_unlock(src_object
);
3497 new_copy
= vm_object_allocate(copy_size
);
3498 vm_object_lock(src_object
);
3499 vm_object_lock(new_copy
);
3501 src_object_shared
= FALSE
;
3504 new_copy
->vo_size
= copy_size
;
3507 * The copy-object is always made large enough to
3508 * completely shadow the original object, since
3509 * it may have several users who want to shadow
3510 * the original object at different points.
3513 assert((old_copy
->shadow
== src_object
) &&
3514 (old_copy
->vo_shadow_offset
== (vm_object_offset_t
) 0));
3516 } else if (new_copy
== VM_OBJECT_NULL
) {
3517 vm_object_unlock(src_object
);
3518 new_copy
= vm_object_allocate(copy_size
);
3519 vm_object_lock(src_object
);
3520 vm_object_lock(new_copy
);
3522 src_object_shared
= FALSE
;
3527 * We now have the src object locked, and the new copy object
3528 * allocated and locked (and potentially the old copy locked).
3529 * Before we go any further, make sure we can still perform
3530 * a delayed copy, as the situation may have changed.
3532 * Specifically, we can't perform a delayed copy if any of the
3533 * pages in the range are wired (because we can't safely take
3534 * write permission away from wired pages). If the pages aren't
3535 * wired, then go ahead and protect them.
3537 copy_delayed_protect_iterate
++;
3539 pmap_flush_context_init(&pmap_flush_context_storage
);
3540 delayed_pmap_flush
= FALSE
;
3542 vm_page_queue_iterate(&src_object
->memq
, p
, vm_page_t
, listq
) {
3543 if (!p
->fictitious
&& p
->offset
< copy_size
) {
3544 if (VM_PAGE_WIRED(p
)) {
3546 vm_object_unlock(old_copy
);
3547 vm_object_unlock(src_object
);
3548 vm_object_unlock(new_copy
);
3549 vm_object_deallocate(new_copy
);
3551 if (delayed_pmap_flush
== TRUE
)
3552 pmap_flush(&pmap_flush_context_storage
);
3554 return VM_OBJECT_NULL
;
3556 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p
), (VM_PROT_ALL
& ~VM_PROT_WRITE
),
3557 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
3558 delayed_pmap_flush
= TRUE
;
3562 if (delayed_pmap_flush
== TRUE
)
3563 pmap_flush(&pmap_flush_context_storage
);
3565 if (old_copy
!= VM_OBJECT_NULL
) {
3567 * Make the old copy-object shadow the new one.
3568 * It will receive no more pages from the original
3572 /* remove ref. from old_copy */
3573 vm_object_lock_assert_exclusive(src_object
);
3574 src_object
->ref_count
--;
3575 assert(src_object
->ref_count
> 0);
3576 vm_object_lock_assert_exclusive(old_copy
);
3577 old_copy
->shadow
= new_copy
;
3578 vm_object_lock_assert_exclusive(new_copy
);
3579 assert(new_copy
->ref_count
> 0);
3580 new_copy
->ref_count
++; /* for old_copy->shadow ref. */
3583 if (old_copy
->res_count
) {
3584 VM_OBJ_RES_INCR(new_copy
);
3585 VM_OBJ_RES_DECR(src_object
);
3589 vm_object_unlock(old_copy
); /* done with old_copy */
3593 * Point the new copy at the existing object.
3595 vm_object_lock_assert_exclusive(new_copy
);
3596 new_copy
->shadow
= src_object
;
3597 new_copy
->vo_shadow_offset
= 0;
3598 new_copy
->shadowed
= TRUE
; /* caller must set needs_copy */
3600 vm_object_lock_assert_exclusive(src_object
);
3601 vm_object_reference_locked(src_object
);
3602 src_object
->copy
= new_copy
;
3603 vm_object_unlock(src_object
);
3604 vm_object_unlock(new_copy
);
3607 "vm_object_copy_delayed: used copy object %X for source %X\n",
3608 new_copy
, src_object
, 0, 0, 0);
/*
 * Routine:	vm_object_copy_strategically
 *
 * Purpose:
 *	Perform a copy according to the source object's
 *	declared strategy.  This operation may block,
 *	and may be interrupted.
 */
3621 __private_extern__ kern_return_t
3622 vm_object_copy_strategically(
3623 vm_object_t src_object
,
3624 vm_object_offset_t src_offset
,
3625 vm_object_size_t size
,
3626 vm_object_t
*dst_object
, /* OUT */
3627 vm_object_offset_t
*dst_offset
, /* OUT */
3628 boolean_t
*dst_needs_copy
) /* OUT */
3631 boolean_t interruptible
= THREAD_ABORTSAFE
; /* XXX */
3632 boolean_t object_lock_shared
= FALSE
;
3633 memory_object_copy_strategy_t copy_strategy
;
3635 assert(src_object
!= VM_OBJECT_NULL
);
3637 copy_strategy
= src_object
->copy_strategy
;
3639 if (copy_strategy
== MEMORY_OBJECT_COPY_DELAY
) {
3640 vm_object_lock_shared(src_object
);
3641 object_lock_shared
= TRUE
;
3643 vm_object_lock(src_object
);
3646 * The copy strategy is only valid if the memory manager
3647 * is "ready". Internal objects are always ready.
3650 while (!src_object
->internal
&& !src_object
->pager_ready
) {
3651 wait_result_t wait_result
;
3653 if (object_lock_shared
== TRUE
) {
3654 vm_object_unlock(src_object
);
3655 vm_object_lock(src_object
);
3656 object_lock_shared
= FALSE
;
3659 wait_result
= vm_object_sleep( src_object
,
3660 VM_OBJECT_EVENT_PAGER_READY
,
3662 if (wait_result
!= THREAD_AWAKENED
) {
3663 vm_object_unlock(src_object
);
3664 *dst_object
= VM_OBJECT_NULL
;
3666 *dst_needs_copy
= FALSE
;
3667 return(MACH_SEND_INTERRUPTED
);
3672 * Use the appropriate copy strategy.
3675 switch (copy_strategy
) {
3676 case MEMORY_OBJECT_COPY_DELAY
:
3677 *dst_object
= vm_object_copy_delayed(src_object
,
3678 src_offset
, size
, object_lock_shared
);
3679 if (*dst_object
!= VM_OBJECT_NULL
) {
3680 *dst_offset
= src_offset
;
3681 *dst_needs_copy
= TRUE
;
3682 result
= KERN_SUCCESS
;
3685 vm_object_lock(src_object
);
3686 /* fall thru when delayed copy not allowed */
3688 case MEMORY_OBJECT_COPY_NONE
:
3689 result
= vm_object_copy_slowly(src_object
, src_offset
, size
,
3690 interruptible
, dst_object
);
3691 if (result
== KERN_SUCCESS
) {
3693 *dst_needs_copy
= FALSE
;
3697 case MEMORY_OBJECT_COPY_CALL
:
3698 result
= vm_object_copy_call(src_object
, src_offset
, size
,
3700 if (result
== KERN_SUCCESS
) {
3701 *dst_offset
= src_offset
;
3702 *dst_needs_copy
= TRUE
;
3706 case MEMORY_OBJECT_COPY_SYMMETRIC
:
3707 XPR(XPR_VM_OBJECT
, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object
, src_offset
, size
, 0, 0);
3708 vm_object_unlock(src_object
);
3709 result
= KERN_MEMORY_RESTART_COPY
;
3713 panic("copy_strategically: bad strategy");
3714 result
= KERN_INVALID_ARGUMENT
;
/*
 * vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = TRUE;
__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t		*object,	/* IN/OUT */
	vm_object_offset_t	*offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	vm_object_t	source;
	vm_object_t	result;

	source = *object;
	assert(source != VM_OBJECT_NULL);
	if (source == VM_OBJECT_NULL)
		return FALSE;
	/*
	 * This assertion is valid but it gets triggered by Rosetta for example
	 * due to a combination of vm_remap() that changes a VM object's
	 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
	 * that then sets "needs_copy" on its map entry.  This creates a
	 * mapping situation that VM should never see and doesn't know how to
	 * handle.
	 * It's not clear if this can create any real problem but we should
	 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
	 * do more than just set "needs_copy" to handle the copy-on-write...
	 * In the meantime, let's disable the assertion.
	 */
3759 assert(source
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
);
	/*
	 * Determine if we really need a shadow.
	 *
	 * If the source object is larger than what we are trying
	 * to create, then force the shadow creation even if the
	 * ref count is 1.  This will allow us to [potentially]
	 * collapse the underlying object away in the future
	 * (freeing up the extra data it might contain and that
	 * we don't need).
	 */

	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */

	if (vm_object_shadow_check &&
	    source->vo_size == length &&
	    source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	     source->shadow->copy == VM_OBJECT_NULL) )
	{
		/* lock the object and check again */
		vm_object_lock(source);
		if (source->vo_size == length &&
		    source->ref_count == 1 &&
		    (source->shadow == VM_OBJECT_NULL ||
		     source->shadow->copy == VM_OBJECT_NULL))
		{
			source->shadowed = FALSE;
			vm_object_unlock(source);
			return FALSE;
		}
		/* things changed while we were locking "source"... */
		vm_object_unlock(source);
	}
	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding
	 * a reference to it.  Our caller changes his reference
	 * to point to the new object, removing a reference to
	 * the source object.  Net result: no change of reference
	 * count.
	 */
	result->shadow = source;

	/*
	 * Store the offset into the source object,
	 * and fix up the offset into the new object.
	 */

	result->vo_shadow_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
	return TRUE;
}
/*
 * The relationship between vm_object structures and
 * the memory_object requires careful synchronization.
 *
 * All associations are created by memory_object_create_named
 * for external pagers and vm_object_compressor_pager_create for internal
 * objects as follows:
 *
 *	pager:	the memory_object itself, supplied by
 *		the user requesting a mapping (or the kernel,
 *		when initializing internal objects); the
 *		kernel simulates holding send rights by keeping
 *		a port reference;
 *
 *	pager_control:
 *		the memory object control port,
 *		created by the kernel; the kernel holds
 *		receive (and ownership) rights to this
 *		port, but no other references.
 *
 * When initialization is complete, the "initialized" field
 * is asserted.  Other mappings using a particular memory object,
 * and any references to the vm_object gained through the
 * port association must wait for this initialization to occur.
 *
 * In order to allow the memory manager to set attributes before
 * requests (notably virtual copy operations, but also data or
 * unlock requests) are made, a "ready" attribute is made available.
 * Only the memory manager may affect the value of this attribute.
 * Its value does not affect critical kernel functions, such as
 * internal object initialization or destruction.  [Furthermore,
 * memory objects created by the kernel are assumed to be ready
 * immediately; the default memory manager need not explicitly
 * set the "ready" attribute.]
 *
 * [Both the "initialized" and "ready" attribute wait conditions
 * use the "pager" field as the wait event.]
 *
 * The port associations can be broken down by any of the
 * following routines:
 *	vm_object_terminate:
 *		No references to the vm_object remain, and
 *		the object cannot (or will not) be cached.
 *		This is the normal case, and is done even
 *		though one of the other cases has already been
 *		done.
 *	memory_object_destroy:
 *		The memory manager has requested that the
 *		kernel relinquish references to the memory
 *		object.  [The memory manager may not want to
 *		destroy the memory object, but may wish to
 *		refuse or tear down existing memory mappings.]
 *
 * Each routine that breaks an association must break all of
 * them at once.  At some later time, that routine must clear
 * the pager field and release the memory object references.
 * [Furthermore, each routine must cope with the simultaneous
 * or previous operations of the others.]
 *
 * Because the pager field may be cleared spontaneously, it
 * cannot be used to determine whether a memory object has
 * ever been associated with a particular vm_object.  [This
 * knowledge is important to the shadow object mechanism.]
 * For this reason, an additional "created" attribute is
 * provided.
 *
 * During various paging operations, the pager reference found in the
 * vm_object must be valid.  To prevent this from being released,
 * (other than being removed, i.e., made null), routines may use
 * the vm_object_paging_begin/end routines [actually, macros].
 * The implementation uses the "paging_in_progress" and "wanted" fields.
 * [Operations that alter the validity of the pager values include the
 * termination routines and vm_object_collapse.]
 */
/*
 * Routine:	vm_object_memory_object_associate
 *
 * Purpose:
 *	Associate a VM object to the given pager.
 *	If a VM object is not provided, create one.
 *	Initialize the pager.
 */
vm_object_t
vm_object_memory_object_associate(
3913 memory_object_t pager
,
3915 vm_object_size_t size
,
3918 memory_object_control_t control
;
3920 assert(pager
!= MEMORY_OBJECT_NULL
);
3922 if (object
!= VM_OBJECT_NULL
) {
3923 assert(object
->internal
);
3924 assert(object
->pager_created
);
3925 assert(!object
->pager_initialized
);
3926 assert(!object
->pager_ready
);
3928 object
= vm_object_allocate(size
);
3929 assert(object
!= VM_OBJECT_NULL
);
3930 object
->internal
= FALSE
;
3931 object
->pager_trusted
= FALSE
;
3932 /* copy strategy invalid until set by memory manager */
3933 object
->copy_strategy
= MEMORY_OBJECT_COPY_INVALID
;
3937 * Allocate request port.
3940 control
= memory_object_control_allocate(object
);
3941 assert (control
!= MEMORY_OBJECT_CONTROL_NULL
);
3943 vm_object_lock(object
);
3945 assert(!object
->pager_ready
);
3946 assert(!object
->pager_initialized
);
3947 assert(object
->pager
== NULL
);
3948 assert(object
->pager_control
== NULL
);
3951 * Copy the reference we were given.
3954 memory_object_reference(pager
);
3955 object
->pager_created
= TRUE
;
3956 object
->pager
= pager
;
3957 object
->pager_control
= control
;
3958 object
->pager_ready
= FALSE
;
3960 vm_object_unlock(object
);
3963 * Let the pager know we're using it.
3966 (void) memory_object_init(pager
,
3967 object
->pager_control
,
3970 vm_object_lock(object
);
3972 object
->named
= TRUE
;
3973 if (object
->internal
) {
3974 object
->pager_ready
= TRUE
;
3975 vm_object_wakeup(object
, VM_OBJECT_EVENT_PAGER_READY
);
3978 object
->pager_initialized
= TRUE
;
3979 vm_object_wakeup(object
, VM_OBJECT_EVENT_INITIALIZED
);
3981 vm_object_unlock(object
);
/*
 * Routine:	vm_object_compressor_pager_create
 *
 * Purpose:
 *	Create a memory object for an internal object.
 *
 * In/out conditions:
 *	The object is locked on entry and exit;
 *	it may be unlocked within this call.
 *
 * Limitations:
 *	Only one thread may be performing a
 *	vm_object_compressor_pager_create on an object at
 *	a time.  Presumably, only the pageout
 *	daemon will be using this routine.
 */
4001 vm_object_compressor_pager_create(
4004 memory_object_t pager
;
4005 vm_object_t pager_object
= VM_OBJECT_NULL
;
4007 assert(object
!= kernel_object
);
4010 * Prevent collapse or termination by holding a paging reference
4013 vm_object_paging_begin(object
);
4014 if (object
->pager_created
) {
4016 * Someone else got to it first...
4017 * wait for them to finish initializing the ports
4019 while (!object
->pager_initialized
) {
4020 vm_object_sleep(object
,
4021 VM_OBJECT_EVENT_INITIALIZED
,
4024 vm_object_paging_end(object
);
4028 if ((uint32_t) (object
->vo_size
/PAGE_SIZE
) !=
4029 (object
->vo_size
/PAGE_SIZE
)) {
4030 #if DEVELOPMENT || DEBUG
4031 printf("vm_object_compressor_pager_create(%p): "
4032 "object size 0x%llx >= 0x%llx\n",
4034 (uint64_t) object
->vo_size
,
4035 0x0FFFFFFFFULL
*PAGE_SIZE
);
4036 #endif /* DEVELOPMENT || DEBUG */
4037 vm_object_paging_end(object
);
4042 * Indicate that a memory object has been assigned
4043 * before dropping the lock, to prevent a race.
4046 object
->pager_created
= TRUE
;
4047 object
->paging_offset
= 0;
4049 vm_object_unlock(object
);
4052 * Create the [internal] pager, and associate it with this object.
4054 * We make the association here so that vm_object_enter()
4055 * can look up the object to complete initializing it. No
4056 * user will ever map this object.
4059 /* create our new memory object */
4060 assert((uint32_t) (object
->vo_size
/PAGE_SIZE
) ==
4061 (object
->vo_size
/PAGE_SIZE
));
4062 (void) compressor_memory_object_create(
4063 (memory_object_size_t
) object
->vo_size
,
4065 if (pager
== NULL
) {
4066 panic("vm_object_compressor_pager_create(): "
4067 "no pager for object %p size 0x%llx\n",
4068 object
, (uint64_t) object
->vo_size
);
4073 * A reference was returned by
4074 * memory_object_create(), and it is
4075 * copied by vm_object_memory_object_associate().
4078 pager_object
= vm_object_memory_object_associate(pager
,
4082 if (pager_object
!= object
) {
4083 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager
, pager_object
, object
, (uint64_t) object
->vo_size
);
4087 * Drop the reference we were passed.
4089 memory_object_deallocate(pager
);
4091 vm_object_lock(object
);
4094 * Release the paging reference
4096 vm_object_paging_end(object
);
/*
 * Global variables for vm_object_collapse():
 *
 *	Counts for normal collapses and bypasses.
 *	Debugging variables, to watch or disable collapse.
 */
static long	object_collapses = 0;
static long	object_bypasses  = 0;

static boolean_t	vm_object_collapse_allowed = TRUE;
static boolean_t	vm_object_bypass_allowed = TRUE;
4111 void vm_object_do_collapse_compressor(vm_object_t object
,
4112 vm_object_t backing_object
);
4114 vm_object_do_collapse_compressor(
4116 vm_object_t backing_object
)
4118 vm_object_offset_t new_offset
, backing_offset
;
4119 vm_object_size_t size
;
4121 vm_counters
.do_collapse_compressor
++;
4123 vm_object_lock_assert_exclusive(object
);
4124 vm_object_lock_assert_exclusive(backing_object
);
4126 size
= object
->vo_size
;
4129 * Move all compressed pages from backing_object
4133 for (backing_offset
= object
->vo_shadow_offset
;
4134 backing_offset
< object
->vo_shadow_offset
+ object
->vo_size
;
4135 backing_offset
+= PAGE_SIZE
) {
4136 memory_object_offset_t backing_pager_offset
;
4138 /* find the next compressed page at or after this offset */
4139 backing_pager_offset
= (backing_offset
+
4140 backing_object
->paging_offset
);
4141 backing_pager_offset
= vm_compressor_pager_next_compressed(
4142 backing_object
->pager
,
4143 backing_pager_offset
);
4144 if (backing_pager_offset
== (memory_object_offset_t
) -1) {
4145 /* no more compressed pages */
4148 backing_offset
= (backing_pager_offset
-
4149 backing_object
->paging_offset
);
4151 new_offset
= backing_offset
- object
->vo_shadow_offset
;
4153 if (new_offset
>= object
->vo_size
) {
4154 /* we're out of the scope of "object": done */
4158 if ((vm_page_lookup(object
, new_offset
) != VM_PAGE_NULL
) ||
4159 (vm_compressor_pager_state_get(object
->pager
,
4161 object
->paging_offset
)) ==
4162 VM_EXTERNAL_STATE_EXISTS
)) {
4164 * This page already exists in object, resident or
4166 * We don't need this compressed page in backing_object
4167 * and it will be reclaimed when we release
4174 * backing_object has this page in the VM compressor and
4175 * we need to transfer it to object.
4177 vm_counters
.do_collapse_compressor_pages
++;
4178 vm_compressor_pager_transfer(
4181 (new_offset
+ object
->paging_offset
),
4183 backing_object
->pager
,
4184 (backing_offset
+ backing_object
->paging_offset
));
/*
 * Routine:	vm_object_do_collapse
 *
 * Purpose:
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 * Conditions:
 *	Both objects and the cache are locked; the page
 *	queues are unlocked.
 */
4200 vm_object_do_collapse(
4202 vm_object_t backing_object
)
4205 vm_object_offset_t new_offset
, backing_offset
;
4206 vm_object_size_t size
;
4208 vm_object_lock_assert_exclusive(object
);
4209 vm_object_lock_assert_exclusive(backing_object
);
4211 assert(object
->purgable
== VM_PURGABLE_DENY
);
4212 assert(backing_object
->purgable
== VM_PURGABLE_DENY
);
4214 backing_offset
= object
->vo_shadow_offset
;
4215 size
= object
->vo_size
;
4218 * Move all in-memory pages from backing_object
4219 * to the parent. Pages that have been paged out
4220 * will be overwritten by any of the parent's
4221 * pages that shadow them.
4224 while (!vm_page_queue_empty(&backing_object
->memq
)) {
4226 p
= (vm_page_t
) vm_page_queue_first(&backing_object
->memq
);
4228 new_offset
= (p
->offset
- backing_offset
);
4230 assert(!p
->busy
|| p
->absent
);
4233 * If the parent has a page here, or if
4234 * this page falls outside the parent,
4237 * Otherwise, move it as planned.
4240 if (p
->offset
< backing_offset
|| new_offset
>= size
) {
4243 pp
= vm_page_lookup(object
, new_offset
);
4244 if (pp
== VM_PAGE_NULL
) {
4246 if (VM_COMPRESSOR_PAGER_STATE_GET(object
,
4248 == VM_EXTERNAL_STATE_EXISTS
) {
4250 * Parent object has this page
4251 * in the VM compressor.
4252 * Throw away the backing
4258 * Parent now has no page.
4259 * Move the backing object's page
4262 vm_page_rename(p
, object
, new_offset
);
4265 assert(! pp
->absent
);
4268 * Parent object has a real page.
4269 * Throw away the backing object's
4277 if (vm_object_collapse_compressor_allowed
&&
4278 object
->pager
!= MEMORY_OBJECT_NULL
&&
4279 backing_object
->pager
!= MEMORY_OBJECT_NULL
) {
4281 /* move compressed pages from backing_object to object */
4282 vm_object_do_collapse_compressor(object
, backing_object
);
4284 } else if (backing_object
->pager
!= MEMORY_OBJECT_NULL
) {
4286 assert((!object
->pager_created
&&
4287 (object
->pager
== MEMORY_OBJECT_NULL
)) ||
4288 (!backing_object
->pager_created
&&
4289 (backing_object
->pager
== MEMORY_OBJECT_NULL
)));
4291 * Move the pager from backing_object to object.
4293 * XXX We're only using part of the paging space
4294 * for keeps now... we ought to discard the
4298 assert(!object
->paging_in_progress
);
4299 assert(!object
->activity_in_progress
);
4300 assert(!object
->pager_created
);
4301 assert(object
->pager
== NULL
);
4302 object
->pager
= backing_object
->pager
;
4304 object
->pager_created
= backing_object
->pager_created
;
4305 object
->pager_control
= backing_object
->pager_control
;
4306 object
->pager_ready
= backing_object
->pager_ready
;
4307 object
->pager_initialized
= backing_object
->pager_initialized
;
4308 object
->paging_offset
=
4309 backing_object
->paging_offset
+ backing_offset
;
4310 if (object
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
4311 memory_object_control_collapse(object
->pager_control
,
4314 /* the backing_object has lost its pager: reset all fields */
4315 backing_object
->pager_created
= FALSE
;
4316 backing_object
->pager_control
= NULL
;
4317 backing_object
->pager_ready
= FALSE
;
4318 backing_object
->paging_offset
= 0;
4319 backing_object
->pager
= NULL
;
4322 * Object now shadows whatever backing_object did.
4323 * Note that the reference to backing_object->shadow
4324 * moves from within backing_object to within object.
4327 assert(!object
->phys_contiguous
);
4328 assert(!backing_object
->phys_contiguous
);
4329 object
->shadow
= backing_object
->shadow
;
4330 if (object
->shadow
) {
4331 object
->vo_shadow_offset
+= backing_object
->vo_shadow_offset
;
4332 /* "backing_object" gave its shadow to "object" */
4333 backing_object
->shadow
= VM_OBJECT_NULL
;
4334 backing_object
->vo_shadow_offset
= 0;
4336 /* no shadow, therefore no shadow offset... */
4337 object
->vo_shadow_offset
= 0;
4339 assert((object
->shadow
== VM_OBJECT_NULL
) ||
4340 (object
->shadow
->copy
!= backing_object
));
4343 * Discard backing_object.
4345 * Since the backing object has no pages, no
4346 * pager left, and no object references within it,
4347 * all that is necessary is to dispose of it.
4351 assert(backing_object
->ref_count
== 1);
4352 assert(backing_object
->resident_page_count
== 0);
4353 assert(backing_object
->paging_in_progress
== 0);
4354 assert(backing_object
->activity_in_progress
== 0);
4355 assert(backing_object
->shadow
== VM_OBJECT_NULL
);
4356 assert(backing_object
->vo_shadow_offset
== 0);
4358 if (backing_object
->pager
!= MEMORY_OBJECT_NULL
) {
4359 /* ... unless it has a pager; need to terminate pager too */
4360 vm_counters
.do_collapse_terminate
++;
4361 if (vm_object_terminate(backing_object
) != KERN_SUCCESS
) {
4362 vm_counters
.do_collapse_terminate_failure
++;
4367 assert(backing_object
->pager
== NULL
);
4369 backing_object
->alive
= FALSE
;
4370 vm_object_unlock(backing_object
);
4372 XPR(XPR_VM_OBJECT
, "vm_object_collapse, collapsed 0x%X\n",
4373 backing_object
, 0,0,0,0);
4375 #if VM_OBJECT_TRACKING
4376 if (vm_object_tracking_inited
) {
4377 btlog_remove_entries_for_element(vm_object_tracking_btlog
,
4380 #endif /* VM_OBJECT_TRACKING */
4382 vm_object_lock_destroy(backing_object
);
4384 zfree(vm_object_zone
, backing_object
);
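
/*
 * vm_object_do_bypass() is the lighter-weight alternative to
 * vm_object_do_collapse() above: rather than absorbing the backing object's
 * pages, it simply re-points "object" at the backing object's own shadow and
 * drops the parent's reference on the backing object.  vm_object_collapse()
 * (below) only takes this path once it has determined that none of the
 * backing object's pages would "show through" the parent.
 */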
static void
vm_object_do_bypass(
    vm_object_t object,
    vm_object_t backing_object)
{
    /*
     *  Make the parent shadow the next object
     *  in the chain.
     */

    vm_object_lock_assert_exclusive(object);
    vm_object_lock_assert_exclusive(backing_object);

#if TASK_SWAPPER
    /*
     *  Do object reference in-line to
     *  conditionally increment shadow's
     *  residence count.  If object is not
     *  resident, leave residence count
     *  on shadow alone.
     */
    if (backing_object->shadow != VM_OBJECT_NULL) {
        vm_object_lock(backing_object->shadow);
        vm_object_lock_assert_exclusive(backing_object->shadow);
        backing_object->shadow->ref_count++;
        if (object->res_count != 0)
            vm_object_res_reference(backing_object->shadow);
        vm_object_unlock(backing_object->shadow);
    }
#else   /* TASK_SWAPPER */
    vm_object_reference(backing_object->shadow);
#endif  /* TASK_SWAPPER */

    assert(!object->phys_contiguous);
    assert(!backing_object->phys_contiguous);
    object->shadow = backing_object->shadow;
    if (object->shadow) {
        object->vo_shadow_offset += backing_object->vo_shadow_offset;
    } else {
        /* no shadow, therefore no shadow offset... */
        object->vo_shadow_offset = 0;
    }

    /*
     *  Backing object might have had a copy pointer
     *  to us.  If it did, clear it.
     */
    if (backing_object->copy == object) {
        backing_object->copy = VM_OBJECT_NULL;
    }

    /*
     *  Drop the reference count on backing_object.
     *  Since its ref_count was at least 2, it
     *  will not vanish; so we don't need to call
     *  vm_object_deallocate.
     *  [with a caveat for "named" objects]
     *
     *  The res_count on the backing object is
     *  conditionally decremented.  It's possible
     *  (via vm_pageout_scan) to get here with
     *  a "swapped" object, which has a 0 res_count,
     *  in which case, the backing object res_count
     *  is already down by one.
     *
     *  Don't call vm_object_deallocate unless
     *  ref_count drops to zero.
     *
     *  The ref_count can drop to zero here if the
     *  backing object could be bypassed but not
     *  collapsed, such as when the backing object
     *  is temporary and cacheable.
     */

    if (backing_object->ref_count > 2 ||
        (!backing_object->named && backing_object->ref_count > 1)) {
        vm_object_lock_assert_exclusive(backing_object);
        backing_object->ref_count--;
#if TASK_SWAPPER
        if (object->res_count != 0)
            vm_object_res_deallocate(backing_object);
        assert(backing_object->ref_count > 0);
#endif  /* TASK_SWAPPER */
        vm_object_unlock(backing_object);
    } else {
        /*
         *  Drop locks so that we can deallocate
         *  the backing object.
         */

#if TASK_SWAPPER
        if (object->res_count == 0) {
            /* XXX get a reference for the deallocate below */
            vm_object_res_reference(backing_object);
        }
#endif  /* TASK_SWAPPER */
        /*
         * vm_object_collapse (the caller of this function) is
         * now called from contexts that may not guarantee that a
         * valid reference is held on the object... w/o a valid
         * reference, it is unsafe and unwise (you will definitely
         * regret it) to unlock the object and then retake the lock
         * since the object may be terminated and recycled in between.
         * The "activity_in_progress" reference will keep the object
         * 'stable'.
         */
        vm_object_activity_begin(object);
        vm_object_unlock(object);

        vm_object_unlock(backing_object);
        vm_object_deallocate(backing_object);

        /*
         *  Relock object. We don't have to reverify
         *  its state since vm_object_collapse will
         *  do that for us as it starts at the
         *  top of the chain.
         */

        vm_object_lock(object);
        vm_object_activity_end(object);
    }
}
/*
 *  vm_object_collapse:
 *
 *  Perform an object collapse or an object bypass if appropriate.
 *  The real work of collapsing and bypassing is performed in
 *  the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *  Requires that the object be locked and the page queues be unlocked.
 */
static unsigned long vm_object_collapse_calls = 0;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;

__private_extern__ void
vm_object_collapse(
    vm_object_t         object,
    vm_object_offset_t  hint_offset,
    boolean_t           can_bypass)
{
    vm_object_t     backing_object;
    unsigned int    rcount;
    unsigned int    size;
    vm_object_t     original_object;
    int             object_lock_type;
    int             backing_object_lock_type;

    vm_object_collapse_calls++;

    if (! vm_object_collapse_allowed &&
        ! (can_bypass && vm_object_bypass_allowed)) {
        return;
    }

    XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
        object, 0, 0, 0, 0);

    if (object == VM_OBJECT_NULL)
        return;

    original_object = object;

    /*
     * The top object was locked "exclusive" by the caller.
     * In the first pass, to determine if we can collapse the shadow chain,
     * take a "shared" lock on the shadow objects.  If we can collapse,
     * we'll have to go down the chain again with exclusive locks.
     */
    object_lock_type = OBJECT_LOCK_EXCLUSIVE;
    backing_object_lock_type = OBJECT_LOCK_SHARED;

retry:
    object = original_object;
    vm_object_lock_assert_exclusive(object);

    while (TRUE) {
        vm_object_collapse_objects++;
        /*
         *  Verify that the conditions are right for either
         *  collapse or bypass:
         */

        /*
         *  There is a backing object, and
         */

        backing_object = object->shadow;
        if (backing_object == VM_OBJECT_NULL) {
            if (object != original_object) {
                vm_object_unlock(object);
            }
            return;
        }
        if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
            vm_object_lock_shared(backing_object);
        } else {
            vm_object_lock(backing_object);
        }

        /*
         *  No pages in the object are currently
         *  being paged out, and
         */
        if (object->paging_in_progress != 0 ||
            object->activity_in_progress != 0) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  The backing object is not read_only,
         *  and no pages in the backing object are
         *  currently being paged out.
         *  The backing object is internal.
         */

        if (!backing_object->internal ||
            backing_object->paging_in_progress != 0 ||
            backing_object->activity_in_progress != 0) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         * Purgeable objects are not supposed to engage in
         * copy-on-write activities, so should not have
         * any shadow objects or be a shadow object to another
         * purgeable object.
         * Collapsing a purgeable object would require some
         * updates to the purgeable compressed ledgers.
         */
        if (object->purgable != VM_PURGABLE_DENY ||
            backing_object->purgable != VM_PURGABLE_DENY) {
            panic("vm_object_collapse() attempting to collapse "
                  "purgeable object: %p(%d) %p(%d)\n",
                  object, object->purgable,
                  backing_object, backing_object->purgable);
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  The backing object can't be a copy-object:
         *  the shadow_offset for the copy-object must stay
         *  as 0.  Furthermore (for the 'we have all the
         *  pages' case), if we bypass backing_object and
         *  just shadow the next object in the chain, old
         *  pages from that object would then have to be copied
         *  BOTH into the (former) backing_object and into the
         *  parent object.
         */
        if (backing_object->shadow != VM_OBJECT_NULL &&
            backing_object->shadow->copy == backing_object) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  We can now try to either collapse the backing
         *  object (if the parent is the only reference to
         *  it) or (perhaps) remove the parent's reference
         *  to it.
         *
         *  If there is exactly one reference to the backing
         *  object, we may be able to collapse it into the
         *  parent.
         *
         *  As long as one of the objects is still not known
         *  to the pager, we can collapse them.
         */
        if (backing_object->ref_count == 1 &&
            (vm_object_collapse_compressor_allowed ||
             !object->pager_created
             || (!backing_object->pager_created)
            ) && vm_object_collapse_allowed) {

            /*
             * We need the exclusive lock on the VM objects.
             */
            if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
                /*
                 * We have an object and its shadow locked
                 * "shared".  We can't just upgrade the locks
                 * to "exclusive", as some other thread might
                 * also have these objects locked "shared" and
                 * attempt to upgrade one or the other to
                 * "exclusive".  The upgrades would block
                 * forever waiting for the other "shared" locks
                 * to get released.
                 * So we have to release the locks and go
                 * down the shadow chain again (since it could
                 * have changed) with "exclusive" locking.
                 */
                vm_object_unlock(backing_object);
                if (object != original_object)
                    vm_object_unlock(object);
                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
                backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

                goto retry;
            }

            XPR(XPR_VM_OBJECT,
                "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
                backing_object, object,
                backing_object->pager,
                backing_object->pager_control, 0);

            /*
             *  Collapse the object with its backing
             *  object, and try again with the object's
             *  new backing object.
             */

            vm_object_do_collapse(object, backing_object);
            vm_object_collapse_do_collapse++;

            continue;
        }

        /*
         *  Collapsing the backing object was not possible
         *  or permitted, so let's try bypassing it.
         */

        if (! (can_bypass && vm_object_bypass_allowed)) {
            /* try and collapse the rest of the shadow chain */
            if (object != original_object) {
                vm_object_unlock(object);
            }
            object = backing_object;
            object_lock_type = backing_object_lock_type;
            continue;
        }

        /*
         *  If the object doesn't have all its pages present,
         *  we have to make sure no pages in the backing object
         *  "show through" before bypassing it.
         */
        size = (unsigned int)atop(object->vo_size);
        rcount = object->resident_page_count;

        if (rcount != size) {
            vm_object_offset_t  offset;
            vm_object_offset_t  backing_offset;
            unsigned int        backing_rcount;

            /*
             *  If the backing object has a pager but no pagemap,
             *  then we cannot bypass it, because we don't know
             *  what pages it has.
             */
            if (backing_object->pager_created) {
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            /*
             *  If the object has a pager but no pagemap,
             *  then we cannot bypass it, because we don't know
             *  what pages it has.
             */
            if (object->pager_created) {
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            backing_offset = object->vo_shadow_offset;
            backing_rcount = backing_object->resident_page_count;

            if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
                /*
                 * we have enough pages in the backing object to guarantee that
                 * at least 1 of them must be 'uncovered' by a resident page
                 * in the object we're evaluating, so move on and
                 * try to collapse the rest of the shadow chain
                 */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }
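
            /*
             * The check above is a pigeonhole argument: the parent's window
             * covers "size" pages of the backing object, and at most
             * (atop(backing_object->vo_size) - size) of the backing object's
             * backing_rcount resident pages can lie outside that window.  If
             * the remainder exceeds the parent's rcount resident pages, at
             * least one in-window backing page cannot be covered by a
             * resident page of the parent, so the bypass attempt is abandoned
             * early and we move down the chain instead.
             */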
            /*
             *  If all of the pages in the backing object are
             *  shadowed by the parent object, the parent
             *  object no longer has to shadow the backing
             *  object; it can shadow the next one in the
             *  chain.
             *
             *  If the backing object has existence info,
             *  we must examine its existence info
             *  for pages that the parent doesn't have.
             */

#define EXISTS_IN_OBJECT(obj, off, rc)                  \
    ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))       \
      == VM_EXTERNAL_STATE_EXISTS) ||                   \
     ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
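
            /*
             * EXISTS_IN_OBJECT(obj, off, rc) evaluates true if "obj" has the
             * page at offset "off" either in the VM compressor
             * (VM_EXTERNAL_STATE_EXISTS) or resident in memory.  Note the
             * side effect: each time a resident page is found, the caller's
             * resident-page budget "rc" is decremented, so once "rc" reaches
             * zero the vm_page_lookup() probes are skipped for the rest of
             * the scan.
             */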
            /*
             * Check the hint location first
             * (since it is often the quickest way out of here).
             */
            if (object->cow_hint != ~(vm_offset_t)0)
                hint_offset = (vm_object_offset_t) object->cow_hint;
            else
                hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
                    (hint_offset - 8 * PAGE_SIZE_64) : 0;

            if (EXISTS_IN_OBJECT(backing_object, hint_offset +
                                 backing_offset, backing_rcount) &&
                !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
                /* dependency right at the hint */
                object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
                /* try and collapse the rest of the shadow chain */
                if (object != original_object) {
                    vm_object_unlock(object);
                }
                object = backing_object;
                object_lock_type = backing_object_lock_type;
                continue;
            }

            /*
             * If the object's window onto the backing_object
             * is large compared to the number of resident
             * pages in the backing object, it makes sense to
             * walk the backing_object's resident pages first.
             *
             * NOTE: Pages may be in both the existence map and/or
             * resident, so if we don't find a dependency while
             * walking the backing object's resident page list
             * directly, and there is an existence map, we'll have
             * to run the offset based 2nd pass.  Because we may
             * have to run both passes, we need to be careful
             * not to decrement 'rcount' in the 1st pass.
             */
            if (backing_rcount && backing_rcount < (size / 8)) {
                unsigned int rc = rcount;
                vm_page_t p;

                backing_rcount = backing_object->resident_page_count;
                p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
                do {
                    offset = (p->offset - backing_offset);

                    if (offset < object->vo_size &&
                        offset != hint_offset &&
                        !EXISTS_IN_OBJECT(object, offset, rc)) {
                        /* found a dependency */
                        object->cow_hint = (vm_offset_t) offset; /* atomic */

                        break;
                    }
                    p = (vm_page_t) vm_page_queue_next(&p->listq);

                } while (--backing_rcount);
                if (backing_rcount != 0 ) {
                    /* try and collapse the rest of the shadow chain */
                    if (object != original_object) {
                        vm_object_unlock(object);
                    }
                    object = backing_object;
                    object_lock_type = backing_object_lock_type;
                    continue;
                }
            }

            /*
             * Walk through the offsets looking for pages in the
             * backing object that show through to the object.
             */
            if (backing_rcount) {
                offset = hint_offset;

                while ((offset =
                        (offset + PAGE_SIZE_64 < object->vo_size) ?
                        (offset + PAGE_SIZE_64) : 0) != hint_offset) {

                    if (EXISTS_IN_OBJECT(backing_object, offset +
                                         backing_offset, backing_rcount) &&
                        !EXISTS_IN_OBJECT(object, offset, rcount)) {
                        /* found a dependency */
                        object->cow_hint = (vm_offset_t) offset; /* atomic */
                        break;
                    }
                }
                if (offset != hint_offset) {
                    /* try and collapse the rest of the shadow chain */
                    if (object != original_object) {
                        vm_object_unlock(object);
                    }
                    object = backing_object;
                    object_lock_type = backing_object_lock_type;
                    continue;
                }
            }
        }

        /*
         * We need "exclusive" locks on the 2 VM objects.
         */
        if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
            vm_object_unlock(backing_object);
            if (object != original_object)
                vm_object_unlock(object);
            object_lock_type = OBJECT_LOCK_EXCLUSIVE;
            backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;

            goto retry;
        }

        /* reset the offset hint for any objects deeper in the chain */
        object->cow_hint = (vm_offset_t)0;

        /*
         *  All interesting pages in the backing object
         *  already live in the parent or its pager.
         *  Thus we can bypass the backing object.
         */

        vm_object_do_bypass(object, backing_object);
        vm_object_collapse_do_bypass++;

        /*
         *  Try again with this object's new backing object.
         */

        continue;
    }

    /* NOT REACHED */
    if (object != original_object) {
        vm_object_unlock(object);
    }
}
/*
 *  Routine:    vm_object_page_remove: [internal]
 *  Purpose:
 *      Removes all physical pages in the specified
 *      object range from the object's list of pages.
 *
 *  In/out conditions:
 *      The object must be locked.
 *      The object must not have paging_in_progress, usually
 *      guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
    vm_object_t         object,
    vm_object_offset_t  start,
    vm_object_offset_t  end)
{
    vm_page_t   p, next;

    /*
     *  One and two page removals are most popular.
     *  The factor of 16 here is somewhat arbitrary.
     *  It balances vm_object_lookup vs iteration.
     */

    if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
        vm_object_page_remove_lookup++;

        for (; start < end; start += PAGE_SIZE_64) {
            p = vm_page_lookup(object, start);
            if (p != VM_PAGE_NULL) {
                assert(!p->cleaning && !p->laundry);
                if (!p->fictitious && p->pmapped)
                    pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                VM_PAGE_FREE(p);
            }
        }
    } else {
        vm_object_page_remove_iterate++;

        p = (vm_page_t) vm_page_queue_first(&object->memq);
        while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
            next = (vm_page_t) vm_page_queue_next(&p->listq);
            if ((start <= p->offset) && (p->offset < end)) {
                assert(!p->cleaning && !p->laundry);
                if (!p->fictitious && p->pmapped)
                    pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                VM_PAGE_FREE(p);
            }
            p = next;
        }
    }
}
/*
 *  Routine:    vm_object_coalesce
 *  Function:   Coalesces two objects backing up adjoining
 *              regions of memory into a single object.
 *
 *  returns TRUE if objects were combined.
 *
 *  NOTE:   Only works at the moment if the second object is NULL -
 *          if it's not, which object do we lock first?
 *
 *  Parameters:
 *      prev_object     First object to coalesce
 *      prev_offset     Offset into prev_object
 *      next_object     Second object to coalesce
 *      next_offset     Offset into next_object
 *
 *      prev_size       Size of reference to prev_object
 *      next_size       Size of reference to next_object
 *
 *  Conditions:
 *      The object(s) must *not* be locked. The map must be locked
 *      to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
    vm_object_t                     prev_object,
    vm_object_t                     next_object,
    vm_object_offset_t              prev_offset,
    __unused vm_object_offset_t     next_offset,
    vm_object_size_t                prev_size,
    vm_object_size_t                next_size)
{
    vm_object_size_t    newsize;

    if (next_object != VM_OBJECT_NULL) {
        return(FALSE);
    }

    if (prev_object == VM_OBJECT_NULL) {
        return(TRUE);
    }

    XPR(XPR_VM_OBJECT,
        "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
        prev_object, prev_offset, prev_size, next_size, 0);

    vm_object_lock(prev_object);

    /*
     *  Try to collapse the object first
     */
    vm_object_collapse(prev_object, prev_offset, TRUE);

    /*
     *  Can't coalesce if pages not mapped to
     *  prev_entry may be in use any way:
     *  . more than one reference
     *  . paged out
     *  . shadows another object
     *  . has a copy elsewhere
     *  . is purgeable
     *  . paging references (pages might be in page-list)
     */

    if ((prev_object->ref_count > 1) ||
        prev_object->pager_created ||
        (prev_object->shadow != VM_OBJECT_NULL) ||
        (prev_object->copy != VM_OBJECT_NULL) ||
        (prev_object->true_share != FALSE) ||
        (prev_object->purgable != VM_PURGABLE_DENY) ||
        (prev_object->paging_in_progress != 0) ||
        (prev_object->activity_in_progress != 0)) {
        vm_object_unlock(prev_object);
        return(FALSE);
    }

    vm_object_coalesce_count++;

    /*
     *  Remove any pages that may still be in the object from
     *  a previous deallocation.
     */
    vm_object_page_remove(prev_object,
                          prev_offset + prev_size,
                          prev_offset + prev_size + next_size);

    /*
     *  Extend the object if necessary.
     */
    newsize = prev_offset + prev_size + next_size;
    if (newsize > prev_object->vo_size) {
        prev_object->vo_size = newsize;
    }

    vm_object_unlock(prev_object);
    return(TRUE);
}
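
/*
 * Note: the usual caller is the VM map layer which, with the map locked as
 * required above, tries to grow "prev_object" so that a new anonymous
 * allocation adjoining a previous one can reuse the same VM object instead
 * of creating a new one.
 */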
kern_return_t
vm_object_populate_with_private(
    vm_object_t         object,
    vm_object_offset_t  offset,
    ppnum_t             phys_page,
    vm_size_t           size)
{
    ppnum_t             base_page;
    vm_object_offset_t  base_offset;

    if (!object->private)
        return KERN_FAILURE;

    base_page = phys_page;

    vm_object_lock(object);

    if (!object->phys_contiguous) {
        vm_page_t   m;

        if ((base_offset = trunc_page_64(offset)) != offset) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        }
        base_offset += object->paging_offset;

        while (size) {
            m = vm_page_lookup(object, base_offset);

            if (m != VM_PAGE_NULL) {
                if (m->fictitious) {
                    if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {

                        vm_page_lockspin_queues();
                        m->private = TRUE;
                        vm_page_unlock_queues();

                        m->fictitious = FALSE;
                        VM_PAGE_SET_PHYS_PAGE(m, base_page);
                    }
                } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {

                    if ( !m->private) {
                        /*
                         * we'd leak a real page... that can't be right
                         */
                        panic("vm_object_populate_with_private - %p not private", m);
                    }
                    if (m->pmapped) {
                        /*
                         * pmap call to clear old mapping
                         */
                        pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
                    }
                    VM_PAGE_SET_PHYS_PAGE(m, base_page);
                }
            } else {
                while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
                    vm_page_more_fictitious();

                /*
                 * private normally requires lock_queues but since we
                 * are initializing the page, it's not necessary here
                 */
                m->private = TRUE;
                m->fictitious = FALSE;
                VM_PAGE_SET_PHYS_PAGE(m, base_page);
                m->unusual = TRUE;
                m->busy = FALSE;

                vm_page_insert(m, object, base_offset);
            }
            base_page++;            /* Go to the next physical page */
            base_offset += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
    } else {
        /* NOTE: we should check the original settings here */
        /* if we have a size > zero a pmap call should be made */
        /* to disable the range */

        /* shadows on contiguous memory are not allowed */
        /* we therefore can use the offset field */
        object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
        object->vo_size = size;
    }
    vm_object_unlock(object);

    return KERN_SUCCESS;
}
kern_return_t
memory_object_create_named(
    memory_object_t             pager,
    memory_object_offset_t      size,
    memory_object_control_t     *control)
{
    vm_object_t     object;

    *control = MEMORY_OBJECT_CONTROL_NULL;
    if (pager == MEMORY_OBJECT_NULL)
        return KERN_INVALID_ARGUMENT;

    object = vm_object_memory_object_associate(pager,
                                               VM_OBJECT_NULL,
                                               size,
                                               TRUE);
    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_OBJECT;
    }

    /* wait for object (if any) to be ready */
    if (object != VM_OBJECT_NULL) {
        vm_object_lock(object);
        object->named = TRUE;
        while (!object->pager_ready) {
            vm_object_sleep(object,
                            VM_OBJECT_EVENT_PAGER_READY,
                            THREAD_UNINT);
        }
        *control = object->pager_control;
        vm_object_unlock(object);
    }
    return (KERN_SUCCESS);
}
/*
 *  Routine:    memory_object_recover_named [user interface]
 *  Purpose:
 *      Attempt to recover a named reference for a VM object.
 *      VM will verify that the object has not already started
 *      down the termination path, and if it has, will optionally
 *      wait for that to finish.
 *  Returns:
 *      KERN_SUCCESS - we recovered a named reference on the object
 *      KERN_FAILURE - we could not recover a reference (object dead)
 *      KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
    memory_object_control_t     control,
    boolean_t                   wait_on_terminating)
{
    vm_object_t     object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return (KERN_INVALID_ARGUMENT);
    }
restart:
    vm_object_lock(object);

    if (object->terminating && wait_on_terminating) {
        vm_object_wait(object,
                       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
                       THREAD_UNINT);
        goto restart;
    }

    if (!object->alive) {
        vm_object_unlock(object);
        return KERN_FAILURE;
    }

    if (object->named == TRUE) {
        vm_object_unlock(object);
        return KERN_SUCCESS;
    }
    object->named = TRUE;
    vm_object_lock_assert_exclusive(object);
    object->ref_count++;
    vm_object_res_reference(object);
    while (!object->pager_ready) {
        vm_object_sleep(object,
                        VM_OBJECT_EVENT_PAGER_READY,
                        THREAD_UNINT);
    }
    vm_object_unlock(object);
    return (KERN_SUCCESS);
}
/*
 *  vm_object_release_name:
 *
 *  Enforces name semantic on memory_object reference count decrement
 *  This routine should not be called unless the caller holds a name
 *  reference gained through the memory_object_create_named.
 *
 *  If the TERMINATE_IDLE flag is set, the call will return if the
 *  reference count is not 1. i.e. idle with the only remaining reference
 *  being the name.
 *  If the decision is made to proceed the name field flag is set to
 *  false and the reference count is decremented.  If the RESPECT_CACHE
 *  flag is set and the reference count has gone to zero, the
 *  memory_object is checked to see if it is cacheable otherwise when
 *  the reference count is zero, it is simply terminated.
 */

__private_extern__ kern_return_t
vm_object_release_name(
    vm_object_t     object,
    int             flags)
{
    vm_object_t     shadow;
    boolean_t       original_object = TRUE;

    while (object != VM_OBJECT_NULL) {

        vm_object_lock(object);

        assert(object->alive);
        if (original_object)
            assert(object->named);
        assert(object->ref_count > 0);

        /*
         *  We have to wait for initialization before
         *  destroying or caching the object.
         */

        if (object->pager_created && !object->pager_initialized) {
            assert(!object->can_persist);
            vm_object_assert_wait(object,
                                  VM_OBJECT_EVENT_INITIALIZED,
                                  THREAD_UNINT);
            vm_object_unlock(object);
            thread_block(THREAD_CONTINUE_NULL);
            continue;
        }

        if (((object->ref_count > 1)
             && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
            || (object->terminating)) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        } else {
            if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
                vm_object_unlock(object);
                return KERN_SUCCESS;
            }
        }

        if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
            (object->ref_count == 1)) {
            if (original_object)
                object->named = FALSE;
            vm_object_unlock(object);
            /* let vm_object_deallocate push this thing into */
            /* the cache, if that is where it is bound */
            vm_object_deallocate(object);
            return KERN_SUCCESS;
        }
        VM_OBJ_RES_DECR(object);
        shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;

        if (object->ref_count == 1) {
            if (vm_object_terminate(object) != KERN_SUCCESS) {
                if (original_object) {
                    return KERN_FAILURE;
                } else {
                    return KERN_SUCCESS;
                }
            }
            if (shadow != VM_OBJECT_NULL) {
                original_object = FALSE;
                object = shadow;
                continue;
            }
            return KERN_SUCCESS;
        } else {
            vm_object_lock_assert_exclusive(object);
            object->ref_count--;
            assert(object->ref_count > 0);
            if (original_object)
                object->named = FALSE;
            vm_object_unlock(object);
            return KERN_SUCCESS;
        }
    }
    /*NOTREACHED*/
    return KERN_FAILURE;
}
__private_extern__ kern_return_t
vm_object_lock_request(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    memory_object_return_t  should_return,
    int                     flags,
    vm_prot_t               prot)
{
    __unused boolean_t      should_flush;

    should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

    XPR(XPR_MEMORY_OBJECT,
        "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
        object, offset, size,
        (((should_return&1)<<1)|should_flush), prot);

    /*
     *  Check for bogus arguments.
     */
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
        return (KERN_INVALID_ARGUMENT);

    size = round_page_64(size);

    /*
     *  Lock the object, and acquire a paging reference to
     *  prevent the memory_object reference from being released.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    (void)vm_object_update(object,
                           offset, size, NULL, NULL, should_return, flags, prot);

    vm_object_paging_end(object);
    vm_object_unlock(object);

    return (KERN_SUCCESS);
}
/*
 * Empty a purgeable object by grabbing the physical pages assigned to it and
 * putting them on the free queue without writing them to backing store, etc.
 * When the pages are next touched they will be demand zero-fill pages.  We
 * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
 * skip referenced/dirty pages, pages on the active queue, etc.  We're more
 * than happy to grab these since this is a purgeable object.  We mark the
 * object as "empty" after reaping its pages.
 *
 * On entry the object must be locked and it must be
 * purgeable with no delayed copies pending.
 */
uint64_t
vm_object_purge(vm_object_t object, int flags)
{
    unsigned int    object_page_count = 0, pgcount = 0;
    uint64_t        total_purged_pgcount = 0;
    boolean_t       skipped_object = FALSE;

    vm_object_lock_assert_exclusive(object);

    if (object->purgable == VM_PURGABLE_DENY)
        return 0;

    assert(object->copy == VM_OBJECT_NULL);
    assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

    /*
     * We need to set the object's state to VM_PURGABLE_EMPTY *before*
     * reaping its pages.  We update vm_page_purgeable_count in bulk
     * and we don't want vm_page_remove() to update it again for each
     * page we reap later.
     *
     * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
     * are all accounted for in the "volatile" ledgers, so this does not
     * make any difference.
     * If we transitioned directly from NONVOLATILE to EMPTY,
     * vm_page_purgeable_count must have been updated when the object
     * was dequeued from its volatile queue and the purgeable ledgers
     * must have also been updated accordingly at that time (in
     * vm_object_purgable_control()).
     */
    if (object->purgable == VM_PURGABLE_VOLATILE) {
        unsigned int delta;

        assert(object->resident_page_count >=
               object->wired_page_count);
        delta = (object->resident_page_count -
                 object->wired_page_count);
        if (delta != 0) {
            assert(vm_page_purgeable_count >=
                   delta);
            OSAddAtomic(-delta,
                        (SInt32 *)&vm_page_purgeable_count);
        }
        if (object->wired_page_count != 0) {
            assert(vm_page_purgeable_wired_count >=
                   object->wired_page_count);
            OSAddAtomic(-object->wired_page_count,
                        (SInt32 *)&vm_page_purgeable_wired_count);
        }
        object->purgable = VM_PURGABLE_EMPTY;
    }
    assert(object->purgable == VM_PURGABLE_EMPTY);

    object_page_count = object->resident_page_count;

    vm_object_reap_pages(object, REAP_PURGEABLE);

    if (object->resident_page_count >= object_page_count) {
        total_purged_pgcount = 0;
    } else {
        total_purged_pgcount = object_page_count - object->resident_page_count;
    }

    if (object->pager != NULL) {

        assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

        if (object->activity_in_progress == 0 &&
            object->paging_in_progress == 0) {
            /*
             * Also reap any memory coming from this object
             * in the VM compressor.
             *
             * There are no operations in progress on the VM object
             * and no operation can start while we're holding the
             * VM object lock, so it's safe to reap the compressed
             * pages and update the page counts.
             */
            pgcount = vm_compressor_pager_get_count(object->pager);
            if (pgcount) {
                pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
                vm_compressor_pager_count(object->pager,
                                          -pgcount,
                                          FALSE, /* shared lock */
                                          object);
                vm_purgeable_compressed_update(object,
                                               -pgcount);
            }
            if ( !(flags & C_DONT_BLOCK)) {
                assert(vm_compressor_pager_get_count(object->pager)
                       == 0);
            }
        } else {
            /*
             * There's some kind of paging activity in progress
             * for this object, which could result in a page
             * being compressed or decompressed, possibly while
             * the VM object is not locked, so it could race
             * with us.
             *
             * We can't really synchronize this without possibly
             * causing a deadlock when the compressor needs to
             * allocate or free memory while compressing or
             * decompressing a page from a purgeable object
             * mapped in the kernel_map...
             *
             * So let's not attempt to purge the compressor
             * pager if there's any kind of operation in
             * progress on the VM object.
             */
            skipped_object = TRUE;
        }
    }

    vm_object_lock_assert_exclusive(object);

    total_purged_pgcount += pgcount;

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
                              VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
                              object_page_count,
                              total_purged_pgcount,
                              skipped_object,
                              0);

    return total_purged_pgcount;
}
/*
 * vm_object_purgeable_control() allows the caller to control and investigate the
 * state of a purgeable object.  A purgeable object is created via a call to
 * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
 * never be coalesced with any other object -- even other purgeable objects --
 * and will thus always remain a distinct object.  A purgeable object has
 * special semantics when its reference count is exactly 1.  If its reference
 * count is greater than 1, then a purgeable object will behave like a normal
 * object and attempts to use this interface will result in an error return
 * of KERN_INVALID_ARGUMENT.
 *
 * A purgeable object may be put into a "volatile" state which will make the
 * object's pages eligible for being reclaimed without paging to backing
 * store if the system runs low on memory.  If the pages in a volatile
 * purgeable object are reclaimed, the purgeable object is said to have been
 * "emptied."  When a purgeable object is emptied the system will reclaim as
 * many pages from the object as it can in a convenient manner (pages already
 * en route to backing store or busy for other reasons are left as is).  When
 * a purgeable object is made volatile, its pages will generally be reclaimed
 * before other pages in the application's working set.  This semantic is
 * generally used by applications which can recreate the data in the object
 * faster than it can be paged in.  One such example might be media assets
 * which can be reread from a much faster RAID volume.
 *
 * A purgeable object may be designated as "non-volatile" which means it will
 * behave like all other objects in the system with pages being written to and
 * read from backing store as needed to satisfy system memory needs.  If the
 * object was emptied before the object was made non-volatile, that fact will
 * be returned as the old state of the purgeable object (see
 * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
 * were reclaimed as part of emptying the object will be refaulted in as
 * zero-fill on demand.  It is up to the application to note that an object
 * was emptied and recreate the object's contents if necessary.  When a
 * purgeable object is made non-volatile, its pages will generally not be paged
 * out to backing store in the immediate future.  A purgeable object may also
 * be manually emptied.
 *
 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
 * volatile purgeable object may be queried at any time.  This information may
 * be used as a control input to let the application know when the system is
 * experiencing memory pressure and is reclaiming memory.
 *
 * The specified address may be any address within the purgeable object.  If
 * the specified address does not represent any object in the target task's
 * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
 * object containing the specified address is not a purgeable object, then
 * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
 * returned.
 *
 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
 * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
 * state is used to set the new state of the purgeable object and return its
 * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
 * object is returned in the parameter state.
 *
 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
 * the non-volatile, volatile and volatile/empty states described above.
 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
 * immediately reclaim as many pages in the object as can be conveniently
 * collected (some may have already been written to backing store or be
 * otherwise busy).
 *
 * The process of making a purgeable object non-volatile and determining its
 * previous state is atomic.  Thus, if a purgeable object is made
 * VM_PURGABLE_NONVOLATILE and the old state is returned as
 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
 * completely intact and will remain so until the object is made volatile
 * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
 * was reclaimed while it was in a volatile state and its previous contents
 * have been lost.
 *
 * The object must be locked.
 */
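
/*
 * For illustration only (not part of the kernel sources): from user space the
 * state machine described above is normally driven through the Mach
 * vm_purgable_control() call, roughly along these lines:
 *
 *      vm_address_t    addr = 0;
 *      int             state;
 *
 *      vm_allocate(mach_task_self(), &addr, size,
 *                  VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *      ...fill the object with recreatable data...
 *
 *      state = VM_PURGABLE_VOLATILE;
 *      vm_purgable_control(mach_task_self(), addr,
 *                          VM_PURGABLE_SET_STATE, &state);
 *      ...
 *      state = VM_PURGABLE_NONVOLATILE;
 *      vm_purgable_control(mach_task_self(), addr,
 *                          VM_PURGABLE_SET_STATE, &state);
 *      if (state == VM_PURGABLE_EMPTY) {
 *              // the contents were reclaimed; recreate them
 *      }
 *
 * The kernel side of those calls eventually reaches
 * vm_object_purgable_control() below, with the object locked by the caller.
 */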
kern_return_t
vm_object_purgable_control(
    vm_object_t     object,
    vm_purgable_t   control,
    int             *state)
{
    int     old_state;
    int     new_state;

    if (object == VM_OBJECT_NULL) {
        /*
         * Object must already be present or it can't be purgeable.
         */
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock_assert_exclusive(object);

    /*
     * Get current state of the purgeable object.
     */
    old_state = object->purgable;
    if (old_state == VM_PURGABLE_DENY)
        return KERN_INVALID_ARGUMENT;

    /* purgeable can't have delayed copies - now or in the future */
    assert(object->copy == VM_OBJECT_NULL);
    assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);

    /*
     * Execute the desired operation.
     */
    if (control == VM_PURGABLE_GET_STATE) {
        *state = old_state;
        return KERN_SUCCESS;
    }

    if (control == VM_PURGABLE_SET_STATE &&
        object->purgeable_only_by_kernel) {
        return KERN_PROTECTION_FAILURE;
    }

    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
        object->volatile_empty = TRUE;
    }
    if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
        object->volatile_fault = TRUE;
    }

    new_state = *state & VM_PURGABLE_STATE_MASK;
    if (new_state == VM_PURGABLE_VOLATILE) {
        if (old_state == VM_PURGABLE_EMPTY) {
            /* what's been emptied must stay empty */
            new_state = VM_PURGABLE_EMPTY;
        }
        if (object->volatile_empty) {
            /* debugging mode: go straight to empty */
            new_state = VM_PURGABLE_EMPTY;
        }
    }

    switch (new_state) {
    case VM_PURGABLE_DENY:
        /*
         * Attempting to convert purgeable memory to non-purgeable:
         * not allowed.
         */
        return KERN_INVALID_ARGUMENT;
    case VM_PURGABLE_NONVOLATILE:
        object->purgable = new_state;

        if (old_state == VM_PURGABLE_VOLATILE) {
            unsigned int delta;

            assert(object->resident_page_count >=
                   object->wired_page_count);
            delta = (object->resident_page_count -
                     object->wired_page_count);

            assert(vm_page_purgeable_count >= delta);

            if (delta != 0) {
                OSAddAtomic(-delta,
                            (SInt32 *)&vm_page_purgeable_count);
            }
            if (object->wired_page_count != 0) {
                assert(vm_page_purgeable_wired_count >=
                       object->wired_page_count);
                OSAddAtomic(-object->wired_page_count,
                            (SInt32 *)&vm_page_purgeable_wired_count);
            }

            vm_page_lock_queues();

            /* object should be on a queue */
            assert(object->objq.next != NULL &&
                   object->objq.prev != NULL);
            purgeable_q_t queue;

            /*
             * Move object from its volatile queue to the
             * non-volatile queue...
             */
            queue = vm_purgeable_object_remove(object);
            assert(queue);

            if (object->purgeable_when_ripe) {
                vm_purgeable_token_delete_last(queue);
            }
            assert(queue->debug_count_objects >= 0);

            vm_page_unlock_queues();
        }
        if (old_state == VM_PURGABLE_VOLATILE ||
            old_state == VM_PURGABLE_EMPTY) {
            /*
             * Transfer the object's pages from the volatile to
             * non-volatile ledgers.
             */
            vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
                                    FALSE, /* disown */
                                    FALSE); /* task_objq locked? */
        }

        break;

    case VM_PURGABLE_VOLATILE:
        if (object->volatile_fault) {
            vm_page_t   p;
            int         refmod;

            vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
                if (p->busy ||
                    VM_PAGE_WIRED(p) ||
                    p->fictitious) {
                    continue;
                }
                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                if ((refmod & VM_MEM_MODIFIED) &&
                    !p->dirty) {
                    SET_PAGE_DIRTY(p, FALSE);
                }
            }
        }

        assert(old_state != VM_PURGABLE_EMPTY);

        purgeable_q_t queue;

        /* find the correct queue */
        if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
            queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
        else {
            if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
                queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
            else
                queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
        }

        if (old_state == VM_PURGABLE_NONVOLATILE ||
            old_state == VM_PURGABLE_EMPTY) {
            unsigned int delta;

            if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
                VM_PURGABLE_NO_AGING) {
                object->purgeable_when_ripe = FALSE;
            } else {
                object->purgeable_when_ripe = TRUE;
            }

            if (object->purgeable_when_ripe) {
                kern_return_t result;

                /* try to add token... this can fail */
                vm_page_lock_queues();

                result = vm_purgeable_token_add(queue);
                if (result != KERN_SUCCESS) {
                    vm_page_unlock_queues();
                    return result;
                }
                vm_page_unlock_queues();
            }

            assert(object->resident_page_count >=
                   object->wired_page_count);
            delta = (object->resident_page_count -
                     object->wired_page_count);

            if (delta != 0) {
                OSAddAtomic(delta,
                            &vm_page_purgeable_count);
            }
            if (object->wired_page_count != 0) {
                OSAddAtomic(object->wired_page_count,
                            &vm_page_purgeable_wired_count);
            }

            object->purgable = new_state;

            /* object should be on "non-volatile" queue */
            assert(object->objq.next != NULL);
            assert(object->objq.prev != NULL);
        }
        else if (old_state == VM_PURGABLE_VOLATILE) {
            purgeable_q_t   old_queue;
            boolean_t       purgeable_when_ripe;

            /*
             * if reassigning priorities / purgeable groups, we don't change the
             * token queue. So moving priorities will not make pages stay around longer.
             * Reasoning is that the algorithm gives most priority to the most important
             * object. If a new token is added, the most important object's priority is boosted.
             * This biases the system already for purgeable queues that move a lot.
             * It doesn't seem more biasing is necessary in this case, where no new object is added.
             */
            assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */

            old_queue = vm_purgeable_object_remove(object);
            assert(old_queue);

            if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
                VM_PURGABLE_NO_AGING) {
                purgeable_when_ripe = FALSE;
            } else {
                purgeable_when_ripe = TRUE;
            }

            if (old_queue != queue ||
                (purgeable_when_ripe !=
                 object->purgeable_when_ripe)) {
                kern_return_t result;

                /* Changing queue. Have to move token. */
                vm_page_lock_queues();
                if (object->purgeable_when_ripe) {
                    vm_purgeable_token_delete_last(old_queue);
                }
                object->purgeable_when_ripe = purgeable_when_ripe;
                if (object->purgeable_when_ripe) {
                    result = vm_purgeable_token_add(queue);
                    assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
                }
                vm_page_unlock_queues();
            }
        }
        vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT);
        if (old_state == VM_PURGABLE_NONVOLATILE) {
            vm_purgeable_accounting(object,
                                    VM_PURGABLE_NONVOLATILE,
                                    FALSE, /* disown */
                                    FALSE); /* task_objq locked? */
        }

        assert(queue->debug_count_objects >= 0);

        break;

    case VM_PURGABLE_EMPTY:
        if (object->volatile_fault) {
            vm_page_t   p;
            int         refmod;

            vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
                if (p->busy ||
                    VM_PAGE_WIRED(p) ||
                    p->fictitious) {
                    continue;
                }
                refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
                if ((refmod & VM_MEM_MODIFIED) &&
                    !p->dirty) {
                    SET_PAGE_DIRTY(p, FALSE);
                }
            }
        }

        if (old_state == VM_PURGABLE_VOLATILE) {
            purgeable_q_t old_queue;

            /* object should be on a queue */
            assert(object->objq.next != NULL &&
                   object->objq.prev != NULL);

            old_queue = vm_purgeable_object_remove(object);
            assert(old_queue);
            if (object->purgeable_when_ripe) {
                vm_page_lock_queues();
                vm_purgeable_token_delete_first(old_queue);
                vm_page_unlock_queues();
            }
        }

        if (old_state == VM_PURGABLE_NONVOLATILE) {
            /*
             * This object's pages were previously accounted as
             * "non-volatile" and now need to be accounted as
             * "volatile".
             */
            vm_purgeable_accounting(object,
                                    VM_PURGABLE_NONVOLATILE,
                                    FALSE, /* disown */
                                    FALSE); /* task_objq locked? */
            /*
             * Set to VM_PURGABLE_EMPTY because the pages are no
             * longer accounted in the "non-volatile" ledger
             * and are also not accounted for in
             * "vm_page_purgeable_count".
             */
            object->purgable = VM_PURGABLE_EMPTY;
        }

        (void) vm_object_purge(object, 0);
        assert(object->purgable == VM_PURGABLE_EMPTY);

        break;
    }

    *state = old_state;

    vm_object_lock_assert_exclusive(object);

    return KERN_SUCCESS;
}
kern_return_t
vm_object_get_page_counts(
    vm_object_t         object,
    vm_object_offset_t  offset,
    vm_object_size_t    size,
    unsigned int        *resident_page_count,
    unsigned int        *dirty_page_count)
{

    kern_return_t       kr = KERN_SUCCESS;
    boolean_t           count_dirty_pages = FALSE;
    vm_page_t           p = VM_PAGE_NULL;
    unsigned int        local_resident_count = 0;
    unsigned int        local_dirty_count = 0;
    vm_object_offset_t  cur_offset = 0;
    vm_object_offset_t  end_offset = 0;

    if (object == VM_OBJECT_NULL)
        return KERN_INVALID_ARGUMENT;

    cur_offset = offset;

    end_offset = offset + size;

    vm_object_lock_assert_exclusive(object);

    if (dirty_page_count != NULL) {

        count_dirty_pages = TRUE;
    }

    if (resident_page_count != NULL && count_dirty_pages == FALSE) {
        /*
         * Fast path when:
         * - we only want the resident page count, and,
         * - the entire object is exactly covered by the request.
         */
        if (offset == 0 && (object->vo_size == size)) {

            *resident_page_count = object->resident_page_count;
            goto out;
        }
    }

    if (object->resident_page_count <= (size >> PAGE_SHIFT)) {

        vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {

            if (p->offset >= cur_offset && p->offset < end_offset) {

                local_resident_count++;

                if (count_dirty_pages) {

                    if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {

                        local_dirty_count++;
                    }
                }
            }
        }
    } else {

        for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {

            p = vm_page_lookup(object, cur_offset);

            if (p != VM_PAGE_NULL) {

                local_resident_count++;

                if (count_dirty_pages) {

                    if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {

                        local_dirty_count++;
                    }
                }
            }
        }
    }

    if (resident_page_count != NULL) {
        *resident_page_count = local_resident_count;
    }

    if (dirty_page_count != NULL) {
        *dirty_page_count = local_dirty_count;
    }

out:
    return kr;
}
#if TASK_SWAPPER
/*
 * vm_object_res_deallocate
 *
 * (recursively) decrement residence counts on vm objects and their shadows.
 * Called from vm_object_deallocate and when swapping out an object.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_deallocate(
    vm_object_t     object)
{
    vm_object_t orig_object = object;
    /*
     * Object is locked so it can be called directly
     * from vm_object_deallocate.  Original object is never
     * unlocked.
     */
    assert(object->res_count > 0);
    while (--object->res_count == 0) {
        assert(object->ref_count >= object->res_count);
        vm_object_deactivate_all_pages(object);
        /* iterate on shadow, if present */
        if (object->shadow != VM_OBJECT_NULL) {
            vm_object_t tmp_object = object->shadow;
            vm_object_lock(tmp_object);
            if (object != orig_object)
                vm_object_unlock(object);
            object = tmp_object;
            assert(object->res_count > 0);
        } else
            break;
    }
    if (object != orig_object)
        vm_object_unlock(object);
}
/*
 * vm_object_res_reference
 *
 * Internal function to increment residence count on a vm object
 * and its shadows.  It is called only from vm_object_reference, and
 * when swapping in a vm object, via vm_map_swap.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_reference(
    vm_object_t     object)
{
    vm_object_t orig_object = object;
    /*
     * Object is locked, so this can be called directly
     * from vm_object_reference.  This lock is never released.
     */
    while ((++object->res_count == 1) &&
           (object->shadow != VM_OBJECT_NULL)) {
        vm_object_t tmp_object = object->shadow;

        assert(object->ref_count >= object->res_count);
        vm_object_lock(tmp_object);
        if (object != orig_object)
            vm_object_unlock(object);
        object = tmp_object;
    }
    if (object != orig_object)
        vm_object_unlock(object);
    assert(orig_object->ref_count >= orig_object->res_count);
}
#endif  /* TASK_SWAPPER */
/*
 *  vm_object_reference:
 *
 *  Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
    vm_object_t     object)
{
    if (object == VM_OBJECT_NULL)
        return;

    vm_object_lock(object);
    assert(object->ref_count > 0);
    vm_object_reference_locked(object);
    vm_object_unlock(object);
}
/*
 * vm_object_transpose
 *
 * This routine takes two VM objects of the same size and exchanges
 * their backing store.
 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
 *
 * The VM objects must not be locked by caller.
 */
unsigned int vm_object_transpose_count = 0;
kern_return_t
vm_object_transpose(
    vm_object_t         object1,
    vm_object_t         object2,
    vm_object_size_t    transpose_size)
{
    vm_object_t         tmp_object;
    kern_return_t       retval;
    boolean_t           object1_locked, object2_locked;
    vm_page_t           page;
    vm_object_offset_t  page_offset;
    tmp_object = VM_OBJECT_NULL;
    object1_locked = FALSE; object2_locked = FALSE;

    if (object1 == object2 ||
        object1 == VM_OBJECT_NULL ||
        object2 == VM_OBJECT_NULL) {
        /*
         * If the 2 VM objects are the same, there's
         * no point in exchanging their backing store.
         */
        retval = KERN_INVALID_VALUE;
        goto done;
    }

    /*
     * Since we need to lock both objects at the same time,
     * make sure we always lock them in the same order to
     * avoid deadlocks.
     */
    if (object1 > object2) {
        tmp_object = object1;
        object1 = object2;
        object2 = tmp_object;
    }

    /*
     * Allocate a temporary VM object to hold object1's contents
     * while we copy object2 to object1.
     */
    tmp_object = vm_object_allocate(transpose_size);
    vm_object_lock(tmp_object);
    tmp_object->can_persist = FALSE;

    /*
     * Grab control of the 1st VM object.
     */
    vm_object_lock(object1);
    object1_locked = TRUE;
    if (!object1->alive || object1->terminating ||
        object1->copy || object1->shadow || object1->shadowed ||
        object1->purgable != VM_PURGABLE_DENY) {
        /*
         * We don't deal with copy or shadow objects (yet).
         */
        retval = KERN_INVALID_VALUE;
        goto done;
    }
    /*
     * We're about to mess with the object's backing store and
     * taking a "paging_in_progress" reference wouldn't be enough
     * to prevent any paging activity on this object, so the caller should
     * have "quiesced" the objects beforehand, via a UPL operation with
     * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
     * and UPL_BLOCK_ACCESS (to mark the pages "busy").
     *
     * Wait for any paging operation to complete (but only paging, not
     * other kind of activities not linked to the pager).  After we're
     * satisfied that there's no more paging in progress, we keep the
     * object locked, to guarantee that no one tries to access its pager.
     */
    vm_object_paging_only_wait(object1, THREAD_UNINT);

    /*
     * Same as above for the 2nd object...
     */
    vm_object_lock(object2);
    object2_locked = TRUE;
    if (! object2->alive || object2->terminating ||
        object2->copy || object2->shadow || object2->shadowed ||
        object2->purgable != VM_PURGABLE_DENY) {
        retval = KERN_INVALID_VALUE;
        goto done;
    }
    vm_object_paging_only_wait(object2, THREAD_UNINT);


    if (object1->vo_size != object2->vo_size ||
        object1->vo_size != transpose_size) {
        /*
         * If the 2 objects don't have the same size, we can't
         * exchange their backing stores or one would overflow.
         * If their size doesn't match the caller's
         * "transpose_size", we can't do it either because the
         * transpose operation will affect the entire span of
         * both objects.
         */
        retval = KERN_INVALID_VALUE;
        goto done;
    }
    /*
     * Transpose the lists of resident pages.
     * This also updates the resident_page_count and the memq_hint.
     */
    if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
        /*
         * No pages in object1, just transfer pages
         * from object2 to object1.  No need to go through
         * an intermediate object.
         */
        while (!vm_page_queue_empty(&object2->memq)) {
            page = (vm_page_t) vm_page_queue_first(&object2->memq);
            vm_page_rename(page, object1, page->offset);
        }
        assert(vm_page_queue_empty(&object2->memq));
    } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
        /*
         * No pages in object2, just transfer pages
         * from object1 to object2.  No need to go through
         * an intermediate object.
         */
        while (!vm_page_queue_empty(&object1->memq)) {
            page = (vm_page_t) vm_page_queue_first(&object1->memq);
            vm_page_rename(page, object2, page->offset);
        }
        assert(vm_page_queue_empty(&object1->memq));
    } else {
        /* transfer object1's pages to tmp_object */
        while (!vm_page_queue_empty(&object1->memq)) {
            page = (vm_page_t) vm_page_queue_first(&object1->memq);
            page_offset = page->offset;
            vm_page_remove(page, TRUE);
            page->offset = page_offset;
            vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, listq);
        }
        assert(vm_page_queue_empty(&object1->memq));
        /* transfer object2's pages to object1 */
        while (!vm_page_queue_empty(&object2->memq)) {
            page = (vm_page_t) vm_page_queue_first(&object2->memq);
            vm_page_rename(page, object1, page->offset);
        }
        assert(vm_page_queue_empty(&object2->memq));
        /* transfer tmp_object's pages to object2 */
        while (!vm_page_queue_empty(&tmp_object->memq)) {
            page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
            vm_page_queue_remove(&tmp_object->memq, page,
                                 vm_page_t, listq);
            vm_page_insert(page, object2, page->offset);
        }
        assert(vm_page_queue_empty(&tmp_object->memq));
    }
#define __TRANSPOSE_FIELD(field)				\
MACRO_BEGIN							\
	tmp_object->field = object1->field;			\
	object1->field = object2->field;			\
	object2->field = tmp_object->field;			\
MACRO_END
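
	/*
	 * For example, __TRANSPOSE_FIELD(pager) expands (roughly) to:
	 *	tmp_object->pager = object1->pager;
	 *	object1->pager = object2->pager;
	 *	object2->pager = tmp_object->pager;
	 * i.e. "tmp_object" is reused as the classic swap temporary for each
	 * field exchanged below.
	 */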
6392 /* "Lock" refers to the object not its contents */
6393 /* "size" should be identical */
6394 assert(object1
->vo_size
== object2
->vo_size
);
6395 /* "memq_hint" was updated above when transposing pages */
6396 /* "ref_count" refers to the object not its contents */
6397 assert(object1
->ref_count
>= 1);
6398 assert(object2
->ref_count
>= 1);
6400 /* "res_count" refers to the object not its contents */
6402 /* "resident_page_count" was updated above when transposing pages */
6403 /* "wired_page_count" was updated above when transposing pages */
6404 /* "reusable_page_count" was updated above when transposing pages */
6405 /* there should be no "copy" */
6406 assert(!object1
->copy
);
6407 assert(!object2
->copy
);
6408 /* there should be no "shadow" */
6409 assert(!object1
->shadow
);
6410 assert(!object2
->shadow
);
6411 __TRANSPOSE_FIELD(vo_shadow_offset
); /* used by phys_contiguous objects */
6412 __TRANSPOSE_FIELD(pager
);
6413 __TRANSPOSE_FIELD(paging_offset
);
6414 __TRANSPOSE_FIELD(pager_control
);
6415 /* update the memory_objects' pointers back to the VM objects */
6416 if (object1
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
6417 memory_object_control_collapse(object1
->pager_control
,
6420 if (object2
->pager_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
6421 memory_object_control_collapse(object2
->pager_control
,
6424 __TRANSPOSE_FIELD(copy_strategy
);
6425 /* "paging_in_progress" refers to the object not its contents */
6426 assert(!object1
->paging_in_progress
);
6427 assert(!object2
->paging_in_progress
);
6428 assert(object1
->activity_in_progress
);
6429 assert(object2
->activity_in_progress
);
6430 /* "all_wanted" refers to the object not its contents */
6431 __TRANSPOSE_FIELD(pager_created
);
6432 __TRANSPOSE_FIELD(pager_initialized
);
6433 __TRANSPOSE_FIELD(pager_ready
);
6434 __TRANSPOSE_FIELD(pager_trusted
);
6435 __TRANSPOSE_FIELD(can_persist
);
6436 __TRANSPOSE_FIELD(internal
);
6437 __TRANSPOSE_FIELD(private);
6438 __TRANSPOSE_FIELD(pageout
);
6439 /* "alive" should be set */
6440 assert(object1
->alive
);
6441 assert(object2
->alive
);
6442 /* "purgeable" should be non-purgeable */
6443 assert(object1
->purgable
== VM_PURGABLE_DENY
);
6444 assert(object2
->purgable
== VM_PURGABLE_DENY
);
6445 /* "shadowed" refers to the the object not its contents */
6446 __TRANSPOSE_FIELD(purgeable_when_ripe
);
6447 __TRANSPOSE_FIELD(true_share
);
6448 /* "terminating" should not be set */
6449 assert(!object1
->terminating
);
6450 assert(!object2
->terminating
);
6451 /* transfer "named" reference if needed */
6452 if (object1
->named
&& !object2
->named
) {
6453 assert(object1
->ref_count
>= 2);
6454 assert(object2
->ref_count
>= 1);
6455 object1
->ref_count
--;
6456 object2
->ref_count
++;
6457 } else if (!object1
->named
&& object2
->named
) {
6458 assert(object1
->ref_count
>= 1);
6459 assert(object2
->ref_count
>= 2);
6460 object1
->ref_count
++;
6461 object2
->ref_count
--;
6463 __TRANSPOSE_FIELD(named
);
6464 /* "shadow_severed" refers to the object not its contents */
6465 __TRANSPOSE_FIELD(phys_contiguous
);
6466 __TRANSPOSE_FIELD(nophyscache
);
6467 /* "cached_list.next" points to transposed object */
6468 object1
->cached_list
.next
= (queue_entry_t
) object2
;
6469 object2
->cached_list
.next
= (queue_entry_t
) object1
;
6470 /* "cached_list.prev" should be NULL */
6471 assert(object1
->cached_list
.prev
== NULL
);
6472 assert(object2
->cached_list
.prev
== NULL
);
6473 __TRANSPOSE_FIELD(last_alloc
);
6474 __TRANSPOSE_FIELD(sequential
);
6475 __TRANSPOSE_FIELD(pages_created
);
6476 __TRANSPOSE_FIELD(pages_used
);
6477 __TRANSPOSE_FIELD(scan_collisions
);
6478 __TRANSPOSE_FIELD(cow_hint
);
6479 __TRANSPOSE_FIELD(wimg_bits
);
6480 __TRANSPOSE_FIELD(set_cache_attr
);
6481 __TRANSPOSE_FIELD(code_signed
);
6482 object1
->transposed
= TRUE
;
6483 object2
->transposed
= TRUE
;
6484 __TRANSPOSE_FIELD(mapping_in_progress
);
6485 __TRANSPOSE_FIELD(volatile_empty
);
6486 __TRANSPOSE_FIELD(volatile_fault
);
6487 __TRANSPOSE_FIELD(all_reusable
);
6488 assert(object1
->blocked_access
);
6489 assert(object2
->blocked_access
);
6490 assert(object1
->__object2_unused_bits
== 0);
6491 assert(object2
->__object2_unused_bits
== 0);
6493 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6495 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.next
== NULL
));
6496 assert((object1
->purgable
== VM_PURGABLE_DENY
) || (object1
->objq
.prev
== NULL
));
6497 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.next
== NULL
));
6498 assert((object2
->purgable
== VM_PURGABLE_DENY
) || (object2
->objq
.prev
== NULL
));
6500 #undef __TRANSPOSE_FIELD
6502 retval
= KERN_SUCCESS
;
done:
	/*
	 * Cleanup.
	 */
	if (tmp_object != VM_OBJECT_NULL) {
		vm_object_unlock(tmp_object);
		/*
		 * Re-initialize the temporary object to avoid
		 * deallocating a real pager.
		 */
		_vm_object_allocate(transpose_size, tmp_object);
		vm_object_deallocate(tmp_object);
		tmp_object = VM_OBJECT_NULL;
	}
	if (object1_locked) {
		vm_object_unlock(object1);
		object1_locked = FALSE;
	}
	if (object2_locked) {
		vm_object_unlock(object2);
		object2_locked = FALSE;
	}

	vm_object_transpose_count++;

	return retval;
}
/*
 *	vm_object_cluster_size
 *
 *	Determine how big a cluster we should issue an I/O for...
 *
 *	Inputs:   *start == offset of page needed
 *		  *length == maximum cluster pager can handle
 *	Outputs:  *start == beginning offset of cluster
 *		  *length == length of cluster to try
 *
 *	The original *start will be encompassed by the cluster
 */
extern int speculative_reads_disabled;

/*
 * Try to always keep these values an even multiple of PAGE_SIZE.  We use these values
 * to derive min_ph_bytes and max_ph_bytes (IMP: bytes, not # of pages) and expect those
 * values to always be page-aligned.  The derivation could involve operations (e.g. division)
 * that could give us non-page-size-aligned values if we start out with values that
 * are odd multiples of PAGE_SIZE.
 */
#if CONFIG_EMBEDDED
unsigned int preheat_max_bytes = (1024 * 512);
#else /* CONFIG_EMBEDDED */
unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
#endif /* CONFIG_EMBEDDED */
unsigned int preheat_min_bytes = (1024 * 32);
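
/*
 * Illustrative arithmetic only (not part of the logic below): under the
 * default behavior, vm_object_cluster_size() grows the speculative window
 * roughly as (max_length * pages_used) / pages_created.  For an object that
 * has created 1000 pages and re-used 250 of them, with a 256KB max_length,
 * that yields a ~64KB window, which is then clamped to at least the minimum
 * preheat size and rounded up to a PAGE_SIZE boundary.
 */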
__private_extern__ void
vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
		       vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
{
	vm_size_t		pre_heat_size;
	vm_size_t		tail_size;
	vm_size_t		head_size;
	vm_size_t		max_length;
	vm_size_t		cluster_size;
	vm_object_offset_t	object_size;
	vm_object_offset_t	orig_start;
	vm_object_offset_t	target_start;
	vm_object_offset_t	offset;
	vm_behavior_t		behavior;
	boolean_t		look_behind = TRUE;
	boolean_t		look_ahead = TRUE;
	boolean_t		isSSD = FALSE;
	uint32_t		throttle_limit;
	int			sequential_run;
	int			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
	vm_size_t		max_ph_size;
	vm_size_t		min_ph_size;

	assert( !(*length & PAGE_MASK));
	assert( !(*start & PAGE_MASK_64));

	/*
	 * remember maximum length of run requested
	 */
	max_length = *length;
	/*
	 * we'll always return a cluster size of at least
	 * 1 page, since the original fault must always
	 * be processed
	 */
	*length = PAGE_SIZE;
	if (speculative_reads_disabled || fault_info == NULL) {
		/*
		 * no cluster... just fault the page in
		 */
		return;
	}
	orig_start = *start;
	target_start = orig_start;
	cluster_size = round_page(fault_info->cluster_size);
	behavior = fault_info->behavior;

	vm_object_lock(object);

	if (object->pager == MEMORY_OBJECT_NULL)
		goto out;	/* pager is gone for this object, nothing more to do */

	vnode_pager_get_isSSD(object->pager, &isSSD);

	min_ph_size = round_page(preheat_min_bytes);
	max_ph_size = round_page(preheat_max_bytes);

#if !CONFIG_EMBEDDED
	if (isSSD) {
		min_ph_size /= 2;
		max_ph_size /= 8;

		if (min_ph_size & PAGE_MASK_64) {
			min_ph_size = trunc_page(min_ph_size);
		}
		if (max_ph_size & PAGE_MASK_64) {
			max_ph_size = trunc_page(max_ph_size);
		}
	}
#endif /* !CONFIG_EMBEDDED */

	if (min_ph_size < PAGE_SIZE)
		min_ph_size = PAGE_SIZE;

	if (max_ph_size < PAGE_SIZE)
		max_ph_size = PAGE_SIZE;
	else if (max_ph_size > MAX_UPL_TRANSFER_BYTES)
		max_ph_size = MAX_UPL_TRANSFER_BYTES;

	if (max_length > max_ph_size)
		max_length = max_ph_size;

	if (max_length <= PAGE_SIZE)
		goto out;
	if (object->internal)
		object_size = object->vo_size;
	else
		vnode_pager_get_object_size(object->pager, &object_size);

	object_size = round_page_64(object_size);

	if (orig_start >= object_size) {
		/*
		 * fault occurred beyond the EOF...
		 * we need to punt w/o changing the
		 * starting offset
		 */
		goto out;
	}
	if (object->pages_used > object->pages_created) {
		/*
		 * must have wrapped our 32 bit counters
		 * to reset
		 */
		object->pages_used = object->pages_created = 0;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {

	default:
		behavior = VM_BEHAVIOR_DEFAULT;
	case VM_BEHAVIOR_DEFAULT:
		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
			goto out;

		if (sequential_run >= (3 * PAGE_SIZE)) {
			pre_heat_size = sequential_run + PAGE_SIZE;

			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
				look_behind = FALSE;
			else
				look_ahead = FALSE;

			*io_streaming = 1;
		} else {
			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
				/*
				 * not enough history yet... start with the
				 * minimum preheat size
				 */
				pre_heat_size = min_ph_size;
			} else {
				/*
				 * Linear growth in PH size: The maximum size is max_length...
				 * this calculation will result in a size that is neither a
				 * power of 2 nor a multiple of PAGE_SIZE... so round
				 * it up to the nearest PAGE_SIZE boundary
				 */
				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;

				if (pre_heat_size < min_ph_size)
					pre_heat_size = min_ph_size;
				else
					pre_heat_size = round_page(pre_heat_size);
			}
		}
		break;

	case VM_BEHAVIOR_RANDOM:
		if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
			goto out;
		break;

	case VM_BEHAVIOR_SEQUENTIAL:
		if ((pre_heat_size = cluster_size) == 0)
			pre_heat_size = sequential_run + PAGE_SIZE;
		look_behind = FALSE;
		*io_streaming = 1;
		break;

	case VM_BEHAVIOR_RSEQNTL:
		if ((pre_heat_size = cluster_size) == 0)
			pre_heat_size = sequential_run + PAGE_SIZE;
		look_ahead = FALSE;
		*io_streaming = 1;
		break;
	}
	throttle_limit = (uint32_t) max_length;
	assert(throttle_limit == max_length);

	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
		if (max_length > throttle_limit)
			max_length = throttle_limit;
	}
	if (pre_heat_size > max_length)
		pre_heat_size = max_length;

	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {

		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;

		if (consider_free < vm_page_throttle_limit) {
			pre_heat_size = trunc_page(pre_heat_size / 16);
		} else if (consider_free < vm_page_free_target) {
			pre_heat_size = trunc_page(pre_heat_size / 4);
		}

		if (pre_heat_size < min_ph_size)
			pre_heat_size = min_ph_size;
	}
	if (look_ahead == TRUE) {
		if (look_behind == TRUE) {
			/*
			 * if we get here it's due to a random access...
			 * so we want to center the original fault address
			 * within the cluster we will issue... make sure
			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
			 * necessarily an even number of pages so we need to truncate
			 * the result to a PAGE_SIZE boundary
			 */
			head_size = trunc_page(pre_heat_size / 2);

			if (target_start > head_size)
				target_start -= head_size;
			else
				target_start = 0;

			/*
			 * 'target_start' at this point represents the beginning offset
			 * of the cluster we are considering... 'orig_start' will be in
			 * the center of this cluster if we didn't have to clip the start
			 * due to running into the start of the file
			 */
		}
		if ((target_start + pre_heat_size) > object_size)
			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
		/*
		 * at this point calculate the number of pages beyond the original fault
		 * address that we want to consider... this is guaranteed not to extend beyond
		 * the current EOF...
		 */
		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
	} else {
		if (pre_heat_size > target_start) {
			/*
			 * since pre_heat_size is always smaller than 2^32,
			 * if it is larger than target_start (a 64 bit value)
			 * it is safe to clip target_start to 32 bits
			 */
			pre_heat_size = (vm_size_t) target_start;
		}
		tail_size = 0;
	}
	assert( !(target_start & PAGE_MASK_64));
	assert( !(pre_heat_size & PAGE_MASK_64));

	if (pre_heat_size <= PAGE_SIZE)
		goto out;

	if (look_behind == TRUE) {
		/*
		 * take a look at the pages before the original
		 * faulting offset... recalculate this in case
		 * we had to clip 'pre_heat_size' above to keep
		 * from running past the EOF.
		 */
		head_size = pre_heat_size - tail_size - PAGE_SIZE;

		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
			/*
			 * don't poke below the lowest offset
			 */
			if (offset < fault_info->lo_offset)
				break;
			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*start -= PAGE_SIZE_64;
			*length += PAGE_SIZE;
		}
	}
	if (look_ahead == TRUE) {
		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
			/*
			 * don't poke above the highest offset
			 */
			if (offset >= fault_info->hi_offset)
				break;
			assert(offset < object_size);

			/*
			 * for external objects or internal objects w/o a pager,
			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
			 */
			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
				break;
			}
			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
				/*
				 * don't bridge resident pages
				 */
				break;
			}
			*length += PAGE_SIZE;
		}
	}
out:
	if (*length > max_length)
		*length = max_length;

	vm_object_unlock(object);

	DTRACE_VM1(clustersize, vm_size_t, *length);
}
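
/*
 * Worked example (illustrative only): for a random-access fault at offset
 * 0x40000 with pre_heat_size = 64KB and both look_behind and look_ahead
 * enabled, head_size = trunc_page(64KB / 2) = 32KB, so the candidate
 * cluster starts at 0x38000.  Pages are then added in each direction only
 * while they are neither resident nor known-absent, so the returned
 * *start / *length may end up covering less than the full 64KB window.
 */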
/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the VM object rather than on a UPL
 */

kern_return_t
vm_object_page_op(
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_page_t		dst_page;

	vm_object_lock(object);

	if (ops & UPL_POP_PHYSICAL) {
		if (object->phys_contiguous) {
			if (phys_entry) {
				*phys_entry = (ppnum_t)
					(object->vo_shadow_offset >> PAGE_SHIFT);
			}
			vm_object_unlock(object);
			return KERN_SUCCESS;
		} else {
			vm_object_unlock(object);
			return KERN_INVALID_OBJECT;
		}
	}
	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	while (TRUE) {
		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		/* Sync up on getting the busy bit */
		if ((dst_page->busy || dst_page->cleaning) &&
		    (((ops & UPL_POP_SET) &&
		      (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
			/* someone else is playing with the page, we will */
			/* have to wait */
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
			continue;
		}

		if (ops & UPL_POP_DUMP) {
			if (dst_page->pmapped == TRUE)
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));

			VM_PAGE_FREE(dst_page);
			break;
		}

		if (flags) {
			*flags = 0;

			/* Get the condition of flags before requested ops */
			/* are undertaken */

			if (dst_page->dirty) *flags |= UPL_POP_DIRTY;
			if (dst_page->free_when_done) *flags |= UPL_POP_PAGEOUT;
			if (dst_page->precious) *flags |= UPL_POP_PRECIOUS;
			if (dst_page->absent) *flags |= UPL_POP_ABSENT;
			if (dst_page->busy) *flags |= UPL_POP_BUSY;
		}
		/* The caller should have made a call either contingent with */
		/* or prior to this call to set UPL_POP_BUSY */
		if (ops & UPL_POP_SET) {
			/* The protection granted with this assert will */
			/* not be complete. If the caller violates the */
			/* convention and attempts to change page state */
			/* without first setting busy we may not see it */
			/* because the page may already be busy.  However */
			/* if such violations occur we will assert sooner */
			/* or later. */
			assert(dst_page->busy || (ops & UPL_POP_BUSY));
			if (ops & UPL_POP_DIRTY) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			}
			if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = TRUE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
			if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
		}

		if (ops & UPL_POP_CLR) {
			assert(dst_page->busy);
			if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
			if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = FALSE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
			if (ops & UPL_POP_BUSY) {
				dst_page->busy = FALSE;
				PAGE_WAKEUP(dst_page);
			}
		}
		if (phys_entry) {
			/*
			 * The physical page number will remain valid
			 * only if the page is kept busy.
			 */
			assert(dst_page->busy);
			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
		}

		break;
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
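
/*
 * Usage sketch (illustrative only, with hypothetical "obj" and "off"):
 * a caller that needs a stable physical page number typically marks the
 * page busy, uses it, then clears busy again:
 *
 *	ppnum_t	phys;
 *	int	flags;
 *
 *	kr = vm_object_page_op(obj, off, UPL_POP_SET | UPL_POP_BUSY,
 *			       &phys, &flags);
 *	... use "phys" while the page stays busy ...
 *	kr = vm_object_page_op(obj, off, UPL_POP_CLR | UPL_POP_BUSY,
 *			       NULL, NULL);
 */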
/*
 * vm_object_range_op offers performance enhancement over
 * vm_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
vm_object_range_op(
	vm_object_t		object,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	uint32_t		*range)
{
	vm_object_offset_t	offset;
	vm_page_t		dst_page;

	if (offset_end - offset_beg > (uint32_t) -1) {
		/* range is too big and would overflow "*range" */
		return KERN_INVALID_ARGUMENT;
	}
	if (object->resident_page_count == 0) {
		if (range) {
			if (ops & UPL_ROP_PRESENT) {
				*range = 0;
			} else {
				*range = (uint32_t) (offset_end - offset_beg);
				assert(*range == (offset_end - offset_beg));
			}
		}
		return KERN_SUCCESS;
	}
	vm_object_lock(object);

	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	offset = offset_beg & ~PAGE_MASK_64;

	while (offset < offset_end) {
		dst_page = vm_page_lookup(object, offset);
		if (dst_page != VM_PAGE_NULL) {
			if (ops & UPL_ROP_DUMP) {
				if (dst_page->busy || dst_page->cleaning) {
					/*
					 * someone else is playing with the
					 * page, we will have to wait
					 */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
					/*
					 * need to relook the page up since its
					 * state may have changed while we slept
					 * it might even belong to a different object
					 * at this point
					 */
					continue;
				}
				if (dst_page->laundry)
					vm_pageout_steal_laundry(dst_page, FALSE);

				if (dst_page->pmapped == TRUE)
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));

				VM_PAGE_FREE(dst_page);

			} else if ((ops & UPL_ROP_ABSENT)
				   && (!dst_page->absent || dst_page->busy)) {
				break;
			}
		} else if (ops & UPL_ROP_PRESENT)
			break;

		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);

	if (range) {
		if (offset > offset_end)
			offset = offset_end;
		if (offset > offset_beg) {
			*range = (uint32_t) (offset - offset_beg);
			assert(*range == (offset - offset_beg));
		} else {
			*range = 0;
		}
	}
	return KERN_SUCCESS;
}
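
/*
 * Usage sketch (illustrative only, with hypothetical "obj" and offsets):
 * UPL_ROP_PRESENT reports how far a run of resident pages extends before
 * the first non-resident page is hit:
 *
 *	uint32_t range;
 *
 *	kr = vm_object_range_op(obj, start, end, UPL_ROP_PRESENT, &range);
 *	... "range" is the byte length of the leading resident run ...
 */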
/*
 * Used to point a pager directly to a range of memory (when the pager may be associated
 * with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
 * expect that the virtual address will denote the start of a range that is physically contiguous.
 */
kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t	object,
	memory_object_offset_t	offset,
	addr64_t		base_vaddr,
	vm_size_t		size)
{
	ppnum_t page_num;
	boolean_t clobbered_private;
	kern_return_t retval;
	vm_object_t pager_object;

	page_num = pmap_find_phys(kernel_pmap, base_vaddr);

	if (!page_num) {
		retval = KERN_FAILURE;
		goto out;
	}

	pager_object = memory_object_control_to_vm_object(object);

	if (!pager_object) {
		retval = KERN_FAILURE;
		goto out;
	}

	clobbered_private = pager_object->private;
	if (pager_object->private != TRUE) {
		vm_object_lock(pager_object);
		pager_object->private = TRUE;
		vm_object_unlock(pager_object);
	}
	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);

	if (retval != KERN_SUCCESS) {
		if (pager_object->private != clobbered_private) {
			vm_object_lock(pager_object);
			pager_object->private = clobbered_private;
			vm_object_unlock(pager_object);
		}
	}

out:
	return retval;
}
uint32_t scan_object_collision = 0;

void
vm_object_lock(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		mutex_pause(2);
	}
	lck_rw_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
}

boolean_t
vm_object_lock_avoid(vm_object_t object)
{
	if (object == vm_pageout_scan_wants_object) {
		scan_object_collision++;
		return TRUE;
	}
	return FALSE;
}

boolean_t
_vm_object_lock_try(vm_object_t object)
{
	boolean_t	retval;

	retval = lck_rw_try_lock_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE)
		object->Lock_owner = current_thread();
#endif
	return (retval);
}
boolean_t
vm_object_lock_try(vm_object_t object)
{
	/*
	 * Called from hibernate path so check before blocking.
	 */
	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
		mutex_pause(2);
	}
	return _vm_object_lock_try(object);
}

void
vm_object_lock_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	lck_rw_lock_shared(&object->Lock);
}

boolean_t
vm_object_lock_yield_shared(vm_object_t object)
{
	boolean_t retval = FALSE, force_yield = FALSE;

	vm_object_lock_assert_shared(object);

	force_yield = vm_object_lock_avoid(object);

	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);

	return (retval);
}

boolean_t
vm_object_lock_try_shared(vm_object_t object)
{
	if (vm_object_lock_avoid(object)) {
		mutex_pause(2);
	}
	return (lck_rw_try_lock_shared(&object->Lock));
}

boolean_t
vm_object_lock_upgrade(vm_object_t object)
{
	boolean_t	retval;

	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
#if DEVELOPMENT || DEBUG
	if (retval == TRUE)
		object->Lock_owner = current_thread();
#endif
	return (retval);
}

void
vm_object_unlock(vm_object_t object)
{
#if DEVELOPMENT || DEBUG
	if (object->Lock_owner) {
		if (object->Lock_owner != current_thread())
			panic("vm_object_unlock: not owner - %p\n", object);
		object->Lock_owner = 0;
	}
#endif
	lck_rw_done(&object->Lock);
}
unsigned int vm_object_change_wimg_mode_count = 0;

/*
 * The object must be locked
 */
void
vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
{
	vm_page_t p;

	vm_object_lock_assert_exclusive(object);

	vm_object_paging_wait(object, THREAD_UNINT);

	vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {

		if (!p->fictitious)
			pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
	}
	if (wimg_mode == VM_WIMG_USE_DEFAULT)
		object->set_cache_attr = FALSE;
	else
		object->set_cache_attr = TRUE;

	object->wimg_bits = wimg_mode;

	vm_object_change_wimg_mode_count++;
}
#if CONFIG_FREEZE

/*
 * This routine does the "relocation" of previously
 * compressed pages belonging to this object that are
 * residing in a number of compressed segments into
 * a set of compressed segments dedicated to hold
 * compressed pages belonging to this object.
 */

extern void *freezer_chead;
extern char *freezer_compressor_scratch_buf;
extern int c_freezer_compression_count;
extern AbsoluteTime c_freezer_last_yield_ts;

#define MAX_FREE_BATCH			32
#define FREEZER_DUTY_CYCLE_ON_MS	5
#define FREEZER_DUTY_CYCLE_OFF_MS	5

static int c_freezer_should_yield(void);

static int
c_freezer_should_yield()
{
	AbsoluteTime	cur_time;
	uint64_t	nsecs;

	assert(c_freezer_last_yield_ts);
	clock_get_uptime(&cur_time);

	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
	absolutetime_to_nanoseconds(cur_time, &nsecs);

	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS)
		return (1);
	return (0);
}
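
/*
 * c_freezer_should_yield() implements the duty cycle defined above: once
 * more than FREEZER_DUTY_CYCLE_ON_MS has elapsed since the last yield, the
 * freezer loops below call thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS)
 * and restamp c_freezer_last_yield_ts, so compression work is interleaved
 * with roughly equal-length pauses.
 */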
void
vm_object_compressed_freezer_done()
{
	vm_compressor_finished_filling(&freezer_chead);
}

void
vm_object_compressed_freezer_pageout(
	vm_object_t object)
{
	vm_page_t		p;
	vm_page_t		local_freeq = NULL;
	int			local_freed = 0;
	kern_return_t		retval = KERN_SUCCESS;
	int			obj_resident_page_count_snapshot = 0;

	assert(object != VM_OBJECT_NULL);
	assert(object->internal);

	vm_object_lock(object);

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {

		if (!object->pager_initialized) {

			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized)
				vm_object_compressor_pager_create(object);
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
		vm_object_offset_t	curr_offset = 0;

		/*
		 * Go through the object and make sure that any
		 * previously compressed pages are relocated into
		 * a compressed segment associated with our "freezer_chead".
		 */
		while (curr_offset < object->vo_size) {

			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);

			if (curr_offset == (vm_object_offset_t) -1)
				break;

			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);

			if (retval != KERN_SUCCESS)
				break;

			curr_offset += PAGE_SIZE_64;
		}
	}
	/*
	 * We can't hold the object lock while heading down into the compressed pager
	 * layer because we might need the kernel map lock down there to allocate new
	 * compressor data structures. And if this same object is mapped in the kernel
	 * and there's a fault on it, then that thread will want the object lock while
	 * holding the kernel map lock.
	 *
	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
	 * we won't be stuck in an infinite loop if the same page(s) keep getting
	 * decompressed. So we grab a snapshot of the number of pages in the object and
	 * we won't process any more than that number of pages.
	 */

	obj_resident_page_count_snapshot = object->resident_page_count;

	vm_object_activity_begin(object);

	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) {

		p = (vm_page_t)vm_page_queue_first(&object->memq);

		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);

		vm_page_lockspin_queues();

		if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {

			vm_page_unlock_queues();

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);

			vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
			vm_page_queue_enter(&object->memq, p, vm_page_t, listq);

			continue;
		}
		if (p->pmapped == TRUE) {
			int refmod_state, pmap_flags;

			if (p->dirty || p->precious) {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}

			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (p->dirty == FALSE && p->precious == FALSE) {
			/*
			 * Clean and non-precious page.
			 */
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);

			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
			continue;
		}

		if (p->laundry)
			vm_pageout_steal_laundry(p, TRUE);

		vm_page_queues_remove(p, TRUE);

		vm_page_unlock_queues();

		/*
		 * In case the compressor fails to compress this page, we need it at
		 * the back of the object memq so that we don't keep trying to process it.
		 * Make the move here while we have the object lock held.
		 */

		vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
		vm_page_queue_enter(&object->memq, p, vm_page_t, listq);

		/*
		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
		 *
		 * Mark the page busy so no one messes with it while we have the object lock dropped.
		 */
		p->busy = TRUE;

		vm_object_activity_begin(object);

		vm_object_unlock(object);

		/*
		 * arg3 == FALSE  tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
		 */
		if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) {
			/*
			 * page has already been un-tabled from the object via 'vm_page_remove'
			 */
			p->snext = local_freeq;
			local_freeq = p;
			local_freed++;

			if (local_freed >= MAX_FREE_BATCH) {

				vm_page_free_list(local_freeq, TRUE);

				local_freeq = NULL;
				local_freed = 0;
			}
			c_freezer_compression_count++;
		}
		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);

		if (local_freed == 0 && c_freezer_should_yield()) {

			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
			clock_get_uptime(&c_freezer_last_yield_ts);
		}

		vm_object_lock(object);
	}

	if (local_freeq) {
		vm_page_free_list(local_freeq, TRUE);

		local_freeq = NULL;
		local_freed = 0;
	}

	vm_object_activity_end(object);

	vm_object_unlock(object);

	if (c_freezer_should_yield()) {

		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
		clock_get_uptime(&c_freezer_last_yield_ts);
	}
}

#endif /* CONFIG_FREEZE */
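
/*
 * vm_object_pageout: push the resident pages of an internal object toward
 * the compressor.  Clean, non-precious pages are simply freed; dirty or
 * precious pages are pulled off the paging queues and handed to
 * vm_pageout_cluster() so the internal pageout queue compresses them.
 * A compressor pager is created for the object on the fly if needed.
 */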
void
vm_object_pageout(
	vm_object_t object)
{
	vm_page_t			p, next;
	struct vm_pageout_queue		*iq;

	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT)
		return;

	iq = &vm_pageout_queue_internal;

	assert(object != VM_OBJECT_NULL);

	vm_object_lock(object);

	if (!object->internal ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return;
	}

	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {

		if (!object->pager_initialized) {

			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);

			if (!object->pager_initialized)
				vm_object_compressor_pager_create(object);
		}

		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
			vm_object_unlock(object);
			return;
		}
	}

ReScan:
	next = (vm_page_t)vm_page_queue_first(&object->memq);
	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
		p = next;
		next = (vm_page_t)vm_page_queue_next(&next->listq);

		assert(p->vm_page_q_state != VM_PAGE_ON_FREE_Q);

		if ((p->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) ||
		    p->cleaning ||
		    p->laundry ||
		    p->busy ||
		    p->absent ||
		    p->error ||
		    p->fictitious ||
		    VM_PAGE_WIRED(p)) {
			/*
			 * Page is already being cleaned or can't be cleaned.
			 */
			continue;
		}

		/* Throw to the pageout queue */

		vm_page_lockspin_queues();

		if (vm_compressor_low_on_space()) {
			vm_page_unlock_queues();
			break;
		}

		if (VM_PAGE_Q_THROTTLED(iq)) {

			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1),
				    THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);

			vm_object_lock(object);
			goto ReScan;
		}
		assert(!p->fictitious);
		assert(!p->busy);
		assert(!p->absent);
		assert(!p->unusual);
		assert(!p->error);
		assert(!VM_PAGE_WIRED(p));
		assert(!p->cleaning);

		if (p->pmapped == TRUE) {
			int refmod_state;
			int pmap_options;

			/*
			 * Tell pmap the page should be accounted
			 * for as "compressed" if it's been modified.
			 */
			pmap_options =
				PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			if (p->dirty || p->precious) {
				/*
				 * We already know it's been modified,
				 * so tell pmap to account for it
				 * as "compressed".
				 */
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			}
			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
							       pmap_options,
							       NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(p, FALSE);
			}
		}

		if (!p->dirty && !p->precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(p);
			continue;
		}
		vm_page_queues_remove(p, TRUE);

		vm_pageout_cluster(p);

		vm_page_unlock_queues();
	}
	vm_object_unlock(object);
}
void
vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
{
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;

	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS)
		return;

	/*
	 * Create the request for I/O reprioritization.
	 * We use the noblock variant of zalloc because we're holding the object
	 * lock here and we could cause a deadlock in low memory conditions.
	 */
	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
	if (req == NULL)
		return;
	req->blkno = blkno;
	req->len = len;
	req->priority = prio;
	req->devvp = devvp;

	/* Insert request into the reprioritization list */
	IO_REPRIORITIZE_LIST_LOCK();
	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
	IO_REPRIORITIZE_LIST_UNLOCK();

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();
}
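
/*
 * vm_decmp_upl_reprioritize: raise the I/O priority of the real
 * (decompression) I/O UPL that backs a decompressed-page request.  The
 * block/length reprioritization info is copied out of the UPL without
 * holding the UPL lock across allocations, and one request per outstanding
 * extent is queued for the reprioritize thread below.
 */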
void
vm_decmp_upl_reprioritize(upl_t upl, int prio)
{
	int			offset;
	vm_object_t		object;
	io_reprioritize_req_t	req;
	struct vnode		*devvp = NULL;
	uint64_t		blkno;
	uint32_t		len;
	upl_t			io_upl;
	uint64_t		*io_upl_reprio_info;
	int			io_upl_size;

	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
		return;

	/*
	 * We don't want to perform any allocations with the upl lock held since that might
	 * result in a deadlock. If the system is low on memory, the pageout thread would
	 * try to pageout stuff and might wait on this lock. If we are waiting for the memory to
	 * be freed up by the pageout thread, it would be a deadlock.
	 */

	/* First step is just to get the size of the upl to find out how big the reprio info is */
	if (!upl_try_lock(upl))
		return;

	if (upl->decmp_io_upl == NULL) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		return;
	}

	io_upl = upl->decmp_io_upl;
	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
	io_upl_size = io_upl->size;
	upl_unlock(upl);

	/* Now perform the allocation */
	io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
	if (io_upl_reprio_info == NULL)
		return;
	/* Now again take the lock, recheck the state and grab out the required info */
	if (!upl_try_lock(upl))
		goto out;

	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
		upl_unlock(upl);
		goto out;
	}
	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));

	/* Get the VM object for this UPL */
	if (io_upl->flags & UPL_SHADOWED) {
		object = io_upl->map_object->shadow;
	} else {
		object = io_upl->map_object;
	}

	/* Get the dev vnode ptr for this object */
	if (!object || !object->pager ||
	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
		upl_unlock(upl);
		goto out;
	}

	upl_unlock(upl);

	/* Now we have all the information needed to do the expedite */

	offset = 0;
	while (offset < io_upl_size) {
		blkno	= io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
		len	= (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;

		/*
		 * This implementation may cause some spurious expedites due to the
		 * fact that we don't clean up the blkno & len from the upl_reprio_info
		 * even after the I/O is complete.
		 */

		if (blkno != 0 && len != 0) {
			/* Create the request for I/O reprioritization */
			req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
			assert(req != NULL);
			req->blkno = blkno;
			req->len = len;
			req->priority = prio;
			req->devvp = devvp;

			/* Insert request into the reprioritization list */
			IO_REPRIORITIZE_LIST_LOCK();
			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
			IO_REPRIORITIZE_LIST_UNLOCK();

			offset += len;
		} else {
			offset += PAGE_SIZE;
		}
	}

	/* Wakeup reprioritize thread */
	IO_REPRIO_THREAD_WAKEUP();

out:
	kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
}
void
vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	unsigned int	i, num_pages;
	int		cur_tier;

	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);

	/*
	 * Scan through all UPLs associated with the object to find the
	 * UPL containing the contended page.
	 */
	queue_iterate(&o->uplq, upl, upl_t, uplq) {
		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
			continue;
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		num_pages = (upl->size / PAGE_SIZE);

		/*
		 * For each page in the UPL page list, see if it matches the contended
		 * page and was issued as a low prio I/O.
		 */
		for (i = 0; i < num_pages; i++) {
			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
							      VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
					vm_decmp_upl_reprioritize(upl, cur_tier);
					break;
				}
				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
						      upl->upl_reprio_info[i], upl->upl_priority, 0);
				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
				break;
			}
		}
		/* Check if we found any hits */
		if (i != num_pages)
			break;
	}
}
wait_result_t
vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
{
	wait_result_t ret;

	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);

	if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
		/*
		 * Page is busy due to an I/O. Issue a reprioritize request if necessary.
		 */
		vm_page_handle_prio_inversion(o, m);
	}
	m->wanted = TRUE;
	ret = thread_sleep_vm_object(o, m, interruptible);
	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
	return ret;
}
static void
io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
{
	io_reprioritize_req_t	req = NULL;

	while (1) {

		IO_REPRIORITIZE_LIST_LOCK();
		if (queue_empty(&io_reprioritize_list)) {
			IO_REPRIORITIZE_LIST_UNLOCK();
			break;
		}

		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
		IO_REPRIORITIZE_LIST_UNLOCK();

		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
		zfree(io_reprioritize_req_zone, req);
	}

	IO_REPRIO_THREAD_CONTINUATION();
}