/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */

#include <debug.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <mach/sdt.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/queue.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

boolean_t vm_object_collapse_compressor_allowed = TRUE;

struct vm_counters vm_counters;

#if VM_OBJECT_TRACKING
boolean_t vm_object_tracking_inited = FALSE;
btlog_t *vm_object_tracking_btlog;

void
vm_object_tracking_init(void)
{
	int vm_object_tracking;

	vm_object_tracking = 1;
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
			   sizeof (vm_object_tracking));

	if (vm_object_tracking) {
		vm_object_tracking_btlog = btlog_create(
			VM_OBJECT_TRACKING_NUM_RECORDS,
			VM_OBJECT_TRACKING_BTDEPTH,
			TRUE /* caller_will_remove_entries_for_element? */);
		assert(vm_object_tracking_btlog);
		vm_object_tracking_inited = TRUE;
	}
}
#endif /* VM_OBJECT_TRACKING */
/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, but locked by the object's
 * lock.
 *
 * Each object also records the memory object reference
 * that is used by the kernel to request and write
 * back data (the memory object, field "pager"), etc...
 *
 * Virtual memory objects are allocated to provide
 * zero-filled memory (vm_allocate) or map a user-defined
 * memory object into a virtual address space (vm_map).
 *
 * Virtual memory objects that refer to a user-defined
 * memory object are called "permanent", because all changes
 * made in virtual memory are reflected back to the
 * memory manager, which may then store them permanently.
 * Other virtual memory objects are called "temporary",
 * meaning that changes need be written back only when
 * necessary to reclaim pages, and that storage associated
 * with the object can be discarded once it is no longer
 * mapped.
 *
 * A permanent memory object may be mapped into more
 * than one virtual address space.  Moreover, two threads
 * may attempt to make the first mapping of a memory
 * object concurrently.  Only one thread is allowed to
 * complete this mapping; all others wait until the
 * "pager_initialized" field is asserted, indicating
 * that the first thread has initialized all of the
 * necessary fields in the virtual memory object structure.
 *
 * The kernel relies on a *default memory manager* to
 * provide backing storage for the zero-filled virtual
 * memory objects.  The pager memory objects associated
 * with these temporary virtual memory objects are only
 * requested from the default memory manager when it
 * becomes necessary.  Virtual memory objects
 * that depend on the default memory manager are called
 * "internal".  The "pager_created" field is provided to
 * indicate whether these ports have ever been allocated.
 *
 * The kernel may also create virtual memory objects to
 * hold changed pages after a copy-on-write operation.
 * In this case, the virtual memory object (and its
 * backing storage -- its memory object) only contain
 * those pages that have been changed.  The "shadow"
 * field refers to the virtual memory object that contains
 * the remainder of the contents.  The "shadow_offset"
 * field indicates where in the "shadow" these contents begin.
 * The "copy" field refers to a virtual memory object
 * to which changed pages must be copied before changing
 * this object, in order to implement another form
 * of copy-on-write optimization.
 *
 * The virtual memory object structure also records
 * the attributes associated with its memory object.
 * The "pager_ready", "can_persist" and "copy_strategy"
 * fields represent those attributes.  The "cached_list"
 * field is used in the implementation of the persistence
 * attribute.
 *
 * ZZZ Continue this comment.
 */
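
/*
 * Editorial aside, not part of the original source: the reference-counting
 * lifecycle described above, in its simplest form.  vm_object_allocate(),
 * vm_object_reference() and vm_object_deallocate() are the routines defined
 * or used later in this file; the snippet is compiled out and purely
 * illustrative.
 */
#if 0
static void
vm_object_lifecycle_example(void)
{
	vm_object_t	object;

	object = vm_object_allocate(PAGE_SIZE);	/* one zero-fill page, ref_count == 1 */
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_reference(object);	/* second reference */
	vm_object_deallocate(object);	/* back to one reference */
	vm_object_deallocate(object);	/* last reference: object may be terminated */
}
#endif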

/* Forward declarations for internal functions. */
static kern_return_t	vm_object_terminate(
				vm_object_t	object);

extern void		vm_object_remove(
				vm_object_t	object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_do_bypass(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_release_pager(
				memory_object_t	pager,
				boolean_t	hashed);

static zone_t		vm_object_zone;		/* vm backing store zone */

/*
 * All wired-down kernel memory belongs to a single virtual
 * memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object	kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t		kernel_object;

static struct vm_object	compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
vm_object_t		compressor_object = &compressor_object_store;

/*
 * The submap object is used as a placeholder for vm_map_submap
 * operations.  The object is declared in vm_map.c because it
 * is exported by the vm_map module.  The storage is declared
 * here because it must be initialized here.
 */
static struct vm_object	vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

/*
 * Virtual memory objects are initialized from
 * a template (see vm_object_allocate).
 *
 * When adding a new field to the virtual memory
 * object structure, be sure to add initialization
 * (see _vm_object_allocate()).
 */
static struct vm_object	vm_object_template;

unsigned int vm_page_purged_wired = 0;
unsigned int vm_page_purged_busy = 0;
unsigned int vm_page_purged_others = 0;

#if VM_OBJECT_CACHE
/*
 * Virtual memory objects that are not referenced by
 * any address maps, but that are allowed to persist
 * (an attribute specified by the associated memory manager),
 * are kept in a queue (vm_object_cached_list).
 *
 * When an object from this queue is referenced again,
 * for example to make another address space mapping,
 * it must be removed from the queue.  That is, the
 * queue contains *only* objects with zero references.
 *
 * The kernel may choose to terminate objects from this
 * queue in order to reclaim storage.  The current policy
 * is to permit a fixed maximum number of unreferenced
 * objects (vm_object_cached_max).
 *
 * A spin lock (accessed by routines
 * vm_object_cache_{lock,lock_try,unlock}) governs the
 * object cache.  It must be held when objects are
 * added to or removed from the cache (in vm_object_terminate).
 * The routines that acquire a reference to a virtual
 * memory object based on one of the memory object ports
 * must also lock the cache.
 *
 * Ideally, the object cache should be more isolated
 * from the reference mechanism, so that the lock need
 * not be held to make simple references.
 */
static vm_object_t	vm_object_cache_trim(
				boolean_t called_from_vm_object_deallocate);

static void		vm_object_deactivate_all_pages(
				vm_object_t	object);

static int		vm_object_cached_high;		/* highest # cached objects */
static int		vm_object_cached_max = 512;	/* may be patched */

#define vm_object_cache_lock()		\
		lck_mtx_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try()	\
		lck_mtx_try_lock(&vm_object_cached_lock_data)

#endif	/* VM_OBJECT_CACHE */

static queue_head_t	vm_object_cached_list;
static uint32_t		vm_object_cache_pages_freed = 0;
static uint32_t		vm_object_cache_pages_moved = 0;
static uint32_t		vm_object_cache_pages_skipped = 0;
static uint32_t		vm_object_cache_adds = 0;
static uint32_t		vm_object_cached_count = 0;
static lck_mtx_t	vm_object_cached_lock_data;
static lck_mtx_ext_t	vm_object_cached_lock_data_ext;

static uint32_t		vm_object_page_grab_failed = 0;
static uint32_t		vm_object_page_grab_skipped = 0;
static uint32_t		vm_object_page_grab_returned = 0;
static uint32_t		vm_object_page_grab_pmapped = 0;
static uint32_t		vm_object_page_grab_reactivations = 0;

#define vm_object_cache_lock_spin()	\
		lck_mtx_lock_spin(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()	\
		lck_mtx_unlock(&vm_object_cached_lock_data)

static void	vm_object_cache_remove_locked(vm_object_t);


#define VM_OBJECT_HASH_COUNT		1024
#define VM_OBJECT_HASH_LOCK_COUNT	512

static lck_mtx_t	vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
static lck_mtx_ext_t	vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];

static queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone	*vm_object_hash_zone;

struct vm_object_hash_entry {
	queue_chain_t		hash_link;	/* hash chain link */
	memory_object_t		pager;		/* pager we represent */
	vm_object_t		object;		/* corresponding object */
	boolean_t		waiting;	/* someone waiting for
						 * termination */
};

typedef struct vm_object_hash_entry	*vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL	((vm_object_hash_entry_t) 0)

#define VM_OBJECT_HASH_SHIFT	5
#define vm_object_hash(pager) \
	((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))

#define vm_object_lock_hash(pager) \
	((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))

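/*
 * Editorial note, not part of the original source: vm_object_hash() above
 * drops the low VM_OBJECT_HASH_SHIFT (5) bits of the pager pointer --
 * presumably because pager pointers are aligned, so those bits carry little
 * information -- and then reduces the result modulo VM_OBJECT_HASH_COUNT
 * (1024) to pick a bucket.  vm_object_lock_hash() does the same over the
 * 512 bucket locks.  Compiled-out sketch:
 */
#if 0
static void
vm_object_hash_example(memory_object_t pager)
{
	int bucket_index = vm_object_hash(pager);	/* 0 .. 1023 */
	int lock_index   = vm_object_lock_hash(pager);	/* 0 .. 511  */

	(void) bucket_index;
	(void) lock_index;
}
#endif
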
void vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry);

static void	vm_object_reap(vm_object_t object);
static void	vm_object_reap_async(vm_object_t object);
static void	vm_object_reaper_thread(void);

static lck_mtx_t	vm_object_reaper_lock_data;
static lck_mtx_ext_t	vm_object_reaper_lock_data_ext;

static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
unsigned int vm_object_reap_count = 0;
unsigned int vm_object_reap_count_async = 0;

#define vm_object_reaper_lock()		\
		lck_mtx_lock(&vm_object_reaper_lock_data)
#define vm_object_reaper_lock_spin()	\
		lck_mtx_lock_spin(&vm_object_reaper_lock_data)
#define vm_object_reaper_unlock()	\
		lck_mtx_unlock(&vm_object_reaper_lock_data)

#if CONFIG_IOSCHED
/* I/O Re-prioritization request list */
queue_head_t	io_reprioritize_list;
lck_spin_t	io_reprioritize_list_lock;

#define IO_REPRIORITIZE_LIST_LOCK()	\
		lck_spin_lock(&io_reprioritize_list_lock)
#define IO_REPRIORITIZE_LIST_UNLOCK()	\
		lck_spin_unlock(&io_reprioritize_list_lock)

#define MAX_IO_REPRIORITIZE_REQS	8192
zone_t		io_reprioritize_req_zone;

/* I/O Re-prioritization thread */
int io_reprioritize_wakeup = 0;
static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);

#define IO_REPRIO_THREAD_WAKEUP()	thread_wakeup((event_t)&io_reprioritize_wakeup)
#define IO_REPRIO_THREAD_CONTINUATION()				\
{								\
	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);	\
	thread_block(io_reprioritize_thread);			\
}

void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
void vm_decmp_upl_reprioritize(upl_t, int);
#endif

#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#endif


static lck_mtx_t *
vm_object_hash_lock_spin(
	memory_object_t	pager)
{
	int	index;

	index = vm_object_lock_hash(pager);

	lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);

	return (&vm_object_hashed_lock_data[index]);
}

static void
vm_object_hash_unlock(lck_mtx_t *lck)
{
	lck_mtx_unlock(lck);
}


/*
 * vm_object_hash_lookup looks up a pager in the hashtable
 * and returns the corresponding entry, with optional removal.
 */
static vm_object_hash_entry_t
vm_object_hash_lookup(
	memory_object_t	pager,
	boolean_t	remove_entry)
{
	queue_t			bucket;
	vm_object_hash_entry_t	entry;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t)queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t)entry)) {
		if (entry->pager == pager) {
			if (remove_entry) {
				queue_remove(bucket, entry,
					     vm_object_hash_entry_t, hash_link);
			}
			return(entry);
		}
		entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
	}
	return(VM_OBJECT_HASH_ENTRY_NULL);
}

/*
 * vm_object_hash_insert enters the specified
 * pager / cache object association in the hashtable.
 */

static void
vm_object_hash_insert(
	vm_object_hash_entry_t	entry,
	vm_object_t		object)
{
	queue_t		bucket;

	assert(vm_object_hash_lookup(entry->pager, FALSE) == NULL);

	bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];

	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);

	if (object->hashed) {
		/*
		 * "hashed" was pre-set on this (new) object to avoid
		 * locking issues in vm_object_enter() (can't attempt to
		 * grab the object lock while holding the hash lock as
		 * a spinlock), so no need to set it here (and no need to
		 * hold the object's lock).
		 */
	} else {
		vm_object_lock_assert_exclusive(object);
		object->hashed = TRUE;
	}

	entry->object = object;
}

static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;

	entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
	entry->pager = pager;
	entry->object = VM_OBJECT_NULL;
	entry->waiting = FALSE;

	return(entry);
}

void
vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry)
{
	zfree(vm_object_hash_zone, entry);
}

/*
 * vm_object_allocate:
 *
 * Returns a new object with the given size.
 */

__private_extern__ void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
	    "vm_object_allocate, object 0x%X size 0x%X\n",
	    object, size, 0,0,0);

	*object = vm_object_template;
	vm_page_queue_init(&object->memq);
	queue_init(&object->msr_q);
#if UPL_DEBUG || CONFIG_IOSCHED
	queue_init(&object->uplq);
#endif
	vm_object_lock_init(object);
	object->vo_size = size;

#if VM_OBJECT_TRACKING_OP_CREATED
	if (vm_object_tracking_inited) {
		void	*bt[VM_OBJECT_TRACKING_BTDEPTH];
		int	numsaved = 0;

		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
		btlog_add_entry(vm_object_tracking_btlog,
				object,
				VM_OBJECT_TRACKING_OP_CREATED,
				bt,
				numsaved);
	}
#endif /* VM_OBJECT_TRACKING_OP_CREATED */
}

__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	vm_object_t	object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}


lck_grp_t		vm_object_lck_grp;
lck_grp_t		vm_object_cache_lck_grp;
lck_grp_attr_t		vm_object_lck_grp_attr;
lck_attr_t		vm_object_lck_attr;
lck_attr_t		kernel_object_lck_attr;
lck_attr_t		compressor_object_lck_attr;

/*
 * vm_object_bootstrap:
 *
 * Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	int		i;
	vm_size_t	vm_object_size;

	vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);

	vm_object_zone = zinit(vm_object_size,
			       round_page(512*1024),
			       round_page(12*1024),
			       "vm objects");
	zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);

	vm_object_init_lck_grp();

	queue_init(&vm_object_cached_list);

	lck_mtx_init_ext(&vm_object_cached_lock_data,
			 &vm_object_cached_lock_data_ext,
			 &vm_object_cache_lck_grp,
			 &vm_object_lck_attr);

	queue_init(&vm_object_reaper_queue);

	for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
		lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
				 &vm_object_hashed_lock_data_ext[i],
				 &vm_object_lck_grp,
				 &vm_object_lck_attr);
	}
	lck_mtx_init_ext(&vm_object_reaper_lock_data,
			 &vm_object_reaper_lock_data_ext,
			 &vm_object_lck_grp,
			 &vm_object_lck_attr);

	vm_object_hash_zone =
		zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
		      round_page(512*1024),
		      round_page(12*1024),
		      "vm object hash entries");
	zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
	zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);


	/*
	 * Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */


	vm_object_template.memq.prev = 0;
	vm_object_template.memq.next = 0;
#if 0
	/*
	 * We can't call vm_object_lock_init() here because that will
	 * allocate some memory and VM is not fully initialized yet.
	 * The lock will be initialized for each allocated object in
	 * _vm_object_allocate(), so we don't need to initialize it in
	 * the vm_object_template.
	 */
	vm_object_lock_init(&vm_object_template);
#endif
#if DEVELOPMENT || DEBUG
	vm_object_template.Lock_owner = 0;
#endif
	vm_object_template.vo_size = 0;
	vm_object_template.memq_hint = VM_PAGE_NULL;
	vm_object_template.ref_count = 1;
#if	TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif	/* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.wired_page_count = 0;
	vm_object_template.reusable_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.paging_in_progress = 0;
#if __LP64__
	vm_object_template.__object1_unused_bits = 0;
#endif /* __LP64__ */
	vm_object_template.activity_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.temporary = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.purgable = VM_PURGABLE_DENY;
	vm_object_template.purgeable_when_ripe = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.advisory_pageout = FALSE;
	vm_object_template.true_share = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.named = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;
	/* End bitfields */

	vm_object_template.cached_list.prev = NULL;
	vm_object_template.cached_list.next = NULL;
	vm_object_template.msr_q.prev = NULL;
	vm_object_template.msr_q.next = NULL;

	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.sequential = (vm_object_offset_t) 0;
	vm_object_template.pages_created = 0;
	vm_object_template.pages_used = 0;
	vm_object_template.scan_collisions = 0;
#if CONFIG_PHANTOM_CACHE
	vm_object_template.phantom_object_id = 0;
#endif
	vm_object_template.cow_hint = ~(vm_offset_t)0;
#if	MACH_ASSERT
	vm_object_template.paging_object = VM_OBJECT_NULL;
#endif	/* MACH_ASSERT */

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
	vm_object_template.set_cache_attr = FALSE;
	vm_object_template.object_slid = FALSE;
	vm_object_template.code_signed = FALSE;
	vm_object_template.hashed = FALSE;
	vm_object_template.transposed = FALSE;
	vm_object_template.mapping_in_progress = FALSE;
	vm_object_template.phantom_isssd = FALSE;
	vm_object_template.volatile_empty = FALSE;
	vm_object_template.volatile_fault = FALSE;
	vm_object_template.all_reusable = FALSE;
	vm_object_template.blocked_access = FALSE;
	vm_object_template.__object2_unused_bits = 0;
#if CONFIG_IOSCHED || UPL_DEBUG
	vm_object_template.uplq.prev = NULL;
	vm_object_template.uplq.next = NULL;
#endif /* UPL_DEBUG */
#ifdef VM_PIP_DEBUG
	bzero(&vm_object_template.pip_holders,
	      sizeof (vm_object_template.pip_holders));
#endif /* VM_PIP_DEBUG */

	vm_object_template.objq.next = NULL;
	vm_object_template.objq.prev = NULL;

	vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
	vm_object_template.purgeable_queue_group = 0;

	vm_object_template.vo_cache_ts = 0;

	vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;

	vm_object_template.io_tracking = FALSE;

#if CONFIG_SECLUDED_MEMORY
	vm_object_template.eligible_for_secluded = FALSE;
	vm_object_template.can_grab_secluded = FALSE;
#else /* CONFIG_SECLUDED_MEMORY */
	vm_object_template.__object3_unused_bits = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

#if DEBUG
	bzero(&vm_object_template.purgeable_owner_bt[0],
	      sizeof (vm_object_template.purgeable_owner_bt));
	vm_object_template.vo_purgeable_volatilizer = NULL;
	bzero(&vm_object_template.purgeable_volatilizer_bt[0],
	      sizeof (vm_object_template.purgeable_volatilizer_bt));
#endif /* DEBUG */

	/*
	 * Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

/*
 * Note that in the following size specifications, we need to add 1 because
 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
 */

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    kernel_object);

	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    compressor_object);
	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Initialize the "submap object".  Make it as large as the
	 * kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
			    vm_submap_object);
	vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called with
	 * non-zone memory.
	 */
	vm_object_reference(vm_submap_object);
}
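
/*
 * Editorial note, not part of the original source: every new field added to
 * struct vm_object needs a default in vm_object_template above, because
 * _vm_object_allocate() initializes each new object with a plain structure
 * copy of the template ("*object = vm_object_template;").  A field left out
 * of the template would start with whatever the previous occupant of the
 * zone element left behind.
 */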

#if CONFIG_IOSCHED
void
vm_io_reprioritize_init(void)
{
	kern_return_t	result;
	thread_t	thread = THREAD_NULL;

	/* Initialize the I/O reprioritization subsystem */
	lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
	queue_init(&io_reprioritize_list);

	io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
					 MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
					 4096, "io_reprioritize_req");

	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create io_reprioritize_thread");
	}
}
#endif

void
vm_object_reaper_init(void)
{
	kern_return_t	kr;
	thread_t	thread;

	kr = kernel_thread_start_priority(
		(thread_continue_t) vm_object_reaper_thread,
		NULL,
		BASEPRI_PREEMPT - 1,
		&thread);
	if (kr != KERN_SUCCESS) {
		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
	}
	thread_deallocate(thread);
}

__private_extern__ void
vm_object_init(void)
{
	/*
	 * Finish initializing the kernel object.
	 */
}


__private_extern__ void
vm_object_init_lck_grp(void)
{
	/*
	 * initialize the vm_object lock world
	 */
	lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
	lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
	lck_attr_setdefault(&vm_object_lck_attr);
	lck_attr_setdefault(&kernel_object_lck_attr);
	lck_attr_cleardebug(&kernel_object_lck_attr);
	lck_attr_setdefault(&compressor_object_lck_attr);
	lck_attr_cleardebug(&compressor_object_lck_attr);
}

#if VM_OBJECT_CACHE
#define	MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */
#endif

/*
 * vm_object_deallocate:
 *
 * Release a reference to the specified object,
 * gained either through a vm_object_allocate
 * or a vm_object_reference call.  When all references
 * are gone, storage associated with this object
 * may be relinquished.
 *
 * No object may be locked.
 */
unsigned long vm_object_deallocate_shared_successes = 0;
unsigned long vm_object_deallocate_shared_failures = 0;
unsigned long vm_object_deallocate_shared_swap_failures = 0;

__private_extern__ void
vm_object_deallocate(
	vm_object_t	object)
{
#if VM_OBJECT_CACHE
	boolean_t	retry_cache_trim = FALSE;
	uint32_t	try_failed_count = 0;
#endif
	vm_object_t	shadow = VM_OBJECT_NULL;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */

	if (object == VM_OBJECT_NULL)
		return;

	if (object == kernel_object || object == compressor_object) {
		vm_object_lock_shared(object);

		OSAddAtomic(-1, &object->ref_count);

		if (object->ref_count == 0) {
			if (object == kernel_object)
				panic("vm_object_deallocate: losing kernel_object\n");
			else
				panic("vm_object_deallocate: losing compressor_object\n");
		}
		vm_object_unlock(object);
		return;
	}

	if (object->ref_count == 2 &&
	    object->named) {
		/*
		 * This "named" object's reference count is about to
		 * drop from 2 to 1:
		 * we'll need to call memory_object_last_unmap().
		 */
	} else if (object->ref_count == 2 &&
		   object->internal &&
		   object->shadow != VM_OBJECT_NULL) {
		/*
		 * This internal object's reference count is about to
		 * drop from 2 to 1 and it has a shadow object:
		 * we'll want to try and collapse this object with its
		 * shadow.
		 */
	} else if (object->ref_count >= 2) {
		UInt32		original_ref_count;
		volatile UInt32	*ref_count_p;
		Boolean		atomic_swap;

		/*
		 * The object currently looks like it is not being
		 * kept alive solely by the reference we're about to release.
		 * Let's try and release our reference without taking
		 * all the locks we would need if we had to terminate the
		 * object (cache lock + exclusive object lock).
		 * Lock the object "shared" to make sure we don't race with
		 * anyone holding it "exclusive".
		 */
		vm_object_lock_shared(object);
		ref_count_p = (volatile UInt32 *) &object->ref_count;
		original_ref_count = object->ref_count;
		/*
		 * Test again as "ref_count" could have changed.
		 * "named" shouldn't change.
		 */
		if (original_ref_count == 2 &&
		    object->named) {
			/* need to take slow path for m_o_last_unmap() */
			atomic_swap = FALSE;
		} else if (original_ref_count == 2 &&
			   object->internal &&
			   object->shadow != VM_OBJECT_NULL) {
			/* need to take slow path for vm_object_collapse() */
			atomic_swap = FALSE;
		} else if (original_ref_count < 2) {
			/* need to take slow path for vm_object_terminate() */
			atomic_swap = FALSE;
		} else {
			/* try an atomic update with the shared lock */
			atomic_swap = OSCompareAndSwap(
				original_ref_count,
				original_ref_count - 1,
				(UInt32 *) &object->ref_count);
			if (atomic_swap == FALSE) {
				vm_object_deallocate_shared_swap_failures++;
				/* fall back to the slow path... */
			}
		}

		vm_object_unlock(object);

		if (atomic_swap) {
			/*
			 * ref_count was updated atomically !
			 */
			vm_object_deallocate_shared_successes++;
			return;
		}

		/*
		 * Someone else updated the ref_count at the same
		 * time and we lost the race.  Fall back to the usual
		 * slow but safe path...
		 */
		vm_object_deallocate_shared_failures++;
	}

	while (object != VM_OBJECT_NULL) {

		vm_object_lock(object);

		assert(object->ref_count > 0);

		/*
		 * If the object has a named reference, and only
		 * that reference would remain, inform the pager
		 * about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_mapping_wait(object, THREAD_UNINT);
				vm_object_mapping_begin(object);
				vm_object_unlock(object);

				memory_object_last_unmap(pager);

				vm_object_lock(object);
				vm_object_mapping_end(object);
			}
			assert(object->ref_count > 0);
		}

		/*
		 * Lose the reference. If other references
		 * remain, then we are done, unless we need
		 * to retry a cache trim.
		 * If it is the last reference, then keep it
		 * until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call */
		/* terminate again. */

		if ((object->ref_count > 1) || object->terminating) {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			vm_object_res_deallocate(object);

			if (object->ref_count == 1 &&
			    object->shadow != VM_OBJECT_NULL) {
				/*
				 * There's only one reference left on this
				 * VM object.  We can't tell if it's a valid
				 * one (from a mapping for example) or if this
				 * object is just part of a possibly stale and
				 * useless shadow chain.
				 * We would like to try and collapse it into
				 * its parent, but we don't have any pointers
				 * back to this parent object.
				 * But we can try and collapse this object with
				 * its own shadows, in case these are useless
				 * too...
				 * We can't bypass this object though, since we
				 * don't know if this last reference on it is
				 * meaningful or not.
				 */
				vm_object_collapse(object, 0, FALSE);
			}
			vm_object_unlock(object);
#if VM_OBJECT_CACHE
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
#endif
			return;
		}

		/*
		 * We have to wait for initialization
		 * before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);

			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

#if VM_OBJECT_CACHE
		/*
		 * If this object can persist, then enter it in
		 * the cache.  Otherwise, terminate it.
		 *
		 * NOTE:  Only permanent objects are cached, and
		 * permanent objects cannot have shadows.  This
		 * affects the residence counting logic in a minor
		 * way (can do it in-line, mostly).
		 */

		if ((object->can_persist) && (object->alive)) {
			/*
			 * Now it is safe to decrement reference count,
			 * and to return if reference count is > 0.
			 */

			vm_object_lock_assert_exclusive(object);
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);

				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				     VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 * Remove shadow now if we don't
			 * want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 * Enter the object onto the queue of
			 * cached objects, and deactivate
			 * all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
			    "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
			    object,
			    vm_object_cached_list.next,
			    vm_object_cached_list.prev,0,0);


			vm_object_unlock(object);

			try_failed_count = 0;
			for (;;) {
				vm_object_cache_lock();

				/*
				 * if we try to take a regular lock here
				 * we risk deadlocking against someone
				 * holding a lock on this object while
				 * trying to vm_object_deallocate a different
				 * object
				 */
				if (vm_object_lock_try(object))
					break;
				vm_object_cache_unlock();
				try_failed_count++;

				mutex_pause(try_failed_count);  /* wait a bit */
			}
			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				    vm_object_t, cached_list);
			vm_object_cache_unlock();

			vm_object_deactivate_all_pages(object);
			vm_object_unlock(object);

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 * If we have a shadow that we need
			 * to deallocate, do so now, remembering
			 * to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 * Trim the cache. If the cache trim
			 * returns with a shadow for us to deallocate,
			 * then remember to retry the cache trim
			 * when we are done deallocating the shadow.
			 * Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;
		} else
#endif	/* VM_OBJECT_CACHE */
		{
			/*
			 * This object is not cachable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
			    "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
			    object, object->resident_page_count,
			    object->paging_in_progress,
			    (void *)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */
			/*
			 * Terminate this object. If it had a shadow,
			 * then deallocate it; otherwise, if we need
			 * to retry a cache trim, do so now; otherwise,
			 * we are done. "pageout" objects have a shadow,
			 * but maintain a "paging reference" rather than
			 * a normal reference.
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

			if (vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
#if VM_OBJECT_CACHE
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
#endif
			return;
		}
	}
#if VM_OBJECT_CACHE
	assert(! retry_cache_trim);
#endif
}

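/*
 * Editorial sketch, not part of the original source: the essence of the
 * lock-avoiding fast path used by vm_object_deallocate() above.  With the
 * object lock held shared, the reference count is dropped with a
 * compare-and-swap so the common case never takes the exclusive lock; a
 * failed swap, or a count that may reach zero, falls back to the slow path.
 * The real code also forces the slow path for "named" objects and for
 * internal objects with a collapsible shadow.  Compiled out.
 */
#if 0
static boolean_t
vm_object_ref_release_fast(vm_object_t object)
{
	UInt32	old_ref;

	vm_object_lock_shared(object);
	old_ref = object->ref_count;
	if (old_ref < 2 ||
	    !OSCompareAndSwap(old_ref, old_ref - 1,
			      (UInt32 *) &object->ref_count)) {
		/* last reference, or we raced: caller must take the slow path */
		vm_object_unlock(object);
		return FALSE;
	}
	vm_object_unlock(object);
	return TRUE;
}
#endif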


vm_page_t
vm_object_page_grab(
	vm_object_t	object)
{
	vm_page_t	p, next_p;
	int		p_limit = 0;
	int		p_skipped = 0;

	vm_object_lock_assert_exclusive(object);

	next_p = (vm_page_t)vm_page_queue_first(&object->memq);
	p_limit = MIN(50, object->resident_page_count);

	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {

		p = next_p;
		next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);

		if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
			goto move_page_in_obj;

		if (p->pmapped || p->dirty || p->precious) {
			vm_page_lockspin_queues();

			if (p->pmapped) {
				int refmod_state;

				vm_object_page_grab_pmapped++;

				if (p->reference == FALSE || p->dirty == FALSE) {

					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));

					if (refmod_state & VM_MEM_REFERENCED)
						p->reference = TRUE;
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}
				}
				if (p->dirty == FALSE && p->precious == FALSE) {

					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));

					if (refmod_state & VM_MEM_REFERENCED)
						p->reference = TRUE;
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(p, FALSE);
					}

					if (p->dirty == FALSE)
						goto take_page;
				}
			}
			if ((p->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) && p->reference == TRUE) {
				vm_page_activate(p);

				VM_STAT_INCR(reactivations);
				vm_object_page_grab_reactivations++;
			}
			vm_page_unlock_queues();
move_page_in_obj:
			vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
			vm_page_queue_enter(&object->memq, p, vm_page_t, listq);

			p_skipped++;
			continue;
		}
		vm_page_lockspin_queues();
take_page:
		vm_page_free_prepare_queues(p);
		vm_object_page_grab_returned++;
		vm_object_page_grab_skipped += p_skipped;

		vm_page_unlock_queues();

		vm_page_free_prepare_object(p, TRUE);

		return (p);
	}
	vm_object_page_grab_skipped += p_skipped;
	vm_object_page_grab_failed++;

	return (NULL);
}



#define EVICT_PREPARE_LIMIT	64
#define EVICT_AGE		10

static	clock_sec_t	vm_object_cache_aging_ts = 0;

static void
vm_object_cache_remove_locked(
	vm_object_t	object)
{
	assert(object->purgable == VM_PURGABLE_DENY);
	assert(object->wired_page_count == 0);

	queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
	object->objq.next = NULL;
	object->objq.prev = NULL;

	vm_object_cached_count--;
}

void
vm_object_cache_remove(
	vm_object_t	object)
{
	vm_object_cache_lock_spin();

	if (object->objq.next || object->objq.prev)
		vm_object_cache_remove_locked(object);

	vm_object_cache_unlock();
}

void
vm_object_cache_add(
	vm_object_t	object)
{
	clock_sec_t sec;
	clock_nsec_t nsec;

	assert(object->purgable == VM_PURGABLE_DENY);
	assert(object->wired_page_count == 0);

	if (object->resident_page_count == 0)
		return;
	clock_get_system_nanotime(&sec, &nsec);

	vm_object_cache_lock_spin();

	if (object->objq.next == NULL && object->objq.prev == NULL) {
		queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
		object->vo_cache_ts = sec + EVICT_AGE;
		object->vo_cache_pages_to_scan = object->resident_page_count;

		vm_object_cached_count++;
		vm_object_cache_adds++;
	}
	vm_object_cache_unlock();
}
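
/*
 * Editorial note, not part of the original source: vm_object_cache_add()
 * stamps the object with vo_cache_ts = now + EVICT_AGE (10 seconds).
 * vm_object_cache_evict() below compares the current time against that
 * stamp, so a cached object only becomes a candidate for page reclamation
 * after it has sat unreferenced on the cached list for at least EVICT_AGE
 * seconds.
 */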

int
vm_object_cache_evict(
	int	num_to_evict,
	int	max_objects_to_examine)
{
	vm_object_t	object = VM_OBJECT_NULL;
	vm_object_t	next_obj = VM_OBJECT_NULL;
	vm_page_t	local_free_q = VM_PAGE_NULL;
	vm_page_t	p;
	vm_page_t	next_p;
	int		object_cnt = 0;
	vm_page_t	ep_array[EVICT_PREPARE_LIMIT];
	int		ep_count;
	int		ep_limit;
	int		ep_index;
	int		ep_freed = 0;
	int		ep_moved = 0;
	uint32_t	ep_skipped = 0;
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/*
	 * do a couple of quick checks to see if it's
	 * worthwhile grabbing the lock
	 */
	if (queue_empty(&vm_object_cached_list)) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return (0);
	}
	clock_get_system_nanotime(&sec, &nsec);

	/*
	 * the object on the head of the queue has not
	 * yet sufficiently aged
	 */
	if (sec < vm_object_cache_aging_ts) {
		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
		return (0);
	}
	/*
	 * don't need the queue lock to find
	 * and lock an object on the cached list
	 */
	vm_page_unlock_queues();

	vm_object_cache_lock_spin();

	for (;;) {
		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);

		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {

			object = next_obj;
			next_obj = (vm_object_t)queue_next(&next_obj->objq);

			assert(object->purgable == VM_PURGABLE_DENY);
			assert(object->wired_page_count == 0);

			if (sec < object->vo_cache_ts) {
				KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);

				vm_object_cache_aging_ts = object->vo_cache_ts;
				object = VM_OBJECT_NULL;
				break;
			}
			if (!vm_object_lock_try_scan(object)) {
				/*
				 * just skip over this guy for now... if we find
				 * an object to steal pages from, we'll revisit in a bit...
				 * hopefully, the lock will have cleared
				 */
				KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);

				object = VM_OBJECT_NULL;
				continue;
			}
			if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
				/*
				 * this case really shouldn't happen, but it's not fatal
				 * so deal with it... if we don't remove the object from
				 * the list, we'll never move past it.
				 */
				KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);

				vm_object_cache_remove_locked(object);
				vm_object_unlock(object);
				object = VM_OBJECT_NULL;
				continue;
			}
			/*
			 * we have a locked object with pages...
			 * time to start harvesting
			 */
			break;
		}
		vm_object_cache_unlock();

		if (object == VM_OBJECT_NULL)
			break;

		/*
		 * object is locked at this point and
		 * has resident pages
		 */
		next_p = (vm_page_t)vm_page_queue_first(&object->memq);

		/*
		 * break the page scan into 2 pieces to minimize the time spent
		 * behind the page queue lock...
		 * the list of pages on these unused objects is likely to be cold
		 * w/r to the cpu cache which increases the time to scan the list
		 * tenfold...  and we may have a 'run' of pages we can't utilize that
		 * needs to be skipped over...
		 */
		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
			ep_limit = EVICT_PREPARE_LIMIT;
		ep_count = 0;

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {

			p = next_p;
			next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);

			object->vo_cache_pages_to_scan--;

			if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
				vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
				vm_page_queue_enter(&object->memq, p, vm_page_t, listq);

				ep_skipped++;
				continue;
			}
			if (p->wpmapped || p->dirty || p->precious) {
				vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
				vm_page_queue_enter(&object->memq, p, vm_page_t, listq);

				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
			}
			ep_array[ep_count++] = p;
		}
		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);

		vm_page_lockspin_queues();

		for (ep_index = 0; ep_index < ep_count; ep_index++) {

			p = ep_array[ep_index];

			if (p->wpmapped || p->dirty || p->precious) {
				p->reference = FALSE;
				p->no_cache = FALSE;

				/*
				 * we've already filtered out pages that are in the laundry
				 * so if we get here, this page can't be on the pageout queue
				 */
				vm_page_queues_remove(p, FALSE);
				vm_page_enqueue_inactive(p, TRUE);

				ep_moved++;
			} else {
#if CONFIG_PHANTOM_CACHE
				vm_phantom_cache_add_ghost(p);
#endif
				vm_page_free_prepare_queues(p);

				assert(p->pageq.next == 0 && p->pageq.prev == 0);
				/*
				 * Add this page to our list of reclaimed pages,
				 * to be freed later.
				 */
				p->snext = local_free_q;
				local_free_q = p;

				ep_freed++;
			}
		}
		vm_page_unlock_queues();

		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);

		if (local_free_q) {
			vm_page_free_list(local_free_q, TRUE);
			local_free_q = VM_PAGE_NULL;
		}
		if (object->vo_cache_pages_to_scan == 0) {
			KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);

			vm_object_cache_remove(object);

			KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
		}
		/*
		 * done with this object
		 */
		vm_object_unlock(object);
		object = VM_OBJECT_NULL;

		/*
		 * at this point, we are not holding any locks
		 */
		if ((ep_freed + ep_moved) >= num_to_evict) {
			/*
			 * we've reached our target for the
			 * number of pages to evict
			 */
			break;
		}
		vm_object_cache_lock_spin();
	}
	/*
	 * put the page queues lock back to the caller's
	 * idea of it
	 */
	vm_page_lock_queues();

	vm_object_cache_pages_freed += ep_freed;
	vm_object_cache_pages_moved += ep_moved;
	vm_object_cache_pages_skipped += ep_skipped;

	KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
	return (ep_freed);
}
1631
1632
b0d623f7 1633#if VM_OBJECT_CACHE
1c79356b
A
1634/*
1635 * Check to see whether we really need to trim
1636 * down the cache. If so, remove an object from
1637 * the cache, terminate it, and repeat.
1638 *
1639 * Called with, and returns with, cache lock unlocked.
1640 */
1641vm_object_t
1642vm_object_cache_trim(
1643 boolean_t called_from_vm_object_deallocate)
1644{
39037602 1645 vm_object_t object = VM_OBJECT_NULL;
1c79356b
A
1646 vm_object_t shadow;
1647
1648 for (;;) {
1649
1650 /*
1651 * If we no longer need to trim the cache,
1652 * then we are done.
1653 */
b0d623f7
A
1654 if (vm_object_cached_count <= vm_object_cached_max)
1655 return VM_OBJECT_NULL;
1c79356b
A
1656
1657 vm_object_cache_lock();
1658 if (vm_object_cached_count <= vm_object_cached_max) {
1659 vm_object_cache_unlock();
1660 return VM_OBJECT_NULL;
1661 }
1662
1663 /*
1664 * We must trim down the cache, so remove
1665 * the first object in the cache.
1666 */
1667 XPR(XPR_VM_OBJECT,
1668 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
b0d623f7
A
1669 vm_object_cached_list.next,
1670 vm_object_cached_list.prev, 0, 0, 0);
1c79356b
A
1671
1672 object = (vm_object_t) queue_first(&vm_object_cached_list);
9bccf70c
A
1673 if(object == (vm_object_t) &vm_object_cached_list) {
1674 /* something's wrong with the calling parameter or */
1675 /* the value of vm_object_cached_count, just fix */
1676 /* and return */
1677 if(vm_object_cached_max < 0)
1678 vm_object_cached_max = 0;
1679 vm_object_cached_count = 0;
1680 vm_object_cache_unlock();
1681 return VM_OBJECT_NULL;
1682 }
1c79356b
A
1683 vm_object_lock(object);
1684 queue_remove(&vm_object_cached_list, object, vm_object_t,
1685 cached_list);
1686 vm_object_cached_count--;
1687
b0d623f7 1688 vm_object_cache_unlock();
1c79356b
A
1689 /*
1690 * Since this object is in the cache, we know
1691 * that it is initialized and has no references.
1692 * Take a reference to avoid recursive deallocations.
1693 */
1694
1695 assert(object->pager_initialized);
1696 assert(object->ref_count == 0);
2d21ac55 1697 vm_object_lock_assert_exclusive(object);
1c79356b
A
1698 object->ref_count++;
1699
1700 /*
1701 * Terminate the object.
1702 * If the object had a shadow, we let vm_object_deallocate
1703 * deallocate it. "pageout" objects have a shadow, but
1704 * maintain a "paging reference" rather than a normal
1705 * reference.
1706 * (We are careful here to limit recursion.)
1707 */
1708 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
b0d623f7 1709
1c79356b
A
1710 if(vm_object_terminate(object) != KERN_SUCCESS)
1711 continue;
b0d623f7 1712
1c79356b
A
1713 if (shadow != VM_OBJECT_NULL) {
1714 if (called_from_vm_object_deallocate) {
1715 return shadow;
1716 } else {
1717 vm_object_deallocate(shadow);
1718 }
1719 }
1720 }
1721}
b0d623f7 1722#endif
1c79356b 1723
1c79356b
A
1724
1725/*
1726 * Routine: vm_object_terminate
1727 * Purpose:
1728 * Free all resources associated with a vm_object.
1729 * In/out conditions:
0b4e3aa0 1730 * Upon entry, the object must be locked,
1c79356b
A
1731 * and the object must have exactly one reference.
1732 *
1733 * The shadow object reference is left alone.
1734 *
 1735 * The object must be unlocked if it is found that pages
1736 * must be flushed to a backing object. If someone
1737 * manages to map the object while it is being flushed
1738 * the object is returned unlocked and unchanged. Otherwise,
1739 * upon exit, the cache will be unlocked, and the
1740 * object will cease to exist.
1741 */
0b4e3aa0 1742static kern_return_t
1c79356b 1743vm_object_terminate(
b0d623f7 1744 vm_object_t object)
1c79356b 1745{
b0d623f7 1746 vm_object_t shadow_object;
1c79356b
A
1747
1748 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
b0d623f7
A
1749 object, object->ref_count, 0, 0, 0);
1750
39037602
A
1751 vm_object_lock_assert_exclusive(object);
1752
b0d623f7
A
1753 if (!object->pageout && (!object->temporary || object->can_persist) &&
1754 (object->pager != NULL || object->shadow_severed)) {
1c79356b
A
1755 /*
1756 * Clear pager_trusted bit so that the pages get yanked
1757 * out of the object instead of cleaned in place. This
1758 * prevents a deadlock in XMM and makes more sense anyway.
1759 */
1760 object->pager_trusted = FALSE;
1761
b0d623f7 1762 vm_object_reap_pages(object, REAP_TERMINATE);
1c79356b 1763 }
0b4e3aa0
A
1764 /*
1765 * Make sure the object isn't already being terminated
1766 */
b0d623f7 1767 if (object->terminating) {
2d21ac55
A
1768 vm_object_lock_assert_exclusive(object);
1769 object->ref_count--;
0b4e3aa0 1770 assert(object->ref_count > 0);
0b4e3aa0
A
1771 vm_object_unlock(object);
1772 return KERN_FAILURE;
1773 }
1774
1775 /*
1776 * Did somebody get a reference to the object while we were
1777 * cleaning it?
1778 */
b0d623f7 1779 if (object->ref_count != 1) {
2d21ac55
A
1780 vm_object_lock_assert_exclusive(object);
1781 object->ref_count--;
0b4e3aa0 1782 assert(object->ref_count > 0);
1c79356b 1783 vm_object_res_deallocate(object);
1c79356b
A
1784 vm_object_unlock(object);
1785 return KERN_FAILURE;
1786 }
1787
1c79356b
A
1788 /*
1789 * Make sure no one can look us up now.
1790 */
1791
0b4e3aa0
A
1792 object->terminating = TRUE;
1793 object->alive = FALSE;
1c79356b 1794
6d2010ae
A
1795 if ( !object->internal && (object->objq.next || object->objq.prev))
1796 vm_object_cache_remove(object);
1797
b0d623f7
A
1798 if (object->hashed) {
1799 lck_mtx_t *lck;
1800
1801 lck = vm_object_hash_lock_spin(object->pager);
1802 vm_object_remove(object);
1803 vm_object_hash_unlock(lck);
1804 }
1c79356b
A
1805 /*
1806 * Detach the object from its shadow if we are the shadow's
55e303ae
A
1807 * copy. The reference we hold on the shadow must be dropped
1808 * by our caller.
1c79356b
A
1809 */
1810 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1811 !(object->pageout)) {
1812 vm_object_lock(shadow_object);
55e303ae
A
1813 if (shadow_object->copy == object)
1814 shadow_object->copy = VM_OBJECT_NULL;
1c79356b
A
1815 vm_object_unlock(shadow_object);
1816 }
1817
b0d623f7
A
1818 if (object->paging_in_progress != 0 ||
1819 object->activity_in_progress != 0) {
8f6c56a5
A
1820 /*
1821 * There are still some paging_in_progress references
1822 * on this object, meaning that there are some paging
1823 * or other I/O operations in progress for this VM object.
1824 * Such operations take some paging_in_progress references
1825 * up front to ensure that the object doesn't go away, but
1826 * they may also need to acquire a reference on the VM object,
1827 * to map it in kernel space, for example. That means that
1828 * they may end up releasing the last reference on the VM
1829 * object, triggering its termination, while still holding
1830 * paging_in_progress references. Waiting for these
1831 * pending paging_in_progress references to go away here would
1832 * deadlock.
1833 *
1834 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1835 * complete the VM object termination if it still holds
1836 * paging_in_progress references at this point.
1837 *
1838 * No new paging_in_progress should appear now that the
1839 * VM object is "terminating" and not "alive".
1840 */
1841 vm_object_reap_async(object);
8f6c56a5 1842 vm_object_unlock(object);
6601e61a
A
1843 /*
1844 * Return KERN_FAILURE to let the caller know that we
1845 * haven't completed the termination and it can't drop this
1846 * object's reference on its shadow object yet.
1847 * The reaper thread will take care of that once it has
1848 * completed this object's termination.
1849 */
1850 return KERN_FAILURE;
8f6c56a5 1851 }
b0d623f7
A
1852 /*
1853 * complete the VM object termination
1854 */
8f6c56a5
A
1855 vm_object_reap(object);
1856 object = VM_OBJECT_NULL;
8f6c56a5 1857
2d21ac55 1858 /*
b0d623f7
A
1859 * the object lock was released by vm_object_reap()
1860 *
2d21ac55
A
1861 * KERN_SUCCESS means that this object has been terminated
1862 * and no longer needs its shadow object but still holds a
1863 * reference on it.
1864 * The caller is responsible for dropping that reference.
1865 * We can't call vm_object_deallocate() here because that
1866 * would create a recursion.
1867 */
8f6c56a5
A
1868 return KERN_SUCCESS;
1869}
1870
b0d623f7 1871
8f6c56a5
A
1872/*
1873 * vm_object_reap():
1874 *
1875 * Complete the termination of a VM object after it's been marked
1876 * as "terminating" and "!alive" by vm_object_terminate().
1877 *
b0d623f7
A
1878 * The VM object must be locked by caller.
1879 * The lock will be released on return and the VM object is no longer valid.
8f6c56a5 1880 */
3e170ce0 1881
8f6c56a5
A
1882void
1883vm_object_reap(
1884 vm_object_t object)
1885{
1886 memory_object_t pager;
8f6c56a5 1887
2d21ac55
A
1888 vm_object_lock_assert_exclusive(object);
1889 assert(object->paging_in_progress == 0);
b0d623f7 1890 assert(object->activity_in_progress == 0);
8f6c56a5
A
1891
1892 vm_object_reap_count++;
1893
fe8ab488
A
1894 /*
 1895 * Disown this purgeable object to clean up its owner's purgeable
1896 * ledgers. We need to do this before disconnecting the object
1897 * from its pager, to properly account for compressed pages.
1898 */
1899 if (object->internal &&
1900 object->purgable != VM_PURGABLE_DENY) {
1901 vm_purgeable_accounting(object,
1902 object->purgable,
1903 TRUE); /* disown */
1904 }
1905
0b4e3aa0
A
1906 pager = object->pager;
1907 object->pager = MEMORY_OBJECT_NULL;
1908
1909 if (pager != MEMORY_OBJECT_NULL)
91447636 1910 memory_object_control_disable(object->pager_control);
0b4e3aa0 1911
1c79356b
A
1912 object->ref_count--;
1913#if TASK_SWAPPER
1914 assert(object->res_count == 0);
1915#endif /* TASK_SWAPPER */
1916
1c79356b
A
1917 assert (object->ref_count == 0);
1918
b0d623f7
A
1919 /*
 1920 * remove from purgeable queue if it's on one
1921 */
fe8ab488
A
1922 if (object->internal) {
1923 task_t owner;
1924
1925 owner = object->vo_purgeable_owner;
1926
3e170ce0
A
1927 VM_OBJECT_UNWIRED(object);
1928
fe8ab488
A
1929 if (object->purgable == VM_PURGABLE_DENY) {
1930 /* not purgeable: nothing to do */
1931 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1932 purgeable_q_t queue;
1933
1934 assert(object->vo_purgeable_owner == NULL);
1935
1936 queue = vm_purgeable_object_remove(object);
1937 assert(queue);
1938
1939 if (object->purgeable_when_ripe) {
1940 /*
1941 * Must take page lock for this -
1942 * using it to protect token queue
1943 */
1944 vm_page_lock_queues();
1945 vm_purgeable_token_delete_first(queue);
1946
1947 assert(queue->debug_count_objects>=0);
1948 vm_page_unlock_queues();
1949 }
2d21ac55 1950
39236c6e 1951 /*
fe8ab488
A
1952 * Update "vm_page_purgeable_count" in bulk and mark
1953 * object as VM_PURGABLE_EMPTY to avoid updating
1954 * "vm_page_purgeable_count" again in vm_page_remove()
1955 * when reaping the pages.
39236c6e 1956 */
fe8ab488
A
1957 unsigned int delta;
1958 assert(object->resident_page_count >=
1959 object->wired_page_count);
1960 delta = (object->resident_page_count -
1961 object->wired_page_count);
1962 if (delta != 0) {
1963 assert(vm_page_purgeable_count >= delta);
1964 OSAddAtomic(-delta,
1965 (SInt32 *)&vm_page_purgeable_count);
1966 }
1967 if (object->wired_page_count != 0) {
1968 assert(vm_page_purgeable_wired_count >=
1969 object->wired_page_count);
1970 OSAddAtomic(-object->wired_page_count,
1971 (SInt32 *)&vm_page_purgeable_wired_count);
1972 }
1973 object->purgable = VM_PURGABLE_EMPTY;
1974 }
1975 else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1976 object->purgable == VM_PURGABLE_EMPTY) {
1977 /* remove from nonvolatile queue */
1978 assert(object->vo_purgeable_owner == TASK_NULL);
1979 vm_purgeable_nonvolatile_dequeue(object);
1980 } else {
1981 panic("object %p in unexpected purgeable state 0x%x\n",
1982 object, object->purgable);
39236c6e 1983 }
fe8ab488
A
1984 assert(object->objq.next == NULL);
1985 assert(object->objq.prev == NULL);
2d21ac55
A
1986 }
1987
1c79356b
A
1988 /*
1989 * Clean or free the pages, as appropriate.
1990 * It is possible for us to find busy/absent pages,
1991 * if some faults on this object were aborted.
1992 */
1993 if (object->pageout) {
8f6c56a5 1994 assert(object->shadow != VM_OBJECT_NULL);
1c79356b
A
1995
1996 vm_pageout_object_terminate(object);
1997
b0d623f7 1998 } else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
2d21ac55 1999
b0d623f7 2000 vm_object_reap_pages(object, REAP_REAP);
1c79356b 2001 }
39037602 2002 assert(vm_page_queue_empty(&object->memq));
1c79356b 2003 assert(object->paging_in_progress == 0);
b0d623f7 2004 assert(object->activity_in_progress == 0);
1c79356b
A
2005 assert(object->ref_count == 0);
2006
1c79356b 2007 /*
0b4e3aa0
A
2008 * If the pager has not already been released by
2009 * vm_object_destroy, we need to terminate it and
2010 * release our reference to it here.
1c79356b 2011 */
0b4e3aa0
A
2012 if (pager != MEMORY_OBJECT_NULL) {
2013 vm_object_unlock(object);
b0d623f7 2014 vm_object_release_pager(pager, object->hashed);
0b4e3aa0 2015 vm_object_lock(object);
1c79356b 2016 }
0b4e3aa0 2017
1c79356b 2018 /* kick off anyone waiting on terminating */
0b4e3aa0 2019 object->terminating = FALSE;
1c79356b
A
2020 vm_object_paging_begin(object);
2021 vm_object_paging_end(object);
2022 vm_object_unlock(object);
2023
6601e61a
A
2024 object->shadow = VM_OBJECT_NULL;
2025
fe8ab488
A
2026#if VM_OBJECT_TRACKING
2027 if (vm_object_tracking_inited) {
2028 btlog_remove_entries_for_element(vm_object_tracking_btlog,
2029 object);
2030 }
2031#endif /* VM_OBJECT_TRACKING */
2032
2d21ac55 2033 vm_object_lock_destroy(object);
1c79356b
A
2034 /*
2035 * Free the space for the object.
2036 */
91447636 2037 zfree(vm_object_zone, object);
8f6c56a5
A
2038 object = VM_OBJECT_NULL;
2039}
2040
8f6c56a5 2041
6d2010ae 2042unsigned int vm_max_batch = 256;
8f6c56a5 2043
b0d623f7
A
2044#define V_O_R_MAX_BATCH 128
2045
6d2010ae
A
2046#define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
2047
b0d623f7
A
2048
2049#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
2050 MACRO_BEGIN \
2051 if (_local_free_q) { \
2052 if (do_disconnect) { \
2053 vm_page_t m; \
2054 for (m = _local_free_q; \
2055 m != VM_PAGE_NULL; \
39037602 2056 m = m->snext) { \
b0d623f7 2057 if (m->pmapped) { \
39037602 2058 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
b0d623f7
A
2059 } \
2060 } \
2061 } \
2062 vm_page_free_list(_local_free_q, TRUE); \
2063 _local_free_q = VM_PAGE_NULL; \
2064 } \
2065 MACRO_END
2066
8f6c56a5
A
2067
2068void
b0d623f7
A
2069vm_object_reap_pages(
2070 vm_object_t object,
2071 int reap_type)
8f6c56a5 2072{
b0d623f7
A
2073 vm_page_t p;
2074 vm_page_t next;
2075 vm_page_t local_free_q = VM_PAGE_NULL;
2076 int loop_count;
2077 boolean_t disconnect_on_release;
39236c6e 2078 pmap_flush_context pmap_flush_context_storage;
8f6c56a5 2079
b0d623f7 2080 if (reap_type == REAP_DATA_FLUSH) {
2d21ac55 2081 /*
b0d623f7
A
2082 * We need to disconnect pages from all pmaps before
2083 * releasing them to the free list
2d21ac55 2084 */
b0d623f7
A
2085 disconnect_on_release = TRUE;
2086 } else {
2087 /*
2088 * Either the caller has already disconnected the pages
2089 * from all pmaps, or we disconnect them here as we add
 2090 * them to our local list of pages to be released.
2091 * No need to re-disconnect them when we release the pages
2092 * to the free list.
2093 */
2094 disconnect_on_release = FALSE;
2095 }
2096
2097restart_after_sleep:
39037602 2098 if (vm_page_queue_empty(&object->memq))
b0d623f7 2099 return;
316670eb 2100 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
b0d623f7 2101
39236c6e
A
2102 if (reap_type == REAP_PURGEABLE)
2103 pmap_flush_context_init(&pmap_flush_context_storage);
2104
b0d623f7
A
2105 vm_page_lockspin_queues();
2106
39037602 2107 next = (vm_page_t)vm_page_queue_first(&object->memq);
b0d623f7 2108
39037602 2109 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
b0d623f7
A
2110
2111 p = next;
39037602 2112 next = (vm_page_t)vm_page_queue_next(&next->listq);
b0d623f7
A
2113
2114 if (--loop_count == 0) {
2115
2116 vm_page_unlock_queues();
2117
2118 if (local_free_q) {
39236c6e
A
2119
2120 if (reap_type == REAP_PURGEABLE) {
2121 pmap_flush(&pmap_flush_context_storage);
2122 pmap_flush_context_init(&pmap_flush_context_storage);
2123 }
b0d623f7
A
2124 /*
2125 * Free the pages we reclaimed so far
2126 * and take a little break to avoid
2127 * hogging the page queue lock too long
2128 */
2129 VM_OBJ_REAP_FREELIST(local_free_q,
2130 disconnect_on_release);
2131 } else
2132 mutex_pause(0);
2133
316670eb 2134 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
b0d623f7
A
2135
2136 vm_page_lockspin_queues();
2137 }
2138 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
2139
316670eb 2140 if (p->busy || p->cleaning) {
b0d623f7
A
2141
2142 vm_page_unlock_queues();
2143 /*
2144 * free the pages reclaimed so far
2145 */
2146 VM_OBJ_REAP_FREELIST(local_free_q,
2147 disconnect_on_release);
2148
2149 PAGE_SLEEP(object, p, THREAD_UNINT);
2150
2151 goto restart_after_sleep;
2152 }
39037602 2153 if (p->laundry)
316670eb 2154 vm_pageout_steal_laundry(p, TRUE);
b0d623f7
A
2155 }
2156 switch (reap_type) {
2157
2158 case REAP_DATA_FLUSH:
2159 if (VM_PAGE_WIRED(p)) {
2160 /*
2161 * this is an odd case... perhaps we should
2162 * zero-fill this page since we're conceptually
2163 * tossing its data at this point, but leaving
2164 * it on the object to honor the 'wire' contract
2165 */
2166 continue;
2167 }
2168 break;
2169
2170 case REAP_PURGEABLE:
2171 if (VM_PAGE_WIRED(p)) {
316670eb
A
2172 /*
2173 * can't purge a wired page
2174 */
b0d623f7
A
2175 vm_page_purged_wired++;
2176 continue;
2177 }
39037602 2178 if (p->laundry && !p->busy && !p->cleaning)
316670eb 2179 vm_pageout_steal_laundry(p, TRUE);
39037602 2180
fe8ab488 2181 if (p->cleaning || p->laundry || p->absent) {
316670eb
A
2182 /*
2183 * page is being acted upon,
2184 * so don't mess with it
2185 */
2186 vm_page_purged_others++;
2187 continue;
2188 }
b0d623f7
A
2189 if (p->busy) {
2190 /*
 2191 * We can't reclaim a busy page, but we can
 316670eb 2192 * deactivate it (it's not wired) to make
b0d623f7
A
2193 * sure that it gets considered by
2194 * vm_pageout_scan() later.
2195 */
39037602
A
2196 if (VM_PAGE_PAGEABLE(p))
2197 vm_page_deactivate(p);
b0d623f7
A
2198 vm_page_purged_busy++;
2199 continue;
2200 }
2201
39037602 2202 assert(VM_PAGE_OBJECT(p) != kernel_object);
b0d623f7
A
2203
2204 /*
2205 * we can discard this page...
2206 */
2207 if (p->pmapped == TRUE) {
b0d623f7
A
2208 /*
2209 * unmap the page
2210 */
39037602 2211 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
b0d623f7 2212 }
39236c6e 2213 vm_page_purged_count++;
b0d623f7
A
2214
2215 break;
2216
2217 case REAP_TERMINATE:
2218 if (p->absent || p->private) {
2219 /*
2220 * For private pages, VM_PAGE_FREE just
2221 * leaves the page structure around for
2222 * its owner to clean up. For absent
2223 * pages, the structure is returned to
2224 * the appropriate pool.
2225 */
2226 break;
2227 }
2228 if (p->fictitious) {
39037602 2229 assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
b0d623f7
A
2230 break;
2231 }
2232 if (!p->dirty && p->wpmapped)
39037602 2233 p->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
b0d623f7
A
2234
2235 if ((p->dirty || p->precious) && !p->error && object->alive) {
2236
3e170ce0 2237 assert(!object->internal);
39037602
A
2238
2239 p->free_when_done = TRUE;
2240
316670eb 2241 if (!p->laundry) {
39037602 2242 vm_page_queues_remove(p, TRUE);
316670eb
A
2243 /*
2244 * flush page... page will be freed
2245 * upon completion of I/O
2246 */
39037602 2247 (void)vm_pageout_cluster(p, FALSE, FALSE);
316670eb 2248 }
b0d623f7
A
2249 vm_page_unlock_queues();
2250 /*
2251 * free the pages reclaimed so far
2252 */
2253 VM_OBJ_REAP_FREELIST(local_free_q,
2254 disconnect_on_release);
2255
b0d623f7
A
2256 vm_object_paging_wait(object, THREAD_UNINT);
2257
2258 goto restart_after_sleep;
2259 }
2260 break;
2261
2262 case REAP_REAP:
2263 break;
2264 }
2265 vm_page_free_prepare_queues(p);
39037602 2266 assert(p->pageq.next == 0 && p->pageq.prev == 0);
b0d623f7
A
2267 /*
2268 * Add this page to our list of reclaimed pages,
2269 * to be freed later.
2270 */
39037602 2271 p->snext = local_free_q;
b0d623f7
A
2272 local_free_q = p;
2273 }
2274 vm_page_unlock_queues();
2275
2276 /*
2277 * Free the remaining reclaimed pages
2278 */
39236c6e
A
2279 if (reap_type == REAP_PURGEABLE)
2280 pmap_flush(&pmap_flush_context_storage);
2281
b0d623f7
A
2282 VM_OBJ_REAP_FREELIST(local_free_q,
2283 disconnect_on_release);
2284}
2285
2286
2287void
2288vm_object_reap_async(
2289 vm_object_t object)
2290{
2291 vm_object_lock_assert_exclusive(object);
2292
2293 vm_object_reaper_lock_spin();
2294
2295 vm_object_reap_count_async++;
2296
2297 /* enqueue the VM object... */
2298 queue_enter(&vm_object_reaper_queue, object,
2299 vm_object_t, cached_list);
2300
2301 vm_object_reaper_unlock();
2302
2303 /* ... and wake up the reaper thread */
2304 thread_wakeup((event_t) &vm_object_reaper_queue);
2305}
2306
2307
2308void
2309vm_object_reaper_thread(void)
2310{
2311 vm_object_t object, shadow_object;
2312
2313 vm_object_reaper_lock_spin();
2314
2315 while (!queue_empty(&vm_object_reaper_queue)) {
2316 queue_remove_first(&vm_object_reaper_queue,
2317 object,
2318 vm_object_t,
2319 cached_list);
2320
2321 vm_object_reaper_unlock();
2322 vm_object_lock(object);
2323
2324 assert(object->terminating);
2325 assert(!object->alive);
2326
2327 /*
2328 * The pageout daemon might be playing with our pages.
2329 * Now that the object is dead, it won't touch any more
2330 * pages, but some pages might already be on their way out.
2331 * Hence, we wait until the active paging activities have
2332 * ceased before we break the association with the pager
2333 * itself.
2334 */
2335 while (object->paging_in_progress != 0 ||
2336 object->activity_in_progress != 0) {
2337 vm_object_wait(object,
2338 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
2339 THREAD_UNINT);
2340 vm_object_lock(object);
2341 }
2342
2343 shadow_object =
2344 object->pageout ? VM_OBJECT_NULL : object->shadow;
6601e61a 2345
8f6c56a5
A
2346 vm_object_reap(object);
2347 /* cache is unlocked and object is no longer valid */
2348 object = VM_OBJECT_NULL;
2349
6601e61a
A
2350 if (shadow_object != VM_OBJECT_NULL) {
2351 /*
2352 * Drop the reference "object" was holding on
2353 * its shadow object.
2354 */
2355 vm_object_deallocate(shadow_object);
2356 shadow_object = VM_OBJECT_NULL;
2357 }
b0d623f7 2358 vm_object_reaper_lock_spin();
8f6c56a5
A
2359 }
2360
2361 /* wait for more work... */
2362 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
b0d623f7
A
2363
2364 vm_object_reaper_unlock();
2365
8f6c56a5
A
2366 thread_block((thread_continue_t) vm_object_reaper_thread);
2367 /*NOTREACHED*/
1c79356b
A
2368}
2369
2370/*
2371 * Routine: vm_object_pager_wakeup
2372 * Purpose: Wake up anyone waiting for termination of a pager.
2373 */
2374
0b4e3aa0 2375static void
1c79356b 2376vm_object_pager_wakeup(
0b4e3aa0 2377 memory_object_t pager)
1c79356b
A
2378{
2379 vm_object_hash_entry_t entry;
2380 boolean_t waiting = FALSE;
b0d623f7 2381 lck_mtx_t *lck;
1c79356b
A
2382
2383 /*
2384 * If anyone was waiting for the memory_object_terminate
2385 * to be queued, wake them up now.
2386 */
b0d623f7 2387 lck = vm_object_hash_lock_spin(pager);
1c79356b
A
2388 entry = vm_object_hash_lookup(pager, TRUE);
2389 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
2390 waiting = entry->waiting;
b0d623f7
A
2391 vm_object_hash_unlock(lck);
2392
1c79356b
A
2393 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
2394 if (waiting)
2395 thread_wakeup((event_t) pager);
2396 vm_object_hash_entry_free(entry);
2397 }
2398}
2399
2400/*
0b4e3aa0
A
2401 * Routine: vm_object_release_pager
2402 * Purpose: Terminate the pager and, upon completion,
2403 * release our last reference to it.
 2404 * Just like memory_object_terminate, except
 2405 * that we wake up anyone blocked in vm_object_enter
 2406 * waiting for the termination message to be queued
2407 * before calling memory_object_init.
1c79356b 2408 */
0b4e3aa0
A
2409static void
2410vm_object_release_pager(
b0d623f7
A
2411 memory_object_t pager,
2412 boolean_t hashed)
1c79356b 2413{
1c79356b 2414
0b4e3aa0
A
2415 /*
2416 * Terminate the pager.
2417 */
1c79356b 2418
0b4e3aa0 2419 (void) memory_object_terminate(pager);
1c79356b 2420
b0d623f7
A
2421 if (hashed == TRUE) {
2422 /*
2423 * Wakeup anyone waiting for this terminate
2424 * and remove the entry from the hash
2425 */
2426 vm_object_pager_wakeup(pager);
2427 }
0b4e3aa0
A
2428 /*
2429 * Release reference to pager.
2430 */
2431 memory_object_deallocate(pager);
2432}
1c79356b 2433
1c79356b 2434/*
0b4e3aa0 2435 * Routine: vm_object_destroy
1c79356b 2436 * Purpose:
0b4e3aa0 2437 * Shut down a VM object, despite the
1c79356b
A
2438 * presence of address map (or other) references
2439 * to the vm_object.
2440 */
2441kern_return_t
0b4e3aa0
A
2442vm_object_destroy(
2443 vm_object_t object,
91447636 2444 __unused kern_return_t reason)
1c79356b 2445{
0b4e3aa0 2446 memory_object_t old_pager;
1c79356b
A
2447
2448 if (object == VM_OBJECT_NULL)
2449 return(KERN_SUCCESS);
2450
2451 /*
0b4e3aa0 2452 * Remove the pager association immediately.
1c79356b
A
2453 *
2454 * This will prevent the memory manager from further
2455 * meddling. [If it wanted to flush data or make
2456 * other changes, it should have done so before performing
2457 * the destroy call.]
2458 */
2459
1c79356b 2460 vm_object_lock(object);
1c79356b
A
2461 object->can_persist = FALSE;
2462 object->named = FALSE;
0b4e3aa0 2463 object->alive = FALSE;
1c79356b 2464
b0d623f7
A
2465 if (object->hashed) {
2466 lck_mtx_t *lck;
2467 /*
2468 * Rip out the pager from the vm_object now...
2469 */
2470 lck = vm_object_hash_lock_spin(object->pager);
2471 vm_object_remove(object);
2472 vm_object_hash_unlock(lck);
2473 }
0b4e3aa0
A
2474 old_pager = object->pager;
2475 object->pager = MEMORY_OBJECT_NULL;
2476 if (old_pager != MEMORY_OBJECT_NULL)
91447636 2477 memory_object_control_disable(object->pager_control);
1c79356b
A
2478
2479 /*
b0d623f7
A
2480 * Wait for the existing paging activity (that got
2481 * through before we nulled out the pager) to subside.
2482 */
2483
2484 vm_object_paging_wait(object, THREAD_UNINT);
2485 vm_object_unlock(object);
2486
2487 /*
2488 * Terminate the object now.
2489 */
2490 if (old_pager != MEMORY_OBJECT_NULL) {
2491 vm_object_release_pager(old_pager, object->hashed);
2492
2493 /*
2494 * JMM - Release the caller's reference. This assumes the
2495 * caller had a reference to release, which is a big (but
2496 * currently valid) assumption if this is driven from the
2497 * vnode pager (it is holding a named reference when making
 2498 * this call).
2499 */
2500 vm_object_deallocate(object);
2501
2502 }
2503 return(KERN_SUCCESS);
2504}
2505
2506
6d2010ae
A
2507#if VM_OBJECT_CACHE
2508
b0d623f7
A
2509#define VM_OBJ_DEACT_ALL_STATS DEBUG
2510#if VM_OBJ_DEACT_ALL_STATS
2511uint32_t vm_object_deactivate_all_pages_batches = 0;
2512uint32_t vm_object_deactivate_all_pages_pages = 0;
2513#endif /* VM_OBJ_DEACT_ALL_STATS */
2514/*
2515 * vm_object_deactivate_all_pages
2516 *
2517 * Deactivate all pages in the specified object. (Keep its pages
2518 * in memory even though it is no longer referenced.)
2519 *
2520 * The object must be locked.
2521 */
2522static void
2523vm_object_deactivate_all_pages(
39037602 2524 vm_object_t object)
b0d623f7 2525{
39037602 2526 vm_page_t p;
b0d623f7
A
2527 int loop_count;
2528#if VM_OBJ_DEACT_ALL_STATS
2529 int pages_count;
2530#endif /* VM_OBJ_DEACT_ALL_STATS */
2531#define V_O_D_A_P_MAX_BATCH 256
2532
6d2010ae 2533 loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
b0d623f7
A
2534#if VM_OBJ_DEACT_ALL_STATS
2535 pages_count = 0;
2536#endif /* VM_OBJ_DEACT_ALL_STATS */
2537 vm_page_lock_queues();
39037602 2538 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
b0d623f7
A
2539 if (--loop_count == 0) {
2540#if VM_OBJ_DEACT_ALL_STATS
2541 hw_atomic_add(&vm_object_deactivate_all_pages_batches,
2542 1);
2543 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
2544 pages_count);
2545 pages_count = 0;
2546#endif /* VM_OBJ_DEACT_ALL_STATS */
2547 lck_mtx_yield(&vm_page_queue_lock);
6d2010ae 2548 loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
b0d623f7 2549 }
39037602 2550 if (!p->busy && (p->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q)) {
b0d623f7
A
2551#if VM_OBJ_DEACT_ALL_STATS
2552 pages_count++;
2553#endif /* VM_OBJ_DEACT_ALL_STATS */
2554 vm_page_deactivate(p);
2555 }
2556 }
2557#if VM_OBJ_DEACT_ALL_STATS
2558 if (pages_count) {
2559 hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
2560 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
2561 pages_count);
2562 pages_count = 0;
2563 }
2564#endif /* VM_OBJ_DEACT_ALL_STATS */
2565 vm_page_unlock_queues();
2566}
6d2010ae 2567#endif /* VM_OBJECT_CACHE */
b0d623f7
A
2568
2569
2570
2571/*
2572 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2573 * exist because of the need to handle shadow chains. When deactivating pages, we only
 2574 * want to deactivate the ones at the topmost level in the object chain. In order to do
2575 * this efficiently, the specified address range is divided up into "chunks" and we use
2576 * a bit map to keep track of which pages have already been processed as we descend down
2577 * the shadow chain. These chunk macros hide the details of the bit map implementation
2578 * as much as we can.
2579 *
2580 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2581 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2582 * order bit represents page 0 in the current range and highest order bit represents
2583 * page 63.
2584 *
2585 * For further convenience, we also use negative logic for the page state in the bit map.
2586 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2587 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2588 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2589 * out with all the bits set. The macros below hide all these details from the caller.
2590 */
2591
2592#define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2593 /* be the same as the number of bits in */
2594 /* the chunk_state_t type. We use 64 */
2595 /* just for convenience. */
2596
2597#define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2598
2599typedef uint64_t chunk_state_t;
2600
2601/*
2602 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2603 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2604 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2605 * looking at pages in that range. This can save us from unnecessarily chasing down the
2606 * shadow chain.
2607 */
2608
2609#define CHUNK_INIT(c, len) \
2610 MACRO_BEGIN \
2611 uint64_t p; \
2612 \
2613 (c) = 0xffffffffffffffffLL; \
2614 \
2615 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2616 MARK_PAGE_HANDLED(c, p); \
2617 MACRO_END
2618
6d2010ae 2619
b0d623f7
A
2620/*
2621 * Return true if all pages in the chunk have not yet been processed.
2622 */
2623
2624#define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2625
2626/*
2627 * Return true if the page at offset 'p' in the bit map has already been handled
2628 * while processing a higher level object in the shadow chain.
2629 */
2630
2631#define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0)
2632
2633/*
2634 * Mark the page at offset 'p' in the bit map as having been processed.
2635 */
2636
2637#define MARK_PAGE_HANDLED(c, p) \
2638MACRO_BEGIN \
2639 (c) = (c) & ~(1LL << (p)); \
2640MACRO_END
2641
2642
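/*
 * Illustrative sketch only (not part of vm_object.c): a minimal walk of a
 * 3-page range using the chunk bit-map macros defined above, in the same
 * way deactivate_pages_in_object() uses them. The function name and loop
 * body are hypothetical; only CHUNK_INIT, CHUNK_NOT_COMPLETE,
 * PAGE_ALREADY_HANDLED and MARK_PAGE_HANDLED come from this file.
 */
static void
chunk_state_example(void)
{
	chunk_state_t	chunk_state;
	uint64_t	p;

	/* bits 0..2 stay set; CHUNK_INIT pre-marks pages 3..63 as handled */
	CHUNK_INIT(chunk_state, 3 * PAGE_SIZE_64);

	for (p = 0; p < PAGES_IN_A_CHUNK && CHUNK_NOT_COMPLETE(chunk_state); p++) {
		if (PAGE_ALREADY_HANDLED(chunk_state, p))
			continue;	/* already seen at a higher shadow level */
		/* ... deactivate the page at this offset here ... */
		MARK_PAGE_HANDLED(chunk_state, p);
	}
	/* once pages 0..2 are marked, chunk_state == 0 and the loop stops early */
}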
2643/*
2644 * Return true if the page at the given offset has been paged out. Object is
2645 * locked upon entry and returned locked.
2646 */
2647
2648static boolean_t
2649page_is_paged_out(
2650 vm_object_t object,
2651 vm_object_offset_t offset)
2652{
39236c6e
A
2653 if (object->internal &&
2654 object->alive &&
2655 !object->terminating &&
2656 object->pager_ready) {
2657
39037602
A
2658 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2659 == VM_EXTERNAL_STATE_EXISTS) {
b0d623f7
A
2660 return TRUE;
2661 }
2662 }
b0d623f7
A
2663 return FALSE;
2664}
2665
2666
6d2010ae 2667
39236c6e
A
2668/*
2669 * madvise_free_debug
2670 *
2671 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2672 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2673 * simulate the loss of the page's contents as if the page had been
2674 * reclaimed and then re-faulted.
2675 */
2676#if DEVELOPMENT || DEBUG
2677int madvise_free_debug = 1;
2678#else /* DEBUG */
2679int madvise_free_debug = 0;
2680#endif /* DEBUG */
2681
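/*
 * Hypothetical user-space sketch (not part of vm_object.c), assuming the
 * standard Darwin madvise(2)/MADV_FREE interface: it dirties an anonymous
 * page and then marks it MADV_FREE. On a DEVELOPMENT/DEBUG kernel with
 * madvise_free_debug enabled, the page reads back zero-filled right away;
 * on a release kernel the old contents may persist until the VM actually
 * reclaims the page.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int
main(void)
{
	size_t len = (size_t)getpagesize();
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	memset(p, 0xab, len);			/* dirty the page */

	if (madvise(p, len, MADV_FREE) != 0)	/* contents may now be discarded */
		return 1;

	/* prints 0x00 once the page is "reclaimed" (immediately, with madvise_free_debug) */
	printf("first byte after MADV_FREE: 0x%02x\n", (unsigned char)p[0]);

	munmap(p, len);
	return 0;
}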
b0d623f7
A
2682/*
2683 * Deactivate the pages in the specified object and range. If kill_page is set, also discard any
2684 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2685 * a size that is less than or equal to the CHUNK_SIZE.
2686 */
2687
2688static void
2689deactivate_pages_in_object(
2690 vm_object_t object,
2691 vm_object_offset_t offset,
2692 vm_object_size_t size,
2693 boolean_t kill_page,
2694 boolean_t reusable_page,
b0d623f7 2695 boolean_t all_reusable,
39236c6e 2696 chunk_state_t *chunk_state,
3e170ce0
A
2697 pmap_flush_context *pfc,
2698 struct pmap *pmap,
2699 vm_map_offset_t pmap_offset)
b0d623f7
A
2700{
2701 vm_page_t m;
2702 int p;
6d2010ae
A
2703 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
2704 struct vm_page_delayed_work *dwp;
b0d623f7 2705 int dw_count;
6d2010ae 2706 int dw_limit;
b0d623f7
A
2707 unsigned int reusable = 0;
2708
b0d623f7
A
2709 /*
2710 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2711 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2712 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2713 * all the pages in the chunk.
2714 */
2715
2716 dwp = &dw_array[0];
2717 dw_count = 0;
6d2010ae 2718 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
b0d623f7 2719
3e170ce0 2720 for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
b0d623f7
A
2721
2722 /*
2723 * If this offset has already been found and handled in a higher level object, then don't
2724 * do anything with it in the current shadow object.
2725 */
2726
2727 if (PAGE_ALREADY_HANDLED(*chunk_state, p))
2728 continue;
2729
2730 /*
2731 * See if the page at this offset is around. First check to see if the page is resident,
2732 * then if not, check the existence map or with the pager.
2733 */
2734
2735 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2736
2737 /*
2738 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2739 * so that we won't bother looking for a page at this offset again if there are more
2740 * shadow objects. Then deactivate the page.
2741 */
2742
2743 MARK_PAGE_HANDLED(*chunk_state, p);
2744
316670eb 2745 if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
b0d623f7 2746 int clear_refmod;
fe8ab488 2747 int pmap_options;
b0d623f7 2748
39236c6e
A
2749 dwp->dw_mask = 0;
2750
fe8ab488 2751 pmap_options = 0;
b0d623f7 2752 clear_refmod = VM_MEM_REFERENCED;
39236c6e 2753 dwp->dw_mask |= DW_clear_reference;
b0d623f7
A
2754
2755 if ((kill_page) && (object->internal)) {
39236c6e
A
2756 if (madvise_free_debug) {
2757 /*
2758 * zero-fill the page now
2759 * to simulate it being
2760 * reclaimed and re-faulted.
2761 */
39037602 2762 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
39236c6e 2763 }
b0d623f7
A
2764 m->precious = FALSE;
2765 m->dirty = FALSE;
2766
2767 clear_refmod |= VM_MEM_MODIFIED;
39037602 2768 if (m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
d1ecb069
A
2769 /*
2770 * This page is now clean and
2771 * reclaimable. Move it out
2772 * of the throttled queue, so
2773 * that vm_pageout_scan() can
2774 * find it.
2775 */
2776 dwp->dw_mask |= DW_move_page;
2777 }
39037602
A
2778
2779 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
b0d623f7
A
2780
2781 if (reusable_page && !m->reusable) {
2782 assert(!all_reusable);
2783 assert(!object->all_reusable);
2784 m->reusable = TRUE;
2785 object->reusable_page_count++;
2786 assert(object->resident_page_count >= object->reusable_page_count);
2787 reusable++;
fe8ab488
A
2788 /*
2789 * Tell pmap this page is now
2790 * "reusable" (to update pmap
2791 * stats for all mappings).
2792 */
2793 pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
b0d623f7
A
2794 }
2795 }
fe8ab488 2796 pmap_options |= PMAP_OPTIONS_NOFLUSH;
39037602 2797 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
fe8ab488
A
2798 clear_refmod,
2799 pmap_options,
2800 (void *)pfc);
b0d623f7 2801
39037602 2802 if ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable))
b0d623f7 2803 dwp->dw_mask |= DW_move_page;
6d2010ae 2804
39236c6e
A
2805 if (dwp->dw_mask)
2806 VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2807 dw_count);
b0d623f7 2808
6d2010ae 2809 if (dw_count >= dw_limit) {
b0d623f7
A
2810 if (reusable) {
2811 OSAddAtomic(reusable,
2812 &vm_page_stats_reusable.reusable_count);
2813 vm_page_stats_reusable.reusable += reusable;
2814 reusable = 0;
2815 }
3e170ce0 2816 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
b0d623f7
A
2817
2818 dwp = &dw_array[0];
2819 dw_count = 0;
2820 }
2821 }
2822
2823 } else {
2824
2825 /*
2826 * The page at this offset isn't memory resident, check to see if it's
2827 * been paged out. If so, mark it as handled so we don't bother looking
2828 * for it in the shadow chain.
2829 */
2830
2831 if (page_is_paged_out(object, offset)) {
2832 MARK_PAGE_HANDLED(*chunk_state, p);
2833
2834 /*
2835 * If we're killing a non-resident page, then clear the page in the existence
2836 * map so we don't bother paging it back in if it's touched again in the future.
2837 */
2838
2839 if ((kill_page) && (object->internal)) {
39037602
A
2840
2841 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2842
2843 if (pmap != PMAP_NULL) {
3e170ce0
A
2844 /*
2845 * Tell pmap that this page
2846 * is no longer mapped, to
2847 * adjust the footprint ledger
2848 * because this page is no
2849 * longer compressed.
2850 */
2851 pmap_remove_options(
2852 pmap,
2853 pmap_offset,
2854 (pmap_offset +
2855 PAGE_SIZE),
2856 PMAP_OPTIONS_REMOVE);
2857 }
b0d623f7
A
2858 }
2859 }
2860 }
2861 }
2862
2863 if (reusable) {
2864 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2865 vm_page_stats_reusable.reusable += reusable;
2866 reusable = 0;
2867 }
2868
2869 if (dw_count)
3e170ce0 2870 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
b0d623f7
A
2871}
2872
2873
2874/*
2875 * Deactive a "chunk" of the given range of the object starting at offset. A "chunk"
2876 * will always be less than or equal to the given size. The total range is divided up
2877 * into chunks for efficiency and performance related to the locks and handling the shadow
2878 * chain. This routine returns how much of the given "size" it actually processed. It's
 2879 * up to the caller to loop and keep calling this routine until the entire range they want
2880 * to process has been done.
2881 */
2882
2883static vm_object_size_t
2884deactivate_a_chunk(
2885 vm_object_t orig_object,
2886 vm_object_offset_t offset,
2887 vm_object_size_t size,
2888 boolean_t kill_page,
2889 boolean_t reusable_page,
39236c6e 2890 boolean_t all_reusable,
3e170ce0
A
2891 pmap_flush_context *pfc,
2892 struct pmap *pmap,
2893 vm_map_offset_t pmap_offset)
b0d623f7
A
2894{
2895 vm_object_t object;
2896 vm_object_t tmp_object;
2897 vm_object_size_t length;
2898 chunk_state_t chunk_state;
2899
2900
2901 /*
2902 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2903 * remaining size the caller asked for.
2904 */
2905
2906 length = MIN(size, CHUNK_SIZE);
2907
2908 /*
2909 * The chunk_state keeps track of which pages we've already processed if there's
2910 * a shadow chain on this object. At this point, we haven't done anything with this
2911 * range of pages yet, so initialize the state to indicate no pages processed yet.
1c79356b
A
2912 */
2913
b0d623f7
A
2914 CHUNK_INIT(chunk_state, length);
2915 object = orig_object;
1c79356b
A
2916
2917 /*
b0d623f7
A
2918 * Start at the top level object and iterate around the loop once for each object
2919 * in the shadow chain. We stop processing early if we've already found all the pages
2920 * in the range. Otherwise we stop when we run out of shadow objects.
1c79356b 2921 */
0b4e3aa0 2922
b0d623f7
A
2923 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2924 vm_object_paging_begin(object);
2925
3e170ce0 2926 deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);
b0d623f7
A
2927
2928 vm_object_paging_end(object);
2929
2930 /*
 2931 * We've finished with this object; see if there's a shadow object. If
 2932 * there is, update the offset and lock the new object. We also turn off
 2933 * kill_page at this point since we only kill pages in the topmost object.
0b4e3aa0 2934 */
1c79356b 2935
b0d623f7
A
2936 tmp_object = object->shadow;
2937
2938 if (tmp_object) {
2939 kill_page = FALSE;
2940 reusable_page = FALSE;
2941 all_reusable = FALSE;
6d2010ae 2942 offset += object->vo_shadow_offset;
b0d623f7
A
2943 vm_object_lock(tmp_object);
2944 }
2945
2946 if (object != orig_object)
2947 vm_object_unlock(object);
2948
2949 object = tmp_object;
1c79356b 2950 }
b0d623f7
A
2951
2952 if (object && object != orig_object)
2953 vm_object_unlock(object);
2954
2955 return length;
1c79356b
A
2956}
2957
b0d623f7
A
2958
2959
1c79356b 2960/*
b0d623f7
A
2961 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2962 * we also clear the modified status of the page and "forget" any changes that have been made
2963 * to the page.
1c79356b 2964 */
1c79356b 2965
0b4e3aa0
A
2966__private_extern__ void
2967vm_object_deactivate_pages(
2968 vm_object_t object,
2969 vm_object_offset_t offset,
2970 vm_object_size_t size,
b0d623f7 2971 boolean_t kill_page,
3e170ce0
A
2972 boolean_t reusable_page,
2973 struct pmap *pmap,
2974 vm_map_offset_t pmap_offset)
0b4e3aa0 2975{
b0d623f7
A
2976 vm_object_size_t length;
2977 boolean_t all_reusable;
39236c6e 2978 pmap_flush_context pmap_flush_context_storage;
0b4e3aa0
A
2979
2980 /*
b0d623f7
A
2981 * We break the range up into chunks and do one chunk at a time. This is for
2982 * efficiency and performance while handling the shadow chains and the locks.
2983 * The deactivate_a_chunk() function returns how much of the range it processed.
2984 * We keep calling this routine until the given size is exhausted.
0b4e3aa0 2985 */
0b4e3aa0 2986
0b4e3aa0 2987
b0d623f7 2988 all_reusable = FALSE;
fe8ab488
A
2989#if 11
2990 /*
2991 * For the sake of accurate "reusable" pmap stats, we need
2992 * to tell pmap about each page that is no longer "reusable",
2993 * so we can't do the "all_reusable" optimization.
2994 */
2995#else
b0d623f7 2996 if (reusable_page &&
6d2010ae
A
2997 object->internal &&
2998 object->vo_size != 0 &&
2999 object->vo_size == size &&
b0d623f7
A
3000 object->reusable_page_count == 0) {
3001 all_reusable = TRUE;
3002 reusable_page = FALSE;
3003 }
fe8ab488 3004#endif
0b4e3aa0 3005
d1ecb069
A
3006 if ((reusable_page || all_reusable) && object->all_reusable) {
3007 /* This means MADV_FREE_REUSABLE has been called twice, which
3008 * is probably illegal. */
3009 return;
3010 }
d1ecb069 3011
39236c6e
A
3012 pmap_flush_context_init(&pmap_flush_context_storage);
3013
b0d623f7 3014 while (size) {
3e170ce0 3015 length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);
0b4e3aa0 3016
b0d623f7
A
3017 size -= length;
3018 offset += length;
3e170ce0 3019 pmap_offset += length;
b0d623f7 3020 }
39236c6e 3021 pmap_flush(&pmap_flush_context_storage);
91447636 3022
b0d623f7
A
3023 if (all_reusable) {
3024 if (!object->all_reusable) {
3025 unsigned int reusable;
3026
3027 object->all_reusable = TRUE;
3028 assert(object->reusable_page_count == 0);
3029 /* update global stats */
3030 reusable = object->resident_page_count;
3031 OSAddAtomic(reusable,
3032 &vm_page_stats_reusable.reusable_count);
3033 vm_page_stats_reusable.reusable += reusable;
3034 vm_page_stats_reusable.all_reusable_calls++;
3035 }
3036 } else if (reusable_page) {
3037 vm_page_stats_reusable.partial_reusable_calls++;
3038 }
3039}
0b4e3aa0 3040
b0d623f7
A
3041void
3042vm_object_reuse_pages(
3043 vm_object_t object,
3044 vm_object_offset_t start_offset,
3045 vm_object_offset_t end_offset,
3046 boolean_t allow_partial_reuse)
3047{
3048 vm_object_offset_t cur_offset;
3049 vm_page_t m;
3050 unsigned int reused, reusable;
0b4e3aa0 3051
b0d623f7
A
3052#define VM_OBJECT_REUSE_PAGE(object, m, reused) \
3053 MACRO_BEGIN \
3054 if ((m) != VM_PAGE_NULL && \
3055 (m)->reusable) { \
3056 assert((object)->reusable_page_count <= \
3057 (object)->resident_page_count); \
3058 assert((object)->reusable_page_count > 0); \
3059 (object)->reusable_page_count--; \
3060 (m)->reusable = FALSE; \
3061 (reused)++; \
fe8ab488
A
3062 /* \
3063 * Tell pmap that this page is no longer \
3064 * "reusable", to update the "reusable" stats \
3065 * for all the pmaps that have mapped this \
3066 * page. \
3067 */ \
39037602 3068 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
fe8ab488
A
3069 0, /* refmod */ \
3070 (PMAP_OPTIONS_CLEAR_REUSABLE \
3071 | PMAP_OPTIONS_NOFLUSH), \
3072 NULL); \
b0d623f7
A
3073 } \
3074 MACRO_END
2d21ac55 3075
b0d623f7
A
3076 reused = 0;
3077 reusable = 0;
0b4e3aa0 3078
b0d623f7 3079 vm_object_lock_assert_exclusive(object);
0b4e3aa0 3080
b0d623f7 3081 if (object->all_reusable) {
fe8ab488
A
3082 panic("object %p all_reusable: can't update pmap stats\n",
3083 object);
b0d623f7
A
3084 assert(object->reusable_page_count == 0);
3085 object->all_reusable = FALSE;
6d2010ae 3086 if (end_offset - start_offset == object->vo_size ||
b0d623f7
A
3087 !allow_partial_reuse) {
3088 vm_page_stats_reusable.all_reuse_calls++;
3089 reused = object->resident_page_count;
3090 } else {
3091 vm_page_stats_reusable.partial_reuse_calls++;
39037602 3092 vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
b0d623f7
A
3093 if (m->offset < start_offset ||
3094 m->offset >= end_offset) {
3095 m->reusable = TRUE;
3096 object->reusable_page_count++;
3097 assert(object->resident_page_count >= object->reusable_page_count);
3098 continue;
3099 } else {
3100 assert(!m->reusable);
3101 reused++;
0b4e3aa0
A
3102 }
3103 }
3104 }
b0d623f7
A
3105 } else if (object->resident_page_count >
3106 ((end_offset - start_offset) >> PAGE_SHIFT)) {
3107 vm_page_stats_reusable.partial_reuse_calls++;
3108 for (cur_offset = start_offset;
3109 cur_offset < end_offset;
3110 cur_offset += PAGE_SIZE_64) {
3111 if (object->reusable_page_count == 0) {
3112 break;
3113 }
3114 m = vm_page_lookup(object, cur_offset);
3115 VM_OBJECT_REUSE_PAGE(object, m, reused);
3116 }
3117 } else {
3118 vm_page_stats_reusable.partial_reuse_calls++;
39037602 3119 vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
b0d623f7
A
3120 if (object->reusable_page_count == 0) {
3121 break;
3122 }
3123 if (m->offset < start_offset ||
3124 m->offset >= end_offset) {
3125 continue;
3126 }
3127 VM_OBJECT_REUSE_PAGE(object, m, reused);
3128 }
0b4e3aa0 3129 }
b0d623f7
A
3130
3131 /* update global stats */
3132 OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
3133 vm_page_stats_reusable.reused += reused;
3134 vm_page_stats_reusable.reusable += reusable;
0b4e3aa0 3135}
1c79356b
A
3136
3137/*
3138 * Routine: vm_object_pmap_protect
3139 *
3140 * Purpose:
3141 * Reduces the permission for all physical
3142 * pages in the specified object range.
3143 *
3144 * If removing write permission only, it is
3145 * sufficient to protect only the pages in
3146 * the top-level object; only those pages may
3147 * have write permission.
3148 *
3149 * If removing all access, we must follow the
3150 * shadow chain from the top-level object to
3151 * remove access to all pages in shadowed objects.
3152 *
3153 * The object must *not* be locked. The object must
3154 * be temporary/internal.
3155 *
3156 * If pmap is not NULL, this routine assumes that
3157 * the only mappings for the pages are in that
3158 * pmap.
3159 */
3160
0b4e3aa0 3161__private_extern__ void
1c79356b 3162vm_object_pmap_protect(
39037602
A
3163 vm_object_t object,
3164 vm_object_offset_t offset,
91447636 3165 vm_object_size_t size,
1c79356b 3166 pmap_t pmap,
91447636 3167 vm_map_offset_t pmap_start,
1c79356b
A
3168 vm_prot_t prot)
3169{
39236c6e
A
3170 vm_object_pmap_protect_options(object, offset, size,
3171 pmap, pmap_start, prot, 0);
3172}
3173
3174__private_extern__ void
3175vm_object_pmap_protect_options(
39037602
A
3176 vm_object_t object,
3177 vm_object_offset_t offset,
39236c6e
A
3178 vm_object_size_t size,
3179 pmap_t pmap,
3180 vm_map_offset_t pmap_start,
3181 vm_prot_t prot,
3182 int options)
3183{
3184 pmap_flush_context pmap_flush_context_storage;
3185 boolean_t delayed_pmap_flush = FALSE;
3186
1c79356b 3187 if (object == VM_OBJECT_NULL)
39236c6e 3188 return;
91447636
A
3189 size = vm_object_round_page(size);
3190 offset = vm_object_trunc_page(offset);
1c79356b
A
3191
3192 vm_object_lock(object);
3193
2d21ac55
A
3194 if (object->phys_contiguous) {
3195 if (pmap != NULL) {
3196 vm_object_unlock(object);
39236c6e
A
3197 pmap_protect_options(pmap,
3198 pmap_start,
3199 pmap_start + size,
3200 prot,
3201 options & ~PMAP_OPTIONS_NOFLUSH,
3202 NULL);
2d21ac55
A
3203 } else {
3204 vm_object_offset_t phys_start, phys_end, phys_addr;
3205
6d2010ae 3206 phys_start = object->vo_shadow_offset + offset;
2d21ac55
A
3207 phys_end = phys_start + size;
3208 assert(phys_start <= phys_end);
6d2010ae 3209 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
2d21ac55
A
3210 vm_object_unlock(object);
3211
39236c6e
A
3212 pmap_flush_context_init(&pmap_flush_context_storage);
3213 delayed_pmap_flush = FALSE;
3214
2d21ac55
A
3215 for (phys_addr = phys_start;
3216 phys_addr < phys_end;
3217 phys_addr += PAGE_SIZE_64) {
39236c6e
A
3218 pmap_page_protect_options(
3219 (ppnum_t) (phys_addr >> PAGE_SHIFT),
3220 prot,
3221 options | PMAP_OPTIONS_NOFLUSH,
3222 (void *)&pmap_flush_context_storage);
3223 delayed_pmap_flush = TRUE;
2d21ac55 3224 }
39236c6e
A
3225 if (delayed_pmap_flush == TRUE)
3226 pmap_flush(&pmap_flush_context_storage);
2d21ac55
A
3227 }
3228 return;
3229 }
3230
55e303ae 3231 assert(object->internal);
de355530 3232
1c79356b 3233 while (TRUE) {
91447636 3234 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
1c79356b 3235 vm_object_unlock(object);
39236c6e
A
3236 pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
3237 options & ~PMAP_OPTIONS_NOFLUSH, NULL);
1c79356b
A
3238 return;
3239 }
3240
39236c6e
A
3241 pmap_flush_context_init(&pmap_flush_context_storage);
3242 delayed_pmap_flush = FALSE;
3243
3244 /*
3245 * if we are doing large ranges with respect to resident
 3246 * page count then we should iterate over pages, otherwise
3247 * inverse page look-up will be faster
3248 */
91447636 3249 if (ptoa_64(object->resident_page_count / 4) < size) {
9bccf70c
A
3250 vm_page_t p;
3251 vm_object_offset_t end;
1c79356b
A
3252
3253 end = offset + size;
3254
39037602 3255 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
39236c6e
A
3256 if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
3257 vm_map_offset_t start;
1c79356b 3258
39236c6e 3259 start = pmap_start + p->offset - offset;
1c79356b 3260
39236c6e
A
3261 if (pmap != PMAP_NULL)
3262 pmap_protect_options(
3263 pmap,
3264 start,
3265 start + PAGE_SIZE_64,
3266 prot,
3267 options | PMAP_OPTIONS_NOFLUSH,
3268 &pmap_flush_context_storage);
3269 else
3270 pmap_page_protect_options(
39037602 3271 VM_PAGE_GET_PHYS_PAGE(p),
39236c6e
A
3272 prot,
3273 options | PMAP_OPTIONS_NOFLUSH,
3274 &pmap_flush_context_storage);
3275 delayed_pmap_flush = TRUE;
3276 }
1c79356b 3277 }
39236c6e 3278
9bccf70c
A
3279 } else {
3280 vm_page_t p;
3281 vm_object_offset_t end;
3282 vm_object_offset_t target_off;
3283
3284 end = offset + size;
3285
39236c6e
A
3286 for (target_off = offset;
3287 target_off < end; target_off += PAGE_SIZE) {
3288
3289 p = vm_page_lookup(object, target_off);
3290
3291 if (p != VM_PAGE_NULL) {
3292 vm_object_offset_t start;
3293
3294 start = pmap_start + (p->offset - offset);
3295
3296 if (pmap != PMAP_NULL)
3297 pmap_protect_options(
3298 pmap,
3299 start,
3300 start + PAGE_SIZE_64,
3301 prot,
3302 options | PMAP_OPTIONS_NOFLUSH,
3303 &pmap_flush_context_storage);
3304 else
3305 pmap_page_protect_options(
39037602 3306 VM_PAGE_GET_PHYS_PAGE(p),
39236c6e
A
3307 prot,
3308 options | PMAP_OPTIONS_NOFLUSH,
3309 &pmap_flush_context_storage);
3310 delayed_pmap_flush = TRUE;
9bccf70c
A
3311 }
3312 }
39236c6e
A
3313 }
3314 if (delayed_pmap_flush == TRUE)
3315 pmap_flush(&pmap_flush_context_storage);
1c79356b
A
3316
3317 if (prot == VM_PROT_NONE) {
3318 /*
3319 * Must follow shadow chain to remove access
3320 * to pages in shadowed objects.
3321 */
39037602 3322 vm_object_t next_object;
1c79356b
A
3323
3324 next_object = object->shadow;
3325 if (next_object != VM_OBJECT_NULL) {
6d2010ae 3326 offset += object->vo_shadow_offset;
1c79356b
A
3327 vm_object_lock(next_object);
3328 vm_object_unlock(object);
3329 object = next_object;
3330 }
3331 else {
3332 /*
3333 * End of chain - we are done.
3334 */
3335 break;
3336 }
3337 }
3338 else {
3339 /*
3340 * Pages in shadowed objects may never have
3341 * write permission - we may stop here.
3342 */
3343 break;
3344 }
3345 }
3346
3347 vm_object_unlock(object);
3348}
3349
3350/*
3351 * Routine: vm_object_copy_slowly
3352 *
3353 * Description:
3354 * Copy the specified range of the source
3355 * virtual memory object without using
3356 * protection-based optimizations (such
3357 * as copy-on-write). The pages in the
3358 * region are actually copied.
3359 *
3360 * In/out conditions:
3361 * The caller must hold a reference and a lock
3362 * for the source virtual memory object. The source
3363 * object will be returned *unlocked*.
3364 *
3365 * Results:
3366 * If the copy is completed successfully, KERN_SUCCESS is
3367 * returned. If the caller asserted the interruptible
3368 * argument, and an interruption occurred while waiting
3369 * for a user-generated event, MACH_SEND_INTERRUPTED is
3370 * returned. Other values may be returned to indicate
3371 * hard errors during the copy operation.
3372 *
3373 * A new virtual memory object is returned in a
3374 * parameter (_result_object). The contents of this
3375 * new object, starting at a zero offset, are a copy
3376 * of the source memory region. In the event of
3377 * an error, this parameter will contain the value
3378 * VM_OBJECT_NULL.
3379 */
0b4e3aa0 3380__private_extern__ kern_return_t
1c79356b 3381vm_object_copy_slowly(
39037602 3382 vm_object_t src_object,
1c79356b
A
3383 vm_object_offset_t src_offset,
3384 vm_object_size_t size,
3385 boolean_t interruptible,
3386 vm_object_t *_result_object) /* OUT */
3387{
3388 vm_object_t new_object;
3389 vm_object_offset_t new_offset;
3390
2d21ac55 3391 struct vm_object_fault_info fault_info;
1c79356b
A
3392
3393 XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
3394 src_object, src_offset, size, 0, 0);
3395
3396 if (size == 0) {
3397 vm_object_unlock(src_object);
3398 *_result_object = VM_OBJECT_NULL;
3399 return(KERN_INVALID_ARGUMENT);
3400 }
3401
3402 /*
3403 * Prevent destruction of the source object while we copy.
3404 */
3405
2d21ac55 3406 vm_object_reference_locked(src_object);
1c79356b
A
3407 vm_object_unlock(src_object);
3408
3409 /*
3410 * Create a new object to hold the copied pages.
3411 * A few notes:
3412 * We fill the new object starting at offset 0,
3413 * regardless of the input offset.
3414 * We don't bother to lock the new object within
3415 * this routine, since we have the only reference.
3416 */
3417
3418 new_object = vm_object_allocate(size);
3419 new_offset = 0;
3420
3421 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
3422
2d21ac55
A
3423 fault_info.interruptible = interruptible;
3424 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fe8ab488
A
3425 fault_info.user_tag = 0;
3426 fault_info.pmap_options = 0;
2d21ac55
A
3427 fault_info.lo_offset = src_offset;
3428 fault_info.hi_offset = src_offset + size;
3429 fault_info.no_cache = FALSE;
b0d623f7 3430 fault_info.stealth = TRUE;
6d2010ae
A
3431 fault_info.io_sync = FALSE;
3432 fault_info.cs_bypass = FALSE;
0b4c1975 3433 fault_info.mark_zf_absent = FALSE;
316670eb 3434 fault_info.batch_pmap_op = FALSE;
2d21ac55 3435
1c79356b
A
3436 for ( ;
3437 size != 0 ;
3438 src_offset += PAGE_SIZE_64,
3439 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3440 ) {
3441 vm_page_t new_page;
3442 vm_fault_return_t result;
3443
2d21ac55
A
3444 vm_object_lock(new_object);
3445
1c79356b
A
3446 while ((new_page = vm_page_alloc(new_object, new_offset))
3447 == VM_PAGE_NULL) {
2d21ac55
A
3448
3449 vm_object_unlock(new_object);
3450
1c79356b
A
3451 if (!vm_page_wait(interruptible)) {
3452 vm_object_deallocate(new_object);
91447636 3453 vm_object_deallocate(src_object);
1c79356b
A
3454 *_result_object = VM_OBJECT_NULL;
3455 return(MACH_SEND_INTERRUPTED);
3456 }
2d21ac55 3457 vm_object_lock(new_object);
1c79356b 3458 }
2d21ac55 3459 vm_object_unlock(new_object);
1c79356b
A
3460
3461 do {
3462 vm_prot_t prot = VM_PROT_READ;
3463 vm_page_t _result_page;
3464 vm_page_t top_page;
1c79356b
A
3465 vm_page_t result_page;
3466 kern_return_t error_code;
39037602
A
3467 vm_object_t result_page_object;
3468
1c79356b
A
3469
3470 vm_object_lock(src_object);
3e170ce0
A
3471
3472 if (src_object->internal &&
3473 src_object->shadow == VM_OBJECT_NULL &&
3474 (vm_page_lookup(src_object,
3475 src_offset) == VM_PAGE_NULL) &&
3476 (src_object->pager == NULL ||
3477 (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3478 src_offset) ==
3479 VM_EXTERNAL_STATE_ABSENT))) {
3480 /*
3481 * This page is neither resident nor compressed
3482 * and there's no shadow object below
3483 * "src_object", so this page is really missing.
3484 * There's no need to zero-fill it just to copy
3485 * it: let's leave it missing in "new_object"
3486 * and get zero-filled on demand.
3487 */
3488 vm_object_unlock(src_object);
3489 /* free the unused "new_page"... */
3490 vm_object_lock(new_object);
3491 VM_PAGE_FREE(new_page);
3492 new_page = VM_PAGE_NULL;
3493 vm_object_unlock(new_object);
3494 /* ...and go to next page in "src_object" */
3495 result = VM_FAULT_SUCCESS;
3496 break;
3497 }
3498
1c79356b
A
3499 vm_object_paging_begin(src_object);
3500
b0d623f7
A
3501 if (size > (vm_size_t) -1) {
3502 /* 32-bit overflow */
3503 fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
3504 } else {
3505 fault_info.cluster_size = (vm_size_t) size;
3506 assert(fault_info.cluster_size == size);
3507 }
2d21ac55 3508
1c79356b 3509 XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
39236c6e 3510 _result_page = VM_PAGE_NULL;
1c79356b 3511 result = vm_fault_page(src_object, src_offset,
2d21ac55 3512 VM_PROT_READ, FALSE,
39236c6e 3513 FALSE, /* page not looked up */
1c79356b
A
3514 &prot, &_result_page, &top_page,
3515 (int *)0,
2d21ac55 3516 &error_code, FALSE, FALSE, &fault_info);
1c79356b
A
3517
3518 switch(result) {
b0d623f7
A
3519 case VM_FAULT_SUCCESS:
3520 result_page = _result_page;
39037602 3521 result_page_object = VM_PAGE_OBJECT(result_page);
1c79356b 3522
b0d623f7 3523 /*
b0d623f7
A
3524 * Copy the page to the new object.
3525 *
3526 * POLICY DECISION:
3527 * If result_page is clean,
3528 * we could steal it instead
3529 * of copying.
3530 */
1c79356b 3531
b0d623f7 3532 vm_page_copy(result_page, new_page);
39037602 3533 vm_object_unlock(result_page_object);
1c79356b 3534
b0d623f7
A
3535 /*
3536 * Let go of both pages (make them
3537 * not busy, perform wakeup, activate).
3538 */
3539 vm_object_lock(new_object);
316670eb 3540 SET_PAGE_DIRTY(new_page, FALSE);
b0d623f7
A
3541 PAGE_WAKEUP_DONE(new_page);
3542 vm_object_unlock(new_object);
1c79356b 3543
39037602 3544 vm_object_lock(result_page_object);
b0d623f7 3545 PAGE_WAKEUP_DONE(result_page);
1c79356b 3546
b0d623f7 3547 vm_page_lockspin_queues();
39037602
A
3548 if ((result_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3549 (result_page->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
b0d623f7 3550 vm_page_activate(result_page);
39037602 3551 }
b0d623f7
A
3552 vm_page_activate(new_page);
3553 vm_page_unlock_queues();
1c79356b 3554
b0d623f7
A
3555 /*
3556 * Release paging references and
3557 * top-level placeholder page, if any.
3558 */
3559
39037602 3560 vm_fault_cleanup(result_page_object,
b0d623f7
A
3561 top_page);
3562
3563 break;
1c79356b 3564
b0d623f7
A
3565 case VM_FAULT_RETRY:
3566 break;
3567
b0d623f7
A
3568 case VM_FAULT_MEMORY_SHORTAGE:
3569 if (vm_page_wait(interruptible))
1c79356b 3570 break;
b0d623f7 3571 /* fall thru */
1c79356b 3572
b0d623f7
A
3573 case VM_FAULT_INTERRUPTED:
3574 vm_object_lock(new_object);
3575 VM_PAGE_FREE(new_page);
3576 vm_object_unlock(new_object);
3577
3578 vm_object_deallocate(new_object);
3579 vm_object_deallocate(src_object);
3580 *_result_object = VM_OBJECT_NULL;
3581 return(MACH_SEND_INTERRUPTED);
1c79356b 3582
b0d623f7
A
3583 case VM_FAULT_SUCCESS_NO_VM_PAGE:
3584 /* success but no VM page: fail */
3585 vm_object_paging_end(src_object);
3586 vm_object_unlock(src_object);
3587 /*FALLTHROUGH*/
3588 case VM_FAULT_MEMORY_ERROR:
3589 /*
3590 * A policy choice:
3591 * (a) ignore pages that we can't
3592 * copy
3593 * (b) return the null object if
3594 * any page fails [chosen]
3595 */
593a1d5f 3596
b0d623f7
A
3597 vm_object_lock(new_object);
3598 VM_PAGE_FREE(new_page);
3599 vm_object_unlock(new_object);
1c79356b 3600
b0d623f7
A
3601 vm_object_deallocate(new_object);
3602 vm_object_deallocate(src_object);
3603 *_result_object = VM_OBJECT_NULL;
3604 return(error_code ? error_code:
3605 KERN_MEMORY_ERROR);
1c79356b 3606
b0d623f7
A
3607 default:
3608 panic("vm_object_copy_slowly: unexpected error"
3609 " 0x%x from vm_fault_page()\n", result);
1c79356b
A
3610 }
3611 } while (result != VM_FAULT_SUCCESS);
3612 }
3613
3614 /*
3615 * Lose the extra reference, and return our object.
3616 */
1c79356b
A
3617 vm_object_deallocate(src_object);
3618 *_result_object = new_object;
3619 return(KERN_SUCCESS);
3620}
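/*
 * Illustrative sketch -- not part of the original source.  It shows the
 * calling convention of vm_object_copy_slowly() described above: the
 * source object is locked on entry (the routine unlocks it), the size
 * must be page-aligned, and on success a new object holding private
 * copies based at offset 0 is returned.  The helper name
 * "example_eager_copy" and its arguments are hypothetical.
 */
#if 0	/* example only, not built */
static kern_return_t
example_eager_copy(
	vm_object_t		src,		/* caller holds a reference */
	vm_object_offset_t	offset,
	vm_object_size_t	len,		/* page-aligned */
	vm_object_t		*copy_object)	/* OUT */
{
	kern_return_t	kr;

	vm_object_lock(src);
	kr = vm_object_copy_slowly(src, offset, len,
				   FALSE,	/* not interruptible */
				   copy_object);
	/* "src" has already been unlocked by vm_object_copy_slowly() */
	return kr;
}
#endif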
3621
3622/*
3623 * Routine: vm_object_copy_quickly
3624 *
3625 * Purpose:
3626 * Copy the specified range of the source virtual
3627 * memory object, if it can be done without waiting
3628 * for user-generated events.
3629 *
3630 * Results:
3631 * If the copy is successful, the copy is returned in
3632 * the arguments; otherwise, the arguments are not
3633 * affected.
3634 *
3635 * In/out conditions:
3636 * The object should be unlocked on entry and exit.
3637 */
3638
3639/*ARGSUSED*/
0b4e3aa0 3640__private_extern__ boolean_t
1c79356b
A
3641vm_object_copy_quickly(
3642 vm_object_t *_object, /* INOUT */
91447636
A
3643 __unused vm_object_offset_t offset, /* IN */
3644 __unused vm_object_size_t size, /* IN */
1c79356b
A
3645 boolean_t *_src_needs_copy, /* OUT */
3646 boolean_t *_dst_needs_copy) /* OUT */
3647{
3648 vm_object_t object = *_object;
3649 memory_object_copy_strategy_t copy_strategy;
3650
3651 XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
3652 *_object, offset, size, 0, 0);
3653 if (object == VM_OBJECT_NULL) {
3654 *_src_needs_copy = FALSE;
3655 *_dst_needs_copy = FALSE;
3656 return(TRUE);
3657 }
3658
3659 vm_object_lock(object);
3660
3661 copy_strategy = object->copy_strategy;
3662
3663 switch (copy_strategy) {
3664 case MEMORY_OBJECT_COPY_SYMMETRIC:
3665
3666 /*
3667 * Symmetric copy strategy.
3668 * Make another reference to the object.
3669 * Leave object/offset unchanged.
3670 */
3671
2d21ac55 3672 vm_object_reference_locked(object);
1c79356b
A
3673 object->shadowed = TRUE;
3674 vm_object_unlock(object);
3675
3676 /*
3677 * Both source and destination must make
3678 * shadows, and the source must be made
3679 * read-only if not already.
3680 */
3681
3682 *_src_needs_copy = TRUE;
3683 *_dst_needs_copy = TRUE;
3684
3685 break;
3686
3687 case MEMORY_OBJECT_COPY_DELAY:
3688 vm_object_unlock(object);
3689 return(FALSE);
3690
3691 default:
3692 vm_object_unlock(object);
3693 return(FALSE);
3694 }
3695 return(TRUE);
3696}
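/*
 * Illustrative sketch -- not part of the original source.  It shows how a
 * caller might use vm_object_copy_quickly() per the contract above: a
 * TRUE return means the (symmetric) copy was done by taking another
 * reference, and the two flags tell the caller which mappings must be
 * marked copy-on-write; a FALSE return means the caller has to fall back
 * to a blocking strategy such as vm_object_copy_strategically().  The
 * surrounding variables ("object", "offset", "size") are hypothetical.
 */
#if 0	/* example only, not built */
	boolean_t	src_needs_copy, dst_needs_copy;

	if (vm_object_copy_quickly(&object, offset, size,
				   &src_needs_copy, &dst_needs_copy)) {
		if (src_needs_copy) {
			/* write-protect the source mapping */
		}
		if (dst_needs_copy) {
			/* mark the new mapping as needing a copy */
		}
	} else {
		/*
		 * Could not copy without waiting: use the general path,
		 * e.g. vm_object_copy_strategically().
		 */
	}
#endif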
3697
0b4e3aa0
A
3698static int copy_call_count = 0;
3699static int copy_call_sleep_count = 0;
3700static int copy_call_restart_count = 0;
1c79356b
A
3701
3702/*
3703 * Routine: vm_object_copy_call [internal]
3704 *
3705 * Description:
3706 * Copy the source object (src_object), using the
3707 * user-managed copy algorithm.
3708 *
3709 * In/out conditions:
3710 * The source object must be locked on entry. It
3711 * will be *unlocked* on exit.
3712 *
3713 * Results:
3714 * If the copy is successful, KERN_SUCCESS is returned.
3715 * A new object that represents the copied virtual
3716 * memory is returned in a parameter (*_result_object).
3717 * If the return value indicates an error, this parameter
3718 * is not valid.
3719 */
0b4e3aa0 3720static kern_return_t
1c79356b
A
3721vm_object_copy_call(
3722 vm_object_t src_object,
3723 vm_object_offset_t src_offset,
3724 vm_object_size_t size,
3725 vm_object_t *_result_object) /* OUT */
3726{
3727 kern_return_t kr;
3728 vm_object_t copy;
3729 boolean_t check_ready = FALSE;
2d21ac55 3730 uint32_t try_failed_count = 0;
1c79356b
A
3731
3732 /*
3733 * If a copy is already in progress, wait and retry.
3734 *
3735 * XXX
3736 * Consider making this call interruptible, as Mike
3737 * intended it to be.
3738 *
3739 * XXXO
3740 * Need a counter or version or something to allow
3741 * us to use the copy that the currently requesting
3742 * thread is obtaining -- is it worth adding to the
3743 * vm object structure? Depends on how common this case is.
3744 */
3745 copy_call_count++;
3746 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
9bccf70c 3747 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1c79356b 3748 THREAD_UNINT);
1c79356b
A
3749 copy_call_restart_count++;
3750 }
3751
3752 /*
3753 * Indicate (for the benefit of memory_object_create_copy)
3754 * that we want a copy for src_object. (Note that we cannot
3755 * do a real assert_wait before calling memory_object_copy,
3756 * so we simply set the flag.)
3757 */
3758
3759 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
3760 vm_object_unlock(src_object);
3761
3762 /*
3763 * Ask the memory manager to give us a memory object
3764 * which represents a copy of the src object.
3765 * The memory manager may give us a memory object
3766 * which we already have, or it may give us a
3767 * new memory object. This memory object will arrive
3768 * via memory_object_create_copy.
3769 */
3770
3771 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
3772 if (kr != KERN_SUCCESS) {
3773 return kr;
3774 }
3775
3776 /*
3777 * Wait for the copy to arrive.
3778 */
3779 vm_object_lock(src_object);
3780 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
9bccf70c 3781 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1c79356b 3782 THREAD_UNINT);
1c79356b
A
3783 copy_call_sleep_count++;
3784 }
3785Retry:
3786 assert(src_object->copy != VM_OBJECT_NULL);
3787 copy = src_object->copy;
3788 if (!vm_object_lock_try(copy)) {
3789 vm_object_unlock(src_object);
2d21ac55
A
3790
3791 try_failed_count++;
3792 mutex_pause(try_failed_count); /* wait a bit */
3793
1c79356b
A
3794 vm_object_lock(src_object);
3795 goto Retry;
3796 }
6d2010ae
A
3797 if (copy->vo_size < src_offset+size)
3798 copy->vo_size = src_offset+size;
1c79356b
A
3799
3800 if (!copy->pager_ready)
3801 check_ready = TRUE;
3802
3803 /*
3804 * Return the copy.
3805 */
3806 *_result_object = copy;
3807 vm_object_unlock(copy);
3808 vm_object_unlock(src_object);
3809
3810 /* Wait for the copy to be ready. */
3811 if (check_ready == TRUE) {
3812 vm_object_lock(copy);
3813 while (!copy->pager_ready) {
9bccf70c 3814 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
1c79356b
A
3815 }
3816 vm_object_unlock(copy);
3817 }
3818
3819 return KERN_SUCCESS;
3820}
3821
0b4e3aa0
A
3822static int copy_delayed_lock_collisions = 0;
3823static int copy_delayed_max_collisions = 0;
3824static int copy_delayed_lock_contention = 0;
3825static int copy_delayed_protect_iterate = 0;
1c79356b
A
3826
3827/*
3828 * Routine: vm_object_copy_delayed [internal]
3829 *
3830 * Description:
3831 * Copy the specified virtual memory object, using
3832 * the asymmetric copy-on-write algorithm.
3833 *
3834 * In/out conditions:
55e303ae
A
3835 * The src_object must be locked on entry. It will be unlocked
3836 * on exit - so the caller must also hold a reference to it.
1c79356b
A
3837 *
3838 * This routine will not block waiting for user-generated
3839 * events. It is not interruptible.
3840 */
0b4e3aa0 3841__private_extern__ vm_object_t
1c79356b
A
3842vm_object_copy_delayed(
3843 vm_object_t src_object,
3844 vm_object_offset_t src_offset,
2d21ac55
A
3845 vm_object_size_t size,
3846 boolean_t src_object_shared)
1c79356b
A
3847{
3848 vm_object_t new_copy = VM_OBJECT_NULL;
3849 vm_object_t old_copy;
3850 vm_page_t p;
55e303ae 3851 vm_object_size_t copy_size = src_offset + size;
39236c6e
A
3852 pmap_flush_context pmap_flush_context_storage;
3853 boolean_t delayed_pmap_flush = FALSE;
1c79356b 3854
2d21ac55 3855
1c79356b
A
3856 int collisions = 0;
3857 /*
3858 * The user-level memory manager wants to see all of the changes
3859 * to this object, but it has promised not to make any changes on
3860 * its own.
3861 *
3862 * Perform an asymmetric copy-on-write, as follows:
3863 * Create a new object, called a "copy object" to hold
3864 * pages modified by the new mapping (i.e., the copy,
3865 * not the original mapping).
3866 * Record the original object as the backing object for
3867 * the copy object. If the original mapping does not
3868 * change a page, it may be used read-only by the copy.
3869 * Record the copy object in the original object.
3870 * When the original mapping causes a page to be modified,
3871 * it must be copied to a new page that is "pushed" to
3872 * the copy object.
3873 * Mark the new mapping (the copy object) copy-on-write.
3874 * This makes the copy object itself read-only, allowing
3875 * it to be reused if the original mapping makes no
3876 * changes, and simplifying the synchronization required
3877 * in the "push" operation described above.
3878 *
3879 * The copy-on-write is said to be asymmetric because the original
3880 * object is *not* marked copy-on-write. A copied page is pushed
3881 * to the copy object, regardless which party attempted to modify
3882 * the page.
3883 *
3884 * Repeated asymmetric copy operations may be done. If the
3885 * original object has not been changed since the last copy, its
3886 * copy object can be reused. Otherwise, a new copy object can be
3887 * inserted between the original object and its previous copy
3888 * object. Since any copy object is read-only, this cannot
3889 * affect the contents of the previous copy object.
3890 *
3891 * Note that a copy object is higher in the object tree than the
3892 * original object; therefore, use of the copy object recorded in
3893 * the original object must be done carefully, to avoid deadlock.
3894 */
3895
3e170ce0 3896 copy_size = vm_object_round_page(copy_size);
1c79356b 3897 Retry:
1c79356b 3898
55e303ae
A
3899 /*
3900 * Wait for paging in progress.
3901 */
b0d623f7
A
3902 if (!src_object->true_share &&
3903 (src_object->paging_in_progress != 0 ||
3904 src_object->activity_in_progress != 0)) {
2d21ac55
A
3905 if (src_object_shared == TRUE) {
3906 vm_object_unlock(src_object);
2d21ac55
A
3907 vm_object_lock(src_object);
3908 src_object_shared = FALSE;
b0d623f7 3909 goto Retry;
2d21ac55 3910 }
55e303ae 3911 vm_object_paging_wait(src_object, THREAD_UNINT);
2d21ac55 3912 }
1c79356b
A
3913 /*
3914 * See whether we can reuse the result of a previous
3915 * copy operation.
3916 */
3917
3918 old_copy = src_object->copy;
3919 if (old_copy != VM_OBJECT_NULL) {
2d21ac55
A
3920 int lock_granted;
3921
1c79356b
A
3922 /*
3923 * Try to get the locks (out of order)
3924 */
2d21ac55
A
3925 if (src_object_shared == TRUE)
3926 lock_granted = vm_object_lock_try_shared(old_copy);
3927 else
3928 lock_granted = vm_object_lock_try(old_copy);
3929
3930 if (!lock_granted) {
1c79356b 3931 vm_object_unlock(src_object);
1c79356b 3932
1c79356b
A
3933 if (collisions++ == 0)
3934 copy_delayed_lock_contention++;
2d21ac55
A
3935 mutex_pause(collisions);
3936
3937 /* Heisenberg Rules */
3938 copy_delayed_lock_collisions++;
1c79356b
A
3939
3940 if (collisions > copy_delayed_max_collisions)
3941 copy_delayed_max_collisions = collisions;
3942
2d21ac55
A
3943 if (src_object_shared == TRUE)
3944 vm_object_lock_shared(src_object);
3945 else
3946 vm_object_lock(src_object);
3947
1c79356b
A
3948 goto Retry;
3949 }
3950
3951 /*
3952 * Determine whether the old copy object has
3953 * been modified.
3954 */
3955
3956 if (old_copy->resident_page_count == 0 &&
3957 !old_copy->pager_created) {
3958 /*
3959 * It has not been modified.
3960 *
3961 * Return another reference to
55e303ae
A
3962 * the existing copy-object if
3963 * we can safely grow it (if
3964 * needed).
de355530 3965 */
1c79356b 3966
6d2010ae 3967 if (old_copy->vo_size < copy_size) {
2d21ac55
A
3968 if (src_object_shared == TRUE) {
3969 vm_object_unlock(old_copy);
3970 vm_object_unlock(src_object);
3971
3972 vm_object_lock(src_object);
3973 src_object_shared = FALSE;
3974 goto Retry;
3975 }
55e303ae
A
3976 /*
3977 * We can't perform a delayed copy if any of the
3978 * pages in the extended range are wired (because
3979 * we can't safely take write permission away from
3980 * wired pages). If the pages aren't wired, then
3981 * go ahead and protect them.
3982 */
3983 copy_delayed_protect_iterate++;
2d21ac55 3984
39236c6e
A
3985 pmap_flush_context_init(&pmap_flush_context_storage);
3986 delayed_pmap_flush = FALSE;
3987
39037602 3988 vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
55e303ae 3989 if (!p->fictitious &&
6d2010ae 3990 p->offset >= old_copy->vo_size &&
55e303ae 3991 p->offset < copy_size) {
b0d623f7 3992 if (VM_PAGE_WIRED(p)) {
55e303ae
A
3993 vm_object_unlock(old_copy);
3994 vm_object_unlock(src_object);
91447636
A
3995
3996 if (new_copy != VM_OBJECT_NULL) {
3997 vm_object_unlock(new_copy);
3998 vm_object_deallocate(new_copy);
3999 }
39236c6e
A
4000 if (delayed_pmap_flush == TRUE)
4001 pmap_flush(&pmap_flush_context_storage);
91447636 4002
55e303ae
A
4003 return VM_OBJECT_NULL;
4004 } else {
39037602 4005 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
39236c6e
A
4006 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4007 delayed_pmap_flush = TRUE;
55e303ae
A
4008 }
4009 }
4010 }
39236c6e
A
4011 if (delayed_pmap_flush == TRUE)
4012 pmap_flush(&pmap_flush_context_storage);
4013
6d2010ae 4014 old_copy->vo_size = copy_size;
55e303ae 4015 }
2d21ac55
A
4016 if (src_object_shared == TRUE)
4017 vm_object_reference_shared(old_copy);
4018 else
4019 vm_object_reference_locked(old_copy);
d7e50217
A
4020 vm_object_unlock(old_copy);
4021 vm_object_unlock(src_object);
91447636
A
4022
4023 if (new_copy != VM_OBJECT_NULL) {
4024 vm_object_unlock(new_copy);
4025 vm_object_deallocate(new_copy);
4026 }
55e303ae 4027 return(old_copy);
d7e50217 4028 }
2d21ac55
A
4029
4030
de355530
A
4031
4032 /*
4033 * Adjust the size argument so that the newly-created
4034 * copy object will be large enough to back either the
55e303ae 4035 * old copy object or the new mapping.
de355530 4036 */
6d2010ae
A
4037 if (old_copy->vo_size > copy_size)
4038 copy_size = old_copy->vo_size;
55e303ae
A
4039
4040 if (new_copy == VM_OBJECT_NULL) {
4041 vm_object_unlock(old_copy);
4042 vm_object_unlock(src_object);
4043 new_copy = vm_object_allocate(copy_size);
4044 vm_object_lock(src_object);
4045 vm_object_lock(new_copy);
2d21ac55
A
4046
4047 src_object_shared = FALSE;
55e303ae
A
4048 goto Retry;
4049 }
6d2010ae 4050 new_copy->vo_size = copy_size;
1c79356b
A
4051
4052 /*
4053 * The copy-object is always made large enough to
4054 * completely shadow the original object, since
4055 * it may have several users who want to shadow
4056 * the original object at different points.
4057 */
4058
4059 assert((old_copy->shadow == src_object) &&
6d2010ae 4060 (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
1c79356b 4061
55e303ae
A
4062 } else if (new_copy == VM_OBJECT_NULL) {
4063 vm_object_unlock(src_object);
4064 new_copy = vm_object_allocate(copy_size);
4065 vm_object_lock(src_object);
4066 vm_object_lock(new_copy);
2d21ac55
A
4067
4068 src_object_shared = FALSE;
55e303ae
A
4069 goto Retry;
4070 }
4071
4072 /*
4073 * We now have the src object locked, and the new copy object
4074 * allocated and locked (and potentially the old copy locked).
4075 * Before we go any further, make sure we can still perform
4076 * a delayed copy, as the situation may have changed.
4077 *
4078 * Specifically, we can't perform a delayed copy if any of the
4079 * pages in the range are wired (because we can't safely take
4080 * write permission away from wired pages). If the pages aren't
4081 * wired, then go ahead and protect them.
4082 */
4083 copy_delayed_protect_iterate++;
2d21ac55 4084
39236c6e
A
4085 pmap_flush_context_init(&pmap_flush_context_storage);
4086 delayed_pmap_flush = FALSE;
4087
39037602 4088 vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
55e303ae 4089 if (!p->fictitious && p->offset < copy_size) {
b0d623f7 4090 if (VM_PAGE_WIRED(p)) {
55e303ae
A
4091 if (old_copy)
4092 vm_object_unlock(old_copy);
4093 vm_object_unlock(src_object);
4094 vm_object_unlock(new_copy);
4095 vm_object_deallocate(new_copy);
39236c6e
A
4096
4097 if (delayed_pmap_flush == TRUE)
4098 pmap_flush(&pmap_flush_context_storage);
4099
55e303ae
A
4100 return VM_OBJECT_NULL;
4101 } else {
39037602 4102 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
39236c6e
A
4103 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4104 delayed_pmap_flush = TRUE;
55e303ae
A
4105 }
4106 }
4107 }
39236c6e
A
4108 if (delayed_pmap_flush == TRUE)
4109 pmap_flush(&pmap_flush_context_storage);
4110
55e303ae 4111 if (old_copy != VM_OBJECT_NULL) {
1c79356b
A
4112 /*
4113 * Make the old copy-object shadow the new one.
4114 * It will receive no more pages from the original
4115 * object.
4116 */
4117
2d21ac55
A
4118 /* remove ref. from old_copy */
4119 vm_object_lock_assert_exclusive(src_object);
4120 src_object->ref_count--;
1c79356b 4121 assert(src_object->ref_count > 0);
2d21ac55 4122 vm_object_lock_assert_exclusive(old_copy);
1c79356b 4123 old_copy->shadow = new_copy;
2d21ac55 4124 vm_object_lock_assert_exclusive(new_copy);
1c79356b
A
4125 assert(new_copy->ref_count > 0);
4126 new_copy->ref_count++; /* for old_copy->shadow ref. */
4127
4128#if TASK_SWAPPER
4129 if (old_copy->res_count) {
4130 VM_OBJ_RES_INCR(new_copy);
4131 VM_OBJ_RES_DECR(src_object);
4132 }
4133#endif
4134
4135 vm_object_unlock(old_copy); /* done with old_copy */
1c79356b
A
4136 }
4137
4138 /*
4139 * Point the new copy at the existing object.
4140 */
2d21ac55 4141 vm_object_lock_assert_exclusive(new_copy);
1c79356b 4142 new_copy->shadow = src_object;
6d2010ae 4143 new_copy->vo_shadow_offset = 0;
1c79356b 4144 new_copy->shadowed = TRUE; /* caller must set needs_copy */
2d21ac55
A
4145
4146 vm_object_lock_assert_exclusive(src_object);
4147 vm_object_reference_locked(src_object);
1c79356b 4148 src_object->copy = new_copy;
55e303ae 4149 vm_object_unlock(src_object);
1c79356b
A
4150 vm_object_unlock(new_copy);
4151
1c79356b
A
4152 XPR(XPR_VM_OBJECT,
4153 "vm_object_copy_delayed: used copy object %X for source %X\n",
b0d623f7 4154 new_copy, src_object, 0, 0, 0);
1c79356b 4155
2d21ac55 4156 return new_copy;
1c79356b
A
4157}
4158
4159/*
4160 * Routine: vm_object_copy_strategically
4161 *
4162 * Purpose:
4163 * Perform a copy according to the source object's
4164 * declared strategy. This operation may block,
4165 * and may be interrupted.
4166 */
0b4e3aa0 4167__private_extern__ kern_return_t
1c79356b 4168vm_object_copy_strategically(
39037602 4169 vm_object_t src_object,
1c79356b
A
4170 vm_object_offset_t src_offset,
4171 vm_object_size_t size,
4172 vm_object_t *dst_object, /* OUT */
4173 vm_object_offset_t *dst_offset, /* OUT */
4174 boolean_t *dst_needs_copy) /* OUT */
4175{
4176 boolean_t result;
4177 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
2d21ac55 4178 boolean_t object_lock_shared = FALSE;
1c79356b
A
4179 memory_object_copy_strategy_t copy_strategy;
4180
4181 assert(src_object != VM_OBJECT_NULL);
4182
2d21ac55
A
4183 copy_strategy = src_object->copy_strategy;
4184
4185 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
4186 vm_object_lock_shared(src_object);
4187 object_lock_shared = TRUE;
4188 } else
4189 vm_object_lock(src_object);
1c79356b
A
4190
4191 /*
4192 * The copy strategy is only valid if the memory manager
4193 * is "ready". Internal objects are always ready.
4194 */
4195
4196 while (!src_object->internal && !src_object->pager_ready) {
9bccf70c 4197 wait_result_t wait_result;
1c79356b 4198
2d21ac55
A
4199 if (object_lock_shared == TRUE) {
4200 vm_object_unlock(src_object);
4201 vm_object_lock(src_object);
4202 object_lock_shared = FALSE;
4203 continue;
4204 }
9bccf70c
A
4205 wait_result = vm_object_sleep( src_object,
4206 VM_OBJECT_EVENT_PAGER_READY,
4207 interruptible);
4208 if (wait_result != THREAD_AWAKENED) {
4209 vm_object_unlock(src_object);
1c79356b
A
4210 *dst_object = VM_OBJECT_NULL;
4211 *dst_offset = 0;
4212 *dst_needs_copy = FALSE;
4213 return(MACH_SEND_INTERRUPTED);
4214 }
1c79356b
A
4215 }
4216
1c79356b
A
4217 /*
4218 * Use the appropriate copy strategy.
4219 */
4220
4221 switch (copy_strategy) {
55e303ae
A
4222 case MEMORY_OBJECT_COPY_DELAY:
4223 *dst_object = vm_object_copy_delayed(src_object,
2d21ac55 4224 src_offset, size, object_lock_shared);
55e303ae
A
4225 if (*dst_object != VM_OBJECT_NULL) {
4226 *dst_offset = src_offset;
4227 *dst_needs_copy = TRUE;
4228 result = KERN_SUCCESS;
4229 break;
4230 }
4231 vm_object_lock(src_object);
4232 /* fall thru when delayed copy not allowed */
4233
1c79356b
A
4234 case MEMORY_OBJECT_COPY_NONE:
4235 result = vm_object_copy_slowly(src_object, src_offset, size,
4236 interruptible, dst_object);
4237 if (result == KERN_SUCCESS) {
4238 *dst_offset = 0;
4239 *dst_needs_copy = FALSE;
4240 }
4241 break;
4242
4243 case MEMORY_OBJECT_COPY_CALL:
4244 result = vm_object_copy_call(src_object, src_offset, size,
4245 dst_object);
4246 if (result == KERN_SUCCESS) {
4247 *dst_offset = src_offset;
4248 *dst_needs_copy = TRUE;
4249 }
4250 break;
4251
1c79356b 4252 case MEMORY_OBJECT_COPY_SYMMETRIC:
b0d623f7 4253 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
1c79356b
A
4254 vm_object_unlock(src_object);
4255 result = KERN_MEMORY_RESTART_COPY;
4256 break;
4257
4258 default:
4259 panic("copy_strategically: bad strategy");
4260 result = KERN_INVALID_ARGUMENT;
4261 }
4262 return(result);
4263}
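/*
 * Illustrative sketch -- not part of the original source.  It shows how a
 * hypothetical caller might handle the results of
 * vm_object_copy_strategically(): KERN_SUCCESS supplies a destination
 * object/offset plus a flag, KERN_MEMORY_RESTART_COPY asks the caller to
 * redo the copy symmetrically, and anything else (such as
 * MACH_SEND_INTERRUPTED) is a failure.
 */
#if 0	/* example only, not built */
	kern_return_t		kr;
	vm_object_t		dst_object;
	vm_object_offset_t	dst_offset;
	boolean_t		dst_needs_copy;

	kr = vm_object_copy_strategically(src_object, src_offset, size,
					  &dst_object, &dst_offset,
					  &dst_needs_copy);
	switch (kr) {
	case KERN_SUCCESS:
		/* map dst_object at dst_offset; honor dst_needs_copy */
		break;
	case KERN_MEMORY_RESTART_COPY:
		/*
		 * The source uses the symmetric strategy: restart and
		 * perform the copy via the symmetric (copy_quickly) path.
		 */
		break;
	default:
		/* interrupted or memory error: propagate the failure */
		break;
	}
#endif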
4264
4265/*
4266 * vm_object_shadow:
4267 *
4268 * Create a new object which is backed by the
4269 * specified existing object range. The source
4270 * object reference is deallocated.
4271 *
4272 * The new object and offset into that object
4273 * are returned in the source parameters.
4274 */
6d2010ae 4275boolean_t vm_object_shadow_check = TRUE;
1c79356b 4276
0b4e3aa0 4277__private_extern__ boolean_t
1c79356b
A
4278vm_object_shadow(
4279 vm_object_t *object, /* IN/OUT */
4280 vm_object_offset_t *offset, /* IN/OUT */
4281 vm_object_size_t length)
4282{
39037602
A
4283 vm_object_t source;
4284 vm_object_t result;
1c79356b
A
4285
4286 source = *object;
e2d2fc5c
A
4287 assert(source != VM_OBJECT_NULL);
4288 if (source == VM_OBJECT_NULL)
4289 return FALSE;
4290
2d21ac55
A
4291#if 0
4292 /*
4293 * XXX FBDP
4294 * This assertion is valid but it gets triggered by Rosetta for example
4295 * due to a combination of vm_remap() that changes a VM object's
4296 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
4297 * that then sets "needs_copy" on its map entry. This creates a
4298 * mapping situation that VM should never see and doesn't know how to
4299 * handle.
4300 * It's not clear if this can create any real problem but we should
4301 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
4302 * do more than just set "needs_copy" to handle the copy-on-write...
4303 * In the meantime, let's disable the assertion.
4304 */
1c79356b 4305 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
2d21ac55 4306#endif
1c79356b
A
4307
4308 /*
4309 * Determine if we really need a shadow.
6d2010ae
A
4310 *
4311 * If the source object is larger than what we are trying
4312 * to create, then force the shadow creation even if the
4313 * ref count is 1. This will allow us to [potentially]
4314 * collapse the underlying object away in the future
4315 * (freeing up the extra data it might contain and that
4316 * we don't need).
1c79356b 4317 */
39037602
A
4318
4319 assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
4320
6d2010ae
A
4321 if (vm_object_shadow_check &&
4322 source->vo_size == length &&
4323 source->ref_count == 1 &&
1c79356b 4324 (source->shadow == VM_OBJECT_NULL ||
6d2010ae 4325 source->shadow->copy == VM_OBJECT_NULL) )
1c79356b 4326 {
39037602
A
4327 /* lock the object and check again */
4328 vm_object_lock(source);
4329 if (source->vo_size == length &&
4330 source->ref_count == 1 &&
4331 (source->shadow == VM_OBJECT_NULL ||
4332 source->shadow->copy == VM_OBJECT_NULL))
4333 {
4334 source->shadowed = FALSE;
4335 vm_object_unlock(source);
4336 return FALSE;
4337 }
4338 /* things changed while we were locking "source"... */
4339 vm_object_unlock(source);
1c79356b
A
4340 }
4341
4342 /*
4343 * Allocate a new object with the given length
4344 */
4345
4346 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
4347 panic("vm_object_shadow: no object for shadowing");
4348
4349 /*
4350 * The new object shadows the source object, adding
4351 * a reference to it. Our caller changes his reference
4352 * to point to the new object, removing a reference to
4353 * the source object. Net result: no change of reference
4354 * count.
4355 */
4356 result->shadow = source;
4357
4358 /*
4359 * Store the offset into the source object,
4360 * and fix up the offset into the new object.
4361 */
4362
6d2010ae 4363 result->vo_shadow_offset = *offset;
1c79356b
A
4364
4365 /*
4366 * Return the new things
4367 */
4368
4369 *offset = 0;
4370 *object = result;
4371 return TRUE;
4372}
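/*
 * Illustrative sketch -- not part of the original source.  It shows the
 * IN/OUT convention of vm_object_shadow() described above: on a TRUE
 * return the caller's object/offset have been replaced by the new shadow
 * object and an offset of 0 (the original reference now backs the
 * shadow); on FALSE no shadow was needed and both values are unchanged.
 * The names "mapped_object", "mapped_offset" and "mapping_size" are
 * hypothetical.
 */
#if 0	/* example only, not built */
	vm_object_t		obj = mapped_object;	/* caller's reference */
	vm_object_offset_t	off = mapped_offset;

	if (vm_object_shadow(&obj, &off, mapping_size)) {
		/* "obj" is now the shadow object and "off" is 0 */
	}
	/* record obj/off as the mapping's object and offset */
#endif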
4373
4374/*
4375 * The relationship between vm_object structures and
0b4e3aa0 4376 * the memory_object requires careful synchronization.
1c79356b 4377 *
0b4e3aa0 4378 * All associations are created by memory_object_create_named
39037602 4379 * for external pagers and vm_object_compressor_pager_create for internal
0b4e3aa0
A
4380 * objects as follows:
4381 *
4382 * pager: the memory_object itself, supplied by
1c79356b
A
4383 * the user requesting a mapping (or the kernel,
4384 * when initializing internal objects); the
4385 * kernel simulates holding send rights by keeping
4386 * a port reference;
0b4e3aa0 4387 *
1c79356b
A
4388 * pager_request:
4389 * the memory object control port,
4390 * created by the kernel; the kernel holds
4391 * receive (and ownership) rights to this
4392 * port, but no other references.
1c79356b
A
4393 *
4394 * When initialization is complete, the "initialized" field
4395 * is asserted. Other mappings using a particular memory object,
4396 * and any references to the vm_object gained through the
4397 * port association must wait for this initialization to occur.
4398 *
4399 * In order to allow the memory manager to set attributes before
4400 * requests (notably virtual copy operations, but also data or
4401 * unlock requests) are made, a "ready" attribute is made available.
4402 * Only the memory manager may affect the value of this attribute.
4403 * Its value does not affect critical kernel functions, such as
4404 * internal object initialization or destruction. [Furthermore,
4405 * memory objects created by the kernel are assumed to be ready
4406 * immediately; the default memory manager need not explicitly
4407 * set the "ready" attribute.]
4408 *
4409 * [Both the "initialized" and "ready" attribute wait conditions
4410 * use the "pager" field as the wait event.]
4411 *
4412 * The port associations can be broken down by any of the
4413 * following routines:
4414 * vm_object_terminate:
4415 * No references to the vm_object remain, and
4416 * the object cannot (or will not) be cached.
4417 * This is the normal case, and is done even
4418 * though one of the other cases has already been
4419 * done.
1c79356b
A
4420 * memory_object_destroy:
4421 * The memory manager has requested that the
0b4e3aa0
A
4422 * kernel relinquish references to the memory
4423 * object. [The memory manager may not want to
4424 * destroy the memory object, but may wish to
4425 * refuse or tear down existing memory mappings.]
4426 *
1c79356b
A
4427 * Each routine that breaks an association must break all of
4428 * them at once. At some later time, that routine must clear
0b4e3aa0 4429 * the pager field and release the memory object references.
1c79356b
A
4430 * [Furthermore, each routine must cope with the simultaneous
4431 * or previous operations of the others.]
4432 *
b0d623f7 4433 * In addition to the lock on the object, the vm_object_hash_lock
0b4e3aa0 4434 * governs the associations. References gained through the
b0d623f7 4435 * association require use of the hash lock.
1c79356b 4436 *
0b4e3aa0 4437 * Because the pager field may be cleared spontaneously, it
1c79356b
A
4438 * cannot be used to determine whether a memory object has
4439 * ever been associated with a particular vm_object. [This
2d21ac55
A
4440 * knowledge is important to the shadow object mechanism.]
4441 * For this reason, an additional "created" attribute is
4442 * provided.
4443 *
4444 * During various paging operations, the pager reference found in the
4445 * vm_object must be valid. To prevent this from being released,
4446 * (other than being removed, i.e., made null), routines may use
4447 * the vm_object_paging_begin/end routines [actually, macros].
4448 * The implementation uses the "paging_in_progress" and "wanted" fields.
4449 * [Operations that alter the validity of the pager values include the
4450 * termination routines and vm_object_collapse.]
4451 */
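/*
 * Illustrative sketch -- not part of the original source.  It spells out
 * the wait protocol described above, using the same idiom that appears
 * elsewhere in this file: a thread that needs a fully set-up pager
 * association sleeps on the object until the "initialized" and "ready"
 * attributes have been asserted.
 */
#if 0	/* example only, not built */
	vm_object_lock(object);
	while (!object->pager_initialized) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_INITIALIZED,
				THREAD_UNINT);
	}
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
#endif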
1c79356b 4452
1c79356b
A
4453
4454/*
4455 * Routine: vm_object_enter
4456 * Purpose:
4457 * Find a VM object corresponding to the given
4458 * pager; if no such object exists, create one,
4459 * and initialize the pager.
4460 */
4461vm_object_t
4462vm_object_enter(
0b4e3aa0 4463 memory_object_t pager,
1c79356b
A
4464 vm_object_size_t size,
4465 boolean_t internal,
4466 boolean_t init,
0b4e3aa0 4467 boolean_t named)
1c79356b 4468{
39037602 4469 vm_object_t object;
1c79356b
A
4470 vm_object_t new_object;
4471 boolean_t must_init;
1c79356b 4472 vm_object_hash_entry_t entry, new_entry;
2d21ac55 4473 uint32_t try_failed_count = 0;
b0d623f7 4474 lck_mtx_t *lck;
1c79356b 4475
0b4e3aa0 4476 if (pager == MEMORY_OBJECT_NULL)
1c79356b
A
4477 return(vm_object_allocate(size));
4478
4479 new_object = VM_OBJECT_NULL;
4480 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
4481 must_init = init;
4482
4483 /*
4484 * Look for an object associated with this port.
4485 */
2d21ac55 4486Retry:
b0d623f7 4487 lck = vm_object_hash_lock_spin(pager);
55e303ae 4488 do {
1c79356b
A
4489 entry = vm_object_hash_lookup(pager, FALSE);
4490
55e303ae
A
4491 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
4492 if (new_object == VM_OBJECT_NULL) {
4493 /*
4494 * We must unlock to create a new object;
4495 * if we do so, we must try the lookup again.
4496 */
b0d623f7 4497 vm_object_hash_unlock(lck);
55e303ae
A
4498 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
4499 new_entry = vm_object_hash_entry_alloc(pager);
4500 new_object = vm_object_allocate(size);
3e170ce0
A
4501 /*
4502 * Set new_object->hashed now, while no one
4503 * knows about this object yet and we
4504 * don't need to lock it. Once it's in
4505 * the hash table, we would have to lock
4506 * the object to set its "hashed" bit and
4507 * we can't lock the object while holding
4508 * the hash lock as a spinlock...
4509 */
4510 new_object->hashed = TRUE;
b0d623f7 4511 lck = vm_object_hash_lock_spin(pager);
55e303ae
A
4512 } else {
4513 /*
4514 * Lookup failed twice, and we have something
4515 * to insert; set the object.
4516 */
3e170ce0
A
4517 /*
4518 * We can't lock the object here since we're
4519 * holding the hash lock as a spin lock.
4520 * We've already pre-set "new_object->hashed"
4521 * when we created "new_object" above, so we
4522 * won't need to modify the object in
4523 * vm_object_hash_insert().
4524 */
4525 assert(new_object->hashed);
b0d623f7 4526 vm_object_hash_insert(new_entry, new_object);
55e303ae 4527 entry = new_entry;
55e303ae
A
4528 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
4529 new_object = VM_OBJECT_NULL;
4530 must_init = TRUE;
4531 }
4532 } else if (entry->object == VM_OBJECT_NULL) {
4533 /*
4534 * If a previous object is being terminated,
4535 * we must wait for the termination message
4536 * to be queued (and lookup the entry again).
4537 */
1c79356b 4538 entry->waiting = TRUE;
55e303ae 4539 entry = VM_OBJECT_HASH_ENTRY_NULL;
1c79356b 4540 assert_wait((event_t) pager, THREAD_UNINT);
b0d623f7
A
4541 vm_object_hash_unlock(lck);
4542
91447636 4543 thread_block(THREAD_CONTINUE_NULL);
b0d623f7 4544 lck = vm_object_hash_lock_spin(pager);
1c79356b 4545 }
55e303ae 4546 } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
1c79356b
A
4547
4548 object = entry->object;
4549 assert(object != VM_OBJECT_NULL);
4550
4551 if (!must_init) {
b0d623f7 4552 if ( !vm_object_lock_try(object)) {
2d21ac55 4553
b0d623f7 4554 vm_object_hash_unlock(lck);
2d21ac55
A
4555
4556 try_failed_count++;
4557 mutex_pause(try_failed_count); /* wait a bit */
2d21ac55
A
4558 goto Retry;
4559 }
1c79356b 4560 assert(!internal || object->internal);
b0d623f7 4561#if VM_OBJECT_CACHE
1c79356b 4562 if (object->ref_count == 0) {
b0d623f7
A
4563 if ( !vm_object_cache_lock_try()) {
4564
4565 vm_object_hash_unlock(lck);
4566 vm_object_unlock(object);
4567
4568 try_failed_count++;
4569 mutex_pause(try_failed_count); /* wait a bit */
4570 goto Retry;
4571 }
1c79356b 4572 XPR(XPR_VM_OBJECT_CACHE,
b0d623f7
A
4573 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
4574 object,
4575 vm_object_cached_list.next,
4576 vm_object_cached_list.prev, 0,0);
1c79356b
A
4577 queue_remove(&vm_object_cached_list, object,
4578 vm_object_t, cached_list);
4579 vm_object_cached_count--;
b0d623f7
A
4580
4581 vm_object_cache_unlock();
4582 }
4583#endif
4584 if (named) {
4585 assert(!object->named);
4586 object->named = TRUE;
1c79356b 4587 }
2d21ac55 4588 vm_object_lock_assert_exclusive(object);
1c79356b
A
4589 object->ref_count++;
4590 vm_object_res_reference(object);
b0d623f7
A
4591
4592 vm_object_hash_unlock(lck);
1c79356b
A
4593 vm_object_unlock(object);
4594
2d21ac55 4595 VM_STAT_INCR(hits);
b0d623f7
A
4596 } else
4597 vm_object_hash_unlock(lck);
4598
1c79356b
A
4599 assert(object->ref_count > 0);
4600
2d21ac55 4601 VM_STAT_INCR(lookups);
1c79356b 4602
1c79356b
A
4603 XPR(XPR_VM_OBJECT,
4604 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
b0d623f7 4605 pager, object, must_init, 0, 0);
1c79356b
A
4606
4607 /*
4608 * If we raced to create a vm_object but lost, let's
4609 * throw away ours.
4610 */
4611
3e170ce0
A
4612 if (new_object != VM_OBJECT_NULL) {
4613 /*
4614 * Undo the pre-setting of "new_object->hashed" before
4615 * deallocating "new_object", since we did not insert it
4616 * into the hash table after all.
4617 */
4618 assert(new_object->hashed);
4619 new_object->hashed = FALSE;
1c79356b 4620 vm_object_deallocate(new_object);
3e170ce0 4621 }
1c79356b
A
4622
4623 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
4624 vm_object_hash_entry_free(new_entry);
4625
4626 if (must_init) {
91447636 4627 memory_object_control_t control;
1c79356b
A
4628
4629 /*
4630 * Allocate request port.
4631 */
4632
91447636
A
4633 control = memory_object_control_allocate(object);
4634 assert (control != MEMORY_OBJECT_CONTROL_NULL);
1c79356b
A
4635
4636 vm_object_lock(object);
91447636 4637 assert(object != kernel_object);
1c79356b
A
4638
4639 /*
0b4e3aa0 4640 * Copy the reference we were given.
1c79356b
A
4641 */
4642
0b4e3aa0 4643 memory_object_reference(pager);
1c79356b
A
4644 object->pager_created = TRUE;
4645 object->pager = pager;
4646 object->internal = internal;
4647 object->pager_trusted = internal;
4648 if (!internal) {
4649 /* copy strategy invalid until set by memory manager */
4650 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4651 }
91447636 4652 object->pager_control = control;
1c79356b
A
4653 object->pager_ready = FALSE;
4654
1c79356b
A
4655 vm_object_unlock(object);
4656
4657 /*
4658 * Let the pager know we're using it.
4659 */
4660
0b4e3aa0 4661 (void) memory_object_init(pager,
91447636 4662 object->pager_control,
0b4e3aa0 4663 PAGE_SIZE);
1c79356b
A
4664
4665 vm_object_lock(object);
0b4e3aa0
A
4666 if (named)
4667 object->named = TRUE;
1c79356b 4668 if (internal) {
39037602 4669 vm_object_lock_assert_exclusive(object);
1c79356b
A
4670 object->pager_ready = TRUE;
4671 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4672 }
4673
4674 object->pager_initialized = TRUE;
4675 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4676 } else {
4677 vm_object_lock(object);
4678 }
4679
4680 /*
4681 * [At this point, the object must be locked]
4682 */
4683
4684 /*
4685 * Wait for the work above to be done by the first
4686 * thread to map this object.
4687 */
4688
4689 while (!object->pager_initialized) {
9bccf70c 4690 vm_object_sleep(object,
1c79356b
A
4691 VM_OBJECT_EVENT_INITIALIZED,
4692 THREAD_UNINT);
1c79356b
A
4693 }
4694 vm_object_unlock(object);
4695
4696 XPR(XPR_VM_OBJECT,
4697 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
b0d623f7 4698 object, object->pager, internal, 0,0);
1c79356b
A
4699 return(object);
4700}
4701
4702/*
39037602 4703 * Routine: vm_object_compressor_pager_create
1c79356b
A
4704 * Purpose:
4705 * Create a memory object for an internal object.
4706 * In/out conditions:
4707 * The object is locked on entry and exit;
4708 * it may be unlocked within this call.
4709 * Limitations:
4710 * Only one thread may be performing a
39037602 4711 * vm_object_compressor_pager_create on an object at
1c79356b
A
4712 * a time. Presumably, only the pageout
4713 * daemon will be using this routine.
4714 */
4715
39236c6e
A
4716void
4717vm_object_compressor_pager_create(
39037602 4718 vm_object_t object)
39236c6e
A
4719{
4720 memory_object_t pager;
4721 vm_object_hash_entry_t entry;
4722 lck_mtx_t *lck;
fe8ab488 4723 vm_object_t pager_object = VM_OBJECT_NULL;
39236c6e
A
4724
4725 assert(object != kernel_object);
4726
4727 /*
4728 * Prevent collapse or termination by holding a paging reference
4729 */
4730
4731 vm_object_paging_begin(object);
4732 if (object->pager_created) {
4733 /*
4734 * Someone else got to it first...
4735 * wait for them to finish initializing the ports
4736 */
4737 while (!object->pager_initialized) {
4738 vm_object_sleep(object,
4739 VM_OBJECT_EVENT_INITIALIZED,
4740 THREAD_UNINT);
4741 }
4742 vm_object_paging_end(object);
4743 return;
4744 }
4745
4746 /*
4747 * Indicate that a memory object has been assigned
4748 * before dropping the lock, to prevent a race.
4749 */
4750
4751 object->pager_created = TRUE;
4752 object->paging_offset = 0;
4753
4754 vm_object_unlock(object);
4755
22ba694c
A
4756 if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
4757 (object->vo_size/PAGE_SIZE)) {
4758 panic("vm_object_compressor_pager_create(%p): "
4759 "object size 0x%llx >= 0x%llx\n",
4760 object,
4761 (uint64_t) object->vo_size,
4762 0x0FFFFFFFFULL*PAGE_SIZE);
39236c6e
A
4763 }
4764
4765 /*
4766 * Create the [internal] pager, and associate it with this object.
4767 *
4768 * We make the association here so that vm_object_enter()
4769 * can look up the object to complete initializing it. No
4770 * user will ever map this object.
4771 */
4772 {
4773 assert(object->temporary);
4774
4775 /* create our new memory object */
22ba694c
A
4776 assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
4777 (object->vo_size/PAGE_SIZE));
39236c6e 4778 (void) compressor_memory_object_create(
22ba694c 4779 (memory_object_size_t) object->vo_size,
39236c6e 4780 &pager);
22ba694c
A
4781 if (pager == NULL) {
4782 panic("vm_object_compressor_pager_create(): "
4783 "no pager for object %p size 0x%llx\n",
4784 object, (uint64_t) object->vo_size);
4785 }
39236c6e
A
4786 }
4787
4788 entry = vm_object_hash_entry_alloc(pager);
4789
fe8ab488 4790 vm_object_lock(object);
39236c6e
A
4791 lck = vm_object_hash_lock_spin(pager);
4792 vm_object_hash_insert(entry, object);
4793 vm_object_hash_unlock(lck);
fe8ab488 4794 vm_object_unlock(object);
39236c6e
A
4795
4796 /*
4797 * A reference was returned by
4798 * memory_object_create(), and it is
4799 * copied by vm_object_enter().
4800 */
4801
fe8ab488
A
4802 pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);
4803
4804 if (pager_object != object) {
4805 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
4806 }
39236c6e
A
4807
4808 /*
4809 * Drop the reference we were passed.
4810 */
4811 memory_object_deallocate(pager);
4812
4813 vm_object_lock(object);
4814
4815 /*
4816 * Release the paging reference
4817 */
4818 vm_object_paging_end(object);
4819}
4820
1c79356b
A
4821/*
4822 * Routine: vm_object_remove
4823 * Purpose:
4824 * Eliminate the pager/object association
4825 * for this pager.
4826 * Conditions:
4827 * The object cache must be locked.
4828 */
0b4e3aa0 4829__private_extern__ void
1c79356b
A
4830vm_object_remove(
4831 vm_object_t object)
4832{
0b4e3aa0 4833 memory_object_t pager;
1c79356b 4834
0b4e3aa0 4835 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
1c79356b
A
4836 vm_object_hash_entry_t entry;
4837
0b4e3aa0 4838 entry = vm_object_hash_lookup(pager, FALSE);
1c79356b
A
4839 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
4840 entry->object = VM_OBJECT_NULL;
4841 }
4842
1c79356b
A
4843}
4844
4845/*
4846 * Global variables for vm_object_collapse():
4847 *
4848 * Counts for normal collapses and bypasses.
4849 * Debugging variables, to watch or disable collapse.
4850 */
0b4e3aa0
A
4851static long object_collapses = 0;
4852static long object_bypasses = 0;
1c79356b 4853
0b4e3aa0
A
4854static boolean_t vm_object_collapse_allowed = TRUE;
4855static boolean_t vm_object_bypass_allowed = TRUE;
4856
91447636
A
4857unsigned long vm_object_collapse_encrypted = 0;
4858
fe8ab488
A
4859void vm_object_do_collapse_compressor(vm_object_t object,
4860 vm_object_t backing_object);
4861void
4862vm_object_do_collapse_compressor(
4863 vm_object_t object,
4864 vm_object_t backing_object)
4865{
4866 vm_object_offset_t new_offset, backing_offset;
4867 vm_object_size_t size;
4868
4869 vm_counters.do_collapse_compressor++;
4870
4871 vm_object_lock_assert_exclusive(object);
4872 vm_object_lock_assert_exclusive(backing_object);
4873
4874 size = object->vo_size;
4875
4876 /*
4877 * Move all compressed pages from backing_object
4878 * to the parent.
4879 */
4880
4881 for (backing_offset = object->vo_shadow_offset;
4882 backing_offset < object->vo_shadow_offset + object->vo_size;
4883 backing_offset += PAGE_SIZE) {
4884 memory_object_offset_t backing_pager_offset;
4885
4886 /* find the next compressed page at or after this offset */
4887 backing_pager_offset = (backing_offset +
4888 backing_object->paging_offset);
4889 backing_pager_offset = vm_compressor_pager_next_compressed(
4890 backing_object->pager,
4891 backing_pager_offset);
4892 if (backing_pager_offset == (memory_object_offset_t) -1) {
4893 /* no more compressed pages */
4894 break;
4895 }
4896 backing_offset = (backing_pager_offset -
4897 backing_object->paging_offset);
4898
4899 new_offset = backing_offset - object->vo_shadow_offset;
4900
4901 if (new_offset >= object->vo_size) {
4902 /* we're out of the scope of "object": done */
4903 break;
4904 }
4905
4906 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4907 (vm_compressor_pager_state_get(object->pager,
4908 (new_offset +
4909 object->paging_offset)) ==
4910 VM_EXTERNAL_STATE_EXISTS)) {
4911 /*
4912 * This page already exists in object, resident or
4913 * compressed.
4914 * We don't need this compressed page in backing_object
4915 * and it will be reclaimed when we release
4916 * backing_object.
4917 */
4918 continue;
4919 }
4920
4921 /*
4922 * backing_object has this page in the VM compressor and
4923 * we need to transfer it to object.
4924 */
4925 vm_counters.do_collapse_compressor_pages++;
4926 vm_compressor_pager_transfer(
4927 /* destination: */
4928 object->pager,
4929 (new_offset + object->paging_offset),
4930 /* source: */
4931 backing_object->pager,
4932 (backing_offset + backing_object->paging_offset));
4933 }
4934}
4935
1c79356b 4936/*
0b4e3aa0
A
4937 * Routine: vm_object_do_collapse
4938 * Purpose:
4939 * Collapse an object with the object backing it.
4940 * Pages in the backing object are moved into the
4941 * parent, and the backing object is deallocated.
4942 * Conditions:
4943 * Both objects and the cache are locked; the page
4944 * queues are unlocked.
1c79356b
A
4945 *
4946 */
0b4e3aa0 4947static void
1c79356b
A
4948vm_object_do_collapse(
4949 vm_object_t object,
4950 vm_object_t backing_object)
4951{
4952 vm_page_t p, pp;
4953 vm_object_offset_t new_offset, backing_offset;
4954 vm_object_size_t size;
4955
b0d623f7
A
4956 vm_object_lock_assert_exclusive(object);
4957 vm_object_lock_assert_exclusive(backing_object);
4958
fe8ab488
A
4959 assert(object->purgable == VM_PURGABLE_DENY);
4960 assert(backing_object->purgable == VM_PURGABLE_DENY);
4961
6d2010ae
A
4962 backing_offset = object->vo_shadow_offset;
4963 size = object->vo_size;
1c79356b 4964
1c79356b
A
4965 /*
4966 * Move all in-memory pages from backing_object
4967 * to the parent. Pages that have been paged out
4968 * will be overwritten by any of the parent's
4969 * pages that shadow them.
4970 */
4971
39037602 4972 while (!vm_page_queue_empty(&backing_object->memq)) {
1c79356b 4973
39037602 4974 p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
1c79356b
A
4975
4976 new_offset = (p->offset - backing_offset);
4977
4978 assert(!p->busy || p->absent);
91447636 4979
1c79356b
A
4980 /*
4981 * If the parent has a page here, or if
4982 * this page falls outside the parent,
4983 * dispose of it.
4984 *
4985 * Otherwise, move it as planned.
4986 */
4987
4988 if (p->offset < backing_offset || new_offset >= size) {
4989 VM_PAGE_FREE(p);
4990 } else {
91447636
A
4991 /*
4992 * ENCRYPTED SWAP:
4993 * The encryption key includes the "pager" and the
2d21ac55
A
4994 * "paging_offset". These will not change during the
4995 * object collapse, so we can just move an encrypted
4996 * page from one object to the other in this case.
4997 * We can't decrypt the page here, since we can't drop
91447636 4998 * the object lock.
91447636 4999 */
2d21ac55
A
5000 if (p->encrypted) {
5001 vm_object_collapse_encrypted++;
5002 }
1c79356b
A
5003 pp = vm_page_lookup(object, new_offset);
5004 if (pp == VM_PAGE_NULL) {
5005
fe8ab488
A
5006 if (VM_COMPRESSOR_PAGER_STATE_GET(object,
5007 new_offset)
5008 == VM_EXTERNAL_STATE_EXISTS) {
5009 /*
5010 * Parent object has this page
5011 * in the VM compressor.
5012 * Throw away the backing
5013 * object's page.
5014 */
5015 VM_PAGE_FREE(p);
5016 } else {
5017 /*
5018 * Parent now has no page.
5019 * Move the backing object's page
5020 * up.
5021 */
5022 vm_page_rename(p, object, new_offset,
5023 TRUE);
5024 }
1c79356b
A
5025 } else {
5026 assert(! pp->absent);
5027
5028 /*
5029 * Parent object has a real page.
5030 * Throw away the backing object's
5031 * page.
5032 */
5033 VM_PAGE_FREE(p);
5034 }
5035 }
5036 }
1c79356b 5037
fe8ab488
A
5038 if (vm_object_collapse_compressor_allowed &&
5039 object->pager != MEMORY_OBJECT_NULL &&
5040 backing_object->pager != MEMORY_OBJECT_NULL) {
5041
5042 /* move compressed pages from backing_object to object */
5043 vm_object_do_collapse_compressor(object, backing_object);
5044
5045 } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
1c79356b
A
5046 vm_object_hash_entry_t entry;
5047
fe8ab488
A
5048 assert((!object->pager_created &&
5049 (object->pager == MEMORY_OBJECT_NULL)) ||
5050 (!backing_object->pager_created &&
5051 (backing_object->pager == MEMORY_OBJECT_NULL)));
1c79356b
A
5052 /*
5053 * Move the pager from backing_object to object.
5054 *
5055 * XXX We're only using part of the paging space
5056 * for keeps now... we ought to discard the
5057 * unused portion.
5058 */
5059
55e303ae 5060 assert(!object->paging_in_progress);
b0d623f7 5061 assert(!object->activity_in_progress);
fe8ab488
A
5062 assert(!object->pager_created);
5063 assert(object->pager == NULL);
1c79356b 5064 object->pager = backing_object->pager;
b0d623f7
A
5065
5066 if (backing_object->hashed) {
5067 lck_mtx_t *lck;
5068
5069 lck = vm_object_hash_lock_spin(backing_object->pager);
5070 entry = vm_object_hash_lookup(object->pager, FALSE);
5071 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
5072 entry->object = object;
5073 vm_object_hash_unlock(lck);
5074
5075 object->hashed = TRUE;
5076 }
1c79356b 5077 object->pager_created = backing_object->pager_created;
91447636 5078 object->pager_control = backing_object->pager_control;
1c79356b
A
5079 object->pager_ready = backing_object->pager_ready;
5080 object->pager_initialized = backing_object->pager_initialized;
1c79356b
A
5081 object->paging_offset =
5082 backing_object->paging_offset + backing_offset;
91447636
A
5083 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5084 memory_object_control_collapse(object->pager_control,
0b4e3aa0 5085 object);
1c79356b 5086 }
fe8ab488
A
5087 /* the backing_object has lost its pager: reset all fields */
5088 backing_object->pager_created = FALSE;
5089 backing_object->pager_control = NULL;
5090 backing_object->pager_ready = FALSE;
5091 backing_object->paging_offset = 0;
5092 backing_object->pager = NULL;
1c79356b 5093 }
1c79356b
A
5094 /*
5095 * Object now shadows whatever backing_object did.
5096 * Note that the reference to backing_object->shadow
5097 * moves from within backing_object to within object.
5098 */
5099
91447636
A
5100 assert(!object->phys_contiguous);
5101 assert(!backing_object->phys_contiguous);
1c79356b 5102 object->shadow = backing_object->shadow;
91447636 5103 if (object->shadow) {
6d2010ae 5104 object->vo_shadow_offset += backing_object->vo_shadow_offset;
fe8ab488
A
5105 /* "backing_object" gave its shadow to "object" */
5106 backing_object->shadow = VM_OBJECT_NULL;
5107 backing_object->vo_shadow_offset = 0;
91447636
A
5108 } else {
5109 /* no shadow, therefore no shadow offset... */
6d2010ae 5110 object->vo_shadow_offset = 0;
91447636 5111 }
1c79356b 5112 assert((object->shadow == VM_OBJECT_NULL) ||
55e303ae 5113 (object->shadow->copy != backing_object));
1c79356b
A
5114
5115 /*
5116 * Discard backing_object.
5117 *
5118 * Since the backing object has no pages, no
5119 * pager left, and no object references within it,
5120 * all that is necessary is to dispose of it.
5121 */
fe8ab488 5122 object_collapses++;
1c79356b 5123
fe8ab488
A
5124 assert(backing_object->ref_count == 1);
5125 assert(backing_object->resident_page_count == 0);
5126 assert(backing_object->paging_in_progress == 0);
5127 assert(backing_object->activity_in_progress == 0);
5128 assert(backing_object->shadow == VM_OBJECT_NULL);
5129 assert(backing_object->vo_shadow_offset == 0);
5130
5131 if (backing_object->pager != MEMORY_OBJECT_NULL) {
5132 /* ... unless it has a pager; need to terminate pager too */
5133 vm_counters.do_collapse_terminate++;
5134 if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
5135 vm_counters.do_collapse_terminate_failure++;
5136 }
5137 return;
5138 }
5139
5140 assert(backing_object->pager == NULL);
1c79356b 5141
1c79356b
A
5142 backing_object->alive = FALSE;
5143 vm_object_unlock(backing_object);
5144
5145 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
b0d623f7 5146 backing_object, 0,0,0,0);
1c79356b 5147
fe8ab488
A
5148#if VM_OBJECT_TRACKING
5149 if (vm_object_tracking_inited) {
5150 btlog_remove_entries_for_element(vm_object_tracking_btlog,
5151 backing_object);
5152 }
5153#endif /* VM_OBJECT_TRACKING */
5154
2d21ac55
A
5155 vm_object_lock_destroy(backing_object);
5156
91447636 5157 zfree(vm_object_zone, backing_object);
1c79356b 5158
1c79356b
A
5159}
5160
0b4e3aa0 5161static void
1c79356b
A
5162vm_object_do_bypass(
5163 vm_object_t object,
5164 vm_object_t backing_object)
5165{
5166 /*
5167 * Make the parent shadow the next object
5168 * in the chain.
5169 */
5170
b0d623f7 5171 vm_object_lock_assert_exclusive(object);
2d21ac55
A
5172 vm_object_lock_assert_exclusive(backing_object);
5173
1c79356b
A
5174#if TASK_SWAPPER
5175 /*
5176 * Do object reference in-line to
5177 * conditionally increment shadow's
5178 * residence count. If object is not
5179 * resident, leave residence count
5180 * on shadow alone.
5181 */
5182 if (backing_object->shadow != VM_OBJECT_NULL) {
5183 vm_object_lock(backing_object->shadow);
2d21ac55 5184 vm_object_lock_assert_exclusive(backing_object->shadow);
1c79356b
A
5185 backing_object->shadow->ref_count++;
5186 if (object->res_count != 0)
5187 vm_object_res_reference(backing_object->shadow);
5188 vm_object_unlock(backing_object->shadow);
5189 }
5190#else /* TASK_SWAPPER */
5191 vm_object_reference(backing_object->shadow);
5192#endif /* TASK_SWAPPER */
5193
91447636
A
5194 assert(!object->phys_contiguous);
5195 assert(!backing_object->phys_contiguous);
1c79356b 5196 object->shadow = backing_object->shadow;
91447636 5197 if (object->shadow) {
6d2010ae 5198 object->vo_shadow_offset += backing_object->vo_shadow_offset;
91447636
A
5199 } else {
5200 /* no shadow, therefore no shadow offset... */
6d2010ae 5201 object->vo_shadow_offset = 0;
91447636 5202 }
1c79356b
A
5203
5204 /*
5205 * Backing object might have had a copy pointer
5206 * to us. If it did, clear it.
5207 */
5208 if (backing_object->copy == object) {
5209 backing_object->copy = VM_OBJECT_NULL;
5210 }
5211
5212 /*
5213 * Drop the reference count on backing_object.
5214#if TASK_SWAPPER
5215 * Since its ref_count was at least 2, it
5216 * will not vanish; so we don't need to call
5217 * vm_object_deallocate.
593a1d5f 5218 * [with a caveat for "named" objects]
1c79356b
A
5219 *
5220 * The res_count on the backing object is
5221 * conditionally decremented. It's possible
5222 * (via vm_pageout_scan) to get here with
5223 * a "swapped" object, which has a 0 res_count,
5224 * in which case, the backing object res_count
5225 * is already down by one.
5226#else
5227 * Don't call vm_object_deallocate unless
5228 * ref_count drops to zero.
5229 *
5230 * The ref_count can drop to zero here if the
5231 * backing object could be bypassed but not
5232 * collapsed, such as when the backing object
5233 * is temporary and cachable.
5234#endif
5235 */
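/*
 * Editor's note (descriptive comment, not in the original source):
 * the test below asks whether references other than the parent's (and,
 * for "named" objects, beyond the name reference as well) will remain
 * after this one is dropped. If so, a simple decrement under the lock
 * is enough. Otherwise the release has to go through
 * vm_object_deallocate(), which may cache or terminate the backing
 * object and cannot be called with these locks held, hence the
 * unlock/deallocate/relock sequence in the else branch.
 */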
593a1d5f
A
5236 if (backing_object->ref_count > 2 ||
5237 (!backing_object->named && backing_object->ref_count > 1)) {
2d21ac55 5238 vm_object_lock_assert_exclusive(backing_object);
1c79356b
A
5239 backing_object->ref_count--;
5240#if TASK_SWAPPER
5241 if (object->res_count != 0)
5242 vm_object_res_deallocate(backing_object);
5243 assert(backing_object->ref_count > 0);
5244#endif /* TASK_SWAPPER */
5245 vm_object_unlock(backing_object);
5246 } else {
5247
5248 /*
5249 * Drop locks so that we can deallocate
5250 * the backing object.
5251 */
5252
5253#if TASK_SWAPPER
5254 if (object->res_count == 0) {
5255 /* XXX get a reference for the deallocate below */
5256 vm_object_res_reference(backing_object);
5257 }
5258#endif /* TASK_SWAPPER */
316670eb
A
5259 /*
5260 * vm_object_collapse (the caller of this function) is
5261 * now called from contexts that may not guarantee that a
5262 * valid reference is held on the object... w/o a valid
5263 * reference, it is unsafe and unwise (you will definitely
5264 * regret it) to unlock the object and then retake the lock
5265 * since the object may be terminated and recycled in between.
5266 * The "activity_in_progress" reference will keep the object
5267 * 'stable'.
5268 */
5269 vm_object_activity_begin(object);
1c79356b 5270 vm_object_unlock(object);
316670eb 5271
1c79356b
A
5272 vm_object_unlock(backing_object);
5273 vm_object_deallocate(backing_object);
5274
5275 /*
5276 * Relock object. We don't have to reverify
5277 * its state since vm_object_collapse will
5278 * do that for us as it starts at the
5279 * top of its loop.
5280 */
5281
5282 vm_object_lock(object);
316670eb 5283 vm_object_activity_end(object);
1c79356b
A
5284 }
5285
5286 object_bypasses++;
5287}
0b4e3aa0 5288
1c79356b
A
5289
5290/*
5291 * vm_object_collapse:
5292 *
5293 * Perform an object collapse or an object bypass if appropriate.
5294 * The real work of collapsing and bypassing is performed in
5295 * the routines vm_object_do_collapse and vm_object_do_bypass.
5296 *
5297 * Requires that the object be locked and the page queues be unlocked.
5298 *
5299 */
91447636
A
5300static unsigned long vm_object_collapse_calls = 0;
5301static unsigned long vm_object_collapse_objects = 0;
5302static unsigned long vm_object_collapse_do_collapse = 0;
5303static unsigned long vm_object_collapse_do_bypass = 0;
99c3a104 5304
0b4e3aa0 5305__private_extern__ void
1c79356b 5306vm_object_collapse(
39037602
A
5307 vm_object_t object,
5308 vm_object_offset_t hint_offset,
0c530ab8 5309 boolean_t can_bypass)
1c79356b 5310{
39037602
A
5311 vm_object_t backing_object;
5312 unsigned int rcount;
5313 unsigned int size;
91447636 5314 vm_object_t original_object;
b0d623f7
A
5315 int object_lock_type;
5316 int backing_object_lock_type;
91447636
A
5317
5318 vm_object_collapse_calls++;
0b4e3aa0 5319
0c530ab8
A
5320 if (! vm_object_collapse_allowed &&
5321 ! (can_bypass && vm_object_bypass_allowed)) {
1c79356b
A
5322 return;
5323 }
5324
5325 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
b0d623f7 5326 object, 0,0,0,0);
1c79356b 5327
91447636
A
5328 if (object == VM_OBJECT_NULL)
5329 return;
5330
5331 original_object = object;
5332
b0d623f7
A
5333 /*
5334 * The top object was locked "exclusive" by the caller.
5335 * In the first pass, to determine if we can collapse the shadow chain,
5336 * take a "shared" lock on the shadow objects. If we can collapse,
5337 * we'll have to go down the chain again with exclusive locks.
5338 */
5339 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5340 backing_object_lock_type = OBJECT_LOCK_SHARED;
5341
5342retry:
5343 object = original_object;
5344 vm_object_lock_assert_exclusive(object);
5345
1c79356b 5346 while (TRUE) {
91447636 5347 vm_object_collapse_objects++;
1c79356b
A
5348 /*
5349 * Verify that the conditions are right for either
5350 * collapse or bypass:
1c79356b 5351 */
1c79356b
A
5352
5353 /*
5354 * There is a backing object, and
5355 */
5356
91447636
A
5357 backing_object = object->shadow;
5358 if (backing_object == VM_OBJECT_NULL) {
5359 if (object != original_object) {
5360 vm_object_unlock(object);
5361 }
1c79356b 5362 return;
91447636 5363 }
b0d623f7
A
5364 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
5365 vm_object_lock_shared(backing_object);
5366 } else {
5367 vm_object_lock(backing_object);
5368 }
5369
91447636
A
5370 /*
5371 * No pages in the object are currently
5372 * being paged out, and
5373 */
b0d623f7
A
5374 if (object->paging_in_progress != 0 ||
5375 object->activity_in_progress != 0) {
91447636 5376 /* try and collapse the rest of the shadow chain */
91447636
A
5377 if (object != original_object) {
5378 vm_object_unlock(object);
5379 }
5380 object = backing_object;
b0d623f7 5381 object_lock_type = backing_object_lock_type;
91447636
A
5382 continue;
5383 }
5384
1c79356b
A
5385 /*
5386 * ...
5387 * The backing object is not read_only,
5388 * and no pages in the backing object are
5389 * currently being paged out.
5390 * The backing object is internal.
5391 *
5392 */
5393
5394 if (!backing_object->internal ||
b0d623f7
A
5395 backing_object->paging_in_progress != 0 ||
5396 backing_object->activity_in_progress != 0) {
91447636
A
5397 /* try and collapse the rest of the shadow chain */
5398 if (object != original_object) {
5399 vm_object_unlock(object);
5400 }
5401 object = backing_object;
b0d623f7 5402 object_lock_type = backing_object_lock_type;
91447636 5403 continue;
1c79356b 5404 }
fe8ab488
A
5405
5406 /*
5407 * Purgeable objects are not supposed to engage in
5408 * copy-on-write activities, so should not have
5409 * any shadow objects or be a shadow object to another
5410 * object.
5411 * Collapsing a purgeable object would require some
5412 * updates to the purgeable compressed ledgers.
5413 */
5414 if (object->purgable != VM_PURGABLE_DENY ||
5415 backing_object->purgable != VM_PURGABLE_DENY) {
5416 panic("vm_object_collapse() attempting to collapse "
5417 "purgeable object: %p(%d) %p(%d)\n",
5418 object, object->purgable,
5419 backing_object, backing_object->purgable);
5420 /* try and collapse the rest of the shadow chain */
5421 if (object != original_object) {
5422 vm_object_unlock(object);
5423 }
5424 object = backing_object;
5425 object_lock_type = backing_object_lock_type;
5426 continue;
5427 }
1c79356b
A
5428
5429 /*
5430 * The backing object can't be a copy-object:
5431 * the shadow_offset for the copy-object must stay
5432 * as 0. Furthermore (for the 'we have all the
5433 * pages' case), if we bypass backing_object and
5434 * just shadow the next object in the chain, old
5435 * pages from that object would then have to be copied
5436 * BOTH into the (former) backing_object and into the
5437 * parent object.
5438 */
5439 if (backing_object->shadow != VM_OBJECT_NULL &&
55e303ae 5440 backing_object->shadow->copy == backing_object) {
91447636
A
5441 /* try and collapse the rest of the shadow chain */
5442 if (object != original_object) {
5443 vm_object_unlock(object);
5444 }
5445 object = backing_object;
b0d623f7 5446 object_lock_type = backing_object_lock_type;
91447636 5447 continue;
1c79356b
A
5448 }
5449
5450 /*
5451 * We can now try to either collapse the backing
5452 * object (if the parent is the only reference to
5453 * it) or (perhaps) remove the parent's reference
5454 * to it.
1c79356b 5455 *
0b4e3aa0
A
5456 * If there is exactly one reference to the backing
5457 * object, we may be able to collapse it into the
5458 * parent.
1c79356b 5459 *
55e303ae
A
5460 * As long as one of the objects is still not known
5461 * to the pager, we can collapse them.
1c79356b 5462 */
1c79356b 5463 if (backing_object->ref_count == 1 &&
fe8ab488
A
5464 (vm_object_collapse_compressor_allowed ||
5465 !object->pager_created
39236c6e 5466 || (!backing_object->pager_created)
55e303ae 5467 ) && vm_object_collapse_allowed) {
1c79356b 5468
1c79356b 5469 /*
b0d623f7 5470 * We need the exclusive lock on the VM objects.
1c79356b 5471 */
b0d623f7
A
5472 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5473 /*
5474 * We have an object and its shadow locked
5475 * "shared". We can't just upgrade the locks
5476 * to "exclusive", as some other thread might
5477 * also have these objects locked "shared" and
5478 * attempt to upgrade one or the other to
5479 * "exclusive". The upgrades would block
5480 * forever waiting for the other "shared" locks
5481 * to get released.
5482 * So we have to release the locks and go
5483 * down the shadow chain again (since it could
5484 * have changed) with "exclusive" locking.
5485 */
1c79356b 5486 vm_object_unlock(backing_object);
b0d623f7
A
5487 if (object != original_object)
5488 vm_object_unlock(object);
5489 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5490 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5491 goto retry;
1c79356b
A
5492 }
5493
b0d623f7
A
5494 XPR(XPR_VM_OBJECT,
5495 "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
5496 backing_object, object,
5497 backing_object->pager,
5498 backing_object->pager_control, 0);
5499
1c79356b
A
5500 /*
5501 * Collapse the object with its backing
5502 * object, and try again with the object's
5503 * new backing object.
5504 */
5505
5506 vm_object_do_collapse(object, backing_object);
91447636 5507 vm_object_collapse_do_collapse++;
1c79356b
A
5508 continue;
5509 }
5510
1c79356b
A
5511 /*
5512 * Collapsing the backing object was not possible
5513 * or permitted, so let's try bypassing it.
5514 */
5515
0c530ab8 5516 if (! (can_bypass && vm_object_bypass_allowed)) {
91447636
A
5517 /* try and collapse the rest of the shadow chain */
5518 if (object != original_object) {
5519 vm_object_unlock(object);
5520 }
5521 object = backing_object;
b0d623f7 5522 object_lock_type = backing_object_lock_type;
91447636 5523 continue;
1c79356b
A
5524 }
5525
0b4e3aa0 5526
1c79356b 5527 /*
55e303ae
A
5528 * If the object doesn't have all its pages present,
5529 * we have to make sure no pages in the backing object
5530 * "show through" before bypassing it.
1c79356b 5531 */
39236c6e 5532 size = (unsigned int)atop(object->vo_size);
55e303ae 5533 rcount = object->resident_page_count;
99c3a104 5534
55e303ae 5535 if (rcount != size) {
55e303ae
A
5536 vm_object_offset_t offset;
5537 vm_object_offset_t backing_offset;
5538 unsigned int backing_rcount;
55e303ae
A
5539
5540 /*
5541 * If the backing object has a pager but no pagemap,
5542 * then we cannot bypass it, because we don't know
5543 * what pages it has.
5544 */
39037602 5545 if (backing_object->pager_created) {
91447636
A
5546 /* try and collapse the rest of the shadow chain */
5547 if (object != original_object) {
5548 vm_object_unlock(object);
5549 }
5550 object = backing_object;
b0d623f7 5551 object_lock_type = backing_object_lock_type;
91447636 5552 continue;
55e303ae 5553 }
1c79356b 5554
55e303ae
A
5555 /*
5556 * If the object has a pager but no pagemap,
5557 * then we cannot bypass it, because we don't know
5558 * what pages it has.
5559 */
39037602 5560 if (object->pager_created) {
91447636
A
5561 /* try and collapse the rest of the shadow chain */
5562 if (object != original_object) {
5563 vm_object_unlock(object);
5564 }
5565 object = backing_object;
b0d623f7 5566 object_lock_type = backing_object_lock_type;
91447636 5567 continue;
55e303ae 5568 }
0b4e3aa0 5569
99c3a104
A
5570 backing_offset = object->vo_shadow_offset;
5571 backing_rcount = backing_object->resident_page_count;
5572
5573 if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
39236c6e 5574 /*
99c3a104
A
5575 * we have enough pages in the backing object to guarantee that
5576 * at least 1 of them must be 'uncovered' by a resident page
5577 * in the object we're evaluating, so move on and
5578 * try to collapse the rest of the shadow chain
5579 */
39236c6e
A
5580 if (object != original_object) {
5581 vm_object_unlock(object);
5582 }
5583 object = backing_object;
5584 object_lock_type = backing_object_lock_type;
5585 continue;
99c3a104
A
5586 }
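/*
 * Editor's note (illustrative, not in the original source): a worked
 * example of the check above. Suppose the parent covers size = 100
 * pages with rcount = 90 resident, and the backing object spans
 * atop(vo_size) = 120 pages with backing_rcount = 115 resident. At
 * most 120 - 100 = 20 of those resident pages can lie outside the
 * parent's window, so at least 115 - 20 = 95 lie inside it. Since
 * 95 > 90, at least one backing page cannot be covered by a resident
 * parent page, a page would "show through", and the scan gives up on
 * bypassing this backing object and moves down the shadow chain.
 */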
5587
55e303ae
A
5588 /*
5589 * If all of the pages in the backing object are
5590 * shadowed by the parent object, the parent
5591 * object no longer has to shadow the backing
5592 * object; it can shadow the next one in the
5593 * chain.
5594 *
5595 * If the backing object has existence info,
 5596	 * we must examine its existence info
5597 * as well.
5598 *
5599 */
1c79356b 5600
39236c6e
A
5601#define EXISTS_IN_OBJECT(obj, off, rc) \
5602 ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
5603 == VM_EXTERNAL_STATE_EXISTS) || \
99c3a104 5604 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
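/*
 * Editor's note (descriptive comment, not in the original source):
 * EXISTS_IN_OBJECT(obj, off, rc) reports whether "obj" backs the page at
 * offset "off", either as a page the compressor pager knows about
 * (VM_COMPRESSOR_PAGER_STATE_GET() returning VM_EXTERNAL_STATE_EXISTS) or
 * as a resident page found by vm_page_lookup(). "rc" is a running budget
 * of resident pages still unaccounted for: the lookup is skipped once the
 * budget reaches zero, and the budget is post-decremented each time a
 * resident page is found, so the scans below can stop early once every
 * resident page has been matched.
 */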
55e303ae
A
5605
5606 /*
5607 * Check the hint location first
5608 * (since it is often the quickest way out of here).
5609 */
5610 if (object->cow_hint != ~(vm_offset_t)0)
5611 hint_offset = (vm_object_offset_t)object->cow_hint;
5612 else
5613 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
5614 (hint_offset - 8 * PAGE_SIZE_64) : 0;
5615
5616 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
5617 backing_offset, backing_rcount) &&
5618 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
5619 /* dependency right at the hint */
b0d623f7 5620 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
91447636
A
5621 /* try and collapse the rest of the shadow chain */
5622 if (object != original_object) {
5623 vm_object_unlock(object);
5624 }
5625 object = backing_object;
b0d623f7 5626 object_lock_type = backing_object_lock_type;
91447636 5627 continue;
0b4e3aa0 5628 }
55e303ae
A
5629
5630 /*
5631 * If the object's window onto the backing_object
5632 * is large compared to the number of resident
5633 * pages in the backing object, it makes sense to
5634 * walk the backing_object's resident pages first.
5635 *
99c3a104
A
5636 * NOTE: Pages may be in both the existence map and/or
5637 * resident, so if we don't find a dependency while
5638 * walking the backing object's resident page list
5639 * directly, and there is an existence map, we'll have
5640 * to run the offset based 2nd pass. Because we may
5641 * have to run both passes, we need to be careful
5642 * not to decrement 'rcount' in the 1st pass
55e303ae 5643 */
99c3a104 5644 if (backing_rcount && backing_rcount < (size / 8)) {
55e303ae
A
5645 unsigned int rc = rcount;
5646 vm_page_t p;
5647
5648 backing_rcount = backing_object->resident_page_count;
39037602 5649 p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
55e303ae 5650 do {
55e303ae 5651 offset = (p->offset - backing_offset);
99c3a104 5652
6d2010ae 5653 if (offset < object->vo_size &&
55e303ae
A
5654 offset != hint_offset &&
5655 !EXISTS_IN_OBJECT(object, offset, rc)) {
5656 /* found a dependency */
b0d623f7
A
5657 object->cow_hint = (vm_offset_t) offset; /* atomic */
5658
91447636 5659 break;
55e303ae 5660 }
39037602 5661 p = (vm_page_t) vm_page_queue_next(&p->listq);
55e303ae
A
5662
5663 } while (--backing_rcount);
91447636
A
5664 if (backing_rcount != 0 ) {
5665 /* try and collapse the rest of the shadow chain */
5666 if (object != original_object) {
5667 vm_object_unlock(object);
5668 }
5669 object = backing_object;
b0d623f7 5670 object_lock_type = backing_object_lock_type;
91447636
A
5671 continue;
5672 }
0b4e3aa0 5673 }
55e303ae
A
5674
5675 /*
5676 * Walk through the offsets looking for pages in the
5677 * backing object that show through to the object.
5678 */
39037602 5679 if (backing_rcount) {
55e303ae
A
5680 offset = hint_offset;
5681
5682 while((offset =
6d2010ae 5683 (offset + PAGE_SIZE_64 < object->vo_size) ?
55e303ae
A
5684 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5685
55e303ae
A
5686 if (EXISTS_IN_OBJECT(backing_object, offset +
5687 backing_offset, backing_rcount) &&
5688 !EXISTS_IN_OBJECT(object, offset, rcount)) {
5689 /* found a dependency */
b0d623f7 5690 object->cow_hint = (vm_offset_t) offset; /* atomic */
91447636 5691 break;
55e303ae
A
5692 }
5693 }
91447636
A
5694 if (offset != hint_offset) {
5695 /* try and collapse the rest of the shadow chain */
5696 if (object != original_object) {
5697 vm_object_unlock(object);
5698 }
5699 object = backing_object;
b0d623f7 5700 object_lock_type = backing_object_lock_type;
91447636
A
5701 continue;
5702 }
0b4e3aa0
A
5703 }
5704 }
1c79356b 5705
b0d623f7
A
5706 /*
5707 * We need "exclusive" locks on the 2 VM objects.
5708 */
5709 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5710 vm_object_unlock(backing_object);
5711 if (object != original_object)
5712 vm_object_unlock(object);
5713 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5714 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5715 goto retry;
5716 }
5717
55e303ae
A
5718 /* reset the offset hint for any objects deeper in the chain */
5719 object->cow_hint = (vm_offset_t)0;
1c79356b
A
5720
5721 /*
5722 * All interesting pages in the backing object
5723 * already live in the parent or its pager.
5724 * Thus we can bypass the backing object.
5725 */
5726
5727 vm_object_do_bypass(object, backing_object);
91447636 5728 vm_object_collapse_do_bypass++;
1c79356b
A
5729
5730 /*
5731 * Try again with this object's new backing object.
5732 */
5733
5734 continue;
5735 }
91447636 5736
fe8ab488
A
5737 /* NOT REACHED */
5738 /*
91447636
A
5739 if (object != original_object) {
5740 vm_object_unlock(object);
5741 }
fe8ab488 5742 */
1c79356b
A
5743}
5744
5745/*
5746 * Routine: vm_object_page_remove: [internal]
5747 * Purpose:
5748 * Removes all physical pages in the specified
5749 * object range from the object's list of pages.
5750 *
5751 * In/out conditions:
5752 * The object must be locked.
5753 * The object must not have paging_in_progress, usually
5754 * guaranteed by not having a pager.
5755 */
5756unsigned int vm_object_page_remove_lookup = 0;
5757unsigned int vm_object_page_remove_iterate = 0;
5758
0b4e3aa0 5759__private_extern__ void
1c79356b 5760vm_object_page_remove(
39037602
A
5761 vm_object_t object,
5762 vm_object_offset_t start,
5763 vm_object_offset_t end)
1c79356b 5764{
39037602 5765 vm_page_t p, next;
1c79356b
A
5766
5767 /*
5768 * One and two page removals are most popular.
5769 * The factor of 16 here is somewhat arbitrary.
5770 * It balances vm_object_lookup vs iteration.
5771 */
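/*
 * Editor's note (illustrative, not in the original source): with the
 * factor of 16, removing a 4-page range from an object holding 1000
 * resident pages (4 < 1000/16 = 62) takes the vm_page_lookup() path,
 * costing four hash probes instead of a walk over a 1000-entry page
 * list. Removing a 4096-page range from an object with only 200
 * resident pages takes the iteration path, one pass over the memq
 * instead of probing 4096 mostly-empty offsets.
 */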
5772
55e303ae 5773 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
1c79356b
A
5774 vm_object_page_remove_lookup++;
5775
5776 for (; start < end; start += PAGE_SIZE_64) {
5777 p = vm_page_lookup(object, start);
5778 if (p != VM_PAGE_NULL) {
39037602 5779 assert(!p->cleaning && !p->laundry);
2d21ac55 5780 if (!p->fictitious && p->pmapped)
39037602 5781 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
1c79356b
A
5782 VM_PAGE_FREE(p);
5783 }
5784 }
5785 } else {
5786 vm_object_page_remove_iterate++;
5787
39037602
A
5788 p = (vm_page_t) vm_page_queue_first(&object->memq);
5789 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5790 next = (vm_page_t) vm_page_queue_next(&p->listq);
1c79356b 5791 if ((start <= p->offset) && (p->offset < end)) {
39037602 5792 assert(!p->cleaning && !p->laundry);
2d21ac55 5793 if (!p->fictitious && p->pmapped)
39037602 5794 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
1c79356b
A
5795 VM_PAGE_FREE(p);
5796 }
5797 p = next;
5798 }
5799 }
5800}
5801
0b4e3aa0 5802
1c79356b
A
5803/*
5804 * Routine: vm_object_coalesce
5805 * Function: Coalesces two objects backing up adjoining
5806 * regions of memory into a single object.
5807 *
5808 * returns TRUE if objects were combined.
5809 *
5810 * NOTE: Only works at the moment if the second object is NULL -
5811 * if it's not, which object do we lock first?
5812 *
5813 * Parameters:
5814 * prev_object First object to coalesce
5815 * prev_offset Offset into prev_object
 5816	 *	next_object	Second object to coalesce
5817 * next_offset Offset into next_object
5818 *
5819 * prev_size Size of reference to prev_object
5820 * next_size Size of reference to next_object
5821 *
5822 * Conditions:
5823 * The object(s) must *not* be locked. The map must be locked
5824 * to preserve the reference to the object(s).
5825 */
0b4e3aa0 5826static int vm_object_coalesce_count = 0;
1c79356b 5827
0b4e3aa0 5828__private_extern__ boolean_t
1c79356b 5829vm_object_coalesce(
39037602 5830 vm_object_t prev_object,
1c79356b
A
5831 vm_object_t next_object,
5832 vm_object_offset_t prev_offset,
91447636 5833 __unused vm_object_offset_t next_offset,
1c79356b
A
5834 vm_object_size_t prev_size,
5835 vm_object_size_t next_size)
5836{
5837 vm_object_size_t newsize;
5838
5839#ifdef lint
5840 next_offset++;
5841#endif /* lint */
5842
5843 if (next_object != VM_OBJECT_NULL) {
5844 return(FALSE);
5845 }
5846
5847 if (prev_object == VM_OBJECT_NULL) {
5848 return(TRUE);
5849 }
5850
5851 XPR(XPR_VM_OBJECT,
5852 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
b0d623f7 5853 prev_object, prev_offset, prev_size, next_size, 0);
1c79356b
A
5854
5855 vm_object_lock(prev_object);
5856
5857 /*
5858 * Try to collapse the object first
5859 */
0c530ab8 5860 vm_object_collapse(prev_object, prev_offset, TRUE);
1c79356b
A
5861
5862 /*
5863 * Can't coalesce if pages not mapped to
 5864	 *	prev_entry may be in use in any way:
5865 * . more than one reference
5866 * . paged out
5867 * . shadows another object
5868 * . has a copy elsewhere
2d21ac55 5869 * . is purgeable
1c79356b
A
5870 * . paging references (pages might be in page-list)
5871 */
5872
5873 if ((prev_object->ref_count > 1) ||
5874 prev_object->pager_created ||
5875 (prev_object->shadow != VM_OBJECT_NULL) ||
5876 (prev_object->copy != VM_OBJECT_NULL) ||
5877 (prev_object->true_share != FALSE) ||
2d21ac55 5878 (prev_object->purgable != VM_PURGABLE_DENY) ||
b0d623f7
A
5879 (prev_object->paging_in_progress != 0) ||
5880 (prev_object->activity_in_progress != 0)) {
1c79356b
A
5881 vm_object_unlock(prev_object);
5882 return(FALSE);
5883 }
5884
5885 vm_object_coalesce_count++;
5886
5887 /*
5888 * Remove any pages that may still be in the object from
5889 * a previous deallocation.
5890 */
5891 vm_object_page_remove(prev_object,
5892 prev_offset + prev_size,
5893 prev_offset + prev_size + next_size);
5894
5895 /*
5896 * Extend the object if necessary.
5897 */
5898 newsize = prev_offset + prev_size + next_size;
6d2010ae 5899 if (newsize > prev_object->vo_size) {
6d2010ae 5900 prev_object->vo_size = newsize;
1c79356b
A
5901 }
5902
5903 vm_object_unlock(prev_object);
5904 return(TRUE);
5905}
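/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original source. It shows the call shape a map-layer caller might use
 * when growing an anonymous mapping in place: the appended region has no
 * object of its own yet, so next_object is VM_OBJECT_NULL and next_offset is
 * ignored. The function and parameter names are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static boolean_t
example_try_extend_in_place(
	vm_object_t		prev_object,	/* object backing the existing mapping */
	vm_object_offset_t	prev_offset,	/* where the mapping starts in that object */
	vm_object_size_t	prev_size,	/* current size of the mapping */
	vm_object_size_t	extra_size)	/* bytes being appended */
{
	/*
	 * On TRUE, prev_object has (if necessary) been extended to cover
	 * [prev_offset, prev_offset + prev_size + extra_size) and any stale
	 * pages left over in the appended range have been removed.
	 */
	return vm_object_coalesce(prev_object,
				  VM_OBJECT_NULL,	  /* next_object: must be NULL today */
				  prev_offset,
				  (vm_object_offset_t) 0, /* next_offset: unused */
				  prev_size,
				  extra_size);
}
#endif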
5906
0b4e3aa0
A
5907kern_return_t
5908vm_object_populate_with_private(
55e303ae 5909 vm_object_t object,
0b4e3aa0 5910 vm_object_offset_t offset,
55e303ae
A
5911 ppnum_t phys_page,
5912 vm_size_t size)
0b4e3aa0 5913{
55e303ae 5914 ppnum_t base_page;
0b4e3aa0
A
5915 vm_object_offset_t base_offset;
5916
5917
316670eb 5918 if (!object->private)
0b4e3aa0
A
5919 return KERN_FAILURE;
5920
55e303ae 5921 base_page = phys_page;
0b4e3aa0
A
5922
5923 vm_object_lock(object);
316670eb
A
5924
5925 if (!object->phys_contiguous) {
0b4e3aa0 5926 vm_page_t m;
316670eb
A
5927
5928 if ((base_offset = trunc_page_64(offset)) != offset) {
0b4e3aa0
A
5929 vm_object_unlock(object);
5930 return KERN_FAILURE;
5931 }
5932 base_offset += object->paging_offset;
316670eb
A
5933
5934 while (size) {
0b4e3aa0 5935 m = vm_page_lookup(object, base_offset);
316670eb
A
5936
5937 if (m != VM_PAGE_NULL) {
5938 if (m->fictitious) {
39037602 5939 if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
b0d623f7 5940
2d21ac55 5941 vm_page_lockspin_queues();
2d21ac55 5942 m->private = TRUE;
b0d623f7
A
5943 vm_page_unlock_queues();
5944
5945 m->fictitious = FALSE;
39037602 5946 VM_PAGE_SET_PHYS_PAGE(m, base_page);
0b4e3aa0 5947 }
39037602 5948 } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
316670eb
A
5949
5950 if ( !m->private) {
5951 /*
5952 * we'd leak a real page... that can't be right
5953 */
5954 panic("vm_object_populate_with_private - %p not private", m);
5955 }
5956 if (m->pmapped) {
2d21ac55
A
5957 /*
5958 * pmap call to clear old mapping
5959 */
39037602 5960 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2d21ac55 5961 }
39037602 5962 VM_PAGE_SET_PHYS_PAGE(m, base_page);
0b4e3aa0 5963 }
316670eb
A
5964 if (m->encrypted) {
5965 /*
 5966				 * we should never see this on a fictitious or private page
5967 */
5968 panic("vm_object_populate_with_private - %p encrypted", m);
5969 }
91447636 5970
0b4e3aa0 5971 } else {
b0d623f7 5972 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
0b4e3aa0 5973 vm_page_more_fictitious();
b0d623f7
A
5974
5975 /*
5976 * private normally requires lock_queues but since we
 5977			 * are initializing the page, it's not necessary here
5978 */
0b4e3aa0 5979 m->private = TRUE;
b0d623f7 5980 m->fictitious = FALSE;
39037602 5981 VM_PAGE_SET_PHYS_PAGE(m, base_page);
0b4e3aa0 5982 m->unusual = TRUE;
316670eb 5983 m->busy = FALSE;
b0d623f7 5984
0b4e3aa0
A
5985 vm_page_insert(m, object, base_offset);
5986 }
55e303ae 5987 base_page++; /* Go to the next physical page */
0b4e3aa0
A
5988 base_offset += PAGE_SIZE;
5989 size -= PAGE_SIZE;
5990 }
5991 } else {
5992 /* NOTE: we should check the original settings here */
5993 /* if we have a size > zero a pmap call should be made */
5994 /* to disable the range */
5995
5996 /* pmap_? */
5997
5998 /* shadows on contiguous memory are not allowed */
5999 /* we therefore can use the offset field */
6d2010ae
A
6000 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
6001 object->vo_size = size;
0b4e3aa0
A
6002 }
6003 vm_object_unlock(object);
316670eb 6004
0b4e3aa0
A
6005 return KERN_SUCCESS;
6006}
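/*
 * Editor's note (descriptive comment, not in the original source):
 * vm_object_populate_with_private() binds specific physical page numbers to a
 * "private" VM object. For a non-phys_contiguous object it installs (or
 * retargets) one private page per PAGE_SIZE of the range starting at the
 * page-aligned "offset", pointing them at consecutive physical pages beginning
 * with "phys_page". For a phys_contiguous object it simply records the
 * physical base in vo_shadow_offset and the size in vo_size. Device-memory
 * paths (for example the device pager) appear to be the intended callers;
 * that attribution is an assumption, not something stated in this file.
 */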
6007
1c79356b
A
6008/*
6009 * memory_object_free_from_cache:
6010 *
6011 * Walk the vm_object cache list, removing and freeing vm_objects
0c530ab8 6012 * which are backed by the pager identified by the caller (pager_ops).
1c79356b
A
 6013 * Remove up to "count" objects, if there are that many available
6014 * in the cache.
0b4e3aa0 6015 *
1c79356b
A
6016 * Walk the list at most once, return the number of vm_objects
6017 * actually freed.
1c79356b
A
6018 */
6019
0b4e3aa0 6020__private_extern__ kern_return_t
1c79356b 6021memory_object_free_from_cache(
91447636 6022 __unused host_t host,
b0d623f7 6023 __unused memory_object_pager_ops_t pager_ops,
1c79356b
A
6024 int *count)
6025{
b0d623f7 6026#if VM_OBJECT_CACHE
1c79356b 6027 int object_released = 0;
1c79356b 6028
39037602 6029 vm_object_t object = VM_OBJECT_NULL;
1c79356b
A
6030 vm_object_t shadow;
6031
6032/*
6033 if(host == HOST_NULL)
6034 return(KERN_INVALID_ARGUMENT);
6035*/
6036
6037 try_again:
6038 vm_object_cache_lock();
6039
6040 queue_iterate(&vm_object_cached_list, object,
6041 vm_object_t, cached_list) {
0c530ab8
A
6042 if (object->pager &&
6043 (pager_ops == object->pager->mo_pager_ops)) {
1c79356b
A
6044 vm_object_lock(object);
6045 queue_remove(&vm_object_cached_list, object,
6046 vm_object_t, cached_list);
6047 vm_object_cached_count--;
6048
b0d623f7 6049 vm_object_cache_unlock();
1c79356b
A
6050 /*
6051 * Since this object is in the cache, we know
0b4e3aa0
A
6052 * that it is initialized and has only a pager's
6053 * (implicit) reference. Take a reference to avoid
6054 * recursive deallocations.
1c79356b
A
6055 */
6056
6057 assert(object->pager_initialized);
6058 assert(object->ref_count == 0);
2d21ac55 6059 vm_object_lock_assert_exclusive(object);
1c79356b
A
6060 object->ref_count++;
6061
6062 /*
6063 * Terminate the object.
6064 * If the object had a shadow, we let
6065 * vm_object_deallocate deallocate it.
6066 * "pageout" objects have a shadow, but
6067 * maintain a "paging reference" rather
6068 * than a normal reference.
6069 * (We are careful here to limit recursion.)
6070 */
6071 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
b0d623f7 6072
1c79356b
A
6073 if ((vm_object_terminate(object) == KERN_SUCCESS)
6074 && (shadow != VM_OBJECT_NULL)) {
6075 vm_object_deallocate(shadow);
6076 }
6077
6078 if(object_released++ == *count)
6079 return KERN_SUCCESS;
6080 goto try_again;
6081 }
6082 }
6083 vm_object_cache_unlock();
6084 *count = object_released;
b0d623f7
A
6085#else
6086 *count = 0;
6087#endif
1c79356b
A
6088 return KERN_SUCCESS;
6089}
6090
0b4e3aa0 6091
1c79356b
A
6092
6093kern_return_t
0b4e3aa0
A
6094memory_object_create_named(
6095 memory_object_t pager,
6096 memory_object_offset_t size,
6097 memory_object_control_t *control)
1c79356b 6098{
0b4e3aa0
A
6099 vm_object_t object;
6100 vm_object_hash_entry_t entry;
b0d623f7 6101 lck_mtx_t *lck;
1c79356b 6102
0b4e3aa0
A
6103 *control = MEMORY_OBJECT_CONTROL_NULL;
6104 if (pager == MEMORY_OBJECT_NULL)
6105 return KERN_INVALID_ARGUMENT;
1c79356b 6106
b0d623f7 6107 lck = vm_object_hash_lock_spin(pager);
0b4e3aa0 6108 entry = vm_object_hash_lookup(pager, FALSE);
b0d623f7 6109
0b4e3aa0
A
6110 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
6111 (entry->object != VM_OBJECT_NULL)) {
6112 if (entry->object->named == TRUE)
6113 panic("memory_object_create_named: caller already holds the right"); }
b0d623f7 6114 vm_object_hash_unlock(lck);
1c79356b 6115
b0d623f7 6116 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
0b4e3aa0
A
6117 return(KERN_INVALID_OBJECT);
6118 }
6119
6120 /* wait for object (if any) to be ready */
6121 if (object != VM_OBJECT_NULL) {
6122 vm_object_lock(object);
6123 object->named = TRUE;
6124 while (!object->pager_ready) {
9bccf70c
A
6125 vm_object_sleep(object,
6126 VM_OBJECT_EVENT_PAGER_READY,
6127 THREAD_UNINT);
0b4e3aa0 6128 }
91447636 6129 *control = object->pager_control;
0b4e3aa0
A
6130 vm_object_unlock(object);
6131 }
6132 return (KERN_SUCCESS);
6133}
1c79356b 6134
1c79356b 6135
0b4e3aa0
A
6136/*
6137 * Routine: memory_object_recover_named [user interface]
6138 * Purpose:
6139 * Attempt to recover a named reference for a VM object.
6140 * VM will verify that the object has not already started
6141 * down the termination path, and if it has, will optionally
6142 * wait for that to finish.
6143 * Returns:
6144 * KERN_SUCCESS - we recovered a named reference on the object
6145 * KERN_FAILURE - we could not recover a reference (object dead)
6146 * KERN_INVALID_ARGUMENT - bad memory object control
6147 */
6148kern_return_t
6149memory_object_recover_named(
6150 memory_object_control_t control,
6151 boolean_t wait_on_terminating)
6152{
6153 vm_object_t object;
1c79356b 6154
0b4e3aa0
A
6155 object = memory_object_control_to_vm_object(control);
6156 if (object == VM_OBJECT_NULL) {
0b4e3aa0
A
6157 return (KERN_INVALID_ARGUMENT);
6158 }
0b4e3aa0
A
6159restart:
6160 vm_object_lock(object);
1c79356b 6161
0b4e3aa0 6162 if (object->terminating && wait_on_terminating) {
0b4e3aa0
A
6163 vm_object_wait(object,
6164 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
6165 THREAD_UNINT);
0b4e3aa0
A
6166 goto restart;
6167 }
6168
6169 if (!object->alive) {
0b4e3aa0
A
6170 vm_object_unlock(object);
6171 return KERN_FAILURE;
1c79356b
A
6172 }
6173
0b4e3aa0 6174 if (object->named == TRUE) {
0b4e3aa0
A
6175 vm_object_unlock(object);
6176 return KERN_SUCCESS;
6177 }
b0d623f7
A
6178#if VM_OBJECT_CACHE
6179 if ((object->ref_count == 0) && (!object->terminating)) {
6180 if (!vm_object_cache_lock_try()) {
6181 vm_object_unlock(object);
6182 goto restart;
6183 }
0b4e3aa0
A
6184 queue_remove(&vm_object_cached_list, object,
6185 vm_object_t, cached_list);
b0d623f7
A
6186 vm_object_cached_count--;
6187 XPR(XPR_VM_OBJECT_CACHE,
6188 "memory_object_recover_named: removing %X, head (%X, %X)\n",
6189 object,
6190 vm_object_cached_list.next,
6191 vm_object_cached_list.prev, 0,0);
6192
6193 vm_object_cache_unlock();
0b4e3aa0 6194 }
b0d623f7 6195#endif
0b4e3aa0 6196 object->named = TRUE;
2d21ac55 6197 vm_object_lock_assert_exclusive(object);
0b4e3aa0
A
6198 object->ref_count++;
6199 vm_object_res_reference(object);
6200 while (!object->pager_ready) {
9bccf70c
A
6201 vm_object_sleep(object,
6202 VM_OBJECT_EVENT_PAGER_READY,
6203 THREAD_UNINT);
0b4e3aa0
A
6204 }
6205 vm_object_unlock(object);
6206 return (KERN_SUCCESS);
1c79356b
A
6207}
6208
0b4e3aa0
A
6209
6210/*
6211 * vm_object_release_name:
6212 *
6213 * Enforces name semantic on memory_object reference count decrement
6214 * This routine should not be called unless the caller holds a name
6215 * reference gained through the memory_object_create_named.
6216 *
6217 * If the TERMINATE_IDLE flag is set, the call will return if the
6218 * reference count is not 1. i.e. idle with the only remaining reference
6219 * being the name.
6220 * If the decision is made to proceed the name field flag is set to
6221 * false and the reference count is decremented. If the RESPECT_CACHE
6222 * flag is set and the reference count has gone to zero, the
6223 * memory_object is checked to see if it is cacheable otherwise when
6224 * the reference count is zero, it is simply terminated.
6225 */
6226
6227__private_extern__ kern_return_t
6228vm_object_release_name(
6229 vm_object_t object,
6230 int flags)
1c79356b 6231{
0b4e3aa0
A
6232 vm_object_t shadow;
6233 boolean_t original_object = TRUE;
1c79356b 6234
0b4e3aa0 6235 while (object != VM_OBJECT_NULL) {
1c79356b 6236
0b4e3aa0 6237 vm_object_lock(object);
b0d623f7 6238
0b4e3aa0 6239 assert(object->alive);
b0d623f7 6240 if (original_object)
0b4e3aa0
A
6241 assert(object->named);
6242 assert(object->ref_count > 0);
6243
6244 /*
6245 * We have to wait for initialization before
6246 * destroying or caching the object.
6247 */
6248
6249 if (object->pager_created && !object->pager_initialized) {
6250 assert(!object->can_persist);
6251 vm_object_assert_wait(object,
6252 VM_OBJECT_EVENT_INITIALIZED,
6253 THREAD_UNINT);
6254 vm_object_unlock(object);
9bccf70c 6255 thread_block(THREAD_CONTINUE_NULL);
0b4e3aa0 6256 continue;
1c79356b
A
6257 }
6258
0b4e3aa0
A
6259 if (((object->ref_count > 1)
6260 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
6261 || (object->terminating)) {
6262 vm_object_unlock(object);
0b4e3aa0
A
6263 return KERN_FAILURE;
6264 } else {
6265 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
6266 vm_object_unlock(object);
0b4e3aa0 6267 return KERN_SUCCESS;
1c79356b 6268 }
0b4e3aa0
A
6269 }
6270
6271 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
6272 (object->ref_count == 1)) {
b0d623f7 6273 if (original_object)
0b4e3aa0 6274 object->named = FALSE;
1c79356b 6275 vm_object_unlock(object);
0b4e3aa0
A
6276 /* let vm_object_deallocate push this thing into */
6277 /* the cache, if that it is where it is bound */
6278 vm_object_deallocate(object);
6279 return KERN_SUCCESS;
6280 }
6281 VM_OBJ_RES_DECR(object);
6282 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
b0d623f7
A
6283
6284 if (object->ref_count == 1) {
6285 if (vm_object_terminate(object) != KERN_SUCCESS) {
6286 if (original_object) {
0b4e3aa0
A
6287 return KERN_FAILURE;
6288 } else {
6289 return KERN_SUCCESS;
6290 }
6291 }
6292 if (shadow != VM_OBJECT_NULL) {
6293 original_object = FALSE;
6294 object = shadow;
6295 continue;
6296 }
6297 return KERN_SUCCESS;
6298 } else {
2d21ac55 6299 vm_object_lock_assert_exclusive(object);
0b4e3aa0
A
6300 object->ref_count--;
6301 assert(object->ref_count > 0);
6302 if(original_object)
6303 object->named = FALSE;
6304 vm_object_unlock(object);
0b4e3aa0 6305 return KERN_SUCCESS;
1c79356b 6306 }
1c79356b 6307 }
91447636
A
6308 /*NOTREACHED*/
6309 assert(0);
6310 return KERN_FAILURE;
1c79356b
A
6311}
6312
0b4e3aa0
A
6313
6314__private_extern__ kern_return_t
6315vm_object_lock_request(
6316 vm_object_t object,
6317 vm_object_offset_t offset,
6318 vm_object_size_t size,
6319 memory_object_return_t should_return,
6320 int flags,
6321 vm_prot_t prot)
1c79356b 6322{
91447636
A
6323 __unused boolean_t should_flush;
6324
6325 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
1c79356b 6326
0b4e3aa0
A
6327 XPR(XPR_MEMORY_OBJECT,
6328 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
b0d623f7 6329 object, offset, size,
0b4e3aa0 6330 (((should_return&1)<<1)|should_flush), prot);
1c79356b 6331
0b4e3aa0
A
6332 /*
6333 * Check for bogus arguments.
6334 */
6335 if (object == VM_OBJECT_NULL)
6336 return (KERN_INVALID_ARGUMENT);
1c79356b 6337
0b4e3aa0
A
6338 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
6339 return (KERN_INVALID_ARGUMENT);
1c79356b 6340
55e303ae 6341 size = round_page_64(size);
0b4e3aa0
A
6342
6343 /*
6344 * Lock the object, and acquire a paging reference to
6345 * prevent the memory_object reference from being released.
6346 */
6347 vm_object_lock(object);
6348 vm_object_paging_begin(object);
0b4e3aa0
A
6349
6350 (void)vm_object_update(object,
91447636 6351 offset, size, NULL, NULL, should_return, flags, prot);
0b4e3aa0
A
6352
6353 vm_object_paging_end(object);
6354 vm_object_unlock(object);
6355
6356 return (KERN_SUCCESS);
6357}
6358
91447636 6359/*
2d21ac55 6360 * Empty a purgeable object by grabbing the physical pages assigned to it and
91447636
A
6361 * putting them on the free queue without writing them to backing store, etc.
6362 * When the pages are next touched they will be demand zero-fill pages. We
6363 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
6364 * skip referenced/dirty pages, pages on the active queue, etc. We're more
2d21ac55 6365 * than happy to grab these since this is a purgeable object. We mark the
91447636
A
6366 * object as "empty" after reaping its pages.
6367 *
b0d623f7
A
6368 * On entry the object must be locked and it must be
6369 * purgeable with no delayed copies pending.
91447636 6370 */
b0d623f7 6371void
fe8ab488 6372vm_object_purge(vm_object_t object, int flags)
91447636 6373{
4bd07ac2
A
6374 unsigned int object_page_count = 0;
6375 unsigned int pgcount = 0;
6376 boolean_t skipped_object = FALSE;
6377
b0d623f7 6378 vm_object_lock_assert_exclusive(object);
0b4e3aa0 6379
b0d623f7
A
6380 if (object->purgable == VM_PURGABLE_DENY)
6381 return;
91447636
A
6382
6383 assert(object->copy == VM_OBJECT_NULL);
6384 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
593a1d5f 6385
fe8ab488
A
6386 /*
6387 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
6388 * reaping its pages. We update vm_page_purgeable_count in bulk
6389 * and we don't want vm_page_remove() to update it again for each
6390 * page we reap later.
6391 *
6392 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
6393 * are all accounted for in the "volatile" ledgers, so this does not
6394 * make any difference.
6395 * If we transitioned directly from NONVOLATILE to EMPTY,
6396 * vm_page_purgeable_count must have been updated when the object
6397 * was dequeued from its volatile queue and the purgeable ledgers
6398 * must have also been updated accordingly at that time (in
6399 * vm_object_purgable_control()).
6400 */
6401 if (object->purgable == VM_PURGABLE_VOLATILE) {
b0d623f7
A
6402 unsigned int delta;
6403 assert(object->resident_page_count >=
6404 object->wired_page_count);
6405 delta = (object->resident_page_count -
6406 object->wired_page_count);
6407 if (delta != 0) {
6408 assert(vm_page_purgeable_count >=
6409 delta);
6410 OSAddAtomic(-delta,
6411 (SInt32 *)&vm_page_purgeable_count);
91447636 6412 }
b0d623f7
A
6413 if (object->wired_page_count != 0) {
6414 assert(vm_page_purgeable_wired_count >=
6415 object->wired_page_count);
6416 OSAddAtomic(-object->wired_page_count,
6417 (SInt32 *)&vm_page_purgeable_wired_count);
91447636 6418 }
fe8ab488 6419 object->purgable = VM_PURGABLE_EMPTY;
91447636 6420 }
fe8ab488 6421 assert(object->purgable == VM_PURGABLE_EMPTY);
b0d623f7 6422
4bd07ac2
A
6423 object_page_count = object->resident_page_count;
6424
b0d623f7 6425 vm_object_reap_pages(object, REAP_PURGEABLE);
fe8ab488 6426
39037602
A
6427 if (object->pager != NULL) {
6428
6429 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
fe8ab488
A
6430
6431 if (object->activity_in_progress == 0 &&
6432 object->paging_in_progress == 0) {
6433 /*
6434 * Also reap any memory coming from this object
6435 * in the VM compressor.
6436 *
6437 * There are no operations in progress on the VM object
6438 * and no operation can start while we're holding the
6439 * VM object lock, so it's safe to reap the compressed
6440 * pages and update the page counts.
6441 */
6442 pgcount = vm_compressor_pager_get_count(object->pager);
6443 if (pgcount) {
6444 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
6445 vm_compressor_pager_count(object->pager,
6446 -pgcount,
6447 FALSE, /* shared */
6448 object);
6449 vm_purgeable_compressed_update(object,
6450 -pgcount);
6451 }
6452 if ( !(flags & C_DONT_BLOCK)) {
6453 assert(vm_compressor_pager_get_count(object->pager)
6454 == 0);
6455 }
6456 } else {
6457 /*
6458 * There's some kind of paging activity in progress
6459 * for this object, which could result in a page
6460 * being compressed or decompressed, possibly while
6461 * the VM object is not locked, so it could race
6462 * with us.
6463 *
6464 * We can't really synchronize this without possibly
6465 * causing a deadlock when the compressor needs to
6466 * allocate or free memory while compressing or
6467 * decompressing a page from a purgeable object
6468 * mapped in the kernel_map...
6469 *
6470 * So let's not attempt to purge the compressor
6471 * pager if there's any kind of operation in
6472 * progress on the VM object.
6473 */
4bd07ac2 6474 skipped_object = TRUE;
fe8ab488
A
6475 }
6476 }
6477
6478 vm_object_lock_assert_exclusive(object);
4bd07ac2
A
6479
6480 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
6481 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
6482 object_page_count,
6483 pgcount,
6484 skipped_object,
6485 0);
6486
91447636 6487}
b0d623f7 6488
91447636
A
6489
6490/*
2d21ac55
A
6491 * vm_object_purgeable_control() allows the caller to control and investigate the
6492 * state of a purgeable object. A purgeable object is created via a call to
6493 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
6494 * never be coalesced with any other object -- even other purgeable objects --
6495 * and will thus always remain a distinct object. A purgeable object has
91447636 6496 * special semantics when its reference count is exactly 1. If its reference
2d21ac55 6497 * count is greater than 1, then a purgeable object will behave like a normal
91447636
A
6498 * object and attempts to use this interface will result in an error return
6499 * of KERN_INVALID_ARGUMENT.
6500 *
2d21ac55 6501 * A purgeable object may be put into a "volatile" state which will make the
91447636
A
 6502 * object's pages eligible to be reclaimed without paging to backing
6503 * store if the system runs low on memory. If the pages in a volatile
2d21ac55
A
6504 * purgeable object are reclaimed, the purgeable object is said to have been
6505 * "emptied." When a purgeable object is emptied the system will reclaim as
91447636
A
6506 * many pages from the object as it can in a convenient manner (pages already
6507 * en route to backing store or busy for other reasons are left as is). When
2d21ac55 6508 * a purgeable object is made volatile, its pages will generally be reclaimed
91447636
A
6509 * before other pages in the application's working set. This semantic is
6510 * generally used by applications which can recreate the data in the object
6511 * faster than it can be paged in. One such example might be media assets
6512 * which can be reread from a much faster RAID volume.
6513 *
2d21ac55 6514 * A purgeable object may be designated as "non-volatile" which means it will
91447636
A
6515 * behave like all other objects in the system with pages being written to and
6516 * read from backing store as needed to satisfy system memory needs. If the
6517 * object was emptied before the object was made non-volatile, that fact will
2d21ac55 6518 * be returned as the old state of the purgeable object (see
91447636
A
6519 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
6520 * were reclaimed as part of emptying the object will be refaulted in as
6521 * zero-fill on demand. It is up to the application to note that an object
 6522 * was emptied and recreate the object's contents if necessary. When a
2d21ac55
A
6523 * purgeable object is made non-volatile, its pages will generally not be paged
6524 * out to backing store in the immediate future. A purgeable object may also
91447636
A
6525 * be manually emptied.
6526 *
6527 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
2d21ac55 6528 * volatile purgeable object may be queried at any time. This information may
91447636
A
6529 * be used as a control input to let the application know when the system is
6530 * experiencing memory pressure and is reclaiming memory.
6531 *
2d21ac55 6532 * The specified address may be any address within the purgeable object. If
91447636
A
6533 * the specified address does not represent any object in the target task's
6534 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
2d21ac55 6535 * object containing the specified address is not a purgeable object, then
91447636
A
6536 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
6537 * returned.
6538 *
6539 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
6540 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
2d21ac55
A
6541 * state is used to set the new state of the purgeable object and return its
6542 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
91447636
A
6543 * object is returned in the parameter state.
6544 *
6545 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
6546 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
6547 * the non-volatile, volatile and volatile/empty states described above.
2d21ac55 6548 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
91447636
A
6549 * immediately reclaim as many pages in the object as can be conveniently
6550 * collected (some may have already been written to backing store or be
6551 * otherwise busy).
6552 *
2d21ac55
A
6553 * The process of making a purgeable object non-volatile and determining its
6554 * previous state is atomic. Thus, if a purgeable object is made
91447636 6555 * VM_PURGABLE_NONVOLATILE and the old state is returned as
2d21ac55 6556 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
91447636
A
6557 * completely intact and will remain so until the object is made volatile
6558 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
6559 * was reclaimed while it was in a volatile state and its previous contents
6560 * have been lost.
6561 */
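/*
 * Editor's note: the sketch below is illustrative only and is not part of the
 * original source. It shows the user-space view of the state machine described
 * above, via the Mach vm_allocate()/vm_purgable_control() interfaces (note the
 * historical "purgable" spelling of the API). Return values are ignored here
 * for brevity.
 */
#if 0	/* illustrative user-space sketch, not compiled into the kernel */
#include <mach/mach.h>		/* what a user-space caller would include */

static void
example_purgeable_cache(void)
{
	vm_address_t	addr = 0;
	vm_size_t	size = 4 * 1024 * 1024;
	int		state;

	/* back a regenerable cache with a purgeable object */
	vm_allocate(mach_task_self(), &addr, size,
		    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);

	/* cache is filled; let the kernel reclaim it under memory pressure */
	state = VM_PURGABLE_VOLATILE;
	vm_purgable_control(mach_task_self(), addr,
			    VM_PURGABLE_SET_STATE, &state);

	/* before reusing the data, pin it again and check the old state */
	state = VM_PURGABLE_NONVOLATILE;
	vm_purgable_control(mach_task_self(), addr,
			    VM_PURGABLE_SET_STATE, &state);
	if (state == VM_PURGABLE_EMPTY) {
		/* contents were reclaimed while volatile: regenerate them */
	}
	vm_deallocate(mach_task_self(), addr, size);
}
#endif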
6562/*
6563 * The object must be locked.
6564 */
6565kern_return_t
6566vm_object_purgable_control(
6567 vm_object_t object,
6568 vm_purgable_t control,
6569 int *state)
6570{
6571 int old_state;
2d21ac55 6572 int new_state;
91447636
A
6573
6574 if (object == VM_OBJECT_NULL) {
6575 /*
2d21ac55 6576 * Object must already be present or it can't be purgeable.
91447636
A
6577 */
6578 return KERN_INVALID_ARGUMENT;
6579 }
6580
fe8ab488
A
6581 vm_object_lock_assert_exclusive(object);
6582
91447636 6583 /*
2d21ac55 6584 * Get current state of the purgeable object.
91447636 6585 */
2d21ac55
A
6586 old_state = object->purgable;
6587 if (old_state == VM_PURGABLE_DENY)
91447636
A
6588 return KERN_INVALID_ARGUMENT;
6589
2d21ac55 6590	/* purgeable can't have delayed copies - now or in the future */
91447636
A
6591 assert(object->copy == VM_OBJECT_NULL);
6592 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6593
6594 /*
6595 * Execute the desired operation.
6596 */
6597 if (control == VM_PURGABLE_GET_STATE) {
6598 *state = old_state;
6599 return KERN_SUCCESS;
6600 }
6601
b0d623f7
A
6602 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
6603 object->volatile_empty = TRUE;
6604 }
6605 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
6606 object->volatile_fault = TRUE;
6607 }
6608
2d21ac55 6609 new_state = *state & VM_PURGABLE_STATE_MASK;
b0d623f7
A
6610 if (new_state == VM_PURGABLE_VOLATILE &&
6611 object->volatile_empty) {
6612 new_state = VM_PURGABLE_EMPTY;
6613 }
6614
2d21ac55
A
6615 switch (new_state) {
6616 case VM_PURGABLE_DENY:
91447636 6617 case VM_PURGABLE_NONVOLATILE:
2d21ac55
A
6618 object->purgable = new_state;
6619
b0d623f7
A
6620 if (old_state == VM_PURGABLE_VOLATILE) {
6621 unsigned int delta;
6622
6623 assert(object->resident_page_count >=
6624 object->wired_page_count);
6625 delta = (object->resident_page_count -
6626 object->wired_page_count);
6627
6628 assert(vm_page_purgeable_count >= delta);
6629
6630 if (delta != 0) {
6631 OSAddAtomic(-delta,
6632 (SInt32 *)&vm_page_purgeable_count);
6633 }
6634 if (object->wired_page_count != 0) {
6635 assert(vm_page_purgeable_wired_count >=
6636 object->wired_page_count);
6637 OSAddAtomic(-object->wired_page_count,
6638 (SInt32 *)&vm_page_purgeable_wired_count);
6639 }
6640
2d21ac55 6641 vm_page_lock_queues();
b0d623f7 6642
fe8ab488
A
6643 /* object should be on a queue */
6644 assert(object->objq.next != NULL &&
6645 object->objq.prev != NULL);
6646 purgeable_q_t queue;
6647
6648 /*
6649 * Move object from its volatile queue to the
6650 * non-volatile queue...
6651 */
6652 queue = vm_purgeable_object_remove(object);
b0d623f7
A
6653 assert(queue);
6654
39236c6e
A
6655 if (object->purgeable_when_ripe) {
6656 vm_purgeable_token_delete_last(queue);
6657 }
b0d623f7
A
6658 assert(queue->debug_count_objects>=0);
6659
2d21ac55 6660 vm_page_unlock_queues();
91447636 6661 }
fe8ab488
A
6662 if (old_state == VM_PURGABLE_VOLATILE ||
6663 old_state == VM_PURGABLE_EMPTY) {
6664 /*
6665 * Transfer the object's pages from the volatile to
6666 * non-volatile ledgers.
6667 */
6668 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
6669 FALSE);
6670 }
6671
91447636
A
6672 break;
6673
6674 case VM_PURGABLE_VOLATILE:
b0d623f7
A
6675 if (object->volatile_fault) {
6676 vm_page_t p;
6677 int refmod;
6678
39037602 6679 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
b0d623f7
A
6680 if (p->busy ||
6681 VM_PAGE_WIRED(p) ||
6682 p->fictitious) {
6683 continue;
6684 }
39037602 6685 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
b0d623f7
A
6686 if ((refmod & VM_MEM_MODIFIED) &&
6687 !p->dirty) {
316670eb 6688 SET_PAGE_DIRTY(p, FALSE);
b0d623f7
A
6689 }
6690 }
6691 }
6692
593a1d5f 6693 if (old_state == VM_PURGABLE_EMPTY &&
fe8ab488
A
6694 object->resident_page_count == 0 &&
6695 object->pager == NULL)
2d21ac55 6696 break;
b0d623f7 6697
2d21ac55
A
6698 purgeable_q_t queue;
6699
6700 /* find the correct queue */
6701 if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
593a1d5f 6702 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
2d21ac55
A
6703 else {
6704 if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
6705 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6706 else
6707 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
91447636 6708 }
2d21ac55 6709
593a1d5f
A
6710 if (old_state == VM_PURGABLE_NONVOLATILE ||
6711 old_state == VM_PURGABLE_EMPTY) {
b0d623f7
A
6712 unsigned int delta;
6713
39236c6e
A
6714 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6715 VM_PURGABLE_NO_AGING) {
6716 object->purgeable_when_ripe = FALSE;
6717 } else {
6718 object->purgeable_when_ripe = TRUE;
6719 }
6720
6721 if (object->purgeable_when_ripe) {
6722 kern_return_t result;
91447636 6723
39236c6e
A
6724 /* try to add token... this can fail */
6725 vm_page_lock_queues();
6726
6727 result = vm_purgeable_token_add(queue);
6728 if (result != KERN_SUCCESS) {
6729 vm_page_unlock_queues();
6730 return result;
6731 }
6732 vm_page_unlock_queues();
91447636 6733 }
2d21ac55 6734
b0d623f7
A
6735 assert(object->resident_page_count >=
6736 object->wired_page_count);
6737 delta = (object->resident_page_count -
6738 object->wired_page_count);
6739
6740 if (delta != 0) {
6741 OSAddAtomic(delta,
6742 &vm_page_purgeable_count);
6743 }
6744 if (object->wired_page_count != 0) {
6745 OSAddAtomic(object->wired_page_count,
6746 &vm_page_purgeable_wired_count);
6747 }
6748
2d21ac55
A
6749 object->purgable = new_state;
6750
fe8ab488
A
6751 /* object should be on "non-volatile" queue */
6752 assert(object->objq.next != NULL);
6753 assert(object->objq.prev != NULL);
91447636 6754 }
2d21ac55 6755 else if (old_state == VM_PURGABLE_VOLATILE) {
39236c6e
A
6756 purgeable_q_t old_queue;
6757 boolean_t purgeable_when_ripe;
6758
2d21ac55
A
6759 /*
6760 * if reassigning priorities / purgeable groups, we don't change the
6761 * token queue. So moving priorities will not make pages stay around longer.
6762 * Reasoning is that the algorithm gives most priority to the most important
 6763		 * object. If a new token is added, the most important object's priority is boosted.
 6764		 * This biases the system already for purgeable queues that move a lot.
 6765		 * It doesn't seem more biasing is necessary in this case, where no new object is added.
6766 */
6767 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6768
39236c6e 6769 old_queue = vm_purgeable_object_remove(object);
2d21ac55
A
6770 assert(old_queue);
6771
39236c6e
A
6772 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6773 VM_PURGABLE_NO_AGING) {
6774 purgeable_when_ripe = FALSE;
6775 } else {
6776 purgeable_when_ripe = TRUE;
6777 }
6778
6779 if (old_queue != queue ||
6780 (purgeable_when_ripe !=
6781 object->purgeable_when_ripe)) {
2d21ac55
A
6782 kern_return_t result;
6783
6784 /* Changing queue. Have to move token. */
6785 vm_page_lock_queues();
39236c6e
A
6786 if (object->purgeable_when_ripe) {
6787 vm_purgeable_token_delete_last(old_queue);
6788 }
6789 object->purgeable_when_ripe = purgeable_when_ripe;
6790 if (object->purgeable_when_ripe) {
6791 result = vm_purgeable_token_add(queue);
6792 assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */
6793 }
2d21ac55 6794 vm_page_unlock_queues();
91447636 6795
2d21ac55
A
6796 }
6797 };
6798 vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
fe8ab488
A
6799 if (old_state == VM_PURGABLE_NONVOLATILE) {
6800 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
6801 FALSE);
6802 }
2d21ac55
A
6803
6804 assert(queue->debug_count_objects>=0);
6805
91447636
A
6806 break;
6807
6808
6809 case VM_PURGABLE_EMPTY:
b0d623f7
A
6810 if (object->volatile_fault) {
6811 vm_page_t p;
6812 int refmod;
6813
39037602 6814 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
b0d623f7
A
6815 if (p->busy ||
6816 VM_PAGE_WIRED(p) ||
6817 p->fictitious) {
6818 continue;
6819 }
39037602 6820 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
b0d623f7
A
6821 if ((refmod & VM_MEM_MODIFIED) &&
6822 !p->dirty) {
316670eb 6823 SET_PAGE_DIRTY(p, FALSE);
b0d623f7 6824 }
2d21ac55 6825 }
b0d623f7
A
6826 }
6827
fe8ab488
A
6828 if (old_state == new_state) {
6829 /* nothing changes */
6830 break;
6831 }
2d21ac55 6832
fe8ab488
A
6833 assert(old_state == VM_PURGABLE_NONVOLATILE ||
6834 old_state == VM_PURGABLE_VOLATILE);
6835 if (old_state == VM_PURGABLE_VOLATILE) {
6836 purgeable_q_t old_queue;
6837
6838 /* object should be on a queue */
6839 assert(object->objq.next != NULL &&
6840 object->objq.prev != NULL);
6841
6842 old_queue = vm_purgeable_object_remove(object);
6843 assert(old_queue);
6844 if (object->purgeable_when_ripe) {
6845 vm_page_lock_queues();
6846 vm_purgeable_token_delete_first(old_queue);
6847 vm_page_unlock_queues();
2d21ac55 6848 }
91447636 6849 }
91447636 6850
fe8ab488
A
6851 if (old_state == VM_PURGABLE_NONVOLATILE) {
6852 /*
6853 * This object's pages were previously accounted as
6854 * "non-volatile" and now need to be accounted as
6855 * "volatile".
6856 */
6857 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
6858 FALSE);
6859 /*
6860 * Set to VM_PURGABLE_EMPTY because the pages are no
6861 * longer accounted in the "non-volatile" ledger
6862 * and are also not accounted for in
6863 * "vm_page_purgeable_count".
6864 */
6865 object->purgable = VM_PURGABLE_EMPTY;
6866 }
6867
6868 (void) vm_object_purge(object, 0);
6869 assert(object->purgable == VM_PURGABLE_EMPTY);
6870
6871 break;
91447636 6872 }
fe8ab488 6873
91447636
A
6874 *state = old_state;
6875
fe8ab488
A
6876 vm_object_lock_assert_exclusive(object);
6877
91447636
A
6878 return KERN_SUCCESS;
6879}
0b4e3aa0 6880
39236c6e
A
6881kern_return_t
6882vm_object_get_page_counts(
6883 vm_object_t object,
6884 vm_object_offset_t offset,
6885 vm_object_size_t size,
6886 unsigned int *resident_page_count,
6887 unsigned int *dirty_page_count)
6888{
6889
6890 kern_return_t kr = KERN_SUCCESS;
6891 boolean_t count_dirty_pages = FALSE;
6892 vm_page_t p = VM_PAGE_NULL;
6893 unsigned int local_resident_count = 0;
6894 unsigned int local_dirty_count = 0;
6895 vm_object_offset_t cur_offset = 0;
6896 vm_object_offset_t end_offset = 0;
6897
6898 if (object == VM_OBJECT_NULL)
6899 return KERN_INVALID_ARGUMENT;
6900
6901
6902 cur_offset = offset;
6903
6904 end_offset = offset + size;
6905
6906 vm_object_lock_assert_exclusive(object);
6907
6908 if (dirty_page_count != NULL) {
6909
6910 count_dirty_pages = TRUE;
6911 }
6912
6913 if (resident_page_count != NULL && count_dirty_pages == FALSE) {
6914 /*
6915 * Fast path when:
6916 * - we only want the resident page count, and
6917 * - the entire object is exactly covered by the request.
6918 */
6919 if (offset == 0 && (object->vo_size == size)) {
6920
6921 *resident_page_count = object->resident_page_count;
6922 goto out;
6923 }
6924 }
6925
6926 if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
6927
39037602 6928 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
39236c6e
A
6929
6930 if (p->offset >= cur_offset && p->offset < end_offset) {
6931
6932 local_resident_count++;
6933
6934 if (count_dirty_pages) {
6935
39037602 6936 if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
39236c6e
A
6937
6938 local_dirty_count++;
6939 }
6940 }
6941 }
6942 }
6943 } else {
6944
6945 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6946
6947 p = vm_page_lookup(object, cur_offset);
6948
6949 if (p != VM_PAGE_NULL) {
6950
6951 local_resident_count++;
6952
6953 if (count_dirty_pages) {
6954
39037602 6955 if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
39236c6e
A
6956
6957 local_dirty_count++;
6958 }
6959 }
6960 }
6961 }
6962
6963 }
6964
6965 if (resident_page_count != NULL) {
6966 *resident_page_count = local_resident_count;
6967 }
6968
6969 if (dirty_page_count != NULL) {
6970 *dirty_page_count = local_dirty_count;
6971 }
6972
6973out:
6974 return kr;
6975}
6976
6977
0b4e3aa0
A
6978#if TASK_SWAPPER
6979/*
6980 * vm_object_res_deallocate
6981 *
6982 * (recursively) decrement residence counts on vm objects and their shadows.
6983 * Called from vm_object_deallocate and when swapping out an object.
6984 *
6985 * The object is locked, and remains locked throughout the function,
6986 * even as we iterate down the shadow chain. Locks on intermediate objects
6987 * will be dropped, but not the original object.
6988 *
6989 * NOTE: this function used to use recursion, rather than iteration.
6990 */
6991
6992__private_extern__ void
6993vm_object_res_deallocate(
6994 vm_object_t object)
6995{
6996 vm_object_t orig_object = object;
6997 /*
6998 * Object is locked so it can be called directly
6999 * from vm_object_deallocate. Original object is never
7000 * unlocked.
7001 */
7002 assert(object->res_count > 0);
7003 while (--object->res_count == 0) {
7004 assert(object->ref_count >= object->res_count);
7005 vm_object_deactivate_all_pages(object);
7006 /* iterate on shadow, if present */
7007 if (object->shadow != VM_OBJECT_NULL) {
7008 vm_object_t tmp_object = object->shadow;
7009 vm_object_lock(tmp_object);
7010 if (object != orig_object)
7011 vm_object_unlock(object);
7012 object = tmp_object;
7013 assert(object->res_count > 0);
7014 } else
7015 break;
7016 }
7017 if (object != orig_object)
1c79356b 7018 vm_object_unlock(object);
0b4e3aa0
A
7019}
7020
7021/*
7022 * vm_object_res_reference
7023 *
7024 * Internal function to increment residence count on a vm object
7025 * and its shadows. It is called only from vm_object_reference, and
7026 * when swapping in a vm object, via vm_map_swap.
7027 *
7028 * The object is locked, and remains locked throughout the function,
7029 * even as we iterate down the shadow chain. Locks on intermediate objects
7030 * will be dropped, but not the original object.
7031 *
7032 * NOTE: this function used to use recursion, rather than iteration.
7033 */
7034
7035__private_extern__ void
7036vm_object_res_reference(
7037 vm_object_t object)
7038{
7039 vm_object_t orig_object = object;
7040 /*
7041 * Object is locked, so this can be called directly
7042 * from vm_object_reference. This lock is never released.
7043 */
7044 while ((++object->res_count == 1) &&
7045 (object->shadow != VM_OBJECT_NULL)) {
7046 vm_object_t tmp_object = object->shadow;
7047
7048 assert(object->ref_count >= object->res_count);
7049 vm_object_lock(tmp_object);
7050 if (object != orig_object)
7051 vm_object_unlock(object);
7052 object = tmp_object;
1c79356b 7053 }
0b4e3aa0
A
7054 if (object != orig_object)
7055 vm_object_unlock(object);
7056 assert(orig_object->ref_count >= orig_object->res_count);
1c79356b 7057}
0b4e3aa0
A
7058#endif /* TASK_SWAPPER */
7059
7060/*
7061 * vm_object_reference:
7062 *
7063 * Gets another reference to the given object.
7064 */
7065#ifdef vm_object_reference
7066#undef vm_object_reference
7067#endif
7068__private_extern__ void
7069vm_object_reference(
39037602 7070 vm_object_t object)
0b4e3aa0
A
7071{
7072 if (object == VM_OBJECT_NULL)
7073 return;
7074
7075 vm_object_lock(object);
7076 assert(object->ref_count > 0);
7077 vm_object_reference_locked(object);
7078 vm_object_unlock(object);
7079}
7080
1c79356b
A
7081#ifdef MACH_BSD
7082/*
7083 * Scale the vm_object_cache
7084 * This is required to make sure that the vm_object_cache is big
7085 * enough to effectively cache the mapped file.
7086 * This is really important with UBC as all the regular file vnodes
7087 * have a memory object associated with them. Having this cache too
7088 * small results in rapid reclaim of vnodes and hurts performance a LOT!
7089 *
7090 * This is also needed because the number of vnodes can be scaled dynamically.
7091 */
7092kern_return_t
91447636
A
7093adjust_vm_object_cache(
7094 __unused vm_size_t oval,
b0d623f7 7095 __unused vm_size_t nval)
1c79356b 7096{
b0d623f7 7097#if VM_OBJECT_CACHE
1c79356b
A
7098 vm_object_cached_max = nval;
7099 vm_object_cache_trim(FALSE);
b0d623f7 7100#endif
1c79356b
A
7101 return (KERN_SUCCESS);
7102}
7103#endif /* MACH_BSD */
7104
91447636
A
7105
7106/*
7107 * vm_object_transpose
7108 *
7109 * This routine takes two VM objects of the same size and exchanges
7110 * their backing store.
7111 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
7112 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
7113 *
7114 * The VM objects must not be locked by caller.
7115 */
b0d623f7 7116unsigned int vm_object_transpose_count = 0;
91447636
A
7117kern_return_t
7118vm_object_transpose(
7119 vm_object_t object1,
7120 vm_object_t object2,
7121 vm_object_size_t transpose_size)
7122{
7123 vm_object_t tmp_object;
7124 kern_return_t retval;
7125 boolean_t object1_locked, object2_locked;
91447636
A
7126 vm_page_t page;
7127 vm_object_offset_t page_offset;
b0d623f7
A
7128 lck_mtx_t *hash_lck;
7129 vm_object_hash_entry_t hash_entry;
91447636
A
7130
7131 tmp_object = VM_OBJECT_NULL;
7132 object1_locked = FALSE; object2_locked = FALSE;
91447636
A
7133
7134 if (object1 == object2 ||
7135 object1 == VM_OBJECT_NULL ||
7136 object2 == VM_OBJECT_NULL) {
7137 /*
7138 * If the 2 VM objects are the same, there's
7139 * no point in exchanging their backing store.
7140 */
7141 retval = KERN_INVALID_VALUE;
7142 goto done;
7143 }
7144
b0d623f7
A
7145 /*
7146 * Since we need to lock both objects at the same time,
7147 * make sure we always lock them in the same order to
7148 * avoid deadlocks.
7149 */
7150 if (object1 > object2) {
7151 tmp_object = object1;
7152 object1 = object2;
7153 object2 = tmp_object;
7154 }
7155
7156 /*
7157 * Allocate a temporary VM object to hold object1's contents
7158 * while we copy object2 to object1.
7159 */
7160 tmp_object = vm_object_allocate(transpose_size);
7161 vm_object_lock(tmp_object);
7162 tmp_object->can_persist = FALSE;
7163
7164
7165 /*
7166 * Grab control of the 1st VM object.
7167 */
91447636
A
7168 vm_object_lock(object1);
7169 object1_locked = TRUE;
2d21ac55
A
7170 if (!object1->alive || object1->terminating ||
7171 object1->copy || object1->shadow || object1->shadowed ||
7172 object1->purgable != VM_PURGABLE_DENY) {
91447636
A
7173 /*
7174 * We don't deal with copy or shadow objects (yet).
7175 */
7176 retval = KERN_INVALID_VALUE;
7177 goto done;
7178 }
7179 /*
b0d623f7
A
7180 * We're about to mess with the object's backing store and
7181 * taking a "paging_in_progress" reference wouldn't be enough
91447636
A
7182 * to prevent any paging activity on this object, so the caller should
7183 * have "quiesced" the objects beforehand, via a UPL operation with
7184 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
7185 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
b0d623f7
A
7186 *
7187 * Wait for any paging operation to complete (but only paging, not
7188 * other kinds of activity not linked to the pager). After we're
7189 * satisfied that there's no more paging in progress, we keep the
7190 * object locked, to guarantee that no one tries to access its pager.
91447636 7191 */
b0d623f7 7192 vm_object_paging_only_wait(object1, THREAD_UNINT);
91447636
A
7193
7194 /*
7195 * Same as above for the 2nd object...
7196 */
7197 vm_object_lock(object2);
7198 object2_locked = TRUE;
2d21ac55
A
7199 if (! object2->alive || object2->terminating ||
7200 object2->copy || object2->shadow || object2->shadowed ||
7201 object2->purgable != VM_PURGABLE_DENY) {
91447636
A
7202 retval = KERN_INVALID_VALUE;
7203 goto done;
7204 }
b0d623f7 7205 vm_object_paging_only_wait(object2, THREAD_UNINT);
91447636 7206
91447636 7207
6d2010ae
A
7208 if (object1->vo_size != object2->vo_size ||
7209 object1->vo_size != transpose_size) {
91447636
A
7210 /*
7211 * If the 2 objects don't have the same size, we can't
7212 * exchange their backing stores or one would overflow.
7213 * If their size doesn't match the caller's
7214 * "transpose_size", we can't do it either because the
7215 * transpose operation will affect the entire span of
7216 * the objects.
7217 */
7218 retval = KERN_INVALID_VALUE;
7219 goto done;
7220 }
7221
7222
7223 /*
7224 * Transpose the lists of resident pages.
2d21ac55 7225 * This also updates the resident_page_count and the memq_hint.
91447636 7226 */
39037602 7227 if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
91447636
A
7228 /*
7229 * No pages in object1, just transfer pages
7230 * from object2 to object1. No need to go through
7231 * an intermediate object.
7232 */
39037602
A
7233 while (!vm_page_queue_empty(&object2->memq)) {
7234 page = (vm_page_t) vm_page_queue_first(&object2->memq);
2d21ac55 7235 vm_page_rename(page, object1, page->offset, FALSE);
91447636 7236 }
39037602
A
7237 assert(vm_page_queue_empty(&object2->memq));
7238 } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
91447636
A
7239 /*
7240 * No pages in object2, just transfer pages
7241 * from object1 to object2. No need to go through
7242 * an intermediate object.
7243 */
39037602
A
7244 while (!vm_page_queue_empty(&object1->memq)) {
7245 page = (vm_page_t) vm_page_queue_first(&object1->memq);
2d21ac55 7246 vm_page_rename(page, object2, page->offset, FALSE);
91447636 7247 }
39037602 7248 assert(vm_page_queue_empty(&object1->memq));
91447636
A
7249 } else {
7250 /* transfer object1's pages to tmp_object */
39037602
A
7251 while (!vm_page_queue_empty(&object1->memq)) {
7252 page = (vm_page_t) vm_page_queue_first(&object1->memq);
91447636 7253 page_offset = page->offset;
b0d623f7 7254 vm_page_remove(page, TRUE);
91447636 7255 page->offset = page_offset;
39037602 7256 vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, listq);
91447636 7257 }
39037602 7258 assert(vm_page_queue_empty(&object1->memq));
91447636 7259 /* transfer object2's pages to object1 */
39037602
A
7260 while (!vm_page_queue_empty(&object2->memq)) {
7261 page = (vm_page_t) vm_page_queue_first(&object2->memq);
2d21ac55 7262 vm_page_rename(page, object1, page->offset, FALSE);
91447636 7263 }
39037602 7264 assert(vm_page_queue_empty(&object2->memq));
3e170ce0 7265 /* transfer tmp_object's pages to object2 */
39037602
A
7266 while (!vm_page_queue_empty(&tmp_object->memq)) {
7267 page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
7268 vm_page_queue_remove(&tmp_object->memq, page,
7269 vm_page_t, listq);
91447636
A
7270 vm_page_insert(page, object2, page->offset);
7271 }
39037602 7272 assert(vm_page_queue_empty(&tmp_object->memq));
91447636
A
7273 }
7274
91447636
A
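/* Swap a single field between object1 and object2, using tmp_object as scratch. */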
7275#define __TRANSPOSE_FIELD(field) \
7276MACRO_BEGIN \
7277 tmp_object->field = object1->field; \
7278 object1->field = object2->field; \
7279 object2->field = tmp_object->field; \
7280MACRO_END
7281
b0d623f7 7282 /* "Lock" refers to the object not its contents */
2d21ac55 7283 /* "size" should be identical */
6d2010ae 7284 assert(object1->vo_size == object2->vo_size);
b0d623f7 7285 /* "memq_hint" was updated above when transposing pages */
2d21ac55
A
7286 /* "ref_count" refers to the object not its contents */
7287#if TASK_SWAPPER
7288 /* "res_count" refers to the object not its contents */
7289#endif
7290 /* "resident_page_count" was updated above when transposing pages */
b0d623f7
A
7291 /* "wired_page_count" was updated above when transposing pages */
7292 /* "reusable_page_count" was updated above when transposing pages */
2d21ac55 7293 /* there should be no "copy" */
91447636
A
7294 assert(!object1->copy);
7295 assert(!object2->copy);
2d21ac55 7296 /* there should be no "shadow" */
91447636
A
7297 assert(!object1->shadow);
7298 assert(!object2->shadow);
6d2010ae 7299 __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
91447636
A
7300 __TRANSPOSE_FIELD(pager);
7301 __TRANSPOSE_FIELD(paging_offset);
91447636
A
7302 __TRANSPOSE_FIELD(pager_control);
7303 /* update the memory_objects' pointers back to the VM objects */
7304 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
7305 memory_object_control_collapse(object1->pager_control,
7306 object1);
7307 }
7308 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
7309 memory_object_control_collapse(object2->pager_control,
7310 object2);
7311 }
2d21ac55
A
7312 __TRANSPOSE_FIELD(copy_strategy);
7313 /* "paging_in_progress" refers to the object not its contents */
b0d623f7
A
7314 assert(!object1->paging_in_progress);
7315 assert(!object2->paging_in_progress);
7316 assert(object1->activity_in_progress);
7317 assert(object2->activity_in_progress);
2d21ac55 7318 /* "all_wanted" refers to the object not its contents */
91447636
A
7319 __TRANSPOSE_FIELD(pager_created);
7320 __TRANSPOSE_FIELD(pager_initialized);
7321 __TRANSPOSE_FIELD(pager_ready);
7322 __TRANSPOSE_FIELD(pager_trusted);
2d21ac55 7323 __TRANSPOSE_FIELD(can_persist);
91447636
A
7324 __TRANSPOSE_FIELD(internal);
7325 __TRANSPOSE_FIELD(temporary);
7326 __TRANSPOSE_FIELD(private);
7327 __TRANSPOSE_FIELD(pageout);
2d21ac55
A
7328 /* "alive" should be set */
7329 assert(object1->alive);
7330 assert(object2->alive);
7331 /* "purgeable" should be non-purgeable */
7332 assert(object1->purgable == VM_PURGABLE_DENY);
7333 assert(object2->purgable == VM_PURGABLE_DENY);
7334 /* "shadowed" refers to the the object not its contents */
39236c6e 7335 __TRANSPOSE_FIELD(purgeable_when_ripe);
2d21ac55 7336 __TRANSPOSE_FIELD(advisory_pageout);
91447636 7337 __TRANSPOSE_FIELD(true_share);
2d21ac55
A
7338 /* "terminating" should not be set */
7339 assert(!object1->terminating);
7340 assert(!object2->terminating);
7341 __TRANSPOSE_FIELD(named);
7342 /* "shadow_severed" refers to the object not its contents */
91447636
A
7343 __TRANSPOSE_FIELD(phys_contiguous);
7344 __TRANSPOSE_FIELD(nophyscache);
b0d623f7
A
7345 /* "cached_list.next" points to transposed object */
7346 object1->cached_list.next = (queue_entry_t) object2;
7347 object2->cached_list.next = (queue_entry_t) object1;
7348 /* "cached_list.prev" should be NULL */
2d21ac55 7349 assert(object1->cached_list.prev == NULL);
2d21ac55 7350 assert(object2->cached_list.prev == NULL);
2d21ac55
A
7351 /* "msr_q" is linked to the object not its contents */
7352 assert(queue_empty(&object1->msr_q));
7353 assert(queue_empty(&object2->msr_q));
91447636
A
7354 __TRANSPOSE_FIELD(last_alloc);
7355 __TRANSPOSE_FIELD(sequential);
2d21ac55
A
7356 __TRANSPOSE_FIELD(pages_created);
7357 __TRANSPOSE_FIELD(pages_used);
6d2010ae 7358 __TRANSPOSE_FIELD(scan_collisions);
91447636 7359 __TRANSPOSE_FIELD(cow_hint);
2d21ac55
A
7360#if MACH_ASSERT
7361 __TRANSPOSE_FIELD(paging_object);
7362#endif
91447636 7363 __TRANSPOSE_FIELD(wimg_bits);
6d2010ae 7364 __TRANSPOSE_FIELD(set_cache_attr);
2d21ac55 7365 __TRANSPOSE_FIELD(code_signed);
b0d623f7
A
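	/*
	 * The pager-to-object hash entries must follow the pagers that were
	 * just transposed, so re-point each entry at its new owning object.
	 */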
7366 if (object1->hashed) {
7367 hash_lck = vm_object_hash_lock_spin(object2->pager);
7368 hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
7369 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
7370 hash_entry->object = object2;
7371 vm_object_hash_unlock(hash_lck);
7372 }
7373 if (object2->hashed) {
7374 hash_lck = vm_object_hash_lock_spin(object1->pager);
7375 hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
7376 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
7377 hash_entry->object = object1;
7378 vm_object_hash_unlock(hash_lck);
7379 }
7380 __TRANSPOSE_FIELD(hashed);
7381 object1->transposed = TRUE;
7382 object2->transposed = TRUE;
7383 __TRANSPOSE_FIELD(mapping_in_progress);
7384 __TRANSPOSE_FIELD(volatile_empty);
7385 __TRANSPOSE_FIELD(volatile_fault);
7386 __TRANSPOSE_FIELD(all_reusable);
7387 assert(object1->blocked_access);
7388 assert(object2->blocked_access);
7389 assert(object1->__object2_unused_bits == 0);
7390 assert(object2->__object2_unused_bits == 0);
7391#if UPL_DEBUG
2d21ac55
A
7392 /* "uplq" refers to the object not its contents (see upl_transpose()) */
7393#endif
3e170ce0
A
7394 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
7395 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
7396 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
7397 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
91447636
A
7398
7399#undef __TRANSPOSE_FIELD
7400
7401 retval = KERN_SUCCESS;
7402
7403done:
7404 /*
7405 * Cleanup.
7406 */
7407 if (tmp_object != VM_OBJECT_NULL) {
91447636
A
7408 vm_object_unlock(tmp_object);
7409 /*
7410 * Re-initialize the temporary object to avoid
7411 * deallocating a real pager.
7412 */
7413 _vm_object_allocate(transpose_size, tmp_object);
7414 vm_object_deallocate(tmp_object);
7415 tmp_object = VM_OBJECT_NULL;
7416 }
7417
7418 if (object1_locked) {
7419 vm_object_unlock(object1);
7420 object1_locked = FALSE;
7421 }
7422 if (object2_locked) {
7423 vm_object_unlock(object2);
7424 object2_locked = FALSE;
7425 }
b0d623f7
A
7426
7427 vm_object_transpose_count++;
91447636
A
7428
7429 return retval;
7430}
0c530ab8
A
7431
7432
2d21ac55 7433/*
b0d623f7 7434 * vm_object_cluster_size
2d21ac55
A
7435 *
7436 * Determine how big a cluster we should issue an I/O for...
7437 *
7438 * Inputs: *start == offset of page needed
7439 * *length == maximum cluster pager can handle
7440 * Outputs: *start == beginning offset of cluster
7441 * *length == length of cluster to try
7442 *
7443 * The original *start will be encompassed by the cluster
7444 *
7445 */
7446extern int speculative_reads_disabled;
6d2010ae
A
7447extern int ignore_is_ssd;
7448
39037602
A
7449/*
7450 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
7451 * to derive min_ph_size and max_ph_size (IMP: bytes, not # of pages) and expect those values to
7452 * always be page-aligned. The derivation could involve operations (e.g. division)
7453 * that could give us non-page-size aligned values if we start out with values that
7454 * are odd multiples of PAGE_SIZE.
7455 */
7456 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
fe8ab488 7457unsigned int preheat_min_bytes = (1024 * 32);
2d21ac55 7458
2d21ac55
A
7459
7460__private_extern__ void
7461vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
b0d623f7 7462 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
2d21ac55
A
7463{
7464 vm_size_t pre_heat_size;
7465 vm_size_t tail_size;
7466 vm_size_t head_size;
7467 vm_size_t max_length;
7468 vm_size_t cluster_size;
7469 vm_object_offset_t object_size;
7470 vm_object_offset_t orig_start;
7471 vm_object_offset_t target_start;
7472 vm_object_offset_t offset;
7473 vm_behavior_t behavior;
7474 boolean_t look_behind = TRUE;
7475 boolean_t look_ahead = TRUE;
6d2010ae 7476 boolean_t isSSD = FALSE;
b0d623f7 7477 uint32_t throttle_limit;
2d21ac55
A
7478 int sequential_run;
7479 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
fe8ab488
A
7480 vm_size_t max_ph_size;
7481 vm_size_t min_ph_size;
2d21ac55
A
7482
7483 assert( !(*length & PAGE_MASK));
7484 assert( !(*start & PAGE_MASK_64));
7485
6d2010ae
A
7486 /*
7487 * remember the maximum length of the run requested
7488 */
7489 max_length = *length;
2d21ac55
A
7490 /*
7491 * we'll always return a cluster size of at least
7492 * 1 page, since the original fault must always
7493 * be processed
7494 */
7495 *length = PAGE_SIZE;
b0d623f7 7496 *io_streaming = 0;
2d21ac55 7497
6d2010ae 7498 if (speculative_reads_disabled || fault_info == NULL) {
2d21ac55
A
7499 /*
7500 * no cluster... just fault the page in
7501 */
7502 return;
7503 }
7504 orig_start = *start;
7505 target_start = orig_start;
b0d623f7 7506 cluster_size = round_page(fault_info->cluster_size);
2d21ac55
A
7507 behavior = fault_info->behavior;
7508
7509 vm_object_lock(object);
7510
6d2010ae
A
7511 if (object->pager == MEMORY_OBJECT_NULL)
7512 goto out; /* pager is gone for this object, nothing more to do */
7513
7514 if (!ignore_is_ssd)
7515 vnode_pager_get_isSSD(object->pager, &isSSD);
7516
fe8ab488
A
7517 min_ph_size = round_page(preheat_min_bytes);
7518 max_ph_size = round_page(preheat_max_bytes);
6d2010ae
A
7519
7520 if (isSSD) {
7521 min_ph_size /= 2;
7522 max_ph_size /= 8;
39037602
A
7523
7524 if (min_ph_size & PAGE_MASK_64) {
7525 min_ph_size = trunc_page(min_ph_size);
7526 }
7527
7528 if (max_ph_size & PAGE_MASK_64) {
7529 max_ph_size = trunc_page(max_ph_size);
7530 }
6d2010ae 7531 }
39037602 7532
fe8ab488
A
7533 if (min_ph_size < PAGE_SIZE)
7534 min_ph_size = PAGE_SIZE;
6d2010ae 7535
fe8ab488
A
7536 if (max_ph_size < PAGE_SIZE)
7537 max_ph_size = PAGE_SIZE;
7538 else if (max_ph_size > MAX_UPL_TRANSFER_BYTES)
7539 max_ph_size = MAX_UPL_TRANSFER_BYTES;
6d2010ae 7540
fe8ab488
A
7541 if (max_length > max_ph_size)
7542 max_length = max_ph_size;
6d2010ae
A
7543
7544 if (max_length <= PAGE_SIZE)
7545 goto out;
7546
2d21ac55 7547 if (object->internal)
6d2010ae 7548 object_size = object->vo_size;
2d21ac55 7549 else
6d2010ae 7550 vnode_pager_get_object_size(object->pager, &object_size);
2d21ac55
A
7551
7552 object_size = round_page_64(object_size);
7553
7554 if (orig_start >= object_size) {
7555 /*
7556 * fault occurred beyond the EOF...
7557 * we need to punt w/o changing the
7558 * starting offset
7559 */
7560 goto out;
7561 }
7562 if (object->pages_used > object->pages_created) {
7563 /*
7564 * must have wrapped our 32 bit counters
7565 * so reset
7566 */
7567 object->pages_used = object->pages_created = 0;
7568 }
7569 if ((sequential_run = object->sequential)) {
7570 if (sequential_run < 0) {
7571 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
7572 sequential_run = 0 - sequential_run;
7573 } else {
7574 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
7575 }
b0d623f7 7576
2d21ac55 7577 }
6d2010ae 7578 switch (behavior) {
2d21ac55
A
7579
7580 default:
7581 behavior = VM_BEHAVIOR_DEFAULT;
7582
7583 case VM_BEHAVIOR_DEFAULT:
7584 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
7585 goto out;
7586
b0d623f7 7587 if (sequential_run >= (3 * PAGE_SIZE)) {
2d21ac55
A
7588 pre_heat_size = sequential_run + PAGE_SIZE;
7589
b0d623f7 7590 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
2d21ac55
A
7591 look_behind = FALSE;
7592 else
7593 look_ahead = FALSE;
b0d623f7
A
7594
7595 *io_streaming = 1;
2d21ac55 7596 } else {
2d21ac55 7597
fe8ab488 7598 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
2d21ac55
A
7599 /*
7600 * prime the pump
7601 */
fe8ab488 7602 pre_heat_size = min_ph_size;
6d2010ae
A
7603 } else {
7604 /*
7605 * Linear growth in PH size: The maximum size is max_length...
7606 * this calculation will result in a size that is neither a
7607 * power of 2 nor a multiple of PAGE_SIZE... so round
7608 * it up to the nearest PAGE_SIZE boundary
7609 */
3e170ce0 7610 pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
fe8ab488
A
7611
7612 if (pre_heat_size < min_ph_size)
7613 pre_heat_size = min_ph_size;
6d2010ae
A
7614 else
7615 pre_heat_size = round_page(pre_heat_size);
2d21ac55 7616 }
2d21ac55
A
7617 }
7618 break;
7619
7620 case VM_BEHAVIOR_RANDOM:
7621 if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
7622 goto out;
7623 break;
7624
7625 case VM_BEHAVIOR_SEQUENTIAL:
7626 if ((pre_heat_size = cluster_size) == 0)
7627 pre_heat_size = sequential_run + PAGE_SIZE;
7628 look_behind = FALSE;
b0d623f7 7629 *io_streaming = 1;
2d21ac55
A
7630
7631 break;
7632
7633 case VM_BEHAVIOR_RSEQNTL:
7634 if ((pre_heat_size = cluster_size) == 0)
7635 pre_heat_size = sequential_run + PAGE_SIZE;
7636 look_ahead = FALSE;
b0d623f7 7637 *io_streaming = 1;
2d21ac55
A
7638
7639 break;
7640
7641 }
b0d623f7
A
7642 throttle_limit = (uint32_t) max_length;
7643 assert(throttle_limit == max_length);
7644
39236c6e 7645 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
b0d623f7
A
7646 if (max_length > throttle_limit)
7647 max_length = throttle_limit;
7648 }
2d21ac55
A
7649 if (pre_heat_size > max_length)
7650 pre_heat_size = max_length;
7651
fe8ab488 7652 if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
316670eb
A
7653
7654 unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
7655
7656 if (consider_free < vm_page_throttle_limit) {
6d2010ae 7657 pre_heat_size = trunc_page(pre_heat_size / 16);
316670eb 7658 } else if (consider_free < vm_page_free_target) {
6d2010ae 7659 pre_heat_size = trunc_page(pre_heat_size / 4);
316670eb
A
7660 }
7661
fe8ab488
A
7662 if (pre_heat_size < min_ph_size)
7663 pre_heat_size = min_ph_size;
b0d623f7 7664 }
2d21ac55 7665 if (look_ahead == TRUE) {
b0d623f7
A
7666 if (look_behind == TRUE) {
7667 /*
7668 * if we get here it's due to a random access...
7669 * so we want to center the original fault address
7670 * within the cluster we will issue... make sure
7671 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7672 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7673 * necessarily an even number of pages so we need to truncate
7674 * the result to a PAGE_SIZE boundary
7675 */
7676 head_size = trunc_page(pre_heat_size / 2);
2d21ac55 7677
b0d623f7
A
7678 if (target_start > head_size)
7679 target_start -= head_size;
7680 else
7681 target_start = 0;
2d21ac55 7682
b0d623f7
A
7683 /*
7684 * 'target_start' at this point represents the beginning offset
7685 * of the cluster we are considering... 'orig_start' will be in
7686 * the center of this cluster if we didn't have to clip the start
7687 * due to running into the start of the file
7688 */
7689 }
7690 if ((target_start + pre_heat_size) > object_size)
7691 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
7692 /*
7693 * at this point calculate the number of pages beyond the original fault
7694 * address that we want to consider... this is guaranteed not to extend beyond
7695 * the current EOF...
7696 */
7697 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
7698 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
2d21ac55 7699 } else {
6d2010ae
A
7700 if (pre_heat_size > target_start) {
7701 /*
7702 * since pre_heat_size is always smaller than 2^32,
7703 * if it is larger than target_start (a 64 bit value)
7704 * it is safe to clip target_start to 32 bits
7705 */
7706 pre_heat_size = (vm_size_t) target_start;
7707 }
2d21ac55
A
7708 tail_size = 0;
7709 }
b0d623f7 7710 assert( !(target_start & PAGE_MASK_64));
39037602 7711 assert( !(pre_heat_size & PAGE_MASK_64));
b0d623f7 7712
2d21ac55
A
7713 if (pre_heat_size <= PAGE_SIZE)
7714 goto out;
7715
7716 if (look_behind == TRUE) {
7717 /*
7718 * take a look at the pages before the original
b0d623f7
A
7719 * faulting offset... recalculate this in case
7720 * we had to clip 'pre_heat_size' above to keep
7721 * from running past the EOF.
2d21ac55
A
7722 */
7723 head_size = pre_heat_size - tail_size - PAGE_SIZE;
7724
7725 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7726 /*
7727 * don't poke below the lowest offset
7728 */
7729 if (offset < fault_info->lo_offset)
7730 break;
39037602
A
7731 /*
7732 * for external objects or internal objects w/o a pager,
7733 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
2d21ac55 7734 */
39037602 7735 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
39236c6e
A
7736 break;
7737 }
2d21ac55
A
7738 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7739 /*
7740 * don't bridge resident pages
7741 */
7742 break;
7743 }
7744 *start = offset;
7745 *length += PAGE_SIZE;
7746 }
7747 }
7748 if (look_ahead == TRUE) {
7749 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7750 /*
7751 * don't poke above the highest offset
7752 */
7753 if (offset >= fault_info->hi_offset)
7754 break;
b0d623f7
A
7755 assert(offset < object_size);
7756
39037602
A
7757 /*
7758 * for external objects or internal objects w/o a pager,
7759 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
2d21ac55 7760 */
fe8ab488 7761 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
39236c6e
A
7762 break;
7763 }
2d21ac55
A
7764 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7765 /*
7766 * don't bridge resident pages
7767 */
7768 break;
7769 }
7770 *length += PAGE_SIZE;
7771 }
7772 }
7773out:
b0d623f7
A
7774 if (*length > max_length)
7775 *length = max_length;
7776
2d21ac55 7777 vm_object_unlock(object);
316670eb
A
7778
7779 DTRACE_VM1(clustersize, vm_size_t, *length);
2d21ac55
A
7780}
7781
7782
7783/*
7784 * Allow manipulation of individual page state. This is actually part of
7785 * the UPL regimen but takes place on the VM object rather than on a UPL
7786 */
0c530ab8
A
7787
7788kern_return_t
7789vm_object_page_op(
7790 vm_object_t object,
7791 vm_object_offset_t offset,
7792 int ops,
7793 ppnum_t *phys_entry,
7794 int *flags)
7795{
7796 vm_page_t dst_page;
7797
7798 vm_object_lock(object);
7799
7800 if(ops & UPL_POP_PHYSICAL) {
7801 if(object->phys_contiguous) {
7802 if (phys_entry) {
7803 *phys_entry = (ppnum_t)
6d2010ae 7804 (object->vo_shadow_offset >> PAGE_SHIFT);
0c530ab8
A
7805 }
7806 vm_object_unlock(object);
7807 return KERN_SUCCESS;
7808 } else {
7809 vm_object_unlock(object);
7810 return KERN_INVALID_OBJECT;
7811 }
7812 }
7813 if(object->phys_contiguous) {
7814 vm_object_unlock(object);
7815 return KERN_INVALID_OBJECT;
7816 }
7817
7818 while(TRUE) {
7819 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
7820 vm_object_unlock(object);
7821 return KERN_FAILURE;
7822 }
7823
7824 /* Sync up on getting the busy bit */
7825 if((dst_page->busy || dst_page->cleaning) &&
7826 (((ops & UPL_POP_SET) &&
7827 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7828 /* someone else is playing with the page, we will */
7829 /* have to wait */
7830 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7831 continue;
7832 }
7833
7834 if (ops & UPL_POP_DUMP) {
2d21ac55 7835 if (dst_page->pmapped == TRUE)
39037602 7836 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
0c530ab8 7837
b0d623f7 7838 VM_PAGE_FREE(dst_page);
0c530ab8
A
7839 break;
7840 }
7841
7842 if (flags) {
7843 *flags = 0;
7844
7845 /* Get the condition of flags before requested ops */
7846 /* are undertaken */
7847
7848 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
39037602 7849 if(dst_page->free_when_done) *flags |= UPL_POP_PAGEOUT;
0c530ab8
A
7850 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
7851 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
7852 if(dst_page->busy) *flags |= UPL_POP_BUSY;
7853 }
7854
7855 /* The caller should have made a call either contingent with */
7856 /* or prior to this call to set UPL_POP_BUSY */
7857 if(ops & UPL_POP_SET) {
7858 /* The protection granted with this assert will */
7859 /* not be complete. If the caller violates the */
7860 /* convention and attempts to change page state */
7861 /* without first setting busy we may not see it */
7862 /* because the page may already be busy. However */
7863 /* if such violations occur we will assert sooner */
7864 /* or later. */
7865 assert(dst_page->busy || (ops & UPL_POP_BUSY));
316670eb
A
7866 if (ops & UPL_POP_DIRTY) {
7867 SET_PAGE_DIRTY(dst_page, FALSE);
7868 }
39037602 7869 if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = TRUE;
0c530ab8
A
7870 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
7871 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
7872 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
7873 }
7874
7875 if(ops & UPL_POP_CLR) {
7876 assert(dst_page->busy);
7877 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
39037602 7878 if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = FALSE;
0c530ab8
A
7879 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
7880 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
7881 if (ops & UPL_POP_BUSY) {
7882 dst_page->busy = FALSE;
7883 PAGE_WAKEUP(dst_page);
7884 }
7885 }
7886
7887 if (dst_page->encrypted) {
7888 /*
7889 * ENCRYPTED SWAP:
7890 * We need to decrypt this encrypted page before the
7891 * caller can access its contents.
7892 * But if the caller really wants to access the page's
7893 * contents, they have to keep the page "busy".
7894 * Otherwise, the page could get recycled or re-encrypted
7895 * at any time.
7896 */
7897 if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
7898 dst_page->busy) {
7899 /*
7900 * The page is stable enough to be accessed by
7901 * the caller, so make sure its contents are
7902 * not encrypted.
7903 */
7904 vm_page_decrypt(dst_page, 0);
7905 } else {
7906 /*
7907 * The page is not busy, so don't bother
7908 * decrypting it, since anything could
7909 * happen to it between now and when the
7910 * caller wants to access it.
7911 * We should not give the caller access
7912 * to this page.
7913 */
7914 assert(!phys_entry);
7915 }
7916 }
7917
7918 if (phys_entry) {
7919 /*
7920 * The physical page number will remain valid
7921 * only if the page is kept busy.
7922 * ENCRYPTED SWAP: make sure we don't let the
7923 * caller access an encrypted page.
7924 */
7925 assert(dst_page->busy);
7926 assert(!dst_page->encrypted);
39037602 7927 *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
0c530ab8
A
7928 }
7929
7930 break;
7931 }
7932
7933 vm_object_unlock(object);
7934 return KERN_SUCCESS;
7935
7936}
7937
7938/*
7939 * vm_object_range_op offers a performance enhancement over
7940 * vm_object_page_op for page_op functions which do not require page
7941 * level state to be returned from the call. Page_op was created to provide
7942 * a low-cost alternative to page manipulation via UPLs when only a single
7943 * page was involved. The range_op call establishes the ability in the _op
7944 * family of functions to work on multiple pages where the lack of page level
7945 * state handling allows the caller to avoid the overhead of the upl structures.
7946 */
7947
7948kern_return_t
7949vm_object_range_op(
7950 vm_object_t object,
7951 vm_object_offset_t offset_beg,
7952 vm_object_offset_t offset_end,
7953 int ops,
b0d623f7 7954 uint32_t *range)
0c530ab8
A
7955{
7956 vm_object_offset_t offset;
7957 vm_page_t dst_page;
7958
b0d623f7
A
7959 if (offset_end - offset_beg > (uint32_t) -1) {
7960 /* range is too big and would overflow "*range" */
7961 return KERN_INVALID_ARGUMENT;
7962 }
0c530ab8
A
7963 if (object->resident_page_count == 0) {
7964 if (range) {
b0d623f7 7965 if (ops & UPL_ROP_PRESENT) {
0c530ab8 7966 *range = 0;
b0d623f7
A
7967 } else {
7968 *range = (uint32_t) (offset_end - offset_beg);
7969 assert(*range == (offset_end - offset_beg));
7970 }
0c530ab8
A
7971 }
7972 return KERN_SUCCESS;
7973 }
7974 vm_object_lock(object);
7975
7976 if (object->phys_contiguous) {
7977 vm_object_unlock(object);
7978 return KERN_INVALID_OBJECT;
7979 }
7980
2d21ac55 7981 offset = offset_beg & ~PAGE_MASK_64;
0c530ab8
A
7982
7983 while (offset < offset_end) {
7984 dst_page = vm_page_lookup(object, offset);
7985 if (dst_page != VM_PAGE_NULL) {
7986 if (ops & UPL_ROP_DUMP) {
316670eb 7987 if (dst_page->busy || dst_page->cleaning) {
6d2010ae 7988 /*
0c530ab8
A
7989 * someone else is playing with the
7990 * page, we will have to wait
7991 */
2d21ac55 7992 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
0c530ab8
A
7993 /*
7994 * need to look the page up again since its
7995 * state may have changed while we slept
7996 * it might even belong to a different object
7997 * at this point
7998 */
7999 continue;
8000 }
39037602 8001 if (dst_page->laundry)
316670eb 8002 vm_pageout_steal_laundry(dst_page, FALSE);
39037602 8003
2d21ac55 8004 if (dst_page->pmapped == TRUE)
39037602 8005 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
0c530ab8 8006
b0d623f7 8007 VM_PAGE_FREE(dst_page);
2d21ac55 8008
3e170ce0
A
8009 } else if ((ops & UPL_ROP_ABSENT)
8010 && (!dst_page->absent || dst_page->busy)) {
8011 break;
8012 }
0c530ab8
A
8013 } else if (ops & UPL_ROP_PRESENT)
8014 break;
8015
8016 offset += PAGE_SIZE;
8017 }
8018 vm_object_unlock(object);
8019
2d21ac55
A
8020 if (range) {
8021 if (offset > offset_end)
8022 offset = offset_end;
b0d623f7
A
8023 if(offset > offset_beg) {
8024 *range = (uint32_t) (offset - offset_beg);
8025 assert(*range == (offset - offset_beg));
8026 } else {
8027 *range = 0;
8028 }
2d21ac55 8029 }
0c530ab8
A
8030 return KERN_SUCCESS;
8031}
2d21ac55 8032
39236c6e
A
8033/*
8034 * Used to point a pager directly to a range of memory (when the pager may be associated
8035 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
8036 * expect that the virtual address will denote the start of a range that is physically contiguous.
8037 */
8038kern_return_t pager_map_to_phys_contiguous(
8039 memory_object_control_t object,
8040 memory_object_offset_t offset,
8041 addr64_t base_vaddr,
8042 vm_size_t size)
8043{
8044 ppnum_t page_num;
8045 boolean_t clobbered_private;
8046 kern_return_t retval;
8047 vm_object_t pager_object;
8048
8049 page_num = pmap_find_phys(kernel_pmap, base_vaddr);
8050
8051 if (!page_num) {
8052 retval = KERN_FAILURE;
8053 goto out;
8054 }
8055
8056 pager_object = memory_object_control_to_vm_object(object);
8057
8058 if (!pager_object) {
8059 retval = KERN_FAILURE;
8060 goto out;
8061 }
8062
8063 clobbered_private = pager_object->private;
39037602
A
8064 if (pager_object->private != TRUE) {
8065 vm_object_lock(pager_object);
8066 pager_object->private = TRUE;
8067 vm_object_unlock(pager_object);
8068 }
39236c6e
A
8069 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
8070
39037602
A
8071 if (retval != KERN_SUCCESS) {
8072 if (pager_object->private != clobbered_private) {
8073 vm_object_lock(pager_object);
8074 pager_object->private = clobbered_private;
8075 vm_object_unlock(pager_object);
8076 }
8077 }
39236c6e
A
8078
8079out:
8080 return retval;
8081}
2d21ac55
A
8082
8083uint32_t scan_object_collision = 0;
8084
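/*
 * Object lock wrappers.
 *
 * If the pageout scan thread has advertised interest in this object
 * (vm_pageout_scan_wants_object), back off briefly before taking or
 * trying the lock so that thread isn't starved.  On DEVELOPMENT and
 * DEBUG kernels the exclusive owner is recorded in "Lock_owner" so
 * vm_object_unlock() can verify that the caller really owns the lock.
 */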
8085void
8086vm_object_lock(vm_object_t object)
8087{
8088 if (object == vm_pageout_scan_wants_object) {
8089 scan_object_collision++;
8090 mutex_pause(2);
8091 }
8092 lck_rw_lock_exclusive(&object->Lock);
39037602
A
8093#if DEVELOPMENT || DEBUG
8094 object->Lock_owner = current_thread();
8095#endif
2d21ac55
A
8096}
8097
8098boolean_t
b0d623f7 8099vm_object_lock_avoid(vm_object_t object)
2d21ac55
A
8100{
8101 if (object == vm_pageout_scan_wants_object) {
8102 scan_object_collision++;
b0d623f7 8103 return TRUE;
2d21ac55 8104 }
b0d623f7
A
8105 return FALSE;
8106}
8107
8108boolean_t
8109_vm_object_lock_try(vm_object_t object)
8110{
39037602
A
8111 boolean_t retval;
8112
8113 retval = lck_rw_try_lock_exclusive(&object->Lock);
8114#if DEVELOPMENT || DEBUG
8115 if (retval == TRUE)
8116 object->Lock_owner = current_thread();
8117#endif
8118 return (retval);
2d21ac55
A
8119}
8120
b0d623f7
A
8121boolean_t
8122vm_object_lock_try(vm_object_t object)
8123{
6d2010ae
A
8124 /*
8125 * Called from hibernate path so check before blocking.
8126 */
8127 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
b0d623f7
A
8128 mutex_pause(2);
8129 }
8130 return _vm_object_lock_try(object);
8131}
6d2010ae 8132
2d21ac55
A
8133void
8134vm_object_lock_shared(vm_object_t object)
8135{
b0d623f7 8136 if (vm_object_lock_avoid(object)) {
2d21ac55
A
8137 mutex_pause(2);
8138 }
8139 lck_rw_lock_shared(&object->Lock);
8140}
8141
8142boolean_t
8143vm_object_lock_try_shared(vm_object_t object)
8144{
b0d623f7 8145 if (vm_object_lock_avoid(object)) {
2d21ac55
A
8146 mutex_pause(2);
8147 }
8148 return (lck_rw_try_lock_shared(&object->Lock));
8149}
6d2010ae 8150
39037602
A
8151boolean_t
8152vm_object_lock_upgrade(vm_object_t object)
8153{ boolean_t retval;
8154
8155 retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
8156#if DEVELOPMENT || DEBUG
8157 if (retval == TRUE)
8158 object->Lock_owner = current_thread();
8159#endif
8160 return (retval);
8161}
8162
8163void
8164vm_object_unlock(vm_object_t object)
8165{
8166#if DEVELOPMENT || DEBUG
8167 if (object->Lock_owner) {
8168 if (object->Lock_owner != current_thread())
8169 panic("vm_object_unlock: not owner - %p\n", object);
8170 object->Lock_owner = 0;
8171 }
8172#endif
8173 lck_rw_done(&object->Lock);
8174}
8175
6d2010ae
A
8176
8177unsigned int vm_object_change_wimg_mode_count = 0;
8178
8179/*
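 * Push the new cacheability (WIMG) mode to the pmap for every resident,
 * non-fictitious page and record it in the object's "wimg_bits".
 *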
8180 * The object must be locked
8181 */
8182void
8183vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
8184{
8185 vm_page_t p;
8186
8187 vm_object_lock_assert_exclusive(object);
8188
8189 vm_object_paging_wait(object, THREAD_UNINT);
8190
39037602 8191 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
6d2010ae
A
8192
8193 if (!p->fictitious)
39037602 8194 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
6d2010ae
A
8195 }
8196 if (wimg_mode == VM_WIMG_USE_DEFAULT)
8197 object->set_cache_attr = FALSE;
8198 else
8199 object->set_cache_attr = TRUE;
8200
8201 object->wimg_bits = wimg_mode;
8202
8203 vm_object_change_wimg_mode_count++;
8204}
8205
8206#if CONFIG_FREEZE
8207
3e170ce0
A
8208/*
8209 * This routine does the "relocation" of previously
8210 * compressed pages belonging to this object that are
8211 * residing in a number of compressed segments into
8212 * a set of compressed segments dedicated to hold
8213 * compressed pages belonging to this object.
8214 */
8215
8216extern void *freezer_chead;
8217extern char *freezer_compressor_scratch_buf;
8218extern int c_freezer_compression_count;
8219extern AbsoluteTime c_freezer_last_yield_ts;
8220
8221#define MAX_FREE_BATCH 32
8222#define FREEZER_DUTY_CYCLE_ON_MS 5
8223#define FREEZER_DUTY_CYCLE_OFF_MS 5
8224
8225static int c_freezer_should_yield(void);
8226
8227
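/*
 * Return non-zero once the freezer has been running for more than
 * FREEZER_DUTY_CYCLE_ON_MS since it last yielded the CPU.
 */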
8228static int
8229c_freezer_should_yield()
8230{
8231 AbsoluteTime cur_time;
8232 uint64_t nsecs;
8233
8234 assert(c_freezer_last_yield_ts);
8235 clock_get_uptime(&cur_time);
8236
8237 SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
8238 absolutetime_to_nanoseconds(cur_time, &nsecs);
8239
8240 if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS)
8241 return (1);
8242 return (0);
8243}
8244
8245
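/*
 * Tell the compressor that the freezer is done filling compressed
 * segments through "freezer_chead".
 */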
6d2010ae 8246void
3e170ce0
A
8247vm_object_compressed_freezer_done()
8248{
8249 vm_compressor_finished_filling(&freezer_chead);
8250}
8251
8252
8253void
8254vm_object_compressed_freezer_pageout(
6d2010ae
A
8255 vm_object_t object)
8256{
3e170ce0
A
8257 vm_page_t p;
8258 vm_page_t local_freeq = NULL;
8259 int local_freed = 0;
8260 kern_return_t retval = KERN_SUCCESS;
8261 int obj_resident_page_count_snapshot = 0;
8262
8263 assert(object != VM_OBJECT_NULL);
39037602 8264 assert(object->internal);
39236c6e 8265
6d2010ae 8266 vm_object_lock(object);
39236c6e 8267
3e170ce0
A
8268 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8269
39236c6e 8270 if (!object->pager_initialized) {
3e170ce0
A
8271
8272 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8273
8274 if (!object->pager_initialized)
8275 vm_object_compressor_pager_create(object);
39236c6e 8276 }
fe8ab488 8277
3e170ce0
A
8278 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8279 vm_object_unlock(object);
8280 return;
8281 }
fe8ab488
A
8282 }
8283
39037602 8284 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
3e170ce0
A
8285 vm_object_offset_t curr_offset = 0;
8286
8287 /*
8288 * Go through the object and make sure that any
8289 * previously compressed pages are relocated into
8290 * a compressed segment associated with our "freezer_chead".
8291 */
8292 while (curr_offset < object->vo_size) {
8293
8294 curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
8295
8296 if (curr_offset == (vm_object_offset_t) -1)
8297 break;
8298
8299 retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);
8300
8301 if (retval != KERN_SUCCESS)
8302 break;
fe8ab488 8303
3e170ce0
A
8304 curr_offset += PAGE_SIZE_64;
8305 }
39236c6e
A
8306 }
8307
3e170ce0
A
8308 /*
8309 * We can't hold the object lock while heading down into the compressed pager
8310 * layer because we might need the kernel map lock down there to allocate new
8311 * compressor data structures. And if this same object is mapped in the kernel
8312 * and there's a fault on it, then that thread will want the object lock while
8313 * holding the kernel map lock.
8314 *
8315 * Since we are going to drop/grab the object lock repeatedly, we must make sure
8316 * we won't be stuck in an infinite loop if the same page(s) keep getting
8317 * decompressed. So we grab a snapshot of the number of pages in the object and
8318 * we won't process any more than that number of pages.
8319 */
8320
8321 obj_resident_page_count_snapshot = object->resident_page_count;
8322
8323 vm_object_activity_begin(object);
8324
39037602 8325 while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) {
3e170ce0 8326
39037602 8327 p = (vm_page_t)vm_page_queue_first(&object->memq);
3e170ce0
A
8328
8329 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
6d2010ae 8330
6d2010ae
A
8331 vm_page_lockspin_queues();
8332
3e170ce0 8333 if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
3e170ce0
A
8334
8335 vm_page_unlock_queues();
8336
8337 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
8338
39037602
A
8339 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
8340 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
3e170ce0
A
8341
8342 continue;
8343 }
8344
8345 if (p->pmapped == TRUE) {
8346 int refmod_state, pmap_flags;
8347
8348 if (p->dirty || p->precious) {
8349 pmap_flags = PMAP_OPTIONS_COMPRESSOR;
8350 } else {
8351 pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8352 }
8353
39037602 8354 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
3e170ce0
A
8355 if (refmod_state & VM_MEM_MODIFIED) {
8356 SET_PAGE_DIRTY(p, FALSE);
8357 }
8358 }
8359
8360 if (p->dirty == FALSE && p->precious == FALSE) {
8361 /*
8362 * Clean and non-precious page.
8363 */
8364 vm_page_unlock_queues();
8365 VM_PAGE_FREE(p);
8366
8367 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
8368 continue;
8369 }
8370
39037602 8371 if (p->laundry)
3e170ce0 8372 vm_pageout_steal_laundry(p, TRUE);
3e170ce0 8373
39037602
A
8374 vm_page_queues_remove(p, TRUE);
8375
3e170ce0
A
8376 vm_page_unlock_queues();
8377
8378
316670eb 8379 /*
3e170ce0
A
8380 * In case the compressor fails to compress this page, we need it at
8381 * the back of the object memq so that we don't keep trying to process it.
8382 * Make the move here while we have the object lock held.
316670eb 8383 */
39236c6e 8384
39037602
A
8385 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
8386 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
39236c6e 8387
3e170ce0
A
8388 /*
8389 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
8390 *
8391 * Mark the page busy so no one messes with it while we have the object lock dropped.
8392 */
39236c6e 8393
3e170ce0 8394 p->busy = TRUE;
39236c6e 8395
3e170ce0 8396 vm_object_activity_begin(object);
39236c6e 8397
3e170ce0
A
8398 vm_object_unlock(object);
8399
8400 /*
8401 * arg3 == FALSE tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
8402 */
8403 if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) {
8404 /*
8405 * page has already been un-tabled from the object via 'vm_page_remove'
8406 */
39037602 8407 p->snext = local_freeq;
3e170ce0
A
8408 local_freeq = p;
8409 local_freed++;
8410
8411 if (local_freed >= MAX_FREE_BATCH) {
8412
8413 vm_page_free_list(local_freeq, TRUE);
39236c6e 8414
3e170ce0
A
8415 local_freeq = NULL;
8416 local_freed = 0;
39236c6e 8417 }
3e170ce0
A
8418 c_freezer_compression_count++;
8419 }
8420 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
8421
8422 if (local_freed == 0 && c_freezer_should_yield()) {
39236c6e 8423
3e170ce0
A
8424 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
8425 clock_get_uptime(&c_freezer_last_yield_ts);
316670eb 8426 }
3e170ce0
A
8427
8428 vm_object_lock(object);
6d2010ae
A
8429 }
8430
3e170ce0
A
8431 if (local_freeq) {
8432 vm_page_free_list(local_freeq, TRUE);
8433
8434 local_freeq = NULL;
8435 local_freed = 0;
8436 }
8437
8438 vm_object_activity_end(object);
8439
6d2010ae 8440 vm_object_unlock(object);
3e170ce0
A
8441
8442 if (c_freezer_should_yield()) {
8443
8444 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
8445 clock_get_uptime(&c_freezer_last_yield_ts);
8446 }
6d2010ae
A
8447}
8448
6d2010ae 8449#endif /* CONFIG_FREEZE */
fe8ab488
A
8450
8451
3e170ce0
A
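/*
 * vm_object_pageout:
 *
 * Push an internal object's resident pages toward the compressor:
 * clean, non-precious pages are freed outright, while dirty or
 * precious pages are pulled off the paging queues and handed to
 * vm_pageout_cluster().  The compressor pager is created first if
 * the object doesn't have one, and the loop throttles itself against
 * the internal pageout queue.
 */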
8452void
8453vm_object_pageout(
8454 vm_object_t object)
8455{
8456 vm_page_t p, next;
8457 struct vm_pageout_queue *iq;
8458 boolean_t need_unlock = TRUE;
8459
39037602
A
8460 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT)
8461 return;
8462
3e170ce0
A
8463 iq = &vm_pageout_queue_internal;
8464
8465 assert(object != VM_OBJECT_NULL );
3e170ce0
A
8466
8467 vm_object_lock(object);
8468
8469 if (!object->internal ||
8470 object->terminating ||
8471 !object->alive) {
8472 vm_object_unlock(object);
8473 return;
8474 }
8475
8476 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8477
8478 if (!object->pager_initialized) {
8479
8480 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8481
8482 if (!object->pager_initialized)
8483 vm_object_compressor_pager_create(object);
8484 }
8485
8486 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8487 vm_object_unlock(object);
8488 return;
8489 }
8490 }
8491
8492ReScan:
39037602 8493 next = (vm_page_t)vm_page_queue_first(&object->memq);
3e170ce0 8494
39037602 8495 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
3e170ce0 8496 p = next;
39037602 8497 next = (vm_page_t)vm_page_queue_next(&next->listq);
3e170ce0 8498
39037602
A
8499 assert(p->vm_page_q_state != VM_PAGE_ON_FREE_Q);
8500
8501 if ((p->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) ||
3e170ce0
A
8502 p->encrypted_cleaning ||
8503 p->cleaning ||
8504 p->laundry ||
3e170ce0
A
8505 p->busy ||
8506 p->absent ||
8507 p->error ||
8508 p->fictitious ||
8509 VM_PAGE_WIRED(p)) {
8510 /*
8511 * Page is already being cleaned or can't be cleaned.
8512 */
8513 continue;
8514 }
8515
8516 /* Throw to the pageout queue */
8517
8518 vm_page_lockspin_queues();
8519 need_unlock = TRUE;
8520
8521 if (vm_compressor_low_on_space()) {
8522 vm_page_unlock_queues();
8523 break;
8524 }
8525
8526 if (VM_PAGE_Q_THROTTLED(iq)) {
8527
8528 iq->pgo_draining = TRUE;
8529
8530 assert_wait((event_t) (&iq->pgo_laundry + 1),
8531 THREAD_INTERRUPTIBLE);
8532 vm_page_unlock_queues();
8533 vm_object_unlock(object);
8534
8535 thread_block(THREAD_CONTINUE_NULL);
8536
8537 vm_object_lock(object);
8538 goto ReScan;
8539 }
8540
8541 assert(!p->fictitious);
8542 assert(!p->busy);
8543 assert(!p->absent);
8544 assert(!p->unusual);
8545 assert(!p->error);
8546 assert(!VM_PAGE_WIRED(p));
8547 assert(!p->cleaning);
8548
8549 if (p->pmapped == TRUE) {
8550 int refmod_state;
8551 int pmap_options;
8552
39037602
A
8553 /*
8554 * Tell pmap the page should be accounted
8555 * for as "compressed" if it's been modified.
8556 */
8557 pmap_options =
8558 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8559 if (p->dirty || p->precious) {
3e170ce0 8560 /*
39037602
A
8561 * We already know it's been modified,
8562 * so tell pmap to account for it
8563 * as "compressed".
3e170ce0 8564 */
39037602 8565 pmap_options = PMAP_OPTIONS_COMPRESSOR;
3e170ce0 8566 }
39037602 8567 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
3e170ce0
A
8568 pmap_options,
8569 NULL);
8570 if (refmod_state & VM_MEM_MODIFIED) {
8571 SET_PAGE_DIRTY(p, FALSE);
8572 }
8573 }
8574
8575 if (!p->dirty && !p->precious) {
8576 vm_page_unlock_queues();
8577 VM_PAGE_FREE(p);
8578 continue;
8579 }
8580
39037602
A
8581 vm_page_queues_remove(p, TRUE);
8582
8583 if (vm_pageout_cluster(p, FALSE, TRUE))
3e170ce0
A
8584 need_unlock = FALSE;
8585
8586 if (need_unlock == TRUE)
8587 vm_page_unlock_queues();
8588 }
8589
8590 vm_object_unlock(object);
8591}
8592
8593
fe8ab488
A
8594#if CONFIG_IOSCHED
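/*
 * Queue a request asking the I/O reprioritization thread to bump the
 * in-flight disk blocks [blkno, blkno + len) backing this object to
 * the given priority, then wake that thread.
 */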
8595void
8596vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
8597{
8598 io_reprioritize_req_t req;
8599 struct vnode *devvp = NULL;
8600
8601 if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS)
8602 return;
8603
3e170ce0
A
8604 /*
8605 * Create the request for I/O reprioritization.
8606 * We use the noblock variant of zalloc because we're holding the object
8607 * lock here and we could cause a deadlock in low memory conditions.
8608 */
8609 req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
8610 if (req == NULL)
8611 return;
fe8ab488
A
8612 req->blkno = blkno;
8613 req->len = len;
8614 req->priority = prio;
8615 req->devvp = devvp;
8616
8617 /* Insert request into the reprioritization list */
8618 IO_REPRIORITIZE_LIST_LOCK();
8619 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8620 IO_REPRIORITIZE_LIST_UNLOCK();
8621
8622 /* Wakeup reprioritize thread */
8623 IO_REPRIO_THREAD_WAKEUP();
8624
8625 return;
8626}
8627
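/*
 * Reprioritize the real I/O UPL behind a decompression request: copy
 * out its per-page (blkno, len) information and queue a reprioritization
 * request for every block range that is still recorded.
 */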
8628void
8629vm_decmp_upl_reprioritize(upl_t upl, int prio)
8630{
8631 int offset;
8632 vm_object_t object;
8633 io_reprioritize_req_t req;
8634 struct vnode *devvp = NULL;
8635 uint64_t blkno;
8636 uint32_t len;
8637 upl_t io_upl;
8638 uint64_t *io_upl_reprio_info;
8639 int io_upl_size;
8640
8641 if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
8642 return;
8643
8644 /*
8645 * We don't want to perform any allocations with the upl lock held since that might
8646 * result in a deadlock. If the system is low on memory, the pageout thread would
8647 * try to page out memory and might wait on this lock. If we are waiting for the memory to
8648 * be freed up by the pageout thread, it would be a deadlock.
8649 */
8650
8651
8652 /* First step is just to get the size of the upl to find out how big the reprio info is */
a1c7dba1
A
8653 if(!upl_try_lock(upl))
8654 return;
8655
fe8ab488
A
8656 if (upl->decmp_io_upl == NULL) {
8657 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8658 upl_unlock(upl);
8659 return;
8660 }
8661
8662 io_upl = upl->decmp_io_upl;
8663 assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
8664 io_upl_size = io_upl->size;
8665 upl_unlock(upl);
8666
8667 /* Now perform the allocation */
8668 io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8669 if (io_upl_reprio_info == NULL)
8670 return;
8671
8672 /* Now again take the lock, recheck the state and grab out the required info */
a1c7dba1
A
8673 if(!upl_try_lock(upl))
8674 goto out;
8675
fe8ab488
A
8676 if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
8677 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8678 upl_unlock(upl);
8679 goto out;
8680 }
8681 memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8682
8683 /* Get the VM object for this UPL */
8684 if (io_upl->flags & UPL_SHADOWED) {
8685 object = io_upl->map_object->shadow;
8686 } else {
8687 object = io_upl->map_object;
8688 }
8689
8690 /* Get the dev vnode ptr for this object */
8691 if(!object || !object->pager ||
8692 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8693 upl_unlock(upl);
8694 goto out;
8695 }
8696
8697 upl_unlock(upl);
8698
8699 /* Now we have all the information needed to do the expedite */
8700
8701 offset = 0;
8702 while (offset < io_upl_size) {
8703 blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
8704 len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
8705
8706 /*
8707 * This implementation may cause some spurious expedites because we
8708 * don't clear the blkno & len in upl_reprio_info even after the
8709 * I/O is complete.
8710 */
8711
8712 if (blkno != 0 && len != 0) {
8713 /* Create the request for I/O reprioritization */
8714 req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
8715 assert(req != NULL);
8716 req->blkno = blkno;
8717 req->len = len;
8718 req->priority = prio;
8719 req->devvp = devvp;
8720
8721 /* Insert request into the reprioritization list */
8722 IO_REPRIORITIZE_LIST_LOCK();
8723 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8724 IO_REPRIORITIZE_LIST_UNLOCK();
8725
8726 offset += len;
8727 } else {
8728 offset += PAGE_SIZE;
8729 }
8730 }
8731
8732 /* Wakeup reprioritize thread */
8733 IO_REPRIO_THREAD_WAKEUP();
8734
8735out:
8736 kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8737 return;
8738}
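/*
 * Layout of upl_reprio_info entries, as consumed above (illustrative;
 * the authoritative mask/shift definitions live in the UPL headers):
 * each page of the I/O upl has one 64-bit word that packs the block
 * number in the low bits and the I/O length in the high bits, e.g.
 *
 *	blkno = entry & UPL_REPRIO_INFO_MASK;
 *	len   = (entry >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
 *
 * A zero blkno/len pair means no expeditable I/O was recorded for that
 * page and the walk simply advances by PAGE_SIZE.
 */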
8739
8740void
8741vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
8742{
8743 upl_t upl;
8744 upl_page_info_t *pl;
8745 unsigned int i, num_pages;
8746 int cur_tier;
8747
8748 cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
8749
8750 /*
8751 Scan through all UPLs associated with the object to find the
8752 UPL containing the contended page.
8753 */
8754 queue_iterate(&o->uplq, upl, upl_t, uplq) {
8755 if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
8756 continue;
8757 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
8758 num_pages = (upl->size / PAGE_SIZE);
8759
8760 /*
8761 For each page in the UPL page list, see if it matches the contended
8762 page and was issued as a low-priority I/O.
8763 */
8764 for(i=0; i < num_pages; i++) {
8765 if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
8766 if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
8767 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0);
8768 vm_decmp_upl_reprioritize(upl, cur_tier);
8769 break;
8770 }
8771 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0);
8772 if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
8773 vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
8774 break;
8775 }
8776 }
8777 /* Check if we found any hits */
8778 if (i != num_pages)
8779 break;
8780 }
8781
8782 return;
8783}
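/*
 * Typical scenario handled above (illustrative): a low-priority thread
 * has paging or decompression I/O in flight that keeps page m busy, and
 * a higher-priority thread now blocks on that page in vm_page_sleep().
 * UPLs whose upl_priority is already at or above the waiter's I/O tier
 * are skipped; for a decompression request the whole backing I/O upl is
 * expedited via vm_decmp_upl_reprioritize(), otherwise only the
 * (blkno, len) extent recorded for the matching page is reprioritized.
 */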
8784
8785wait_result_t
8786vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
8787{
8788 wait_result_t ret;
8789
8790 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
8791
8792 if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
8793 /*
8794 The page is busy due to an I/O; issue a reprioritization request if necessary.
8795 */
8796 vm_page_handle_prio_inversion(o,m);
8797 }
8798 m->wanted = TRUE;
8799 ret = thread_sleep_vm_object(o, m, interruptible);
8800 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
8801 return ret;
8802}
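/*
 * Hypothetical caller sketch (not a real call site): with the object
 * lock held, a thread that needs exclusive access to a busy page would
 * typically wait along these lines, rechecking the page state after
 * each wakeup:
 *
 *	while (m->busy) {
 *		wait_result_t wr = vm_page_sleep(object, m, THREAD_UNINT);
 *		if (wr != THREAD_AWAKENED)
 *			break;
 *	}
 *
 * Setting m->wanted above tells whoever clears the busy bit that there
 * are waiters to wake, and the prio-inversion check gives the I/O
 * keeping the page busy a chance to be expedited first.
 */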
8803
8804static void
8805io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
8806{
8807 io_reprioritize_req_t req = NULL;
8808
8809 while(1) {
8810
8811 IO_REPRIORITIZE_LIST_LOCK();
8812 if (queue_empty(&io_reprioritize_list)) {
8813 IO_REPRIORITIZE_LIST_UNLOCK();
8814 break;
8815 }
8816
8817 queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8818 IO_REPRIORITIZE_LIST_UNLOCK();
8819
8820 vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
8821 zfree(io_reprioritize_req_zone, req);
8822 }
8823
8824 IO_REPRIO_THREAD_CONTINUATION();
8825}
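/*
 * The loop above drains io_reprioritize_list until it is empty and then
 * parks the thread via IO_REPRIO_THREAD_CONTINUATION(); producers wake
 * it again with IO_REPRIO_THREAD_WAKEUP() after queueing new requests
 * (see vm_page_request_reprioritize() and vm_decmp_upl_reprioritize()
 * above).  The exact park/wake mechanics are an assumption here (the
 * macros are defined elsewhere), but the intent is that no request
 * sits on the list without a wakeup pending.
 */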
8826#endif