1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Virtual memory object module.
63 */
64
65 #include <debug.h>
66 #include <mach_pagemap.h>
67 #include <task_swapper.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/vm_param.h>
74
75 #include <mach/sdt.h>
76
77 #include <ipc/ipc_types.h>
78 #include <ipc/ipc_port.h>
79
80 #include <kern/kern_types.h>
81 #include <kern/assert.h>
82 #include <kern/queue.h>
83 #include <kern/xpr.h>
84 #include <kern/kalloc.h>
85 #include <kern/zalloc.h>
86 #include <kern/host.h>
87 #include <kern/host_statistics.h>
88 #include <kern/processor.h>
89 #include <kern/misc_protos.h>
90 #include <kern/policy_internal.h>
91
92 #include <vm/memory_object.h>
93 #include <vm/vm_compressor_pager.h>
94 #include <vm/vm_fault.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_protos.h>
100 #include <vm/vm_purgeable_internal.h>
101
102 #include <vm/vm_compressor.h>
103
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache.h>
106 #endif
107
108 boolean_t vm_object_collapse_compressor_allowed = TRUE;
109
110 struct vm_counters vm_counters;
111
112 #if VM_OBJECT_TRACKING
113 boolean_t vm_object_tracking_inited = FALSE;
114 btlog_t *vm_object_tracking_btlog;
115
116 void
117 vm_object_tracking_init(void)
118 {
119 int vm_object_tracking;
120
121 vm_object_tracking = 1;
122 PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
123 sizeof (vm_object_tracking));
124
125 if (vm_object_tracking) {
126 vm_object_tracking_btlog = btlog_create(
127 VM_OBJECT_TRACKING_NUM_RECORDS,
128 VM_OBJECT_TRACKING_BTDEPTH,
129 TRUE /* caller_will_remove_entries_for_element? */);
130 assert(vm_object_tracking_btlog);
131 vm_object_tracking_inited = TRUE;
132 }
133 }
134 #endif /* VM_OBJECT_TRACKING */
135
136 /*
137 * Virtual memory objects maintain the actual data
138 * associated with allocated virtual memory. A given
139 * page of memory exists within exactly one object.
140 *
141 * An object is only deallocated when all "references"
142 * are given up.
143 *
144 * Associated with each object is a list of all resident
145 * memory pages belonging to that object; this list is
146 * maintained by the "vm_page" module, but locked by the object's
147 * lock.
148 *
149 * Each object also records the memory object reference
150 * that is used by the kernel to request and write
151 * back data (the memory object, field "pager"), etc...
152 *
153 * Virtual memory objects are allocated to provide
154 * zero-filled memory (vm_allocate) or map a user-defined
155 * memory object into a virtual address space (vm_map).
156 *
157 * Virtual memory objects that refer to a user-defined
158 * memory object are called "permanent", because all changes
159 * made in virtual memory are reflected back to the
160 * memory manager, which may then store them permanently.
161 * Other virtual memory objects are called "temporary",
162 * meaning that changes need be written back only when
163 * necessary to reclaim pages, and that storage associated
164 * with the object can be discarded once it is no longer
165 * mapped.
166 *
167 * A permanent memory object may be mapped into more
168 * than one virtual address space. Moreover, two threads
169 * may attempt to make the first mapping of a memory
170 * object concurrently. Only one thread is allowed to
171 * complete this mapping; all others wait until the
172 * "pager_initialized" field is asserted, indicating
173 * that the first thread has initialized all of the
174 * necessary fields in the virtual memory object structure.
175 *
176 * The kernel relies on a *default memory manager* to
177 * provide backing storage for the zero-filled virtual
178 * memory objects. The pager memory objects associated
179 * with these temporary virtual memory objects are only
180 * requested from the default memory manager when it
181 * becomes necessary. Virtual memory objects
182 * that depend on the default memory manager are called
183 * "internal". The "pager_created" field is provided to
184 * indicate whether these ports have ever been allocated.
185 *
186 * The kernel may also create virtual memory objects to
187 * hold changed pages after a copy-on-write operation.
188 * In this case, the virtual memory object (and its
189 * backing storage -- its memory object) only contain
190 * those pages that have been changed. The "shadow"
191 * field refers to the virtual memory object that contains
192 * the remainder of the contents. The "shadow_offset"
193 * field indicates where in the "shadow" these contents begin.
194 * The "copy" field refers to a virtual memory object
195 * to which changed pages must be copied before changing
196 * this object, in order to implement another form
197 * of copy-on-write optimization.
198 *
199 * The virtual memory object structure also records
200 * the attributes associated with its memory object.
201 * The "pager_ready", "can_persist" and "copy_strategy"
202 * fields represent those attributes. The "cached_list"
203 * field is used in the implementation of the persistence
204 * attribute.
205 *
206 * ZZZ Continue this comment.
207 */
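
/*
 * Illustrative sketch of the shadow/copy relationships described
 * above: after a symmetric copy-on-write fault, the faulting map
 * points at a new object that holds only the modified pages;
 * unmodified pages are found by following the shadow chain:
 *
 *	new (top) object:  changed pages only
 *	    shadow -----------> original object: all other pages
 *	    vo_shadow_offset -> where this object's contents begin
 *				within the shadow
 *
 * Conversely, an object's "copy" field names the object that must
 * receive copies of pages before they are modified here.
 * vm_object_collapse() merges such chains back together once the
 * intermediate objects no longer add information.
 */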
208
209 /* Forward declarations for internal functions. */
210 static kern_return_t vm_object_terminate(
211 vm_object_t object);
212
213 extern void vm_object_remove(
214 vm_object_t object);
215
216 static kern_return_t vm_object_copy_call(
217 vm_object_t src_object,
218 vm_object_offset_t src_offset,
219 vm_object_size_t size,
220 vm_object_t *_result_object);
221
222 static void vm_object_do_collapse(
223 vm_object_t object,
224 vm_object_t backing_object);
225
226 static void vm_object_do_bypass(
227 vm_object_t object,
228 vm_object_t backing_object);
229
230 static void vm_object_release_pager(
231 memory_object_t pager,
232 boolean_t hashed);
233
234 static zone_t vm_object_zone; /* vm backing store zone */
235
236 /*
237 * All wired-down kernel memory belongs to a single virtual
238 * memory object (kernel_object) to avoid wasting data structures.
239 */
240 static struct vm_object kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
241 vm_object_t kernel_object;
242
243 static struct vm_object compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
244 vm_object_t compressor_object = &compressor_object_store;
245
246 /*
247 * The submap object is used as a placeholder for vm_map_submap
248 * operations. The object is declared in vm_map.c because it
249 * is exported by the vm_map module. The storage is declared
250 * here because it must be initialized here.
251 */
252 static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
253
254 /*
255 * Virtual memory objects are initialized from
256 * a template (see vm_object_allocate).
257 *
258 * When adding a new field to the virtual memory
259 * object structure, be sure to add initialization
260 * (see _vm_object_allocate()).
261 */
262 static struct vm_object vm_object_template;
263
264 unsigned int vm_page_purged_wired = 0;
265 unsigned int vm_page_purged_busy = 0;
266 unsigned int vm_page_purged_others = 0;
267
268 #if VM_OBJECT_CACHE
269 /*
270 * Virtual memory objects that are not referenced by
271 * any address maps, but that are allowed to persist
272 * (an attribute specified by the associated memory manager),
273 * are kept in a queue (vm_object_cached_list).
274 *
275 * When an object from this queue is referenced again,
276 * for example to make another address space mapping,
277 * it must be removed from the queue. That is, the
278 * queue contains *only* objects with zero references.
279 *
280 * The kernel may choose to terminate objects from this
281 * queue in order to reclaim storage. The current policy
282 * is to permit a fixed maximum number of unreferenced
283 * objects (vm_object_cached_max).
284 *
285 * A spin lock (accessed by routines
286 * vm_object_cache_{lock,lock_try,unlock}) governs the
287 * object cache. It must be held when objects are
288 * added to or removed from the cache (in vm_object_terminate).
289 * The routines that acquire a reference to a virtual
290 * memory object based on one of the memory object ports
291 * must also lock the cache.
292 *
293 * Ideally, the object cache should be more isolated
294 * from the reference mechanism, so that the lock need
295 * not be held to make simple references.
296 */
297 static vm_object_t vm_object_cache_trim(
298 boolean_t called_from_vm_object_deallocate);
299
300 static void vm_object_deactivate_all_pages(
301 vm_object_t object);
302
303 static int vm_object_cached_high; /* highest # cached objects */
304 static int vm_object_cached_max = 512; /* may be patched*/
305
306 #define vm_object_cache_lock() \
307 lck_mtx_lock(&vm_object_cached_lock_data)
308 #define vm_object_cache_lock_try() \
309 lck_mtx_try_lock(&vm_object_cached_lock_data)
310
311 #endif /* VM_OBJECT_CACHE */
312
313 static queue_head_t vm_object_cached_list;
314 static uint32_t vm_object_cache_pages_freed = 0;
315 static uint32_t vm_object_cache_pages_moved = 0;
316 static uint32_t vm_object_cache_pages_skipped = 0;
317 static uint32_t vm_object_cache_adds = 0;
318 static uint32_t vm_object_cached_count = 0;
319 static lck_mtx_t vm_object_cached_lock_data;
320 static lck_mtx_ext_t vm_object_cached_lock_data_ext;
321
322 static uint32_t vm_object_page_grab_failed = 0;
323 static uint32_t vm_object_page_grab_skipped = 0;
324 static uint32_t vm_object_page_grab_returned = 0;
325 static uint32_t vm_object_page_grab_pmapped = 0;
326 static uint32_t vm_object_page_grab_reactivations = 0;
327
328 #define vm_object_cache_lock_spin() \
329 lck_mtx_lock_spin(&vm_object_cached_lock_data)
330 #define vm_object_cache_unlock() \
331 lck_mtx_unlock(&vm_object_cached_lock_data)
332
333 static void vm_object_cache_remove_locked(vm_object_t);
334
335
336 #define VM_OBJECT_HASH_COUNT 1024
337 #define VM_OBJECT_HASH_LOCK_COUNT 512
338
339 static lck_mtx_t vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
340 static lck_mtx_ext_t vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];
341
342 static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
343 static struct zone *vm_object_hash_zone;
344
345 struct vm_object_hash_entry {
346 queue_chain_t hash_link; /* hash chain link */
347 memory_object_t pager; /* pager we represent */
348 vm_object_t object; /* corresponding object */
349 boolean_t waiting; /* someone waiting for
350 * termination */
351 };
352
353 typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
354 #define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
355
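/*
 * Hash a pager (memory object) reference to a bucket index: shift the
 * pointer right by VM_OBJECT_HASH_SHIFT to drop the low-order bits,
 * which carry little entropy for zone-allocated pagers, then reduce it
 * modulo the table size.  The same scheme, with a smaller modulus,
 * picks the bucket lock.
 */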
356 #define VM_OBJECT_HASH_SHIFT 5
357 #define vm_object_hash(pager) \
358 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))
359
360 #define vm_object_lock_hash(pager) \
361 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
362
363 void vm_object_hash_entry_free(
364 vm_object_hash_entry_t entry);
365
366 static void vm_object_reap(vm_object_t object);
367 static void vm_object_reap_async(vm_object_t object);
368 static void vm_object_reaper_thread(void);
369
370 static lck_mtx_t vm_object_reaper_lock_data;
371 static lck_mtx_ext_t vm_object_reaper_lock_data_ext;
372
373 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
374 unsigned int vm_object_reap_count = 0;
375 unsigned int vm_object_reap_count_async = 0;
376
377 #define vm_object_reaper_lock() \
378 lck_mtx_lock(&vm_object_reaper_lock_data)
379 #define vm_object_reaper_lock_spin() \
380 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
381 #define vm_object_reaper_unlock() \
382 lck_mtx_unlock(&vm_object_reaper_lock_data)
383
384 #if CONFIG_IOSCHED
385 /* I/O Re-prioritization request list */
386 queue_head_t io_reprioritize_list;
387 lck_spin_t io_reprioritize_list_lock;
388
389 #define IO_REPRIORITIZE_LIST_LOCK() \
390 lck_spin_lock(&io_reprioritize_list_lock)
391 #define IO_REPRIORITIZE_LIST_UNLOCK() \
392 lck_spin_unlock(&io_reprioritize_list_lock)
393
394 #define MAX_IO_REPRIORITIZE_REQS 8192
395 zone_t io_reprioritize_req_zone;
396
397 /* I/O Re-prioritization thread */
398 int io_reprioritize_wakeup = 0;
399 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
400
401 #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
402 #define IO_REPRIO_THREAD_CONTINUATION() \
403 { \
404 assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \
405 thread_block(io_reprioritize_thread); \
406 }
407
408 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
409 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
410 void vm_decmp_upl_reprioritize(upl_t, int);
411 #endif
412
413 #if 0
414 #undef KERNEL_DEBUG
415 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
416 #endif
417
418
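/*
 * Take, in spin mode, and return the bucket lock covering the hash
 * chain for this pager; the caller releases it with
 * vm_object_hash_unlock().
 */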
419 static lck_mtx_t *
420 vm_object_hash_lock_spin(
421 memory_object_t pager)
422 {
423 int index;
424
425 index = vm_object_lock_hash(pager);
426
427 lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);
428
429 return (&vm_object_hashed_lock_data[index]);
430 }
431
432 static void
433 vm_object_hash_unlock(lck_mtx_t *lck)
434 {
435 lck_mtx_unlock(lck);
436 }
437
438
439 /*
440 * vm_object_hash_lookup looks up a pager in the hashtable
441 * and returns the corresponding entry, with optional removal.
442 */
443 static vm_object_hash_entry_t
444 vm_object_hash_lookup(
445 memory_object_t pager,
446 boolean_t remove_entry)
447 {
448 queue_t bucket;
449 vm_object_hash_entry_t entry;
450
451 bucket = &vm_object_hashtable[vm_object_hash(pager)];
452
453 entry = (vm_object_hash_entry_t)queue_first(bucket);
454 while (!queue_end(bucket, (queue_entry_t)entry)) {
455 if (entry->pager == pager) {
456 if (remove_entry) {
457 queue_remove(bucket, entry,
458 vm_object_hash_entry_t, hash_link);
459 }
460 return(entry);
461 }
462 entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
463 }
464 return(VM_OBJECT_HASH_ENTRY_NULL);
465 }
466
467 /*
468 * vm_object_hash_insert enters the specified
469 * pager / cache object association in the hashtable.
470 */
471
472 static void
473 vm_object_hash_insert(
474 vm_object_hash_entry_t entry,
475 vm_object_t object)
476 {
477 queue_t bucket;
478
479 assert(vm_object_hash_lookup(entry->pager, FALSE) == NULL);
480
481 bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
482
483 queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
484
485 if (object->hashed) {
486 /*
487 * "hashed" was pre-set on this (new) object to avoid
488 * locking issues in vm_object_enter() (can't attempt to
489 * grab the object lock while holding the hash lock as
490 * a spinlock), so no need to set it here (and no need to
491 * hold the object's lock).
492 */
493 } else {
494 vm_object_lock_assert_exclusive(object);
495 object->hashed = TRUE;
496 }
497
498 entry->object = object;
499 }
500
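/*
 * Allocate a hash table entry for the given pager.  The object
 * association is left NULL here and filled in later by
 * vm_object_hash_insert().
 */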
501 static vm_object_hash_entry_t
502 vm_object_hash_entry_alloc(
503 memory_object_t pager)
504 {
505 vm_object_hash_entry_t entry;
506
507 entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
508 entry->pager = pager;
509 entry->object = VM_OBJECT_NULL;
510 entry->waiting = FALSE;
511
512 return(entry);
513 }
514
515 void
516 vm_object_hash_entry_free(
517 vm_object_hash_entry_t entry)
518 {
519 zfree(vm_object_hash_zone, entry);
520 }
521
522 /*
523 * vm_object_allocate:
524 *
525 * Returns a new object with the given size.
526 */
527
528 __private_extern__ void
529 _vm_object_allocate(
530 vm_object_size_t size,
531 vm_object_t object)
532 {
533 XPR(XPR_VM_OBJECT,
534 "vm_object_allocate, object 0x%X size 0x%X\n",
535 object, size, 0,0,0);
536
537 *object = vm_object_template;
538 vm_page_queue_init(&object->memq);
539 queue_init(&object->msr_q);
540 #if UPL_DEBUG || CONFIG_IOSCHED
541 queue_init(&object->uplq);
542 #endif
543 vm_object_lock_init(object);
544 object->vo_size = size;
545
546 #if VM_OBJECT_TRACKING_OP_CREATED
547 if (vm_object_tracking_inited) {
548 void *bt[VM_OBJECT_TRACKING_BTDEPTH];
549 int numsaved = 0;
550
551 numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
552 btlog_add_entry(vm_object_tracking_btlog,
553 object,
554 VM_OBJECT_TRACKING_OP_CREATED,
555 bt,
556 numsaved);
557 }
558 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
559 }
560
561 __private_extern__ vm_object_t
562 vm_object_allocate(
563 vm_object_size_t size)
564 {
565 vm_object_t object;
566
567 object = (vm_object_t) zalloc(vm_object_zone);
568
569 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
570
571 if (object != VM_OBJECT_NULL)
572 _vm_object_allocate(size, object);
573
574 return object;
575 }
576
577
578 lck_grp_t vm_object_lck_grp;
579 lck_grp_t vm_object_cache_lck_grp;
580 lck_grp_attr_t vm_object_lck_grp_attr;
581 lck_attr_t vm_object_lck_attr;
582 lck_attr_t kernel_object_lck_attr;
583 lck_attr_t compressor_object_lck_attr;
584
585 /*
586 * vm_object_bootstrap:
587 *
588 * Initialize the VM objects module.
589 */
590 __private_extern__ void
591 vm_object_bootstrap(void)
592 {
593 int i;
594 vm_size_t vm_object_size;
595
596 vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1);
597
598 vm_object_zone = zinit(vm_object_size,
599 round_page(512*1024),
600 round_page(12*1024),
601 "vm objects");
602 zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
603 zone_change(vm_object_zone, Z_NOENCRYPT, TRUE);
604
605 vm_object_init_lck_grp();
606
607 queue_init(&vm_object_cached_list);
608
609 lck_mtx_init_ext(&vm_object_cached_lock_data,
610 &vm_object_cached_lock_data_ext,
611 &vm_object_cache_lck_grp,
612 &vm_object_lck_attr);
613
614 queue_init(&vm_object_reaper_queue);
615
616 for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
617 lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
618 &vm_object_hashed_lock_data_ext[i],
619 &vm_object_lck_grp,
620 &vm_object_lck_attr);
621 }
622 lck_mtx_init_ext(&vm_object_reaper_lock_data,
623 &vm_object_reaper_lock_data_ext,
624 &vm_object_lck_grp,
625 &vm_object_lck_attr);
626
627 vm_object_hash_zone =
628 zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
629 round_page(512*1024),
630 round_page(12*1024),
631 "vm object hash entries");
632 zone_change(vm_object_hash_zone, Z_CALLERACCT, FALSE);
633 zone_change(vm_object_hash_zone, Z_NOENCRYPT, TRUE);
634
635 for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
636 queue_init(&vm_object_hashtable[i]);
637
638
639 /*
640 * Fill in a template object, for quick initialization
641 */
642
643 /* memq; Lock; init after allocation */
644
645
646 vm_object_template.memq.prev = 0;
647 vm_object_template.memq.next = 0;
648 #if 0
649 /*
650 * We can't call vm_object_lock_init() here because that will
651 * allocate some memory and VM is not fully initialized yet.
652 * The lock will be initialized for each allocated object in
653 * _vm_object_allocate(), so we don't need to initialize it in
654 * the vm_object_template.
655 */
656 vm_object_lock_init(&vm_object_template);
657 #endif
658 #if DEVELOPMENT || DEBUG
659 vm_object_template.Lock_owner = 0;
660 #endif
661 vm_object_template.vo_size = 0;
662 vm_object_template.memq_hint = VM_PAGE_NULL;
663 vm_object_template.ref_count = 1;
664 #if TASK_SWAPPER
665 vm_object_template.res_count = 1;
666 #endif /* TASK_SWAPPER */
667 vm_object_template.resident_page_count = 0;
668 vm_object_template.wired_page_count = 0;
669 vm_object_template.reusable_page_count = 0;
670 vm_object_template.copy = VM_OBJECT_NULL;
671 vm_object_template.shadow = VM_OBJECT_NULL;
672 vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0;
673 vm_object_template.pager = MEMORY_OBJECT_NULL;
674 vm_object_template.paging_offset = 0;
675 vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
676 vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
677 vm_object_template.paging_in_progress = 0;
678 #if __LP64__
679 vm_object_template.__object1_unused_bits = 0;
680 #endif /* __LP64__ */
681 vm_object_template.activity_in_progress = 0;
682
683 /* Begin bitfields */
684 vm_object_template.all_wanted = 0; /* all bits FALSE */
685 vm_object_template.pager_created = FALSE;
686 vm_object_template.pager_initialized = FALSE;
687 vm_object_template.pager_ready = FALSE;
688 vm_object_template.pager_trusted = FALSE;
689 vm_object_template.can_persist = FALSE;
690 vm_object_template.internal = TRUE;
691 vm_object_template.temporary = TRUE;
692 vm_object_template.private = FALSE;
693 vm_object_template.pageout = FALSE;
694 vm_object_template.alive = TRUE;
695 vm_object_template.purgable = VM_PURGABLE_DENY;
696 vm_object_template.purgeable_when_ripe = FALSE;
697 vm_object_template.shadowed = FALSE;
698 vm_object_template.advisory_pageout = FALSE;
699 vm_object_template.true_share = FALSE;
700 vm_object_template.terminating = FALSE;
701 vm_object_template.named = FALSE;
702 vm_object_template.shadow_severed = FALSE;
703 vm_object_template.phys_contiguous = FALSE;
704 vm_object_template.nophyscache = FALSE;
705 /* End bitfields */
706
707 vm_object_template.cached_list.prev = NULL;
708 vm_object_template.cached_list.next = NULL;
709 vm_object_template.msr_q.prev = NULL;
710 vm_object_template.msr_q.next = NULL;
711
712 vm_object_template.last_alloc = (vm_object_offset_t) 0;
713 vm_object_template.sequential = (vm_object_offset_t) 0;
714 vm_object_template.pages_created = 0;
715 vm_object_template.pages_used = 0;
716 vm_object_template.scan_collisions = 0;
717 #if CONFIG_PHANTOM_CACHE
718 vm_object_template.phantom_object_id = 0;
719 #endif
720 vm_object_template.cow_hint = ~(vm_offset_t)0;
721 #if MACH_ASSERT
722 vm_object_template.paging_object = VM_OBJECT_NULL;
723 #endif /* MACH_ASSERT */
724
725 /* cache bitfields */
726 vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT;
727 vm_object_template.set_cache_attr = FALSE;
728 vm_object_template.object_slid = FALSE;
729 vm_object_template.code_signed = FALSE;
730 vm_object_template.hashed = FALSE;
731 vm_object_template.transposed = FALSE;
732 vm_object_template.mapping_in_progress = FALSE;
733 vm_object_template.phantom_isssd = FALSE;
734 vm_object_template.volatile_empty = FALSE;
735 vm_object_template.volatile_fault = FALSE;
736 vm_object_template.all_reusable = FALSE;
737 vm_object_template.blocked_access = FALSE;
738 vm_object_template.__object2_unused_bits = 0;
739 #if CONFIG_IOSCHED || UPL_DEBUG
740 vm_object_template.uplq.prev = NULL;
741 vm_object_template.uplq.next = NULL;
742 #endif /* UPL_DEBUG */
743 #ifdef VM_PIP_DEBUG
744 bzero(&vm_object_template.pip_holders,
745 sizeof (vm_object_template.pip_holders));
746 #endif /* VM_PIP_DEBUG */
747
748 vm_object_template.objq.next = NULL;
749 vm_object_template.objq.prev = NULL;
750
751 vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
752 vm_object_template.purgeable_queue_group = 0;
753
754 vm_object_template.vo_cache_ts = 0;
755
756 vm_object_template.wire_tag = VM_KERN_MEMORY_NONE;
757
758 vm_object_template.io_tracking = FALSE;
759
760 #if CONFIG_SECLUDED_MEMORY
761 vm_object_template.eligible_for_secluded = FALSE;
762 vm_object_template.can_grab_secluded = FALSE;
763 #else /* CONFIG_SECLUDED_MEMORY */
764 vm_object_template.__object3_unused_bits = 0;
765 #endif /* CONFIG_SECLUDED_MEMORY */
766
767 #if DEBUG
768 bzero(&vm_object_template.purgeable_owner_bt[0],
769 sizeof (vm_object_template.purgeable_owner_bt));
770 vm_object_template.vo_purgeable_volatilizer = NULL;
771 bzero(&vm_object_template.purgeable_volatilizer_bt[0],
772 sizeof (vm_object_template.purgeable_volatilizer_bt));
773 #endif /* DEBUG */
774
775 /*
776 * Initialize the "kernel object"
777 */
778
779 kernel_object = &kernel_object_store;
780
781 /*
782 * Note that in the following size specifications, we need to add 1 because
783 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
784 */
785
786 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
787 kernel_object);
788
789 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
790 compressor_object);
791 kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
792 compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
793
794 /*
795 * Initialize the "submap object". Make it as large as the
796 * kernel object so that no limit is imposed on submap sizes.
797 */
798
799 vm_submap_object = &vm_submap_object_store;
800 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
801 vm_submap_object);
802 vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
803
804 /*
805 * Create an "extra" reference to this object so that we never
806 * try to deallocate it; zfree doesn't like to be called with
807 * non-zone memory.
808 */
809 vm_object_reference(vm_submap_object);
810 }
811
812 #if CONFIG_IOSCHED
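/*
 * Set up the I/O reprioritization machinery: the request list, its
 * spin lock, the request zone and the kernel thread that services
 * the list.
 */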
813 void
814 vm_io_reprioritize_init(void)
815 {
816 kern_return_t result;
817 thread_t thread = THREAD_NULL;
818
819 /* Initialize the I/O reprioritization subsystem */
820 lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr);
821 queue_init(&io_reprioritize_list);
822
823 io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req),
824 MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req),
825 4096, "io_reprioritize_req");
826
827 result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
828 if (result == KERN_SUCCESS) {
829 thread_deallocate(thread);
830 } else {
831 panic("Could not create io_reprioritize_thread");
832 }
833 }
834 #endif
835
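/*
 * Start the reaper thread, which completes the termination of objects
 * queued by vm_object_reap_async() (see vm_object_terminate()).
 */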
836 void
837 vm_object_reaper_init(void)
838 {
839 kern_return_t kr;
840 thread_t thread;
841
842 kr = kernel_thread_start_priority(
843 (thread_continue_t) vm_object_reaper_thread,
844 NULL,
845 BASEPRI_PREEMPT - 1,
846 &thread);
847 if (kr != KERN_SUCCESS) {
848 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
849 }
850 thread_deallocate(thread);
851 }
852
853 __private_extern__ void
854 vm_object_init(void)
855 {
856 /*
857 * Finish initializing the kernel object.
858 */
859 }
860
861
862 __private_extern__ void
863 vm_object_init_lck_grp(void)
864 {
865 /*
866 * initialize the vm_object lock world
867 */
868 lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
869 lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
870 lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr);
871 lck_attr_setdefault(&vm_object_lck_attr);
872 lck_attr_setdefault(&kernel_object_lck_attr);
873 lck_attr_cleardebug(&kernel_object_lck_attr);
874 lck_attr_setdefault(&compressor_object_lck_attr);
875 lck_attr_cleardebug(&compressor_object_lck_attr);
876 }
877
878 #if VM_OBJECT_CACHE
879 #define MIGHT_NOT_CACHE_SHADOWS 1
880 #if MIGHT_NOT_CACHE_SHADOWS
881 static int cache_shadows = TRUE;
882 #endif /* MIGHT_NOT_CACHE_SHADOWS */
883 #endif
884
885 /*
886 * vm_object_deallocate:
887 *
888 * Release a reference to the specified object,
889 * gained either through a vm_object_allocate
890 * or a vm_object_reference call. When all references
891 * are gone, storage associated with this object
892 * may be relinquished.
893 *
894 * No object may be locked.
895 */
896 unsigned long vm_object_deallocate_shared_successes = 0;
897 unsigned long vm_object_deallocate_shared_failures = 0;
898 unsigned long vm_object_deallocate_shared_swap_failures = 0;
899
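/*
 * Fast-path note: the kernel and compressor objects get an atomic
 * decrement under a shared lock.  Other objects with ref_count >= 2
 * (and no pending "named" last-unmap or shadow-collapse work) first
 * try a compare-and-swap decrement while holding the object lock
 * shared; if that race is lost, the code falls back to the slow path
 * below, which takes the lock exclusively and may cache or terminate
 * the object.
 */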
900 __private_extern__ void
901 vm_object_deallocate(
902 vm_object_t object)
903 {
904 #if VM_OBJECT_CACHE
905 boolean_t retry_cache_trim = FALSE;
906 uint32_t try_failed_count = 0;
907 #endif
908 vm_object_t shadow = VM_OBJECT_NULL;
909
910 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
911 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
912
913 if (object == VM_OBJECT_NULL)
914 return;
915
916 if (object == kernel_object || object == compressor_object) {
917 vm_object_lock_shared(object);
918
919 OSAddAtomic(-1, &object->ref_count);
920
921 if (object->ref_count == 0) {
922 if (object == kernel_object)
923 panic("vm_object_deallocate: losing kernel_object\n");
924 else
925 panic("vm_object_deallocate: losing compressor_object\n");
926 }
927 vm_object_unlock(object);
928 return;
929 }
930
931 if (object->ref_count == 2 &&
932 object->named) {
933 /*
934 * This "named" object's reference count is about to
935 * drop from 2 to 1:
936 * we'll need to call memory_object_last_unmap().
937 */
938 } else if (object->ref_count == 2 &&
939 object->internal &&
940 object->shadow != VM_OBJECT_NULL) {
941 /*
942 * This internal object's reference count is about to
943 * drop from 2 to 1 and it has a shadow object:
944 * we'll want to try and collapse this object with its
945 * shadow.
946 */
947 } else if (object->ref_count >= 2) {
948 UInt32 original_ref_count;
949 volatile UInt32 *ref_count_p;
950 Boolean atomic_swap;
951
952 /*
953 * The object currently looks like it is not being
954 * kept alive solely by the reference we're about to release.
955 * Let's try and release our reference without taking
956 * all the locks we would need if we had to terminate the
957 * object (cache lock + exclusive object lock).
958 * Lock the object "shared" to make sure we don't race with
959 * anyone holding it "exclusive".
960 */
961 vm_object_lock_shared(object);
962 ref_count_p = (volatile UInt32 *) &object->ref_count;
963 original_ref_count = object->ref_count;
964 /*
965 * Test again as "ref_count" could have changed.
966 * "named" shouldn't change.
967 */
968 if (original_ref_count == 2 &&
969 object->named) {
970 /* need to take slow path for m_o_last_unmap() */
971 atomic_swap = FALSE;
972 } else if (original_ref_count == 2 &&
973 object->internal &&
974 object->shadow != VM_OBJECT_NULL) {
975 /* need to take slow path for vm_object_collapse() */
976 atomic_swap = FALSE;
977 } else if (original_ref_count < 2) {
978 /* need to take slow path for vm_object_terminate() */
979 atomic_swap = FALSE;
980 } else {
981 /* try an atomic update with the shared lock */
982 atomic_swap = OSCompareAndSwap(
983 original_ref_count,
984 original_ref_count - 1,
985 (UInt32 *) &object->ref_count);
986 if (atomic_swap == FALSE) {
987 vm_object_deallocate_shared_swap_failures++;
988 /* fall back to the slow path... */
989 }
990 }
991
992 vm_object_unlock(object);
993
994 if (atomic_swap) {
995 /*
996 * ref_count was updated atomically !
997 */
998 vm_object_deallocate_shared_successes++;
999 return;
1000 }
1001
1002 /*
1003 * Someone else updated the ref_count at the same
1004 * time and we lost the race. Fall back to the usual
1005 * slow but safe path...
1006 */
1007 vm_object_deallocate_shared_failures++;
1008 }
1009
1010 while (object != VM_OBJECT_NULL) {
1011
1012 vm_object_lock(object);
1013
1014 assert(object->ref_count > 0);
1015
1016 /*
1017 * If the object has a named reference, and only
1018 * that reference would remain, inform the pager
1019 * about the last "mapping" reference going away.
1020 */
1021 if ((object->ref_count == 2) && (object->named)) {
1022 memory_object_t pager = object->pager;
1023
1024 /* Notify the Pager that there are no */
1025 /* more mappers for this object */
1026
1027 if (pager != MEMORY_OBJECT_NULL) {
1028 vm_object_mapping_wait(object, THREAD_UNINT);
1029 vm_object_mapping_begin(object);
1030 vm_object_unlock(object);
1031
1032 memory_object_last_unmap(pager);
1033
1034 vm_object_lock(object);
1035 vm_object_mapping_end(object);
1036 }
1037 assert(object->ref_count > 0);
1038 }
1039
1040 /*
1041 * Lose the reference. If other references
1042 * remain, then we are done, unless we need
1043 * to retry a cache trim.
1044 * If it is the last reference, then keep it
1045 * until any pending initialization is completed.
1046 */
1047
1048 /* if the object is terminating, it cannot go into */
1049 /* the cache and we obviously should not call */
1050 /* terminate again. */
1051
1052 if ((object->ref_count > 1) || object->terminating) {
1053 vm_object_lock_assert_exclusive(object);
1054 object->ref_count--;
1055 vm_object_res_deallocate(object);
1056
1057 if (object->ref_count == 1 &&
1058 object->shadow != VM_OBJECT_NULL) {
1059 /*
1060 * There's only one reference left on this
1061 * VM object. We can't tell if it's a valid
1062 * one (from a mapping for example) or if this
1063 * object is just part of a possibly stale and
1064 * useless shadow chain.
1065 * We would like to try and collapse it into
1066 * its parent, but we don't have any pointers
1067 * back to this parent object.
1068 * But we can try and collapse this object with
1069 * its own shadows, in case these are useless
1070 * too...
1071 * We can't bypass this object though, since we
1072 * don't know if this last reference on it is
1073 * meaningful or not.
1074 */
1075 vm_object_collapse(object, 0, FALSE);
1076 }
1077 vm_object_unlock(object);
1078 #if VM_OBJECT_CACHE
1079 if (retry_cache_trim &&
1080 ((object = vm_object_cache_trim(TRUE)) !=
1081 VM_OBJECT_NULL)) {
1082 continue;
1083 }
1084 #endif
1085 return;
1086 }
1087
1088 /*
1089 * We have to wait for initialization
1090 * before destroying or caching the object.
1091 */
1092
1093 if (object->pager_created && ! object->pager_initialized) {
1094 assert(! object->can_persist);
1095 vm_object_assert_wait(object,
1096 VM_OBJECT_EVENT_INITIALIZED,
1097 THREAD_UNINT);
1098 vm_object_unlock(object);
1099
1100 thread_block(THREAD_CONTINUE_NULL);
1101 continue;
1102 }
1103
1104 #if VM_OBJECT_CACHE
1105 /*
1106 * If this object can persist, then enter it in
1107 * the cache. Otherwise, terminate it.
1108 *
1109 * NOTE: Only permanent objects are cached, and
1110 * permanent objects cannot have shadows. This
1111 * affects the residence counting logic in a minor
1112 * way (can do it in-line, mostly).
1113 */
1114
1115 if ((object->can_persist) && (object->alive)) {
1116 /*
1117 * Now it is safe to decrement reference count,
1118 * and to return if reference count is > 0.
1119 */
1120
1121 vm_object_lock_assert_exclusive(object);
1122 if (--object->ref_count > 0) {
1123 vm_object_res_deallocate(object);
1124 vm_object_unlock(object);
1125
1126 if (retry_cache_trim &&
1127 ((object = vm_object_cache_trim(TRUE)) !=
1128 VM_OBJECT_NULL)) {
1129 continue;
1130 }
1131 return;
1132 }
1133
1134 #if MIGHT_NOT_CACHE_SHADOWS
1135 /*
1136 * Remove shadow now if we don't
1137 * want to cache shadows.
1138 */
1139 if (! cache_shadows) {
1140 shadow = object->shadow;
1141 object->shadow = VM_OBJECT_NULL;
1142 }
1143 #endif /* MIGHT_NOT_CACHE_SHADOWS */
1144
1145 /*
1146 * Enter the object onto the queue of
1147 * cached objects, and deactivate
1148 * all of its pages.
1149 */
1150 assert(object->shadow == VM_OBJECT_NULL);
1151 VM_OBJ_RES_DECR(object);
1152 XPR(XPR_VM_OBJECT,
1153 "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
1154 object,
1155 vm_object_cached_list.next,
1156 vm_object_cached_list.prev,0,0);
1157
1158
1159 vm_object_unlock(object);
1160
1161 try_failed_count = 0;
1162 for (;;) {
1163 vm_object_cache_lock();
1164
1165 /*
1166 * if we try to take a regular lock here
1167 * we risk deadlocking against someone
1168 * holding a lock on this object while
1169 * trying to vm_object_deallocate a different
1170 * object
1171 */
1172 if (vm_object_lock_try(object))
1173 break;
1174 vm_object_cache_unlock();
1175 try_failed_count++;
1176
1177 mutex_pause(try_failed_count); /* wait a bit */
1178 }
1179 vm_object_cached_count++;
1180 if (vm_object_cached_count > vm_object_cached_high)
1181 vm_object_cached_high = vm_object_cached_count;
1182 queue_enter(&vm_object_cached_list, object,
1183 vm_object_t, cached_list);
1184 vm_object_cache_unlock();
1185
1186 vm_object_deactivate_all_pages(object);
1187 vm_object_unlock(object);
1188
1189 #if MIGHT_NOT_CACHE_SHADOWS
1190 /*
1191 * If we have a shadow that we need
1192 * to deallocate, do so now, remembering
1193 * to trim the cache later.
1194 */
1195 if (! cache_shadows && shadow != VM_OBJECT_NULL) {
1196 object = shadow;
1197 retry_cache_trim = TRUE;
1198 continue;
1199 }
1200 #endif /* MIGHT_NOT_CACHE_SHADOWS */
1201
1202 /*
1203 * Trim the cache. If the cache trim
1204 * returns with a shadow for us to deallocate,
1205 * then remember to retry the cache trim
1206 * when we are done deallocating the shadow.
1207 * Otherwise, we are done.
1208 */
1209
1210 object = vm_object_cache_trim(TRUE);
1211 if (object == VM_OBJECT_NULL) {
1212 return;
1213 }
1214 retry_cache_trim = TRUE;
1215 } else
1216 #endif /* VM_OBJECT_CACHE */
1217 {
1218 /*
1219 * This object is not cacheable; terminate it.
1220 */
1221 XPR(XPR_VM_OBJECT,
1222 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
1223 object, object->resident_page_count,
1224 object->paging_in_progress,
1225 (void *)current_thread(),object->ref_count);
1226
1227 VM_OBJ_RES_DECR(object); /* XXX ? */
1228 /*
1229 * Terminate this object. If it had a shadow,
1230 * then deallocate it; otherwise, if we need
1231 * to retry a cache trim, do so now; otherwise,
1232 * we are done. "pageout" objects have a shadow,
1233 * but maintain a "paging reference" rather than
1234 * a normal reference.
1235 */
1236 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1237
1238 if (vm_object_terminate(object) != KERN_SUCCESS) {
1239 return;
1240 }
1241 if (shadow != VM_OBJECT_NULL) {
1242 object = shadow;
1243 continue;
1244 }
1245 #if VM_OBJECT_CACHE
1246 if (retry_cache_trim &&
1247 ((object = vm_object_cache_trim(TRUE)) !=
1248 VM_OBJECT_NULL)) {
1249 continue;
1250 }
1251 #endif
1252 return;
1253 }
1254 }
1255 #if VM_OBJECT_CACHE
1256 assert(! retry_cache_trim);
1257 #endif
1258 }
1259
1260
1261
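/*
 * vm_object_page_grab:
 *
 * Try to reclaim a single page from this object.  Scans a bounded
 * number of resident pages (at most 50), rotating to the end of the
 * queue any page that is wired, busy, cleaning, in the laundry or
 * fictitious, and takes the first page that is, or can be made, clean
 * and not precious, disconnecting it from all pmaps first.  Pages
 * found to be referenced are reactivated instead.
 *
 * Returns the reclaimed page, removed from the object and the page
 * queues, or NULL.  The object must be locked exclusively.
 */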
1262 vm_page_t
1263 vm_object_page_grab(
1264 vm_object_t object)
1265 {
1266 vm_page_t p, next_p;
1267 int p_limit = 0;
1268 int p_skipped = 0;
1269
1270 vm_object_lock_assert_exclusive(object);
1271
1272 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1273 p_limit = MIN(50, object->resident_page_count);
1274
1275 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
1276
1277 p = next_p;
1278 next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);
1279
1280 if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious)
1281 goto move_page_in_obj;
1282
1283 if (p->pmapped || p->dirty || p->precious) {
1284 vm_page_lockspin_queues();
1285
1286 if (p->pmapped) {
1287 int refmod_state;
1288
1289 vm_object_page_grab_pmapped++;
1290
1291 if (p->reference == FALSE || p->dirty == FALSE) {
1292
1293 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
1294
1295 if (refmod_state & VM_MEM_REFERENCED)
1296 p->reference = TRUE;
1297 if (refmod_state & VM_MEM_MODIFIED) {
1298 SET_PAGE_DIRTY(p, FALSE);
1299 }
1300 }
1301 if (p->dirty == FALSE && p->precious == FALSE) {
1302
1303 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
1304
1305 if (refmod_state & VM_MEM_REFERENCED)
1306 p->reference = TRUE;
1307 if (refmod_state & VM_MEM_MODIFIED) {
1308 SET_PAGE_DIRTY(p, FALSE);
1309 }
1310
1311 if (p->dirty == FALSE)
1312 goto take_page;
1313 }
1314 }
1315 if ((p->vm_page_q_state != VM_PAGE_ON_ACTIVE_Q) && p->reference == TRUE) {
1316 vm_page_activate(p);
1317
1318 VM_STAT_INCR(reactivations);
1319 vm_object_page_grab_reactivations++;
1320 }
1321 vm_page_unlock_queues();
1322 move_page_in_obj:
1323 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
1324 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
1325
1326 p_skipped++;
1327 continue;
1328 }
1329 vm_page_lockspin_queues();
1330 take_page:
1331 vm_page_free_prepare_queues(p);
1332 vm_object_page_grab_returned++;
1333 vm_object_page_grab_skipped += p_skipped;
1334
1335 vm_page_unlock_queues();
1336
1337 vm_page_free_prepare_object(p, TRUE);
1338
1339 return (p);
1340 }
1341 vm_object_page_grab_skipped += p_skipped;
1342 vm_object_page_grab_failed++;
1343
1344 return (NULL);
1345 }
1346
1347
1348
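/*
 * EVICT_AGE is the minimum time, in seconds, an object must sit on the
 * cached list before vm_object_cache_evict() will harvest its pages.
 * EVICT_PREPARE_LIMIT bounds how many pages are gathered from a single
 * object, under its lock, before the page-queues lock is taken to
 * dispose of them.
 */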
1349 #define EVICT_PREPARE_LIMIT 64
1350 #define EVICT_AGE 10
1351
1352 static clock_sec_t vm_object_cache_aging_ts = 0;
1353
1354 static void
1355 vm_object_cache_remove_locked(
1356 vm_object_t object)
1357 {
1358 assert(object->purgable == VM_PURGABLE_DENY);
1359 assert(object->wired_page_count == 0);
1360
1361 queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
1362 object->objq.next = NULL;
1363 object->objq.prev = NULL;
1364
1365 vm_object_cached_count--;
1366 }
1367
1368 void
1369 vm_object_cache_remove(
1370 vm_object_t object)
1371 {
1372 vm_object_cache_lock_spin();
1373
1374 if (object->objq.next || object->objq.prev)
1375 vm_object_cache_remove_locked(object);
1376
1377 vm_object_cache_unlock();
1378 }
1379
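/*
 * Add an object to the cached-objects list if it has resident pages
 * and is not already queued, stamping it with an eviction timestamp
 * EVICT_AGE seconds in the future.
 */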
1380 void
1381 vm_object_cache_add(
1382 vm_object_t object)
1383 {
1384 clock_sec_t sec;
1385 clock_nsec_t nsec;
1386
1387 assert(object->purgable == VM_PURGABLE_DENY);
1388 assert(object->wired_page_count == 0);
1389
1390 if (object->resident_page_count == 0)
1391 return;
1392 clock_get_system_nanotime(&sec, &nsec);
1393
1394 vm_object_cache_lock_spin();
1395
1396 if (object->objq.next == NULL && object->objq.prev == NULL) {
1397 queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
1398 object->vo_cache_ts = sec + EVICT_AGE;
1399 object->vo_cache_pages_to_scan = object->resident_page_count;
1400
1401 vm_object_cached_count++;
1402 vm_object_cache_adds++;
1403 }
1404 vm_object_cache_unlock();
1405 }
1406
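/*
 * vm_object_cache_evict:
 *
 * Walk the cached-objects list, oldest first, trying to evict up to
 * num_to_evict pages while examining at most max_objects_to_examine
 * objects.  Clean pages are freed; dirty, precious or write-mapped
 * pages are moved to the inactive queue instead.  The caller holds
 * the page-queues lock, which is dropped during the scan and re-taken
 * before returning.  Returns the number of pages freed.
 */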
1407 int
1408 vm_object_cache_evict(
1409 int num_to_evict,
1410 int max_objects_to_examine)
1411 {
1412 vm_object_t object = VM_OBJECT_NULL;
1413 vm_object_t next_obj = VM_OBJECT_NULL;
1414 vm_page_t local_free_q = VM_PAGE_NULL;
1415 vm_page_t p;
1416 vm_page_t next_p;
1417 int object_cnt = 0;
1418 vm_page_t ep_array[EVICT_PREPARE_LIMIT];
1419 int ep_count;
1420 int ep_limit;
1421 int ep_index;
1422 int ep_freed = 0;
1423 int ep_moved = 0;
1424 uint32_t ep_skipped = 0;
1425 clock_sec_t sec;
1426 clock_nsec_t nsec;
1427
1428 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1429 /*
1430 * do a couple of quick checks to see if it's
1431 * worthwhile grabbing the lock
1432 */
1433 if (queue_empty(&vm_object_cached_list)) {
1434 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1435 return (0);
1436 }
1437 clock_get_system_nanotime(&sec, &nsec);
1438
1439 /*
1440 * the object on the head of the queue has not
1441 * yet sufficiently aged
1442 */
1443 if (sec < vm_object_cache_aging_ts) {
1444 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1445 return (0);
1446 }
1447 /*
1448 * don't need the queue lock to find
1449 * and lock an object on the cached list
1450 */
1451 vm_page_unlock_queues();
1452
1453 vm_object_cache_lock_spin();
1454
1455 for (;;) {
1456 next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1457
1458 while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1459
1460 object = next_obj;
1461 next_obj = (vm_object_t)queue_next(&next_obj->objq);
1462
1463 assert(object->purgable == VM_PURGABLE_DENY);
1464 assert(object->wired_page_count == 0);
1465
1466 if (sec < object->vo_cache_ts) {
1467 KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1468
1469 vm_object_cache_aging_ts = object->vo_cache_ts;
1470 object = VM_OBJECT_NULL;
1471 break;
1472 }
1473 if (!vm_object_lock_try_scan(object)) {
1474 /*
1475 * just skip over this guy for now... if we find
1476 * an object to steal pages from, we'll revisit in a bit...
1477 * hopefully, the lock will have cleared
1478 */
1479 KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1480
1481 object = VM_OBJECT_NULL;
1482 continue;
1483 }
1484 if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1485 /*
1486 * this case really shouldn't happen, but it's not fatal
1487 * so deal with it... if we don't remove the object from
1488 * the list, we'll never move past it.
1489 */
1490 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1491
1492 vm_object_cache_remove_locked(object);
1493 vm_object_unlock(object);
1494 object = VM_OBJECT_NULL;
1495 continue;
1496 }
1497 /*
1498 * we have a locked object with pages...
1499 * time to start harvesting
1500 */
1501 break;
1502 }
1503 vm_object_cache_unlock();
1504
1505 if (object == VM_OBJECT_NULL)
1506 break;
1507
1508 /*
1509 * object is locked at this point and
1510 * has resident pages
1511 */
1512 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1513
1514 /*
1515 * break the page scan into 2 pieces to minimize the time spent
1516 * behind the page queue lock...
1517 * the list of pages on these unused objects is likely to be cold
1518 * w/r to the cpu cache which increases the time to scan the list
1519 * tenfold... and we may have a 'run' of pages we can't utilize that
1520 * needs to be skipped over...
1521 */
1522 if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT)
1523 ep_limit = EVICT_PREPARE_LIMIT;
1524 ep_count = 0;
1525
1526 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1527
1528 p = next_p;
1529 next_p = (vm_page_t)vm_page_queue_next(&next_p->listq);
1530
1531 object->vo_cache_pages_to_scan--;
1532
1533 if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) {
1534 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
1535 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
1536
1537 ep_skipped++;
1538 continue;
1539 }
1540 if (p->wpmapped || p->dirty || p->precious) {
1541 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
1542 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
1543
1544 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1545 }
1546 ep_array[ep_count++] = p;
1547 }
1548 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1549
1550 vm_page_lockspin_queues();
1551
1552 for (ep_index = 0; ep_index < ep_count; ep_index++) {
1553
1554 p = ep_array[ep_index];
1555
1556 if (p->wpmapped || p->dirty || p->precious) {
1557 p->reference = FALSE;
1558 p->no_cache = FALSE;
1559
1560 /*
1561 * we've already filtered out pages that are in the laundry
1562 * so if we get here, this page can't be on the pageout queue
1563 */
1564 vm_page_queues_remove(p, FALSE);
1565 vm_page_enqueue_inactive(p, TRUE);
1566
1567 ep_moved++;
1568 } else {
1569 #if CONFIG_PHANTOM_CACHE
1570 vm_phantom_cache_add_ghost(p);
1571 #endif
1572 vm_page_free_prepare_queues(p);
1573
1574 assert(p->pageq.next == 0 && p->pageq.prev == 0);
1575 /*
1576 * Add this page to our list of reclaimed pages,
1577 * to be freed later.
1578 */
1579 p->snext = local_free_q;
1580 local_free_q = p;
1581
1582 ep_freed++;
1583 }
1584 }
1585 vm_page_unlock_queues();
1586
1587 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1588
1589 if (local_free_q) {
1590 vm_page_free_list(local_free_q, TRUE);
1591 local_free_q = VM_PAGE_NULL;
1592 }
1593 if (object->vo_cache_pages_to_scan == 0) {
1594 KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1595
1596 vm_object_cache_remove(object);
1597
1598 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1599 }
1600 /*
1601 * done with this object
1602 */
1603 vm_object_unlock(object);
1604 object = VM_OBJECT_NULL;
1605
1606 /*
1607 * at this point, we are not holding any locks
1608 */
1609 if ((ep_freed + ep_moved) >= num_to_evict) {
1610 /*
1611 * we've reached our target for the
1612 * number of pages to evict
1613 */
1614 break;
1615 }
1616 vm_object_cache_lock_spin();
1617 }
1618 /*
1619 * put the page queues lock back to the caller's
1620 * idea of it
1621 */
1622 vm_page_lock_queues();
1623
1624 vm_object_cache_pages_freed += ep_freed;
1625 vm_object_cache_pages_moved += ep_moved;
1626 vm_object_cache_pages_skipped += ep_skipped;
1627
1628 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
1629 return (ep_freed);
1630 }
1631
1632
1633 #if VM_OBJECT_CACHE
1634 /*
1635 * Check to see whether we really need to trim
1636 * down the cache. If so, remove an object from
1637 * the cache, terminate it, and repeat.
1638 *
1639 * Called with, and returns with, cache lock unlocked.
1640 */
1641 vm_object_t
1642 vm_object_cache_trim(
1643 boolean_t called_from_vm_object_deallocate)
1644 {
1645 vm_object_t object = VM_OBJECT_NULL;
1646 vm_object_t shadow;
1647
1648 for (;;) {
1649
1650 /*
1651 * If we no longer need to trim the cache,
1652 * then we are done.
1653 */
1654 if (vm_object_cached_count <= vm_object_cached_max)
1655 return VM_OBJECT_NULL;
1656
1657 vm_object_cache_lock();
1658 if (vm_object_cached_count <= vm_object_cached_max) {
1659 vm_object_cache_unlock();
1660 return VM_OBJECT_NULL;
1661 }
1662
1663 /*
1664 * We must trim down the cache, so remove
1665 * the first object in the cache.
1666 */
1667 XPR(XPR_VM_OBJECT,
1668 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
1669 vm_object_cached_list.next,
1670 vm_object_cached_list.prev, 0, 0, 0);
1671
1672 object = (vm_object_t) queue_first(&vm_object_cached_list);
1673 if(object == (vm_object_t) &vm_object_cached_list) {
1674 /* something's wrong with the calling parameter or */
1675 /* the value of vm_object_cached_count, just fix */
1676 /* and return */
1677 if(vm_object_cached_max < 0)
1678 vm_object_cached_max = 0;
1679 vm_object_cached_count = 0;
1680 vm_object_cache_unlock();
1681 return VM_OBJECT_NULL;
1682 }
1683 vm_object_lock(object);
1684 queue_remove(&vm_object_cached_list, object, vm_object_t,
1685 cached_list);
1686 vm_object_cached_count--;
1687
1688 vm_object_cache_unlock();
1689 /*
1690 * Since this object is in the cache, we know
1691 * that it is initialized and has no references.
1692 * Take a reference to avoid recursive deallocations.
1693 */
1694
1695 assert(object->pager_initialized);
1696 assert(object->ref_count == 0);
1697 vm_object_lock_assert_exclusive(object);
1698 object->ref_count++;
1699
1700 /*
1701 * Terminate the object.
1702 * If the object had a shadow, we let vm_object_deallocate
1703 * deallocate it. "pageout" objects have a shadow, but
1704 * maintain a "paging reference" rather than a normal
1705 * reference.
1706 * (We are careful here to limit recursion.)
1707 */
1708 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1709
1710 if(vm_object_terminate(object) != KERN_SUCCESS)
1711 continue;
1712
1713 if (shadow != VM_OBJECT_NULL) {
1714 if (called_from_vm_object_deallocate) {
1715 return shadow;
1716 } else {
1717 vm_object_deallocate(shadow);
1718 }
1719 }
1720 }
1721 }
1722 #endif
1723
1724
1725 /*
1726 * Routine: vm_object_terminate
1727 * Purpose:
1728 * Free all resources associated with a vm_object.
1729 * In/out conditions:
1730 * Upon entry, the object must be locked,
1731 * and the object must have exactly one reference.
1732 *
1733 * The shadow object reference is left alone.
1734 *
1735 * The object must be unlocked if it's found that pages
1736 * must be flushed to a backing object. If someone
1737 * manages to map the object while it is being flushed
1738 * the object is returned unlocked and unchanged. Otherwise,
1739 * upon exit, the cache will be unlocked, and the
1740 * object will cease to exist.
1741 */
1742 static kern_return_t
1743 vm_object_terminate(
1744 vm_object_t object)
1745 {
1746 vm_object_t shadow_object;
1747
1748 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
1749 object, object->ref_count, 0, 0, 0);
1750
1751 vm_object_lock_assert_exclusive(object);
1752
1753 if (!object->pageout && (!object->temporary || object->can_persist) &&
1754 (object->pager != NULL || object->shadow_severed)) {
1755 /*
1756 * Clear pager_trusted bit so that the pages get yanked
1757 * out of the object instead of cleaned in place. This
1758 * prevents a deadlock in XMM and makes more sense anyway.
1759 */
1760 object->pager_trusted = FALSE;
1761
1762 vm_object_reap_pages(object, REAP_TERMINATE);
1763 }
1764 /*
1765 * Make sure the object isn't already being terminated
1766 */
1767 if (object->terminating) {
1768 vm_object_lock_assert_exclusive(object);
1769 object->ref_count--;
1770 assert(object->ref_count > 0);
1771 vm_object_unlock(object);
1772 return KERN_FAILURE;
1773 }
1774
1775 /*
1776 * Did somebody get a reference to the object while we were
1777 * cleaning it?
1778 */
1779 if (object->ref_count != 1) {
1780 vm_object_lock_assert_exclusive(object);
1781 object->ref_count--;
1782 assert(object->ref_count > 0);
1783 vm_object_res_deallocate(object);
1784 vm_object_unlock(object);
1785 return KERN_FAILURE;
1786 }
1787
1788 /*
1789 * Make sure no one can look us up now.
1790 */
1791
1792 object->terminating = TRUE;
1793 object->alive = FALSE;
1794
1795 if ( !object->internal && (object->objq.next || object->objq.prev))
1796 vm_object_cache_remove(object);
1797
1798 if (object->hashed) {
1799 lck_mtx_t *lck;
1800
1801 lck = vm_object_hash_lock_spin(object->pager);
1802 vm_object_remove(object);
1803 vm_object_hash_unlock(lck);
1804 }
1805 /*
1806 * Detach the object from its shadow if we are the shadow's
1807 * copy. The reference we hold on the shadow must be dropped
1808 * by our caller.
1809 */
1810 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1811 !(object->pageout)) {
1812 vm_object_lock(shadow_object);
1813 if (shadow_object->copy == object)
1814 shadow_object->copy = VM_OBJECT_NULL;
1815 vm_object_unlock(shadow_object);
1816 }
1817
1818 if (object->paging_in_progress != 0 ||
1819 object->activity_in_progress != 0) {
1820 /*
1821 * There are still some paging_in_progress references
1822 * on this object, meaning that there are some paging
1823 * or other I/O operations in progress for this VM object.
1824 * Such operations take some paging_in_progress references
1825 * up front to ensure that the object doesn't go away, but
1826 * they may also need to acquire a reference on the VM object,
1827 * to map it in kernel space, for example. That means that
1828 * they may end up releasing the last reference on the VM
1829 * object, triggering its termination, while still holding
1830 * paging_in_progress references. Waiting for these
1831 * pending paging_in_progress references to go away here would
1832 * deadlock.
1833 *
1834 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1835 * complete the VM object termination if it still holds
1836 * paging_in_progress references at this point.
1837 *
1838 * No new paging_in_progress should appear now that the
1839 * VM object is "terminating" and not "alive".
1840 */
1841 vm_object_reap_async(object);
1842 vm_object_unlock(object);
1843 /*
1844 * Return KERN_FAILURE to let the caller know that we
1845 * haven't completed the termination and it can't drop this
1846 * object's reference on its shadow object yet.
1847 * The reaper thread will take care of that once it has
1848 * completed this object's termination.
1849 */
1850 return KERN_FAILURE;
1851 }
1852 /*
1853 * complete the VM object termination
1854 */
1855 vm_object_reap(object);
1856 object = VM_OBJECT_NULL;
1857
1858 /*
1859 * the object lock was released by vm_object_reap()
1860 *
1861 * KERN_SUCCESS means that this object has been terminated
1862 * and no longer needs its shadow object but still holds a
1863 * reference on it.
1864 * The caller is responsible for dropping that reference.
1865 * We can't call vm_object_deallocate() here because that
1866 * would create a recursion.
1867 */
1868 return KERN_SUCCESS;
1869 }
1870
1871
1872 /*
1873 * vm_object_reap():
1874 *
1875 * Complete the termination of a VM object after it's been marked
1876 * as "terminating" and "!alive" by vm_object_terminate().
1877 *
1878 * The VM object must be locked by caller.
1879 * The lock will be released on return and the VM object is no longer valid.
1880 */
1881
1882 void
1883 vm_object_reap(
1884 vm_object_t object)
1885 {
1886 memory_object_t pager;
1887
1888 vm_object_lock_assert_exclusive(object);
1889 assert(object->paging_in_progress == 0);
1890 assert(object->activity_in_progress == 0);
1891
1892 vm_object_reap_count++;
1893
1894 /*
1895 * Disown this purgeable object to cleanup its owner's purgeable
1896 * ledgers. We need to do this before disconnecting the object
1897 * from its pager, to properly account for compressed pages.
1898 */
1899 if (object->internal &&
1900 object->purgable != VM_PURGABLE_DENY) {
1901 vm_purgeable_accounting(object,
1902 object->purgable,
1903 TRUE); /* disown */
1904 }
1905
1906 pager = object->pager;
1907 object->pager = MEMORY_OBJECT_NULL;
1908
1909 if (pager != MEMORY_OBJECT_NULL)
1910 memory_object_control_disable(object->pager_control);
1911
1912 object->ref_count--;
1913 #if TASK_SWAPPER
1914 assert(object->res_count == 0);
1915 #endif /* TASK_SWAPPER */
1916
1917 assert (object->ref_count == 0);
1918
1919 /*
1920 * remove from purgeable queue if it's on
1921 */
1922 if (object->internal) {
1923 task_t owner;
1924
1925 owner = object->vo_purgeable_owner;
1926
1927 VM_OBJECT_UNWIRED(object);
1928
1929 if (object->purgable == VM_PURGABLE_DENY) {
1930 /* not purgeable: nothing to do */
1931 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1932 purgeable_q_t queue;
1933
1934 assert(object->vo_purgeable_owner == NULL);
1935
1936 queue = vm_purgeable_object_remove(object);
1937 assert(queue);
1938
1939 if (object->purgeable_when_ripe) {
1940 /*
1941 * Must take page lock for this -
1942 * using it to protect token queue
1943 */
1944 vm_page_lock_queues();
1945 vm_purgeable_token_delete_first(queue);
1946
1947 assert(queue->debug_count_objects>=0);
1948 vm_page_unlock_queues();
1949 }
1950
1951 /*
1952 * Update "vm_page_purgeable_count" in bulk and mark
1953 * object as VM_PURGABLE_EMPTY to avoid updating
1954 * "vm_page_purgeable_count" again in vm_page_remove()
1955 * when reaping the pages.
1956 */
1957 unsigned int delta;
1958 assert(object->resident_page_count >=
1959 object->wired_page_count);
1960 delta = (object->resident_page_count -
1961 object->wired_page_count);
1962 if (delta != 0) {
1963 assert(vm_page_purgeable_count >= delta);
1964 OSAddAtomic(-delta,
1965 (SInt32 *)&vm_page_purgeable_count);
1966 }
1967 if (object->wired_page_count != 0) {
1968 assert(vm_page_purgeable_wired_count >=
1969 object->wired_page_count);
1970 OSAddAtomic(-object->wired_page_count,
1971 (SInt32 *)&vm_page_purgeable_wired_count);
1972 }
1973 object->purgable = VM_PURGABLE_EMPTY;
1974 }
1975 else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1976 object->purgable == VM_PURGABLE_EMPTY) {
1977 /* remove from nonvolatile queue */
1978 assert(object->vo_purgeable_owner == TASK_NULL);
1979 vm_purgeable_nonvolatile_dequeue(object);
1980 } else {
1981 panic("object %p in unexpected purgeable state 0x%x\n",
1982 object, object->purgable);
1983 }
1984 assert(object->objq.next == NULL);
1985 assert(object->objq.prev == NULL);
1986 }
1987
1988 /*
1989 * Clean or free the pages, as appropriate.
1990 * It is possible for us to find busy/absent pages,
1991 * if some faults on this object were aborted.
1992 */
1993 if (object->pageout) {
1994 assert(object->shadow != VM_OBJECT_NULL);
1995
1996 vm_pageout_object_terminate(object);
1997
1998 } else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
1999
2000 vm_object_reap_pages(object, REAP_REAP);
2001 }
2002 assert(vm_page_queue_empty(&object->memq));
2003 assert(object->paging_in_progress == 0);
2004 assert(object->activity_in_progress == 0);
2005 assert(object->ref_count == 0);
2006
2007 /*
2008 * If the pager has not already been released by
2009 * vm_object_destroy, we need to terminate it and
2010 * release our reference to it here.
2011 */
2012 if (pager != MEMORY_OBJECT_NULL) {
2013 vm_object_unlock(object);
2014 vm_object_release_pager(pager, object->hashed);
2015 vm_object_lock(object);
2016 }
2017
2018 /* kick off anyone waiting on terminating */
2019 object->terminating = FALSE;
2020 vm_object_paging_begin(object);
2021 vm_object_paging_end(object);
2022 vm_object_unlock(object);
2023
2024 object->shadow = VM_OBJECT_NULL;
2025
2026 #if VM_OBJECT_TRACKING
2027 if (vm_object_tracking_inited) {
2028 btlog_remove_entries_for_element(vm_object_tracking_btlog,
2029 object);
2030 }
2031 #endif /* VM_OBJECT_TRACKING */
2032
2033 vm_object_lock_destroy(object);
2034 /*
2035 * Free the space for the object.
2036 */
2037 zfree(vm_object_zone, object);
2038 object = VM_OBJECT_NULL;
2039 }
2040
2041
2042 unsigned int vm_max_batch = 256;
2043
2044 #define V_O_R_MAX_BATCH 128
2045
2046 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
2047
2048
2049 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
2050 MACRO_BEGIN \
2051 if (_local_free_q) { \
2052 if (do_disconnect) { \
2053 vm_page_t m; \
2054 for (m = _local_free_q; \
2055 m != VM_PAGE_NULL; \
2056 m = m->snext) { \
2057 if (m->pmapped) { \
2058 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
2059 } \
2060 } \
2061 } \
2062 vm_page_free_list(_local_free_q, TRUE); \
2063 _local_free_q = VM_PAGE_NULL; \
2064 } \
2065 MACRO_END
2066
2067
2068 void
2069 vm_object_reap_pages(
2070 vm_object_t object,
2071 int reap_type)
2072 {
2073 vm_page_t p;
2074 vm_page_t next;
2075 vm_page_t local_free_q = VM_PAGE_NULL;
2076 int loop_count;
2077 boolean_t disconnect_on_release;
2078 pmap_flush_context pmap_flush_context_storage;
2079
2080 if (reap_type == REAP_DATA_FLUSH) {
2081 /*
2082 * We need to disconnect pages from all pmaps before
2083 * releasing them to the free list
2084 */
2085 disconnect_on_release = TRUE;
2086 } else {
2087 /*
2088 * Either the caller has already disconnected the pages
2089 * from all pmaps, or we disconnect them here as we add
2090 * them to our local list of pages to be released.
2091 * No need to re-disconnect them when we release the pages
2092 * to the free list.
2093 */
2094 disconnect_on_release = FALSE;
2095 }
2096
2097 restart_after_sleep:
2098 if (vm_page_queue_empty(&object->memq))
2099 return;
2100 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
2101
2102 if (reap_type == REAP_PURGEABLE)
2103 pmap_flush_context_init(&pmap_flush_context_storage);
2104
2105 vm_page_lockspin_queues();
2106
2107 next = (vm_page_t)vm_page_queue_first(&object->memq);
2108
2109 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
2110
2111 p = next;
2112 next = (vm_page_t)vm_page_queue_next(&next->listq);
2113
2114 if (--loop_count == 0) {
2115
2116 vm_page_unlock_queues();
2117
2118 if (local_free_q) {
2119
2120 if (reap_type == REAP_PURGEABLE) {
2121 pmap_flush(&pmap_flush_context_storage);
2122 pmap_flush_context_init(&pmap_flush_context_storage);
2123 }
2124 /*
2125 * Free the pages we reclaimed so far
2126 * and take a little break to avoid
2127 * hogging the page queue lock too long
2128 */
2129 VM_OBJ_REAP_FREELIST(local_free_q,
2130 disconnect_on_release);
2131 } else
2132 mutex_pause(0);
2133
2134 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
2135
2136 vm_page_lockspin_queues();
2137 }
2138 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
2139
2140 if (p->busy || p->cleaning) {
2141
2142 vm_page_unlock_queues();
2143 /*
2144 * free the pages reclaimed so far
2145 */
2146 VM_OBJ_REAP_FREELIST(local_free_q,
2147 disconnect_on_release);
2148
2149 PAGE_SLEEP(object, p, THREAD_UNINT);
2150
2151 goto restart_after_sleep;
2152 }
2153 if (p->laundry)
2154 vm_pageout_steal_laundry(p, TRUE);
2155 }
2156 switch (reap_type) {
2157
2158 case REAP_DATA_FLUSH:
2159 if (VM_PAGE_WIRED(p)) {
2160 /*
2161 * this is an odd case... perhaps we should
2162 * zero-fill this page since we're conceptually
2163 * tossing its data at this point, but leaving
2164 * it on the object to honor the 'wire' contract
2165 */
2166 continue;
2167 }
2168 break;
2169
2170 case REAP_PURGEABLE:
2171 if (VM_PAGE_WIRED(p)) {
2172 /*
2173 * can't purge a wired page
2174 */
2175 vm_page_purged_wired++;
2176 continue;
2177 }
2178 if (p->laundry && !p->busy && !p->cleaning)
2179 vm_pageout_steal_laundry(p, TRUE);
2180
2181 if (p->cleaning || p->laundry || p->absent) {
2182 /*
2183 * page is being acted upon,
2184 * so don't mess with it
2185 */
2186 vm_page_purged_others++;
2187 continue;
2188 }
2189 if (p->busy) {
2190 /*
2191 * We can't reclaim a busy page, but since
2192 * it's not wired we can deactivate it to make
2193 * sure that it gets considered by
2194 * vm_pageout_scan() later.
2195 */
2196 if (VM_PAGE_PAGEABLE(p))
2197 vm_page_deactivate(p);
2198 vm_page_purged_busy++;
2199 continue;
2200 }
2201
2202 assert(VM_PAGE_OBJECT(p) != kernel_object);
2203
2204 /*
2205 * we can discard this page...
2206 */
2207 if (p->pmapped == TRUE) {
2208 /*
2209 * unmap the page
2210 */
2211 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
2212 }
2213 vm_page_purged_count++;
2214
2215 break;
2216
2217 case REAP_TERMINATE:
2218 if (p->absent || p->private) {
2219 /*
2220 * For private pages, VM_PAGE_FREE just
2221 * leaves the page structure around for
2222 * its owner to clean up. For absent
2223 * pages, the structure is returned to
2224 * the appropriate pool.
2225 */
2226 break;
2227 }
2228 if (p->fictitious) {
2229 assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
2230 break;
2231 }
2232 if (!p->dirty && p->wpmapped)
2233 p->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
2234
2235 if ((p->dirty || p->precious) && !p->error && object->alive) {
2236
2237 assert(!object->internal);
2238
2239 p->free_when_done = TRUE;
2240
2241 if (!p->laundry) {
2242 vm_page_queues_remove(p, TRUE);
2243 /*
2244 * flush page... page will be freed
2245 * upon completion of I/O
2246 */
2247 (void)vm_pageout_cluster(p, FALSE, FALSE);
2248 }
2249 vm_page_unlock_queues();
2250 /*
2251 * free the pages reclaimed so far
2252 */
2253 VM_OBJ_REAP_FREELIST(local_free_q,
2254 disconnect_on_release);
2255
2256 vm_object_paging_wait(object, THREAD_UNINT);
2257
2258 goto restart_after_sleep;
2259 }
2260 break;
2261
2262 case REAP_REAP:
2263 break;
2264 }
2265 vm_page_free_prepare_queues(p);
2266 assert(p->pageq.next == 0 && p->pageq.prev == 0);
2267 /*
2268 * Add this page to our list of reclaimed pages,
2269 * to be freed later.
2270 */
2271 p->snext = local_free_q;
2272 local_free_q = p;
2273 }
2274 vm_page_unlock_queues();
2275
2276 /*
2277 * Free the remaining reclaimed pages
2278 */
2279 if (reap_type == REAP_PURGEABLE)
2280 pmap_flush(&pmap_flush_context_storage);
2281
2282 VM_OBJ_REAP_FREELIST(local_free_q,
2283 disconnect_on_release);
2284 }
2285
2286
2287 void
2288 vm_object_reap_async(
2289 vm_object_t object)
2290 {
2291 vm_object_lock_assert_exclusive(object);
2292
2293 vm_object_reaper_lock_spin();
2294
2295 vm_object_reap_count_async++;
2296
2297 /* enqueue the VM object... */
2298 queue_enter(&vm_object_reaper_queue, object,
2299 vm_object_t, cached_list);
2300
2301 vm_object_reaper_unlock();
2302
2303 /* ... and wake up the reaper thread */
2304 thread_wakeup((event_t) &vm_object_reaper_queue);
2305 }
2306
2307
2308 void
2309 vm_object_reaper_thread(void)
2310 {
2311 vm_object_t object, shadow_object;
2312
2313 vm_object_reaper_lock_spin();
2314
2315 while (!queue_empty(&vm_object_reaper_queue)) {
2316 queue_remove_first(&vm_object_reaper_queue,
2317 object,
2318 vm_object_t,
2319 cached_list);
2320
2321 vm_object_reaper_unlock();
2322 vm_object_lock(object);
2323
2324 assert(object->terminating);
2325 assert(!object->alive);
2326
2327 /*
2328 * The pageout daemon might be playing with our pages.
2329 * Now that the object is dead, it won't touch any more
2330 * pages, but some pages might already be on their way out.
2331 * Hence, we wait until the active paging activities have
2332 * ceased before we break the association with the pager
2333 * itself.
2334 */
2335 while (object->paging_in_progress != 0 ||
2336 object->activity_in_progress != 0) {
2337 vm_object_wait(object,
2338 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
2339 THREAD_UNINT);
2340 vm_object_lock(object);
2341 }
2342
2343 shadow_object =
2344 object->pageout ? VM_OBJECT_NULL : object->shadow;
2345
2346 vm_object_reap(object);
2347 /* cache is unlocked and object is no longer valid */
2348 object = VM_OBJECT_NULL;
2349
2350 if (shadow_object != VM_OBJECT_NULL) {
2351 /*
2352 * Drop the reference "object" was holding on
2353 * its shadow object.
2354 */
2355 vm_object_deallocate(shadow_object);
2356 shadow_object = VM_OBJECT_NULL;
2357 }
2358 vm_object_reaper_lock_spin();
2359 }
2360
2361 /* wait for more work... */
2362 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
2363
2364 vm_object_reaper_unlock();
2365
2366 thread_block((thread_continue_t) vm_object_reaper_thread);
2367 /*NOTREACHED*/
2368 }
2369
2370 /*
2371 * Routine: vm_object_pager_wakeup
2372 * Purpose: Wake up anyone waiting for termination of a pager.
2373 */
2374
2375 static void
2376 vm_object_pager_wakeup(
2377 memory_object_t pager)
2378 {
2379 vm_object_hash_entry_t entry;
2380 boolean_t waiting = FALSE;
2381 lck_mtx_t *lck;
2382
2383 /*
2384 * If anyone was waiting for the memory_object_terminate
2385 * to be queued, wake them up now.
2386 */
2387 lck = vm_object_hash_lock_spin(pager);
2388 entry = vm_object_hash_lookup(pager, TRUE);
2389 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
2390 waiting = entry->waiting;
2391 vm_object_hash_unlock(lck);
2392
2393 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
2394 if (waiting)
2395 thread_wakeup((event_t) pager);
2396 vm_object_hash_entry_free(entry);
2397 }
2398 }
2399
2400 /*
2401 * Routine: vm_object_release_pager
2402 * Purpose: Terminate the pager and, upon completion,
2403 * release our last reference to it.
2404 * This is just like memory_object_terminate, except
2405 * that we wake up anyone blocked in vm_object_enter
2406 * waiting for the termination message to be queued
2407 * before calling memory_object_init.
2408 */
2409 static void
2410 vm_object_release_pager(
2411 memory_object_t pager,
2412 boolean_t hashed)
2413 {
2414
2415 /*
2416 * Terminate the pager.
2417 */
2418
2419 (void) memory_object_terminate(pager);
2420
2421 if (hashed == TRUE) {
2422 /*
2423 * Wakeup anyone waiting for this terminate
2424 * and remove the entry from the hash
2425 */
2426 vm_object_pager_wakeup(pager);
2427 }
2428 /*
2429 * Release reference to pager.
2430 */
2431 memory_object_deallocate(pager);
2432 }
2433
2434 /*
2435 * Routine: vm_object_destroy
2436 * Purpose:
2437 * Shut down a VM object, despite the
2438 * presence of address map (or other) references
2439 * to the vm_object.
2440 */
2441 kern_return_t
2442 vm_object_destroy(
2443 vm_object_t object,
2444 __unused kern_return_t reason)
2445 {
2446 memory_object_t old_pager;
2447
2448 if (object == VM_OBJECT_NULL)
2449 return(KERN_SUCCESS);
2450
2451 /*
2452 * Remove the pager association immediately.
2453 *
2454 * This will prevent the memory manager from further
2455 * meddling. [If it wanted to flush data or make
2456 * other changes, it should have done so before performing
2457 * the destroy call.]
2458 */
2459
2460 vm_object_lock(object);
2461 object->can_persist = FALSE;
2462 object->named = FALSE;
2463 object->alive = FALSE;
2464
2465 if (object->hashed) {
2466 lck_mtx_t *lck;
2467 /*
2468 * Rip out the pager from the vm_object now...
2469 */
2470 lck = vm_object_hash_lock_spin(object->pager);
2471 vm_object_remove(object);
2472 vm_object_hash_unlock(lck);
2473 }
2474 old_pager = object->pager;
2475 object->pager = MEMORY_OBJECT_NULL;
2476 if (old_pager != MEMORY_OBJECT_NULL)
2477 memory_object_control_disable(object->pager_control);
2478
2479 /*
2480 * Wait for the existing paging activity (that got
2481 * through before we nulled out the pager) to subside.
2482 */
2483
2484 vm_object_paging_wait(object, THREAD_UNINT);
2485 vm_object_unlock(object);
2486
2487 /*
2488 * Terminate the object now.
2489 */
2490 if (old_pager != MEMORY_OBJECT_NULL) {
2491 vm_object_release_pager(old_pager, object->hashed);
2492
2493 /*
2494 * JMM - Release the caller's reference. This assumes the
2495 * caller had a reference to release, which is a big (but
2496 * currently valid) assumption if this is driven from the
2497 * vnode pager (it is holding a named reference when making
2498 * this call)..
2499 */
2500 vm_object_deallocate(object);
2501
2502 }
2503 return(KERN_SUCCESS);
2504 }
2505
2506
2507 #if VM_OBJECT_CACHE
2508
2509 #define VM_OBJ_DEACT_ALL_STATS DEBUG
2510 #if VM_OBJ_DEACT_ALL_STATS
2511 uint32_t vm_object_deactivate_all_pages_batches = 0;
2512 uint32_t vm_object_deactivate_all_pages_pages = 0;
2513 #endif /* VM_OBJ_DEACT_ALL_STATS */
2514 /*
2515 * vm_object_deactivate_all_pages
2516 *
2517 * Deactivate all pages in the specified object. (Keep its pages
2518 * in memory even though it is no longer referenced.)
2519 *
2520 * The object must be locked.
2521 */
2522 static void
2523 vm_object_deactivate_all_pages(
2524 vm_object_t object)
2525 {
2526 vm_page_t p;
2527 int loop_count;
2528 #if VM_OBJ_DEACT_ALL_STATS
2529 int pages_count;
2530 #endif /* VM_OBJ_DEACT_ALL_STATS */
2531 #define V_O_D_A_P_MAX_BATCH 256
2532
2533 loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
2534 #if VM_OBJ_DEACT_ALL_STATS
2535 pages_count = 0;
2536 #endif /* VM_OBJ_DEACT_ALL_STATS */
2537 vm_page_lock_queues();
2538 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
2539 if (--loop_count == 0) {
2540 #if VM_OBJ_DEACT_ALL_STATS
2541 hw_atomic_add(&vm_object_deactivate_all_pages_batches,
2542 1);
2543 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
2544 pages_count);
2545 pages_count = 0;
2546 #endif /* VM_OBJ_DEACT_ALL_STATS */
2547 lck_mtx_yield(&vm_page_queue_lock);
2548 loop_count = BATCH_LIMIT(V_O_D_A_P_MAX_BATCH);
2549 }
2550 if (!p->busy && (p->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q)) {
2551 #if VM_OBJ_DEACT_ALL_STATS
2552 pages_count++;
2553 #endif /* VM_OBJ_DEACT_ALL_STATS */
2554 vm_page_deactivate(p);
2555 }
2556 }
2557 #if VM_OBJ_DEACT_ALL_STATS
2558 if (pages_count) {
2559 hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
2560 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
2561 pages_count);
2562 pages_count = 0;
2563 }
2564 #endif /* VM_OBJ_DEACT_ALL_STATS */
2565 vm_page_unlock_queues();
2566 }
2567 #endif /* VM_OBJECT_CACHE */
2568
2569
2570
2571 /*
2572 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2573 * exist because of the need to handle shadow chains. When deactivating pages, we only
2574 * want to deactivate the ones at the topmost level in the object chain. In order to do
2575 * this efficiently, the specified address range is divided up into "chunks" and we use
2576 * a bit map to keep track of which pages have already been processed as we descend down
2577 * the shadow chain. These chunk macros hide the details of the bit map implementation
2578 * as much as we can.
2579 *
2580 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2581 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2582 * order bit represents page 0 in the current range and highest order bit represents
2583 * page 63.
2584 *
2585 * For further convenience, we also use negative logic for the page state in the bit map.
2586 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2587 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2588 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2589 * out with all the bits set. The macros below hide all these details from the caller.
2590 */
2591
2592 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2593 /* be the same as the number of bits in */
2594 /* the chunk_state_t type. We use 64 */
2595 /* just for convenience. */
2596
2597 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2598
2599 typedef uint64_t chunk_state_t;
2600
2601 /*
2602 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2603 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2604 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2605 * looking at pages in that range. This can save us from unnecessarily chasing down the
2606 * shadow chain.
2607 */
2608
2609 #define CHUNK_INIT(c, len) \
2610 MACRO_BEGIN \
2611 uint64_t p; \
2612 \
2613 (c) = 0xffffffffffffffffLL; \
2614 \
2615 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2616 MARK_PAGE_HANDLED(c, p); \
2617 MACRO_END
2618
2619
2620 /*
2621 * Return true if all pages in the chunk have not yet been processed.
2622 */
2623
2624 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2625
2626 /*
2627 * Return true if the page at offset 'p' in the bit map has already been handled
2628 * while processing a higher level object in the shadow chain.
2629 */
2630
2631 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0)
2632
2633 /*
2634 * Mark the page at offset 'p' in the bit map as having been processed.
2635 */
2636
2637 #define MARK_PAGE_HANDLED(c, p) \
2638 MACRO_BEGIN \
2639 (c) = (c) & ~(1LL << (p)); \
2640 MACRO_END
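
/*
 * A minimal walk-through of the chunk bit map, kept under "#if 0" since
 * it is only illustrative: the 3-page length and the order of the
 * MARK_PAGE_HANDLED calls are made up, but the macros are the ones
 * defined above.
 */
#if 0
static void
chunk_bitmap_sketch(void)
{
	chunk_state_t c;

	CHUNK_INIT(c, 3 * PAGE_SIZE_64);	/* bits 0..2 set; 3..63 pre-marked "handled" */

	assert(!PAGE_ALREADY_HANDLED(c, 0));	/* page 0 not seen at any level yet */
	MARK_PAGE_HANDLED(c, 0);		/* top-level object had page 0 */
	assert(PAGE_ALREADY_HANDLED(c, 0));	/* shadow objects will now skip page 0 */

	MARK_PAGE_HANDLED(c, 1);
	MARK_PAGE_HANDLED(c, 2);
	assert(!CHUNK_NOT_COMPLETE(c));		/* all 3 pages handled: stop walking the chain */
}
#endif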
2641
2642
2643 /*
2644 * Return true if the page at the given offset has been paged out. Object is
2645 * locked upon entry and returned locked.
2646 */
2647
2648 static boolean_t
2649 page_is_paged_out(
2650 vm_object_t object,
2651 vm_object_offset_t offset)
2652 {
2653 if (object->internal &&
2654 object->alive &&
2655 !object->terminating &&
2656 object->pager_ready) {
2657
2658 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2659 == VM_EXTERNAL_STATE_EXISTS) {
2660 return TRUE;
2661 }
2662 }
2663 return FALSE;
2664 }
2665
2666
2667
2668 /*
2669 * madvise_free_debug
2670 *
2671 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2672 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2673 * simulate the loss of the page's contents as if the page had been
2674 * reclaimed and then re-faulted.
2675 */
2676 #if DEVELOPMENT || DEBUG
2677 int madvise_free_debug = 1;
2678 #else /* DEVELOPMENT || DEBUG */
2679 int madvise_free_debug = 0;
2680 #endif /* DEVELOPMENT || DEBUG */
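
/*
 * A user-level sketch (not part of this file) of what madvise_free_debug
 * makes observable.  Illustrative only: it assumes a DEVELOPMENT/DEBUG
 * kernel with the knob left at 1, and the mapping size and fill pattern
 * are arbitrary.
 */
#if 0
#include <sys/mman.h>
#include <assert.h>
#include <string.h>

static void
madvise_free_sketch(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	assert(p != MAP_FAILED);

	memset(p, 0xab, len);		/* dirty the page */
	madvise(p, len, MADV_FREE);	/* contents may now be discarded */

	/*
	 * With madvise_free_debug enabled, the page reads back as zeroes
	 * right away, instead of only after it happens to be reclaimed.
	 */
	assert(p[0] == 0);
	munmap(p, len);
}
#endif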
2681
2682 /*
2683 * Deactivate the pages in the specified object and range. If kill_page is set, also discard any
2684 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2685 * a size that is less than or equal to the CHUNK_SIZE.
2686 */
2687
2688 static void
2689 deactivate_pages_in_object(
2690 vm_object_t object,
2691 vm_object_offset_t offset,
2692 vm_object_size_t size,
2693 boolean_t kill_page,
2694 boolean_t reusable_page,
2695 boolean_t all_reusable,
2696 chunk_state_t *chunk_state,
2697 pmap_flush_context *pfc,
2698 struct pmap *pmap,
2699 vm_map_offset_t pmap_offset)
2700 {
2701 vm_page_t m;
2702 int p;
2703 struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
2704 struct vm_page_delayed_work *dwp;
2705 int dw_count;
2706 int dw_limit;
2707 unsigned int reusable = 0;
2708
2709 /*
2710 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2711 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2712 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2713 * all the pages in the chunk.
2714 */
2715
2716 dwp = &dw_array[0];
2717 dw_count = 0;
2718 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2719
2720 for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2721
2722 /*
2723 * If this offset has already been found and handled in a higher level object, then don't
2724 * do anything with it in the current shadow object.
2725 */
2726
2727 if (PAGE_ALREADY_HANDLED(*chunk_state, p))
2728 continue;
2729
2730 /*
2731 * See if the page at this offset is around. First check to see if the page is resident,
2732 * then if not, check the existence map or with the pager.
2733 */
2734
2735 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2736
2737 /*
2738 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2739 * so that we won't bother looking for a page at this offset again if there are more
2740 * shadow objects. Then deactivate the page.
2741 */
2742
2743 MARK_PAGE_HANDLED(*chunk_state, p);
2744
2745 if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) {
2746 int clear_refmod;
2747 int pmap_options;
2748
2749 dwp->dw_mask = 0;
2750
2751 pmap_options = 0;
2752 clear_refmod = VM_MEM_REFERENCED;
2753 dwp->dw_mask |= DW_clear_reference;
2754
2755 if ((kill_page) && (object->internal)) {
2756 if (madvise_free_debug) {
2757 /*
2758 * zero-fill the page now
2759 * to simulate it being
2760 * reclaimed and re-faulted.
2761 */
2762 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2763 }
2764 m->precious = FALSE;
2765 m->dirty = FALSE;
2766
2767 clear_refmod |= VM_MEM_MODIFIED;
2768 if (m->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) {
2769 /*
2770 * This page is now clean and
2771 * reclaimable. Move it out
2772 * of the throttled queue, so
2773 * that vm_pageout_scan() can
2774 * find it.
2775 */
2776 dwp->dw_mask |= DW_move_page;
2777 }
2778
2779 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2780
2781 if (reusable_page && !m->reusable) {
2782 assert(!all_reusable);
2783 assert(!object->all_reusable);
2784 m->reusable = TRUE;
2785 object->reusable_page_count++;
2786 assert(object->resident_page_count >= object->reusable_page_count);
2787 reusable++;
2788 /*
2789 * Tell pmap this page is now
2790 * "reusable" (to update pmap
2791 * stats for all mappings).
2792 */
2793 pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2794 }
2795 }
2796 pmap_options |= PMAP_OPTIONS_NOFLUSH;
2797 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2798 clear_refmod,
2799 pmap_options,
2800 (void *)pfc);
2801
2802 if ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable))
2803 dwp->dw_mask |= DW_move_page;
2804
2805 if (dwp->dw_mask)
2806 VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2807 dw_count);
2808
2809 if (dw_count >= dw_limit) {
2810 if (reusable) {
2811 OSAddAtomic(reusable,
2812 &vm_page_stats_reusable.reusable_count);
2813 vm_page_stats_reusable.reusable += reusable;
2814 reusable = 0;
2815 }
2816 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
2817
2818 dwp = &dw_array[0];
2819 dw_count = 0;
2820 }
2821 }
2822
2823 } else {
2824
2825 /*
2826 * The page at this offset isn't memory resident; check to see if it's
2827 * been paged out. If so, mark it as handled so we don't bother looking
2828 * for it in the shadow chain.
2829 */
2830
2831 if (page_is_paged_out(object, offset)) {
2832 MARK_PAGE_HANDLED(*chunk_state, p);
2833
2834 /*
2835 * If we're killing a non-resident page, then clear the page in the existence
2836 * map so we don't bother paging it back in if it's touched again in the future.
2837 */
2838
2839 if ((kill_page) && (object->internal)) {
2840
2841 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2842
2843 if (pmap != PMAP_NULL) {
2844 /*
2845 * Tell pmap that this page
2846 * is no longer mapped, to
2847 * adjust the footprint ledger
2848 * because this page is no
2849 * longer compressed.
2850 */
2851 pmap_remove_options(
2852 pmap,
2853 pmap_offset,
2854 (pmap_offset +
2855 PAGE_SIZE),
2856 PMAP_OPTIONS_REMOVE);
2857 }
2858 }
2859 }
2860 }
2861 }
2862
2863 if (reusable) {
2864 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2865 vm_page_stats_reusable.reusable += reusable;
2866 reusable = 0;
2867 }
2868
2869 if (dw_count)
2870 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
2871 }
2872
2873
2874 /*
2875 * Deactive a "chunk" of the given range of the object starting at offset. A "chunk"
2876 * will always be less than or equal to the given size. The total range is divided up
2877 * into chunks for efficiency and performance related to the locks and handling the shadow
2878 * chain. This routine returns how much of the given "size" it actually processed. It's
2879 * up to the caler to loop and keep calling this routine until the entire range they want
2880 * to process has been done.
2881 */
2882
2883 static vm_object_size_t
2884 deactivate_a_chunk(
2885 vm_object_t orig_object,
2886 vm_object_offset_t offset,
2887 vm_object_size_t size,
2888 boolean_t kill_page,
2889 boolean_t reusable_page,
2890 boolean_t all_reusable,
2891 pmap_flush_context *pfc,
2892 struct pmap *pmap,
2893 vm_map_offset_t pmap_offset)
2894 {
2895 vm_object_t object;
2896 vm_object_t tmp_object;
2897 vm_object_size_t length;
2898 chunk_state_t chunk_state;
2899
2900
2901 /*
2902 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2903 * remaining size the caller asked for.
2904 */
2905
2906 length = MIN(size, CHUNK_SIZE);
2907
2908 /*
2909 * The chunk_state keeps track of which pages we've already processed if there's
2910 * a shadow chain on this object. At this point, we haven't done anything with this
2911 * range of pages yet, so initialize the state to indicate no pages processed yet.
2912 */
2913
2914 CHUNK_INIT(chunk_state, length);
2915 object = orig_object;
2916
2917 /*
2918 * Start at the top level object and iterate around the loop once for each object
2919 * in the shadow chain. We stop processing early if we've already found all the pages
2920 * in the range. Otherwise we stop when we run out of shadow objects.
2921 */
2922
2923 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2924 vm_object_paging_begin(object);
2925
2926 deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset);
2927
2928 vm_object_paging_end(object);
2929
2930 /*
2931 * We've finished with this object; see if there's a shadow object. If
2932 * there is, update the offset and lock the new object. We also turn off
2933 * kill_page at this point since we only kill pages in the topmost object.
2934 */
2935
2936 tmp_object = object->shadow;
2937
2938 if (tmp_object) {
2939 kill_page = FALSE;
2940 reusable_page = FALSE;
2941 all_reusable = FALSE;
2942 offset += object->vo_shadow_offset;
2943 vm_object_lock(tmp_object);
2944 }
2945
2946 if (object != orig_object)
2947 vm_object_unlock(object);
2948
2949 object = tmp_object;
2950 }
2951
2952 if (object && object != orig_object)
2953 vm_object_unlock(object);
2954
2955 return length;
2956 }
2957
2958
2959
2960 /*
2961 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2962 * we also clear the modified status of the page and "forget" any changes that have been made
2963 * to the page.
2964 */
2965
2966 __private_extern__ void
2967 vm_object_deactivate_pages(
2968 vm_object_t object,
2969 vm_object_offset_t offset,
2970 vm_object_size_t size,
2971 boolean_t kill_page,
2972 boolean_t reusable_page,
2973 struct pmap *pmap,
2974 vm_map_offset_t pmap_offset)
2975 {
2976 vm_object_size_t length;
2977 boolean_t all_reusable;
2978 pmap_flush_context pmap_flush_context_storage;
2979
2980 /*
2981 * We break the range up into chunks and do one chunk at a time. This is for
2982 * efficiency and performance while handling the shadow chains and the locks.
2983 * The deactivate_a_chunk() function returns how much of the range it processed.
2984 * We keep calling this routine until the given size is exhausted.
2985 */
2986
2987
2988 all_reusable = FALSE;
2989 #if 11
2990 /*
2991 * For the sake of accurate "reusable" pmap stats, we need
2992 * to tell pmap about each page that is no longer "reusable",
2993 * so we can't do the "all_reusable" optimization.
2994 */
2995 #else
2996 if (reusable_page &&
2997 object->internal &&
2998 object->vo_size != 0 &&
2999 object->vo_size == size &&
3000 object->reusable_page_count == 0) {
3001 all_reusable = TRUE;
3002 reusable_page = FALSE;
3003 }
3004 #endif
3005
3006 if ((reusable_page || all_reusable) && object->all_reusable) {
3007 /* This means MADV_FREE_REUSABLE has been called twice, which
3008 * is probably illegal. */
3009 return;
3010 }
3011
3012 pmap_flush_context_init(&pmap_flush_context_storage);
3013
3014 while (size) {
3015 length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset);
3016
3017 size -= length;
3018 offset += length;
3019 pmap_offset += length;
3020 }
3021 pmap_flush(&pmap_flush_context_storage);
3022
3023 if (all_reusable) {
3024 if (!object->all_reusable) {
3025 unsigned int reusable;
3026
3027 object->all_reusable = TRUE;
3028 assert(object->reusable_page_count == 0);
3029 /* update global stats */
3030 reusable = object->resident_page_count;
3031 OSAddAtomic(reusable,
3032 &vm_page_stats_reusable.reusable_count);
3033 vm_page_stats_reusable.reusable += reusable;
3034 vm_page_stats_reusable.all_reusable_calls++;
3035 }
3036 } else if (reusable_page) {
3037 vm_page_stats_reusable.partial_reusable_calls++;
3038 }
3039 }
3040
3041 void
3042 vm_object_reuse_pages(
3043 vm_object_t object,
3044 vm_object_offset_t start_offset,
3045 vm_object_offset_t end_offset,
3046 boolean_t allow_partial_reuse)
3047 {
3048 vm_object_offset_t cur_offset;
3049 vm_page_t m;
3050 unsigned int reused, reusable;
3051
3052 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
3053 MACRO_BEGIN \
3054 if ((m) != VM_PAGE_NULL && \
3055 (m)->reusable) { \
3056 assert((object)->reusable_page_count <= \
3057 (object)->resident_page_count); \
3058 assert((object)->reusable_page_count > 0); \
3059 (object)->reusable_page_count--; \
3060 (m)->reusable = FALSE; \
3061 (reused)++; \
3062 /* \
3063 * Tell pmap that this page is no longer \
3064 * "reusable", to update the "reusable" stats \
3065 * for all the pmaps that have mapped this \
3066 * page. \
3067 */ \
3068 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
3069 0, /* refmod */ \
3070 (PMAP_OPTIONS_CLEAR_REUSABLE \
3071 | PMAP_OPTIONS_NOFLUSH), \
3072 NULL); \
3073 } \
3074 MACRO_END
3075
3076 reused = 0;
3077 reusable = 0;
3078
3079 vm_object_lock_assert_exclusive(object);
3080
3081 if (object->all_reusable) {
3082 panic("object %p all_reusable: can't update pmap stats\n",
3083 object);
3084 assert(object->reusable_page_count == 0);
3085 object->all_reusable = FALSE;
3086 if (end_offset - start_offset == object->vo_size ||
3087 !allow_partial_reuse) {
3088 vm_page_stats_reusable.all_reuse_calls++;
3089 reused = object->resident_page_count;
3090 } else {
3091 vm_page_stats_reusable.partial_reuse_calls++;
3092 vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
3093 if (m->offset < start_offset ||
3094 m->offset >= end_offset) {
3095 m->reusable = TRUE;
3096 object->reusable_page_count++;
3097 assert(object->resident_page_count >= object->reusable_page_count);
3098 continue;
3099 } else {
3100 assert(!m->reusable);
3101 reused++;
3102 }
3103 }
3104 }
3105 } else if (object->resident_page_count >
3106 ((end_offset - start_offset) >> PAGE_SHIFT)) {
3107 vm_page_stats_reusable.partial_reuse_calls++;
3108 for (cur_offset = start_offset;
3109 cur_offset < end_offset;
3110 cur_offset += PAGE_SIZE_64) {
3111 if (object->reusable_page_count == 0) {
3112 break;
3113 }
3114 m = vm_page_lookup(object, cur_offset);
3115 VM_OBJECT_REUSE_PAGE(object, m, reused);
3116 }
3117 } else {
3118 vm_page_stats_reusable.partial_reuse_calls++;
3119 vm_page_queue_iterate(&object->memq, m, vm_page_t, listq) {
3120 if (object->reusable_page_count == 0) {
3121 break;
3122 }
3123 if (m->offset < start_offset ||
3124 m->offset >= end_offset) {
3125 continue;
3126 }
3127 VM_OBJECT_REUSE_PAGE(object, m, reused);
3128 }
3129 }
3130
3131 /* update global stats */
3132 OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
3133 vm_page_stats_reusable.reused += reused;
3134 vm_page_stats_reusable.reusable += reusable;
3135 }
3136
3137 /*
3138 * Routine: vm_object_pmap_protect
3139 *
3140 * Purpose:
3141 * Reduces the permission for all physical
3142 * pages in the specified object range.
3143 *
3144 * If removing write permission only, it is
3145 * sufficient to protect only the pages in
3146 * the top-level object; only those pages may
3147 * have write permission.
3148 *
3149 * If removing all access, we must follow the
3150 * shadow chain from the top-level object to
3151 * remove access to all pages in shadowed objects.
3152 *
3153 * The object must *not* be locked. The object must
3154 * be temporary/internal.
3155 *
3156 * If pmap is not NULL, this routine assumes that
3157 * the only mappings for the pages are in that
3158 * pmap.
3159 */
3160
3161 __private_extern__ void
3162 vm_object_pmap_protect(
3163 vm_object_t object,
3164 vm_object_offset_t offset,
3165 vm_object_size_t size,
3166 pmap_t pmap,
3167 vm_map_offset_t pmap_start,
3168 vm_prot_t prot)
3169 {
3170 vm_object_pmap_protect_options(object, offset, size,
3171 pmap, pmap_start, prot, 0);
3172 }
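
/*
 * An illustrative call, not taken from any real caller: "object",
 * "map_pmap" and "map_start" are hypothetical names.  It shows the
 * common case of stripping write permission from one page of an
 * internal object (e.g. when setting up copy-on-write); passing
 * VM_PROT_NONE instead would also make the routine walk the shadow
 * chain, as described above.
 */
#if 0
	vm_object_pmap_protect(object,
	    0,			/* offset within the object */
	    PAGE_SIZE,		/* size of the range */
	    map_pmap,		/* pmap holding the only mappings, or PMAP_NULL */
	    map_start,		/* start of the range in that pmap */
	    VM_PROT_READ);	/* permissions to keep */
#endif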
3173
3174 __private_extern__ void
3175 vm_object_pmap_protect_options(
3176 vm_object_t object,
3177 vm_object_offset_t offset,
3178 vm_object_size_t size,
3179 pmap_t pmap,
3180 vm_map_offset_t pmap_start,
3181 vm_prot_t prot,
3182 int options)
3183 {
3184 pmap_flush_context pmap_flush_context_storage;
3185 boolean_t delayed_pmap_flush = FALSE;
3186
3187 if (object == VM_OBJECT_NULL)
3188 return;
3189 size = vm_object_round_page(size);
3190 offset = vm_object_trunc_page(offset);
3191
3192 vm_object_lock(object);
3193
3194 if (object->phys_contiguous) {
3195 if (pmap != NULL) {
3196 vm_object_unlock(object);
3197 pmap_protect_options(pmap,
3198 pmap_start,
3199 pmap_start + size,
3200 prot,
3201 options & ~PMAP_OPTIONS_NOFLUSH,
3202 NULL);
3203 } else {
3204 vm_object_offset_t phys_start, phys_end, phys_addr;
3205
3206 phys_start = object->vo_shadow_offset + offset;
3207 phys_end = phys_start + size;
3208 assert(phys_start <= phys_end);
3209 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
3210 vm_object_unlock(object);
3211
3212 pmap_flush_context_init(&pmap_flush_context_storage);
3213 delayed_pmap_flush = FALSE;
3214
3215 for (phys_addr = phys_start;
3216 phys_addr < phys_end;
3217 phys_addr += PAGE_SIZE_64) {
3218 pmap_page_protect_options(
3219 (ppnum_t) (phys_addr >> PAGE_SHIFT),
3220 prot,
3221 options | PMAP_OPTIONS_NOFLUSH,
3222 (void *)&pmap_flush_context_storage);
3223 delayed_pmap_flush = TRUE;
3224 }
3225 if (delayed_pmap_flush == TRUE)
3226 pmap_flush(&pmap_flush_context_storage);
3227 }
3228 return;
3229 }
3230
3231 assert(object->internal);
3232
3233 while (TRUE) {
3234 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
3235 vm_object_unlock(object);
3236 pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
3237 options & ~PMAP_OPTIONS_NOFLUSH, NULL);
3238 return;
3239 }
3240
3241 pmap_flush_context_init(&pmap_flush_context_storage);
3242 delayed_pmap_flush = FALSE;
3243
3244 /*
3245 * If the range is large relative to the resident page count
3246 * (e.g. 16 resident pages in a 1 MB range), iterate over the
3247 * resident pages; otherwise a per-offset look-up is faster.
3248 */
3249 if (ptoa_64(object->resident_page_count / 4) < size) {
3250 vm_page_t p;
3251 vm_object_offset_t end;
3252
3253 end = offset + size;
3254
3255 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
3256 if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
3257 vm_map_offset_t start;
3258
3259 start = pmap_start + p->offset - offset;
3260
3261 if (pmap != PMAP_NULL)
3262 pmap_protect_options(
3263 pmap,
3264 start,
3265 start + PAGE_SIZE_64,
3266 prot,
3267 options | PMAP_OPTIONS_NOFLUSH,
3268 &pmap_flush_context_storage);
3269 else
3270 pmap_page_protect_options(
3271 VM_PAGE_GET_PHYS_PAGE(p),
3272 prot,
3273 options | PMAP_OPTIONS_NOFLUSH,
3274 &pmap_flush_context_storage);
3275 delayed_pmap_flush = TRUE;
3276 }
3277 }
3278
3279 } else {
3280 vm_page_t p;
3281 vm_object_offset_t end;
3282 vm_object_offset_t target_off;
3283
3284 end = offset + size;
3285
3286 for (target_off = offset;
3287 target_off < end; target_off += PAGE_SIZE) {
3288
3289 p = vm_page_lookup(object, target_off);
3290
3291 if (p != VM_PAGE_NULL) {
3292 vm_object_offset_t start;
3293
3294 start = pmap_start + (p->offset - offset);
3295
3296 if (pmap != PMAP_NULL)
3297 pmap_protect_options(
3298 pmap,
3299 start,
3300 start + PAGE_SIZE_64,
3301 prot,
3302 options | PMAP_OPTIONS_NOFLUSH,
3303 &pmap_flush_context_storage);
3304 else
3305 pmap_page_protect_options(
3306 VM_PAGE_GET_PHYS_PAGE(p),
3307 prot,
3308 options | PMAP_OPTIONS_NOFLUSH,
3309 &pmap_flush_context_storage);
3310 delayed_pmap_flush = TRUE;
3311 }
3312 }
3313 }
3314 if (delayed_pmap_flush == TRUE)
3315 pmap_flush(&pmap_flush_context_storage);
3316
3317 if (prot == VM_PROT_NONE) {
3318 /*
3319 * Must follow shadow chain to remove access
3320 * to pages in shadowed objects.
3321 */
3322 vm_object_t next_object;
3323
3324 next_object = object->shadow;
3325 if (next_object != VM_OBJECT_NULL) {
3326 offset += object->vo_shadow_offset;
3327 vm_object_lock(next_object);
3328 vm_object_unlock(object);
3329 object = next_object;
3330 }
3331 else {
3332 /*
3333 * End of chain - we are done.
3334 */
3335 break;
3336 }
3337 }
3338 else {
3339 /*
3340 * Pages in shadowed objects may never have
3341 * write permission - we may stop here.
3342 */
3343 break;
3344 }
3345 }
3346
3347 vm_object_unlock(object);
3348 }
3349
3350 /*
3351 * Routine: vm_object_copy_slowly
3352 *
3353 * Description:
3354 * Copy the specified range of the source
3355 * virtual memory object without using
3356 * protection-based optimizations (such
3357 * as copy-on-write). The pages in the
3358 * region are actually copied.
3359 *
3360 * In/out conditions:
3361 * The caller must hold a reference and a lock
3362 * for the source virtual memory object. The source
3363 * object will be returned *unlocked*.
3364 *
3365 * Results:
3366 * If the copy is completed successfully, KERN_SUCCESS is
3367 * returned. If the caller asserted the interruptible
3368 * argument, and an interruption occurred while waiting
3369 * for a user-generated event, MACH_SEND_INTERRUPTED is
3370 * returned. Other values may be returned to indicate
3371 * hard errors during the copy operation.
3372 *
3373 * A new virtual memory object is returned in a
3374 * parameter (_result_object). The contents of this
3375 * new object, starting at a zero offset, are a copy
3376 * of the source memory region. In the event of
3377 * an error, this parameter will contain the value
3378 * VM_OBJECT_NULL.
3379 */
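
/*
 * An illustrative calling sequence, not taken from a real caller:
 * "src" is a hypothetical object the caller already holds a reference
 * on.  It only restates the in/out conditions above in code form.
 */
#if 0
	vm_object_t	src;			/* referenced by the caller */
	vm_object_t	result = VM_OBJECT_NULL;
	kern_return_t	kr;

	vm_object_lock(src);			/* caller supplies the lock... */
	kr = vm_object_copy_slowly(src, 0, src->vo_size,
				   FALSE,	/* not interruptible */
				   &result);
	/* ...and "src" comes back unlocked, whatever the outcome. */

	if (kr == KERN_SUCCESS) {
		/* "result" is a copy of that range, starting at offset 0. */
		vm_object_deallocate(result);	/* drop it when done */
	}
#endif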
3380 __private_extern__ kern_return_t
3381 vm_object_copy_slowly(
3382 vm_object_t src_object,
3383 vm_object_offset_t src_offset,
3384 vm_object_size_t size,
3385 boolean_t interruptible,
3386 vm_object_t *_result_object) /* OUT */
3387 {
3388 vm_object_t new_object;
3389 vm_object_offset_t new_offset;
3390
3391 struct vm_object_fault_info fault_info;
3392
3393 XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
3394 src_object, src_offset, size, 0, 0);
3395
3396 if (size == 0) {
3397 vm_object_unlock(src_object);
3398 *_result_object = VM_OBJECT_NULL;
3399 return(KERN_INVALID_ARGUMENT);
3400 }
3401
3402 /*
3403 * Prevent destruction of the source object while we copy.
3404 */
3405
3406 vm_object_reference_locked(src_object);
3407 vm_object_unlock(src_object);
3408
3409 /*
3410 * Create a new object to hold the copied pages.
3411 * A few notes:
3412 * We fill the new object starting at offset 0,
3413 * regardless of the input offset.
3414 * We don't bother to lock the new object within
3415 * this routine, since we have the only reference.
3416 */
3417
3418 new_object = vm_object_allocate(size);
3419 new_offset = 0;
3420
3421 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
3422
3423 fault_info.interruptible = interruptible;
3424 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
3425 fault_info.user_tag = 0;
3426 fault_info.pmap_options = 0;
3427 fault_info.lo_offset = src_offset;
3428 fault_info.hi_offset = src_offset + size;
3429 fault_info.no_cache = FALSE;
3430 fault_info.stealth = TRUE;
3431 fault_info.io_sync = FALSE;
3432 fault_info.cs_bypass = FALSE;
3433 fault_info.mark_zf_absent = FALSE;
3434 fault_info.batch_pmap_op = FALSE;
3435
3436 for ( ;
3437 size != 0 ;
3438 src_offset += PAGE_SIZE_64,
3439 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3440 ) {
3441 vm_page_t new_page;
3442 vm_fault_return_t result;
3443
3444 vm_object_lock(new_object);
3445
3446 while ((new_page = vm_page_alloc(new_object, new_offset))
3447 == VM_PAGE_NULL) {
3448
3449 vm_object_unlock(new_object);
3450
3451 if (!vm_page_wait(interruptible)) {
3452 vm_object_deallocate(new_object);
3453 vm_object_deallocate(src_object);
3454 *_result_object = VM_OBJECT_NULL;
3455 return(MACH_SEND_INTERRUPTED);
3456 }
3457 vm_object_lock(new_object);
3458 }
3459 vm_object_unlock(new_object);
3460
3461 do {
3462 vm_prot_t prot = VM_PROT_READ;
3463 vm_page_t _result_page;
3464 vm_page_t top_page;
3465 vm_page_t result_page;
3466 kern_return_t error_code;
3467 vm_object_t result_page_object;
3468
3469
3470 vm_object_lock(src_object);
3471
3472 if (src_object->internal &&
3473 src_object->shadow == VM_OBJECT_NULL &&
3474 (vm_page_lookup(src_object,
3475 src_offset) == VM_PAGE_NULL) &&
3476 (src_object->pager == NULL ||
3477 (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3478 src_offset) ==
3479 VM_EXTERNAL_STATE_ABSENT))) {
3480 /*
3481 * This page is neither resident nor compressed
3482 * and there's no shadow object below
3483 * "src_object", so this page is really missing.
3484 * There's no need to zero-fill it just to copy
3485 * it: let's leave it missing in "new_object"
3486 * and get zero-filled on demand.
3487 */
3488 vm_object_unlock(src_object);
3489 /* free the unused "new_page"... */
3490 vm_object_lock(new_object);
3491 VM_PAGE_FREE(new_page);
3492 new_page = VM_PAGE_NULL;
3493 vm_object_unlock(new_object);
3494 /* ...and go to next page in "src_object" */
3495 result = VM_FAULT_SUCCESS;
3496 break;
3497 }
3498
3499 vm_object_paging_begin(src_object);
3500
3501 if (size > (vm_size_t) -1) {
3502 /* 32-bit overflow */
3503 fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
3504 } else {
3505 fault_info.cluster_size = (vm_size_t) size;
3506 assert(fault_info.cluster_size == size);
3507 }
3508
3509 XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
3510 _result_page = VM_PAGE_NULL;
3511 result = vm_fault_page(src_object, src_offset,
3512 VM_PROT_READ, FALSE,
3513 FALSE, /* page not looked up */
3514 &prot, &_result_page, &top_page,
3515 (int *)0,
3516 &error_code, FALSE, FALSE, &fault_info);
3517
3518 switch(result) {
3519 case VM_FAULT_SUCCESS:
3520 result_page = _result_page;
3521 result_page_object = VM_PAGE_OBJECT(result_page);
3522
3523 /*
3524 * Copy the page to the new object.
3525 *
3526 * POLICY DECISION:
3527 * If result_page is clean,
3528 * we could steal it instead
3529 * of copying.
3530 */
3531
3532 vm_page_copy(result_page, new_page);
3533 vm_object_unlock(result_page_object);
3534
3535 /*
3536 * Let go of both pages (make them
3537 * not busy, perform wakeup, activate).
3538 */
3539 vm_object_lock(new_object);
3540 SET_PAGE_DIRTY(new_page, FALSE);
3541 PAGE_WAKEUP_DONE(new_page);
3542 vm_object_unlock(new_object);
3543
3544 vm_object_lock(result_page_object);
3545 PAGE_WAKEUP_DONE(result_page);
3546
3547 vm_page_lockspin_queues();
3548 if ((result_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3549 (result_page->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
3550 vm_page_activate(result_page);
3551 }
3552 vm_page_activate(new_page);
3553 vm_page_unlock_queues();
3554
3555 /*
3556 * Release paging references and
3557 * top-level placeholder page, if any.
3558 */
3559
3560 vm_fault_cleanup(result_page_object,
3561 top_page);
3562
3563 break;
3564
3565 case VM_FAULT_RETRY:
3566 break;
3567
3568 case VM_FAULT_MEMORY_SHORTAGE:
3569 if (vm_page_wait(interruptible))
3570 break;
3571 /* fall thru */
3572
3573 case VM_FAULT_INTERRUPTED:
3574 vm_object_lock(new_object);
3575 VM_PAGE_FREE(new_page);
3576 vm_object_unlock(new_object);
3577
3578 vm_object_deallocate(new_object);
3579 vm_object_deallocate(src_object);
3580 *_result_object = VM_OBJECT_NULL;
3581 return(MACH_SEND_INTERRUPTED);
3582
3583 case VM_FAULT_SUCCESS_NO_VM_PAGE:
3584 /* success but no VM page: fail */
3585 vm_object_paging_end(src_object);
3586 vm_object_unlock(src_object);
3587 /*FALLTHROUGH*/
3588 case VM_FAULT_MEMORY_ERROR:
3589 /*
3590 * A policy choice:
3591 * (a) ignore pages that we can't
3592 * copy
3593 * (b) return the null object if
3594 * any page fails [chosen]
3595 */
3596
3597 vm_object_lock(new_object);
3598 VM_PAGE_FREE(new_page);
3599 vm_object_unlock(new_object);
3600
3601 vm_object_deallocate(new_object);
3602 vm_object_deallocate(src_object);
3603 *_result_object = VM_OBJECT_NULL;
3604 return(error_code ? error_code:
3605 KERN_MEMORY_ERROR);
3606
3607 default:
3608 panic("vm_object_copy_slowly: unexpected error"
3609 " 0x%x from vm_fault_page()\n", result);
3610 }
3611 } while (result != VM_FAULT_SUCCESS);
3612 }
3613
3614 /*
3615 * Lose the extra reference, and return our object.
3616 */
3617 vm_object_deallocate(src_object);
3618 *_result_object = new_object;
3619 return(KERN_SUCCESS);
3620 }
3621
3622 /*
3623 * Routine: vm_object_copy_quickly
3624 *
3625 * Purpose:
3626 * Copy the specified range of the source virtual
3627 * memory object, if it can be done without waiting
3628 * for user-generated events.
3629 *
3630 * Results:
3631 * If the copy is successful, the copy is returned in
3632 * the arguments; otherwise, the arguments are not
3633 * affected.
3634 *
3635 * In/out conditions:
3636 * The object should be unlocked on entry and exit.
3637 */
3638
3639 /*ARGSUSED*/
3640 __private_extern__ boolean_t
3641 vm_object_copy_quickly(
3642 vm_object_t *_object, /* INOUT */
3643 __unused vm_object_offset_t offset, /* IN */
3644 __unused vm_object_size_t size, /* IN */
3645 boolean_t *_src_needs_copy, /* OUT */
3646 boolean_t *_dst_needs_copy) /* OUT */
3647 {
3648 vm_object_t object = *_object;
3649 memory_object_copy_strategy_t copy_strategy;
3650
3651 XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
3652 *_object, offset, size, 0, 0);
3653 if (object == VM_OBJECT_NULL) {
3654 *_src_needs_copy = FALSE;
3655 *_dst_needs_copy = FALSE;
3656 return(TRUE);
3657 }
3658
3659 vm_object_lock(object);
3660
3661 copy_strategy = object->copy_strategy;
3662
3663 switch (copy_strategy) {
3664 case MEMORY_OBJECT_COPY_SYMMETRIC:
3665
3666 /*
3667 * Symmetric copy strategy.
3668 * Make another reference to the object.
3669 * Leave object/offset unchanged.
3670 */
3671
3672 vm_object_reference_locked(object);
3673 object->shadowed = TRUE;
3674 vm_object_unlock(object);
3675
3676 /*
3677 * Both source and destination must make
3678 * shadows, and the source must be made
3679 * read-only if not already.
3680 */
3681
3682 *_src_needs_copy = TRUE;
3683 *_dst_needs_copy = TRUE;
3684
3685 break;
3686
3687 case MEMORY_OBJECT_COPY_DELAY:
3688 vm_object_unlock(object);
3689 return(FALSE);
3690
3691 default:
3692 vm_object_unlock(object);
3693 return(FALSE);
3694 }
3695 return(TRUE);
3696 }
3697
3698 static int copy_call_count = 0;
3699 static int copy_call_sleep_count = 0;
3700 static int copy_call_restart_count = 0;
3701
3702 /*
3703 * Routine: vm_object_copy_call [internal]
3704 *
3705 * Description:
3706 * Copy the source object (src_object), using the
3707 * user-managed copy algorithm.
3708 *
3709 * In/out conditions:
3710 * The source object must be locked on entry. It
3711 * will be *unlocked* on exit.
3712 *
3713 * Results:
3714 * If the copy is successful, KERN_SUCCESS is returned.
3715 * A new object that represents the copied virtual
3716 * memory is returned in a parameter (*_result_object).
3717 * If the return value indicates an error, this parameter
3718 * is not valid.
3719 */
3720 static kern_return_t
3721 vm_object_copy_call(
3722 vm_object_t src_object,
3723 vm_object_offset_t src_offset,
3724 vm_object_size_t size,
3725 vm_object_t *_result_object) /* OUT */
3726 {
3727 kern_return_t kr;
3728 vm_object_t copy;
3729 boolean_t check_ready = FALSE;
3730 uint32_t try_failed_count = 0;
3731
3732 /*
3733 * If a copy is already in progress, wait and retry.
3734 *
3735 * XXX
3736 * Consider making this call interruptible, as Mike
3737 * intended it to be.
3738 *
3739 * XXXO
3740 * Need a counter or version or something to allow
3741 * us to use the copy that the currently requesting
3742 * thread is obtaining -- is it worth adding to the
3743 * vm object structure? Depends how common this case it.
3744 */
3745 copy_call_count++;
3746 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3747 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3748 THREAD_UNINT);
3749 copy_call_restart_count++;
3750 }
3751
3752 /*
3753 * Indicate (for the benefit of memory_object_create_copy)
3754 * that we want a copy for src_object. (Note that we cannot
3755 * do a real assert_wait before calling memory_object_copy,
3756 * so we simply set the flag.)
3757 */
3758
3759 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
3760 vm_object_unlock(src_object);
3761
3762 /*
3763 * Ask the memory manager to give us a memory object
3764 * which represents a copy of the src object.
3765 * The memory manager may give us a memory object
3766 * which we already have, or it may give us a
3767 * new memory object. This memory object will arrive
3768 * via memory_object_create_copy.
3769 */
3770
3771 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
3772 if (kr != KERN_SUCCESS) {
3773 return kr;
3774 }
3775
3776 /*
3777 * Wait for the copy to arrive.
3778 */
3779 vm_object_lock(src_object);
3780 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3781 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3782 THREAD_UNINT);
3783 copy_call_sleep_count++;
3784 }
3785 Retry:
3786 assert(src_object->copy != VM_OBJECT_NULL);
3787 copy = src_object->copy;
3788 if (!vm_object_lock_try(copy)) {
3789 vm_object_unlock(src_object);
3790
3791 try_failed_count++;
3792 mutex_pause(try_failed_count); /* wait a bit */
3793
3794 vm_object_lock(src_object);
3795 goto Retry;
3796 }
3797 if (copy->vo_size < src_offset+size)
3798 copy->vo_size = src_offset+size;
3799
3800 if (!copy->pager_ready)
3801 check_ready = TRUE;
3802
3803 /*
3804 * Return the copy.
3805 */
3806 *_result_object = copy;
3807 vm_object_unlock(copy);
3808 vm_object_unlock(src_object);
3809
3810 /* Wait for the copy to be ready. */
3811 if (check_ready == TRUE) {
3812 vm_object_lock(copy);
3813 while (!copy->pager_ready) {
3814 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
3815 }
3816 vm_object_unlock(copy);
3817 }
3818
3819 return KERN_SUCCESS;
3820 }
3821
3822 static int copy_delayed_lock_collisions = 0;
3823 static int copy_delayed_max_collisions = 0;
3824 static int copy_delayed_lock_contention = 0;
3825 static int copy_delayed_protect_iterate = 0;
3826
3827 /*
3828 * Routine: vm_object_copy_delayed [internal]
3829 *
3830 * Description:
3831 * Copy the specified virtual memory object, using
3832 * the asymmetric copy-on-write algorithm.
3833 *
3834 * In/out conditions:
3835 * The src_object must be locked on entry. It will be unlocked
3836 * on exit - so the caller must also hold a reference to it.
3837 *
3838 * This routine will not block waiting for user-generated
3839 * events. It is not interruptible.
3840 */
3841 __private_extern__ vm_object_t
3842 vm_object_copy_delayed(
3843 vm_object_t src_object,
3844 vm_object_offset_t src_offset,
3845 vm_object_size_t size,
3846 boolean_t src_object_shared)
3847 {
3848 vm_object_t new_copy = VM_OBJECT_NULL;
3849 vm_object_t old_copy;
3850 vm_page_t p;
3851 vm_object_size_t copy_size = src_offset + size;
3852 pmap_flush_context pmap_flush_context_storage;
3853 boolean_t delayed_pmap_flush = FALSE;
3854
3855
3856 int collisions = 0;
3857 /*
3858 * The user-level memory manager wants to see all of the changes
3859 * to this object, but it has promised not to make any changes on
3860 * its own.
3861 *
3862 * Perform an asymmetric copy-on-write, as follows:
3863 * Create a new object, called a "copy object" to hold
3864 * pages modified by the new mapping (i.e., the copy,
3865 * not the original mapping).
3866 * Record the original object as the backing object for
3867 * the copy object. If the original mapping does not
3868 * change a page, it may be used read-only by the copy.
3869 * Record the copy object in the original object.
3870 * When the original mapping causes a page to be modified,
3871 * it must be copied to a new page that is "pushed" to
3872 * the copy object.
3873 * Mark the new mapping (the copy object) copy-on-write.
3874 * This makes the copy object itself read-only, allowing
3875 * it to be reused if the original mapping makes no
3876 * changes, and simplifying the synchronization required
3877 * in the "push" operation described above.
3878 *
3879 * The copy-on-write is said to be asymmetric because the original
3880 * object is *not* marked copy-on-write. A copied page is pushed
3881 * to the copy object, regardless of which party attempted to modify
3882 * the page.
3883 *
3884 * Repeated asymmetric copy operations may be done. If the
3885 * original object has not been changed since the last copy, its
3886 * copy object can be reused. Otherwise, a new copy object can be
3887 * inserted between the original object and its previous copy
3888 * object. Since any copy object is read-only, this cannot
3889 * affect the contents of the previous copy object.
3890 *
3891 * Note that a copy object is higher in the object tree than the
3892 * original object; therefore, use of the copy object recorded in
3893 * the original object must be done carefully, to avoid deadlock.
3894 */
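	/*
	 * A sketch of the wiring this produces (illustrative only;
	 * "copy_1" and "copy_2" are hypothetical names for successive
	 * copy objects, set up by the code further down):
	 *
	 *	first delayed copy:
	 *		copy_1->shadow   = src_object;
	 *		src_object->copy = copy_1;
	 *
	 *	second delayed copy, after src_object has changed again:
	 *		copy_2->shadow   = src_object;
	 *		copy_1->shadow   = copy_2;   [old copy now shadows the new one]
	 *		src_object->copy = copy_2;
	 */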
3895
3896 copy_size = vm_object_round_page(copy_size);
3897 Retry:
3898
3899 /*
3900 * Wait for paging in progress.
3901 */
3902 if (!src_object->true_share &&
3903 (src_object->paging_in_progress != 0 ||
3904 src_object->activity_in_progress != 0)) {
3905 if (src_object_shared == TRUE) {
3906 vm_object_unlock(src_object);
3907 vm_object_lock(src_object);
3908 src_object_shared = FALSE;
3909 goto Retry;
3910 }
3911 vm_object_paging_wait(src_object, THREAD_UNINT);
3912 }
3913 /*
3914 * See whether we can reuse the result of a previous
3915 * copy operation.
3916 */
3917
3918 old_copy = src_object->copy;
3919 if (old_copy != VM_OBJECT_NULL) {
3920 int lock_granted;
3921
3922 /*
3923 * Try to get the locks (out of order)
3924 */
3925 if (src_object_shared == TRUE)
3926 lock_granted = vm_object_lock_try_shared(old_copy);
3927 else
3928 lock_granted = vm_object_lock_try(old_copy);
3929
3930 if (!lock_granted) {
3931 vm_object_unlock(src_object);
3932
3933 if (collisions++ == 0)
3934 copy_delayed_lock_contention++;
3935 mutex_pause(collisions);
3936
3937 /* Heisenberg Rules */
3938 copy_delayed_lock_collisions++;
3939
3940 if (collisions > copy_delayed_max_collisions)
3941 copy_delayed_max_collisions = collisions;
3942
3943 if (src_object_shared == TRUE)
3944 vm_object_lock_shared(src_object);
3945 else
3946 vm_object_lock(src_object);
3947
3948 goto Retry;
3949 }
3950
3951 /*
3952 * Determine whether the old copy object has
3953 * been modified.
3954 */
3955
3956 if (old_copy->resident_page_count == 0 &&
3957 !old_copy->pager_created) {
3958 /*
3959 * It has not been modified.
3960 *
3961 * Return another reference to
3962 * the existing copy-object if
3963 * we can safely grow it (if
3964 * needed).
3965 */
3966
3967 if (old_copy->vo_size < copy_size) {
3968 if (src_object_shared == TRUE) {
3969 vm_object_unlock(old_copy);
3970 vm_object_unlock(src_object);
3971
3972 vm_object_lock(src_object);
3973 src_object_shared = FALSE;
3974 goto Retry;
3975 }
3976 /*
3977 * We can't perform a delayed copy if any of the
3978 * pages in the extended range are wired (because
3979 * we can't safely take write permission away from
3980 * wired pages). If the pages aren't wired, then
3981 * go ahead and protect them.
3982 */
3983 copy_delayed_protect_iterate++;
3984
3985 pmap_flush_context_init(&pmap_flush_context_storage);
3986 delayed_pmap_flush = FALSE;
3987
3988 vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
3989 if (!p->fictitious &&
3990 p->offset >= old_copy->vo_size &&
3991 p->offset < copy_size) {
3992 if (VM_PAGE_WIRED(p)) {
3993 vm_object_unlock(old_copy);
3994 vm_object_unlock(src_object);
3995
3996 if (new_copy != VM_OBJECT_NULL) {
3997 vm_object_unlock(new_copy);
3998 vm_object_deallocate(new_copy);
3999 }
4000 if (delayed_pmap_flush == TRUE)
4001 pmap_flush(&pmap_flush_context_storage);
4002
4003 return VM_OBJECT_NULL;
4004 } else {
4005 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
4006 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4007 delayed_pmap_flush = TRUE;
4008 }
4009 }
4010 }
4011 if (delayed_pmap_flush == TRUE)
4012 pmap_flush(&pmap_flush_context_storage);
4013
4014 old_copy->vo_size = copy_size;
4015 }
4016 if (src_object_shared == TRUE)
4017 vm_object_reference_shared(old_copy);
4018 else
4019 vm_object_reference_locked(old_copy);
4020 vm_object_unlock(old_copy);
4021 vm_object_unlock(src_object);
4022
4023 if (new_copy != VM_OBJECT_NULL) {
4024 vm_object_unlock(new_copy);
4025 vm_object_deallocate(new_copy);
4026 }
4027 return(old_copy);
4028 }
4029
4030
4031
4032 /*
4033 * Adjust the size argument so that the newly-created
4034 * copy object will be large enough to back either the
4035 * old copy object or the new mapping.
4036 */
4037 if (old_copy->vo_size > copy_size)
4038 copy_size = old_copy->vo_size;
4039
4040 if (new_copy == VM_OBJECT_NULL) {
4041 vm_object_unlock(old_copy);
4042 vm_object_unlock(src_object);
4043 new_copy = vm_object_allocate(copy_size);
4044 vm_object_lock(src_object);
4045 vm_object_lock(new_copy);
4046
4047 src_object_shared = FALSE;
4048 goto Retry;
4049 }
4050 new_copy->vo_size = copy_size;
4051
4052 /*
4053 * The copy-object is always made large enough to
4054 * completely shadow the original object, since
4055 * it may have several users who want to shadow
4056 * the original object at different points.
4057 */
4058
4059 assert((old_copy->shadow == src_object) &&
4060 (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
4061
4062 } else if (new_copy == VM_OBJECT_NULL) {
4063 vm_object_unlock(src_object);
4064 new_copy = vm_object_allocate(copy_size);
4065 vm_object_lock(src_object);
4066 vm_object_lock(new_copy);
4067
4068 src_object_shared = FALSE;
4069 goto Retry;
4070 }
4071
4072 /*
4073 * We now have the src object locked, and the new copy object
4074 * allocated and locked (and potentially the old copy locked).
4075 * Before we go any further, make sure we can still perform
4076 * a delayed copy, as the situation may have changed.
4077 *
4078 * Specifically, we can't perform a delayed copy if any of the
4079 * pages in the range are wired (because we can't safely take
4080 * write permission away from wired pages). If the pages aren't
4081 * wired, then go ahead and protect them.
4082 */
4083 copy_delayed_protect_iterate++;
4084
4085 pmap_flush_context_init(&pmap_flush_context_storage);
4086 delayed_pmap_flush = FALSE;
4087
4088 vm_page_queue_iterate(&src_object->memq, p, vm_page_t, listq) {
4089 if (!p->fictitious && p->offset < copy_size) {
4090 if (VM_PAGE_WIRED(p)) {
4091 if (old_copy)
4092 vm_object_unlock(old_copy);
4093 vm_object_unlock(src_object);
4094 vm_object_unlock(new_copy);
4095 vm_object_deallocate(new_copy);
4096
4097 if (delayed_pmap_flush == TRUE)
4098 pmap_flush(&pmap_flush_context_storage);
4099
4100 return VM_OBJECT_NULL;
4101 } else {
4102 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
4103 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4104 delayed_pmap_flush = TRUE;
4105 }
4106 }
4107 }
4108 if (delayed_pmap_flush == TRUE)
4109 pmap_flush(&pmap_flush_context_storage);
4110
4111 if (old_copy != VM_OBJECT_NULL) {
4112 /*
4113 * Make the old copy-object shadow the new one.
4114 * It will receive no more pages from the original
4115 * object.
4116 */
4117
4118 /* remove ref. from old_copy */
4119 vm_object_lock_assert_exclusive(src_object);
4120 src_object->ref_count--;
4121 assert(src_object->ref_count > 0);
4122 vm_object_lock_assert_exclusive(old_copy);
4123 old_copy->shadow = new_copy;
4124 vm_object_lock_assert_exclusive(new_copy);
4125 assert(new_copy->ref_count > 0);
4126 new_copy->ref_count++; /* for old_copy->shadow ref. */
4127
4128 #if TASK_SWAPPER
4129 if (old_copy->res_count) {
4130 VM_OBJ_RES_INCR(new_copy);
4131 VM_OBJ_RES_DECR(src_object);
4132 }
4133 #endif
4134
4135 vm_object_unlock(old_copy); /* done with old_copy */
4136 }
4137
4138 /*
4139 * Point the new copy at the existing object.
4140 */
4141 vm_object_lock_assert_exclusive(new_copy);
4142 new_copy->shadow = src_object;
4143 new_copy->vo_shadow_offset = 0;
4144 new_copy->shadowed = TRUE; /* caller must set needs_copy */
4145
4146 vm_object_lock_assert_exclusive(src_object);
4147 vm_object_reference_locked(src_object);
4148 src_object->copy = new_copy;
4149 vm_object_unlock(src_object);
4150 vm_object_unlock(new_copy);
4151
4152 XPR(XPR_VM_OBJECT,
4153 "vm_object_copy_delayed: used copy object %X for source %X\n",
4154 new_copy, src_object, 0, 0, 0);
4155
4156 return new_copy;
4157 }
4158
4159 /*
4160 * Routine: vm_object_copy_strategically
4161 *
4162 * Purpose:
4163 * Perform a copy according to the source object's
4164 * declared strategy. This operation may block,
4165 * and may be interrupted.
4166 */
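/*
 * Illustrative sketch only (hypothetical caller, not part of the kernel's
 * actual call graph): how the OUT parameters are typically consumed.
 */
#if 0
static kern_return_t
vm_object_copy_strategically_usage_sketch(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size)
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;
	boolean_t		needs_copy;
	kern_return_t		kr;

	kr = vm_object_copy_strategically(src_object, src_offset, size,
					  &new_object, &new_offset,
					  &needs_copy);
	if (kr == KERN_MEMORY_RESTART_COPY) {
		/* symmetric strategy: caller must fall back to another copy path */
	} else if (kr == KERN_SUCCESS) {
		/*
		 * Map new_object at new_offset; if needs_copy is TRUE,
		 * the caller must mark the new mapping copy-on-write.
		 */
	}
	return kr;
}
#endif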
4167 __private_extern__ kern_return_t
4168 vm_object_copy_strategically(
4169 vm_object_t src_object,
4170 vm_object_offset_t src_offset,
4171 vm_object_size_t size,
4172 vm_object_t *dst_object, /* OUT */
4173 vm_object_offset_t *dst_offset, /* OUT */
4174 boolean_t *dst_needs_copy) /* OUT */
4175 {
4176 kern_return_t result;
4177 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
4178 boolean_t object_lock_shared = FALSE;
4179 memory_object_copy_strategy_t copy_strategy;
4180
4181 assert(src_object != VM_OBJECT_NULL);
4182
4183 copy_strategy = src_object->copy_strategy;
4184
4185 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
4186 vm_object_lock_shared(src_object);
4187 object_lock_shared = TRUE;
4188 } else
4189 vm_object_lock(src_object);
4190
4191 /*
4192 * The copy strategy is only valid if the memory manager
4193 * is "ready". Internal objects are always ready.
4194 */
4195
4196 while (!src_object->internal && !src_object->pager_ready) {
4197 wait_result_t wait_result;
4198
4199 if (object_lock_shared == TRUE) {
4200 vm_object_unlock(src_object);
4201 vm_object_lock(src_object);
4202 object_lock_shared = FALSE;
4203 continue;
4204 }
4205 wait_result = vm_object_sleep( src_object,
4206 VM_OBJECT_EVENT_PAGER_READY,
4207 interruptible);
4208 if (wait_result != THREAD_AWAKENED) {
4209 vm_object_unlock(src_object);
4210 *dst_object = VM_OBJECT_NULL;
4211 *dst_offset = 0;
4212 *dst_needs_copy = FALSE;
4213 return(MACH_SEND_INTERRUPTED);
4214 }
4215 }
4216
4217 /*
4218 * Use the appropriate copy strategy.
4219 */
4220
4221 switch (copy_strategy) {
4222 case MEMORY_OBJECT_COPY_DELAY:
4223 *dst_object = vm_object_copy_delayed(src_object,
4224 src_offset, size, object_lock_shared);
4225 if (*dst_object != VM_OBJECT_NULL) {
4226 *dst_offset = src_offset;
4227 *dst_needs_copy = TRUE;
4228 result = KERN_SUCCESS;
4229 break;
4230 }
4231 vm_object_lock(src_object);
4232 /* fall thru when delayed copy not allowed */
4233
4234 case MEMORY_OBJECT_COPY_NONE:
4235 result = vm_object_copy_slowly(src_object, src_offset, size,
4236 interruptible, dst_object);
4237 if (result == KERN_SUCCESS) {
4238 *dst_offset = 0;
4239 *dst_needs_copy = FALSE;
4240 }
4241 break;
4242
4243 case MEMORY_OBJECT_COPY_CALL:
4244 result = vm_object_copy_call(src_object, src_offset, size,
4245 dst_object);
4246 if (result == KERN_SUCCESS) {
4247 *dst_offset = src_offset;
4248 *dst_needs_copy = TRUE;
4249 }
4250 break;
4251
4252 case MEMORY_OBJECT_COPY_SYMMETRIC:
4253 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
4254 vm_object_unlock(src_object);
4255 result = KERN_MEMORY_RESTART_COPY;
4256 break;
4257
4258 default:
4259 panic("copy_strategically: bad strategy");
4260 result = KERN_INVALID_ARGUMENT;
4261 }
4262 return(result);
4263 }
4264
4265 /*
4266 * vm_object_shadow:
4267 *
4268 * Create a new object which is backed by the
4269 * specified existing object range. The source
4270 * object reference is deallocated.
4271 *
4272 * The new object and offset into that object
4273 * are returned in the source parameters.
4274 */
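/*
 * Illustrative sketch only (hypothetical caller): the object and offset
 * typically come from a map entry and are updated in place; the caller's
 * reference to the old object is absorbed by the new shadow link.
 */
#if 0
static void
vm_object_shadow_usage_sketch(
	vm_object_t		*entry_object,	/* IN/OUT, hypothetical */
	vm_object_offset_t	*entry_offset,	/* IN/OUT, hypothetical */
	vm_object_size_t	length)
{
	if (vm_object_shadow(entry_object, entry_offset, length)) {
		/* *entry_object / *entry_offset now name the new shadow object */
	} else {
		/* no shadow was needed; object and offset are unchanged */
	}
}
#endif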
4275 boolean_t vm_object_shadow_check = TRUE;
4276
4277 __private_extern__ boolean_t
4278 vm_object_shadow(
4279 vm_object_t *object, /* IN/OUT */
4280 vm_object_offset_t *offset, /* IN/OUT */
4281 vm_object_size_t length)
4282 {
4283 vm_object_t source;
4284 vm_object_t result;
4285
4286 source = *object;
4287 assert(source != VM_OBJECT_NULL);
4288 if (source == VM_OBJECT_NULL)
4289 return FALSE;
4290
4291 #if 0
4292 /*
4293 * XXX FBDP
4294 * This assertion is valid but it gets triggered by Rosetta for example
4295 * due to a combination of vm_remap() that changes a VM object's
4296 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
4297 * that then sets "needs_copy" on its map entry. This creates a
4298 * mapping situation that VM should never see and doesn't know how to
4299 * handle.
4300 * It's not clear if this can create any real problem but we should
4301 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
4302 * do more than just set "needs_copy" to handle the copy-on-write...
4303 * In the meantime, let's disable the assertion.
4304 */
4305 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
4306 #endif
4307
4308 /*
4309 * Determine if we really need a shadow.
4310 *
4311 * If the source object is larger than what we are trying
4312 * to create, then force the shadow creation even if the
4313 * ref count is 1. This will allow us to [potentially]
4314 * collapse the underlying object away in the future
4315 * (freeing up the extra data it might contain and that
4316 * we don't need).
4317 */
4318
4319 assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
4320
4321 if (vm_object_shadow_check &&
4322 source->vo_size == length &&
4323 source->ref_count == 1 &&
4324 (source->shadow == VM_OBJECT_NULL ||
4325 source->shadow->copy == VM_OBJECT_NULL) )
4326 {
4327 /* lock the object and check again */
4328 vm_object_lock(source);
4329 if (source->vo_size == length &&
4330 source->ref_count == 1 &&
4331 (source->shadow == VM_OBJECT_NULL ||
4332 source->shadow->copy == VM_OBJECT_NULL))
4333 {
4334 source->shadowed = FALSE;
4335 vm_object_unlock(source);
4336 return FALSE;
4337 }
4338 /* things changed while we were locking "source"... */
4339 vm_object_unlock(source);
4340 }
4341
4342 /*
4343 * Allocate a new object with the given length
4344 */
4345
4346 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
4347 panic("vm_object_shadow: no object for shadowing");
4348
4349 /*
4350 * The new object shadows the source object, adding
4351 * a reference to it. Our caller changes his reference
4352 * to point to the new object, removing a reference to
4353 * the source object. Net result: no change of reference
4354 * count.
4355 */
4356 result->shadow = source;
4357
4358 /*
4359 * Store the offset into the source object,
4360 * and fix up the offset into the new object.
4361 */
4362
4363 result->vo_shadow_offset = *offset;
4364
4365 /*
4366 * Return the new object and offset
4367 */
4368
4369 *offset = 0;
4370 *object = result;
4371 return TRUE;
4372 }
4373
4374 /*
4375 * The relationship between vm_object structures and
4376 * the memory_object requires careful synchronization.
4377 *
4378 * All associations are created by memory_object_create_named
4379 * for external pagers and vm_object_compressor_pager_create for internal
4380 * objects as follows:
4381 *
4382 * pager: the memory_object itself, supplied by
4383 * the user requesting a mapping (or the kernel,
4384 * when initializing internal objects); the
4385 * kernel simulates holding send rights by keeping
4386 * a port reference;
4387 *
4388 * pager_request:
4389 * the memory object control port,
4390 * created by the kernel; the kernel holds
4391 * receive (and ownership) rights to this
4392 * port, but no other references.
4393 *
4394 * When initialization is complete, the "initialized" field
4395 * is asserted. Other mappings using a particular memory object,
4396 * and any references to the vm_object gained through the
4397 * port association must wait for this initialization to occur.
4398 *
4399 * In order to allow the memory manager to set attributes before
4400 * requests (notably virtual copy operations, but also data or
4401 * unlock requests) are made, a "ready" attribute is made available.
4402 * Only the memory manager may affect the value of this attribute.
4403 * Its value does not affect critical kernel functions, such as
4404 * internal object initialization or destruction. [Furthermore,
4405 * memory objects created by the kernel are assumed to be ready
4406 * immediately; the default memory manager need not explicitly
4407 * set the "ready" attribute.]
4408 *
4409 * [Both the "initialized" and "ready" attribute wait conditions
4410 * use the "pager" field as the wait event.]
4411 *
4412 * The port associations can be broken down by any of the
4413 * following routines:
4414 * vm_object_terminate:
4415 * No references to the vm_object remain, and
4416 * the object cannot (or will not) be cached.
4417 * This is the normal case, and is done even
4418 * though one of the other cases has already been
4419 * done.
4420 * memory_object_destroy:
4421 * The memory manager has requested that the
4422 * kernel relinquish references to the memory
4423 * object. [The memory manager may not want to
4424 * destroy the memory object, but may wish to
4425 * refuse or tear down existing memory mappings.]
4426 *
4427 * Each routine that breaks an association must break all of
4428 * them at once. At some later time, that routine must clear
4429 * the pager field and release the memory object references.
4430 * [Furthermore, each routine must cope with the simultaneous
4431 * or previous operations of the others.]
4432 *
4433 * In addition to the lock on the object, the vm_object_hash_lock
4434 * governs the associations. References gained through the
4435 * association require use of the hash lock.
4436 *
4437 * Because the pager field may be cleared spontaneously, it
4438 * cannot be used to determine whether a memory object has
4439 * ever been associated with a particular vm_object. [This
4440 * knowledge is important to the shadow object mechanism.]
4441 * For this reason, an additional "created" attribute is
4442 * provided.
4443 *
4444 * During various paging operations, the pager reference found in the
4445 * vm_object must be valid. To prevent this from being released
4446 * (other than being removed, i.e., made null), routines may use
4447 * the vm_object_paging_begin/end routines [actually, macros].
4448 * The implementation uses the "paging_in_progress" and "wanted" fields.
4449 * [Operations that alter the validity of the pager values include the
4450 * termination routines and vm_object_collapse.]
4451 */
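/*
 * Illustrative sketch only: the "initialized"/"ready" wait pattern implied
 * by the description above, as it appears elsewhere in this file (the
 * object must be locked around the sleep).
 */
#if 0
static void
vm_object_wait_for_pager_sketch(vm_object_t object)	/* hypothetical helper */
{
	vm_object_lock(object);
	while (!object->pager_initialized) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_INITIALIZED,
				THREAD_UNINT);
	}
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
}
#endif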
4452
4453
4454 /*
4455 * Routine: vm_object_enter
4456 * Purpose:
4457 * Find a VM object corresponding to the given
4458 * pager; if no such object exists, create one,
4459 * and initialize the pager.
4460 */
4461 vm_object_t
4462 vm_object_enter(
4463 memory_object_t pager,
4464 vm_object_size_t size,
4465 boolean_t internal,
4466 boolean_t init,
4467 boolean_t named)
4468 {
4469 vm_object_t object;
4470 vm_object_t new_object;
4471 boolean_t must_init;
4472 vm_object_hash_entry_t entry, new_entry;
4473 uint32_t try_failed_count = 0;
4474 lck_mtx_t *lck;
4475
4476 if (pager == MEMORY_OBJECT_NULL)
4477 return(vm_object_allocate(size));
4478
4479 new_object = VM_OBJECT_NULL;
4480 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
4481 must_init = init;
4482
4483 /*
4484 * Look for an object associated with this port.
4485 */
4486 Retry:
4487 lck = vm_object_hash_lock_spin(pager);
4488 do {
4489 entry = vm_object_hash_lookup(pager, FALSE);
4490
4491 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
4492 if (new_object == VM_OBJECT_NULL) {
4493 /*
4494 * We must unlock to create a new object;
4495 * if we do so, we must try the lookup again.
4496 */
4497 vm_object_hash_unlock(lck);
4498 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
4499 new_entry = vm_object_hash_entry_alloc(pager);
4500 new_object = vm_object_allocate(size);
4501 /*
4502 * Set new_object->hashed now, while no one
4503 * knows about this object yet and we
4504 * don't need to lock it. Once it's in
4505 * the hash table, we would have to lock
4506 * the object to set its "hashed" bit and
4507 * we can't lock the object while holding
4508 * the hash lock as a spinlock...
4509 */
4510 new_object->hashed = TRUE;
4511 lck = vm_object_hash_lock_spin(pager);
4512 } else {
4513 /*
4514 * Lookup failed twice, and we have something
4515 * to insert; set the object.
4516 */
4517 /*
4518 * We can't lock the object here since we're
4519 * holding the hash lock as a spin lock.
4520 * We've already pre-set "new_object->hashed"
4521 * when we created "new_object" above, so we
4522 * won't need to modify the object in
4523 * vm_object_hash_insert().
4524 */
4525 assert(new_object->hashed);
4526 vm_object_hash_insert(new_entry, new_object);
4527 entry = new_entry;
4528 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
4529 new_object = VM_OBJECT_NULL;
4530 must_init = TRUE;
4531 }
4532 } else if (entry->object == VM_OBJECT_NULL) {
4533 /*
4534 * If a previous object is being terminated,
4535 * we must wait for the termination message
4536 * to be queued (and look up the entry again).
4537 */
4538 entry->waiting = TRUE;
4539 entry = VM_OBJECT_HASH_ENTRY_NULL;
4540 assert_wait((event_t) pager, THREAD_UNINT);
4541 vm_object_hash_unlock(lck);
4542
4543 thread_block(THREAD_CONTINUE_NULL);
4544 lck = vm_object_hash_lock_spin(pager);
4545 }
4546 } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
4547
4548 object = entry->object;
4549 assert(object != VM_OBJECT_NULL);
4550
4551 if (!must_init) {
4552 if ( !vm_object_lock_try(object)) {
4553
4554 vm_object_hash_unlock(lck);
4555
4556 try_failed_count++;
4557 mutex_pause(try_failed_count); /* wait a bit */
4558 goto Retry;
4559 }
4560 assert(!internal || object->internal);
4561 #if VM_OBJECT_CACHE
4562 if (object->ref_count == 0) {
4563 if ( !vm_object_cache_lock_try()) {
4564
4565 vm_object_hash_unlock(lck);
4566 vm_object_unlock(object);
4567
4568 try_failed_count++;
4569 mutex_pause(try_failed_count); /* wait a bit */
4570 goto Retry;
4571 }
4572 XPR(XPR_VM_OBJECT_CACHE,
4573 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
4574 object,
4575 vm_object_cached_list.next,
4576 vm_object_cached_list.prev, 0,0);
4577 queue_remove(&vm_object_cached_list, object,
4578 vm_object_t, cached_list);
4579 vm_object_cached_count--;
4580
4581 vm_object_cache_unlock();
4582 }
4583 #endif
4584 if (named) {
4585 assert(!object->named);
4586 object->named = TRUE;
4587 }
4588 vm_object_lock_assert_exclusive(object);
4589 object->ref_count++;
4590 vm_object_res_reference(object);
4591
4592 vm_object_hash_unlock(lck);
4593 vm_object_unlock(object);
4594
4595 VM_STAT_INCR(hits);
4596 } else
4597 vm_object_hash_unlock(lck);
4598
4599 assert(object->ref_count > 0);
4600
4601 VM_STAT_INCR(lookups);
4602
4603 XPR(XPR_VM_OBJECT,
4604 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
4605 pager, object, must_init, 0, 0);
4606
4607 /*
4608 * If we raced to create a vm_object but lost, let's
4609 * throw away ours.
4610 */
4611
4612 if (new_object != VM_OBJECT_NULL) {
4613 /*
4614 * Undo the pre-setting of "new_object->hashed" before
4615 * deallocating "new_object", since we did not insert it
4616 * into the hash table after all.
4617 */
4618 assert(new_object->hashed);
4619 new_object->hashed = FALSE;
4620 vm_object_deallocate(new_object);
4621 }
4622
4623 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
4624 vm_object_hash_entry_free(new_entry);
4625
4626 if (must_init) {
4627 memory_object_control_t control;
4628
4629 /*
4630 * Allocate request port.
4631 */
4632
4633 control = memory_object_control_allocate(object);
4634 assert (control != MEMORY_OBJECT_CONTROL_NULL);
4635
4636 vm_object_lock(object);
4637 assert(object != kernel_object);
4638
4639 /*
4640 * Copy the reference we were given.
4641 */
4642
4643 memory_object_reference(pager);
4644 object->pager_created = TRUE;
4645 object->pager = pager;
4646 object->internal = internal;
4647 object->pager_trusted = internal;
4648 if (!internal) {
4649 /* copy strategy invalid until set by memory manager */
4650 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4651 }
4652 object->pager_control = control;
4653 object->pager_ready = FALSE;
4654
4655 vm_object_unlock(object);
4656
4657 /*
4658 * Let the pager know we're using it.
4659 */
4660
4661 (void) memory_object_init(pager,
4662 object->pager_control,
4663 PAGE_SIZE);
4664
4665 vm_object_lock(object);
4666 if (named)
4667 object->named = TRUE;
4668 if (internal) {
4669 vm_object_lock_assert_exclusive(object);
4670 object->pager_ready = TRUE;
4671 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4672 }
4673
4674 object->pager_initialized = TRUE;
4675 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4676 } else {
4677 vm_object_lock(object);
4678 }
4679
4680 /*
4681 * [At this point, the object must be locked]
4682 */
4683
4684 /*
4685 * Wait for the work above to be done by the first
4686 * thread to map this object.
4687 */
4688
4689 while (!object->pager_initialized) {
4690 vm_object_sleep(object,
4691 VM_OBJECT_EVENT_INITIALIZED,
4692 THREAD_UNINT);
4693 }
4694 vm_object_unlock(object);
4695
4696 XPR(XPR_VM_OBJECT,
4697 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
4698 object, object->pager, internal, 0,0);
4699 return(object);
4700 }
4701
4702 /*
4703 * Routine: vm_object_compressor_pager_create
4704 * Purpose:
4705 * Create a memory object for an internal object.
4706 * In/out conditions:
4707 * The object is locked on entry and exit;
4708 * it may be unlocked within this call.
4709 * Limitations:
4710 * Only one thread may be performing a
4711 * vm_object_compressor_pager_create on an object at
4712 * a time. Presumably, only the pageout
4713 * daemon will be using this routine.
4714 */
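/*
 * Illustrative sketch only (hypothetical pageout-side caller): per the
 * conditions above, the object is locked across the call.
 */
#if 0
static void
vm_object_ensure_compressor_pager_sketch(vm_object_t object)	/* hypothetical */
{
	vm_object_lock(object);
	if (!object->pager_created) {
		vm_object_compressor_pager_create(object);
	}
	vm_object_unlock(object);
}
#endif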
4715
4716 void
4717 vm_object_compressor_pager_create(
4718 vm_object_t object)
4719 {
4720 memory_object_t pager;
4721 vm_object_hash_entry_t entry;
4722 lck_mtx_t *lck;
4723 vm_object_t pager_object = VM_OBJECT_NULL;
4724
4725 assert(object != kernel_object);
4726
4727 /*
4728 * Prevent collapse or termination by holding a paging reference
4729 */
4730
4731 vm_object_paging_begin(object);
4732 if (object->pager_created) {
4733 /*
4734 * Someone else got to it first...
4735 * wait for them to finish initializing the ports
4736 */
4737 while (!object->pager_initialized) {
4738 vm_object_sleep(object,
4739 VM_OBJECT_EVENT_INITIALIZED,
4740 THREAD_UNINT);
4741 }
4742 vm_object_paging_end(object);
4743 return;
4744 }
4745
4746 /*
4747 * Indicate that a memory object has been assigned
4748 * before dropping the lock, to prevent a race.
4749 */
4750
4751 object->pager_created = TRUE;
4752 object->paging_offset = 0;
4753
4754 vm_object_unlock(object);
4755
4756 if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
4757 (object->vo_size/PAGE_SIZE)) {
4758 panic("vm_object_compressor_pager_create(%p): "
4759 "object size 0x%llx >= 0x%llx\n",
4760 object,
4761 (uint64_t) object->vo_size,
4762 0x0FFFFFFFFULL*PAGE_SIZE);
4763 }
4764
4765 /*
4766 * Create the [internal] pager, and associate it with this object.
4767 *
4768 * We make the association here so that vm_object_enter()
4769 * can look up the object to complete initializing it. No
4770 * user will ever map this object.
4771 */
4772 {
4773 assert(object->temporary);
4774
4775 /* create our new memory object */
4776 assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
4777 (object->vo_size/PAGE_SIZE));
4778 (void) compressor_memory_object_create(
4779 (memory_object_size_t) object->vo_size,
4780 &pager);
4781 if (pager == NULL) {
4782 panic("vm_object_compressor_pager_create(): "
4783 "no pager for object %p size 0x%llx\n",
4784 object, (uint64_t) object->vo_size);
4785 }
4786 }
4787
4788 entry = vm_object_hash_entry_alloc(pager);
4789
4790 vm_object_lock(object);
4791 lck = vm_object_hash_lock_spin(pager);
4792 vm_object_hash_insert(entry, object);
4793 vm_object_hash_unlock(lck);
4794 vm_object_unlock(object);
4795
4796 /*
4797 * A reference was returned by
4798 * memory_object_create(), and it is
4799 * copied by vm_object_enter().
4800 */
4801
4802 pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);
4803
4804 if (pager_object != object) {
4805 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
4806 }
4807
4808 /*
4809 * Drop the reference we were passed.
4810 */
4811 memory_object_deallocate(pager);
4812
4813 vm_object_lock(object);
4814
4815 /*
4816 * Release the paging reference
4817 */
4818 vm_object_paging_end(object);
4819 }
4820
4821 /*
4822 * Routine: vm_object_remove
4823 * Purpose:
4824 * Eliminate the pager/object association
4825 * for this pager.
4826 * Conditions:
4827 * The object cache must be locked.
4828 */
4829 __private_extern__ void
4830 vm_object_remove(
4831 vm_object_t object)
4832 {
4833 memory_object_t pager;
4834
4835 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
4836 vm_object_hash_entry_t entry;
4837
4838 entry = vm_object_hash_lookup(pager, FALSE);
4839 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
4840 entry->object = VM_OBJECT_NULL;
4841 }
4842
4843 }
4844
4845 /*
4846 * Global variables for vm_object_collapse():
4847 *
4848 * Counts for normal collapses and bypasses.
4849 * Debugging variables, to watch or disable collapse.
4850 */
4851 static long object_collapses = 0;
4852 static long object_bypasses = 0;
4853
4854 static boolean_t vm_object_collapse_allowed = TRUE;
4855 static boolean_t vm_object_bypass_allowed = TRUE;
4856
4857 unsigned long vm_object_collapse_encrypted = 0;
4858
4859 void vm_object_do_collapse_compressor(vm_object_t object,
4860 vm_object_t backing_object);
4861 void
4862 vm_object_do_collapse_compressor(
4863 vm_object_t object,
4864 vm_object_t backing_object)
4865 {
4866 vm_object_offset_t new_offset, backing_offset;
4867 vm_object_size_t size;
4868
4869 vm_counters.do_collapse_compressor++;
4870
4871 vm_object_lock_assert_exclusive(object);
4872 vm_object_lock_assert_exclusive(backing_object);
4873
4874 size = object->vo_size;
4875
4876 /*
4877 * Move all compressed pages from backing_object
4878 * to the parent.
4879 */
4880
4881 for (backing_offset = object->vo_shadow_offset;
4882 backing_offset < object->vo_shadow_offset + object->vo_size;
4883 backing_offset += PAGE_SIZE) {
4884 memory_object_offset_t backing_pager_offset;
4885
4886 /* find the next compressed page at or after this offset */
4887 backing_pager_offset = (backing_offset +
4888 backing_object->paging_offset);
4889 backing_pager_offset = vm_compressor_pager_next_compressed(
4890 backing_object->pager,
4891 backing_pager_offset);
4892 if (backing_pager_offset == (memory_object_offset_t) -1) {
4893 /* no more compressed pages */
4894 break;
4895 }
4896 backing_offset = (backing_pager_offset -
4897 backing_object->paging_offset);
4898
4899 new_offset = backing_offset - object->vo_shadow_offset;
4900
4901 if (new_offset >= object->vo_size) {
4902 /* we're out of the scope of "object": done */
4903 break;
4904 }
4905
4906 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4907 (vm_compressor_pager_state_get(object->pager,
4908 (new_offset +
4909 object->paging_offset)) ==
4910 VM_EXTERNAL_STATE_EXISTS)) {
4911 /*
4912 * This page already exists in object, resident or
4913 * compressed.
4914 * We don't need this compressed page in backing_object
4915 * and it will be reclaimed when we release
4916 * backing_object.
4917 */
4918 continue;
4919 }
4920
4921 /*
4922 * backing_object has this page in the VM compressor and
4923 * we need to transfer it to object.
4924 */
4925 vm_counters.do_collapse_compressor_pages++;
4926 vm_compressor_pager_transfer(
4927 /* destination: */
4928 object->pager,
4929 (new_offset + object->paging_offset),
4930 /* source: */
4931 backing_object->pager,
4932 (backing_offset + backing_object->paging_offset));
4933 }
4934 }
4935
4936 /*
4937 * Routine: vm_object_do_collapse
4938 * Purpose:
4939 * Collapse an object with the object backing it.
4940 * Pages in the backing object are moved into the
4941 * parent, and the backing object is deallocated.
4942 * Conditions:
4943 * Both objects and the cache are locked; the page
4944 * queues are unlocked.
4945 *
4946 */
4947 static void
4948 vm_object_do_collapse(
4949 vm_object_t object,
4950 vm_object_t backing_object)
4951 {
4952 vm_page_t p, pp;
4953 vm_object_offset_t new_offset, backing_offset;
4954 vm_object_size_t size;
4955
4956 vm_object_lock_assert_exclusive(object);
4957 vm_object_lock_assert_exclusive(backing_object);
4958
4959 assert(object->purgable == VM_PURGABLE_DENY);
4960 assert(backing_object->purgable == VM_PURGABLE_DENY);
4961
4962 backing_offset = object->vo_shadow_offset;
4963 size = object->vo_size;
4964
4965 /*
4966 * Move all in-memory pages from backing_object
4967 * to the parent. Pages that have been paged out
4968 * will be overwritten by any of the parent's
4969 * pages that shadow them.
4970 */
4971
4972 while (!vm_page_queue_empty(&backing_object->memq)) {
4973
4974 p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4975
4976 new_offset = (p->offset - backing_offset);
4977
4978 assert(!p->busy || p->absent);
4979
4980 /*
4981 * If the parent has a page here, or if
4982 * this page falls outside the parent,
4983 * dispose of it.
4984 *
4985 * Otherwise, move it as planned.
4986 */
4987
4988 if (p->offset < backing_offset || new_offset >= size) {
4989 VM_PAGE_FREE(p);
4990 } else {
4991 /*
4992 * ENCRYPTED SWAP:
4993 * The encryption key includes the "pager" and the
4994 * "paging_offset". These will not change during the
4995 * object collapse, so we can just move an encrypted
4996 * page from one object to the other in this case.
4997 * We can't decrypt the page here, since we can't drop
4998 * the object lock.
4999 */
5000 if (p->encrypted) {
5001 vm_object_collapse_encrypted++;
5002 }
5003 pp = vm_page_lookup(object, new_offset);
5004 if (pp == VM_PAGE_NULL) {
5005
5006 if (VM_COMPRESSOR_PAGER_STATE_GET(object,
5007 new_offset)
5008 == VM_EXTERNAL_STATE_EXISTS) {
5009 /*
5010 * Parent object has this page
5011 * in the VM compressor.
5012 * Throw away the backing
5013 * object's page.
5014 */
5015 VM_PAGE_FREE(p);
5016 } else {
5017 /*
5018 * Parent now has no page.
5019 * Move the backing object's page
5020 * up.
5021 */
5022 vm_page_rename(p, object, new_offset,
5023 TRUE);
5024 }
5025 } else {
5026 assert(! pp->absent);
5027
5028 /*
5029 * Parent object has a real page.
5030 * Throw away the backing object's
5031 * page.
5032 */
5033 VM_PAGE_FREE(p);
5034 }
5035 }
5036 }
5037
5038 if (vm_object_collapse_compressor_allowed &&
5039 object->pager != MEMORY_OBJECT_NULL &&
5040 backing_object->pager != MEMORY_OBJECT_NULL) {
5041
5042 /* move compressed pages from backing_object to object */
5043 vm_object_do_collapse_compressor(object, backing_object);
5044
5045 } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
5046 vm_object_hash_entry_t entry;
5047
5048 assert((!object->pager_created &&
5049 (object->pager == MEMORY_OBJECT_NULL)) ||
5050 (!backing_object->pager_created &&
5051 (backing_object->pager == MEMORY_OBJECT_NULL)));
5052 /*
5053 * Move the pager from backing_object to object.
5054 *
5055 * XXX We're only using part of the paging space
5056 * for keeps now... we ought to discard the
5057 * unused portion.
5058 */
5059
5060 assert(!object->paging_in_progress);
5061 assert(!object->activity_in_progress);
5062 assert(!object->pager_created);
5063 assert(object->pager == NULL);
5064 object->pager = backing_object->pager;
5065
5066 if (backing_object->hashed) {
5067 lck_mtx_t *lck;
5068
5069 lck = vm_object_hash_lock_spin(backing_object->pager);
5070 entry = vm_object_hash_lookup(object->pager, FALSE);
5071 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
5072 entry->object = object;
5073 vm_object_hash_unlock(lck);
5074
5075 object->hashed = TRUE;
5076 }
5077 object->pager_created = backing_object->pager_created;
5078 object->pager_control = backing_object->pager_control;
5079 object->pager_ready = backing_object->pager_ready;
5080 object->pager_initialized = backing_object->pager_initialized;
5081 object->paging_offset =
5082 backing_object->paging_offset + backing_offset;
5083 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5084 memory_object_control_collapse(object->pager_control,
5085 object);
5086 }
5087 /* the backing_object has lost its pager: reset all fields */
5088 backing_object->pager_created = FALSE;
5089 backing_object->pager_control = NULL;
5090 backing_object->pager_ready = FALSE;
5091 backing_object->paging_offset = 0;
5092 backing_object->pager = NULL;
5093 }
5094 /*
5095 * Object now shadows whatever backing_object did.
5096 * Note that the reference to backing_object->shadow
5097 * moves from within backing_object to within object.
5098 */
5099
5100 assert(!object->phys_contiguous);
5101 assert(!backing_object->phys_contiguous);
5102 object->shadow = backing_object->shadow;
5103 if (object->shadow) {
5104 object->vo_shadow_offset += backing_object->vo_shadow_offset;
5105 /* "backing_object" gave its shadow to "object" */
5106 backing_object->shadow = VM_OBJECT_NULL;
5107 backing_object->vo_shadow_offset = 0;
5108 } else {
5109 /* no shadow, therefore no shadow offset... */
5110 object->vo_shadow_offset = 0;
5111 }
5112 assert((object->shadow == VM_OBJECT_NULL) ||
5113 (object->shadow->copy != backing_object));
5114
5115 /*
5116 * Discard backing_object.
5117 *
5118 * Since the backing object has no pages, no
5119 * pager left, and no object references within it,
5120 * all that is necessary is to dispose of it.
5121 */
5122 object_collapses++;
5123
5124 assert(backing_object->ref_count == 1);
5125 assert(backing_object->resident_page_count == 0);
5126 assert(backing_object->paging_in_progress == 0);
5127 assert(backing_object->activity_in_progress == 0);
5128 assert(backing_object->shadow == VM_OBJECT_NULL);
5129 assert(backing_object->vo_shadow_offset == 0);
5130
5131 if (backing_object->pager != MEMORY_OBJECT_NULL) {
5132 /* ... unless it has a pager; need to terminate pager too */
5133 vm_counters.do_collapse_terminate++;
5134 if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
5135 vm_counters.do_collapse_terminate_failure++;
5136 }
5137 return;
5138 }
5139
5140 assert(backing_object->pager == NULL);
5141
5142 backing_object->alive = FALSE;
5143 vm_object_unlock(backing_object);
5144
5145 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
5146 backing_object, 0,0,0,0);
5147
5148 #if VM_OBJECT_TRACKING
5149 if (vm_object_tracking_inited) {
5150 btlog_remove_entries_for_element(vm_object_tracking_btlog,
5151 backing_object);
5152 }
5153 #endif /* VM_OBJECT_TRACKING */
5154
5155 vm_object_lock_destroy(backing_object);
5156
5157 zfree(vm_object_zone, backing_object);
5158
5159 }
5160
5161 static void
5162 vm_object_do_bypass(
5163 vm_object_t object,
5164 vm_object_t backing_object)
5165 {
5166 /*
5167 * Make the parent shadow the next object
5168 * in the chain.
5169 */
5170
5171 vm_object_lock_assert_exclusive(object);
5172 vm_object_lock_assert_exclusive(backing_object);
5173
5174 #if TASK_SWAPPER
5175 /*
5176 * Do object reference in-line to
5177 * conditionally increment shadow's
5178 * residence count. If object is not
5179 * resident, leave residence count
5180 * on shadow alone.
5181 */
5182 if (backing_object->shadow != VM_OBJECT_NULL) {
5183 vm_object_lock(backing_object->shadow);
5184 vm_object_lock_assert_exclusive(backing_object->shadow);
5185 backing_object->shadow->ref_count++;
5186 if (object->res_count != 0)
5187 vm_object_res_reference(backing_object->shadow);
5188 vm_object_unlock(backing_object->shadow);
5189 }
5190 #else /* TASK_SWAPPER */
5191 vm_object_reference(backing_object->shadow);
5192 #endif /* TASK_SWAPPER */
5193
5194 assert(!object->phys_contiguous);
5195 assert(!backing_object->phys_contiguous);
5196 object->shadow = backing_object->shadow;
5197 if (object->shadow) {
5198 object->vo_shadow_offset += backing_object->vo_shadow_offset;
5199 } else {
5200 /* no shadow, therefore no shadow offset... */
5201 object->vo_shadow_offset = 0;
5202 }
5203
5204 /*
5205 * Backing object might have had a copy pointer
5206 * to us. If it did, clear it.
5207 */
5208 if (backing_object->copy == object) {
5209 backing_object->copy = VM_OBJECT_NULL;
5210 }
5211
5212 /*
5213 * Drop the reference count on backing_object.
5214 #if TASK_SWAPPER
5215 * Since its ref_count was at least 2, it
5216 * will not vanish; so we don't need to call
5217 * vm_object_deallocate.
5218 * [with a caveat for "named" objects]
5219 *
5220 * The res_count on the backing object is
5221 * conditionally decremented. It's possible
5222 * (via vm_pageout_scan) to get here with
5223 * a "swapped" object, which has a 0 res_count,
5224 * in which case, the backing object res_count
5225 * is already down by one.
5226 #else
5227 * Don't call vm_object_deallocate unless
5228 * ref_count drops to zero.
5229 *
5230 * The ref_count can drop to zero here if the
5231 * backing object could be bypassed but not
5232 * collapsed, such as when the backing object
5233 * is temporary and cachable.
5234 #endif
5235 */
5236 if (backing_object->ref_count > 2 ||
5237 (!backing_object->named && backing_object->ref_count > 1)) {
5238 vm_object_lock_assert_exclusive(backing_object);
5239 backing_object->ref_count--;
5240 #if TASK_SWAPPER
5241 if (object->res_count != 0)
5242 vm_object_res_deallocate(backing_object);
5243 assert(backing_object->ref_count > 0);
5244 #endif /* TASK_SWAPPER */
5245 vm_object_unlock(backing_object);
5246 } else {
5247
5248 /*
5249 * Drop locks so that we can deallocate
5250 * the backing object.
5251 */
5252
5253 #if TASK_SWAPPER
5254 if (object->res_count == 0) {
5255 /* XXX get a reference for the deallocate below */
5256 vm_object_res_reference(backing_object);
5257 }
5258 #endif /* TASK_SWAPPER */
5259 /*
5260 * vm_object_collapse (the caller of this function) is
5261 * now called from contexts that may not guarantee that a
5262 * valid reference is held on the object... w/o a valid
5263 * reference, it is unsafe and unwise (you will definitely
5264 * regret it) to unlock the object and then retake the lock
5265 * since the object may be terminated and recycled in between.
5266 * The "activity_in_progress" reference will keep the object
5267 * 'stable'.
5268 */
5269 vm_object_activity_begin(object);
5270 vm_object_unlock(object);
5271
5272 vm_object_unlock(backing_object);
5273 vm_object_deallocate(backing_object);
5274
5275 /*
5276 * Relock object. We don't have to reverify
5277 * its state since vm_object_collapse will
5278 * do that for us as it starts at the
5279 * top of its loop.
5280 */
5281
5282 vm_object_lock(object);
5283 vm_object_activity_end(object);
5284 }
5285
5286 object_bypasses++;
5287 }
5288
5289
5290 /*
5291 * vm_object_collapse:
5292 *
5293 * Perform an object collapse or an object bypass if appropriate.
5294 * The real work of collapsing and bypassing is performed in
5295 * the routines vm_object_do_collapse and vm_object_do_bypass.
5296 *
5297 * Requires that the object be locked and the page queues be unlocked.
5298 *
5299 */
5300 static unsigned long vm_object_collapse_calls = 0;
5301 static unsigned long vm_object_collapse_objects = 0;
5302 static unsigned long vm_object_collapse_do_collapse = 0;
5303 static unsigned long vm_object_collapse_do_bypass = 0;
5304
5305 __private_extern__ void
5306 vm_object_collapse(
5307 vm_object_t object,
5308 vm_object_offset_t hint_offset,
5309 boolean_t can_bypass)
5310 {
5311 vm_object_t backing_object;
5312 unsigned int rcount;
5313 unsigned int size;
5314 vm_object_t original_object;
5315 int object_lock_type;
5316 int backing_object_lock_type;
5317
5318 vm_object_collapse_calls++;
5319
5320 if (! vm_object_collapse_allowed &&
5321 ! (can_bypass && vm_object_bypass_allowed)) {
5322 return;
5323 }
5324
5325 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
5326 object, 0,0,0,0);
5327
5328 if (object == VM_OBJECT_NULL)
5329 return;
5330
5331 original_object = object;
5332
5333 /*
5334 * The top object was locked "exclusive" by the caller.
5335 * In the first pass, to determine if we can collapse the shadow chain,
5336 * take a "shared" lock on the shadow objects. If we can collapse,
5337 * we'll have to go down the chain again with exclusive locks.
5338 */
5339 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5340 backing_object_lock_type = OBJECT_LOCK_SHARED;
5341
5342 retry:
5343 object = original_object;
5344 vm_object_lock_assert_exclusive(object);
5345
5346 while (TRUE) {
5347 vm_object_collapse_objects++;
5348 /*
5349 * Verify that the conditions are right for either
5350 * collapse or bypass:
5351 */
5352
5353 /*
5354 * There is a backing object, and
5355 */
5356
5357 backing_object = object->shadow;
5358 if (backing_object == VM_OBJECT_NULL) {
5359 if (object != original_object) {
5360 vm_object_unlock(object);
5361 }
5362 return;
5363 }
5364 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
5365 vm_object_lock_shared(backing_object);
5366 } else {
5367 vm_object_lock(backing_object);
5368 }
5369
5370 /*
5371 * No pages in the object are currently
5372 * being paged out, and
5373 */
5374 if (object->paging_in_progress != 0 ||
5375 object->activity_in_progress != 0) {
5376 /* try and collapse the rest of the shadow chain */
5377 if (object != original_object) {
5378 vm_object_unlock(object);
5379 }
5380 object = backing_object;
5381 object_lock_type = backing_object_lock_type;
5382 continue;
5383 }
5384
5385 /*
5386 * ...
5387 * The backing object is not read_only,
5388 * and no pages in the backing object are
5389 * currently being paged out.
5390 * The backing object is internal.
5391 *
5392 */
5393
5394 if (!backing_object->internal ||
5395 backing_object->paging_in_progress != 0 ||
5396 backing_object->activity_in_progress != 0) {
5397 /* try and collapse the rest of the shadow chain */
5398 if (object != original_object) {
5399 vm_object_unlock(object);
5400 }
5401 object = backing_object;
5402 object_lock_type = backing_object_lock_type;
5403 continue;
5404 }
5405
5406 /*
5407 * Purgeable objects are not supposed to engage in
5408 * copy-on-write activities, so should not have
5409 * any shadow objects or be a shadow object to another
5410 * object.
5411 * Collapsing a purgeable object would require some
5412 * updates to the purgeable compressed ledgers.
5413 */
5414 if (object->purgable != VM_PURGABLE_DENY ||
5415 backing_object->purgable != VM_PURGABLE_DENY) {
5416 panic("vm_object_collapse() attempting to collapse "
5417 "purgeable object: %p(%d) %p(%d)\n",
5418 object, object->purgable,
5419 backing_object, backing_object->purgable);
5420 /* try and collapse the rest of the shadow chain */
5421 if (object != original_object) {
5422 vm_object_unlock(object);
5423 }
5424 object = backing_object;
5425 object_lock_type = backing_object_lock_type;
5426 continue;
5427 }
5428
5429 /*
5430 * The backing object can't be a copy-object:
5431 * the shadow_offset for the copy-object must stay
5432 * as 0. Furthermore (for the 'we have all the
5433 * pages' case), if we bypass backing_object and
5434 * just shadow the next object in the chain, old
5435 * pages from that object would then have to be copied
5436 * BOTH into the (former) backing_object and into the
5437 * parent object.
5438 */
5439 if (backing_object->shadow != VM_OBJECT_NULL &&
5440 backing_object->shadow->copy == backing_object) {
5441 /* try and collapse the rest of the shadow chain */
5442 if (object != original_object) {
5443 vm_object_unlock(object);
5444 }
5445 object = backing_object;
5446 object_lock_type = backing_object_lock_type;
5447 continue;
5448 }
5449
5450 /*
5451 * We can now try to either collapse the backing
5452 * object (if the parent is the only reference to
5453 * it) or (perhaps) remove the parent's reference
5454 * to it.
5455 *
5456 * If there is exactly one reference to the backing
5457 * object, we may be able to collapse it into the
5458 * parent.
5459 *
5460 * As long as one of the objects is still not known
5461 * to the pager, we can collapse them.
5462 */
5463 if (backing_object->ref_count == 1 &&
5464 (vm_object_collapse_compressor_allowed ||
5465 !object->pager_created
5466 || (!backing_object->pager_created)
5467 ) && vm_object_collapse_allowed) {
5468
5469 /*
5470 * We need the exclusive lock on the VM objects.
5471 */
5472 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5473 /*
5474 * We have an object and its shadow locked
5475 * "shared". We can't just upgrade the locks
5476 * to "exclusive", as some other thread might
5477 * also have these objects locked "shared" and
5478 * attempt to upgrade one or the other to
5479 * "exclusive". The upgrades would block
5480 * forever waiting for the other "shared" locks
5481 * to get released.
5482 * So we have to release the locks and go
5483 * down the shadow chain again (since it could
5484 * have changed) with "exclusive" locking.
5485 */
5486 vm_object_unlock(backing_object);
5487 if (object != original_object)
5488 vm_object_unlock(object);
5489 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5490 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5491 goto retry;
5492 }
5493
5494 XPR(XPR_VM_OBJECT,
5495 "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
5496 backing_object, object,
5497 backing_object->pager,
5498 backing_object->pager_control, 0);
5499
5500 /*
5501 * Collapse the object with its backing
5502 * object, and try again with the object's
5503 * new backing object.
5504 */
5505
5506 vm_object_do_collapse(object, backing_object);
5507 vm_object_collapse_do_collapse++;
5508 continue;
5509 }
5510
5511 /*
5512 * Collapsing the backing object was not possible
5513 * or permitted, so let's try bypassing it.
5514 */
5515
5516 if (! (can_bypass && vm_object_bypass_allowed)) {
5517 /* try and collapse the rest of the shadow chain */
5518 if (object != original_object) {
5519 vm_object_unlock(object);
5520 }
5521 object = backing_object;
5522 object_lock_type = backing_object_lock_type;
5523 continue;
5524 }
5525
5526
5527 /*
5528 * If the object doesn't have all its pages present,
5529 * we have to make sure no pages in the backing object
5530 * "show through" before bypassing it.
5531 */
5532 size = (unsigned int)atop(object->vo_size);
5533 rcount = object->resident_page_count;
5534
5535 if (rcount != size) {
5536 vm_object_offset_t offset;
5537 vm_object_offset_t backing_offset;
5538 unsigned int backing_rcount;
5539
5540 /*
5541 * If the backing object has a pager but no pagemap,
5542 * then we cannot bypass it, because we don't know
5543 * what pages it has.
5544 */
5545 if (backing_object->pager_created) {
5546 /* try and collapse the rest of the shadow chain */
5547 if (object != original_object) {
5548 vm_object_unlock(object);
5549 }
5550 object = backing_object;
5551 object_lock_type = backing_object_lock_type;
5552 continue;
5553 }
5554
5555 /*
5556 * If the object has a pager but no pagemap,
5557 * then we cannot bypass it, because we don't know
5558 * what pages it has.
5559 */
5560 if (object->pager_created) {
5561 /* try and collapse the rest of the shadow chain */
5562 if (object != original_object) {
5563 vm_object_unlock(object);
5564 }
5565 object = backing_object;
5566 object_lock_type = backing_object_lock_type;
5567 continue;
5568 }
5569
5570 backing_offset = object->vo_shadow_offset;
5571 backing_rcount = backing_object->resident_page_count;
5572
5573 if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
5574 /*
5575 * we have enough pages in the backing object to guarantee that
5576 * at least 1 of them must be 'uncovered' by a resident page
5577 * in the object we're evaluating, so move on and
5578 * try to collapse the rest of the shadow chain
5579 */
5580 if (object != original_object) {
5581 vm_object_unlock(object);
5582 }
5583 object = backing_object;
5584 object_lock_type = backing_object_lock_type;
5585 continue;
5586 }
5587
5588 /*
5589 * If all of the pages in the backing object are
5590 * shadowed by the parent object, the parent
5591 * object no longer has to shadow the backing
5592 * object; it can shadow the next one in the
5593 * chain.
5594 *
5595 * If the backing object has existence info,
5596 * we must examine that existence info
5597 * as well.
5598 *
5599 */
5600
5601 #define EXISTS_IN_OBJECT(obj, off, rc) \
5602 ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
5603 == VM_EXTERNAL_STATE_EXISTS) || \
5604 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
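	/*
	 * EXISTS_IN_OBJECT(obj, off, rc) is true when "obj" holds the page
	 * at "off" either compressed (known to the compressor pager) or
	 * resident.  "rc" is the caller's running count of not-yet-seen
	 * resident pages: the resident lookup is only attempted while it is
	 * non-zero, and each resident hit decrements it, so a scan can stop
	 * probing once every resident page has been accounted for.
	 */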
5605
5606 /*
5607 * Check the hint location first
5608 * (since it is often the quickest way out of here).
5609 */
5610 if (object->cow_hint != ~(vm_offset_t)0)
5611 hint_offset = (vm_object_offset_t)object->cow_hint;
5612 else
5613 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
5614 (hint_offset - 8 * PAGE_SIZE_64) : 0;
5615
5616 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
5617 backing_offset, backing_rcount) &&
5618 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
5619 /* dependency right at the hint */
5620 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
5621 /* try and collapse the rest of the shadow chain */
5622 if (object != original_object) {
5623 vm_object_unlock(object);
5624 }
5625 object = backing_object;
5626 object_lock_type = backing_object_lock_type;
5627 continue;
5628 }
5629
5630 /*
5631 * If the object's window onto the backing_object
5632 * is large compared to the number of resident
5633 * pages in the backing object, it makes sense to
5634 * walk the backing_object's resident pages first.
5635 *
5636 * NOTE: Pages may be both in the existence map and
5637 * resident, so if we don't find a dependency while
5638 * walking the backing object's resident page list
5639 * directly, and there is an existence map, we'll have
5640 * to run the offset-based 2nd pass. Because we may
5641 * have to run both passes, we need to be careful
5642 * not to decrement 'rcount' in the 1st pass
5643 */
5644 if (backing_rcount && backing_rcount < (size / 8)) {
5645 unsigned int rc = rcount;
5646 vm_page_t p;
5647
5648 backing_rcount = backing_object->resident_page_count;
5649 p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
5650 do {
5651 offset = (p->offset - backing_offset);
5652
5653 if (offset < object->vo_size &&
5654 offset != hint_offset &&
5655 !EXISTS_IN_OBJECT(object, offset, rc)) {
5656 /* found a dependency */
5657 object->cow_hint = (vm_offset_t) offset; /* atomic */
5658
5659 break;
5660 }
5661 p = (vm_page_t) vm_page_queue_next(&p->listq);
5662
5663 } while (--backing_rcount);
5664 if (backing_rcount != 0 ) {
5665 /* try and collapse the rest of the shadow chain */
5666 if (object != original_object) {
5667 vm_object_unlock(object);
5668 }
5669 object = backing_object;
5670 object_lock_type = backing_object_lock_type;
5671 continue;
5672 }
5673 }
5674
5675 /*
5676 * Walk through the offsets looking for pages in the
5677 * backing object that show through to the object.
5678 */
5679 if (backing_rcount) {
5680 offset = hint_offset;
5681
5682 while((offset =
5683 (offset + PAGE_SIZE_64 < object->vo_size) ?
5684 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5685
5686 if (EXISTS_IN_OBJECT(backing_object, offset +
5687 backing_offset, backing_rcount) &&
5688 !EXISTS_IN_OBJECT(object, offset, rcount)) {
5689 /* found a dependency */
5690 object->cow_hint = (vm_offset_t) offset; /* atomic */
5691 break;
5692 }
5693 }
5694 if (offset != hint_offset) {
5695 /* try and collapse the rest of the shadow chain */
5696 if (object != original_object) {
5697 vm_object_unlock(object);
5698 }
5699 object = backing_object;
5700 object_lock_type = backing_object_lock_type;
5701 continue;
5702 }
5703 }
5704 }
5705
5706 /*
5707 * We need "exclusive" locks on the 2 VM objects.
5708 */
5709 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5710 vm_object_unlock(backing_object);
5711 if (object != original_object)
5712 vm_object_unlock(object);
5713 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5714 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5715 goto retry;
5716 }
5717
5718 /* reset the offset hint for any objects deeper in the chain */
5719 object->cow_hint = (vm_offset_t)0;
5720
5721 /*
5722 * All interesting pages in the backing object
5723 * already live in the parent or its pager.
5724 * Thus we can bypass the backing object.
5725 */
5726
5727 vm_object_do_bypass(object, backing_object);
5728 vm_object_collapse_do_bypass++;
5729
5730 /*
5731 * Try again with this object's new backing object.
5732 */
5733
5734 continue;
5735 }
5736
5737 /* NOT REACHED */
5738 /*
5739 if (object != original_object) {
5740 vm_object_unlock(object);
5741 }
5742 */
5743 }
5744
5745 /*
5746 * Routine: vm_object_page_remove: [internal]
5747 * Purpose:
5748 * Removes all physical pages in the specified
5749 * object range from the object's list of pages.
5750 *
5751 * In/out conditions:
5752 * The object must be locked.
5753 * The object must not have paging_in_progress, usually
5754 * guaranteed by not having a pager.
5755 */
5756 unsigned int vm_object_page_remove_lookup = 0;
5757 unsigned int vm_object_page_remove_iterate = 0;
5758
5759 __private_extern__ void
5760 vm_object_page_remove(
5761 vm_object_t object,
5762 vm_object_offset_t start,
5763 vm_object_offset_t end)
5764 {
5765 vm_page_t p, next;
5766
5767 /*
5768 * One and two page removals are most popular.
5769 * The factor of 16 here is somewhat arbitrary.
5770 * It balances vm_object_lookup vs iteration.
5771 */
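/*
 * For example, with 1600 resident pages the threshold below is 100 pages:
 * an 8-page removal takes the per-page vm_page_lookup() path, while a
 * 2000-page removal walks the object's entire page queue once instead.
 */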
5772
5773 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
5774 vm_object_page_remove_lookup++;
5775
5776 for (; start < end; start += PAGE_SIZE_64) {
5777 p = vm_page_lookup(object, start);
5778 if (p != VM_PAGE_NULL) {
5779 assert(!p->cleaning && !p->laundry);
5780 if (!p->fictitious && p->pmapped)
5781 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5782 VM_PAGE_FREE(p);
5783 }
5784 }
5785 } else {
5786 vm_object_page_remove_iterate++;
5787
5788 p = (vm_page_t) vm_page_queue_first(&object->memq);
5789 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5790 next = (vm_page_t) vm_page_queue_next(&p->listq);
5791 if ((start <= p->offset) && (p->offset < end)) {
5792 assert(!p->cleaning && !p->laundry);
5793 if (!p->fictitious && p->pmapped)
5794 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5795 VM_PAGE_FREE(p);
5796 }
5797 p = next;
5798 }
5799 }
5800 }
5801
5802
5803 /*
5804 * Routine: vm_object_coalesce
5805 * Function: Coalesces two objects backing up adjoining
5806 * regions of memory into a single object.
5807 *
5808 * returns TRUE if objects were combined.
5809 *
5810 * NOTE: Only works at the moment if the second object is NULL -
5811 * if it's not, which object do we lock first?
5812 *
5813 * Parameters:
5814 * prev_object First object to coalesce
5815 * prev_offset Offset into prev_object
5816 * next_object Second object to coalesce
5817 * next_offset Offset into next_object
5818 *
5819 * prev_size Size of reference to prev_object
5820 * next_size Size of reference to next_object
5821 *
5822 * Conditions:
5823 * The object(s) must *not* be locked. The map must be locked
5824 * to preserve the reference to the object(s).
5825 */
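/*
 * Typical use: a map entry of prev_size bytes backed by prev_object at
 * prev_offset is being extended by an adjoining next_size allocation that
 * has no object yet (next_object == VM_OBJECT_NULL).  On success,
 * prev_object is simply grown so that its vo_size covers
 * prev_offset + prev_size + next_size.
 */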
5826 static int vm_object_coalesce_count = 0;
5827
5828 __private_extern__ boolean_t
5829 vm_object_coalesce(
5830 vm_object_t prev_object,
5831 vm_object_t next_object,
5832 vm_object_offset_t prev_offset,
5833 __unused vm_object_offset_t next_offset,
5834 vm_object_size_t prev_size,
5835 vm_object_size_t next_size)
5836 {
5837 vm_object_size_t newsize;
5838
5839 #ifdef lint
5840 next_offset++;
5841 #endif /* lint */
5842
5843 if (next_object != VM_OBJECT_NULL) {
5844 return(FALSE);
5845 }
5846
5847 if (prev_object == VM_OBJECT_NULL) {
5848 return(TRUE);
5849 }
5850
5851 XPR(XPR_VM_OBJECT,
5852 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
5853 prev_object, prev_offset, prev_size, next_size, 0);
5854
5855 vm_object_lock(prev_object);
5856
5857 /*
5858 * Try to collapse the object first
5859 */
5860 vm_object_collapse(prev_object, prev_offset, TRUE);
5861
5862 /*
5863 * Can't coalesce if pages not mapped to
5864 * prev_entry may be in use anyway:
5865 * . more than one reference
5866 * . paged out
5867 * . shadows another object
5868 * . has a copy elsewhere
5869 * . is purgeable
5870 * . paging references (pages might be in page-list)
5871 */
5872
5873 if ((prev_object->ref_count > 1) ||
5874 prev_object->pager_created ||
5875 (prev_object->shadow != VM_OBJECT_NULL) ||
5876 (prev_object->copy != VM_OBJECT_NULL) ||
5877 (prev_object->true_share != FALSE) ||
5878 (prev_object->purgable != VM_PURGABLE_DENY) ||
5879 (prev_object->paging_in_progress != 0) ||
5880 (prev_object->activity_in_progress != 0)) {
5881 vm_object_unlock(prev_object);
5882 return(FALSE);
5883 }
5884
5885 vm_object_coalesce_count++;
5886
5887 /*
5888 * Remove any pages that may still be in the object from
5889 * a previous deallocation.
5890 */
5891 vm_object_page_remove(prev_object,
5892 prev_offset + prev_size,
5893 prev_offset + prev_size + next_size);
5894
5895 /*
5896 * Extend the object if necessary.
5897 */
5898 newsize = prev_offset + prev_size + next_size;
5899 if (newsize > prev_object->vo_size) {
5900 prev_object->vo_size = newsize;
5901 }
5902
5903 vm_object_unlock(prev_object);
5904 return(TRUE);
5905 }
5906
5907 kern_return_t
5908 vm_object_populate_with_private(
5909 vm_object_t object,
5910 vm_object_offset_t offset,
5911 ppnum_t phys_page,
5912 vm_size_t size)
5913 {
5914 ppnum_t base_page;
5915 vm_object_offset_t base_offset;
5916
5917
5918 if (!object->private)
5919 return KERN_FAILURE;
5920
5921 base_page = phys_page;
5922
5923 vm_object_lock(object);
5924
5925 if (!object->phys_contiguous) {
5926 vm_page_t m;
5927
5928 if ((base_offset = trunc_page_64(offset)) != offset) {
5929 vm_object_unlock(object);
5930 return KERN_FAILURE;
5931 }
5932 base_offset += object->paging_offset;
5933
5934 while (size) {
5935 m = vm_page_lookup(object, base_offset);
5936
5937 if (m != VM_PAGE_NULL) {
5938 if (m->fictitious) {
5939 if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
5940
5941 vm_page_lockspin_queues();
5942 m->private = TRUE;
5943 vm_page_unlock_queues();
5944
5945 m->fictitious = FALSE;
5946 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5947 }
5948 } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
5949
5950 if ( !m->private) {
5951 /*
5952 * we'd leak a real page... that can't be right
5953 */
5954 panic("vm_object_populate_with_private - %p not private", m);
5955 }
5956 if (m->pmapped) {
5957 /*
5958 * pmap call to clear old mapping
5959 */
5960 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5961 }
5962 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5963 }
5964 if (m->encrypted) {
5965 /*
5966 * we should never see this on a ficticious or private page
5967 */
5968 panic("vm_object_populate_with_private - %p encrypted", m);
5969 }
5970
5971 } else {
5972 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
5973 vm_page_more_fictitious();
5974
5975 /*
5976 * private normally requires lock_queues but since we
5977 * are initializing the page, it's not necessary here
5978 */
5979 m->private = TRUE;
5980 m->fictitious = FALSE;
5981 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5982 m->unusual = TRUE;
5983 m->busy = FALSE;
5984
5985 vm_page_insert(m, object, base_offset);
5986 }
5987 base_page++; /* Go to the next physical page */
5988 base_offset += PAGE_SIZE;
5989 size -= PAGE_SIZE;
5990 }
5991 } else {
5992 /* NOTE: we should check the original settings here */
5993 /* if we have a size > zero a pmap call should be made */
5994 /* to disable the range */
5995
5996 /* pmap_? */
5997
5998 /* shadows on contiguous memory are not allowed */
5999 /* we therefore can use the offset field */
6000 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
6001 object->vo_size = size;
6002 }
6003 vm_object_unlock(object);
6004
6005 return KERN_SUCCESS;
6006 }
6007
6008 /*
6009 * memory_object_free_from_cache:
6010 *
6011 * Walk the vm_object cache list, removing and freeing vm_objects
6012 * which are backed by the pager identified by the caller (pager_ops).
6013 * Remove up to "count" objects, if there are that many available
6014 * in the cache.
6015 *
6016 * Walk the list at most once, return the number of vm_objects
6017 * actually freed.
6018 */
6019
6020 __private_extern__ kern_return_t
6021 memory_object_free_from_cache(
6022 __unused host_t host,
6023 __unused memory_object_pager_ops_t pager_ops,
6024 int *count)
6025 {
6026 #if VM_OBJECT_CACHE
6027 int object_released = 0;
6028
6029 vm_object_t object = VM_OBJECT_NULL;
6030 vm_object_t shadow;
6031
6032 /*
6033 if(host == HOST_NULL)
6034 return(KERN_INVALID_ARGUMENT);
6035 */
6036
6037 try_again:
6038 vm_object_cache_lock();
6039
6040 queue_iterate(&vm_object_cached_list, object,
6041 vm_object_t, cached_list) {
6042 if (object->pager &&
6043 (pager_ops == object->pager->mo_pager_ops)) {
6044 vm_object_lock(object);
6045 queue_remove(&vm_object_cached_list, object,
6046 vm_object_t, cached_list);
6047 vm_object_cached_count--;
6048
6049 vm_object_cache_unlock();
6050 /*
6051 * Since this object is in the cache, we know
6052 * that it is initialized and has only a pager's
6053 * (implicit) reference. Take a reference to avoid
6054 * recursive deallocations.
6055 */
6056
6057 assert(object->pager_initialized);
6058 assert(object->ref_count == 0);
6059 vm_object_lock_assert_exclusive(object);
6060 object->ref_count++;
6061
6062 /*
6063 * Terminate the object.
6064 * If the object had a shadow, we let
6065 * vm_object_deallocate deallocate it.
6066 * "pageout" objects have a shadow, but
6067 * maintain a "paging reference" rather
6068 * than a normal reference.
6069 * (We are careful here to limit recursion.)
6070 */
6071 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
6072
6073 if ((vm_object_terminate(object) == KERN_SUCCESS)
6074 && (shadow != VM_OBJECT_NULL)) {
6075 vm_object_deallocate(shadow);
6076 }
6077
6078 if(object_released++ == *count)
6079 return KERN_SUCCESS;
6080 goto try_again;
6081 }
6082 }
6083 vm_object_cache_unlock();
6084 *count = object_released;
6085 #else
6086 *count = 0;
6087 #endif
6088 return KERN_SUCCESS;
6089 }
6090
6091
6092
6093 kern_return_t
6094 memory_object_create_named(
6095 memory_object_t pager,
6096 memory_object_offset_t size,
6097 memory_object_control_t *control)
6098 {
6099 vm_object_t object;
6100 vm_object_hash_entry_t entry;
6101 lck_mtx_t *lck;
6102
6103 *control = MEMORY_OBJECT_CONTROL_NULL;
6104 if (pager == MEMORY_OBJECT_NULL)
6105 return KERN_INVALID_ARGUMENT;
6106
6107 lck = vm_object_hash_lock_spin(pager);
6108 entry = vm_object_hash_lookup(pager, FALSE);
6109
6110 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
6111 (entry->object != VM_OBJECT_NULL)) {
6112 if (entry->object->named == TRUE)
6113 panic("memory_object_create_named: caller already holds the right");
	}
6114 vm_object_hash_unlock(lck);
6115
6116 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
6117 return(KERN_INVALID_OBJECT);
6118 }
6119
6120 /* wait for object (if any) to be ready */
6121 if (object != VM_OBJECT_NULL) {
6122 vm_object_lock(object);
6123 object->named = TRUE;
6124 while (!object->pager_ready) {
6125 vm_object_sleep(object,
6126 VM_OBJECT_EVENT_PAGER_READY,
6127 THREAD_UNINT);
6128 }
6129 *control = object->pager_control;
6130 vm_object_unlock(object);
6131 }
6132 return (KERN_SUCCESS);
6133 }
6134
6135
6136 /*
6137 * Routine: memory_object_recover_named [user interface]
6138 * Purpose:
6139 * Attempt to recover a named reference for a VM object.
6140 * VM will verify that the object has not already started
6141 * down the termination path, and if it has, will optionally
6142 * wait for that to finish.
6143 * Returns:
6144 * KERN_SUCCESS - we recovered a named reference on the object
6145 * KERN_FAILURE - we could not recover a reference (object dead)
6146 * KERN_INVALID_ARGUMENT - bad memory object control
6147 */
6148 kern_return_t
6149 memory_object_recover_named(
6150 memory_object_control_t control,
6151 boolean_t wait_on_terminating)
6152 {
6153 vm_object_t object;
6154
6155 object = memory_object_control_to_vm_object(control);
6156 if (object == VM_OBJECT_NULL) {
6157 return (KERN_INVALID_ARGUMENT);
6158 }
6159 restart:
6160 vm_object_lock(object);
6161
6162 if (object->terminating && wait_on_terminating) {
6163 vm_object_wait(object,
6164 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
6165 THREAD_UNINT);
6166 goto restart;
6167 }
6168
6169 if (!object->alive) {
6170 vm_object_unlock(object);
6171 return KERN_FAILURE;
6172 }
6173
6174 if (object->named == TRUE) {
6175 vm_object_unlock(object);
6176 return KERN_SUCCESS;
6177 }
6178 #if VM_OBJECT_CACHE
6179 if ((object->ref_count == 0) && (!object->terminating)) {
6180 if (!vm_object_cache_lock_try()) {
6181 vm_object_unlock(object);
6182 goto restart;
6183 }
6184 queue_remove(&vm_object_cached_list, object,
6185 vm_object_t, cached_list);
6186 vm_object_cached_count--;
6187 XPR(XPR_VM_OBJECT_CACHE,
6188 "memory_object_recover_named: removing %X, head (%X, %X)\n",
6189 object,
6190 vm_object_cached_list.next,
6191 vm_object_cached_list.prev, 0,0);
6192
6193 vm_object_cache_unlock();
6194 }
6195 #endif
6196 object->named = TRUE;
6197 vm_object_lock_assert_exclusive(object);
6198 object->ref_count++;
6199 vm_object_res_reference(object);
6200 while (!object->pager_ready) {
6201 vm_object_sleep(object,
6202 VM_OBJECT_EVENT_PAGER_READY,
6203 THREAD_UNINT);
6204 }
6205 vm_object_unlock(object);
6206 return (KERN_SUCCESS);
6207 }
6208
6209
6210 /*
6211 * vm_object_release_name:
6212 *
6213 * Enforces name semantics on the memory_object reference count decrement.
6214 * This routine should not be called unless the caller holds a name
6215 * reference gained through memory_object_create_named.
6216 *
6217 * If the TERMINATE_IDLE flag is set, the call will return if the
6218 * reference count is not 1, i.e. if the object is not idle with the only
6219 * remaining reference being the name.
6220 * If the decision is made to proceed, the "named" flag is cleared and
6221 * the reference count is decremented. If the RESPECT_CACHE
6222 * flag is set and the reference count has gone to zero, the
6223 * memory_object is checked to see if it is cacheable; otherwise, when
6224 * the reference count is zero, it is simply terminated.
6225 */
6226
6227 __private_extern__ kern_return_t
6228 vm_object_release_name(
6229 vm_object_t object,
6230 int flags)
6231 {
6232 vm_object_t shadow;
6233 boolean_t original_object = TRUE;
6234
6235 while (object != VM_OBJECT_NULL) {
6236
6237 vm_object_lock(object);
6238
6239 assert(object->alive);
6240 if (original_object)
6241 assert(object->named);
6242 assert(object->ref_count > 0);
6243
6244 /*
6245 * We have to wait for initialization before
6246 * destroying or caching the object.
6247 */
6248
6249 if (object->pager_created && !object->pager_initialized) {
6250 assert(!object->can_persist);
6251 vm_object_assert_wait(object,
6252 VM_OBJECT_EVENT_INITIALIZED,
6253 THREAD_UNINT);
6254 vm_object_unlock(object);
6255 thread_block(THREAD_CONTINUE_NULL);
6256 continue;
6257 }
6258
6259 if (((object->ref_count > 1)
6260 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
6261 || (object->terminating)) {
6262 vm_object_unlock(object);
6263 return KERN_FAILURE;
6264 } else {
6265 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
6266 vm_object_unlock(object);
6267 return KERN_SUCCESS;
6268 }
6269 }
6270
6271 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
6272 (object->ref_count == 1)) {
6273 if (original_object)
6274 object->named = FALSE;
6275 vm_object_unlock(object);
6276 /* let vm_object_deallocate push this thing into */
6277 /* the cache, if that is where it is bound */
6278 vm_object_deallocate(object);
6279 return KERN_SUCCESS;
6280 }
6281 VM_OBJ_RES_DECR(object);
6282 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
6283
6284 if (object->ref_count == 1) {
6285 if (vm_object_terminate(object) != KERN_SUCCESS) {
6286 if (original_object) {
6287 return KERN_FAILURE;
6288 } else {
6289 return KERN_SUCCESS;
6290 }
6291 }
6292 if (shadow != VM_OBJECT_NULL) {
6293 original_object = FALSE;
6294 object = shadow;
6295 continue;
6296 }
6297 return KERN_SUCCESS;
6298 } else {
6299 vm_object_lock_assert_exclusive(object);
6300 object->ref_count--;
6301 assert(object->ref_count > 0);
6302 if(original_object)
6303 object->named = FALSE;
6304 vm_object_unlock(object);
6305 return KERN_SUCCESS;
6306 }
6307 }
6308 /*NOTREACHED*/
6309 assert(0);
6310 return KERN_FAILURE;
6311 }
6312
6313
6314 __private_extern__ kern_return_t
6315 vm_object_lock_request(
6316 vm_object_t object,
6317 vm_object_offset_t offset,
6318 vm_object_size_t size,
6319 memory_object_return_t should_return,
6320 int flags,
6321 vm_prot_t prot)
6322 {
6323 __unused boolean_t should_flush;
6324
6325 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
6326
6327 XPR(XPR_MEMORY_OBJECT,
6328 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
6329 object, offset, size,
6330 (((should_return&1)<<1)|should_flush), prot);
6331
6332 /*
6333 * Check for bogus arguments.
6334 */
6335 if (object == VM_OBJECT_NULL)
6336 return (KERN_INVALID_ARGUMENT);
6337
6338 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
6339 return (KERN_INVALID_ARGUMENT);
6340
6341 size = round_page_64(size);
6342
6343 /*
6344 * Lock the object, and acquire a paging reference to
6345 * prevent the memory_object reference from being released.
6346 */
6347 vm_object_lock(object);
6348 vm_object_paging_begin(object);
6349
6350 (void)vm_object_update(object,
6351 offset, size, NULL, NULL, should_return, flags, prot);
6352
6353 vm_object_paging_end(object);
6354 vm_object_unlock(object);
6355
6356 return (KERN_SUCCESS);
6357 }
6358
6359 /*
6360 * Empty a purgeable object by grabbing the physical pages assigned to it and
6361 * putting them on the free queue without writing them to backing store, etc.
6362 * When the pages are next touched they will be demand zero-fill pages. We
6363 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
6364 * skip referenced/dirty pages, pages on the active queue, etc. We're more
6365 * than happy to grab these since this is a purgeable object. We mark the
6366 * object as "empty" after reaping its pages.
6367 *
6368 * On entry the object must be locked and it must be
6369 * purgeable with no delayed copies pending.
6370 */
6371 void
6372 vm_object_purge(vm_object_t object, int flags)
6373 {
6374 unsigned int object_page_count = 0;
6375 unsigned int pgcount = 0;
6376 boolean_t skipped_object = FALSE;
6377
6378 vm_object_lock_assert_exclusive(object);
6379
6380 if (object->purgable == VM_PURGABLE_DENY)
6381 return;
6382
6383 assert(object->copy == VM_OBJECT_NULL);
6384 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6385
6386 /*
6387 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
6388 * reaping its pages. We update vm_page_purgeable_count in bulk
6389 * and we don't want vm_page_remove() to update it again for each
6390 * page we reap later.
6391 *
6392 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
6393 * are all accounted for in the "volatile" ledgers, so this does not
6394 * make any difference.
6395 * If we transitioned directly from NONVOLATILE to EMPTY,
6396 * vm_page_purgeable_count must have been updated when the object
6397 * was dequeued from its volatile queue and the purgeable ledgers
6398 * must have also been updated accordingly at that time (in
6399 * vm_object_purgable_control()).
6400 */
6401 if (object->purgable == VM_PURGABLE_VOLATILE) {
6402 unsigned int delta;
6403 assert(object->resident_page_count >=
6404 object->wired_page_count);
6405 delta = (object->resident_page_count -
6406 object->wired_page_count);
6407 if (delta != 0) {
6408 assert(vm_page_purgeable_count >=
6409 delta);
6410 OSAddAtomic(-delta,
6411 (SInt32 *)&vm_page_purgeable_count);
6412 }
6413 if (object->wired_page_count != 0) {
6414 assert(vm_page_purgeable_wired_count >=
6415 object->wired_page_count);
6416 OSAddAtomic(-object->wired_page_count,
6417 (SInt32 *)&vm_page_purgeable_wired_count);
6418 }
6419 object->purgable = VM_PURGABLE_EMPTY;
6420 }
6421 assert(object->purgable == VM_PURGABLE_EMPTY);
6422
6423 object_page_count = object->resident_page_count;
6424
6425 vm_object_reap_pages(object, REAP_PURGEABLE);
6426
6427 if (object->pager != NULL) {
6428
6429 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
6430
6431 if (object->activity_in_progress == 0 &&
6432 object->paging_in_progress == 0) {
6433 /*
6434 * Also reap any memory coming from this object
6435 * in the VM compressor.
6436 *
6437 * There are no operations in progress on the VM object
6438 * and no operation can start while we're holding the
6439 * VM object lock, so it's safe to reap the compressed
6440 * pages and update the page counts.
6441 */
6442 pgcount = vm_compressor_pager_get_count(object->pager);
6443 if (pgcount) {
6444 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
6445 vm_compressor_pager_count(object->pager,
6446 -pgcount,
6447 FALSE, /* shared */
6448 object);
6449 vm_purgeable_compressed_update(object,
6450 -pgcount);
6451 }
6452 if ( !(flags & C_DONT_BLOCK)) {
6453 assert(vm_compressor_pager_get_count(object->pager)
6454 == 0);
6455 }
6456 } else {
6457 /*
6458 * There's some kind of paging activity in progress
6459 * for this object, which could result in a page
6460 * being compressed or decompressed, possibly while
6461 * the VM object is not locked, so it could race
6462 * with us.
6463 *
6464 * We can't really synchronize this without possibly
6465 * causing a deadlock when the compressor needs to
6466 * allocate or free memory while compressing or
6467 * decompressing a page from a purgeable object
6468 * mapped in the kernel_map...
6469 *
6470 * So let's not attempt to purge the compressor
6471 * pager if there's any kind of operation in
6472 * progress on the VM object.
6473 */
6474 skipped_object = TRUE;
6475 }
6476 }
6477
6478 vm_object_lock_assert_exclusive(object);
6479
6480 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
6481 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
6482 object_page_count,
6483 pgcount,
6484 skipped_object,
6485 0);
6486
6487 }
6488
6489
6490 /*
6491 * vm_object_purgeable_control() allows the caller to control and investigate the
6492 * state of a purgeable object. A purgeable object is created via a call to
6493 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
6494 * never be coalesced with any other object -- even other purgeable objects --
6495 * and will thus always remain a distinct object. A purgeable object has
6496 * special semantics when its reference count is exactly 1. If its reference
6497 * count is greater than 1, then a purgeable object will behave like a normal
6498 * object and attempts to use this interface will result in an error return
6499 * of KERN_INVALID_ARGUMENT.
6500 *
6501 * A purgeable object may be put into a "volatile" state which will make the
6502 * object's pages eligible for being reclaimed without paging to backing
6503 * store if the system runs low on memory. If the pages in a volatile
6504 * purgeable object are reclaimed, the purgeable object is said to have been
6505 * "emptied." When a purgeable object is emptied the system will reclaim as
6506 * many pages from the object as it can in a convenient manner (pages already
6507 * en route to backing store or busy for other reasons are left as is). When
6508 * a purgeable object is made volatile, its pages will generally be reclaimed
6509 * before other pages in the application's working set. This semantic is
6510 * generally used by applications which can recreate the data in the object
6511 * faster than it can be paged in. One such example might be media assets
6512 * which can be reread from a much faster RAID volume.
6513 *
6514 * A purgeable object may be designated as "non-volatile" which means it will
6515 * behave like all other objects in the system with pages being written to and
6516 * read from backing store as needed to satisfy system memory needs. If the
6517 * object was emptied before the object was made non-volatile, that fact will
6518 * be returned as the old state of the purgeable object (see
6519 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
6520 * were reclaimed as part of emptying the object will be refaulted in as
6521 * zero-fill on demand. It is up to the application to note that an object
6522 * was emptied and recreate the object's contents if necessary. When a
6523 * purgeable object is made non-volatile, its pages will generally not be paged
6524 * out to backing store in the immediate future. A purgeable object may also
6525 * be manually emptied.
6526 *
6527 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
6528 * volatile purgeable object may be queried at any time. This information may
6529 * be used as a control input to let the application know when the system is
6530 * experiencing memory pressure and is reclaiming memory.
6531 *
6532 * The specified address may be any address within the purgeable object. If
6533 * the specified address does not represent any object in the target task's
6534 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
6535 * object containing the specified address is not a purgeable object, then
6536 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
6537 * returned.
6538 *
6539 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
6540 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
6541 * state is used to set the new state of the purgeable object and return its
6542 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
6543 * object is returned in the parameter state.
6544 *
6545 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
6546 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
6547 * the non-volatile, volatile and volatile/empty states described above.
6548 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
6549 * immediately reclaim as many pages in the object as can be conveniently
6550 * collected (some may have already been written to backing store or be
6551 * otherwise busy).
6552 *
6553 * The process of making a purgeable object non-volatile and determining its
6554 * previous state is atomic. Thus, if a purgeable object is made
6555 * VM_PURGABLE_NONVOLATILE and the old state is returned as
6556 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
6557 * completely intact and will remain so until the object is made volatile
6558 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
6559 * was reclaimed while it was in a volatile state and its previous contents
6560 * have been lost.
6561 */
6562 /*
6563 * The object must be locked.
6564 */
6565 kern_return_t
6566 vm_object_purgable_control(
6567 vm_object_t object,
6568 vm_purgable_t control,
6569 int *state)
6570 {
6571 int old_state;
6572 int new_state;
6573
6574 if (object == VM_OBJECT_NULL) {
6575 /*
6576 * Object must already be present or it can't be purgeable.
6577 */
6578 return KERN_INVALID_ARGUMENT;
6579 }
6580
6581 vm_object_lock_assert_exclusive(object);
6582
6583 /*
6584 * Get current state of the purgeable object.
6585 */
6586 old_state = object->purgable;
6587 if (old_state == VM_PURGABLE_DENY)
6588 return KERN_INVALID_ARGUMENT;
6589
6590 /* purgeable can't have delayed copies - now or in the future */
6591 assert(object->copy == VM_OBJECT_NULL);
6592 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6593
6594 /*
6595 * Execute the desired operation.
6596 */
6597 if (control == VM_PURGABLE_GET_STATE) {
6598 *state = old_state;
6599 return KERN_SUCCESS;
6600 }
6601
6602 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
6603 object->volatile_empty = TRUE;
6604 }
6605 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
6606 object->volatile_fault = TRUE;
6607 }
6608
6609 new_state = *state & VM_PURGABLE_STATE_MASK;
6610 if (new_state == VM_PURGABLE_VOLATILE &&
6611 object->volatile_empty) {
6612 new_state = VM_PURGABLE_EMPTY;
6613 }
6614
6615 switch (new_state) {
6616 case VM_PURGABLE_DENY:
6617 case VM_PURGABLE_NONVOLATILE:
6618 object->purgable = new_state;
6619
6620 if (old_state == VM_PURGABLE_VOLATILE) {
6621 unsigned int delta;
6622
6623 assert(object->resident_page_count >=
6624 object->wired_page_count);
6625 delta = (object->resident_page_count -
6626 object->wired_page_count);
6627
6628 assert(vm_page_purgeable_count >= delta);
6629
6630 if (delta != 0) {
6631 OSAddAtomic(-delta,
6632 (SInt32 *)&vm_page_purgeable_count);
6633 }
6634 if (object->wired_page_count != 0) {
6635 assert(vm_page_purgeable_wired_count >=
6636 object->wired_page_count);
6637 OSAddAtomic(-object->wired_page_count,
6638 (SInt32 *)&vm_page_purgeable_wired_count);
6639 }
6640
6641 vm_page_lock_queues();
6642
6643 /* object should be on a queue */
6644 assert(object->objq.next != NULL &&
6645 object->objq.prev != NULL);
6646 purgeable_q_t queue;
6647
6648 /*
6649 * Move object from its volatile queue to the
6650 * non-volatile queue...
6651 */
6652 queue = vm_purgeable_object_remove(object);
6653 assert(queue);
6654
6655 if (object->purgeable_when_ripe) {
6656 vm_purgeable_token_delete_last(queue);
6657 }
6658 assert(queue->debug_count_objects>=0);
6659
6660 vm_page_unlock_queues();
6661 }
6662 if (old_state == VM_PURGABLE_VOLATILE ||
6663 old_state == VM_PURGABLE_EMPTY) {
6664 /*
6665 * Transfer the object's pages from the volatile to
6666 * non-volatile ledgers.
6667 */
6668 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
6669 FALSE);
6670 }
6671
6672 break;
6673
6674 case VM_PURGABLE_VOLATILE:
6675 if (object->volatile_fault) {
6676 vm_page_t p;
6677 int refmod;
6678
6679 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
6680 if (p->busy ||
6681 VM_PAGE_WIRED(p) ||
6682 p->fictitious) {
6683 continue;
6684 }
6685 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6686 if ((refmod & VM_MEM_MODIFIED) &&
6687 !p->dirty) {
6688 SET_PAGE_DIRTY(p, FALSE);
6689 }
6690 }
6691 }
6692
6693 if (old_state == VM_PURGABLE_EMPTY &&
6694 object->resident_page_count == 0 &&
6695 object->pager == NULL)
6696 break;
6697
6698 purgeable_q_t queue;
6699
6700 /* find the correct queue */
6701 if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
6702 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
6703 else {
6704 if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
6705 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6706 else
6707 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
6708 }
6709
6710 if (old_state == VM_PURGABLE_NONVOLATILE ||
6711 old_state == VM_PURGABLE_EMPTY) {
6712 unsigned int delta;
6713
6714 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6715 VM_PURGABLE_NO_AGING) {
6716 object->purgeable_when_ripe = FALSE;
6717 } else {
6718 object->purgeable_when_ripe = TRUE;
6719 }
6720
6721 if (object->purgeable_when_ripe) {
6722 kern_return_t result;
6723
6724 /* try to add token... this can fail */
6725 vm_page_lock_queues();
6726
6727 result = vm_purgeable_token_add(queue);
6728 if (result != KERN_SUCCESS) {
6729 vm_page_unlock_queues();
6730 return result;
6731 }
6732 vm_page_unlock_queues();
6733 }
6734
6735 assert(object->resident_page_count >=
6736 object->wired_page_count);
6737 delta = (object->resident_page_count -
6738 object->wired_page_count);
6739
6740 if (delta != 0) {
6741 OSAddAtomic(delta,
6742 &vm_page_purgeable_count);
6743 }
6744 if (object->wired_page_count != 0) {
6745 OSAddAtomic(object->wired_page_count,
6746 &vm_page_purgeable_wired_count);
6747 }
6748
6749 object->purgable = new_state;
6750
6751 /* object should be on "non-volatile" queue */
6752 assert(object->objq.next != NULL);
6753 assert(object->objq.prev != NULL);
6754 }
6755 else if (old_state == VM_PURGABLE_VOLATILE) {
6756 purgeable_q_t old_queue;
6757 boolean_t purgeable_when_ripe;
6758
6759 /*
6760 * if reassigning priorities / purgeable groups, we don't change the
6761 * token queue. So moving priorities will not make pages stay around longer.
6762 * Reasoning is that the algorithm gives most priority to the most important
6763 * object. If a new token is added, the most important object's priority is boosted.
6764 * This biases the system already for purgeable queues that move a lot.
6765 * It doesn't seem more biasing is necessary in this case, where no new object is added.
6766 */
6767 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6768
6769 old_queue = vm_purgeable_object_remove(object);
6770 assert(old_queue);
6771
6772 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6773 VM_PURGABLE_NO_AGING) {
6774 purgeable_when_ripe = FALSE;
6775 } else {
6776 purgeable_when_ripe = TRUE;
6777 }
6778
6779 if (old_queue != queue ||
6780 (purgeable_when_ripe !=
6781 object->purgeable_when_ripe)) {
6782 kern_return_t result;
6783
6784 /* Changing queue. Have to move token. */
6785 vm_page_lock_queues();
6786 if (object->purgeable_when_ripe) {
6787 vm_purgeable_token_delete_last(old_queue);
6788 }
6789 object->purgeable_when_ripe = purgeable_when_ripe;
6790 if (object->purgeable_when_ripe) {
6791 result = vm_purgeable_token_add(queue);
6792 assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */
6793 }
6794 vm_page_unlock_queues();
6795
6796 }
6797 }
6798 vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
6799 if (old_state == VM_PURGABLE_NONVOLATILE) {
6800 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
6801 FALSE);
6802 }
6803
6804 assert(queue->debug_count_objects>=0);
6805
6806 break;
6807
6808
6809 case VM_PURGABLE_EMPTY:
6810 if (object->volatile_fault) {
6811 vm_page_t p;
6812 int refmod;
6813
6814 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
6815 if (p->busy ||
6816 VM_PAGE_WIRED(p) ||
6817 p->fictitious) {
6818 continue;
6819 }
6820 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6821 if ((refmod & VM_MEM_MODIFIED) &&
6822 !p->dirty) {
6823 SET_PAGE_DIRTY(p, FALSE);
6824 }
6825 }
6826 }
6827
6828 if (old_state == new_state) {
6829 /* nothing changes */
6830 break;
6831 }
6832
6833 assert(old_state == VM_PURGABLE_NONVOLATILE ||
6834 old_state == VM_PURGABLE_VOLATILE);
6835 if (old_state == VM_PURGABLE_VOLATILE) {
6836 purgeable_q_t old_queue;
6837
6838 /* object should be on a queue */
6839 assert(object->objq.next != NULL &&
6840 object->objq.prev != NULL);
6841
6842 old_queue = vm_purgeable_object_remove(object);
6843 assert(old_queue);
6844 if (object->purgeable_when_ripe) {
6845 vm_page_lock_queues();
6846 vm_purgeable_token_delete_first(old_queue);
6847 vm_page_unlock_queues();
6848 }
6849 }
6850
6851 if (old_state == VM_PURGABLE_NONVOLATILE) {
6852 /*
6853 * This object's pages were previously accounted as
6854 * "non-volatile" and now need to be accounted as
6855 * "volatile".
6856 */
6857 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
6858 FALSE);
6859 /*
6860 * Set to VM_PURGABLE_EMPTY because the pages are no
6861 * longer accounted in the "non-volatile" ledger
6862 * and are also not accounted for in
6863 * "vm_page_purgeable_count".
6864 */
6865 object->purgable = VM_PURGABLE_EMPTY;
6866 }
6867
6868 (void) vm_object_purge(object, 0);
6869 assert(object->purgable == VM_PURGABLE_EMPTY);
6870
6871 break;
6872 }
6873
6874 *state = old_state;
6875
6876 vm_object_lock_assert_exclusive(object);
6877
6878 return KERN_SUCCESS;
6879 }
6880
6881 kern_return_t
6882 vm_object_get_page_counts(
6883 vm_object_t object,
6884 vm_object_offset_t offset,
6885 vm_object_size_t size,
6886 unsigned int *resident_page_count,
6887 unsigned int *dirty_page_count)
6888 {
6889
6890 kern_return_t kr = KERN_SUCCESS;
6891 boolean_t count_dirty_pages = FALSE;
6892 vm_page_t p = VM_PAGE_NULL;
6893 unsigned int local_resident_count = 0;
6894 unsigned int local_dirty_count = 0;
6895 vm_object_offset_t cur_offset = 0;
6896 vm_object_offset_t end_offset = 0;
6897
6898 if (object == VM_OBJECT_NULL)
6899 return KERN_INVALID_ARGUMENT;
6900
6901
6902 cur_offset = offset;
6903
6904 end_offset = offset + size;
6905
6906 vm_object_lock_assert_exclusive(object);
6907
6908 if (dirty_page_count != NULL) {
6909
6910 count_dirty_pages = TRUE;
6911 }
6912
6913 if (resident_page_count != NULL && count_dirty_pages == FALSE) {
6914 /*
6915 * Fast path when:
6916 * - we only want the resident page count, and,
6917 * - the entire object is exactly covered by the request.
6918 */
6919 if (offset == 0 && (object->vo_size == size)) {
6920
6921 *resident_page_count = object->resident_page_count;
6922 goto out;
6923 }
6924 }
6925
6926 if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
6927
6928 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
6929
6930 if (p->offset >= cur_offset && p->offset < end_offset) {
6931
6932 local_resident_count++;
6933
6934 if (count_dirty_pages) {
6935
6936 if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6937
6938 local_dirty_count++;
6939 }
6940 }
6941 }
6942 }
6943 } else {
6944
6945 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6946
6947 p = vm_page_lookup(object, cur_offset);
6948
6949 if (p != VM_PAGE_NULL) {
6950
6951 local_resident_count++;
6952
6953 if (count_dirty_pages) {
6954
6955 if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6956
6957 local_dirty_count++;
6958 }
6959 }
6960 }
6961 }
6962
6963 }
6964
6965 if (resident_page_count != NULL) {
6966 *resident_page_count = local_resident_count;
6967 }
6968
6969 if (dirty_page_count != NULL) {
6970 *dirty_page_count = local_dirty_count;
6971 }
6972
6973 out:
6974 return kr;
6975 }
6976
6977
6978 #if TASK_SWAPPER
6979 /*
6980 * vm_object_res_deallocate
6981 *
6982 * (recursively) decrement residence counts on vm objects and their shadows.
6983 * Called from vm_object_deallocate and when swapping out an object.
6984 *
6985 * The object is locked, and remains locked throughout the function,
6986 * even as we iterate down the shadow chain. Locks on intermediate objects
6987 * will be dropped, but not the original object.
6988 *
6989 * NOTE: this function used to use recursion, rather than iteration.
6990 */
6991
6992 __private_extern__ void
6993 vm_object_res_deallocate(
6994 vm_object_t object)
6995 {
6996 vm_object_t orig_object = object;
6997 /*
6998 * Object is locked so it can be called directly
6999 * from vm_object_deallocate. Original object is never
7000 * unlocked.
7001 */
7002 assert(object->res_count > 0);
7003 while (--object->res_count == 0) {
7004 assert(object->ref_count >= object->res_count);
7005 vm_object_deactivate_all_pages(object);
7006 /* iterate on shadow, if present */
7007 if (object->shadow != VM_OBJECT_NULL) {
7008 vm_object_t tmp_object = object->shadow;
7009 vm_object_lock(tmp_object);
7010 if (object != orig_object)
7011 vm_object_unlock(object);
7012 object = tmp_object;
7013 assert(object->res_count > 0);
7014 } else
7015 break;
7016 }
7017 if (object != orig_object)
7018 vm_object_unlock(object);
7019 }
7020
7021 /*
7022 * vm_object_res_reference
7023 *
7024 * Internal function to increment residence count on a vm object
7025 * and its shadows. It is called only from vm_object_reference, and
7026 * when swapping in a vm object, via vm_map_swap.
7027 *
7028 * The object is locked, and remains locked throughout the function,
7029 * even as we iterate down the shadow chain. Locks on intermediate objects
7030 * will be dropped, but not the original object.
7031 *
7032 * NOTE: this function used to use recursion, rather than iteration.
7033 */
7034
7035 __private_extern__ void
7036 vm_object_res_reference(
7037 vm_object_t object)
7038 {
7039 vm_object_t orig_object = object;
7040 /*
7041 * Object is locked, so this can be called directly
7042 * from vm_object_reference. This lock is never released.
7043 */
7044 while ((++object->res_count == 1) &&
7045 (object->shadow != VM_OBJECT_NULL)) {
7046 vm_object_t tmp_object = object->shadow;
7047
7048 assert(object->ref_count >= object->res_count);
7049 vm_object_lock(tmp_object);
7050 if (object != orig_object)
7051 vm_object_unlock(object);
7052 object = tmp_object;
7053 }
7054 if (object != orig_object)
7055 vm_object_unlock(object);
7056 assert(orig_object->ref_count >= orig_object->res_count);
7057 }
7058 #endif /* TASK_SWAPPER */
7059
7060 /*
7061 * vm_object_reference:
7062 *
7063 * Gets another reference to the given object.
7064 */
7065 #ifdef vm_object_reference
7066 #undef vm_object_reference
7067 #endif
7068 __private_extern__ void
7069 vm_object_reference(
7070 vm_object_t object)
7071 {
7072 if (object == VM_OBJECT_NULL)
7073 return;
7074
7075 vm_object_lock(object);
7076 assert(object->ref_count > 0);
7077 vm_object_reference_locked(object);
7078 vm_object_unlock(object);
7079 }
7080
7081 #ifdef MACH_BSD
7082 /*
7083 * Scale the vm_object_cache.
7084 * This is required to make sure that the vm_object_cache is big
7085 * enough to effectively cache the mapped file.
7086 * This is really important with UBC as all the regular file vnodes
7087 * have a memory object associated with them. Having this cache too
7088 * small results in rapid reclaim of vnodes and hurts performance a LOT!
7089 *
7090 * This is also needed as number of vnodes can be dynamically scaled.
7091 */
7092 kern_return_t
7093 adjust_vm_object_cache(
7094 __unused vm_size_t oval,
7095 __unused vm_size_t nval)
7096 {
7097 #if VM_OBJECT_CACHE
7098 vm_object_cached_max = nval;
7099 vm_object_cache_trim(FALSE);
7100 #endif
7101 return (KERN_SUCCESS);
7102 }
7103 #endif /* MACH_BSD */
7104
7105
7106 /*
7107 * vm_object_transpose
7108 *
7109 * This routine takes two VM objects of the same size and exchanges
7110 * their backing store.
7111 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
7112 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
7113 *
7114 * The VM objects must not be locked by caller.
7115 */
7116 unsigned int vm_object_transpose_count = 0;
7117 kern_return_t
7118 vm_object_transpose(
7119 vm_object_t object1,
7120 vm_object_t object2,
7121 vm_object_size_t transpose_size)
7122 {
7123 vm_object_t tmp_object;
7124 kern_return_t retval;
7125 boolean_t object1_locked, object2_locked;
7126 vm_page_t page;
7127 vm_object_offset_t page_offset;
7128 lck_mtx_t *hash_lck;
7129 vm_object_hash_entry_t hash_entry;
7130
7131 tmp_object = VM_OBJECT_NULL;
7132 object1_locked = FALSE; object2_locked = FALSE;
7133
7134 if (object1 == object2 ||
7135 object1 == VM_OBJECT_NULL ||
7136 object2 == VM_OBJECT_NULL) {
7137 /*
7138 * If the 2 VM objects are the same (or either is NULL),
7139 * there's no point in exchanging their backing store.
7140 */
7141 retval = KERN_INVALID_VALUE;
7142 goto done;
7143 }
7144
7145 /*
7146 * Since we need to lock both objects at the same time,
7147 * make sure we always lock them in the same order to
7148 * avoid deadlocks.
7149 */
7150 if (object1 > object2) {
7151 tmp_object = object1;
7152 object1 = object2;
7153 object2 = tmp_object;
7154 }
7155
7156 /*
7157 * Allocate a temporary VM object to hold object1's contents
7158 * while we copy object2 to object1.
7159 */
7160 tmp_object = vm_object_allocate(transpose_size);
7161 vm_object_lock(tmp_object);
7162 tmp_object->can_persist = FALSE;
7163
7164
7165 /*
7166 * Grab control of the 1st VM object.
7167 */
7168 vm_object_lock(object1);
7169 object1_locked = TRUE;
7170 if (!object1->alive || object1->terminating ||
7171 object1->copy || object1->shadow || object1->shadowed ||
7172 object1->purgable != VM_PURGABLE_DENY) {
7173 /*
7174 * We don't deal with copy or shadow objects (yet).
7175 */
7176 retval = KERN_INVALID_VALUE;
7177 goto done;
7178 }
7179 /*
7180 * We're about to mess with the object's backing store and
7181 * taking a "paging_in_progress" reference wouldn't be enough
7182 * to prevent any paging activity on this object, so the caller should
7183 * have "quiesced" the objects beforehand, via a UPL operation with
7184 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
7185 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
7186 *
7187 * Wait for any paging operation to complete (but only paging, not
7188 * other kinds of activity not linked to the pager). After we're
7189 * satisfied that there's no more paging in progress, we keep the
7190 * object locked, to guarantee that no one tries to access its pager.
7191 */
7192 vm_object_paging_only_wait(object1, THREAD_UNINT);
7193
7194 /*
7195 * Same as above for the 2nd object...
7196 */
7197 vm_object_lock(object2);
7198 object2_locked = TRUE;
7199 if (! object2->alive || object2->terminating ||
7200 object2->copy || object2->shadow || object2->shadowed ||
7201 object2->purgable != VM_PURGABLE_DENY) {
7202 retval = KERN_INVALID_VALUE;
7203 goto done;
7204 }
7205 vm_object_paging_only_wait(object2, THREAD_UNINT);
7206
7207
7208 if (object1->vo_size != object2->vo_size ||
7209 object1->vo_size != transpose_size) {
7210 /*
7211 * If the 2 objects don't have the same size, we can't
7212 * exchange their backing stores or one would overflow.
7213 * If their size doesn't match the caller's
7214 * "transpose_size", we can't do it either because the
7215 * transpose operation will affect the entire span of
7216 * the objects.
7217 */
7218 retval = KERN_INVALID_VALUE;
7219 goto done;
7220 }
7221
7222
7223 /*
7224 * Transpose the lists of resident pages.
7225 * This also updates the resident_page_count and the memq_hint.
7226 */
7227 if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
7228 /*
7229 * No pages in object1, just transfer pages
7230 * from object2 to object1. No need to go through
7231 * an intermediate object.
7232 */
7233 while (!vm_page_queue_empty(&object2->memq)) {
7234 page = (vm_page_t) vm_page_queue_first(&object2->memq);
7235 vm_page_rename(page, object1, page->offset, FALSE);
7236 }
7237 assert(vm_page_queue_empty(&object2->memq));
7238 } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
7239 /*
7240 * No pages in object2, just transfer pages
7241 * from object1 to object2. No need to go through
7242 * an intermediate object.
7243 */
7244 while (!vm_page_queue_empty(&object1->memq)) {
7245 page = (vm_page_t) vm_page_queue_first(&object1->memq);
7246 vm_page_rename(page, object2, page->offset, FALSE);
7247 }
7248 assert(vm_page_queue_empty(&object1->memq));
7249 } else {
7250 /* transfer object1's pages to tmp_object */
7251 while (!vm_page_queue_empty(&object1->memq)) {
7252 page = (vm_page_t) vm_page_queue_first(&object1->memq);
7253 page_offset = page->offset;
7254 vm_page_remove(page, TRUE);
7255 page->offset = page_offset;
7256 vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, listq);
7257 }
7258 assert(vm_page_queue_empty(&object1->memq));
7259 /* transfer object2's pages to object1 */
7260 while (!vm_page_queue_empty(&object2->memq)) {
7261 page = (vm_page_t) vm_page_queue_first(&object2->memq);
7262 vm_page_rename(page, object1, page->offset, FALSE);
7263 }
7264 assert(vm_page_queue_empty(&object2->memq));
7265 /* transfer tmp_object's pages to object2 */
7266 while (!vm_page_queue_empty(&tmp_object->memq)) {
7267 page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
7268 vm_page_queue_remove(&tmp_object->memq, page,
7269 vm_page_t, listq);
7270 vm_page_insert(page, object2, page->offset);
7271 }
7272 assert(vm_page_queue_empty(&tmp_object->memq));
7273 }
7274
7275 #define __TRANSPOSE_FIELD(field) \
7276 MACRO_BEGIN \
7277 tmp_object->field = object1->field; \
7278 object1->field = object2->field; \
7279 object2->field = tmp_object->field; \
7280 MACRO_END
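/*
 * __TRANSPOSE_FIELD swaps a field between object1 and object2, using the
 * corresponding field of tmp_object as scratch space.
 */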
7281
7282 /* "Lock" refers to the object not its contents */
7283 /* "size" should be identical */
7284 assert(object1->vo_size == object2->vo_size);
7285 /* "memq_hint" was updated above when transposing pages */
7286 /* "ref_count" refers to the object not its contents */
7287 #if TASK_SWAPPER
7288 /* "res_count" refers to the object not its contents */
7289 #endif
7290 /* "resident_page_count" was updated above when transposing pages */
7291 /* "wired_page_count" was updated above when transposing pages */
7292 /* "reusable_page_count" was updated above when transposing pages */
7293 /* there should be no "copy" */
7294 assert(!object1->copy);
7295 assert(!object2->copy);
7296 /* there should be no "shadow" */
7297 assert(!object1->shadow);
7298 assert(!object2->shadow);
7299 __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
7300 __TRANSPOSE_FIELD(pager);
7301 __TRANSPOSE_FIELD(paging_offset);
7302 __TRANSPOSE_FIELD(pager_control);
7303 /* update the memory_objects' pointers back to the VM objects */
7304 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
7305 memory_object_control_collapse(object1->pager_control,
7306 object1);
7307 }
7308 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
7309 memory_object_control_collapse(object2->pager_control,
7310 object2);
7311 }
7312 __TRANSPOSE_FIELD(copy_strategy);
7313 /* "paging_in_progress" refers to the object not its contents */
7314 assert(!object1->paging_in_progress);
7315 assert(!object2->paging_in_progress);
7316 assert(object1->activity_in_progress);
7317 assert(object2->activity_in_progress);
7318 /* "all_wanted" refers to the object not its contents */
7319 __TRANSPOSE_FIELD(pager_created);
7320 __TRANSPOSE_FIELD(pager_initialized);
7321 __TRANSPOSE_FIELD(pager_ready);
7322 __TRANSPOSE_FIELD(pager_trusted);
7323 __TRANSPOSE_FIELD(can_persist);
7324 __TRANSPOSE_FIELD(internal);
7325 __TRANSPOSE_FIELD(temporary);
7326 __TRANSPOSE_FIELD(private);
7327 __TRANSPOSE_FIELD(pageout);
7328 /* "alive" should be set */
7329 assert(object1->alive);
7330 assert(object2->alive);
7331 /* "purgeable" should be non-purgeable */
7332 assert(object1->purgable == VM_PURGABLE_DENY);
7333 assert(object2->purgable == VM_PURGABLE_DENY);
7334 /* "shadowed" refers to the the object not its contents */
7335 __TRANSPOSE_FIELD(purgeable_when_ripe);
7336 __TRANSPOSE_FIELD(advisory_pageout);
7337 __TRANSPOSE_FIELD(true_share);
7338 /* "terminating" should not be set */
7339 assert(!object1->terminating);
7340 assert(!object2->terminating);
7341 __TRANSPOSE_FIELD(named);
7342 /* "shadow_severed" refers to the object not its contents */
7343 __TRANSPOSE_FIELD(phys_contiguous);
7344 __TRANSPOSE_FIELD(nophyscache);
7345 /* "cached_list.next" points to transposed object */
7346 object1->cached_list.next = (queue_entry_t) object2;
7347 object2->cached_list.next = (queue_entry_t) object1;
7348 /* "cached_list.prev" should be NULL */
7349 assert(object1->cached_list.prev == NULL);
7350 assert(object2->cached_list.prev == NULL);
7351 /* "msr_q" is linked to the object not its contents */
7352 assert(queue_empty(&object1->msr_q));
7353 assert(queue_empty(&object2->msr_q));
7354 __TRANSPOSE_FIELD(last_alloc);
7355 __TRANSPOSE_FIELD(sequential);
7356 __TRANSPOSE_FIELD(pages_created);
7357 __TRANSPOSE_FIELD(pages_used);
7358 __TRANSPOSE_FIELD(scan_collisions);
7359 __TRANSPOSE_FIELD(cow_hint);
7360 #if MACH_ASSERT
7361 __TRANSPOSE_FIELD(paging_object);
7362 #endif
7363 __TRANSPOSE_FIELD(wimg_bits);
7364 __TRANSPOSE_FIELD(set_cache_attr);
7365 __TRANSPOSE_FIELD(code_signed);
7366 if (object1->hashed) {
7367 hash_lck = vm_object_hash_lock_spin(object2->pager);
7368 hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
7369 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
7370 hash_entry->object = object2;
7371 vm_object_hash_unlock(hash_lck);
7372 }
7373 if (object2->hashed) {
7374 hash_lck = vm_object_hash_lock_spin(object1->pager);
7375 hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
7376 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
7377 hash_entry->object = object1;
7378 vm_object_hash_unlock(hash_lck);
7379 }
7380 __TRANSPOSE_FIELD(hashed);
7381 object1->transposed = TRUE;
7382 object2->transposed = TRUE;
7383 __TRANSPOSE_FIELD(mapping_in_progress);
7384 __TRANSPOSE_FIELD(volatile_empty);
7385 __TRANSPOSE_FIELD(volatile_fault);
7386 __TRANSPOSE_FIELD(all_reusable);
7387 assert(object1->blocked_access);
7388 assert(object2->blocked_access);
7389 assert(object1->__object2_unused_bits == 0);
7390 assert(object2->__object2_unused_bits == 0);
7391 #if UPL_DEBUG
7392 /* "uplq" refers to the object not its contents (see upl_transpose()) */
7393 #endif
7394 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
7395 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
7396 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
7397 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
7398
7399 #undef __TRANSPOSE_FIELD
7400
7401 retval = KERN_SUCCESS;
7402
7403 done:
7404 /*
7405 * Cleanup.
7406 */
7407 if (tmp_object != VM_OBJECT_NULL) {
7408 vm_object_unlock(tmp_object);
7409 /*
7410 * Re-initialize the temporary object to avoid
7411 * deallocating a real pager.
7412 */
7413 _vm_object_allocate(transpose_size, tmp_object);
7414 vm_object_deallocate(tmp_object);
7415 tmp_object = VM_OBJECT_NULL;
7416 }
7417
7418 if (object1_locked) {
7419 vm_object_unlock(object1);
7420 object1_locked = FALSE;
7421 }
7422 if (object2_locked) {
7423 vm_object_unlock(object2);
7424 object2_locked = FALSE;
7425 }
7426
7427 vm_object_transpose_count++;
7428
7429 return retval;
7430 }
7431
7432
7433 /*
7434 * vm_object_cluster_size
7435 *
7436 * Determine how big a cluster we should issue an I/O for...
7437 *
7438 * Inputs: *start == offset of page needed
7439 * *length == maximum cluster pager can handle
7440 * Outputs: *start == beginning offset of cluster
7441 * *length == length of cluster to try
7442 *
7443 * The original *start will be encompassed by the cluster
7444 *
7445 */
7446 extern int speculative_reads_disabled;
7447 extern int ignore_is_ssd;
7448
7449 /*
7450 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
7451 * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
7452 * always be page-aligned. The derivation could involve operations (e.g. division)
7453 * that could give us non-page-size aligned values if we start out with values that
7454 * are odd multiples of PAGE_SIZE.
7455 */
7456 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
7457 unsigned int preheat_min_bytes = (1024 * 32);
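/*
 * With 4KB pages the defaults above give a minimum read-ahead of 32KB
 * (8 pages) and a maximum of MAX_UPL_TRANSFER_BYTES; for SSD-backed
 * objects the code in vm_object_cluster_size() halves the minimum and
 * divides the maximum by 8, re-truncating both to page boundaries.
 */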
7458
7459
7460 __private_extern__ void
7461 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
7462 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
7463 {
7464 vm_size_t pre_heat_size;
7465 vm_size_t tail_size;
7466 vm_size_t head_size;
7467 vm_size_t max_length;
7468 vm_size_t cluster_size;
7469 vm_object_offset_t object_size;
7470 vm_object_offset_t orig_start;
7471 vm_object_offset_t target_start;
7472 vm_object_offset_t offset;
7473 vm_behavior_t behavior;
7474 boolean_t look_behind = TRUE;
7475 boolean_t look_ahead = TRUE;
7476 boolean_t isSSD = FALSE;
7477 uint32_t throttle_limit;
7478 int sequential_run;
7479 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
7480 vm_size_t max_ph_size;
7481 vm_size_t min_ph_size;
7482
7483 assert( !(*length & PAGE_MASK));
7484 assert( !(*start & PAGE_MASK_64));
7485
7486 /*
7487 * remember maximum length of run requested
7488 */
7489 max_length = *length;
7490 /*
7491 * we'll always return a cluster size of at least
7492 * 1 page, since the original fault must always
7493 * be processed
7494 */
7495 *length = PAGE_SIZE;
7496 *io_streaming = 0;
7497
7498 if (speculative_reads_disabled || fault_info == NULL) {
7499 /*
7500 * no cluster... just fault the page in
7501 */
7502 return;
7503 }
7504 orig_start = *start;
7505 target_start = orig_start;
7506 cluster_size = round_page(fault_info->cluster_size);
7507 behavior = fault_info->behavior;
7508
7509 vm_object_lock(object);
7510
7511 if (object->pager == MEMORY_OBJECT_NULL)
7512 goto out; /* pager is gone for this object, nothing more to do */
7513
7514 if (!ignore_is_ssd)
7515 vnode_pager_get_isSSD(object->pager, &isSSD);
7516
7517 min_ph_size = round_page(preheat_min_bytes);
7518 max_ph_size = round_page(preheat_max_bytes);
7519
7520 if (isSSD) {
7521 min_ph_size /= 2;
7522 max_ph_size /= 8;
7523
7524 if (min_ph_size & PAGE_MASK_64) {
7525 min_ph_size = trunc_page(min_ph_size);
7526 }
7527
7528 if (max_ph_size & PAGE_MASK_64) {
7529 max_ph_size = trunc_page(max_ph_size);
7530 }
7531 }
7532
7533 if (min_ph_size < PAGE_SIZE)
7534 min_ph_size = PAGE_SIZE;
7535
7536 if (max_ph_size < PAGE_SIZE)
7537 max_ph_size = PAGE_SIZE;
7538 else if (max_ph_size > MAX_UPL_TRANSFER_BYTES)
7539 max_ph_size = MAX_UPL_TRANSFER_BYTES;
7540
7541 if (max_length > max_ph_size)
7542 max_length = max_ph_size;
7543
7544 if (max_length <= PAGE_SIZE)
7545 goto out;
7546
7547 if (object->internal)
7548 object_size = object->vo_size;
7549 else
7550 vnode_pager_get_object_size(object->pager, &object_size);
7551
7552 object_size = round_page_64(object_size);
7553
7554 if (orig_start >= object_size) {
7555 /*
7556 * fault occurred beyond the EOF...
7557 * we need to punt w/o changing the
7558 * starting offset
7559 */
7560 goto out;
7561 }
7562 if (object->pages_used > object->pages_created) {
7563 /*
7564 * must have wrapped our 32 bit counters
7565 * so reset
7566 */
7567 object->pages_used = object->pages_created = 0;
7568 }
7569 if ((sequential_run = object->sequential)) {
7570 if (sequential_run < 0) {
7571 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
7572 sequential_run = 0 - sequential_run;
7573 } else {
7574 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
7575 }
7576
7577 }
7578 switch (behavior) {
7579
7580 default:
7581 behavior = VM_BEHAVIOR_DEFAULT;
7582
7583 case VM_BEHAVIOR_DEFAULT:
7584 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
7585 goto out;
7586
7587 if (sequential_run >= (3 * PAGE_SIZE)) {
7588 pre_heat_size = sequential_run + PAGE_SIZE;
7589
7590 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
7591 look_behind = FALSE;
7592 else
7593 look_ahead = FALSE;
7594
7595 *io_streaming = 1;
7596 } else {
7597
7598 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
7599 /*
7600 * prime the pump
7601 */
7602 pre_heat_size = min_ph_size;
7603 } else {
7604 /*
7605 * Linear growth in PH size: The maximum size is max_length...
7606 * this calculation will result in a size that is neither a
7607 * power of 2 nor a multiple of PAGE_SIZE... so round
7608 * it up to the nearest PAGE_SIZE boundary
7609 */
7610 pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
7611
7612 if (pre_heat_size < min_ph_size)
7613 pre_heat_size = min_ph_size;
7614 else
7615 pre_heat_size = round_page(pre_heat_size);
7616 }
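/*
 * e.g. (illustrative): max_length == 256KB with pages_used == 100 and
 * pages_created == 400 yields pre_heat_size == 64KB here; a ratio that
 * doesn't divide evenly is rounded up to the next page boundary by the
 * round_page() above.
 */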
7617 }
7618 break;
7619
7620 case VM_BEHAVIOR_RANDOM:
7621 if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
7622 goto out;
7623 break;
7624
7625 case VM_BEHAVIOR_SEQUENTIAL:
7626 if ((pre_heat_size = cluster_size) == 0)
7627 pre_heat_size = sequential_run + PAGE_SIZE;
7628 look_behind = FALSE;
7629 *io_streaming = 1;
7630
7631 break;
7632
7633 case VM_BEHAVIOR_RSEQNTL:
7634 if ((pre_heat_size = cluster_size) == 0)
7635 pre_heat_size = sequential_run + PAGE_SIZE;
7636 look_ahead = FALSE;
7637 *io_streaming = 1;
7638
7639 break;
7640
7641 }
7642 throttle_limit = (uint32_t) max_length;
7643 assert(throttle_limit == max_length);
7644
7645 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
7646 if (max_length > throttle_limit)
7647 max_length = throttle_limit;
7648 }
7649 if (pre_heat_size > max_length)
7650 pre_heat_size = max_length;
7651
7652 if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
7653
7654 unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
7655
7656 if (consider_free < vm_page_throttle_limit) {
7657 pre_heat_size = trunc_page(pre_heat_size / 16);
7658 } else if (consider_free < vm_page_free_target) {
7659 pre_heat_size = trunc_page(pre_heat_size / 4);
7660 }
7661
7662 if (pre_heat_size < min_ph_size)
7663 pre_heat_size = min_ph_size;
7664 }
7665 if (look_ahead == TRUE) {
7666 if (look_behind == TRUE) {
7667 /*
7668 * if we get here it's due to a random access...
7669 * so we want to center the original fault address
7670 * within the cluster we will issue... make sure
7671 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7672 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7673 * necessarily an even number of pages so we need to truncate
7674 * the result to a PAGE_SIZE boundary
7675 */
7676 head_size = trunc_page(pre_heat_size / 2);
7677
7678 if (target_start > head_size)
7679 target_start -= head_size;
7680 else
7681 target_start = 0;
7682
7683 /*
7684 * 'target_start' at this point represents the beginning offset
7685 * of the cluster we are considering... 'orig_start' will be in
7686 * the center of this cluster if we didn't have to clip the start
7687 * due to running into the start of the file
7688 */
7689 }
7690 if ((target_start + pre_heat_size) > object_size)
7691 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
7692 /*
7693 * at this point calculate the number of pages beyond the original fault
7694 * address that we want to consider... this is guaranteed not to extend beyond
7695 * the current EOF...
7696 */
7697 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
7698 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
7699 } else {
7700 if (pre_heat_size > target_start) {
7701 /*
7702 * since pre_heat_size is always smaller than 2^32,
7703 * if it is larger than target_start (a 64 bit value)
7704 * it is safe to clip target_start to 32 bits
7705 */
7706 pre_heat_size = (vm_size_t) target_start;
7707 }
7708 tail_size = 0;
7709 }
7710 assert( !(target_start & PAGE_MASK_64));
7711 assert( !(pre_heat_size & PAGE_MASK_64));
7712
7713 if (pre_heat_size <= PAGE_SIZE)
7714 goto out;
7715
7716 if (look_behind == TRUE) {
7717 /*
7718 * take a look at the pages before the original
7719 * faulting offset... recalculate this in case
7720 * we had to clip 'pre_heat_size' above to keep
7721 * from running past the EOF.
7722 */
7723 head_size = pre_heat_size - tail_size - PAGE_SIZE;
7724
7725 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7726 /*
7727 * don't poke below the lowest offset
7728 */
7729 if (offset < fault_info->lo_offset)
7730 break;
7731 /*
7732 * for external objects or internal objects w/o a pager,
7733 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
7734 */
7735 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7736 break;
7737 }
7738 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7739 /*
7740 * don't bridge resident pages
7741 */
7742 break;
7743 }
7744 *start = offset;
7745 *length += PAGE_SIZE;
7746 }
7747 }
7748 if (look_ahead == TRUE) {
7749 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7750 /*
7751 * don't poke above the highest offset
7752 */
7753 if (offset >= fault_info->hi_offset)
7754 break;
7755 assert(offset < object_size);
7756
7757 /*
7758 * for external objects or internal objects w/o a pager,
7759 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
7760 */
7761 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7762 break;
7763 }
7764 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7765 /*
7766 * don't bridge resident pages
7767 */
7768 break;
7769 }
7770 *length += PAGE_SIZE;
7771 }
7772 }
7773 out:
7774 if (*length > max_length)
7775 *length = max_length;
7776
7777 vm_object_unlock(object);
7778
7779 DTRACE_VM1(clustersize, vm_size_t, *length);
7780 }
7781
7782
7783 /*
7784 * Allow manipulation of individual page state. This is actually part of
7785 * the UPL regimen but takes place on the VM object rather than on a UPL
7786 */
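/*
 * Illustrative usage (a sketch, not taken from a caller in this file):
 * a caller that needs the physical page number typically busies the
 * page first and releases it when done, e.g.:
 *
 *	vm_object_page_op(object, offset, UPL_POP_SET | UPL_POP_BUSY,
 *			  &phys, &flags);
 *	... use "phys" while the page remains busy ...
 *	vm_object_page_op(object, offset, UPL_POP_CLR | UPL_POP_BUSY,
 *			  NULL, NULL);
 */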
7787
7788 kern_return_t
7789 vm_object_page_op(
7790 vm_object_t object,
7791 vm_object_offset_t offset,
7792 int ops,
7793 ppnum_t *phys_entry,
7794 int *flags)
7795 {
7796 vm_page_t dst_page;
7797
7798 vm_object_lock(object);
7799
7800 if(ops & UPL_POP_PHYSICAL) {
7801 if(object->phys_contiguous) {
7802 if (phys_entry) {
7803 *phys_entry = (ppnum_t)
7804 (object->vo_shadow_offset >> PAGE_SHIFT);
7805 }
7806 vm_object_unlock(object);
7807 return KERN_SUCCESS;
7808 } else {
7809 vm_object_unlock(object);
7810 return KERN_INVALID_OBJECT;
7811 }
7812 }
7813 if(object->phys_contiguous) {
7814 vm_object_unlock(object);
7815 return KERN_INVALID_OBJECT;
7816 }
7817
7818 while(TRUE) {
7819 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
7820 vm_object_unlock(object);
7821 return KERN_FAILURE;
7822 }
7823
7824 /* Sync up on getting the busy bit */
7825 if((dst_page->busy || dst_page->cleaning) &&
7826 (((ops & UPL_POP_SET) &&
7827 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7828 /* someone else is playing with the page, we will */
7829 /* have to wait */
7830 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7831 continue;
7832 }
7833
7834 if (ops & UPL_POP_DUMP) {
7835 if (dst_page->pmapped == TRUE)
7836 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7837
7838 VM_PAGE_FREE(dst_page);
7839 break;
7840 }
7841
7842 if (flags) {
7843 *flags = 0;
7844
7845 /* Get the condition of flags before requested ops */
7846 /* are undertaken */
7847
7848 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
7849 if(dst_page->free_when_done) *flags |= UPL_POP_PAGEOUT;
7850 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
7851 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
7852 if(dst_page->busy) *flags |= UPL_POP_BUSY;
7853 }
7854
7855 /* The caller should have made a call either contingent with */
7856 /* or prior to this call to set UPL_POP_BUSY */
7857 if(ops & UPL_POP_SET) {
7858 /* The protection granted with this assert will */
7859 /* not be complete. If the caller violates the */
7860 /* convention and attempts to change page state */
7861 /* without first setting busy we may not see it */
7862 /* because the page may already be busy. However */
7863 /* if such violations occur we will assert sooner */
7864 /* or later. */
7865 assert(dst_page->busy || (ops & UPL_POP_BUSY));
7866 if (ops & UPL_POP_DIRTY) {
7867 SET_PAGE_DIRTY(dst_page, FALSE);
7868 }
7869 if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = TRUE;
7870 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
7871 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
7872 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
7873 }
7874
7875 if(ops & UPL_POP_CLR) {
7876 assert(dst_page->busy);
7877 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
7878 if (ops & UPL_POP_PAGEOUT) dst_page->free_when_done = FALSE;
7879 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
7880 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
7881 if (ops & UPL_POP_BUSY) {
7882 dst_page->busy = FALSE;
7883 PAGE_WAKEUP(dst_page);
7884 }
7885 }
7886
7887 if (dst_page->encrypted) {
7888 /*
7889 * ENCRYPTED SWAP:
7890 * We need to decrypt this encrypted page before the
7891 * caller can access its contents.
7892 * But if the caller really wants to access the page's
7893 * contents, they have to keep the page "busy".
7894 * Otherwise, the page could get recycled or re-encrypted
7895 * at any time.
7896 */
7897 if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
7898 dst_page->busy) {
7899 /*
7900 * The page is stable enough to be accessed by
7901 * the caller, so make sure its contents are
7902 * not encrypted.
7903 */
7904 vm_page_decrypt(dst_page, 0);
7905 } else {
7906 /*
7907 * The page is not busy, so don't bother
7908 * decrypting it, since anything could
7909 * happen to it between now and when the
7910 * caller wants to access it.
7911 * We should not give the caller access
7912 * to this page.
7913 */
7914 assert(!phys_entry);
7915 }
7916 }
7917
7918 if (phys_entry) {
7919 /*
7920 * The physical page number will remain valid
7921 * only if the page is kept busy.
7922 * ENCRYPTED SWAP: make sure we don't let the
7923 * caller access an encrypted page.
7924 */
7925 assert(dst_page->busy);
7926 assert(!dst_page->encrypted);
7927 *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
7928 }
7929
7930 break;
7931 }
7932
7933 vm_object_unlock(object);
7934 return KERN_SUCCESS;
7935
7936 }
7937
7938 /*
7939 * vm_object_range_op offers a performance enhancement over
7940 * vm_object_page_op for operations which do not require page-level
7941 * state to be returned from the call. Page_op was created to provide
7942 * a low-cost alternative to page manipulation via UPLs when only a single
7943 * page was involved. The range_op call extends the _op family of
7944 * functions to work on multiple pages, where the lack of page-level
7945 * state handling allows the caller to avoid the overhead of the UPL structures.
7946 */
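/*
 * Illustrative usage (a sketch, not taken from a caller in this file):
 * discard every resident page in [offset_beg, offset_end) and learn how
 * far the scan got before stopping, e.g.:
 *
 *	uint32_t range;
 *	vm_object_range_op(object, offset_beg, offset_end,
 *			   UPL_ROP_DUMP, &range);
 */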
7947
7948 kern_return_t
7949 vm_object_range_op(
7950 vm_object_t object,
7951 vm_object_offset_t offset_beg,
7952 vm_object_offset_t offset_end,
7953 int ops,
7954 uint32_t *range)
7955 {
7956 vm_object_offset_t offset;
7957 vm_page_t dst_page;
7958
7959 if (offset_end - offset_beg > (uint32_t) -1) {
7960 /* range is too big and would overflow "*range" */
7961 return KERN_INVALID_ARGUMENT;
7962 }
7963 if (object->resident_page_count == 0) {
7964 if (range) {
7965 if (ops & UPL_ROP_PRESENT) {
7966 *range = 0;
7967 } else {
7968 *range = (uint32_t) (offset_end - offset_beg);
7969 assert(*range == (offset_end - offset_beg));
7970 }
7971 }
7972 return KERN_SUCCESS;
7973 }
7974 vm_object_lock(object);
7975
7976 if (object->phys_contiguous) {
7977 vm_object_unlock(object);
7978 return KERN_INVALID_OBJECT;
7979 }
7980
7981 offset = offset_beg & ~PAGE_MASK_64;
7982
7983 while (offset < offset_end) {
7984 dst_page = vm_page_lookup(object, offset);
7985 if (dst_page != VM_PAGE_NULL) {
7986 if (ops & UPL_ROP_DUMP) {
7987 if (dst_page->busy || dst_page->cleaning) {
7988 /*
7989 * someone else is playing with the
7990 * page, we will have to wait
7991 */
7992 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7993 /*
7994 * need to look the page up again since its
7995 * state may have changed while we slept...
7996 * it might even belong to a different object
7997 * at this point
7998 */
7999 continue;
8000 }
8001 if (dst_page->laundry)
8002 vm_pageout_steal_laundry(dst_page, FALSE);
8003
8004 if (dst_page->pmapped == TRUE)
8005 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
8006
8007 VM_PAGE_FREE(dst_page);
8008
8009 } else if ((ops & UPL_ROP_ABSENT)
8010 && (!dst_page->absent || dst_page->busy)) {
8011 break;
8012 }
8013 } else if (ops & UPL_ROP_PRESENT)
8014 break;
8015
8016 offset += PAGE_SIZE;
8017 }
8018 vm_object_unlock(object);
8019
8020 if (range) {
8021 if (offset > offset_end)
8022 offset = offset_end;
8023 if(offset > offset_beg) {
8024 *range = (uint32_t) (offset - offset_beg);
8025 assert(*range == (offset - offset_beg));
8026 } else {
8027 *range = 0;
8028 }
8029 }
8030 return KERN_SUCCESS;
8031 }
8032
8033 /*
8034 * Used to point a pager directly to a range of memory (when the pager may be associated
8035 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
8036 * expect that the virtual address will denote the start of a range that is physically contiguous.
8037 */
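/*
 * Illustrative usage (a sketch; "control", "kbuf" and "size" are
 * hypothetical names, with "kbuf" assumed to be the kernel virtual
 * address of a physically contiguous, page-aligned buffer):
 *
 *	pager_map_to_phys_contiguous(control, 0, (addr64_t)kbuf, size);
 */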
8038 kern_return_t pager_map_to_phys_contiguous(
8039 memory_object_control_t object,
8040 memory_object_offset_t offset,
8041 addr64_t base_vaddr,
8042 vm_size_t size)
8043 {
8044 ppnum_t page_num;
8045 boolean_t clobbered_private;
8046 kern_return_t retval;
8047 vm_object_t pager_object;
8048
8049 page_num = pmap_find_phys(kernel_pmap, base_vaddr);
8050
8051 if (!page_num) {
8052 retval = KERN_FAILURE;
8053 goto out;
8054 }
8055
8056 pager_object = memory_object_control_to_vm_object(object);
8057
8058 if (!pager_object) {
8059 retval = KERN_FAILURE;
8060 goto out;
8061 }
8062
8063 clobbered_private = pager_object->private;
8064 if (pager_object->private != TRUE) {
8065 vm_object_lock(pager_object);
8066 pager_object->private = TRUE;
8067 vm_object_unlock(pager_object);
8068 }
8069 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
8070
8071 if (retval != KERN_SUCCESS) {
8072 if (pager_object->private != clobbered_private) {
8073 vm_object_lock(pager_object);
8074 pager_object->private = clobbered_private;
8075 vm_object_unlock(pager_object);
8076 }
8077 }
8078
8079 out:
8080 return retval;
8081 }
8082
8083 uint32_t scan_object_collision = 0;
8084
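/*
 * Object lock wrappers.  If the pageout scan thread has advertised
 * interest in this object (vm_pageout_scan_wants_object), pause briefly
 * before taking the lock so the scan thread isn't starved.
 */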
8085 void
8086 vm_object_lock(vm_object_t object)
8087 {
8088 if (object == vm_pageout_scan_wants_object) {
8089 scan_object_collision++;
8090 mutex_pause(2);
8091 }
8092 lck_rw_lock_exclusive(&object->Lock);
8093 #if DEVELOPMENT || DEBUG
8094 object->Lock_owner = current_thread();
8095 #endif
8096 }
8097
8098 boolean_t
8099 vm_object_lock_avoid(vm_object_t object)
8100 {
8101 if (object == vm_pageout_scan_wants_object) {
8102 scan_object_collision++;
8103 return TRUE;
8104 }
8105 return FALSE;
8106 }
8107
8108 boolean_t
8109 _vm_object_lock_try(vm_object_t object)
8110 {
8111 boolean_t retval;
8112
8113 retval = lck_rw_try_lock_exclusive(&object->Lock);
8114 #if DEVELOPMENT || DEBUG
8115 if (retval == TRUE)
8116 object->Lock_owner = current_thread();
8117 #endif
8118 return (retval);
8119 }
8120
8121 boolean_t
8122 vm_object_lock_try(vm_object_t object)
8123 {
8124 /*
8125 * Called from hibernate path so check before blocking.
8126 */
8127 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
8128 mutex_pause(2);
8129 }
8130 return _vm_object_lock_try(object);
8131 }
8132
8133 void
8134 vm_object_lock_shared(vm_object_t object)
8135 {
8136 if (vm_object_lock_avoid(object)) {
8137 mutex_pause(2);
8138 }
8139 lck_rw_lock_shared(&object->Lock);
8140 }
8141
8142 boolean_t
8143 vm_object_lock_try_shared(vm_object_t object)
8144 {
8145 if (vm_object_lock_avoid(object)) {
8146 mutex_pause(2);
8147 }
8148 return (lck_rw_try_lock_shared(&object->Lock));
8149 }
8150
8151 boolean_t
8152 vm_object_lock_upgrade(vm_object_t object)
8153 { boolean_t retval;
8154
8155 retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
8156 #if DEVELOPMENT || DEBUG
8157 if (retval == TRUE)
8158 object->Lock_owner = current_thread();
8159 #endif
8160 return (retval);
8161 }
8162
8163 void
8164 vm_object_unlock(vm_object_t object)
8165 {
8166 #if DEVELOPMENT || DEBUG
8167 if (object->Lock_owner) {
8168 if (object->Lock_owner != current_thread())
8169 panic("vm_object_unlock: not owner - %p\n", object);
8170 object->Lock_owner = 0;
8171 }
8172 #endif
8173 lck_rw_done(&object->Lock);
8174 }
8175
8176
8177 unsigned int vm_object_change_wimg_mode_count = 0;
8178
8179 /*
8180 * The object must be locked
8181 */
8182 void
8183 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
8184 {
8185 vm_page_t p;
8186
8187 vm_object_lock_assert_exclusive(object);
8188
8189 vm_object_paging_wait(object, THREAD_UNINT);
8190
8191 vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
8192
8193 if (!p->fictitious)
8194 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
8195 }
8196 if (wimg_mode == VM_WIMG_USE_DEFAULT)
8197 object->set_cache_attr = FALSE;
8198 else
8199 object->set_cache_attr = TRUE;
8200
8201 object->wimg_bits = wimg_mode;
8202
8203 vm_object_change_wimg_mode_count++;
8204 }
8205
8206 #if CONFIG_FREEZE
8207
8208 /*
8209 * This routine does the "relocation" of this object's
8210 * previously compressed pages, which may reside in any
8211 * number of compressed segments, into a set of compressed
8212 * segments dedicated to holding compressed pages that
8213 * belong to this object.
8214 */
8215
8216 extern void *freezer_chead;
8217 extern char *freezer_compressor_scratch_buf;
8218 extern int c_freezer_compression_count;
8219 extern AbsoluteTime c_freezer_last_yield_ts;
8220
8221 #define MAX_FREE_BATCH 32
8222 #define FREEZER_DUTY_CYCLE_ON_MS 5
8223 #define FREEZER_DUTY_CYCLE_OFF_MS 5
8224
8225 static int c_freezer_should_yield(void);
8226
8227
8228 static int
8229 c_freezer_should_yield()
8230 {
8231 AbsoluteTime cur_time;
8232 uint64_t nsecs;
8233
8234 assert(c_freezer_last_yield_ts);
8235 clock_get_uptime(&cur_time);
8236
8237 SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
8238 absolutetime_to_nanoseconds(cur_time, &nsecs);
8239
8240 if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS)
8241 return (1);
8242 return (0);
8243 }
8244
8245
8246 void
8247 vm_object_compressed_freezer_done()
8248 {
8249 vm_compressor_finished_filling(&freezer_chead);
8250 }
8251
8252
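/*
 * Freeze the pages of an internal object: relocate any already-compressed
 * pages into the freezer's dedicated compressed segments (when freezer
 * swap is active), then compress and free each remaining dirty or
 * precious resident page, yielding periodically to honor the freezer
 * duty cycle.
 */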
8253 void
8254 vm_object_compressed_freezer_pageout(
8255 vm_object_t object)
8256 {
8257 vm_page_t p;
8258 vm_page_t local_freeq = NULL;
8259 int local_freed = 0;
8260 kern_return_t retval = KERN_SUCCESS;
8261 int obj_resident_page_count_snapshot = 0;
8262
8263 assert(object != VM_OBJECT_NULL);
8264 assert(object->internal);
8265
8266 vm_object_lock(object);
8267
8268 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8269
8270 if (!object->pager_initialized) {
8271
8272 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8273
8274 if (!object->pager_initialized)
8275 vm_object_compressor_pager_create(object);
8276 }
8277
8278 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8279 vm_object_unlock(object);
8280 return;
8281 }
8282 }
8283
8284 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
8285 vm_object_offset_t curr_offset = 0;
8286
8287 /*
8288 * Go through the object and make sure that any
8289 * previously compressed pages are relocated into
8290 * a compressed segment associated with our "freezer_chead".
8291 */
8292 while (curr_offset < object->vo_size) {
8293
8294 curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
8295
8296 if (curr_offset == (vm_object_offset_t) -1)
8297 break;
8298
8299 retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);
8300
8301 if (retval != KERN_SUCCESS)
8302 break;
8303
8304 curr_offset += PAGE_SIZE_64;
8305 }
8306 }
8307
8308 /*
8309 * We can't hold the object lock while heading down into the compressed pager
8310 * layer because we might need the kernel map lock down there to allocate new
8311 * compressor data structures. And if this same object is mapped in the kernel
8312 * and there's a fault on it, then that thread will want the object lock while
8313 * holding the kernel map lock.
8314 *
8315 * Since we are going to drop/grab the object lock repeatedly, we must make sure
8316 * we won't be stuck in an infinite loop if the same page(s) keep getting
8317 * decompressed. So we grab a snapshot of the number of pages in the object and
8318 * we won't process any more than that number of pages.
8319 */
8320
8321 obj_resident_page_count_snapshot = object->resident_page_count;
8322
8323 vm_object_activity_begin(object);
8324
8325 while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) {
8326
8327 p = (vm_page_t)vm_page_queue_first(&object->memq);
8328
8329 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
8330
8331 vm_page_lockspin_queues();
8332
8333 if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) {
8334
8335 vm_page_unlock_queues();
8336
8337 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
8338
8339 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
8340 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
8341
8342 continue;
8343 }
8344
8345 if (p->pmapped == TRUE) {
8346 int refmod_state, pmap_flags;
8347
8348 if (p->dirty || p->precious) {
8349 pmap_flags = PMAP_OPTIONS_COMPRESSOR;
8350 } else {
8351 pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8352 }
8353
8354 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
8355 if (refmod_state & VM_MEM_MODIFIED) {
8356 SET_PAGE_DIRTY(p, FALSE);
8357 }
8358 }
8359
8360 if (p->dirty == FALSE && p->precious == FALSE) {
8361 /*
8362 * Clean and non-precious page.
8363 */
8364 vm_page_unlock_queues();
8365 VM_PAGE_FREE(p);
8366
8367 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
8368 continue;
8369 }
8370
8371 if (p->laundry)
8372 vm_pageout_steal_laundry(p, TRUE);
8373
8374 vm_page_queues_remove(p, TRUE);
8375
8376 vm_page_unlock_queues();
8377
8378
8379 /*
8380 * In case the compressor fails to compress this page, we need it at
8381 * the back of the object memq so that we don't keep trying to process it.
8382 * Make the move here while we have the object lock held.
8383 */
8384
8385 vm_page_queue_remove(&object->memq, p, vm_page_t, listq);
8386 vm_page_queue_enter(&object->memq, p, vm_page_t, listq);
8387
8388 /*
8389 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
8390 *
8391 * Mark the page busy so no one messes with it while we have the object lock dropped.
8392 */
8393
8394 p->busy = TRUE;
8395
8396 vm_object_activity_begin(object);
8397
8398 vm_object_unlock(object);
8399
8400 /*
8401 * arg3 == FALSE tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized.
8402 */
8403 if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) {
8404 /*
8405 * page has already been un-tabled from the object via 'vm_page_remove'
8406 */
8407 p->snext = local_freeq;
8408 local_freeq = p;
8409 local_freed++;
8410
8411 if (local_freed >= MAX_FREE_BATCH) {
8412
8413 vm_page_free_list(local_freeq, TRUE);
8414
8415 local_freeq = NULL;
8416 local_freed = 0;
8417 }
8418 c_freezer_compression_count++;
8419 }
8420 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
8421
8422 if (local_freed == 0 && c_freezer_should_yield()) {
8423
8424 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
8425 clock_get_uptime(&c_freezer_last_yield_ts);
8426 }
8427
8428 vm_object_lock(object);
8429 }
8430
8431 if (local_freeq) {
8432 vm_page_free_list(local_freeq, TRUE);
8433
8434 local_freeq = NULL;
8435 local_freed = 0;
8436 }
8437
8438 vm_object_activity_end(object);
8439
8440 vm_object_unlock(object);
8441
8442 if (c_freezer_should_yield()) {
8443
8444 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
8445 clock_get_uptime(&c_freezer_last_yield_ts);
8446 }
8447 }
8448
8449 #endif /* CONFIG_FREEZE */
8450
8451
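/*
 * Push this internal object's dirty/precious resident pages to the
 * compressor pageout queue (creating the compressor pager first if
 * needed); clean, non-precious pages are simply freed.
 */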
8452 void
8453 vm_object_pageout(
8454 vm_object_t object)
8455 {
8456 vm_page_t p, next;
8457 struct vm_pageout_queue *iq;
8458 boolean_t need_unlock = TRUE;
8459
8460 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT)
8461 return;
8462
8463 iq = &vm_pageout_queue_internal;
8464
8465 assert(object != VM_OBJECT_NULL );
8466
8467 vm_object_lock(object);
8468
8469 if (!object->internal ||
8470 object->terminating ||
8471 !object->alive) {
8472 vm_object_unlock(object);
8473 return;
8474 }
8475
8476 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8477
8478 if (!object->pager_initialized) {
8479
8480 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8481
8482 if (!object->pager_initialized)
8483 vm_object_compressor_pager_create(object);
8484 }
8485
8486 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8487 vm_object_unlock(object);
8488 return;
8489 }
8490 }
8491
8492 ReScan:
8493 next = (vm_page_t)vm_page_queue_first(&object->memq);
8494
8495 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
8496 p = next;
8497 next = (vm_page_t)vm_page_queue_next(&next->listq);
8498
8499 assert(p->vm_page_q_state != VM_PAGE_ON_FREE_Q);
8500
8501 if ((p->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q) ||
8502 p->encrypted_cleaning ||
8503 p->cleaning ||
8504 p->laundry ||
8505 p->busy ||
8506 p->absent ||
8507 p->error ||
8508 p->fictitious ||
8509 VM_PAGE_WIRED(p)) {
8510 /*
8511 * Page is already being cleaned or can't be cleaned.
8512 */
8513 continue;
8514 }
8515
8516 /* Throw to the pageout queue */
8517
8518 vm_page_lockspin_queues();
8519 need_unlock = TRUE;
8520
8521 if (vm_compressor_low_on_space()) {
8522 vm_page_unlock_queues();
8523 break;
8524 }
8525
8526 if (VM_PAGE_Q_THROTTLED(iq)) {
8527
8528 iq->pgo_draining = TRUE;
8529
8530 assert_wait((event_t) (&iq->pgo_laundry + 1),
8531 THREAD_INTERRUPTIBLE);
8532 vm_page_unlock_queues();
8533 vm_object_unlock(object);
8534
8535 thread_block(THREAD_CONTINUE_NULL);
8536
8537 vm_object_lock(object);
8538 goto ReScan;
8539 }
8540
8541 assert(!p->fictitious);
8542 assert(!p->busy);
8543 assert(!p->absent);
8544 assert(!p->unusual);
8545 assert(!p->error);
8546 assert(!VM_PAGE_WIRED(p));
8547 assert(!p->cleaning);
8548
8549 if (p->pmapped == TRUE) {
8550 int refmod_state;
8551 int pmap_options;
8552
8553 /*
8554 * Tell pmap the page should be accounted
8555 * for as "compressed" if it's been modified.
8556 */
8557 pmap_options =
8558 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8559 if (p->dirty || p->precious) {
8560 /*
8561 * We already know it's been modified,
8562 * so tell pmap to account for it
8563 * as "compressed".
8564 */
8565 pmap_options = PMAP_OPTIONS_COMPRESSOR;
8566 }
8567 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
8568 pmap_options,
8569 NULL);
8570 if (refmod_state & VM_MEM_MODIFIED) {
8571 SET_PAGE_DIRTY(p, FALSE);
8572 }
8573 }
8574
8575 if (!p->dirty && !p->precious) {
8576 vm_page_unlock_queues();
8577 VM_PAGE_FREE(p);
8578 continue;
8579 }
8580
8581 vm_page_queues_remove(p, TRUE);
8582
8583 if (vm_pageout_cluster(p, FALSE, TRUE))
8584 need_unlock = FALSE;
8585
8586 if (need_unlock == TRUE)
8587 vm_page_unlock_queues();
8588 }
8589
8590 vm_object_unlock(object);
8591 }
8592
8593
8594 #if CONFIG_IOSCHED
8595 void
8596 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
8597 {
8598 io_reprioritize_req_t req;
8599 struct vnode *devvp = NULL;
8600
8601 if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS)
8602 return;
8603
8604 /*
8605 * Create the request for I/O reprioritization.
8606 * We use the noblock variant of zalloc because we're holding the object
8607 * lock here and we could cause a deadlock in low memory conditions.
8608 */
8609 req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
8610 if (req == NULL)
8611 return;
8612 req->blkno = blkno;
8613 req->len = len;
8614 req->priority = prio;
8615 req->devvp = devvp;
8616
8617 /* Insert request into the reprioritization list */
8618 IO_REPRIORITIZE_LIST_LOCK();
8619 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8620 IO_REPRIORITIZE_LIST_UNLOCK();
8621
8622 /* Wakeup reprioritize thread */
8623 IO_REPRIO_THREAD_WAKEUP();
8624
8625 return;
8626 }
8627
8628 void
8629 vm_decmp_upl_reprioritize(upl_t upl, int prio)
8630 {
8631 int offset;
8632 vm_object_t object;
8633 io_reprioritize_req_t req;
8634 struct vnode *devvp = NULL;
8635 uint64_t blkno;
8636 uint32_t len;
8637 upl_t io_upl;
8638 uint64_t *io_upl_reprio_info;
8639 int io_upl_size;
8640
8641 if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
8642 return;
8643
8644 /*
8645 * We don't want to perform any allocations with the upl lock held since that might
8646 * result in a deadlock. If the system is low on memory, the pageout thread would
8647 * try to page things out and might wait on this lock. If we were then waiting for memory
8648 * to be freed up by the pageout thread, it would be a deadlock.
8649 */
8650
8651
8652 /* First step is just to get the size of the upl to find out how big the reprio info is */
8653 if(!upl_try_lock(upl))
8654 return;
8655
8656 if (upl->decmp_io_upl == NULL) {
8657 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8658 upl_unlock(upl);
8659 return;
8660 }
8661
8662 io_upl = upl->decmp_io_upl;
8663 assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
8664 io_upl_size = io_upl->size;
8665 upl_unlock(upl);
8666
8667 /* Now perform the allocation */
8668 io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8669 if (io_upl_reprio_info == NULL)
8670 return;
8671
8672 /* Now again take the lock, recheck the state and grab out the required info */
8673 if(!upl_try_lock(upl))
8674 goto out;
8675
8676 if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
8677 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8678 upl_unlock(upl);
8679 goto out;
8680 }
8681 memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8682
8683 /* Get the VM object for this UPL */
8684 if (io_upl->flags & UPL_SHADOWED) {
8685 object = io_upl->map_object->shadow;
8686 } else {
8687 object = io_upl->map_object;
8688 }
8689
8690 /* Get the dev vnode ptr for this object */
8691 if(!object || !object->pager ||
8692 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8693 upl_unlock(upl);
8694 goto out;
8695 }
8696
8697 upl_unlock(upl);
8698
8699 /* Now we have all the information needed to do the expedite */
8700
8701 offset = 0;
8702 while (offset < io_upl_size) {
8703 blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK;
8704 len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
8705
8706 /*
8707 * This implementation may cause some spurious expedites due to the
8708 * fact that we don't clean up the blkno & len from the upl_reprio_info
8709 * even after the I/O is complete.
8710 */
8711
8712 if (blkno != 0 && len != 0) {
8713 /* Create the request for I/O reprioritization */
8714 req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone);
8715 assert(req != NULL);
8716 req->blkno = blkno;
8717 req->len = len;
8718 req->priority = prio;
8719 req->devvp = devvp;
8720
8721 /* Insert request into the reprioritization list */
8722 IO_REPRIORITIZE_LIST_LOCK();
8723 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8724 IO_REPRIORITIZE_LIST_UNLOCK();
8725
8726 offset += len;
8727 } else {
8728 offset += PAGE_SIZE;
8729 }
8730 }
8731
8732 /* Wakeup reprioritize thread */
8733 IO_REPRIO_THREAD_WAKEUP();
8734
8735 out:
8736 kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE));
8737 return;
8738 }
8739
8740 void
8741 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
8742 {
8743 upl_t upl;
8744 upl_page_info_t *pl;
8745 unsigned int i, num_pages;
8746 int cur_tier;
8747
8748 cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
8749
8750 /*
8751 * Scan through all UPLs associated with the object to find the
8752 * UPL containing the contended page.
8753 */
8754 queue_iterate(&o->uplq, upl, upl_t, uplq) {
8755 if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier)
8756 continue;
8757 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
8758 num_pages = (upl->size / PAGE_SIZE);
8759
8760 /*
8761 * For each page in the UPL page list, see if it matches the contended
8762 * page and was issued as a low prio I/O.
8763 */
8764 for(i=0; i < num_pages; i++) {
8765 if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
8766 if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
8767 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8768 VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
8769 vm_decmp_upl_reprioritize(upl, cur_tier);
8770 break;
8771 }
8772 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8773 upl->upl_reprio_info[i], upl->upl_priority, 0);
8774 if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
8775 vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
8776 break;
8777 }
8778 }
8779 /* Check if we found any hits */
8780 if (i != num_pages)
8781 break;
8782 }
8783
8784 return;
8785 }
8786
8787 wait_result_t
8788 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
8789 {
8790 wait_result_t ret;
8791
8792 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
8793
8794 if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
8795 /*
8796 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
8797 */
8798 vm_page_handle_prio_inversion(o,m);
8799 }
8800 m->wanted = TRUE;
8801 ret = thread_sleep_vm_object(o, m, interruptible);
8802 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
8803 return ret;
8804 }
8805
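/*
 * Worker thread: drain the reprioritization list, issuing one
 * reprioritize request to the device vnode per queued entry, then park
 * as a continuation until woken again.
 */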
8806 static void
8807 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
8808 {
8809 io_reprioritize_req_t req = NULL;
8810
8811 while(1) {
8812
8813 IO_REPRIORITIZE_LIST_LOCK();
8814 if (queue_empty(&io_reprioritize_list)) {
8815 IO_REPRIORITIZE_LIST_UNLOCK();
8816 break;
8817 }
8818
8819 queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
8820 IO_REPRIORITIZE_LIST_UNLOCK();
8821
8822 vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
8823 zfree(io_reprioritize_req_zone, req);
8824 }
8825
8826 IO_REPRIO_THREAD_CONTINUATION();
8827 }
8828 #endif