[apple/xnu.git] / osfmk / vm / vm_object.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Virtual memory object module.
63 */
64
65 #include <debug.h>
66 #include <mach_pagemap.h>
67 #include <task_swapper.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/vm_param.h>
74
75 #include <ipc/ipc_types.h>
76 #include <ipc/ipc_port.h>
77
78 #include <kern/kern_types.h>
79 #include <kern/assert.h>
80 #include <kern/lock.h>
81 #include <kern/queue.h>
82 #include <kern/xpr.h>
83 #include <kern/zalloc.h>
84 #include <kern/host.h>
85 #include <kern/host_statistics.h>
86 #include <kern/processor.h>
87 #include <kern/misc_protos.h>
88
89 #include <vm/memory_object.h>
90 #include <vm/vm_fault.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_protos.h>
96 #include <vm/vm_purgeable_internal.h>
97
98 #if CONFIG_EMBEDDED
99 #include <sys/kern_memorystatus.h>
100 #endif
101
102 /*
103 * Virtual memory objects maintain the actual data
104 * associated with allocated virtual memory. A given
105 * page of memory exists within exactly one object.
106 *
107 * An object is only deallocated when all "references"
108 * are given up.
109 *
110 * Associated with each object is a list of all resident
111 * memory pages belonging to that object; this list is
112 * maintained by the "vm_page" module, but locked by the object's
113 * lock.
114 *
115 * Each object also records the memory object reference
116 * that is used by the kernel to request and write
117 * back data (the memory object, field "pager"), etc...
118 *
119 * Virtual memory objects are allocated to provide
120 * zero-filled memory (vm_allocate) or map a user-defined
121 * memory object into a virtual address space (vm_map).
122 *
123 * Virtual memory objects that refer to a user-defined
124 * memory object are called "permanent", because all changes
125 * made in virtual memory are reflected back to the
126  *	memory manager, which may then store them permanently.
127 * Other virtual memory objects are called "temporary",
128 * meaning that changes need be written back only when
129 * necessary to reclaim pages, and that storage associated
130 * with the object can be discarded once it is no longer
131 * mapped.
132 *
133 * A permanent memory object may be mapped into more
134 * than one virtual address space. Moreover, two threads
135 * may attempt to make the first mapping of a memory
136 * object concurrently. Only one thread is allowed to
137  *	complete this mapping; all others wait until the
138 * "pager_initialized" field is asserted, indicating
139 * that the first thread has initialized all of the
140 * necessary fields in the virtual memory object structure.
141 *
142 * The kernel relies on a *default memory manager* to
143 * provide backing storage for the zero-filled virtual
144 * memory objects. The pager memory objects associated
145 * with these temporary virtual memory objects are only
146 * requested from the default memory manager when it
147 * becomes necessary. Virtual memory objects
148 * that depend on the default memory manager are called
149 * "internal". The "pager_created" field is provided to
150 * indicate whether these ports have ever been allocated.
151 *
152 * The kernel may also create virtual memory objects to
153 * hold changed pages after a copy-on-write operation.
154 * In this case, the virtual memory object (and its
155 * backing storage -- its memory object) only contain
156 * those pages that have been changed. The "shadow"
157 * field refers to the virtual memory object that contains
158 * the remainder of the contents. The "shadow_offset"
159 * field indicates where in the "shadow" these contents begin.
160 * The "copy" field refers to a virtual memory object
161 * to which changed pages must be copied before changing
162 * this object, in order to implement another form
163 * of copy-on-write optimization.
164 *
165 * The virtual memory object structure also records
166 * the attributes associated with its memory object.
167 * The "pager_ready", "can_persist" and "copy_strategy"
168 * fields represent those attributes. The "cached_list"
169 * field is used in the implementation of the persistence
170 * attribute.
171 *
172 * ZZZ Continue this comment.
173 */
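/*
 *	To make the shadow/copy description above concrete, a resident-page
 *	lookup conceptually walks the shadow chain roughly as sketched below.
 *	This is illustrative only; the real logic lives in vm_fault_page()
 *	and vm_page_lookup(), and must also deal with locking, busy pages
 *	and the pager:
 *
 *		offset = fault_offset;
 *		for (obj = top_object; obj != VM_OBJECT_NULL; obj = obj->shadow) {
 *			if ((m = vm_page_lookup(obj, offset)) != VM_PAGE_NULL)
 *				break;				/* page found in this object */
 *			offset += obj->shadow_offset;		/* translate into the shadow */
 *		}
 */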
174
175 /* Forward declarations for internal functions. */
176 static kern_return_t vm_object_terminate(
177 vm_object_t object);
178
179 extern void vm_object_remove(
180 vm_object_t object);
181
182 static kern_return_t vm_object_copy_call(
183 vm_object_t src_object,
184 vm_object_offset_t src_offset,
185 vm_object_size_t size,
186 vm_object_t *_result_object);
187
188 static void vm_object_do_collapse(
189 vm_object_t object,
190 vm_object_t backing_object);
191
192 static void vm_object_do_bypass(
193 vm_object_t object,
194 vm_object_t backing_object);
195
196 static void vm_object_release_pager(
197 memory_object_t pager,
198 boolean_t hashed);
199
200 static zone_t vm_object_zone; /* vm backing store zone */
201
202 /*
203 * All wired-down kernel memory belongs to a single virtual
204 * memory object (kernel_object) to avoid wasting data structures.
205 */
206 static struct vm_object kernel_object_store;
207 vm_object_t kernel_object;
208
209
210 /*
211 * The submap object is used as a placeholder for vm_map_submap
212 * operations. The object is declared in vm_map.c because it
213 * is exported by the vm_map module. The storage is declared
214 * here because it must be initialized here.
215 */
216 static struct vm_object vm_submap_object_store;
217
218 /*
219 * Virtual memory objects are initialized from
220 * a template (see vm_object_allocate).
221 *
222 * When adding a new field to the virtual memory
223 * object structure, be sure to add initialization
224 * (see _vm_object_allocate()).
225 */
226 static struct vm_object vm_object_template;
227
228 unsigned int vm_page_purged_wired = 0;
229 unsigned int vm_page_purged_busy = 0;
230 unsigned int vm_page_purged_others = 0;
231
232 #if VM_OBJECT_CACHE
233 /*
234 * Virtual memory objects that are not referenced by
235 * any address maps, but that are allowed to persist
236 * (an attribute specified by the associated memory manager),
237 * are kept in a queue (vm_object_cached_list).
238 *
239 * When an object from this queue is referenced again,
240 * for example to make another address space mapping,
241 * it must be removed from the queue. That is, the
242 * queue contains *only* objects with zero references.
243 *
244 * The kernel may choose to terminate objects from this
245 * queue in order to reclaim storage. The current policy
246 * is to permit a fixed maximum number of unreferenced
247 * objects (vm_object_cached_max).
248 *
249 * A spin lock (accessed by routines
250 * vm_object_cache_{lock,lock_try,unlock}) governs the
251 * object cache. It must be held when objects are
252 * added to or removed from the cache (in vm_object_terminate).
253 * The routines that acquire a reference to a virtual
254 * memory object based on one of the memory object ports
255 * must also lock the cache.
256 *
257 * Ideally, the object cache should be more isolated
258 * from the reference mechanism, so that the lock need
259 * not be held to make simple references.
260 */
261 static vm_object_t vm_object_cache_trim(
262 boolean_t called_from_vm_object_deallocate);
263
264 static queue_head_t vm_object_cached_list;
265 static int vm_object_cached_count=0;
266 static int vm_object_cached_high; /* highest # cached objects */
267 static int		vm_object_cached_max = 512;	/* may be patched */
268
269 static lck_mtx_t vm_object_cached_lock_data;
270 static lck_mtx_ext_t vm_object_cached_lock_data_ext;
271
272 #define vm_object_cache_lock() \
273 lck_mtx_lock(&vm_object_cached_lock_data)
274 #define vm_object_cache_lock_try() \
275 lck_mtx_try_lock(&vm_object_cached_lock_data)
276 #define vm_object_cache_lock_spin() \
277 lck_mtx_lock_spin(&vm_object_cached_lock_data)
278 #define vm_object_cache_unlock() \
279 lck_mtx_unlock(&vm_object_cached_lock_data)
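/*
 * Note on lock ordering (see vm_object_deallocate() below): the cache
 * lock is taken first and the object lock is only try-acquired while
 * holding it, backing off with mutex_pause() when the try fails, to
 * avoid deadlocking against a thread that already holds this object's
 * lock while deallocating some other object.
 */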
280
281 #endif /* VM_OBJECT_CACHE */
282
283
284 static void vm_object_deactivate_all_pages(
285 vm_object_t object);
286
287
288 #define VM_OBJECT_HASH_COUNT 1024
289 #define VM_OBJECT_HASH_LOCK_COUNT 512
290
291 static lck_mtx_t vm_object_hashed_lock_data[VM_OBJECT_HASH_COUNT];
292 static lck_mtx_ext_t vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_COUNT];
293
294 static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
295 static struct zone *vm_object_hash_zone;
296
297 struct vm_object_hash_entry {
298 queue_chain_t hash_link; /* hash chain link */
299 memory_object_t pager; /* pager we represent */
300 vm_object_t object; /* corresponding object */
301 boolean_t waiting; /* someone waiting for
302 * termination */
303 };
304
305 typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
306 #define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
307
308 #define VM_OBJECT_HASH_SHIFT 5
309 #define vm_object_hash(pager) \
310 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT))
311
312 #define vm_object_lock_hash(pager) \
313 ((int)((((uintptr_t)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_LOCK_COUNT))
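/*
 * The pager pointer is shifted right by VM_OBJECT_HASH_SHIFT before the
 * modulo, presumably to discard the low-order bits that are identical
 * for all pager structures because of allocation alignment; without the
 * shift, most pointers would collide in a handful of buckets.
 */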
314
315 void vm_object_hash_entry_free(
316 vm_object_hash_entry_t entry);
317
318 static void vm_object_reap(vm_object_t object);
319 static void vm_object_reap_async(vm_object_t object);
320 static void vm_object_reaper_thread(void);
321
322 static lck_mtx_t vm_object_reaper_lock_data;
323 static lck_mtx_ext_t vm_object_reaper_lock_data_ext;
324
325 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
326 unsigned int vm_object_reap_count = 0;
327 unsigned int vm_object_reap_count_async = 0;
328
329 #define vm_object_reaper_lock() \
330 lck_mtx_lock(&vm_object_reaper_lock_data)
331 #define vm_object_reaper_lock_spin() \
332 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
333 #define vm_object_reaper_unlock() \
334 lck_mtx_unlock(&vm_object_reaper_lock_data)
335
336
337
338 static lck_mtx_t *
339 vm_object_hash_lock_spin(
340 memory_object_t pager)
341 {
342 int index;
343
344 index = vm_object_lock_hash(pager);
345
346 lck_mtx_lock_spin(&vm_object_hashed_lock_data[index]);
347
348 return (&vm_object_hashed_lock_data[index]);
349 }
350
351 static void
352 vm_object_hash_unlock(lck_mtx_t *lck)
353 {
354 lck_mtx_unlock(lck);
355 }
356
357
358 /*
359 * vm_object_hash_lookup looks up a pager in the hashtable
360 * and returns the corresponding entry, with optional removal.
361 */
362 static vm_object_hash_entry_t
363 vm_object_hash_lookup(
364 memory_object_t pager,
365 boolean_t remove_entry)
366 {
367 queue_t bucket;
368 vm_object_hash_entry_t entry;
369
370 bucket = &vm_object_hashtable[vm_object_hash(pager)];
371
372 entry = (vm_object_hash_entry_t)queue_first(bucket);
373 while (!queue_end(bucket, (queue_entry_t)entry)) {
374 if (entry->pager == pager) {
375 if (remove_entry) {
376 queue_remove(bucket, entry,
377 vm_object_hash_entry_t, hash_link);
378 }
379 return(entry);
380 }
381 entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
382 }
383 return(VM_OBJECT_HASH_ENTRY_NULL);
384 }
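/*
 * Callers are expected to serialize lookups and removals for a given
 * pager by holding the corresponding bucket lock, obtained with
 * vm_object_hash_lock_spin(pager) and released with
 * vm_object_hash_unlock() -- see vm_object_pager_wakeup() below for the
 * typical pattern.
 */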
385
386 /*
387  *	vm_object_hash_insert enters the specified
388 * pager / cache object association in the hashtable.
389 */
390
391 static void
392 vm_object_hash_insert(
393 vm_object_hash_entry_t entry,
394 vm_object_t object)
395 {
396 queue_t bucket;
397
398 bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
399
400 queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
401
402 entry->object = object;
403 object->hashed = TRUE;
404 }
405
406 static vm_object_hash_entry_t
407 vm_object_hash_entry_alloc(
408 memory_object_t pager)
409 {
410 vm_object_hash_entry_t entry;
411
412 entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
413 entry->pager = pager;
414 entry->object = VM_OBJECT_NULL;
415 entry->waiting = FALSE;
416
417 return(entry);
418 }
419
420 void
421 vm_object_hash_entry_free(
422 vm_object_hash_entry_t entry)
423 {
424 zfree(vm_object_hash_zone, entry);
425 }
426
427 /*
428 * vm_object_allocate:
429 *
430 * Returns a new object with the given size.
431 */
432
433 __private_extern__ void
434 _vm_object_allocate(
435 vm_object_size_t size,
436 vm_object_t object)
437 {
438 XPR(XPR_VM_OBJECT,
439 "vm_object_allocate, object 0x%X size 0x%X\n",
440 object, size, 0,0,0);
441
442 *object = vm_object_template;
443 queue_init(&object->memq);
444 queue_init(&object->msr_q);
445 #if UPL_DEBUG
446 queue_init(&object->uplq);
447 #endif /* UPL_DEBUG */
448 vm_object_lock_init(object);
449 object->size = size;
450 }
451
452 __private_extern__ vm_object_t
453 vm_object_allocate(
454 vm_object_size_t size)
455 {
456 register vm_object_t object;
457
458 object = (vm_object_t) zalloc(vm_object_zone);
459
460 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
461
462 if (object != VM_OBJECT_NULL)
463 _vm_object_allocate(size, object);
464
465 return object;
466 }
467
468
469 lck_grp_t vm_object_lck_grp;
470 lck_grp_attr_t vm_object_lck_grp_attr;
471 lck_attr_t vm_object_lck_attr;
472 lck_attr_t kernel_object_lck_attr;
473
474 /*
475 * vm_object_bootstrap:
476 *
477 * Initialize the VM objects module.
478 */
479 __private_extern__ void
480 vm_object_bootstrap(void)
481 {
482 register int i;
483
484 vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
485 round_page(512*1024),
486 round_page(12*1024),
487 "vm objects");
488
489 vm_object_init_lck_grp();
490
491 #if VM_OBJECT_CACHE
492 queue_init(&vm_object_cached_list);
493
494 lck_mtx_init_ext(&vm_object_cached_lock_data,
495 &vm_object_cached_lock_data_ext,
496 &vm_object_lck_grp,
497 &vm_object_lck_attr);
498 #endif
499 queue_init(&vm_object_reaper_queue);
500
501 for (i = 0; i < VM_OBJECT_HASH_LOCK_COUNT; i++) {
502 lck_mtx_init_ext(&vm_object_hashed_lock_data[i],
503 &vm_object_hashed_lock_data_ext[i],
504 &vm_object_lck_grp,
505 &vm_object_lck_attr);
506 }
507 lck_mtx_init_ext(&vm_object_reaper_lock_data,
508 &vm_object_reaper_lock_data_ext,
509 &vm_object_lck_grp,
510 &vm_object_lck_attr);
511
512 vm_object_hash_zone =
513 zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
514 round_page(512*1024),
515 round_page(12*1024),
516 "vm object hash entries");
517
518 for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
519 queue_init(&vm_object_hashtable[i]);
520
521
522 /*
523 * Fill in a template object, for quick initialization
524 */
525
526 /* memq; Lock; init after allocation */
527 vm_object_template.memq.prev = NULL;
528 vm_object_template.memq.next = NULL;
529 #if 0
530 /*
531 * We can't call vm_object_lock_init() here because that will
532 * allocate some memory and VM is not fully initialized yet.
533 * The lock will be initialized for each allocated object in
534 * _vm_object_allocate(), so we don't need to initialize it in
535 * the vm_object_template.
536 */
537 vm_object_lock_init(&vm_object_template);
538 #endif
539 vm_object_template.size = 0;
540 vm_object_template.memq_hint = VM_PAGE_NULL;
541 vm_object_template.ref_count = 1;
542 #if TASK_SWAPPER
543 vm_object_template.res_count = 1;
544 #endif /* TASK_SWAPPER */
545 vm_object_template.resident_page_count = 0;
546 vm_object_template.wired_page_count = 0;
547 vm_object_template.reusable_page_count = 0;
548 vm_object_template.copy = VM_OBJECT_NULL;
549 vm_object_template.shadow = VM_OBJECT_NULL;
550 vm_object_template.shadow_offset = (vm_object_offset_t) 0;
551 vm_object_template.pager = MEMORY_OBJECT_NULL;
552 vm_object_template.paging_offset = 0;
553 vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
554 vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
555 vm_object_template.paging_in_progress = 0;
556 vm_object_template.activity_in_progress = 0;
557
558 /* Begin bitfields */
559 vm_object_template.all_wanted = 0; /* all bits FALSE */
560 vm_object_template.pager_created = FALSE;
561 vm_object_template.pager_initialized = FALSE;
562 vm_object_template.pager_ready = FALSE;
563 vm_object_template.pager_trusted = FALSE;
564 vm_object_template.can_persist = FALSE;
565 vm_object_template.internal = TRUE;
566 vm_object_template.temporary = TRUE;
567 vm_object_template.private = FALSE;
568 vm_object_template.pageout = FALSE;
569 vm_object_template.alive = TRUE;
570 vm_object_template.purgable = VM_PURGABLE_DENY;
571 vm_object_template.shadowed = FALSE;
572 vm_object_template.silent_overwrite = FALSE;
573 vm_object_template.advisory_pageout = FALSE;
574 vm_object_template.true_share = FALSE;
575 vm_object_template.terminating = FALSE;
576 vm_object_template.named = FALSE;
577 vm_object_template.shadow_severed = FALSE;
578 vm_object_template.phys_contiguous = FALSE;
579 vm_object_template.nophyscache = FALSE;
580 /* End bitfields */
581
582 vm_object_template.cached_list.prev = NULL;
583 vm_object_template.cached_list.next = NULL;
584 vm_object_template.msr_q.prev = NULL;
585 vm_object_template.msr_q.next = NULL;
586
587 vm_object_template.last_alloc = (vm_object_offset_t) 0;
588 vm_object_template.sequential = (vm_object_offset_t) 0;
589 vm_object_template.pages_created = 0;
590 vm_object_template.pages_used = 0;
591
592 #if MACH_PAGEMAP
593 vm_object_template.existence_map = VM_EXTERNAL_NULL;
594 #endif /* MACH_PAGEMAP */
595 vm_object_template.cow_hint = ~(vm_offset_t)0;
596 #if MACH_ASSERT
597 vm_object_template.paging_object = VM_OBJECT_NULL;
598 #endif /* MACH_ASSERT */
599
600 /* cache bitfields */
601 vm_object_template.wimg_bits = VM_WIMG_DEFAULT;
602 vm_object_template.code_signed = FALSE;
603 vm_object_template.hashed = FALSE;
604 vm_object_template.transposed = FALSE;
605 vm_object_template.mapping_in_progress = FALSE;
606 vm_object_template.volatile_empty = FALSE;
607 vm_object_template.volatile_fault = FALSE;
608 vm_object_template.all_reusable = FALSE;
609 vm_object_template.blocked_access = FALSE;
610 vm_object_template.__object2_unused_bits = 0;
611 #if UPL_DEBUG
612 vm_object_template.uplq.prev = NULL;
613 vm_object_template.uplq.next = NULL;
614 #endif /* UPL_DEBUG */
615 #ifdef VM_PIP_DEBUG
616 bzero(&vm_object_template.pip_holders,
617 sizeof (vm_object_template.pip_holders));
618 #endif /* VM_PIP_DEBUG */
619
620 vm_object_template.objq.next=NULL;
621 vm_object_template.objq.prev=NULL;
622
623
624 /*
625 * Initialize the "kernel object"
626 */
627
628 kernel_object = &kernel_object_store;
629
630 /*
631 * Note that in the following size specifications, we need to add 1 because
632 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
633 */
634
635 #ifdef ppc
636 _vm_object_allocate(vm_last_addr + 1,
637 kernel_object);
638 #else
639 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
640 kernel_object);
641 #endif
642 kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
643
644 /*
645 * Initialize the "submap object". Make it as large as the
646 * kernel object so that no limit is imposed on submap sizes.
647 */
648
649 vm_submap_object = &vm_submap_object_store;
650 #ifdef ppc
651 _vm_object_allocate(vm_last_addr + 1,
652 vm_submap_object);
653 #else
654 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1,
655 vm_submap_object);
656 #endif
657 vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
658
659 /*
660 * Create an "extra" reference to this object so that we never
661 * try to deallocate it; zfree doesn't like to be called with
662 * non-zone memory.
663 */
664 vm_object_reference(vm_submap_object);
665
666 #if MACH_PAGEMAP
667 vm_external_module_initialize();
668 #endif /* MACH_PAGEMAP */
669 }
670
671 void
672 vm_object_reaper_init(void)
673 {
674 kern_return_t kr;
675 thread_t thread;
676
677 kr = kernel_thread_start_priority(
678 (thread_continue_t) vm_object_reaper_thread,
679 NULL,
680 BASEPRI_PREEMPT - 1,
681 &thread);
682 if (kr != KERN_SUCCESS) {
683 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
684 }
685 thread_deallocate(thread);
686 }
687
688 __private_extern__ void
689 vm_object_init(void)
690 {
691 /*
692 * Finish initializing the kernel object.
693 */
694 }
695
696
697 __private_extern__ void
698 vm_object_init_lck_grp(void)
699 {
700 /*
701 	 * initialize the vm_object lock world
702 */
703 lck_grp_attr_setdefault(&vm_object_lck_grp_attr);
704 lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr);
705 lck_attr_setdefault(&vm_object_lck_attr);
706 lck_attr_setdefault(&kernel_object_lck_attr);
707 lck_attr_cleardebug(&kernel_object_lck_attr);
708 }
709
710 #if VM_OBJECT_CACHE
711 #define MIGHT_NOT_CACHE_SHADOWS 1
712 #if MIGHT_NOT_CACHE_SHADOWS
713 static int cache_shadows = TRUE;
714 #endif /* MIGHT_NOT_CACHE_SHADOWS */
715 #endif
716
717 /*
718 * vm_object_deallocate:
719 *
720 * Release a reference to the specified object,
721 * gained either through a vm_object_allocate
722 * or a vm_object_reference call. When all references
723 * are gone, storage associated with this object
724 * may be relinquished.
725 *
726 * No object may be locked.
727 */
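/*
 * The counters below track how often the lock-free fast path in
 * vm_object_deallocate() manages to drop a reference with just a shared
 * object lock and an atomic compare-and-swap, versus how often it has
 * to fall back to the slow path (either because the swap lost a race or
 * because this could be the last or second-to-last reference).
 */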
728 unsigned long vm_object_deallocate_shared_successes = 0;
729 unsigned long vm_object_deallocate_shared_failures = 0;
730 unsigned long vm_object_deallocate_shared_swap_failures = 0;
731 __private_extern__ void
732 vm_object_deallocate(
733 register vm_object_t object)
734 {
735 #if VM_OBJECT_CACHE
736 boolean_t retry_cache_trim = FALSE;
737 uint32_t try_failed_count = 0;
738 #endif
739 vm_object_t shadow = VM_OBJECT_NULL;
740
741 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
742 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
743
744 if (object == VM_OBJECT_NULL)
745 return;
746
747 if (object == kernel_object) {
748 vm_object_lock_shared(object);
749
750 OSAddAtomic(-1, &object->ref_count);
751
752 if (object->ref_count == 0) {
753 panic("vm_object_deallocate: losing kernel_object\n");
754 }
755 vm_object_unlock(object);
756 return;
757 }
758
759 if (object->ref_count > 2 ||
760 (!object->named && object->ref_count > 1)) {
761 UInt32 original_ref_count;
762 volatile UInt32 *ref_count_p;
763 Boolean atomic_swap;
764
765 /*
766 * The object currently looks like it is not being
767 * kept alive solely by the reference we're about to release.
768 * Let's try and release our reference without taking
769 * all the locks we would need if we had to terminate the
770 * object (cache lock + exclusive object lock).
771 * Lock the object "shared" to make sure we don't race with
772 * anyone holding it "exclusive".
773 */
774 vm_object_lock_shared(object);
775 ref_count_p = (volatile UInt32 *) &object->ref_count;
776 original_ref_count = object->ref_count;
777 /*
778 * Test again as "ref_count" could have changed.
779 * "named" shouldn't change.
780 */
781 if (original_ref_count > 2 ||
782 (!object->named && original_ref_count > 1)) {
783 atomic_swap = OSCompareAndSwap(
784 original_ref_count,
785 original_ref_count - 1,
786 (UInt32 *) &object->ref_count);
787 if (atomic_swap == FALSE) {
788 vm_object_deallocate_shared_swap_failures++;
789 }
790
791 } else {
792 atomic_swap = FALSE;
793 }
794 vm_object_unlock(object);
795
796 if (atomic_swap) {
797 /*
798 * ref_count was updated atomically !
799 */
800 vm_object_deallocate_shared_successes++;
801 return;
802 }
803
804 /*
805 * Someone else updated the ref_count at the same
806 * time and we lost the race. Fall back to the usual
807 * slow but safe path...
808 */
809 vm_object_deallocate_shared_failures++;
810 }
811
812 while (object != VM_OBJECT_NULL) {
813
814 vm_object_lock(object);
815
816 assert(object->ref_count > 0);
817
818 /*
819 * If the object has a named reference, and only
820 * that reference would remain, inform the pager
821 * about the last "mapping" reference going away.
822 */
823 if ((object->ref_count == 2) && (object->named)) {
824 memory_object_t pager = object->pager;
825
826 /* Notify the Pager that there are no */
827 /* more mappers for this object */
828
829 if (pager != MEMORY_OBJECT_NULL) {
830 vm_object_mapping_wait(object, THREAD_UNINT);
831 vm_object_mapping_begin(object);
832 vm_object_unlock(object);
833
834 memory_object_last_unmap(pager);
835
836 vm_object_lock(object);
837 vm_object_mapping_end(object);
838 }
839 /*
840 * recheck the ref_count since we dropped the object lock
841 * to call 'memory_object_last_unmap'... it's possible
842 * additional references got taken and we only want
843 		 * to deactivate the pages if this 'named' object will only be
844 * referenced by the backing pager once we drop our reference
845 * below
846 */
847 if (!object->terminating && object->ref_count == 2)
848 vm_object_deactivate_all_pages(object);
849
850 assert(object->ref_count > 0);
851 }
852
853 /*
854 * Lose the reference. If other references
855 * remain, then we are done, unless we need
856 * to retry a cache trim.
857 * If it is the last reference, then keep it
858 * until any pending initialization is completed.
859 */
860
861 /* if the object is terminating, it cannot go into */
862 /* the cache and we obviously should not call */
863 /* terminate again. */
864
865 if ((object->ref_count > 1) || object->terminating) {
866 vm_object_lock_assert_exclusive(object);
867 object->ref_count--;
868 vm_object_res_deallocate(object);
869
870 if (object->ref_count == 1 &&
871 object->shadow != VM_OBJECT_NULL) {
872 /*
873 * There's only one reference left on this
874 * VM object. We can't tell if it's a valid
875 * one (from a mapping for example) or if this
876 * object is just part of a possibly stale and
877 * useless shadow chain.
878 * We would like to try and collapse it into
879 * its parent, but we don't have any pointers
880 * back to this parent object.
881 * But we can try and collapse this object with
882 * its own shadows, in case these are useless
883 * too...
884 * We can't bypass this object though, since we
885 * don't know if this last reference on it is
886 * meaningful or not.
887 */
888 vm_object_collapse(object, 0, FALSE);
889 }
890 vm_object_unlock(object);
891 #if VM_OBJECT_CACHE
892 if (retry_cache_trim &&
893 ((object = vm_object_cache_trim(TRUE)) !=
894 VM_OBJECT_NULL)) {
895 continue;
896 }
897 #endif
898 return;
899 }
900
901 /*
902 * We have to wait for initialization
903 * before destroying or caching the object.
904 */
905
906 if (object->pager_created && ! object->pager_initialized) {
907 assert(! object->can_persist);
908 vm_object_assert_wait(object,
909 VM_OBJECT_EVENT_INITIALIZED,
910 THREAD_UNINT);
911 vm_object_unlock(object);
912
913 thread_block(THREAD_CONTINUE_NULL);
914 continue;
915 }
916
917 #if VM_OBJECT_CACHE
918 /*
919 * If this object can persist, then enter it in
920 * the cache. Otherwise, terminate it.
921 *
922 * NOTE: Only permanent objects are cached, and
923 * permanent objects cannot have shadows. This
924 * affects the residence counting logic in a minor
925 * way (can do it in-line, mostly).
926 */
927
928 if ((object->can_persist) && (object->alive)) {
929 /*
930 * Now it is safe to decrement reference count,
931 * and to return if reference count is > 0.
932 */
933
934 vm_object_lock_assert_exclusive(object);
935 if (--object->ref_count > 0) {
936 vm_object_res_deallocate(object);
937 vm_object_unlock(object);
938
939 if (retry_cache_trim &&
940 ((object = vm_object_cache_trim(TRUE)) !=
941 VM_OBJECT_NULL)) {
942 continue;
943 }
944 return;
945 }
946
947 #if MIGHT_NOT_CACHE_SHADOWS
948 /*
949 * Remove shadow now if we don't
950 * want to cache shadows.
951 */
952 if (! cache_shadows) {
953 shadow = object->shadow;
954 object->shadow = VM_OBJECT_NULL;
955 }
956 #endif /* MIGHT_NOT_CACHE_SHADOWS */
957
958 /*
959 * Enter the object onto the queue of
960 * cached objects, and deactivate
961 * all of its pages.
962 */
963 assert(object->shadow == VM_OBJECT_NULL);
964 VM_OBJ_RES_DECR(object);
965 XPR(XPR_VM_OBJECT,
966 "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
967 object,
968 vm_object_cached_list.next,
969 vm_object_cached_list.prev,0,0);
970
971
972 vm_object_unlock(object);
973
974 try_failed_count = 0;
975 for (;;) {
976 vm_object_cache_lock();
977
978 /*
979 * if we try to take a regular lock here
980 * we risk deadlocking against someone
981 * holding a lock on this object while
982 * trying to vm_object_deallocate a different
983 * object
984 */
985 if (vm_object_lock_try(object))
986 break;
987 vm_object_cache_unlock();
988 try_failed_count++;
989
990 mutex_pause(try_failed_count); /* wait a bit */
991 }
992 vm_object_cached_count++;
993 if (vm_object_cached_count > vm_object_cached_high)
994 vm_object_cached_high = vm_object_cached_count;
995 queue_enter(&vm_object_cached_list, object,
996 vm_object_t, cached_list);
997 vm_object_cache_unlock();
998
999 vm_object_deactivate_all_pages(object);
1000 vm_object_unlock(object);
1001
1002 #if MIGHT_NOT_CACHE_SHADOWS
1003 /*
1004 * If we have a shadow that we need
1005 * to deallocate, do so now, remembering
1006 * to trim the cache later.
1007 */
1008 if (! cache_shadows && shadow != VM_OBJECT_NULL) {
1009 object = shadow;
1010 retry_cache_trim = TRUE;
1011 continue;
1012 }
1013 #endif /* MIGHT_NOT_CACHE_SHADOWS */
1014
1015 /*
1016 * Trim the cache. If the cache trim
1017 * returns with a shadow for us to deallocate,
1018 * then remember to retry the cache trim
1019 * when we are done deallocating the shadow.
1020 * Otherwise, we are done.
1021 */
1022
1023 object = vm_object_cache_trim(TRUE);
1024 if (object == VM_OBJECT_NULL) {
1025 return;
1026 }
1027 retry_cache_trim = TRUE;
1028 } else
1029 #endif /* VM_OBJECT_CACHE */
1030 {
1031 /*
1032 * This object is not cachable; terminate it.
1033 */
1034 XPR(XPR_VM_OBJECT,
1035 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
1036 object, object->resident_page_count,
1037 object->paging_in_progress,
1038 (void *)current_thread(),object->ref_count);
1039
1040 VM_OBJ_RES_DECR(object); /* XXX ? */
1041 /*
1042 * Terminate this object. If it had a shadow,
1043 * then deallocate it; otherwise, if we need
1044 * to retry a cache trim, do so now; otherwise,
1045 * we are done. "pageout" objects have a shadow,
1046 * but maintain a "paging reference" rather than
1047 * a normal reference.
1048 */
1049 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1050
1051 if (vm_object_terminate(object) != KERN_SUCCESS) {
1052 return;
1053 }
1054 if (shadow != VM_OBJECT_NULL) {
1055 object = shadow;
1056 continue;
1057 }
1058 #if VM_OBJECT_CACHE
1059 if (retry_cache_trim &&
1060 ((object = vm_object_cache_trim(TRUE)) !=
1061 VM_OBJECT_NULL)) {
1062 continue;
1063 }
1064 #endif
1065 return;
1066 }
1067 }
1068 #if VM_OBJECT_CACHE
1069 assert(! retry_cache_trim);
1070 #endif
1071 }
1072
1073
1074 #if VM_OBJECT_CACHE
1075 /*
1076 * Check to see whether we really need to trim
1077 * down the cache. If so, remove an object from
1078 * the cache, terminate it, and repeat.
1079 *
1080 * Called with, and returns with, cache lock unlocked.
1081 */
1082 vm_object_t
1083 vm_object_cache_trim(
1084 boolean_t called_from_vm_object_deallocate)
1085 {
1086 register vm_object_t object = VM_OBJECT_NULL;
1087 vm_object_t shadow;
1088
1089 for (;;) {
1090
1091 /*
1092 * If we no longer need to trim the cache,
1093 * then we are done.
1094 */
1095 if (vm_object_cached_count <= vm_object_cached_max)
1096 return VM_OBJECT_NULL;
1097
1098 vm_object_cache_lock();
1099 if (vm_object_cached_count <= vm_object_cached_max) {
1100 vm_object_cache_unlock();
1101 return VM_OBJECT_NULL;
1102 }
1103
1104 /*
1105 * We must trim down the cache, so remove
1106 * the first object in the cache.
1107 */
1108 XPR(XPR_VM_OBJECT,
1109 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
1110 vm_object_cached_list.next,
1111 vm_object_cached_list.prev, 0, 0, 0);
1112
1113 object = (vm_object_t) queue_first(&vm_object_cached_list);
1114 if(object == (vm_object_t) &vm_object_cached_list) {
1115 /* something's wrong with the calling parameter or */
1116 /* the value of vm_object_cached_count, just fix */
1117 /* and return */
1118 if(vm_object_cached_max < 0)
1119 vm_object_cached_max = 0;
1120 vm_object_cached_count = 0;
1121 vm_object_cache_unlock();
1122 return VM_OBJECT_NULL;
1123 }
1124 vm_object_lock(object);
1125 queue_remove(&vm_object_cached_list, object, vm_object_t,
1126 cached_list);
1127 vm_object_cached_count--;
1128
1129 vm_object_cache_unlock();
1130 /*
1131 * Since this object is in the cache, we know
1132 * that it is initialized and has no references.
1133 * Take a reference to avoid recursive deallocations.
1134 */
1135
1136 assert(object->pager_initialized);
1137 assert(object->ref_count == 0);
1138 vm_object_lock_assert_exclusive(object);
1139 object->ref_count++;
1140
1141 /*
1142 * Terminate the object.
1143 * If the object had a shadow, we let vm_object_deallocate
1144 * deallocate it. "pageout" objects have a shadow, but
1145 * maintain a "paging reference" rather than a normal
1146 * reference.
1147 * (We are careful here to limit recursion.)
1148 */
1149 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1150
1151 if(vm_object_terminate(object) != KERN_SUCCESS)
1152 continue;
1153
1154 if (shadow != VM_OBJECT_NULL) {
1155 if (called_from_vm_object_deallocate) {
1156 return shadow;
1157 } else {
1158 vm_object_deallocate(shadow);
1159 }
1160 }
1161 }
1162 }
1163 #endif
1164
1165
1166 /*
1167 * Routine: vm_object_terminate
1168 * Purpose:
1169 * Free all resources associated with a vm_object.
1170 * In/out conditions:
1171 * Upon entry, the object must be locked,
1172 * and the object must have exactly one reference.
1173 *
1174 * The shadow object reference is left alone.
1175 *
1176  *	The object must be unlocked if it's found that pages
1177 * must be flushed to a backing object. If someone
1178 * manages to map the object while it is being flushed
1179 * the object is returned unlocked and unchanged. Otherwise,
1180 * upon exit, the cache will be unlocked, and the
1181 * object will cease to exist.
1182 */
1183 static kern_return_t
1184 vm_object_terminate(
1185 vm_object_t object)
1186 {
1187 vm_object_t shadow_object;
1188
1189 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
1190 object, object->ref_count, 0, 0, 0);
1191
1192 if (!object->pageout && (!object->temporary || object->can_persist) &&
1193 (object->pager != NULL || object->shadow_severed)) {
1194 /*
1195 * Clear pager_trusted bit so that the pages get yanked
1196 * out of the object instead of cleaned in place. This
1197 * prevents a deadlock in XMM and makes more sense anyway.
1198 */
1199 object->pager_trusted = FALSE;
1200
1201 vm_object_reap_pages(object, REAP_TERMINATE);
1202 }
1203 /*
1204 * Make sure the object isn't already being terminated
1205 */
1206 if (object->terminating) {
1207 vm_object_lock_assert_exclusive(object);
1208 object->ref_count--;
1209 assert(object->ref_count > 0);
1210 vm_object_unlock(object);
1211 return KERN_FAILURE;
1212 }
1213
1214 /*
1215 * Did somebody get a reference to the object while we were
1216 * cleaning it?
1217 */
1218 if (object->ref_count != 1) {
1219 vm_object_lock_assert_exclusive(object);
1220 object->ref_count--;
1221 assert(object->ref_count > 0);
1222 vm_object_res_deallocate(object);
1223 vm_object_unlock(object);
1224 return KERN_FAILURE;
1225 }
1226
1227 /*
1228 * Make sure no one can look us up now.
1229 */
1230
1231 object->terminating = TRUE;
1232 object->alive = FALSE;
1233
1234 if (object->hashed) {
1235 lck_mtx_t *lck;
1236
1237 lck = vm_object_hash_lock_spin(object->pager);
1238 vm_object_remove(object);
1239 vm_object_hash_unlock(lck);
1240 }
1241 /*
1242 * Detach the object from its shadow if we are the shadow's
1243 * copy. The reference we hold on the shadow must be dropped
1244 * by our caller.
1245 */
1246 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1247 !(object->pageout)) {
1248 vm_object_lock(shadow_object);
1249 if (shadow_object->copy == object)
1250 shadow_object->copy = VM_OBJECT_NULL;
1251 vm_object_unlock(shadow_object);
1252 }
1253
1254 if (object->paging_in_progress != 0 ||
1255 object->activity_in_progress != 0) {
1256 /*
1257 * There are still some paging_in_progress references
1258 * on this object, meaning that there are some paging
1259 * or other I/O operations in progress for this VM object.
1260 * Such operations take some paging_in_progress references
1261 * up front to ensure that the object doesn't go away, but
1262 * they may also need to acquire a reference on the VM object,
1263 * to map it in kernel space, for example. That means that
1264 * they may end up releasing the last reference on the VM
1265 * object, triggering its termination, while still holding
1266 * paging_in_progress references. Waiting for these
1267 * pending paging_in_progress references to go away here would
1268 * deadlock.
1269 *
1270 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1271 * complete the VM object termination if it still holds
1272 * paging_in_progress references at this point.
1273 *
1274 * No new paging_in_progress should appear now that the
1275 * VM object is "terminating" and not "alive".
1276 */
1277 vm_object_reap_async(object);
1278 vm_object_unlock(object);
1279 /*
1280 * Return KERN_FAILURE to let the caller know that we
1281 * haven't completed the termination and it can't drop this
1282 * object's reference on its shadow object yet.
1283 * The reaper thread will take care of that once it has
1284 * completed this object's termination.
1285 */
1286 return KERN_FAILURE;
1287 }
1288 /*
1289 * complete the VM object termination
1290 */
1291 vm_object_reap(object);
1292 object = VM_OBJECT_NULL;
1293
1294 /*
1295 * the object lock was released by vm_object_reap()
1296 *
1297 * KERN_SUCCESS means that this object has been terminated
1298 * and no longer needs its shadow object but still holds a
1299 * reference on it.
1300 * The caller is responsible for dropping that reference.
1301 * We can't call vm_object_deallocate() here because that
1302 * would create a recursion.
1303 */
1304 return KERN_SUCCESS;
1305 }
1306
1307
1308 /*
1309 * vm_object_reap():
1310 *
1311 * Complete the termination of a VM object after it's been marked
1312 * as "terminating" and "!alive" by vm_object_terminate().
1313 *
1314 * The VM object must be locked by caller.
1315 * The lock will be released on return and the VM object is no longer valid.
1316 */
1317 void
1318 vm_object_reap(
1319 vm_object_t object)
1320 {
1321 memory_object_t pager;
1322
1323 vm_object_lock_assert_exclusive(object);
1324 assert(object->paging_in_progress == 0);
1325 assert(object->activity_in_progress == 0);
1326
1327 vm_object_reap_count++;
1328
1329 pager = object->pager;
1330 object->pager = MEMORY_OBJECT_NULL;
1331
1332 if (pager != MEMORY_OBJECT_NULL)
1333 memory_object_control_disable(object->pager_control);
1334
1335 object->ref_count--;
1336 #if TASK_SWAPPER
1337 assert(object->res_count == 0);
1338 #endif /* TASK_SWAPPER */
1339
1340 assert (object->ref_count == 0);
1341
1342 /*
1343 * remove from purgeable queue if it's on
1344 */
1345 if (object->objq.next || object->objq.prev) {
1346 purgeable_q_t queue = vm_purgeable_object_remove(object);
1347 assert(queue);
1348
1349 /* Must take page lock for this - using it to protect token queue */
1350 vm_page_lock_queues();
1351 vm_purgeable_token_delete_first(queue);
1352
1353 assert(queue->debug_count_objects>=0);
1354 vm_page_unlock_queues();
1355 }
1356
1357 /*
1358 * Clean or free the pages, as appropriate.
1359 * It is possible for us to find busy/absent pages,
1360 * if some faults on this object were aborted.
1361 */
1362 if (object->pageout) {
1363 assert(object->shadow != VM_OBJECT_NULL);
1364
1365 vm_pageout_object_terminate(object);
1366
1367 } else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
1368
1369 vm_object_reap_pages(object, REAP_REAP);
1370 }
1371 assert(queue_empty(&object->memq));
1372 assert(object->paging_in_progress == 0);
1373 assert(object->activity_in_progress == 0);
1374 assert(object->ref_count == 0);
1375
1376 /*
1377 * If the pager has not already been released by
1378 * vm_object_destroy, we need to terminate it and
1379 * release our reference to it here.
1380 */
1381 if (pager != MEMORY_OBJECT_NULL) {
1382 vm_object_unlock(object);
1383 vm_object_release_pager(pager, object->hashed);
1384 vm_object_lock(object);
1385 }
1386
1387 /* kick off anyone waiting on terminating */
1388 object->terminating = FALSE;
1389 vm_object_paging_begin(object);
1390 vm_object_paging_end(object);
1391 vm_object_unlock(object);
1392
1393 #if MACH_PAGEMAP
1394 vm_external_destroy(object->existence_map, object->size);
1395 #endif /* MACH_PAGEMAP */
1396
1397 object->shadow = VM_OBJECT_NULL;
1398
1399 vm_object_lock_destroy(object);
1400 /*
1401 * Free the space for the object.
1402 */
1403 zfree(vm_object_zone, object);
1404 object = VM_OBJECT_NULL;
1405 }
1406
1407
1408
1409 #define V_O_R_MAX_BATCH 128
1410
1411
1412 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
1413 MACRO_BEGIN \
1414 if (_local_free_q) { \
1415 if (do_disconnect) { \
1416 vm_page_t m; \
1417 for (m = _local_free_q; \
1418 m != VM_PAGE_NULL; \
1419 m = (vm_page_t) m->pageq.next) { \
1420 if (m->pmapped) { \
1421 pmap_disconnect(m->phys_page); \
1422 } \
1423 } \
1424 } \
1425 vm_page_free_list(_local_free_q, TRUE); \
1426 _local_free_q = VM_PAGE_NULL; \
1427 } \
1428 MACRO_END
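/*
 * VM_OBJ_REAP_FREELIST is invoked by vm_object_reap_pages() whenever it
 * drops the page-queues lock: the pages collected so far on the local
 * list are disconnected from their pmaps (if requested) and handed back
 * to the free list in one batch, which keeps the time spent behind the
 * global lock bounded to roughly V_O_R_MAX_BATCH pages at a time.
 */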
1429
1430
1431 void
1432 vm_object_reap_pages(
1433 vm_object_t object,
1434 int reap_type)
1435 {
1436 vm_page_t p;
1437 vm_page_t next;
1438 vm_page_t local_free_q = VM_PAGE_NULL;
1439 int loop_count;
1440 boolean_t disconnect_on_release;
1441
1442 if (reap_type == REAP_DATA_FLUSH) {
1443 /*
1444 * We need to disconnect pages from all pmaps before
1445 * releasing them to the free list
1446 */
1447 disconnect_on_release = TRUE;
1448 } else {
1449 /*
1450 * Either the caller has already disconnected the pages
1451 * from all pmaps, or we disconnect them here as we add
1452 		 * them to our local list of pages to be released.
1453 * No need to re-disconnect them when we release the pages
1454 * to the free list.
1455 */
1456 disconnect_on_release = FALSE;
1457 }
1458
1459 restart_after_sleep:
1460 if (queue_empty(&object->memq))
1461 return;
1462 loop_count = V_O_R_MAX_BATCH + 1;
1463
1464 vm_page_lockspin_queues();
1465
1466 next = (vm_page_t)queue_first(&object->memq);
1467
1468 while (!queue_end(&object->memq, (queue_entry_t)next)) {
1469
1470 p = next;
1471 next = (vm_page_t)queue_next(&next->listq);
1472
1473 if (--loop_count == 0) {
1474
1475 vm_page_unlock_queues();
1476
1477 if (local_free_q) {
1478 /*
1479 * Free the pages we reclaimed so far
1480 * and take a little break to avoid
1481 * hogging the page queue lock too long
1482 */
1483 VM_OBJ_REAP_FREELIST(local_free_q,
1484 disconnect_on_release);
1485 } else
1486 mutex_pause(0);
1487
1488 loop_count = V_O_R_MAX_BATCH + 1;
1489
1490 vm_page_lockspin_queues();
1491 }
1492 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1493
1494 if (reap_type == REAP_DATA_FLUSH && (p->pageout == TRUE && p->list_req_pending == TRUE)) {
1495 p->list_req_pending = FALSE;
1496 p->cleaning = FALSE;
1497 p->pageout = FALSE;
1498 /*
1499 * need to drop the laundry count...
1500 * we may also need to remove it
1501 * from the I/O paging queue...
1502 * vm_pageout_throttle_up handles both cases
1503 *
1504 * the laundry and pageout_queue flags are cleared...
1505 */
1506 vm_pageout_throttle_up(p);
1507
1508 /*
1509 * toss the wire count we picked up
1510 				 * when we initially set this page up
1511 * to be cleaned...
1512 */
1513 vm_page_unwire(p);
1514 PAGE_WAKEUP(p);
1515
1516 } else if (p->busy || p->cleaning) {
1517
1518 vm_page_unlock_queues();
1519 /*
1520 * free the pages reclaimed so far
1521 */
1522 VM_OBJ_REAP_FREELIST(local_free_q,
1523 disconnect_on_release);
1524
1525 PAGE_SLEEP(object, p, THREAD_UNINT);
1526
1527 goto restart_after_sleep;
1528 }
1529 }
1530 switch (reap_type) {
1531
1532 case REAP_DATA_FLUSH:
1533 if (VM_PAGE_WIRED(p)) {
1534 /*
1535 * this is an odd case... perhaps we should
1536 * zero-fill this page since we're conceptually
1537 * tossing its data at this point, but leaving
1538 * it on the object to honor the 'wire' contract
1539 */
1540 continue;
1541 }
1542 break;
1543
1544 case REAP_PURGEABLE:
1545 if (VM_PAGE_WIRED(p)) {
1546 /* can't purge a wired page */
1547 vm_page_purged_wired++;
1548 continue;
1549 }
1550
1551 if (p->busy) {
1552 /*
1553 * We can't reclaim a busy page but we can
1554 * make it pageable (it's not wired) to make
1555 * sure that it gets considered by
1556 * vm_pageout_scan() later.
1557 */
1558 vm_page_deactivate(p);
1559 vm_page_purged_busy++;
1560 continue;
1561 }
1562
1563 if (p->cleaning || p->laundry || p->list_req_pending) {
1564 /*
1565 * page is being acted upon,
1566 * so don't mess with it
1567 */
1568 vm_page_purged_others++;
1569 continue;
1570 }
1571 assert(p->object != kernel_object);
1572
1573 /*
1574 * we can discard this page...
1575 */
1576 if (p->pmapped == TRUE) {
1577 int refmod_state;
1578 /*
1579 * unmap the page
1580 */
1581 refmod_state = pmap_disconnect(p->phys_page);
1582 if (refmod_state & VM_MEM_MODIFIED) {
1583 p->dirty = TRUE;
1584 }
1585 }
1586 if (p->dirty || p->precious) {
1587 /*
1588 * we saved the cost of cleaning this page !
1589 */
1590 vm_page_purged_count++;
1591 }
1592
1593 break;
1594
1595 case REAP_TERMINATE:
1596 if (p->absent || p->private) {
1597 /*
1598 * For private pages, VM_PAGE_FREE just
1599 * leaves the page structure around for
1600 * its owner to clean up. For absent
1601 * pages, the structure is returned to
1602 * the appropriate pool.
1603 */
1604 break;
1605 }
1606 if (p->fictitious) {
1607 assert (p->phys_page == vm_page_guard_addr);
1608 break;
1609 }
1610 if (!p->dirty && p->wpmapped)
1611 p->dirty = pmap_is_modified(p->phys_page);
1612
1613 if ((p->dirty || p->precious) && !p->error && object->alive) {
1614
1615 p->busy = TRUE;
1616
1617 VM_PAGE_QUEUES_REMOVE(p);
1618
1619 vm_page_unlock_queues();
1620 /*
1621 * free the pages reclaimed so far
1622 */
1623 VM_OBJ_REAP_FREELIST(local_free_q,
1624 disconnect_on_release);
1625
1626 /*
1627 * flush page... page will be freed
1628 * upon completion of I/O
1629 */
1630 vm_pageout_cluster(p);
1631 vm_object_paging_wait(object, THREAD_UNINT);
1632
1633 goto restart_after_sleep;
1634 }
1635 break;
1636
1637 case REAP_REAP:
1638 break;
1639 }
1640 vm_page_free_prepare_queues(p);
1641 assert(p->pageq.next == NULL && p->pageq.prev == NULL);
1642 /*
1643 * Add this page to our list of reclaimed pages,
1644 * to be freed later.
1645 */
1646 p->pageq.next = (queue_entry_t) local_free_q;
1647 local_free_q = p;
1648 }
1649 vm_page_unlock_queues();
1650
1651 /*
1652 * Free the remaining reclaimed pages
1653 */
1654 VM_OBJ_REAP_FREELIST(local_free_q,
1655 disconnect_on_release);
1656 }
1657
1658
1659 void
1660 vm_object_reap_async(
1661 vm_object_t object)
1662 {
1663 vm_object_lock_assert_exclusive(object);
1664
1665 vm_object_reaper_lock_spin();
1666
1667 vm_object_reap_count_async++;
1668
1669 /* enqueue the VM object... */
1670 queue_enter(&vm_object_reaper_queue, object,
1671 vm_object_t, cached_list);
1672
1673 vm_object_reaper_unlock();
1674
1675 /* ... and wake up the reaper thread */
1676 thread_wakeup((event_t) &vm_object_reaper_queue);
1677 }
1678
1679
1680 void
1681 vm_object_reaper_thread(void)
1682 {
1683 vm_object_t object, shadow_object;
1684
1685 vm_object_reaper_lock_spin();
1686
1687 while (!queue_empty(&vm_object_reaper_queue)) {
1688 queue_remove_first(&vm_object_reaper_queue,
1689 object,
1690 vm_object_t,
1691 cached_list);
1692
1693 vm_object_reaper_unlock();
1694 vm_object_lock(object);
1695
1696 assert(object->terminating);
1697 assert(!object->alive);
1698
1699 /*
1700 * The pageout daemon might be playing with our pages.
1701 * Now that the object is dead, it won't touch any more
1702 * pages, but some pages might already be on their way out.
1703 * Hence, we wait until the active paging activities have
1704 * ceased before we break the association with the pager
1705 * itself.
1706 */
1707 while (object->paging_in_progress != 0 ||
1708 object->activity_in_progress != 0) {
1709 vm_object_wait(object,
1710 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1711 THREAD_UNINT);
1712 vm_object_lock(object);
1713 }
1714
1715 shadow_object =
1716 object->pageout ? VM_OBJECT_NULL : object->shadow;
1717
1718 vm_object_reap(object);
1719 /* cache is unlocked and object is no longer valid */
1720 object = VM_OBJECT_NULL;
1721
1722 if (shadow_object != VM_OBJECT_NULL) {
1723 /*
1724 * Drop the reference "object" was holding on
1725 * its shadow object.
1726 */
1727 vm_object_deallocate(shadow_object);
1728 shadow_object = VM_OBJECT_NULL;
1729 }
1730 vm_object_reaper_lock_spin();
1731 }
1732
1733 /* wait for more work... */
1734 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1735
1736 vm_object_reaper_unlock();
1737
1738 thread_block((thread_continue_t) vm_object_reaper_thread);
1739 /*NOTREACHED*/
1740 }
1741
1742 /*
1743 * Routine: vm_object_pager_wakeup
1744 * Purpose: Wake up anyone waiting for termination of a pager.
1745 */
1746
1747 static void
1748 vm_object_pager_wakeup(
1749 memory_object_t pager)
1750 {
1751 vm_object_hash_entry_t entry;
1752 boolean_t waiting = FALSE;
1753 lck_mtx_t *lck;
1754
1755 /*
1756 * If anyone was waiting for the memory_object_terminate
1757 * to be queued, wake them up now.
1758 */
1759 lck = vm_object_hash_lock_spin(pager);
1760 entry = vm_object_hash_lookup(pager, TRUE);
1761 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
1762 waiting = entry->waiting;
1763 vm_object_hash_unlock(lck);
1764
1765 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
1766 if (waiting)
1767 thread_wakeup((event_t) pager);
1768 vm_object_hash_entry_free(entry);
1769 }
1770 }
1771
1772 /*
1773 * Routine: vm_object_release_pager
1774 * Purpose: Terminate the pager and, upon completion,
1775 * release our last reference to it.
1776 * just like memory_object_terminate, except
1777 * that we wake up anyone blocked in vm_object_enter
1778 * waiting for termination message to be queued
1779 * before calling memory_object_init.
1780 */
1781 static void
1782 vm_object_release_pager(
1783 memory_object_t pager,
1784 boolean_t hashed)
1785 {
1786
1787 /*
1788 * Terminate the pager.
1789 */
1790
1791 (void) memory_object_terminate(pager);
1792
1793 if (hashed == TRUE) {
1794 /*
1795 * Wakeup anyone waiting for this terminate
1796 * and remove the entry from the hash
1797 */
1798 vm_object_pager_wakeup(pager);
1799 }
1800 /*
1801 * Release reference to pager.
1802 */
1803 memory_object_deallocate(pager);
1804 }
1805
1806 /*
1807 * Routine: vm_object_destroy
1808 * Purpose:
1809 * Shut down a VM object, despite the
1810 * presence of address map (or other) references
1811 * to the vm_object.
1812 */
1813 kern_return_t
1814 vm_object_destroy(
1815 vm_object_t object,
1816 __unused kern_return_t reason)
1817 {
1818 memory_object_t old_pager;
1819
1820 if (object == VM_OBJECT_NULL)
1821 return(KERN_SUCCESS);
1822
1823 /*
1824 * Remove the pager association immediately.
1825 *
1826 * This will prevent the memory manager from further
1827 * meddling. [If it wanted to flush data or make
1828 * other changes, it should have done so before performing
1829 * the destroy call.]
1830 */
1831
1832 vm_object_lock(object);
1833 object->can_persist = FALSE;
1834 object->named = FALSE;
1835 object->alive = FALSE;
1836
1837 if (object->hashed) {
1838 lck_mtx_t *lck;
1839 /*
1840 * Rip out the pager from the vm_object now...
1841 */
1842 lck = vm_object_hash_lock_spin(object->pager);
1843 vm_object_remove(object);
1844 vm_object_hash_unlock(lck);
1845 }
1846 old_pager = object->pager;
1847 object->pager = MEMORY_OBJECT_NULL;
1848 if (old_pager != MEMORY_OBJECT_NULL)
1849 memory_object_control_disable(object->pager_control);
1850
1851 /*
1852 * Wait for the existing paging activity (that got
1853 * through before we nulled out the pager) to subside.
1854 */
1855
1856 vm_object_paging_wait(object, THREAD_UNINT);
1857 vm_object_unlock(object);
1858
1859 /*
1860 * Terminate the object now.
1861 */
1862 if (old_pager != MEMORY_OBJECT_NULL) {
1863 vm_object_release_pager(old_pager, object->hashed);
1864
1865 /*
1866 * JMM - Release the caller's reference. This assumes the
1867 * caller had a reference to release, which is a big (but
1868 * currently valid) assumption if this is driven from the
1869 * vnode pager (it is holding a named reference when making
1870 * this call)..
1871 */
1872 vm_object_deallocate(object);
1873
1874 }
1875 return(KERN_SUCCESS);
1876 }
1877
1878
1879 #define VM_OBJ_DEACT_ALL_STATS DEBUG
1880 #if VM_OBJ_DEACT_ALL_STATS
1881 uint32_t vm_object_deactivate_all_pages_batches = 0;
1882 uint32_t vm_object_deactivate_all_pages_pages = 0;
1883 #endif /* VM_OBJ_DEACT_ALL_STATS */
1884 /*
1885 * vm_object_deactivate_all_pages
1886 *
1887 * Deactivate all pages in the specified object. (Keep its pages
1888 * in memory even though it is no longer referenced.)
1889 *
1890 * The object must be locked.
1891 */
1892 static void
1893 vm_object_deactivate_all_pages(
1894 register vm_object_t object)
1895 {
1896 register vm_page_t p;
1897 int loop_count;
1898 #if VM_OBJ_DEACT_ALL_STATS
1899 int pages_count;
1900 #endif /* VM_OBJ_DEACT_ALL_STATS */
1901 #define V_O_D_A_P_MAX_BATCH 256
1902
1903 loop_count = V_O_D_A_P_MAX_BATCH;
1904 #if VM_OBJ_DEACT_ALL_STATS
1905 pages_count = 0;
1906 #endif /* VM_OBJ_DEACT_ALL_STATS */
1907 vm_page_lock_queues();
1908 queue_iterate(&object->memq, p, vm_page_t, listq) {
1909 if (--loop_count == 0) {
1910 #if VM_OBJ_DEACT_ALL_STATS
1911 hw_atomic_add(&vm_object_deactivate_all_pages_batches,
1912 1);
1913 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
1914 pages_count);
1915 pages_count = 0;
1916 #endif /* VM_OBJ_DEACT_ALL_STATS */
1917 lck_mtx_yield(&vm_page_queue_lock);
1918 loop_count = V_O_D_A_P_MAX_BATCH;
1919 }
1920 if (!p->busy && !p->throttled) {
1921 #if VM_OBJ_DEACT_ALL_STATS
1922 pages_count++;
1923 #endif /* VM_OBJ_DEACT_ALL_STATS */
1924 vm_page_deactivate(p);
1925 }
1926 }
1927 #if VM_OBJ_DEACT_ALL_STATS
1928 if (pages_count) {
1929 hw_atomic_add(&vm_object_deactivate_all_pages_batches, 1);
1930 hw_atomic_add(&vm_object_deactivate_all_pages_pages,
1931 pages_count);
1932 pages_count = 0;
1933 }
1934 #endif /* VM_OBJ_DEACT_ALL_STATS */
1935 vm_page_unlock_queues();
1936 }
1937
1938
1939
1940 /*
1941 * when deallocating pages it is necessary to hold
1942 * the vm_page_queue_lock (a hot global lock) for certain operations
1943 * on the page... however, the majority of the work can be done
1944 * while merely holding the object lock... to mitigate the time spent behind the
1945 * global lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
1946 * while doing all of the work that doesn't require the vm_page_queue_lock...
1947 * then call dw_do_work to acquire the vm_page_queue_lock and do the
1948 * necessary work for each page... we will grab the busy bit on the page
1949 * so that dw_do_work can drop the object lock if it can't immediately take the
1950 * vm_page_queue_lock in order to compete for the locks in the same order that
1951 * vm_pageout_scan takes them.
1952 */
1953
1954 #define DELAYED_WORK_LIMIT 32
1955
1956 #define DW_clear_reference 0x01
1957 #define DW_move_page 0x02
1958 #define DW_clear_busy 0x04
1959 #define DW_PAGE_WAKEUP 0x08
1960
1961
1962 struct dw {
1963 vm_page_t dw_m;
1964 int dw_mask;
1965 };
1966
1967 static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
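/*
 * A minimal sketch of the batching pattern described above, assuming a
 * resident page "m" of "object" with the object lock held (illustrative
 * only; the real loop is in deactivate_pages_in_object() below):
 *
 *	struct dw	dw_array[DELAYED_WORK_LIMIT];
 *	struct dw	*dwp = &dw_array[0];
 *	int		dw_count = 0;
 *
 *	...work that needs only the object lock...
 *	m->busy = TRUE;			keep the page stable in case
 *					dw_do_work drops the object lock
 *	dwp->dw_m = m;
 *	dwp->dw_mask = DW_clear_reference | DW_clear_busy | DW_PAGE_WAKEUP;
 *	dwp++;
 *	dw_count++;
 *
 *	if (dw_count >= DELAYED_WORK_LIMIT) {
 *		dw_do_work(object, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 *	...
 *	if (dw_count)
 *		dw_do_work(object, &dw_array[0], dw_count);
 */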
1968
1969
1970 static void
1971 dw_do_work(
1972 vm_object_t object,
1973 struct dw *dwp,
1974 int dw_count)
1975 {
1976 vm_page_t m;
1977 int j;
1978
1979 /*
1980 * pageout_scan takes the vm_page_lock_queues first
1981 * then tries for the object lock... to avoid what
1982 * is effectively a lock inversion, we'll go to the
1983 * trouble of taking them in that same order... otherwise
1984 * if this object contains the majority of the pages resident
1985 * in the UBC (or a small set of large objects actively being
1986 * worked on contain the majority of the pages), we could
1987 * cause the pageout_scan thread to 'starve' in its attempt
1988 * to find pages to move to the free queue, since it has to
1989 * successfully acquire the object lock of any candidate page
1990 * before it can steal/clean it.
1991 */
1992 if (!vm_page_trylockspin_queues()) {
1993 vm_object_unlock(object);
1994
1995 vm_page_lockspin_queues();
1996
1997 for (j = 0; ; j++) {
1998 if (!vm_object_lock_avoid(object) &&
1999 _vm_object_lock_try(object))
2000 break;
2001 vm_page_unlock_queues();
2002 mutex_pause(j);
2003 vm_page_lockspin_queues();
2004 }
2005 }
2006 for (j = 0; j < dw_count; j++, dwp++) {
2007
2008 m = dwp->dw_m;
2009
2010 if (dwp->dw_mask & DW_clear_reference)
2011 m->reference = FALSE;
2012
2013 if (dwp->dw_mask & DW_move_page) {
2014 VM_PAGE_QUEUES_REMOVE(m);
2015
2016 assert(!m->laundry);
2017 assert(m->object != kernel_object);
2018 assert(m->pageq.next == NULL &&
2019 m->pageq.prev == NULL);
2020
2021 if (m->zero_fill) {
2022 queue_enter_first(&vm_page_queue_zf, m, vm_page_t, pageq);
2023 vm_zf_queue_count++;
2024 } else {
2025 queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);
2026 }
2027 m->inactive = TRUE;
2028
2029 if (!m->fictitious) {
2030 vm_page_inactive_count++;
2031 token_new_pagecount++;
2032 } else {
2033 assert(m->phys_page == vm_page_fictitious_addr);
2034 }
2035 }
2036 if (dwp->dw_mask & DW_clear_busy)
2037 dwp->dw_m->busy = FALSE;
2038
2039 if (dwp->dw_mask & DW_PAGE_WAKEUP)
2040 PAGE_WAKEUP(dwp->dw_m);
2041 }
2042 vm_page_unlock_queues();
2043
2044 #if CONFIG_EMBEDDED
2045 {
2046 int percent_avail;
2047
2048 /*
2049 * Decide if we need to send a memory status notification.
2050 */
2051 percent_avail =
2052 (vm_page_active_count + vm_page_inactive_count +
2053 vm_page_speculative_count + vm_page_free_count +
2054 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
2055 atop_64(max_mem);
2056 if (percent_avail >= (kern_memorystatus_level + 5) ||
2057 percent_avail <= (kern_memorystatus_level - 5)) {
2058 kern_memorystatus_level = percent_avail;
2059 thread_wakeup((event_t)&kern_memorystatus_wakeup);
2060 }
2061 }
2062 #endif
2063 }
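/*
 * Worked example of the CONFIG_EMBEDDED memory status computation above,
 * using hypothetical counts: if max_mem covers 131072 pages and 32768
 * pages are counted as available,
 *
 *	percent_avail = 32768 * 100 / 131072 = 25
 *
 * so a wakeup is posted only if the previous kern_memorystatus_level was
 * at most 20 or at least 30, i.e., the level moved by 5 points or more.
 */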
2064
2065
2066
2067 /*
2068 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2069 * exist because of the need to handle shadow chains. When deactivating pages, we only
2070 * want to deactivate the ones at the top most level in the object chain. In order to do
2071 * this efficiently, the specified address range is divided up into "chunks" and we use
2072 * a bit map to keep track of which pages have already been processed as we descend down
2073 * the shadow chain. These chunk macros hide the details of the bit map implementation
2074 * as much as we can.
2075 *
2076 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2077 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2078 * order bit represents page 0 in the current range and highest order bit represents
2079 * page 63.
2080 *
2081 * For further convenience, we also use negative logic for the page state in the bit map.
2082 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2083 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2084 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2085 * out with all the bits set. The macros below hide all these details from the caller.
2086 */
2087
2088 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2089 /* be the same as the number of bits in */
2090 /* the chunk_state_t type. We use 64 */
2091 /* just for convenience. */
2092
2093 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2094
2095 typedef uint64_t chunk_state_t;
2096
2097 /*
2098 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2099 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2100 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2101 * looking at pages in that range. This can save us from unnecessarily chasing down the
2102 * shadow chain.
2103 */
2104
2105 #define CHUNK_INIT(c, len) \
2106 MACRO_BEGIN \
2107 uint64_t p; \
2108 \
2109 (c) = 0xffffffffffffffffLL; \
2110 \
2111 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2112 MARK_PAGE_HANDLED(c, p); \
2113 MACRO_END
2114
2115 /*
2116 * Return true if any pages in the chunk have not yet been processed.
2117 */
2118
2119 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2120
2121 /*
2122 * Return true if the page at offset 'p' in the bit map has already been handled
2123 * while processing a higher level object in the shadow chain.
2124 */
2125
2126 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0)
2127
2128 /*
2129 * Mark the page at offset 'p' in the bit map as having been processed.
2130 */
2131
2132 #define MARK_PAGE_HANDLED(c, p) \
2133 MACRO_BEGIN \
2134 (c) = (c) & ~(1LL << (p)); \
2135 MACRO_END
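/*
 * A short worked example of the negative-logic bit map, assuming a
 * 3-page chunk (illustrative values only):
 *
 *	chunk_state_t c;
 *
 *	CHUNK_INIT(c, 3 * PAGE_SIZE_64);	c == 0x0000000000000007
 *						pages 0-2 still unprocessed,
 *						pages 3-63 pre-marked "handled"
 *
 *	MARK_PAGE_HANDLED(c, 0);		c == 0x0000000000000006
 *	PAGE_ALREADY_HANDLED(c, 0)		TRUE
 *	CHUNK_NOT_COMPLETE(c)			TRUE (pages 1 and 2 remain)
 *
 *	MARK_PAGE_HANDLED(c, 1);
 *	MARK_PAGE_HANDLED(c, 2);
 *	CHUNK_NOT_COMPLETE(c)			FALSE (whole chunk processed)
 */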
2136
2137
2138 /*
2139 * Return true if the page at the given offset has been paged out. Object is
2140 * locked upon entry and returned locked.
2141 */
2142
2143 static boolean_t
2144 page_is_paged_out(
2145 vm_object_t object,
2146 vm_object_offset_t offset)
2147 {
2148 kern_return_t kr;
2149 memory_object_t pager;
2150
2151 /*
2152 * Check the existence map for the page if we have one, otherwise
2153 * ask the pager about this page.
2154 */
2155
2156 #if MACH_PAGEMAP
2157 if (object->existence_map) {
2158 if (vm_external_state_get(object->existence_map, offset)
2159 == VM_EXTERNAL_STATE_EXISTS) {
2160 /*
2161 * We found the page
2162 */
2163
2164 return TRUE;
2165 }
2166 } else
2167 #endif
2168 if (object->internal &&
2169 object->alive &&
2170 !object->terminating &&
2171 object->pager_ready) {
2172
2173 /*
2174 * We're already holding a "paging in progress" reference
2175 * so the object can't disappear when we release the lock.
2176 */
2177
2178 assert(object->paging_in_progress);
2179 pager = object->pager;
2180 vm_object_unlock(object);
2181
2182 kr = memory_object_data_request(
2183 pager,
2184 offset + object->paging_offset,
2185 0, /* just poke the pager */
2186 VM_PROT_READ,
2187 NULL);
2188
2189 vm_object_lock(object);
2190
2191 if (kr == KERN_SUCCESS) {
2192
2193 /*
2194 * We found the page
2195 */
2196
2197 return TRUE;
2198 }
2199 }
2200
2201 return FALSE;
2202 }
2203
2204
2205 /*
2206 * Deactivate the pages in the specified object and range. If kill_page is set, also discard any
2207 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2208 * a size that is less than or equal to the CHUNK_SIZE.
2209 */
2210
2211 static void
2212 deactivate_pages_in_object(
2213 vm_object_t object,
2214 vm_object_offset_t offset,
2215 vm_object_size_t size,
2216 boolean_t kill_page,
2217 boolean_t reusable_page,
2218 #if !MACH_ASSERT
2219 __unused
2220 #endif
2221 boolean_t all_reusable,
2222 chunk_state_t *chunk_state)
2223 {
2224 vm_page_t m;
2225 int p;
2226 struct dw dw_array[DELAYED_WORK_LIMIT];
2227 struct dw *dwp;
2228 int dw_count;
2229 unsigned int reusable = 0;
2230
2231
2232 /*
2233 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2234 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2235 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2236 * all the pages in the chunk.
2237 */
2238
2239 dwp = &dw_array[0];
2240 dw_count = 0;
2241
2242 for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) {
2243
2244 /*
2245 * If this offset has already been found and handled in a higher level object, then don't
2246 * do anything with it in the current shadow object.
2247 */
2248
2249 if (PAGE_ALREADY_HANDLED(*chunk_state, p))
2250 continue;
2251
2252 /*
2253 * See if the page at this offset is around. First check to see if the page is resident,
2254 * then if not, check the existence map or with the pager.
2255 */
2256
2257 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2258
2259 /*
2260 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2261 * so that we won't bother looking for a page at this offset again if there are more
2262 * shadow objects. Then deactivate the page.
2263 */
2264
2265 MARK_PAGE_HANDLED(*chunk_state, p);
2266
2267 if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy)) {
2268 int clear_refmod;
2269
2270 assert(!m->laundry);
2271
2272 clear_refmod = VM_MEM_REFERENCED;
2273 dwp->dw_mask = DW_clear_reference;
2274
2275 if ((kill_page) && (object->internal)) {
2276 m->precious = FALSE;
2277 m->dirty = FALSE;
2278
2279 clear_refmod |= VM_MEM_MODIFIED;
2280 #if CONFIG_EMBEDDED
2281 dwp->dw_mask |= DW_move_page;
2282 #endif
2283 #if MACH_PAGEMAP
2284 vm_external_state_clr(object->existence_map, offset);
2285 #endif /* MACH_PAGEMAP */
2286
2287 if (reusable_page && !m->reusable) {
2288 assert(!all_reusable);
2289 assert(!object->all_reusable);
2290 m->reusable = TRUE;
2291 object->reusable_page_count++;
2292 assert(object->resident_page_count >= object->reusable_page_count);
2293 reusable++;
2294 }
2295 }
2296 pmap_clear_refmod(m->phys_page, clear_refmod);
2297
2298 if (!m->throttled && !(reusable_page || all_reusable))
2299 dwp->dw_mask |= DW_move_page;
2300 /*
2301 * dw_do_work may need to drop the object lock
2302 * if it does, we need the pages it's looking at to
2303 * be held stable via the busy bit.
2304 */
2305 m->busy = TRUE;
2306 dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
2307
2308 dwp->dw_m = m;
2309 dwp++;
2310 dw_count++;
2311
2312 if (dw_count >= DELAYED_WORK_LIMIT) {
2313 if (reusable) {
2314 OSAddAtomic(reusable,
2315 &vm_page_stats_reusable.reusable_count);
2316 vm_page_stats_reusable.reusable += reusable;
2317 reusable = 0;
2318 }
2319 dw_do_work(object, &dw_array[0], dw_count);
2320
2321 dwp = &dw_array[0];
2322 dw_count = 0;
2323 }
2324 }
2325
2326 } else {
2327
2328 /*
2329 * The page at this offset isn't memory resident, check to see if it's
2330 * been paged out. If so, mark it as handled so we don't bother looking
2331 * for it in the shadow chain.
2332 */
2333
2334 if (page_is_paged_out(object, offset)) {
2335 MARK_PAGE_HANDLED(*chunk_state, p);
2336
2337 /*
2338 * If we're killing a non-resident page, then clear the page in the existence
2339 * map so we don't bother paging it back in if it's touched again in the future.
2340 */
2341
2342 if ((kill_page) && (object->internal)) {
2343 #if MACH_PAGEMAP
2344 vm_external_state_clr(object->existence_map, offset);
2345 #endif /* MACH_PAGEMAP */
2346 }
2347 }
2348 }
2349 }
2350
2351 if (reusable) {
2352 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2353 vm_page_stats_reusable.reusable += reusable;
2354 reusable = 0;
2355 }
2356
2357 if (dw_count)
2358 dw_do_work(object, &dw_array[0], dw_count);
2359 }
2360
2361
2362 /*
2363 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2364 * will always be less than or equal to the given size. The total range is divided up
2365 * into chunks for efficiency and performance related to the locks and handling the shadow
2366 * chain. This routine returns how much of the given "size" it actually processed. It's
2367 * up to the caller to loop and keep calling this routine until the entire range they want
2368 * to process has been done.
2369 */
2370
2371 static vm_object_size_t
2372 deactivate_a_chunk(
2373 vm_object_t orig_object,
2374 vm_object_offset_t offset,
2375 vm_object_size_t size,
2376 boolean_t kill_page,
2377 boolean_t reusable_page,
2378 boolean_t all_reusable)
2379 {
2380 vm_object_t object;
2381 vm_object_t tmp_object;
2382 vm_object_size_t length;
2383 chunk_state_t chunk_state;
2384
2385
2386 /*
2387 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2388 * remaining size the caller asked for.
2389 */
2390
2391 length = MIN(size, CHUNK_SIZE);
2392
2393 /*
2394 * The chunk_state keeps track of which pages we've already processed if there's
2395 * a shadow chain on this object. At this point, we haven't done anything with this
2396 * range of pages yet, so initialize the state to indicate no pages processed yet.
2397 */
2398
2399 CHUNK_INIT(chunk_state, length);
2400 object = orig_object;
2401
2402 /*
2403 * Start at the top level object and iterate around the loop once for each object
2404 * in the shadow chain. We stop processing early if we've already found all the pages
2405 * in the range. Otherwise we stop when we run out of shadow objects.
2406 */
2407
2408 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2409 vm_object_paging_begin(object);
2410
2411 deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state);
2412
2413 vm_object_paging_end(object);
2414
2415 /*
2416 * We've finished with this object, see if there's a shadow object. If
2417 * there is, update the offset and lock the new object. We also turn off
2418 * kill_page at this point since we only kill pages in the top most object.
2419 */
2420
2421 tmp_object = object->shadow;
2422
2423 if (tmp_object) {
2424 kill_page = FALSE;
2425 reusable_page = FALSE;
2426 all_reusable = FALSE;
2427 offset += object->shadow_offset;
2428 vm_object_lock(tmp_object);
2429 }
2430
2431 if (object != orig_object)
2432 vm_object_unlock(object);
2433
2434 object = tmp_object;
2435 }
2436
2437 if (object && object != orig_object)
2438 vm_object_unlock(object);
2439
2440 return length;
2441 }
2442
2443
2444
2445 /*
2446 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2447 * we also clear the modified status of the page and "forget" any changes that have been made
2448 * to the page.
2449 */
2450
2451 __private_extern__ void
2452 vm_object_deactivate_pages(
2453 vm_object_t object,
2454 vm_object_offset_t offset,
2455 vm_object_size_t size,
2456 boolean_t kill_page,
2457 boolean_t reusable_page)
2458 {
2459 vm_object_size_t length;
2460 boolean_t all_reusable;
2461
2462 /*
2463 * We break the range up into chunks and do one chunk at a time. This is for
2464 * efficiency and performance while handling the shadow chains and the locks.
2465 * The deactivate_a_chunk() function returns how much of the range it processed.
2466 * We keep calling this routine until the given size is exhausted.
2467 */
2468
2469
2470 all_reusable = FALSE;
2471 if (reusable_page &&
2472 object->size != 0 &&
2473 object->size == size &&
2474 object->reusable_page_count == 0) {
2475 all_reusable = TRUE;
2476 reusable_page = FALSE;
2477 }
2478
2479 while (size) {
2480 length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable);
2481
2482 size -= length;
2483 offset += length;
2484 }
2485
2486 if (all_reusable) {
2487 if (!object->all_reusable) {
2488 unsigned int reusable;
2489
2490 object->all_reusable = TRUE;
2491 assert(object->reusable_page_count == 0);
2492 /* update global stats */
2493 reusable = object->resident_page_count;
2494 OSAddAtomic(reusable,
2495 &vm_page_stats_reusable.reusable_count);
2496 vm_page_stats_reusable.reusable += reusable;
2497 vm_page_stats_reusable.all_reusable_calls++;
2498 }
2499 } else if (reusable_page) {
2500 vm_page_stats_reusable.partial_reusable_calls++;
2501 }
2502 }
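/*
 * Hedged usage sketch (hypothetical caller and values): deactivate and
 * "kill" every page backing the first 1MB of "object", without marking
 * anything reusable. The object lock is assumed held by the caller, as
 * the per-chunk vm_object_paging_begin() calls require:
 *
 *	boolean_t	kill_page = TRUE;
 *	boolean_t	reusable_page = FALSE;
 *
 *	vm_object_lock(object);
 *	vm_object_deactivate_pages(object,
 *				   (vm_object_offset_t) 0,
 *				   (vm_object_size_t) (1024 * 1024),
 *				   kill_page,
 *				   reusable_page);
 *	vm_object_unlock(object);
 */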
2503
2504 void
2505 vm_object_reuse_pages(
2506 vm_object_t object,
2507 vm_object_offset_t start_offset,
2508 vm_object_offset_t end_offset,
2509 boolean_t allow_partial_reuse)
2510 {
2511 vm_object_offset_t cur_offset;
2512 vm_page_t m;
2513 unsigned int reused, reusable;
2514
2515 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
2516 MACRO_BEGIN \
2517 if ((m) != VM_PAGE_NULL && \
2518 (m)->reusable) { \
2519 assert((object)->reusable_page_count <= \
2520 (object)->resident_page_count); \
2521 assert((object)->reusable_page_count > 0); \
2522 (object)->reusable_page_count--; \
2523 (m)->reusable = FALSE; \
2524 (reused)++; \
2525 } \
2526 MACRO_END
2527
2528 reused = 0;
2529 reusable = 0;
2530
2531 vm_object_lock_assert_exclusive(object);
2532
2533 if (object->all_reusable) {
2534 assert(object->reusable_page_count == 0);
2535 object->all_reusable = FALSE;
2536 if (end_offset - start_offset == object->size ||
2537 !allow_partial_reuse) {
2538 vm_page_stats_reusable.all_reuse_calls++;
2539 reused = object->resident_page_count;
2540 } else {
2541 vm_page_stats_reusable.partial_reuse_calls++;
2542 queue_iterate(&object->memq, m, vm_page_t, listq) {
2543 if (m->offset < start_offset ||
2544 m->offset >= end_offset) {
2545 m->reusable = TRUE;
2546 object->reusable_page_count++;
2547 assert(object->resident_page_count >= object->reusable_page_count);
2548 continue;
2549 } else {
2550 assert(!m->reusable);
2551 reused++;
2552 }
2553 }
2554 }
2555 } else if (object->resident_page_count >
2556 ((end_offset - start_offset) >> PAGE_SHIFT)) {
2557 vm_page_stats_reusable.partial_reuse_calls++;
2558 for (cur_offset = start_offset;
2559 cur_offset < end_offset;
2560 cur_offset += PAGE_SIZE_64) {
2561 if (object->reusable_page_count == 0) {
2562 break;
2563 }
2564 m = vm_page_lookup(object, cur_offset);
2565 VM_OBJECT_REUSE_PAGE(object, m, reused);
2566 }
2567 } else {
2568 vm_page_stats_reusable.partial_reuse_calls++;
2569 queue_iterate(&object->memq, m, vm_page_t, listq) {
2570 if (object->reusable_page_count == 0) {
2571 break;
2572 }
2573 if (m->offset < start_offset ||
2574 m->offset >= end_offset) {
2575 continue;
2576 }
2577 VM_OBJECT_REUSE_PAGE(object, m, reused);
2578 }
2579 }
2580
2581 /* update global stats */
2582 OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count);
2583 vm_page_stats_reusable.reused += reused;
2584 vm_page_stats_reusable.reusable += reusable;
2585 }
2586
2587 /*
2588 * Routine: vm_object_pmap_protect
2589 *
2590 * Purpose:
2591 * Reduces the permission for all physical
2592 * pages in the specified object range.
2593 *
2594 * If removing write permission only, it is
2595 * sufficient to protect only the pages in
2596 * the top-level object; only those pages may
2597 * have write permission.
2598 *
2599 * If removing all access, we must follow the
2600 * shadow chain from the top-level object to
2601 * remove access to all pages in shadowed objects.
2602 *
2603 * The object must *not* be locked. The object must
2604 * be temporary/internal.
2605 *
2606 * If pmap is not NULL, this routine assumes that
2607 * the only mappings for the pages are in that
2608 * pmap.
2609 */
2610
2611 __private_extern__ void
2612 vm_object_pmap_protect(
2613 register vm_object_t object,
2614 register vm_object_offset_t offset,
2615 vm_object_size_t size,
2616 pmap_t pmap,
2617 vm_map_offset_t pmap_start,
2618 vm_prot_t prot)
2619 {
2620 if (object == VM_OBJECT_NULL)
2621 return;
2622 size = vm_object_round_page(size);
2623 offset = vm_object_trunc_page(offset);
2624
2625 vm_object_lock(object);
2626
2627 if (object->phys_contiguous) {
2628 if (pmap != NULL) {
2629 vm_object_unlock(object);
2630 pmap_protect(pmap, pmap_start, pmap_start + size, prot);
2631 } else {
2632 vm_object_offset_t phys_start, phys_end, phys_addr;
2633
2634 phys_start = object->shadow_offset + offset;
2635 phys_end = phys_start + size;
2636 assert(phys_start <= phys_end);
2637 assert(phys_end <= object->shadow_offset + object->size);
2638 vm_object_unlock(object);
2639
2640 for (phys_addr = phys_start;
2641 phys_addr < phys_end;
2642 phys_addr += PAGE_SIZE_64) {
2643 pmap_page_protect((ppnum_t) (phys_addr >> PAGE_SHIFT), prot);
2644 }
2645 }
2646 return;
2647 }
2648
2649 assert(object->internal);
2650
2651 while (TRUE) {
2652 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
2653 vm_object_unlock(object);
2654 pmap_protect(pmap, pmap_start, pmap_start + size, prot);
2655 return;
2656 }
2657
2658 /* if we are doing large ranges with respect to resident */
2659 /* page count then we should iterate over pages otherwise */
2660 /* inverse page look-up will be faster */
2661 if (ptoa_64(object->resident_page_count / 4) < size) {
2662 vm_page_t p;
2663 vm_object_offset_t end;
2664
2665 end = offset + size;
2666
2667 if (pmap != PMAP_NULL) {
2668 queue_iterate(&object->memq, p, vm_page_t, listq) {
2669 if (!p->fictitious &&
2670 (offset <= p->offset) && (p->offset < end)) {
2671 vm_map_offset_t start;
2672
2673 start = pmap_start + p->offset - offset;
2674 pmap_protect(pmap, start, start + PAGE_SIZE_64, prot);
2675 }
2676 }
2677 } else {
2678 queue_iterate(&object->memq, p, vm_page_t, listq) {
2679 if (!p->fictitious &&
2680 (offset <= p->offset) && (p->offset < end)) {
2681
2682 pmap_page_protect(p->phys_page, prot);
2683 }
2684 }
2685 }
2686 } else {
2687 vm_page_t p;
2688 vm_object_offset_t end;
2689 vm_object_offset_t target_off;
2690
2691 end = offset + size;
2692
2693 if (pmap != PMAP_NULL) {
2694 for(target_off = offset;
2695 target_off < end;
2696 target_off += PAGE_SIZE) {
2697 p = vm_page_lookup(object, target_off);
2698 if (p != VM_PAGE_NULL) {
2699 vm_object_offset_t start;
2700 start = pmap_start +
2701 (p->offset - offset);
2702 pmap_protect(pmap, start,
2703 start + PAGE_SIZE, prot);
2704 }
2705 }
2706 } else {
2707 for(target_off = offset;
2708 target_off < end; target_off += PAGE_SIZE) {
2709 p = vm_page_lookup(object, target_off);
2710 if (p != VM_PAGE_NULL) {
2711 pmap_page_protect(p->phys_page, prot);
2712 }
2713 }
2714 }
2715 }
2716
2717 if (prot == VM_PROT_NONE) {
2718 /*
2719 * Must follow shadow chain to remove access
2720 * to pages in shadowed objects.
2721 */
2722 register vm_object_t next_object;
2723
2724 next_object = object->shadow;
2725 if (next_object != VM_OBJECT_NULL) {
2726 offset += object->shadow_offset;
2727 vm_object_lock(next_object);
2728 vm_object_unlock(object);
2729 object = next_object;
2730 }
2731 else {
2732 /*
2733 * End of chain - we are done.
2734 */
2735 break;
2736 }
2737 }
2738 else {
2739 /*
2740 * Pages in shadowed objects may never have
2741 * write permission - we may stop here.
2742 */
2743 break;
2744 }
2745 }
2746
2747 vm_object_unlock(object);
2748 }
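/*
 * Hedged usage sketch ("map" and "va" are hypothetical): remove write
 * access to the first 16 pages of an internal object whose only mappings
 * are in "map"'s physical map, starting at virtual address "va". The
 * object is passed unlocked, as required above:
 *
 *	vm_object_pmap_protect(object,
 *			       (vm_object_offset_t) 0,
 *			       (vm_object_size_t) (16 * PAGE_SIZE),
 *			       vm_map_pmap(map),
 *			       va,
 *			       VM_PROT_READ);
 */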
2749
2750 /*
2751 * Routine: vm_object_copy_slowly
2752 *
2753 * Description:
2754 * Copy the specified range of the source
2755 * virtual memory object without using
2756 * protection-based optimizations (such
2757 * as copy-on-write). The pages in the
2758 * region are actually copied.
2759 *
2760 * In/out conditions:
2761 * The caller must hold a reference and a lock
2762 * for the source virtual memory object. The source
2763 * object will be returned *unlocked*.
2764 *
2765 * Results:
2766 * If the copy is completed successfully, KERN_SUCCESS is
2767 * returned. If the caller asserted the interruptible
2768 * argument, and an interruption occurred while waiting
2769 * for a user-generated event, MACH_SEND_INTERRUPTED is
2770 * returned. Other values may be returned to indicate
2771 * hard errors during the copy operation.
2772 *
2773 * A new virtual memory object is returned in a
2774 * parameter (_result_object). The contents of this
2775 * new object, starting at a zero offset, are a copy
2776 * of the source memory region. In the event of
2777 * an error, this parameter will contain the value
2778 * VM_OBJECT_NULL.
2779 */
2780 __private_extern__ kern_return_t
2781 vm_object_copy_slowly(
2782 register vm_object_t src_object,
2783 vm_object_offset_t src_offset,
2784 vm_object_size_t size,
2785 boolean_t interruptible,
2786 vm_object_t *_result_object) /* OUT */
2787 {
2788 vm_object_t new_object;
2789 vm_object_offset_t new_offset;
2790
2791 struct vm_object_fault_info fault_info;
2792
2793 XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
2794 src_object, src_offset, size, 0, 0);
2795
2796 if (size == 0) {
2797 vm_object_unlock(src_object);
2798 *_result_object = VM_OBJECT_NULL;
2799 return(KERN_INVALID_ARGUMENT);
2800 }
2801
2802 /*
2803 * Prevent destruction of the source object while we copy.
2804 */
2805
2806 vm_object_reference_locked(src_object);
2807 vm_object_unlock(src_object);
2808
2809 /*
2810 * Create a new object to hold the copied pages.
2811 * A few notes:
2812 * We fill the new object starting at offset 0,
2813 * regardless of the input offset.
2814 * We don't bother to lock the new object within
2815 * this routine, since we have the only reference.
2816 */
2817
2818 new_object = vm_object_allocate(size);
2819 new_offset = 0;
2820
2821 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
2822
2823 fault_info.interruptible = interruptible;
2824 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
2825 fault_info.user_tag = 0;
2826 fault_info.lo_offset = src_offset;
2827 fault_info.hi_offset = src_offset + size;
2828 fault_info.no_cache = FALSE;
2829 fault_info.stealth = TRUE;
2830
2831 for ( ;
2832 size != 0 ;
2833 src_offset += PAGE_SIZE_64,
2834 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
2835 ) {
2836 vm_page_t new_page;
2837 vm_fault_return_t result;
2838
2839 vm_object_lock(new_object);
2840
2841 while ((new_page = vm_page_alloc(new_object, new_offset))
2842 == VM_PAGE_NULL) {
2843
2844 vm_object_unlock(new_object);
2845
2846 if (!vm_page_wait(interruptible)) {
2847 vm_object_deallocate(new_object);
2848 vm_object_deallocate(src_object);
2849 *_result_object = VM_OBJECT_NULL;
2850 return(MACH_SEND_INTERRUPTED);
2851 }
2852 vm_object_lock(new_object);
2853 }
2854 vm_object_unlock(new_object);
2855
2856 do {
2857 vm_prot_t prot = VM_PROT_READ;
2858 vm_page_t _result_page;
2859 vm_page_t top_page;
2860 register
2861 vm_page_t result_page;
2862 kern_return_t error_code;
2863
2864 vm_object_lock(src_object);
2865 vm_object_paging_begin(src_object);
2866
2867 if (size > (vm_size_t) -1) {
2868 /* 32-bit overflow */
2869 fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE);
2870 } else {
2871 fault_info.cluster_size = (vm_size_t) size;
2872 assert(fault_info.cluster_size == size);
2873 }
2874
2875 XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
2876 result = vm_fault_page(src_object, src_offset,
2877 VM_PROT_READ, FALSE,
2878 &prot, &_result_page, &top_page,
2879 (int *)0,
2880 &error_code, FALSE, FALSE, &fault_info);
2881
2882 switch(result) {
2883 case VM_FAULT_SUCCESS:
2884 result_page = _result_page;
2885
2886 /*
2887 * We don't need to hold the object
2888 * lock -- the busy page will be enough.
2889 * [We don't care about picking up any
2890 * new modifications.]
2891 *
2892 * Copy the page to the new object.
2893 *
2894 * POLICY DECISION:
2895 * If result_page is clean,
2896 * we could steal it instead
2897 * of copying.
2898 */
2899
2900 vm_object_unlock(result_page->object);
2901 vm_page_copy(result_page, new_page);
2902
2903 /*
2904 * Let go of both pages (make them
2905 * not busy, perform wakeup, activate).
2906 */
2907 vm_object_lock(new_object);
2908 new_page->dirty = TRUE;
2909 PAGE_WAKEUP_DONE(new_page);
2910 vm_object_unlock(new_object);
2911
2912 vm_object_lock(result_page->object);
2913 PAGE_WAKEUP_DONE(result_page);
2914
2915 vm_page_lockspin_queues();
2916 if (!result_page->active &&
2917 !result_page->inactive &&
2918 !result_page->throttled)
2919 vm_page_activate(result_page);
2920 vm_page_activate(new_page);
2921 vm_page_unlock_queues();
2922
2923 /*
2924 * Release paging references and
2925 * top-level placeholder page, if any.
2926 */
2927
2928 vm_fault_cleanup(result_page->object,
2929 top_page);
2930
2931 break;
2932
2933 case VM_FAULT_RETRY:
2934 break;
2935
2936 case VM_FAULT_FICTITIOUS_SHORTAGE:
2937 vm_page_more_fictitious();
2938 break;
2939
2940 case VM_FAULT_MEMORY_SHORTAGE:
2941 if (vm_page_wait(interruptible))
2942 break;
2943 /* fall thru */
2944
2945 case VM_FAULT_INTERRUPTED:
2946 vm_object_lock(new_object);
2947 VM_PAGE_FREE(new_page);
2948 vm_object_unlock(new_object);
2949
2950 vm_object_deallocate(new_object);
2951 vm_object_deallocate(src_object);
2952 *_result_object = VM_OBJECT_NULL;
2953 return(MACH_SEND_INTERRUPTED);
2954
2955 case VM_FAULT_SUCCESS_NO_VM_PAGE:
2956 /* success but no VM page: fail */
2957 vm_object_paging_end(src_object);
2958 vm_object_unlock(src_object);
2959 /*FALLTHROUGH*/
2960 case VM_FAULT_MEMORY_ERROR:
2961 /*
2962 * A policy choice:
2963 * (a) ignore pages that we can't
2964 * copy
2965 * (b) return the null object if
2966 * any page fails [chosen]
2967 */
2968
2969 vm_object_lock(new_object);
2970 VM_PAGE_FREE(new_page);
2971 vm_object_unlock(new_object);
2972
2973 vm_object_deallocate(new_object);
2974 vm_object_deallocate(src_object);
2975 *_result_object = VM_OBJECT_NULL;
2976 return(error_code ? error_code:
2977 KERN_MEMORY_ERROR);
2978
2979 default:
2980 panic("vm_object_copy_slowly: unexpected error"
2981 " 0x%x from vm_fault_page()\n", result);
2982 }
2983 } while (result != VM_FAULT_SUCCESS);
2984 }
2985
2986 /*
2987 * Lose the extra reference, and return our object.
2988 */
2989 vm_object_deallocate(src_object);
2990 *_result_object = new_object;
2991 return(KERN_SUCCESS);
2992 }
2993
2994 /*
2995 * Routine: vm_object_copy_quickly
2996 *
2997 * Purpose:
2998 * Copy the specified range of the source virtual
2999 * memory object, if it can be done without waiting
3000 * for user-generated events.
3001 *
3002 * Results:
3003 * If the copy is successful, the copy is returned in
3004 * the arguments; otherwise, the arguments are not
3005 * affected.
3006 *
3007 * In/out conditions:
3008 * The object should be unlocked on entry and exit.
3009 */
3010
3011 /*ARGSUSED*/
3012 __private_extern__ boolean_t
3013 vm_object_copy_quickly(
3014 vm_object_t *_object, /* INOUT */
3015 __unused vm_object_offset_t offset, /* IN */
3016 __unused vm_object_size_t size, /* IN */
3017 boolean_t *_src_needs_copy, /* OUT */
3018 boolean_t *_dst_needs_copy) /* OUT */
3019 {
3020 vm_object_t object = *_object;
3021 memory_object_copy_strategy_t copy_strategy;
3022
3023 XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
3024 *_object, offset, size, 0, 0);
3025 if (object == VM_OBJECT_NULL) {
3026 *_src_needs_copy = FALSE;
3027 *_dst_needs_copy = FALSE;
3028 return(TRUE);
3029 }
3030
3031 vm_object_lock(object);
3032
3033 copy_strategy = object->copy_strategy;
3034
3035 switch (copy_strategy) {
3036 case MEMORY_OBJECT_COPY_SYMMETRIC:
3037
3038 /*
3039 * Symmetric copy strategy.
3040 * Make another reference to the object.
3041 * Leave object/offset unchanged.
3042 */
3043
3044 vm_object_reference_locked(object);
3045 object->shadowed = TRUE;
3046 vm_object_unlock(object);
3047
3048 /*
3049 * Both source and destination must make
3050 * shadows, and the source must be made
3051 * read-only if not already.
3052 */
3053
3054 *_src_needs_copy = TRUE;
3055 *_dst_needs_copy = TRUE;
3056
3057 break;
3058
3059 case MEMORY_OBJECT_COPY_DELAY:
3060 vm_object_unlock(object);
3061 return(FALSE);
3062
3063 default:
3064 vm_object_unlock(object);
3065 return(FALSE);
3066 }
3067 return(TRUE);
3068 }
3069
3070 static int copy_call_count = 0;
3071 static int copy_call_sleep_count = 0;
3072 static int copy_call_restart_count = 0;
3073
3074 /*
3075 * Routine: vm_object_copy_call [internal]
3076 *
3077 * Description:
3078 * Copy the source object (src_object), using the
3079 * user-managed copy algorithm.
3080 *
3081 * In/out conditions:
3082 * The source object must be locked on entry. It
3083 * will be *unlocked* on exit.
3084 *
3085 * Results:
3086 * If the copy is successful, KERN_SUCCESS is returned.
3087 * A new object that represents the copied virtual
3088 * memory is returned in a parameter (*_result_object).
3089 * If the return value indicates an error, this parameter
3090 * is not valid.
3091 */
3092 static kern_return_t
3093 vm_object_copy_call(
3094 vm_object_t src_object,
3095 vm_object_offset_t src_offset,
3096 vm_object_size_t size,
3097 vm_object_t *_result_object) /* OUT */
3098 {
3099 kern_return_t kr;
3100 vm_object_t copy;
3101 boolean_t check_ready = FALSE;
3102 uint32_t try_failed_count = 0;
3103
3104 /*
3105 * If a copy is already in progress, wait and retry.
3106 *
3107 * XXX
3108 * Consider making this call interruptible, as Mike
3109 * intended it to be.
3110 *
3111 * XXXO
3112 * Need a counter or version or something to allow
3113 * us to use the copy that the currently requesting
3114 * thread is obtaining -- is it worth adding to the
3115 * vm object structure? Depends how common this case is.
3116 */
3117 copy_call_count++;
3118 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3119 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3120 THREAD_UNINT);
3121 copy_call_restart_count++;
3122 }
3123
3124 /*
3125 * Indicate (for the benefit of memory_object_create_copy)
3126 * that we want a copy for src_object. (Note that we cannot
3127 * do a real assert_wait before calling memory_object_copy,
3128 * so we simply set the flag.)
3129 */
3130
3131 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
3132 vm_object_unlock(src_object);
3133
3134 /*
3135 * Ask the memory manager to give us a memory object
3136 * which represents a copy of the src object.
3137 * The memory manager may give us a memory object
3138 * which we already have, or it may give us a
3139 * new memory object. This memory object will arrive
3140 * via memory_object_create_copy.
3141 */
3142
3143 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
3144 if (kr != KERN_SUCCESS) {
3145 return kr;
3146 }
3147
3148 /*
3149 * Wait for the copy to arrive.
3150 */
3151 vm_object_lock(src_object);
3152 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3153 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3154 THREAD_UNINT);
3155 copy_call_sleep_count++;
3156 }
3157 Retry:
3158 assert(src_object->copy != VM_OBJECT_NULL);
3159 copy = src_object->copy;
3160 if (!vm_object_lock_try(copy)) {
3161 vm_object_unlock(src_object);
3162
3163 try_failed_count++;
3164 mutex_pause(try_failed_count); /* wait a bit */
3165
3166 vm_object_lock(src_object);
3167 goto Retry;
3168 }
3169 if (copy->size < src_offset+size)
3170 copy->size = src_offset+size;
3171
3172 if (!copy->pager_ready)
3173 check_ready = TRUE;
3174
3175 /*
3176 * Return the copy.
3177 */
3178 *_result_object = copy;
3179 vm_object_unlock(copy);
3180 vm_object_unlock(src_object);
3181
3182 /* Wait for the copy to be ready. */
3183 if (check_ready == TRUE) {
3184 vm_object_lock(copy);
3185 while (!copy->pager_ready) {
3186 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
3187 }
3188 vm_object_unlock(copy);
3189 }
3190
3191 return KERN_SUCCESS;
3192 }
3193
3194 static int copy_delayed_lock_collisions = 0;
3195 static int copy_delayed_max_collisions = 0;
3196 static int copy_delayed_lock_contention = 0;
3197 static int copy_delayed_protect_iterate = 0;
3198
3199 /*
3200 * Routine: vm_object_copy_delayed [internal]
3201 *
3202 * Description:
3203 * Copy the specified virtual memory object, using
3204 * the asymmetric copy-on-write algorithm.
3205 *
3206 * In/out conditions:
3207 * The src_object must be locked on entry. It will be unlocked
3208 * on exit - so the caller must also hold a reference to it.
3209 *
3210 * This routine will not block waiting for user-generated
3211 * events. It is not interruptible.
3212 */
3213 __private_extern__ vm_object_t
3214 vm_object_copy_delayed(
3215 vm_object_t src_object,
3216 vm_object_offset_t src_offset,
3217 vm_object_size_t size,
3218 boolean_t src_object_shared)
3219 {
3220 vm_object_t new_copy = VM_OBJECT_NULL;
3221 vm_object_t old_copy;
3222 vm_page_t p;
3223 vm_object_size_t copy_size = src_offset + size;
3224
3225
3226 int collisions = 0;
3227 /*
3228 * The user-level memory manager wants to see all of the changes
3229 * to this object, but it has promised not to make any changes on
3230 * its own.
3231 *
3232 * Perform an asymmetric copy-on-write, as follows:
3233 * Create a new object, called a "copy object" to hold
3234 * pages modified by the new mapping (i.e., the copy,
3235 * not the original mapping).
3236 * Record the original object as the backing object for
3237 * the copy object. If the original mapping does not
3238 * change a page, it may be used read-only by the copy.
3239 * Record the copy object in the original object.
3240 * When the original mapping causes a page to be modified,
3241 * it must be copied to a new page that is "pushed" to
3242 * the copy object.
3243 * Mark the new mapping (the copy object) copy-on-write.
3244 * This makes the copy object itself read-only, allowing
3245 * it to be reused if the original mapping makes no
3246 * changes, and simplifying the synchronization required
3247 * in the "push" operation described above.
3248 *
3249 * The copy-on-write is said to be asymmetric because the original
3250 * object is *not* marked copy-on-write. A copied page is pushed
3251 * to the copy object, regardless which party attempted to modify
3252 * the page.
3253 *
3254 * Repeated asymmetric copy operations may be done. If the
3255 * original object has not been changed since the last copy, its
3256 * copy object can be reused. Otherwise, a new copy object can be
3257 * inserted between the original object and its previous copy
3258 * object. Since any copy object is read-only, this cannot affect
3259 * the contents of the previous copy object.
3260 *
3261 * Note that a copy object is higher in the object tree than the
3262 * original object; therefore, use of the copy object recorded in
3263 * the original object must be done carefully, to avoid deadlock.
3264 */
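/*
 * A minimal sketch of the resulting object chains (illustrative only):
 * after the first delayed copy of "orig", and after a second copy taken
 * once "orig" has been modified in the meantime:
 *
 *	first copy:			second copy:
 *
 *	  copy1				  copy1
 *	    |				    |
 *	  shadow			  shadow
 *	    v				    v
 *	   orig				  copy2
 *					    |
 *	  orig->copy == copy1		  shadow
 *					    v
 *					   orig
 *
 *					  orig->copy == copy2
 */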
3265
3266 Retry:
3267
3268 /*
3269 * Wait for paging in progress.
3270 */
3271 if (!src_object->true_share &&
3272 (src_object->paging_in_progress != 0 ||
3273 src_object->activity_in_progress != 0)) {
3274 if (src_object_shared == TRUE) {
3275 vm_object_unlock(src_object);
3276 vm_object_lock(src_object);
3277 src_object_shared = FALSE;
3278 goto Retry;
3279 }
3280 vm_object_paging_wait(src_object, THREAD_UNINT);
3281 }
3282 /*
3283 * See whether we can reuse the result of a previous
3284 * copy operation.
3285 */
3286
3287 old_copy = src_object->copy;
3288 if (old_copy != VM_OBJECT_NULL) {
3289 int lock_granted;
3290
3291 /*
3292 * Try to get the locks (out of order)
3293 */
3294 if (src_object_shared == TRUE)
3295 lock_granted = vm_object_lock_try_shared(old_copy);
3296 else
3297 lock_granted = vm_object_lock_try(old_copy);
3298
3299 if (!lock_granted) {
3300 vm_object_unlock(src_object);
3301
3302 if (collisions++ == 0)
3303 copy_delayed_lock_contention++;
3304 mutex_pause(collisions);
3305
3306 /* Heisenberg Rules */
3307 copy_delayed_lock_collisions++;
3308
3309 if (collisions > copy_delayed_max_collisions)
3310 copy_delayed_max_collisions = collisions;
3311
3312 if (src_object_shared == TRUE)
3313 vm_object_lock_shared(src_object);
3314 else
3315 vm_object_lock(src_object);
3316
3317 goto Retry;
3318 }
3319
3320 /*
3321 * Determine whether the old copy object has
3322 * been modified.
3323 */
3324
3325 if (old_copy->resident_page_count == 0 &&
3326 !old_copy->pager_created) {
3327 /*
3328 * It has not been modified.
3329 *
3330 * Return another reference to
3331 * the existing copy-object if
3332 * we can safely grow it (if
3333 * needed).
3334 */
3335
3336 if (old_copy->size < copy_size) {
3337 if (src_object_shared == TRUE) {
3338 vm_object_unlock(old_copy);
3339 vm_object_unlock(src_object);
3340
3341 vm_object_lock(src_object);
3342 src_object_shared = FALSE;
3343 goto Retry;
3344 }
3345 /*
3346 * We can't perform a delayed copy if any of the
3347 * pages in the extended range are wired (because
3348 * we can't safely take write permission away from
3349 * wired pages). If the pages aren't wired, then
3350 * go ahead and protect them.
3351 */
3352 copy_delayed_protect_iterate++;
3353
3354 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
3355 if (!p->fictitious &&
3356 p->offset >= old_copy->size &&
3357 p->offset < copy_size) {
3358 if (VM_PAGE_WIRED(p)) {
3359 vm_object_unlock(old_copy);
3360 vm_object_unlock(src_object);
3361
3362 if (new_copy != VM_OBJECT_NULL) {
3363 vm_object_unlock(new_copy);
3364 vm_object_deallocate(new_copy);
3365 }
3366
3367 return VM_OBJECT_NULL;
3368 } else {
3369 pmap_page_protect(p->phys_page,
3370 (VM_PROT_ALL & ~VM_PROT_WRITE));
3371 }
3372 }
3373 }
3374 old_copy->size = copy_size;
3375 }
3376 if (src_object_shared == TRUE)
3377 vm_object_reference_shared(old_copy);
3378 else
3379 vm_object_reference_locked(old_copy);
3380 vm_object_unlock(old_copy);
3381 vm_object_unlock(src_object);
3382
3383 if (new_copy != VM_OBJECT_NULL) {
3384 vm_object_unlock(new_copy);
3385 vm_object_deallocate(new_copy);
3386 }
3387 return(old_copy);
3388 }
3389
3390
3391
3392 /*
3393 * Adjust the size argument so that the newly-created
3394 * copy object will be large enough to back either the
3395 * old copy object or the new mapping.
3396 */
3397 if (old_copy->size > copy_size)
3398 copy_size = old_copy->size;
3399
3400 if (new_copy == VM_OBJECT_NULL) {
3401 vm_object_unlock(old_copy);
3402 vm_object_unlock(src_object);
3403 new_copy = vm_object_allocate(copy_size);
3404 vm_object_lock(src_object);
3405 vm_object_lock(new_copy);
3406
3407 src_object_shared = FALSE;
3408 goto Retry;
3409 }
3410 new_copy->size = copy_size;
3411
3412 /*
3413 * The copy-object is always made large enough to
3414 * completely shadow the original object, since
3415 * it may have several users who want to shadow
3416 * the original object at different points.
3417 */
3418
3419 assert((old_copy->shadow == src_object) &&
3420 (old_copy->shadow_offset == (vm_object_offset_t) 0));
3421
3422 } else if (new_copy == VM_OBJECT_NULL) {
3423 vm_object_unlock(src_object);
3424 new_copy = vm_object_allocate(copy_size);
3425 vm_object_lock(src_object);
3426 vm_object_lock(new_copy);
3427
3428 src_object_shared = FALSE;
3429 goto Retry;
3430 }
3431
3432 /*
3433 * We now have the src object locked, and the new copy object
3434 * allocated and locked (and potentially the old copy locked).
3435 * Before we go any further, make sure we can still perform
3436 * a delayed copy, as the situation may have changed.
3437 *
3438 * Specifically, we can't perform a delayed copy if any of the
3439 * pages in the range are wired (because we can't safely take
3440 * write permission away from wired pages). If the pages aren't
3441 * wired, then go ahead and protect them.
3442 */
3443 copy_delayed_protect_iterate++;
3444
3445 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
3446 if (!p->fictitious && p->offset < copy_size) {
3447 if (VM_PAGE_WIRED(p)) {
3448 if (old_copy)
3449 vm_object_unlock(old_copy);
3450 vm_object_unlock(src_object);
3451 vm_object_unlock(new_copy);
3452 vm_object_deallocate(new_copy);
3453 return VM_OBJECT_NULL;
3454 } else {
3455 pmap_page_protect(p->phys_page,
3456 (VM_PROT_ALL & ~VM_PROT_WRITE));
3457 }
3458 }
3459 }
3460 if (old_copy != VM_OBJECT_NULL) {
3461 /*
3462 * Make the old copy-object shadow the new one.
3463 * It will receive no more pages from the original
3464 * object.
3465 */
3466
3467 /* remove ref. from old_copy */
3468 vm_object_lock_assert_exclusive(src_object);
3469 src_object->ref_count--;
3470 assert(src_object->ref_count > 0);
3471 vm_object_lock_assert_exclusive(old_copy);
3472 old_copy->shadow = new_copy;
3473 vm_object_lock_assert_exclusive(new_copy);
3474 assert(new_copy->ref_count > 0);
3475 new_copy->ref_count++; /* for old_copy->shadow ref. */
3476
3477 #if TASK_SWAPPER
3478 if (old_copy->res_count) {
3479 VM_OBJ_RES_INCR(new_copy);
3480 VM_OBJ_RES_DECR(src_object);
3481 }
3482 #endif
3483
3484 vm_object_unlock(old_copy); /* done with old_copy */
3485 }
3486
3487 /*
3488 * Point the new copy at the existing object.
3489 */
3490 vm_object_lock_assert_exclusive(new_copy);
3491 new_copy->shadow = src_object;
3492 new_copy->shadow_offset = 0;
3493 new_copy->shadowed = TRUE; /* caller must set needs_copy */
3494
3495 vm_object_lock_assert_exclusive(src_object);
3496 vm_object_reference_locked(src_object);
3497 src_object->copy = new_copy;
3498 vm_object_unlock(src_object);
3499 vm_object_unlock(new_copy);
3500
3501 XPR(XPR_VM_OBJECT,
3502 "vm_object_copy_delayed: used copy object %X for source %X\n",
3503 new_copy, src_object, 0, 0, 0);
3504
3505 return new_copy;
3506 }
3507
3508 /*
3509 * Routine: vm_object_copy_strategically
3510 *
3511 * Purpose:
3512 * Perform a copy according to the source object's
3513 * declared strategy. This operation may block,
3514 * and may be interrupted.
3515 */
3516 __private_extern__ kern_return_t
3517 vm_object_copy_strategically(
3518 register vm_object_t src_object,
3519 vm_object_offset_t src_offset,
3520 vm_object_size_t size,
3521 vm_object_t *dst_object, /* OUT */
3522 vm_object_offset_t *dst_offset, /* OUT */
3523 boolean_t *dst_needs_copy) /* OUT */
3524 {
3525 boolean_t result;
3526 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
3527 boolean_t object_lock_shared = FALSE;
3528 memory_object_copy_strategy_t copy_strategy;
3529
3530 assert(src_object != VM_OBJECT_NULL);
3531
3532 copy_strategy = src_object->copy_strategy;
3533
3534 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
3535 vm_object_lock_shared(src_object);
3536 object_lock_shared = TRUE;
3537 } else
3538 vm_object_lock(src_object);
3539
3540 /*
3541 * The copy strategy is only valid if the memory manager
3542 * is "ready". Internal objects are always ready.
3543 */
3544
3545 while (!src_object->internal && !src_object->pager_ready) {
3546 wait_result_t wait_result;
3547
3548 if (object_lock_shared == TRUE) {
3549 vm_object_unlock(src_object);
3550 vm_object_lock(src_object);
3551 object_lock_shared = FALSE;
3552 continue;
3553 }
3554 wait_result = vm_object_sleep( src_object,
3555 VM_OBJECT_EVENT_PAGER_READY,
3556 interruptible);
3557 if (wait_result != THREAD_AWAKENED) {
3558 vm_object_unlock(src_object);
3559 *dst_object = VM_OBJECT_NULL;
3560 *dst_offset = 0;
3561 *dst_needs_copy = FALSE;
3562 return(MACH_SEND_INTERRUPTED);
3563 }
3564 }
3565
3566 /*
3567 * Use the appropriate copy strategy.
3568 */
3569
3570 switch (copy_strategy) {
3571 case MEMORY_OBJECT_COPY_DELAY:
3572 *dst_object = vm_object_copy_delayed(src_object,
3573 src_offset, size, object_lock_shared);
3574 if (*dst_object != VM_OBJECT_NULL) {
3575 *dst_offset = src_offset;
3576 *dst_needs_copy = TRUE;
3577 result = KERN_SUCCESS;
3578 break;
3579 }
3580 vm_object_lock(src_object);
3581 /* fall thru when delayed copy not allowed */
3582
3583 case MEMORY_OBJECT_COPY_NONE:
3584 result = vm_object_copy_slowly(src_object, src_offset, size,
3585 interruptible, dst_object);
3586 if (result == KERN_SUCCESS) {
3587 *dst_offset = 0;
3588 *dst_needs_copy = FALSE;
3589 }
3590 break;
3591
3592 case MEMORY_OBJECT_COPY_CALL:
3593 result = vm_object_copy_call(src_object, src_offset, size,
3594 dst_object);
3595 if (result == KERN_SUCCESS) {
3596 *dst_offset = src_offset;
3597 *dst_needs_copy = TRUE;
3598 }
3599 break;
3600
3601 case MEMORY_OBJECT_COPY_SYMMETRIC:
3602 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0);
3603 vm_object_unlock(src_object);
3604 result = KERN_MEMORY_RESTART_COPY;
3605 break;
3606
3607 default:
3608 panic("copy_strategically: bad strategy");
3609 result = KERN_INVALID_ARGUMENT;
3610 }
3611 return(result);
3612 }
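/*
 * Summary of the dispatch above, for reference: COPY_DELAY is served by
 * vm_object_copy_delayed(), falling back to vm_object_copy_slowly() when
 * a delayed copy isn't possible; COPY_NONE by vm_object_copy_slowly();
 * COPY_CALL by vm_object_copy_call(); and COPY_SYMMETRIC is returned to
 * the caller as KERN_MEMORY_RESTART_COPY.
 */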
3613
3614 /*
3615 * vm_object_shadow:
3616 *
3617 * Create a new object which is backed by the
3618 * specified existing object range. The source
3619 * object reference is deallocated.
3620 *
3621 * The new object and offset into that object
3622 * are returned in the source parameters.
3623 */
3624 boolean_t vm_object_shadow_check = FALSE;
3625
3626 __private_extern__ boolean_t
3627 vm_object_shadow(
3628 vm_object_t *object, /* IN/OUT */
3629 vm_object_offset_t *offset, /* IN/OUT */
3630 vm_object_size_t length)
3631 {
3632 register vm_object_t source;
3633 register vm_object_t result;
3634
3635 source = *object;
3636 #if 0
3637 /*
3638 * XXX FBDP
3639 * This assertion is valid but it gets triggered by Rosetta for example
3640 * due to a combination of vm_remap() that changes a VM object's
3641 * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
3642 * that then sets "needs_copy" on its map entry. This creates a
3643 * mapping situation that VM should never see and doesn't know how to
3644 * handle.
3645 * It's not clear if this can create any real problem but we should
3646 * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
3647 * do more than just set "needs_copy" to handle the copy-on-write...
3648 * In the meantime, let's disable the assertion.
3649 */
3650 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
3651 #endif
3652
3653 /*
3654 * Determine if we really need a shadow.
3655 */
3656
3657 if (vm_object_shadow_check && source->ref_count == 1 &&
3658 (source->shadow == VM_OBJECT_NULL ||
3659 source->shadow->copy == VM_OBJECT_NULL))
3660 {
3661 source->shadowed = FALSE;
3662 return FALSE;
3663 }
3664
3665 /*
3666 * Allocate a new object with the given length
3667 */
3668
3669 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
3670 panic("vm_object_shadow: no object for shadowing");
3671
3672 /*
3673 * The new object shadows the source object, adding
3674 * a reference to it. Our caller changes his reference
3675 * to point to the new object, removing a reference to
3676 * the source object. Net result: no change of reference
3677 * count.
3678 */
3679 result->shadow = source;
3680
3681 /*
3682 * Store the offset into the source object,
3683 * and fix up the offset into the new object.
3684 */
3685
3686 result->shadow_offset = *offset;
3687
3688 /*
3689 * Return the new things
3690 */
3691
3692 *offset = 0;
3693 *object = result;
3694 return TRUE;
3695 }
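/*
 * A hedged sketch of the resulting relationship (hypothetical values):
 * shadowing "source" at offset 0x4000 for some length yields
 *
 *	*object == result		*offset == 0
 *	result->shadow == source
 *	result->shadow_offset == 0x4000
 *
 * so a lookup that misses in "result" at offset 0 is redirected to
 * "source" at offset 0x4000 via the shadow chain.
 */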
3696
3697 /*
3698 * The relationship between vm_object structures and
3699 * the memory_object requires careful synchronization.
3700 *
3701 * All associations are created by memory_object_create_named
3702 * for external pagers and vm_object_pager_create for internal
3703 * objects as follows:
3704 *
3705 * pager: the memory_object itself, supplied by
3706 * the user requesting a mapping (or the kernel,
3707 * when initializing internal objects); the
3708 * kernel simulates holding send rights by keeping
3709 * a port reference;
3710 *
3711 * pager_request:
3712 * the memory object control port,
3713 * created by the kernel; the kernel holds
3714 * receive (and ownership) rights to this
3715 * port, but no other references.
3716 *
3717 * When initialization is complete, the "initialized" field
3718 * is asserted. Other mappings using a particular memory object,
3719 * and any references to the vm_object gained through the
3720 * port association must wait for this initialization to occur.
3721 *
3722 * In order to allow the memory manager to set attributes before
3723 * requests (notably virtual copy operations, but also data or
3724 * unlock requests) are made, a "ready" attribute is made available.
3725 * Only the memory manager may affect the value of this attribute.
3726 * Its value does not affect critical kernel functions, such as
3727 * internal object initialization or destruction. [Furthermore,
3728 * memory objects created by the kernel are assumed to be ready
3729 * immediately; the default memory manager need not explicitly
3730 * set the "ready" attribute.]
3731 *
3732 * [Both the "initialized" and "ready" attribute wait conditions
3733 * use the "pager" field as the wait event.]
3734 *
3735 * The port associations can be broken down by any of the
3736 * following routines:
3737 * vm_object_terminate:
3738 * No references to the vm_object remain, and
3739 * the object cannot (or will not) be cached.
3740 * This is the normal case, and is done even
3741 * though one of the other cases has already been
3742 * done.
3743 * memory_object_destroy:
3744 * The memory manager has requested that the
3745 * kernel relinquish references to the memory
3746 * object. [The memory manager may not want to
3747 * destroy the memory object, but may wish to
3748 * refuse or tear down existing memory mappings.]
3749 *
3750 * Each routine that breaks an association must break all of
3751 * them at once. At some later time, that routine must clear
3752 * the pager field and release the memory object references.
3753 * [Furthermore, each routine must cope with the simultaneous
3754 * or previous operations of the others.]
3755 *
3756 * In addition to the lock on the object, the vm_object_hash_lock
3757 * governs the associations. References gained through the
3758 * association require use of the hash lock.
3759 *
3760 * Because the pager field may be cleared spontaneously, it
3761 * cannot be used to determine whether a memory object has
3762 * ever been associated with a particular vm_object. [This
3763 * knowledge is important to the shadow object mechanism.]
3764 * For this reason, an additional "created" attribute is
3765 * provided.
3766 *
3767 * During various paging operations, the pager reference found in the
3768 * vm_object must be valid. To prevent this from being released,
3769 * (other than being removed, i.e., made null), routines may use
3770 * the vm_object_paging_begin/end routines [actually, macros].
3771 * The implementation uses the "paging_in_progress" and "wanted" fields.
3772 * [Operations that alter the validity of the pager values include the
3773 * termination routines and vm_object_collapse.]
3774 */
3775
3776
3777 /*
3778 * Routine: vm_object_enter
3779 * Purpose:
3780 * Find a VM object corresponding to the given
3781 * pager; if no such object exists, create one,
3782 * and initialize the pager.
3783 */
3784 vm_object_t
3785 vm_object_enter(
3786 memory_object_t pager,
3787 vm_object_size_t size,
3788 boolean_t internal,
3789 boolean_t init,
3790 boolean_t named)
3791 {
3792 register vm_object_t object;
3793 vm_object_t new_object;
3794 boolean_t must_init;
3795 vm_object_hash_entry_t entry, new_entry;
3796 uint32_t try_failed_count = 0;
3797 lck_mtx_t *lck;
3798
3799 if (pager == MEMORY_OBJECT_NULL)
3800 return(vm_object_allocate(size));
3801
3802 new_object = VM_OBJECT_NULL;
3803 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
3804 must_init = init;
3805
3806 /*
3807 * Look for an object associated with this port.
3808 */
3809 Retry:
3810 lck = vm_object_hash_lock_spin(pager);
3811 do {
3812 entry = vm_object_hash_lookup(pager, FALSE);
3813
3814 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
3815 if (new_object == VM_OBJECT_NULL) {
3816 /*
3817 * We must unlock to create a new object;
3818 * if we do so, we must try the lookup again.
3819 */
3820 vm_object_hash_unlock(lck);
3821 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
3822 new_entry = vm_object_hash_entry_alloc(pager);
3823 new_object = vm_object_allocate(size);
3824 lck = vm_object_hash_lock_spin(pager);
3825 } else {
3826 /*
3827 * Lookup failed twice, and we have something
3828 * to insert; set the object.
3829 */
3830 vm_object_hash_insert(new_entry, new_object);
3831 entry = new_entry;
3832 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
3833 new_object = VM_OBJECT_NULL;
3834 must_init = TRUE;
3835 }
3836 } else if (entry->object == VM_OBJECT_NULL) {
3837 /*
3838 * If a previous object is being terminated,
3839 * we must wait for the termination message
3840 * to be queued (and lookup the entry again).
3841 */
3842 entry->waiting = TRUE;
3843 entry = VM_OBJECT_HASH_ENTRY_NULL;
3844 assert_wait((event_t) pager, THREAD_UNINT);
3845 vm_object_hash_unlock(lck);
3846
3847 thread_block(THREAD_CONTINUE_NULL);
3848 lck = vm_object_hash_lock_spin(pager);
3849 }
3850 } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
3851
3852 object = entry->object;
3853 assert(object != VM_OBJECT_NULL);
3854
3855 if (!must_init) {
3856 if ( !vm_object_lock_try(object)) {
3857
3858 vm_object_hash_unlock(lck);
3859
3860 try_failed_count++;
3861 mutex_pause(try_failed_count); /* wait a bit */
3862 goto Retry;
3863 }
3864 assert(!internal || object->internal);
3865 #if VM_OBJECT_CACHE
3866 if (object->ref_count == 0) {
3867 if ( !vm_object_cache_lock_try()) {
3868
3869 vm_object_hash_unlock(lck);
3870 vm_object_unlock(object);
3871
3872 try_failed_count++;
3873 mutex_pause(try_failed_count); /* wait a bit */
3874 goto Retry;
3875 }
3876 XPR(XPR_VM_OBJECT_CACHE,
3877 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
3878 object,
3879 vm_object_cached_list.next,
3880 vm_object_cached_list.prev, 0,0);
3881 queue_remove(&vm_object_cached_list, object,
3882 vm_object_t, cached_list);
3883 vm_object_cached_count--;
3884
3885 vm_object_cache_unlock();
3886 }
3887 #endif
3888 if (named) {
3889 assert(!object->named);
3890 object->named = TRUE;
3891 }
3892 vm_object_lock_assert_exclusive(object);
3893 object->ref_count++;
3894 vm_object_res_reference(object);
3895
3896 vm_object_hash_unlock(lck);
3897 vm_object_unlock(object);
3898
3899 VM_STAT_INCR(hits);
3900 } else
3901 vm_object_hash_unlock(lck);
3902
3903 assert(object->ref_count > 0);
3904
3905 VM_STAT_INCR(lookups);
3906
3907 XPR(XPR_VM_OBJECT,
3908 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
3909 pager, object, must_init, 0, 0);
3910
3911 /*
3912 * If we raced to create a vm_object but lost, let's
3913 * throw away ours.
3914 */
3915
3916 if (new_object != VM_OBJECT_NULL)
3917 vm_object_deallocate(new_object);
3918
3919 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
3920 vm_object_hash_entry_free(new_entry);
3921
3922 if (must_init) {
3923 memory_object_control_t control;
3924
3925 /*
3926 * Allocate request port.
3927 */
3928
3929 control = memory_object_control_allocate(object);
3930 assert (control != MEMORY_OBJECT_CONTROL_NULL);
3931
3932 vm_object_lock(object);
3933 assert(object != kernel_object);
3934
3935 /*
3936 * Copy the reference we were given.
3937 */
3938
3939 memory_object_reference(pager);
3940 object->pager_created = TRUE;
3941 object->pager = pager;
3942 object->internal = internal;
3943 object->pager_trusted = internal;
3944 if (!internal) {
3945 /* copy strategy invalid until set by memory manager */
3946 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
3947 }
3948 object->pager_control = control;
3949 object->pager_ready = FALSE;
3950
3951 vm_object_unlock(object);
3952
3953 /*
3954 * Let the pager know we're using it.
3955 */
3956
3957 (void) memory_object_init(pager,
3958 object->pager_control,
3959 PAGE_SIZE);
3960
3961 vm_object_lock(object);
3962 if (named)
3963 object->named = TRUE;
3964 if (internal) {
3965 object->pager_ready = TRUE;
3966 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
3967 }
3968
3969 object->pager_initialized = TRUE;
3970 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
3971 } else {
3972 vm_object_lock(object);
3973 }
3974
3975 /*
3976 * [At this point, the object must be locked]
3977 */
3978
3979 /*
3980 * Wait for the work above to be done by the first
3981 * thread to map this object.
3982 */
3983
3984 while (!object->pager_initialized) {
3985 vm_object_sleep(object,
3986 VM_OBJECT_EVENT_INITIALIZED,
3987 THREAD_UNINT);
3988 }
3989 vm_object_unlock(object);
3990
3991 XPR(XPR_VM_OBJECT,
3992 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
3993 object, object->pager, internal, 0,0);
3994 return(object);
3995 }
3996
3997 /*
3998 * Routine: vm_object_pager_create
3999 * Purpose:
4000 * Create a memory object for an internal object.
4001 * In/out conditions:
4002 * The object is locked on entry and exit;
4003 * it may be unlocked within this call.
4004 * Limitations:
4005 * Only one thread may be performing a
4006 * vm_object_pager_create on an object at
4007 * a time. Presumably, only the pageout
4008 * daemon will be using this routine.
4009 */
4010
4011 void
4012 vm_object_pager_create(
4013 register vm_object_t object)
4014 {
4015 memory_object_t pager;
4016 vm_object_hash_entry_t entry;
4017 lck_mtx_t *lck;
4018 #if MACH_PAGEMAP
4019 vm_object_size_t size;
4020 vm_external_map_t map;
4021 #endif /* MACH_PAGEMAP */
4022
4023 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
4024 object, 0,0,0,0);
4025
4026 assert(object != kernel_object);
4027
4028 if (memory_manager_default_check() != KERN_SUCCESS)
4029 return;
4030
4031 /*
4032 * Prevent collapse or termination by holding a paging reference
4033 */
4034
4035 vm_object_paging_begin(object);
4036 if (object->pager_created) {
4037 /*
4038 * Someone else got to it first...
4039 * wait for them to finish initializing the ports
4040 */
4041 while (!object->pager_initialized) {
4042 vm_object_sleep(object,
4043 VM_OBJECT_EVENT_INITIALIZED,
4044 THREAD_UNINT);
4045 }
4046 vm_object_paging_end(object);
4047 return;
4048 }
4049
4050 /*
4051 * Indicate that a memory object has been assigned
4052 * before dropping the lock, to prevent a race.
4053 */
4054
4055 object->pager_created = TRUE;
4056 object->paging_offset = 0;
4057
4058 #if MACH_PAGEMAP
4059 size = object->size;
4060 #endif /* MACH_PAGEMAP */
4061 vm_object_unlock(object);
4062
4063 #if MACH_PAGEMAP
4064 map = vm_external_create(size);
4065 vm_object_lock(object);
4066 assert(object->size == size);
4067 object->existence_map = map;
4068 vm_object_unlock(object);
4069 #endif /* MACH_PAGEMAP */
4070
4071 if ((uint32_t) object->size != object->size) {
4072 panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
4073 (uint64_t) object->size);
4074 }
4075
4076 /*
4077 * Create the [internal] pager, and associate it with this object.
4078 *
4079 * We make the association here so that vm_object_enter()
4080 * can look up the object to complete initializing it. No
4081 * user will ever map this object.
4082 */
4083 {
4084 memory_object_default_t dmm;
4085
4086 /* acquire a reference for the default memory manager */
4087 dmm = memory_manager_default_reference();
4088
4089 assert(object->temporary);
4090
4091 /* create our new memory object */
4092 assert((vm_size_t) object->size == object->size);
4093 (void) memory_object_create(dmm, (vm_size_t) object->size,
4094 &pager);
4095
4096 memory_object_default_deallocate(dmm);
4097 }
4098
4099 entry = vm_object_hash_entry_alloc(pager);
4100
4101 lck = vm_object_hash_lock_spin(pager);
4102 vm_object_hash_insert(entry, object);
4103 vm_object_hash_unlock(lck);
4104
4105 /*
4106 * A reference was returned by
4107 * memory_object_create(), and it is
4108 * copied by vm_object_enter().
4109 */
4110
4111 if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
4112 panic("vm_object_pager_create: mismatch");
4113
4114 /*
4115 * Drop the reference we were passed.
4116 */
4117 memory_object_deallocate(pager);
4118
4119 vm_object_lock(object);
4120
4121 /*
4122 * Release the paging reference
4123 */
4124 vm_object_paging_end(object);
4125 }
4126
4127 /*
4128 * Routine: vm_object_remove
4129 * Purpose:
4130 * Eliminate the pager/object association
4131 * for this pager.
4132 * Conditions:
4133 * The object cache must be locked.
4134 */
4135 __private_extern__ void
4136 vm_object_remove(
4137 vm_object_t object)
4138 {
4139 memory_object_t pager;
4140
4141 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
4142 vm_object_hash_entry_t entry;
4143
4144 entry = vm_object_hash_lookup(pager, FALSE);
4145 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
4146 entry->object = VM_OBJECT_NULL;
4147 }
4148
4149 }
4150
4151 /*
4152 * Global variables for vm_object_collapse():
4153 *
4154 * Counts for normal collapses and bypasses.
4155 * Debugging variables, to watch or disable collapse.
4156 */
4157 static long object_collapses = 0;
4158 static long object_bypasses = 0;
4159
4160 static boolean_t vm_object_collapse_allowed = TRUE;
4161 static boolean_t vm_object_bypass_allowed = TRUE;
4162
4163 #if MACH_PAGEMAP
4164 static int vm_external_discarded;
4165 static int vm_external_collapsed;
4166 #endif
4167
4168 unsigned long vm_object_collapse_encrypted = 0;
4169
4170 /*
4171 * Routine: vm_object_do_collapse
4172 * Purpose:
4173 * Collapse an object with the object backing it.
4174 * Pages in the backing object are moved into the
4175 * parent, and the backing object is deallocated.
4176 * Conditions:
4177 * Both objects and the cache are locked; the page
4178 * queues are unlocked.
4179 *
4180 */
4181 static void
4182 vm_object_do_collapse(
4183 vm_object_t object,
4184 vm_object_t backing_object)
4185 {
4186 vm_page_t p, pp;
4187 vm_object_offset_t new_offset, backing_offset;
4188 vm_object_size_t size;
4189
4190 vm_object_lock_assert_exclusive(object);
4191 vm_object_lock_assert_exclusive(backing_object);
4192
4193 backing_offset = object->shadow_offset;
4194 size = object->size;
4195
4196 /*
4197 * Move all in-memory pages from backing_object
4198 * to the parent. Pages that have been paged out
4199 * will be overwritten by any of the parent's
4200 * pages that shadow them.
4201 */
4202
4203 while (!queue_empty(&backing_object->memq)) {
4204
4205 p = (vm_page_t) queue_first(&backing_object->memq);
4206
4207 new_offset = (p->offset - backing_offset);
4208
4209 assert(!p->busy || p->absent);
4210
4211 /*
4212 * If the parent has a page here, or if
4213 * this page falls outside the parent,
4214 * dispose of it.
4215 *
4216 * Otherwise, move it as planned.
4217 */
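/*
 * Editorial example: with backing_offset = 0x3000 and size = 0x5000,
 * a backing page at offset 0x2000 falls below the parent's window and
 * is freed, while one at offset 0x4000 is renamed to parent offset
 * 0x1000 (0x4000 - 0x3000).
 */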
4218
4219 if (p->offset < backing_offset || new_offset >= size) {
4220 VM_PAGE_FREE(p);
4221 } else {
4222 /*
4223 * ENCRYPTED SWAP:
4224 * The encryption key includes the "pager" and the
4225 * "paging_offset". These will not change during the
4226 * object collapse, so we can just move an encrypted
4227 * page from one object to the other in this case.
4228 * We can't decrypt the page here, since we can't drop
4229 * the object lock.
4230 */
4231 if (p->encrypted) {
4232 vm_object_collapse_encrypted++;
4233 }
4234 pp = vm_page_lookup(object, new_offset);
4235 if (pp == VM_PAGE_NULL) {
4236
4237 /*
4238 * Parent now has no page.
4239 * Move the backing object's page up.
4240 */
4241
4242 vm_page_rename(p, object, new_offset, TRUE);
4243 #if MACH_PAGEMAP
4244 } else if (pp->absent) {
4245
4246 /*
4247 * Parent has an absent page...
4248 * it's not being paged in, so
4249 * it must really be missing from
4250 * the parent.
4251 *
4252 * Throw out the absent page...
4253 * any faults looking for that
4254 * page will restart with the new
4255 * one.
4256 */
4257
4258 VM_PAGE_FREE(pp);
4259 vm_page_rename(p, object, new_offset, TRUE);
4260 #endif /* MACH_PAGEMAP */
4261 } else {
4262 assert(! pp->absent);
4263
4264 /*
4265 * Parent object has a real page.
4266 * Throw away the backing object's
4267 * page.
4268 */
4269 VM_PAGE_FREE(p);
4270 }
4271 }
4272 }
4273
4274 #if !MACH_PAGEMAP
4275 assert((!object->pager_created && (object->pager == MEMORY_OBJECT_NULL))
4276 || (!backing_object->pager_created
4277 && (backing_object->pager == MEMORY_OBJECT_NULL)));
4278 #else
4279 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
4280 #endif /* !MACH_PAGEMAP */
4281
4282 if (backing_object->pager != MEMORY_OBJECT_NULL) {
4283 vm_object_hash_entry_t entry;
4284
4285 /*
4286 * Move the pager from backing_object to object.
4287 *
4288 * XXX We're only using part of the paging space
4289 * for keeps now... we ought to discard the
4290 * unused portion.
4291 */
4292
4293 assert(!object->paging_in_progress);
4294 assert(!object->activity_in_progress);
4295 object->pager = backing_object->pager;
4296
4297 if (backing_object->hashed) {
4298 lck_mtx_t *lck;
4299
4300 lck = vm_object_hash_lock_spin(backing_object->pager);
4301 entry = vm_object_hash_lookup(object->pager, FALSE);
4302 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
4303 entry->object = object;
4304 vm_object_hash_unlock(lck);
4305
4306 object->hashed = TRUE;
4307 }
4308 object->pager_created = backing_object->pager_created;
4309 object->pager_control = backing_object->pager_control;
4310 object->pager_ready = backing_object->pager_ready;
4311 object->pager_initialized = backing_object->pager_initialized;
4312 object->paging_offset =
4313 backing_object->paging_offset + backing_offset;
4314 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4315 memory_object_control_collapse(object->pager_control,
4316 object);
4317 }
4318 }
4319
4320 #if MACH_PAGEMAP
4321 /*
4322 * If the shadow offset is 0, then use the existence map from
4323 * the backing object if there is one. If the shadow offset is
4324 * not zero, toss it.
4325 *
4326 * XXX - If the shadow offset is not 0 then a bit copy is needed
4327 * if the map is to be salvaged. For now, we just toss the
4328 * old map, giving the collapsed object no map. This means that
4329 * the pager is invoked for zero fill pages. If analysis shows
4330 * that this happens frequently and is a performance hit, then
4331 * this code should be fixed to salvage the map.
4332 */
4333 assert(object->existence_map == VM_EXTERNAL_NULL);
4334 if (backing_offset || (size != backing_object->size)) {
4335 vm_external_discarded++;
4336 vm_external_destroy(backing_object->existence_map,
4337 backing_object->size);
4338 }
4339 else {
4340 vm_external_collapsed++;
4341 object->existence_map = backing_object->existence_map;
4342 }
4343 backing_object->existence_map = VM_EXTERNAL_NULL;
4344 #endif /* MACH_PAGEMAP */
4345
4346 /*
4347 * Object now shadows whatever backing_object did.
4348 * Note that the reference to backing_object->shadow
4349 * moves from within backing_object to within object.
4350 */
4351
4352 assert(!object->phys_contiguous);
4353 assert(!backing_object->phys_contiguous);
4354 object->shadow = backing_object->shadow;
4355 if (object->shadow) {
4356 object->shadow_offset += backing_object->shadow_offset;
4357 } else {
4358 /* no shadow, therefore no shadow offset... */
4359 object->shadow_offset = 0;
4360 }
4361 assert((object->shadow == VM_OBJECT_NULL) ||
4362 (object->shadow->copy != backing_object));
4363
4364 /*
4365 * Discard backing_object.
4366 *
4367 * Since the backing object has no pages, no
4368 * pager left, and no object references within it,
4369 * all that is necessary is to dispose of it.
4370 */
4371
4372 assert((backing_object->ref_count == 1) &&
4373 (backing_object->resident_page_count == 0) &&
4374 (backing_object->paging_in_progress == 0) &&
4375 (backing_object->activity_in_progress == 0));
4376
4377 backing_object->alive = FALSE;
4378 vm_object_unlock(backing_object);
4379
4380 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
4381 backing_object, 0,0,0,0);
4382
4383 vm_object_lock_destroy(backing_object);
4384
4385 zfree(vm_object_zone, backing_object);
4386
4387 object_collapses++;
4388 }
4389
4390 static void
4391 vm_object_do_bypass(
4392 vm_object_t object,
4393 vm_object_t backing_object)
4394 {
4395 /*
4396 * Make the parent shadow the next object
4397 * in the chain.
4398 */
4399
4400 vm_object_lock_assert_exclusive(object);
4401 vm_object_lock_assert_exclusive(backing_object);
4402
4403 #if TASK_SWAPPER
4404 /*
4405 * Do object reference in-line to
4406 * conditionally increment shadow's
4407 * residence count. If object is not
4408 * resident, leave residence count
4409 * on shadow alone.
4410 */
4411 if (backing_object->shadow != VM_OBJECT_NULL) {
4412 vm_object_lock(backing_object->shadow);
4413 vm_object_lock_assert_exclusive(backing_object->shadow);
4414 backing_object->shadow->ref_count++;
4415 if (object->res_count != 0)
4416 vm_object_res_reference(backing_object->shadow);
4417 vm_object_unlock(backing_object->shadow);
4418 }
4419 #else /* TASK_SWAPPER */
4420 vm_object_reference(backing_object->shadow);
4421 #endif /* TASK_SWAPPER */
4422
4423 assert(!object->phys_contiguous);
4424 assert(!backing_object->phys_contiguous);
4425 object->shadow = backing_object->shadow;
4426 if (object->shadow) {
4427 object->shadow_offset += backing_object->shadow_offset;
4428 } else {
4429 /* no shadow, therefore no shadow offset... */
4430 object->shadow_offset = 0;
4431 }
4432
4433 /*
4434 * Backing object might have had a copy pointer
4435 * to us. If it did, clear it.
4436 */
4437 if (backing_object->copy == object) {
4438 backing_object->copy = VM_OBJECT_NULL;
4439 }
4440
4441 /*
4442 * Drop the reference count on backing_object.
4443 #if TASK_SWAPPER
4444 * Since its ref_count was at least 2, it
4445 * will not vanish; so we don't need to call
4446 * vm_object_deallocate.
4447 * [with a caveat for "named" objects]
4448 *
4449 * The res_count on the backing object is
4450 * conditionally decremented. It's possible
4451 * (via vm_pageout_scan) to get here with
4452 * a "swapped" object, which has a 0 res_count,
4453 * in which case, the backing object res_count
4454 * is already down by one.
4455 #else
4456 * Don't call vm_object_deallocate unless
4457 * ref_count drops to zero.
4458 *
4459 * The ref_count can drop to zero here if the
4460 * backing object could be bypassed but not
4461 * collapsed, such as when the backing object
4462 * is temporary and cachable.
4463 #endif
4464 */
4465 if (backing_object->ref_count > 2 ||
4466 (!backing_object->named && backing_object->ref_count > 1)) {
4467 vm_object_lock_assert_exclusive(backing_object);
4468 backing_object->ref_count--;
4469 #if TASK_SWAPPER
4470 if (object->res_count != 0)
4471 vm_object_res_deallocate(backing_object);
4472 assert(backing_object->ref_count > 0);
4473 #endif /* TASK_SWAPPER */
4474 vm_object_unlock(backing_object);
4475 } else {
4476
4477 /*
4478 * Drop locks so that we can deallocate
4479 * the backing object.
4480 */
4481
4482 #if TASK_SWAPPER
4483 if (object->res_count == 0) {
4484 /* XXX get a reference for the deallocate below */
4485 vm_object_res_reference(backing_object);
4486 }
4487 #endif /* TASK_SWAPPER */
4488 vm_object_unlock(object);
4489 vm_object_unlock(backing_object);
4490 vm_object_deallocate(backing_object);
4491
4492 /*
4493 * Relock object. We don't have to reverify
4494 * its state since vm_object_collapse will
4495 * do that for us as it starts at the
4496 * top of its loop.
4497 */
4498
4499 vm_object_lock(object);
4500 }
4501
4502 object_bypasses++;
4503 }
4504
4505
4506 /*
4507 * vm_object_collapse:
4508 *
4509 * Perform an object collapse or an object bypass if appropriate.
4510 * The real work of collapsing and bypassing is performed in
4511 * the routines vm_object_do_collapse and vm_object_do_bypass.
4512 *
4513 * Requires that the object be locked and the page queues be unlocked.
4514 *
4515 */
4516 static unsigned long vm_object_collapse_calls = 0;
4517 static unsigned long vm_object_collapse_objects = 0;
4518 static unsigned long vm_object_collapse_do_collapse = 0;
4519 static unsigned long vm_object_collapse_do_bypass = 0;
4520 static unsigned long vm_object_collapse_delays = 0;
4521 __private_extern__ void
4522 vm_object_collapse(
4523 register vm_object_t object,
4524 register vm_object_offset_t hint_offset,
4525 boolean_t can_bypass)
4526 {
4527 register vm_object_t backing_object;
4528 register unsigned int rcount;
4529 register unsigned int size;
4530 vm_object_t original_object;
4531 int object_lock_type;
4532 int backing_object_lock_type;
4533
4534 vm_object_collapse_calls++;
4535
4536 if (! vm_object_collapse_allowed &&
4537 ! (can_bypass && vm_object_bypass_allowed)) {
4538 return;
4539 }
4540
4541 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
4542 object, 0,0,0,0);
4543
4544 if (object == VM_OBJECT_NULL)
4545 return;
4546
4547 original_object = object;
4548
4549 /*
4550 * The top object was locked "exclusive" by the caller.
4551 * In the first pass, to determine if we can collapse the shadow chain,
4552 * take a "shared" lock on the shadow objects. If we can collapse,
4553 * we'll have to go down the chain again with exclusive locks.
4554 */
4555 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4556 backing_object_lock_type = OBJECT_LOCK_SHARED;
4557
4558 retry:
4559 object = original_object;
4560 vm_object_lock_assert_exclusive(object);
4561
4562 while (TRUE) {
4563 vm_object_collapse_objects++;
4564 /*
4565 * Verify that the conditions are right for either
4566 * collapse or bypass:
4567 */
4568
4569 /*
4570 * There is a backing object, and
4571 */
4572
4573 backing_object = object->shadow;
4574 if (backing_object == VM_OBJECT_NULL) {
4575 if (object != original_object) {
4576 vm_object_unlock(object);
4577 }
4578 return;
4579 }
4580 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
4581 vm_object_lock_shared(backing_object);
4582 } else {
4583 vm_object_lock(backing_object);
4584 }
4585
4586 /*
4587 * No pages in the object are currently
4588 * being paged out, and
4589 */
4590 if (object->paging_in_progress != 0 ||
4591 object->activity_in_progress != 0) {
4592 /* try and collapse the rest of the shadow chain */
4593 if (object != original_object) {
4594 vm_object_unlock(object);
4595 }
4596 object = backing_object;
4597 object_lock_type = backing_object_lock_type;
4598 continue;
4599 }
4600
4601 /*
4602 * ...
4603 * The backing object is not read_only,
4604 * and no pages in the backing object are
4605 * currently being paged out.
4606 * The backing object is internal.
4607 *
4608 */
4609
4610 if (!backing_object->internal ||
4611 backing_object->paging_in_progress != 0 ||
4612 backing_object->activity_in_progress != 0) {
4613 /* try and collapse the rest of the shadow chain */
4614 if (object != original_object) {
4615 vm_object_unlock(object);
4616 }
4617 object = backing_object;
4618 object_lock_type = backing_object_lock_type;
4619 continue;
4620 }
4621
4622 /*
4623 * The backing object can't be a copy-object:
4624 * the shadow_offset for the copy-object must stay
4625 * as 0. Furthermore (for the 'we have all the
4626 * pages' case), if we bypass backing_object and
4627 * just shadow the next object in the chain, old
4628 * pages from that object would then have to be copied
4629 * BOTH into the (former) backing_object and into the
4630 * parent object.
4631 */
4632 if (backing_object->shadow != VM_OBJECT_NULL &&
4633 backing_object->shadow->copy == backing_object) {
4634 /* try and collapse the rest of the shadow chain */
4635 if (object != original_object) {
4636 vm_object_unlock(object);
4637 }
4638 object = backing_object;
4639 object_lock_type = backing_object_lock_type;
4640 continue;
4641 }
4642
4643 /*
4644 * We can now try to either collapse the backing
4645 * object (if the parent is the only reference to
4646 * it) or (perhaps) remove the parent's reference
4647 * to it.
4648 *
4649 * If there is exactly one reference to the backing
4650 * object, we may be able to collapse it into the
4651 * parent.
4652 *
4653 * If MACH_PAGEMAP is defined:
4654 * The parent must not have a pager created for it,
4655 * since collapsing a backing_object dumps new pages
4656 * into the parent that its pager doesn't know about
4657 * (and the collapse code can't merge the existence
4658 * maps).
4659 * Otherwise:
4660 * As long as one of the objects is still not known
4661 * to the pager, we can collapse them.
4662 */
4663 if (backing_object->ref_count == 1 &&
4664 (!object->pager_created
4665 #if !MACH_PAGEMAP
4666 || !backing_object->pager_created
4667 #endif /*!MACH_PAGEMAP */
4668 ) && vm_object_collapse_allowed) {
4669
4670 /*
4671 * We need the exclusive lock on the VM objects.
4672 */
4673 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4674 /*
4675 * We have an object and its shadow locked
4676 * "shared". We can't just upgrade the locks
4677 * to "exclusive", as some other thread might
4678 * also have these objects locked "shared" and
4679 * attempt to upgrade one or the other to
4680 * "exclusive". The upgrades would block
4681 * forever waiting for the other "shared" locks
4682 * to get released.
4683 * So we have to release the locks and go
4684 * down the shadow chain again (since it could
4685 * have changed) with "exclusive" locking.
4686 */
4687 vm_object_unlock(backing_object);
4688 if (object != original_object)
4689 vm_object_unlock(object);
4690 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4691 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4692 goto retry;
4693 }
4694
4695 XPR(XPR_VM_OBJECT,
4696 "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
4697 backing_object, object,
4698 backing_object->pager,
4699 backing_object->pager_control, 0);
4700
4701 /*
4702 * Collapse the object with its backing
4703 * object, and try again with the object's
4704 * new backing object.
4705 */
4706
4707 vm_object_do_collapse(object, backing_object);
4708 vm_object_collapse_do_collapse++;
4709 continue;
4710 }
4711
4712 /*
4713 * Collapsing the backing object was not possible
4714 * or permitted, so let's try bypassing it.
4715 */
4716
4717 if (! (can_bypass && vm_object_bypass_allowed)) {
4718 /* try and collapse the rest of the shadow chain */
4719 if (object != original_object) {
4720 vm_object_unlock(object);
4721 }
4722 object = backing_object;
4723 object_lock_type = backing_object_lock_type;
4724 continue;
4725 }
4726
4727
4728 /*
4729 * If the object doesn't have all its pages present,
4730 * we have to make sure no pages in the backing object
4731 * "show through" before bypassing it.
4732 */
4733 size = atop(object->size);
4734 rcount = object->resident_page_count;
4735 if (rcount != size) {
4736 vm_object_offset_t offset;
4737 vm_object_offset_t backing_offset;
4738 unsigned int backing_rcount;
4739 unsigned int lookups = 0;
4740
4741 /*
4742 * If the backing object has a pager but no pagemap,
4743 * then we cannot bypass it, because we don't know
4744 * what pages it has.
4745 */
4746 if (backing_object->pager_created
4747 #if MACH_PAGEMAP
4748 && (backing_object->existence_map == VM_EXTERNAL_NULL)
4749 #endif /* MACH_PAGEMAP */
4750 ) {
4751 /* try and collapse the rest of the shadow chain */
4752 if (object != original_object) {
4753 vm_object_unlock(object);
4754 }
4755 object = backing_object;
4756 object_lock_type = backing_object_lock_type;
4757 continue;
4758 }
4759
4760 /*
4761 * If the object has a pager but no pagemap,
4762 * then we cannot bypass it, because we don't know
4763 * what pages it has.
4764 */
4765 if (object->pager_created
4766 #if MACH_PAGEMAP
4767 && (object->existence_map == VM_EXTERNAL_NULL)
4768 #endif /* MACH_PAGEMAP */
4769 ) {
4770 /* try and collapse the rest of the shadow chain */
4771 if (object != original_object) {
4772 vm_object_unlock(object);
4773 }
4774 object = backing_object;
4775 object_lock_type = backing_object_lock_type;
4776 continue;
4777 }
4778
4779 /*
4780 * If all of the pages in the backing object are
4781 * shadowed by the parent object, the parent
4782 * object no longer has to shadow the backing
4783 * object; it can shadow the next one in the
4784 * chain.
4785 *
4786 * If the backing object has existence info,
4787 * we must examine its existence info
4788 * as well.
4789 *
4790 */
4791
4792 backing_offset = object->shadow_offset;
4793 backing_rcount = backing_object->resident_page_count;
4794
4795 #if MACH_PAGEMAP
4796 #define EXISTS_IN_OBJECT(obj, off, rc) \
4797 (vm_external_state_get((obj)->existence_map, \
4798 (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
4799 ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
4800 #else
4801 #define EXISTS_IN_OBJECT(obj, off, rc) \
4802 (((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
4803 #endif /* MACH_PAGEMAP */
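/*
 * EXISTS_IN_OBJECT(obj, off, rc) is true when a page at offset "off" is
 * known to exist in "obj": either the existence map (when MACH_PAGEMAP is
 * configured) records it as paged out, or, while "rc" resident pages
 * remain unaccounted for, vm_page_lookup() finds it resident.  Note the
 * side effects: each resident-page probe increments "lookups" (used below
 * to pause periodically) and a hit decrements "rc", which is why the
 * resident-page walk below works on a scratch copy ("rc = rcount")
 * rather than on rcount itself.
 */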
4804
4805 /*
4806 * Check the hint location first
4807 * (since it is often the quickest way out of here).
4808 */
4809 if (object->cow_hint != ~(vm_offset_t)0)
4810 hint_offset = (vm_object_offset_t)object->cow_hint;
4811 else
4812 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
4813 (hint_offset - 8 * PAGE_SIZE_64) : 0;
4814
4815 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
4816 backing_offset, backing_rcount) &&
4817 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
4818 /* dependency right at the hint */
4819 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
4820 /* try and collapse the rest of the shadow chain */
4821 if (object != original_object) {
4822 vm_object_unlock(object);
4823 }
4824 object = backing_object;
4825 object_lock_type = backing_object_lock_type;
4826 continue;
4827 }
4828
4829 /*
4830 * If the object's window onto the backing_object
4831 * is large compared to the number of resident
4832 * pages in the backing object, it makes sense to
4833 * walk the backing_object's resident pages first.
4834 *
4835 * NOTE: Pages may be in both the existence map and
4836 * resident. So, we can't permanently decrement
4837 * the rcount here because the second loop may
4838 * find the same pages in the backing object's
4839 * existence map that we found here and we would
4840 * double-decrement the rcount. We also may or
4841 * may not have found the page at the hint offset above.
4842 */
4843 if (backing_rcount &&
4844 #if MACH_PAGEMAP
4845 size > ((backing_object->existence_map) ?
4846 backing_rcount : (backing_rcount >> 1))
4847 #else
4848 size > (backing_rcount >> 1)
4849 #endif /* MACH_PAGEMAP */
4850 ) {
4851 unsigned int rc = rcount;
4852 vm_page_t p;
4853
4854 backing_rcount = backing_object->resident_page_count;
4855 p = (vm_page_t)queue_first(&backing_object->memq);
4856 do {
4857 /* Until we get more than one lookup lock */
4858 if (lookups > 256) {
4859 vm_object_collapse_delays++;
4860 lookups = 0;
4861 mutex_pause(0);
4862 }
4863
4864 offset = (p->offset - backing_offset);
4865 if (offset < object->size &&
4866 offset != hint_offset &&
4867 !EXISTS_IN_OBJECT(object, offset, rc)) {
4868 /* found a dependency */
4869 object->cow_hint = (vm_offset_t) offset; /* atomic */
4870
4871 break;
4872 }
4873 p = (vm_page_t) queue_next(&p->listq);
4874
4875 } while (--backing_rcount);
4876 if (backing_rcount != 0 ) {
4877 /* try and collapse the rest of the shadow chain */
4878 if (object != original_object) {
4879 vm_object_unlock(object);
4880 }
4881 object = backing_object;
4882 object_lock_type = backing_object_lock_type;
4883 continue;
4884 }
4885 }
4886
4887 /*
4888 * Walk through the offsets looking for pages in the
4889 * backing object that show through to the object.
4890 */
4891 if (backing_rcount
4892 #if MACH_PAGEMAP
4893 || backing_object->existence_map
4894 #endif /* MACH_PAGEMAP */
4895 ) {
4896 offset = hint_offset;
4897
4898 while((offset =
4899 (offset + PAGE_SIZE_64 < object->size) ?
4900 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
4901
4902 /* Until we get more than one lookup lock */
4903 if (lookups > 256) {
4904 vm_object_collapse_delays++;
4905 lookups = 0;
4906 mutex_pause(0);
4907 }
4908
4909 if (EXISTS_IN_OBJECT(backing_object, offset +
4910 backing_offset, backing_rcount) &&
4911 !EXISTS_IN_OBJECT(object, offset, rcount)) {
4912 /* found a dependency */
4913 object->cow_hint = (vm_offset_t) offset; /* atomic */
4914 break;
4915 }
4916 }
4917 if (offset != hint_offset) {
4918 /* try and collapse the rest of the shadow chain */
4919 if (object != original_object) {
4920 vm_object_unlock(object);
4921 }
4922 object = backing_object;
4923 object_lock_type = backing_object_lock_type;
4924 continue;
4925 }
4926 }
4927 }
4928
4929 /*
4930 * We need "exclusive" locks on the 2 VM objects.
4931 */
4932 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4933 vm_object_unlock(backing_object);
4934 if (object != original_object)
4935 vm_object_unlock(object);
4936 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4937 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4938 goto retry;
4939 }
4940
4941 /* reset the offset hint for any objects deeper in the chain */
4942 object->cow_hint = (vm_offset_t)0;
4943
4944 /*
4945 * All interesting pages in the backing object
4946 * already live in the parent or its pager.
4947 * Thus we can bypass the backing object.
4948 */
4949
4950 vm_object_do_bypass(object, backing_object);
4951 vm_object_collapse_do_bypass++;
4952
4953 /*
4954 * Try again with this object's new backing object.
4955 */
4956
4957 continue;
4958 }
4959
4960 if (object != original_object) {
4961 vm_object_unlock(object);
4962 }
4963 }
4964
4965 /*
4966 * Routine: vm_object_page_remove: [internal]
4967 * Purpose:
4968 * Removes all physical pages in the specified
4969 * object range from the object's list of pages.
4970 *
4971 * In/out conditions:
4972 * The object must be locked.
4973 * The object must not have paging_in_progress, usually
4974 * guaranteed by not having a pager.
4975 */
4976 unsigned int vm_object_page_remove_lookup = 0;
4977 unsigned int vm_object_page_remove_iterate = 0;
4978
4979 __private_extern__ void
4980 vm_object_page_remove(
4981 register vm_object_t object,
4982 register vm_object_offset_t start,
4983 register vm_object_offset_t end)
4984 {
4985 register vm_page_t p, next;
4986
4987 /*
4988 * One and two page removals are most popular.
4989 * The factor of 16 here is somewhat arbitrary.
4990 * It balances vm_object_lookup vs iteration.
4991 */
4992
4993 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
4994 vm_object_page_remove_lookup++;
4995
4996 for (; start < end; start += PAGE_SIZE_64) {
4997 p = vm_page_lookup(object, start);
4998 if (p != VM_PAGE_NULL) {
4999 assert(!p->cleaning && !p->pageout);
5000 if (!p->fictitious && p->pmapped)
5001 pmap_disconnect(p->phys_page);
5002 VM_PAGE_FREE(p);
5003 }
5004 }
5005 } else {
5006 vm_object_page_remove_iterate++;
5007
5008 p = (vm_page_t) queue_first(&object->memq);
5009 while (!queue_end(&object->memq, (queue_entry_t) p)) {
5010 next = (vm_page_t) queue_next(&p->listq);
5011 if ((start <= p->offset) && (p->offset < end)) {
5012 assert(!p->cleaning && !p->pageout);
5013 if (!p->fictitious && p->pmapped)
5014 pmap_disconnect(p->phys_page);
5015 VM_PAGE_FREE(p);
5016 }
5017 p = next;
5018 }
5019 }
5020 }
5021
5022
5023 /*
5024 * Routine: vm_object_coalesce
5025 * Function: Coalesces two objects backing up adjoining
5026 * regions of memory into a single object.
5027 *
5028 * returns TRUE if objects were combined.
5029 *
5030 * NOTE: Only works at the moment if the second object is NULL -
5031 * if it's not, which object do we lock first?
5032 *
5033 * Parameters:
5034 * prev_object First object to coalesce
5035 * prev_offset Offset into prev_object
5036 * next_object Second object to coalesce
5037 * next_offset Offset into next_object
5038 *
5039 * prev_size Size of reference to prev_object
5040 * next_size Size of reference to next_object
5041 *
5042 * Conditions:
5043 * The object(s) must *not* be locked. The map must be locked
5044 * to preserve the reference to the object(s).
5045 */
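/*
 * Editorial example (hypothetical values): a map entry backed by
 * prev_object at prev_offset 0 with prev_size 0x4000 is being extended
 * by an adjoining 0x1000-byte region (next_object == VM_OBJECT_NULL,
 * next_size == 0x1000).  If the checks below pass, any stale pages in
 * [0x4000, 0x5000) are removed and prev_object->size is extended to
 * 0x5000 if it was smaller, so the new range can reuse the existing
 * object.
 */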
5046 static int vm_object_coalesce_count = 0;
5047
5048 __private_extern__ boolean_t
5049 vm_object_coalesce(
5050 register vm_object_t prev_object,
5051 vm_object_t next_object,
5052 vm_object_offset_t prev_offset,
5053 __unused vm_object_offset_t next_offset,
5054 vm_object_size_t prev_size,
5055 vm_object_size_t next_size)
5056 {
5057 vm_object_size_t newsize;
5058
5059 #ifdef lint
5060 next_offset++;
5061 #endif /* lint */
5062
5063 if (next_object != VM_OBJECT_NULL) {
5064 return(FALSE);
5065 }
5066
5067 if (prev_object == VM_OBJECT_NULL) {
5068 return(TRUE);
5069 }
5070
5071 XPR(XPR_VM_OBJECT,
5072 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
5073 prev_object, prev_offset, prev_size, next_size, 0);
5074
5075 vm_object_lock(prev_object);
5076
5077 /*
5078 * Try to collapse the object first
5079 */
5080 vm_object_collapse(prev_object, prev_offset, TRUE);
5081
5082 /*
5083 * Can't coalesce if pages not mapped to
5084 * prev_entry may be in use in any way:
5085 * . more than one reference
5086 * . paged out
5087 * . shadows another object
5088 * . has a copy elsewhere
5089 * . is purgeable
5090 * . paging references (pages might be in page-list)
5091 */
5092
5093 if ((prev_object->ref_count > 1) ||
5094 prev_object->pager_created ||
5095 (prev_object->shadow != VM_OBJECT_NULL) ||
5096 (prev_object->copy != VM_OBJECT_NULL) ||
5097 (prev_object->true_share != FALSE) ||
5098 (prev_object->purgable != VM_PURGABLE_DENY) ||
5099 (prev_object->paging_in_progress != 0) ||
5100 (prev_object->activity_in_progress != 0)) {
5101 vm_object_unlock(prev_object);
5102 return(FALSE);
5103 }
5104
5105 vm_object_coalesce_count++;
5106
5107 /*
5108 * Remove any pages that may still be in the object from
5109 * a previous deallocation.
5110 */
5111 vm_object_page_remove(prev_object,
5112 prev_offset + prev_size,
5113 prev_offset + prev_size + next_size);
5114
5115 /*
5116 * Extend the object if necessary.
5117 */
5118 newsize = prev_offset + prev_size + next_size;
5119 if (newsize > prev_object->size) {
5120 #if MACH_PAGEMAP
5121 /*
5122 * We cannot extend an object that has existence info,
5123 * since the existence info might then fail to cover
5124 * the entire object.
5125 *
5126 * This assertion must be true because the object
5127 * has no pager, and we only create existence info
5128 * for objects with pagers.
5129 */
5130 assert(prev_object->existence_map == VM_EXTERNAL_NULL);
5131 #endif /* MACH_PAGEMAP */
5132 prev_object->size = newsize;
5133 }
5134
5135 vm_object_unlock(prev_object);
5136 return(TRUE);
5137 }
5138
5139 /*
5140 * Attach a set of physical pages to an object, so that they can
5141 * be mapped by mapping the object. Typically used to map IO memory.
5142 *
5143 * The mapping function and its private data are used to obtain the
5144 * physical addresses for each page to be mapped.
5145 */
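/*
 * Editorial sketch of a mapping function (hypothetical, not part of the
 * original source): for a physically contiguous device aperture, map_fn
 * only needs to add the page index to the device's base page number.
 * A caller would pass it as, e.g.,
 *	vm_object_page_map(object, 0, size, example_io_map_fn, &base_ppnum);
 */
#if 0	/* illustrative only */
static vm_object_offset_t
example_io_map_fn(void *map_fn_data, vm_object_offset_t offset)
{
	/* map_fn_data points at the device's base physical page number */
	ppnum_t	base_ppnum = *(ppnum_t *) map_fn_data;

	/* return the physical page number backing this object offset */
	return (vm_object_offset_t) (base_ppnum + atop_64(offset));
}
#endif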
5146 void
5147 vm_object_page_map(
5148 vm_object_t object,
5149 vm_object_offset_t offset,
5150 vm_object_size_t size,
5151 vm_object_offset_t (*map_fn)(void *map_fn_data,
5152 vm_object_offset_t offset),
5153 void *map_fn_data) /* private to map_fn */
5154 {
5155 int64_t num_pages;
5156 int i;
5157 vm_page_t m;
5158 vm_page_t old_page;
5159 vm_object_offset_t addr;
5160
5161 num_pages = atop_64(size);
5162
5163 for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
5164
5165 addr = (*map_fn)(map_fn_data, offset);
5166
5167 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
5168 vm_page_more_fictitious();
5169
5170 vm_object_lock(object);
5171 if ((old_page = vm_page_lookup(object, offset))
5172 != VM_PAGE_NULL)
5173 {
5174 VM_PAGE_FREE(old_page);
5175 }
5176
5177 assert((ppnum_t) addr == addr);
5178 vm_page_init(m, (ppnum_t) addr);
5179 /*
5180 * private normally requires lock_queues but since we
5181 * are initializing the page, it's not necessary here
5182 */
5183 m->private = TRUE; /* don't free page */
5184 m->wire_count = 1;
5185 vm_page_insert(m, object, offset);
5186
5187 PAGE_WAKEUP_DONE(m);
5188 vm_object_unlock(object);
5189 }
5190 }
5191
5192 #include <mach_kdb.h>
5193
5194 #if MACH_KDB
5195 #include <ddb/db_output.h>
5196 #include <vm/vm_print.h>
5197
5198 #define printf kdbprintf
5199
5200 extern boolean_t vm_object_cached(
5201 vm_object_t object);
5202
5203 extern void print_bitstring(
5204 char byte);
5205
5206 boolean_t vm_object_print_pages = FALSE;
5207
5208 void
5209 print_bitstring(
5210 char byte)
5211 {
5212 printf("%c%c%c%c%c%c%c%c",
5213 ((byte & (1 << 0)) ? '1' : '0'),
5214 ((byte & (1 << 1)) ? '1' : '0'),
5215 ((byte & (1 << 2)) ? '1' : '0'),
5216 ((byte & (1 << 3)) ? '1' : '0'),
5217 ((byte & (1 << 4)) ? '1' : '0'),
5218 ((byte & (1 << 5)) ? '1' : '0'),
5219 ((byte & (1 << 6)) ? '1' : '0'),
5220 ((byte & (1 << 7)) ? '1' : '0'));
5221 }
5222
5223 boolean_t
5224 vm_object_cached(
5225 __unused register vm_object_t object)
5226 {
5227 #if VM_OBJECT_CACHE
5228 register vm_object_t o;
5229
5230 queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
5231 if (object == o) {
5232 return TRUE;
5233 }
5234 }
5235 #endif
5236 return FALSE;
5237 }
5238
5239 #if MACH_PAGEMAP
5240 /*
5241 * vm_external_print: [ debug ]
5242 */
5243 void
5244 vm_external_print(
5245 vm_external_map_t emap,
5246 vm_object_size_t size)
5247 {
5248 if (emap == VM_EXTERNAL_NULL) {
5249 printf("0 ");
5250 } else {
5251 vm_object_size_t existence_size = stob(size);
5252 printf("{ size=%lld, map=[", (uint64_t) existence_size);
5253 if (existence_size > 0) {
5254 print_bitstring(emap[0]);
5255 }
5256 if (existence_size > 1) {
5257 print_bitstring(emap[1]);
5258 }
5259 if (existence_size > 2) {
5260 printf("...");
5261 print_bitstring(emap[existence_size-1]);
5262 }
5263 printf("] }\n");
5264 }
5265 return;
5266 }
5267 #endif /* MACH_PAGEMAP */
5268
5269 int
5270 vm_follow_object(
5271 vm_object_t object)
5272 {
5273 int count = 0;
5274 int orig_db_indent = db_indent;
5275
5276 while (TRUE) {
5277 if (object == VM_OBJECT_NULL) {
5278 db_indent = orig_db_indent;
5279 return count;
5280 }
5281
5282 count += 1;
5283
5284 iprintf("object 0x%x", object);
5285 printf(", shadow=0x%x", object->shadow);
5286 printf(", copy=0x%x", object->copy);
5287 printf(", pager=0x%x", object->pager);
5288 printf(", ref=%d\n", object->ref_count);
5289
5290 db_indent += 2;
5291 object = object->shadow;
5292 }
5293
5294 }
5295
5296 /*
5297 * vm_object_print: [ debug ]
5298 */
5299 void
5300 vm_object_print(db_expr_t db_addr, __unused boolean_t have_addr,
5301 __unused db_expr_t arg_count, __unused char *modif)
5302 {
5303 vm_object_t object;
5304 register vm_page_t p;
5305 const char *s;
5306
5307 register int count;
5308
5309 object = (vm_object_t) (long) db_addr;
5310 if (object == VM_OBJECT_NULL)
5311 return;
5312
5313 iprintf("object 0x%x\n", object);
5314
5315 db_indent += 2;
5316
5317 iprintf("size=0x%x", object->size);
5318 printf(", memq_hint=%p", object->memq_hint);
5319 printf(", ref_count=%d\n", object->ref_count);
5320 iprintf("");
5321 #if TASK_SWAPPER
5322 printf("res_count=%d, ", object->res_count);
5323 #endif /* TASK_SWAPPER */
5324 printf("resident_page_count=%d\n", object->resident_page_count);
5325
5326 iprintf("shadow=0x%x", object->shadow);
5327 if (object->shadow) {
5328 register int i = 0;
5329 vm_object_t shadow = object;
5330 while((shadow = shadow->shadow))
5331 i++;
5332 printf(" (depth %d)", i);
5333 }
5334 printf(", copy=0x%x", object->copy);
5335 printf(", shadow_offset=0x%x", object->shadow_offset);
5336 printf(", last_alloc=0x%x\n", object->last_alloc);
5337
5338 iprintf("pager=0x%x", object->pager);
5339 printf(", paging_offset=0x%x", object->paging_offset);
5340 printf(", pager_control=0x%x\n", object->pager_control);
5341
5342 iprintf("copy_strategy=%d[", object->copy_strategy);
5343 switch (object->copy_strategy) {
5344 case MEMORY_OBJECT_COPY_NONE:
5345 printf("copy_none");
5346 break;
5347
5348 case MEMORY_OBJECT_COPY_CALL:
5349 printf("copy_call");
5350 break;
5351
5352 case MEMORY_OBJECT_COPY_DELAY:
5353 printf("copy_delay");
5354 break;
5355
5356 case MEMORY_OBJECT_COPY_SYMMETRIC:
5357 printf("copy_symmetric");
5358 break;
5359
5360 case MEMORY_OBJECT_COPY_INVALID:
5361 printf("copy_invalid");
5362 break;
5363
5364 default:
5365 printf("?");
5366 }
5367 printf("]");
5368
5369 iprintf("all_wanted=0x%x<", object->all_wanted);
5370 s = "";
5371 if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
5372 printf("%sinit", s);
5373 s = ",";
5374 }
5375 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
5376 printf("%sready", s);
5377 s = ",";
5378 }
5379 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
5380 printf("%spaging", s);
5381 s = ",";
5382 }
5383 if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
5384 printf("%slock", s);
5385 s = ",";
5386 }
5387 if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
5388 printf("%suncaching", s);
5389 s = ",";
5390 }
5391 if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
5392 printf("%scopy_call", s);
5393 s = ",";
5394 }
5395 if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
5396 printf("%scaching", s);
5397 s = ",";
5398 }
5399 printf(">");
5400 printf(", paging_in_progress=%d\n", object->paging_in_progress);
5401 printf(", activity_in_progress=%d\n", object->activity_in_progress);
5402
5403 iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
5404 (object->pager_created ? "" : "!"),
5405 (object->pager_initialized ? "" : "!"),
5406 (object->pager_ready ? "" : "!"),
5407 (object->can_persist ? "" : "!"),
5408 (object->pager_trusted ? "" : "!"),
5409 (object->pageout ? "" : "!"),
5410 (object->internal ? "internal" : "external"),
5411 (object->temporary ? "temporary" : "permanent"));
5412 iprintf("%salive, %spurgeable, %spurgeable_volatile, %spurgeable_empty, %sshadowed, %scached, %sprivate\n",
5413 (object->alive ? "" : "!"),
5414 ((object->purgable != VM_PURGABLE_DENY) ? "" : "!"),
5415 ((object->purgable == VM_PURGABLE_VOLATILE) ? "" : "!"),
5416 ((object->purgable == VM_PURGABLE_EMPTY) ? "" : "!"),
5417 (object->shadowed ? "" : "!"),
5418 (vm_object_cached(object) ? "" : "!"),
5419 (object->private ? "" : "!"));
5420 iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
5421 (object->advisory_pageout ? "" : "!"),
5422 (object->silent_overwrite ? "" : "!"));
5423
5424 #if MACH_PAGEMAP
5425 iprintf("existence_map=");
5426 vm_external_print(object->existence_map, object->size);
5427 #endif /* MACH_PAGEMAP */
5428 #if MACH_ASSERT
5429 iprintf("paging_object=0x%x\n", object->paging_object);
5430 #endif /* MACH_ASSERT */
5431
5432 if (vm_object_print_pages) {
5433 count = 0;
5434 p = (vm_page_t) queue_first(&object->memq);
5435 while (!queue_end(&object->memq, (queue_entry_t) p)) {
5436 if (count == 0) {
5437 iprintf("memory:=");
5438 } else if (count == 2) {
5439 printf("\n");
5440 iprintf(" ...");
5441 count = 0;
5442 } else {
5443 printf(",");
5444 }
5445 count++;
5446
5447 printf("(off=0x%llX,page=%p)", p->offset, p);
5448 p = (vm_page_t) queue_next(&p->listq);
5449 }
5450 if (count != 0) {
5451 printf("\n");
5452 }
5453 }
5454 db_indent -= 2;
5455 }
5456
5457
5458 /*
5459 * vm_object_find [ debug ]
5460 *
5461 * Find all tasks which reference the given vm_object.
5462 */
5463
5464 boolean_t vm_object_find(vm_object_t object);
5465 boolean_t vm_object_print_verbose = FALSE;
5466
5467 boolean_t
5468 vm_object_find(
5469 vm_object_t object)
5470 {
5471 task_t task;
5472 vm_map_t map;
5473 vm_map_entry_t entry;
5474 boolean_t found = FALSE;
5475
5476 queue_iterate(&tasks, task, task_t, tasks) {
5477 map = task->map;
5478 for (entry = vm_map_first_entry(map);
5479 entry && entry != vm_map_to_entry(map);
5480 entry = entry->vme_next) {
5481
5482 vm_object_t obj;
5483
5484 /*
5485 * For the time being skip submaps,
5486 * only the kernel can have submaps,
5487 * and unless we are interested in
5488 * kernel objects, we can simply skip
5489 * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm
5490 * for a full solution.
5491 */
5492 if (entry->is_sub_map)
5493 continue;
5494 if (entry)
5495 obj = entry->object.vm_object;
5496 else
5497 continue;
5498
5499 while (obj != VM_OBJECT_NULL) {
5500 if (obj == object) {
5501 if (!found) {
5502 printf("TASK\t\tMAP\t\tENTRY\n");
5503 found = TRUE;
5504 }
5505 printf("0x%x\t0x%x\t0x%x\n",
5506 task, map, entry);
5507 }
5508 obj = obj->shadow;
5509 }
5510 }
5511 }
5512
5513 return(found);
5514 }
5515
5516 #endif /* MACH_KDB */
5517
5518 kern_return_t
5519 vm_object_populate_with_private(
5520 vm_object_t object,
5521 vm_object_offset_t offset,
5522 ppnum_t phys_page,
5523 vm_size_t size)
5524 {
5525 ppnum_t base_page;
5526 vm_object_offset_t base_offset;
5527
5528
5529 if(!object->private)
5530 return KERN_FAILURE;
5531
5532 base_page = phys_page;
5533
5534 vm_object_lock(object);
5535 if(!object->phys_contiguous) {
5536 vm_page_t m;
5537 if((base_offset = trunc_page_64(offset)) != offset) {
5538 vm_object_unlock(object);
5539 return KERN_FAILURE;
5540 }
5541 base_offset += object->paging_offset;
5542 while(size) {
5543 m = vm_page_lookup(object, base_offset);
5544 if(m != VM_PAGE_NULL) {
5545 if(m->fictitious) {
5546 if (m->phys_page != vm_page_guard_addr) {
5547
5548 vm_page_lockspin_queues();
5549 m->private = TRUE;
5550 vm_page_unlock_queues();
5551
5552 m->fictitious = FALSE;
5553 m->phys_page = base_page;
5554 if(!m->busy) {
5555 m->busy = TRUE;
5556 }
5557 if(!m->absent) {
5558 m->absent = TRUE;
5559 }
5560 m->list_req_pending = TRUE;
5561 }
5562 } else if (m->phys_page != base_page) {
5563 if (m->pmapped) {
5564 /*
5565 * pmap call to clear old mapping
5566 */
5567 pmap_disconnect(m->phys_page);
5568 }
5569 m->phys_page = base_page;
5570 }
5571
5572 /*
5573 * ENCRYPTED SWAP:
5574 * We're not pointing to the same
5575 * physical page any longer and the
5576 * contents of the new one are not
5577 * supposed to be encrypted.
5578 * XXX What happens to the original
5579 * physical page? Is it lost?
5580 */
5581 m->encrypted = FALSE;
5582
5583 } else {
5584 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
5585 vm_page_more_fictitious();
5586
5587 /*
5588 * private normally requires lock_queues but since we
5589 * are initializing the page, it's not necessary here
5590 */
5591 m->private = TRUE;
5592 m->fictitious = FALSE;
5593 m->phys_page = base_page;
5594 m->list_req_pending = TRUE;
5595 m->absent = TRUE;
5596 m->unusual = TRUE;
5597
5598 vm_page_insert(m, object, base_offset);
5599 }
5600 base_page++; /* Go to the next physical page */
5601 base_offset += PAGE_SIZE;
5602 size -= PAGE_SIZE;
5603 }
5604 } else {
5605 /* NOTE: we should check the original settings here */
5606 /* if we have a size > zero a pmap call should be made */
5607 /* to disable the range */
5608
5609 /* pmap_? */
5610
5611 /* shadows on contiguous memory are not allowed */
5612 /* we therefore can use the offset field */
5613 object->shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5614 object->size = size;
5615 }
5616 vm_object_unlock(object);
5617 return KERN_SUCCESS;
5618 }
5619
5620 /*
5621 * memory_object_free_from_cache:
5622 *
5623 * Walk the vm_object cache list, removing and freeing vm_objects
5624 * which are backed by the pager identified by the caller (pager_ops).
5625 * Remove up to "count" objects, if there are that many available
5626 * in the cache.
5627 *
5628 * Walk the list at most once, return the number of vm_objects
5629 * actually freed.
5630 */
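/*
 * Editorial usage sketch (hypothetical pager_ops): ask for at most 16
 * cached objects backed by a given pager to be reaped, then read back
 * how many actually were:
 *
 *	int freed = 16;
 *	memory_object_free_from_cache(host, &my_pager_ops, &freed);
 */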
5631
5632 __private_extern__ kern_return_t
5633 memory_object_free_from_cache(
5634 __unused host_t host,
5635 __unused memory_object_pager_ops_t pager_ops,
5636 int *count)
5637 {
5638 #if VM_OBJECT_CACHE
5639 int object_released = 0;
5640
5641 register vm_object_t object = VM_OBJECT_NULL;
5642 vm_object_t shadow;
5643
5644 /*
5645 if(host == HOST_NULL)
5646 return(KERN_INVALID_ARGUMENT);
5647 */
5648
5649 try_again:
5650 vm_object_cache_lock();
5651
5652 queue_iterate(&vm_object_cached_list, object,
5653 vm_object_t, cached_list) {
5654 if (object->pager &&
5655 (pager_ops == object->pager->mo_pager_ops)) {
5656 vm_object_lock(object);
5657 queue_remove(&vm_object_cached_list, object,
5658 vm_object_t, cached_list);
5659 vm_object_cached_count--;
5660
5661 vm_object_cache_unlock();
5662 /*
5663 * Since this object is in the cache, we know
5664 * that it is initialized and has only a pager's
5665 * (implicit) reference. Take a reference to avoid
5666 * recursive deallocations.
5667 */
5668
5669 assert(object->pager_initialized);
5670 assert(object->ref_count == 0);
5671 vm_object_lock_assert_exclusive(object);
5672 object->ref_count++;
5673
5674 /*
5675 * Terminate the object.
5676 * If the object had a shadow, we let
5677 * vm_object_deallocate deallocate it.
5678 * "pageout" objects have a shadow, but
5679 * maintain a "paging reference" rather
5680 * than a normal reference.
5681 * (We are careful here to limit recursion.)
5682 */
5683 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
5684
5685 if ((vm_object_terminate(object) == KERN_SUCCESS)
5686 && (shadow != VM_OBJECT_NULL)) {
5687 vm_object_deallocate(shadow);
5688 }
5689
5690 if(object_released++ == *count)
5691 return KERN_SUCCESS;
5692 goto try_again;
5693 }
5694 }
5695 vm_object_cache_unlock();
5696 *count = object_released;
5697 #else
5698 *count = 0;
5699 #endif
5700 return KERN_SUCCESS;
5701 }
5702
5703
5704
5705 kern_return_t
5706 memory_object_create_named(
5707 memory_object_t pager,
5708 memory_object_offset_t size,
5709 memory_object_control_t *control)
5710 {
5711 vm_object_t object;
5712 vm_object_hash_entry_t entry;
5713 lck_mtx_t *lck;
5714
5715 *control = MEMORY_OBJECT_CONTROL_NULL;
5716 if (pager == MEMORY_OBJECT_NULL)
5717 return KERN_INVALID_ARGUMENT;
5718
5719 lck = vm_object_hash_lock_spin(pager);
5720 entry = vm_object_hash_lookup(pager, FALSE);
5721
5722 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
5723 (entry->object != VM_OBJECT_NULL)) {
5724 if (entry->object->named == TRUE)
5725 panic("memory_object_create_named: caller already holds the right"); }
5726 vm_object_hash_unlock(lck);
5727
5728 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
5729 return(KERN_INVALID_OBJECT);
5730 }
5731
5732 /* wait for object (if any) to be ready */
5733 if (object != VM_OBJECT_NULL) {
5734 vm_object_lock(object);
5735 object->named = TRUE;
5736 while (!object->pager_ready) {
5737 vm_object_sleep(object,
5738 VM_OBJECT_EVENT_PAGER_READY,
5739 THREAD_UNINT);
5740 }
5741 *control = object->pager_control;
5742 vm_object_unlock(object);
5743 }
5744 return (KERN_SUCCESS);
5745 }
5746
5747
5748 /*
5749 * Routine: memory_object_recover_named [user interface]
5750 * Purpose:
5751 * Attempt to recover a named reference for a VM object.
5752 * VM will verify that the object has not already started
5753 * down the termination path, and if it has, will optionally
5754 * wait for that to finish.
5755 * Returns:
5756 * KERN_SUCCESS - we recovered a named reference on the object
5757 * KERN_FAILURE - we could not recover a reference (object dead)
5758 * KERN_INVALID_ARGUMENT - bad memory object control
5759 */
5760 kern_return_t
5761 memory_object_recover_named(
5762 memory_object_control_t control,
5763 boolean_t wait_on_terminating)
5764 {
5765 vm_object_t object;
5766
5767 object = memory_object_control_to_vm_object(control);
5768 if (object == VM_OBJECT_NULL) {
5769 return (KERN_INVALID_ARGUMENT);
5770 }
5771 restart:
5772 vm_object_lock(object);
5773
5774 if (object->terminating && wait_on_terminating) {
5775 vm_object_wait(object,
5776 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
5777 THREAD_UNINT);
5778 goto restart;
5779 }
5780
5781 if (!object->alive) {
5782 vm_object_unlock(object);
5783 return KERN_FAILURE;
5784 }
5785
5786 if (object->named == TRUE) {
5787 vm_object_unlock(object);
5788 return KERN_SUCCESS;
5789 }
5790 #if VM_OBJECT_CACHE
5791 if ((object->ref_count == 0) && (!object->terminating)) {
5792 if (!vm_object_cache_lock_try()) {
5793 vm_object_unlock(object);
5794 goto restart;
5795 }
5796 queue_remove(&vm_object_cached_list, object,
5797 vm_object_t, cached_list);
5798 vm_object_cached_count--;
5799 XPR(XPR_VM_OBJECT_CACHE,
5800 "memory_object_recover_named: removing %X, head (%X, %X)\n",
5801 object,
5802 vm_object_cached_list.next,
5803 vm_object_cached_list.prev, 0,0);
5804
5805 vm_object_cache_unlock();
5806 }
5807 #endif
5808 object->named = TRUE;
5809 vm_object_lock_assert_exclusive(object);
5810 object->ref_count++;
5811 vm_object_res_reference(object);
5812 while (!object->pager_ready) {
5813 vm_object_sleep(object,
5814 VM_OBJECT_EVENT_PAGER_READY,
5815 THREAD_UNINT);
5816 }
5817 vm_object_unlock(object);
5818 return (KERN_SUCCESS);
5819 }
5820
5821
5822 /*
5823 * vm_object_release_name:
5824 *
5825 * Enforces the name semantic on a memory_object reference count decrement.
5826 * This routine should not be called unless the caller holds a name
5827 * reference gained through memory_object_create_named.
5828 *
5829 * If the TERMINATE_IDLE flag is set, the call fails unless the
5830 * reference count is 1, i.e. the object is idle with the only remaining
5831 * reference being the name.
5832 * If the decision is made to proceed, the named flag is cleared and the
5833 * reference count is decremented. If the RESPECT_CACHE flag is set and
5834 * the reference count has gone to zero, the memory_object is checked to
5835 * see if it is cacheable; otherwise, when the reference count reaches
5836 * zero, it is simply terminated.
5837 */
5838
5839 __private_extern__ kern_return_t
5840 vm_object_release_name(
5841 vm_object_t object,
5842 int flags)
5843 {
5844 vm_object_t shadow;
5845 boolean_t original_object = TRUE;
5846
5847 while (object != VM_OBJECT_NULL) {
5848
5849 vm_object_lock(object);
5850
5851 assert(object->alive);
5852 if (original_object)
5853 assert(object->named);
5854 assert(object->ref_count > 0);
5855
5856 /*
5857 * We have to wait for initialization before
5858 * destroying or caching the object.
5859 */
5860
5861 if (object->pager_created && !object->pager_initialized) {
5862 assert(!object->can_persist);
5863 vm_object_assert_wait(object,
5864 VM_OBJECT_EVENT_INITIALIZED,
5865 THREAD_UNINT);
5866 vm_object_unlock(object);
5867 thread_block(THREAD_CONTINUE_NULL);
5868 continue;
5869 }
5870
5871 if (((object->ref_count > 1)
5872 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
5873 || (object->terminating)) {
5874 vm_object_unlock(object);
5875 return KERN_FAILURE;
5876 } else {
5877 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
5878 vm_object_unlock(object);
5879 return KERN_SUCCESS;
5880 }
5881 }
5882
5883 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
5884 (object->ref_count == 1)) {
5885 if (original_object)
5886 object->named = FALSE;
5887 vm_object_unlock(object);
5888 /* let vm_object_deallocate push this thing into */
5889 /* the cache, if that is where it is bound */
5890 vm_object_deallocate(object);
5891 return KERN_SUCCESS;
5892 }
5893 VM_OBJ_RES_DECR(object);
5894 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
5895
5896 if (object->ref_count == 1) {
5897 if (vm_object_terminate(object) != KERN_SUCCESS) {
5898 if (original_object) {
5899 return KERN_FAILURE;
5900 } else {
5901 return KERN_SUCCESS;
5902 }
5903 }
5904 if (shadow != VM_OBJECT_NULL) {
5905 original_object = FALSE;
5906 object = shadow;
5907 continue;
5908 }
5909 return KERN_SUCCESS;
5910 } else {
5911 vm_object_lock_assert_exclusive(object);
5912 object->ref_count--;
5913 assert(object->ref_count > 0);
5914 if(original_object)
5915 object->named = FALSE;
5916 vm_object_unlock(object);
5917 return KERN_SUCCESS;
5918 }
5919 }
5920 /*NOTREACHED*/
5921 assert(0);
5922 return KERN_FAILURE;
5923 }
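/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): releasing a name reference only when the object is idle,
 * respecting the cache.  The example_* identifier is hypothetical.
 */
#if 0
static kern_return_t
example_release_name_if_idle(vm_object_t object)
{
	/*
	 * Fails with KERN_FAILURE if other references remain; otherwise the
	 * object may be cached or terminated, depending on whether it is
	 * cacheable.
	 */
	return vm_object_release_name(object,
	    MEMORY_OBJECT_TERMINATE_IDLE | MEMORY_OBJECT_RESPECT_CACHE);
}
#endif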
5924
5925
5926 __private_extern__ kern_return_t
5927 vm_object_lock_request(
5928 vm_object_t object,
5929 vm_object_offset_t offset,
5930 vm_object_size_t size,
5931 memory_object_return_t should_return,
5932 int flags,
5933 vm_prot_t prot)
5934 {
5935 __unused boolean_t should_flush;
5936
5937 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5938
5939 XPR(XPR_MEMORY_OBJECT,
5940 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
5941 object, offset, size,
5942 (((should_return&1)<<1)|should_flush), prot);
5943
5944 /*
5945 * Check for bogus arguments.
5946 */
5947 if (object == VM_OBJECT_NULL)
5948 return (KERN_INVALID_ARGUMENT);
5949
5950 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
5951 return (KERN_INVALID_ARGUMENT);
5952
5953 size = round_page_64(size);
5954
5955 /*
5956 * Lock the object, and acquire a paging reference to
5957 * prevent the memory_object reference from being released.
5958 */
5959 vm_object_lock(object);
5960 vm_object_paging_begin(object);
5961
5962 (void)vm_object_update(object,
5963 offset, size, NULL, NULL, should_return, flags, prot);
5964
5965 vm_object_paging_end(object);
5966 vm_object_unlock(object);
5967
5968 return (KERN_SUCCESS);
5969 }
5970
5971 /*
5972 * Empty a purgeable object by grabbing the physical pages assigned to it and
5973 * putting them on the free queue without writing them to backing store, etc.
5974 * When the pages are next touched they will be demand zero-fill pages. We
5975 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
5976 * skip referenced/dirty pages, pages on the active queue, etc. We're more
5977 * than happy to grab these since this is a purgeable object. We mark the
5978 * object as "empty" after reaping its pages.
5979 *
5980 * On entry the object must be locked and it must be
5981 * purgeable with no delayed copies pending.
5982 */
5983 void
5984 vm_object_purge(vm_object_t object)
5985 {
5986 vm_object_lock_assert_exclusive(object);
5987
5988 if (object->purgable == VM_PURGABLE_DENY)
5989 return;
5990
5991 assert(object->copy == VM_OBJECT_NULL);
5992 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5993
5994 if(object->purgable == VM_PURGABLE_VOLATILE) {
5995 unsigned int delta;
5996 assert(object->resident_page_count >=
5997 object->wired_page_count);
5998 delta = (object->resident_page_count -
5999 object->wired_page_count);
6000 if (delta != 0) {
6001 assert(vm_page_purgeable_count >=
6002 delta);
6003 OSAddAtomic(-delta,
6004 (SInt32 *)&vm_page_purgeable_count);
6005 }
6006 if (object->wired_page_count != 0) {
6007 assert(vm_page_purgeable_wired_count >=
6008 object->wired_page_count);
6009 OSAddAtomic(-object->wired_page_count,
6010 (SInt32 *)&vm_page_purgeable_wired_count);
6011 }
6012 }
6013 object->purgable = VM_PURGABLE_EMPTY;
6014
6015 vm_object_reap_pages(object, REAP_PURGEABLE);
6016 }
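/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the locking protocol a caller of vm_object_purge() is expected
 * to follow.  The example_* identifier is hypothetical.
 */
#if 0
static void
example_purge(vm_object_t object)
{
	vm_object_lock(object);
	/* caller must guarantee: purgeable object, no delayed copies pending */
	assert(object->copy == VM_OBJECT_NULL);
	vm_object_purge(object);
	vm_object_unlock(object);
}
#endif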
6017
6018
6019 /*
6020 * vm_object_purgable_control() allows the caller to control and investigate the
6021 * state of a purgeable object. A purgeable object is created via a call to
6022 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
6023 * never be coalesced with any other object -- even other purgeable objects --
6024 * and will thus always remain a distinct object. A purgeable object has
6025 * special semantics when its reference count is exactly 1. If its reference
6026 * count is greater than 1, then a purgeable object will behave like a normal
6027 * object and attempts to use this interface will result in an error return
6028 * of KERN_INVALID_ARGUMENT.
6029 *
6030 * A purgeable object may be put into a "volatile" state which will make the
6031 * object's pages eligible for being reclaimed without paging to backing
6032 * store if the system runs low on memory. If the pages in a volatile
6033 * purgeable object are reclaimed, the purgeable object is said to have been
6034 * "emptied." When a purgeable object is emptied the system will reclaim as
6035 * many pages from the object as it can in a convenient manner (pages already
6036 * en route to backing store or busy for other reasons are left as is). When
6037 * a purgeable object is made volatile, its pages will generally be reclaimed
6038 * before other pages in the application's working set. This semantic is
6039 * generally used by applications which can recreate the data in the object
6040 * faster than it can be paged in. One such example might be media assets
6041 * which can be reread from a much faster RAID volume.
6042 *
6043 * A purgeable object may be designated as "non-volatile" which means it will
6044 * behave like all other objects in the system with pages being written to and
6045 * read from backing store as needed to satisfy system memory needs. If the
6046 * object was emptied before the object was made non-volatile, that fact will
6047 * be returned as the old state of the purgeable object (see
6048 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
6049 * were reclaimed as part of emptying the object will be refaulted in as
6050 * zero-fill on demand. It is up to the application to note that an object
6051 * was emptied and recreate the object's contents if necessary. When a
6052 * purgeable object is made non-volatile, its pages will generally not be paged
6053 * out to backing store in the immediate future. A purgeable object may also
6054 * be manually emptied.
6055 *
6056 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
6057 * volatile purgeable object may be queried at any time. This information may
6058 * be used as a control input to let the application know when the system is
6059 * experiencing memory pressure and is reclaiming memory.
6060 *
6061 * The specified address may be any address within the purgeable object. If
6062 * the specified address does not represent any object in the target task's
6063 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
6064 * object containing the specified address is not a purgeable object, then
6065 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
6066 * returned.
6067 *
6068 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
6069 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
6070 * state is used to set the new state of the purgeable object and return its
6071 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
6072 * object is returned in the parameter state.
6073 *
6074 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
6075 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
6076 * the non-volatile, volatile and volatile/empty states described above.
6077 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
6078 * immediately reclaim as many pages in the object as can be conveniently
6079 * collected (some may have already been written to backing store or be
6080 * otherwise busy).
6081 *
6082 * The process of making a purgeable object non-volatile and determining its
6083 * previous state is atomic. Thus, if a purgeable object is made
6084 * VM_PURGABLE_NONVOLATILE and the old state is returned as
6085 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
6086 * completely intact and will remain so until the object is made volatile
6087 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
6088 * was reclaimed while it was in a volatile state and its previous contents
6089 * have been lost.
6090 */
6091 /*
6092 * The object must be locked.
6093 */
6094 kern_return_t
6095 vm_object_purgable_control(
6096 vm_object_t object,
6097 vm_purgable_t control,
6098 int *state)
6099 {
6100 int old_state;
6101 int new_state;
6102
6103 if (object == VM_OBJECT_NULL) {
6104 /*
6105 * Object must already be present or it can't be purgeable.
6106 */
6107 return KERN_INVALID_ARGUMENT;
6108 }
6109
6110 /*
6111 * Get current state of the purgeable object.
6112 */
6113 old_state = object->purgable;
6114 if (old_state == VM_PURGABLE_DENY)
6115 return KERN_INVALID_ARGUMENT;
6116
6117 /* purgeable can't have delayed copies - now or in the future */
6118 assert(object->copy == VM_OBJECT_NULL);
6119 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6120
6121 /*
6122 * Execute the desired operation.
6123 */
6124 if (control == VM_PURGABLE_GET_STATE) {
6125 *state = old_state;
6126 return KERN_SUCCESS;
6127 }
6128
6129 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
6130 object->volatile_empty = TRUE;
6131 }
6132 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
6133 object->volatile_fault = TRUE;
6134 }
6135
6136 new_state = *state & VM_PURGABLE_STATE_MASK;
6137 if (new_state == VM_PURGABLE_VOLATILE &&
6138 object->volatile_empty) {
6139 new_state = VM_PURGABLE_EMPTY;
6140 }
6141
6142 switch (new_state) {
6143 case VM_PURGABLE_DENY:
6144 case VM_PURGABLE_NONVOLATILE:
6145 object->purgable = new_state;
6146
6147 if (old_state == VM_PURGABLE_VOLATILE) {
6148 unsigned int delta;
6149
6150 assert(object->resident_page_count >=
6151 object->wired_page_count);
6152 delta = (object->resident_page_count -
6153 object->wired_page_count);
6154
6155 assert(vm_page_purgeable_count >= delta);
6156
6157 if (delta != 0) {
6158 OSAddAtomic(-delta,
6159 (SInt32 *)&vm_page_purgeable_count);
6160 }
6161 if (object->wired_page_count != 0) {
6162 assert(vm_page_purgeable_wired_count >=
6163 object->wired_page_count);
6164 OSAddAtomic(-object->wired_page_count,
6165 (SInt32 *)&vm_page_purgeable_wired_count);
6166 }
6167
6168 vm_page_lock_queues();
6169
6170 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6171 purgeable_q_t queue = vm_purgeable_object_remove(object);
6172 assert(queue);
6173
6174 vm_purgeable_token_delete_first(queue);
6175 assert(queue->debug_count_objects>=0);
6176
6177 vm_page_unlock_queues();
6178 }
6179 break;
6180
6181 case VM_PURGABLE_VOLATILE:
6182 if (object->volatile_fault) {
6183 vm_page_t p;
6184 int refmod;
6185
6186 queue_iterate(&object->memq, p, vm_page_t, listq) {
6187 if (p->busy ||
6188 VM_PAGE_WIRED(p) ||
6189 p->fictitious) {
6190 continue;
6191 }
6192 refmod = pmap_disconnect(p->phys_page);
6193 if ((refmod & VM_MEM_MODIFIED) &&
6194 !p->dirty) {
6195 p->dirty = TRUE;
6196 }
6197 }
6198 }
6199
6200 if (old_state == VM_PURGABLE_EMPTY &&
6201 object->resident_page_count == 0)
6202 break;
6203
6204 purgeable_q_t queue;
6205
6206 /* find the correct queue */
6207 if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
6208 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
6209 else {
6210 if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
6211 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6212 else
6213 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
6214 }
6215
6216 if (old_state == VM_PURGABLE_NONVOLATILE ||
6217 old_state == VM_PURGABLE_EMPTY) {
6218 unsigned int delta;
6219
6220 /* try to add token... this can fail */
6221 vm_page_lock_queues();
6222
6223 kern_return_t result = vm_purgeable_token_add(queue);
6224 if (result != KERN_SUCCESS) {
6225 vm_page_unlock_queues();
6226 return result;
6227 }
6228 vm_page_unlock_queues();
6229
6230 assert(object->resident_page_count >=
6231 object->wired_page_count);
6232 delta = (object->resident_page_count -
6233 object->wired_page_count);
6234
6235 if (delta != 0) {
6236 OSAddAtomic(delta,
6237 &vm_page_purgeable_count);
6238 }
6239 if (object->wired_page_count != 0) {
6240 OSAddAtomic(object->wired_page_count,
6241 &vm_page_purgeable_wired_count);
6242 }
6243
6244 object->purgable = new_state;
6245
6246 /* object should not be on a queue */
6247 assert(object->objq.next == NULL && object->objq.prev == NULL);
6248 }
6249 else if (old_state == VM_PURGABLE_VOLATILE) {
6250 /*
6251 * if we are only reassigning priorities / purgeable groups, we don't change
6252 * the token queue, so moving priorities will not make pages stay around longer.
6253 * The reasoning is that the algorithm gives most priority to the most important
6254 * object. If a new token is added, the most important object's priority is boosted.
6255 * This already biases the system in favor of purgeable queues that move a lot.
6256 * More biasing doesn't seem necessary in this case, where no new object is added.
6257 */
6258 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6259
6260 purgeable_q_t old_queue=vm_purgeable_object_remove(object);
6261 assert(old_queue);
6262
6263 if (old_queue != queue) {
6264 kern_return_t result;
6265
6266 /* Changing queue. Have to move token. */
6267 vm_page_lock_queues();
6268 vm_purgeable_token_delete_first(old_queue);
6269 result = vm_purgeable_token_add(queue);
6270 vm_page_unlock_queues();
6271
6272 assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */
6273 }
6274 }
6275 vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
6276
6277 assert(queue->debug_count_objects>=0);
6278
6279 break;
6280
6281
6282 case VM_PURGABLE_EMPTY:
6283 if (object->volatile_fault) {
6284 vm_page_t p;
6285 int refmod;
6286
6287 queue_iterate(&object->memq, p, vm_page_t, listq) {
6288 if (p->busy ||
6289 VM_PAGE_WIRED(p) ||
6290 p->fictitious) {
6291 continue;
6292 }
6293 refmod = pmap_disconnect(p->phys_page);
6294 if ((refmod & VM_MEM_MODIFIED) &&
6295 !p->dirty) {
6296 p->dirty = TRUE;
6297 }
6298 }
6299 }
6300
6301 if (old_state != new_state) {
6302 assert(old_state == VM_PURGABLE_NONVOLATILE ||
6303 old_state == VM_PURGABLE_VOLATILE);
6304 if (old_state == VM_PURGABLE_VOLATILE) {
6305 purgeable_q_t old_queue;
6306
6307 /* object should be on a queue */
6308 assert(object->objq.next != NULL &&
6309 object->objq.prev != NULL);
6310 old_queue = vm_purgeable_object_remove(object);
6311 assert(old_queue);
6312 vm_page_lock_queues();
6313 vm_purgeable_token_delete_first(old_queue);
6314 vm_page_unlock_queues();
6315 }
6316 (void) vm_object_purge(object);
6317 }
6318 break;
6319
6320 }
6321 *state = old_state;
6322
6323 return KERN_SUCCESS;
6324 }
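/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): driving the purgeable state machine documented above from
 * kernel code that already holds the object lock.  The example_*
 * identifier is hypothetical.
 */
#if 0
static kern_return_t
example_make_volatile(vm_object_t object)
{
	kern_return_t	kr;
	int		state;

	vm_object_lock(object);
	/* make the pages reclaimable; the old state comes back in "state" */
	state = VM_PURGABLE_VOLATILE;
	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
		/* contents were already reclaimed while volatile */
	}
	vm_object_unlock(object);
	return kr;
}
#endif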
6325
6326 #if TASK_SWAPPER
6327 /*
6328 * vm_object_res_deallocate
6329 *
6330 * (recursively) decrement residence counts on vm objects and their shadows.
6331 * Called from vm_object_deallocate and when swapping out an object.
6332 *
6333 * The object is locked, and remains locked throughout the function,
6334 * even as we iterate down the shadow chain. Locks on intermediate objects
6335 * will be dropped, but not the original object.
6336 *
6337 * NOTE: this function used to use recursion, rather than iteration.
6338 */
6339
6340 __private_extern__ void
6341 vm_object_res_deallocate(
6342 vm_object_t object)
6343 {
6344 vm_object_t orig_object = object;
6345 /*
6346 * Object is locked so it can be called directly
6347 * from vm_object_deallocate. Original object is never
6348 * unlocked.
6349 */
6350 assert(object->res_count > 0);
6351 while (--object->res_count == 0) {
6352 assert(object->ref_count >= object->res_count);
6353 vm_object_deactivate_all_pages(object);
6354 /* iterate on shadow, if present */
6355 if (object->shadow != VM_OBJECT_NULL) {
6356 vm_object_t tmp_object = object->shadow;
6357 vm_object_lock(tmp_object);
6358 if (object != orig_object)
6359 vm_object_unlock(object);
6360 object = tmp_object;
6361 assert(object->res_count > 0);
6362 } else
6363 break;
6364 }
6365 if (object != orig_object)
6366 vm_object_unlock(object);
6367 }
6368
6369 /*
6370 * vm_object_res_reference
6371 *
6372 * Internal function to increment residence count on a vm object
6373 * and its shadows. It is called only from vm_object_reference, and
6374 * when swapping in a vm object, via vm_map_swap.
6375 *
6376 * The object is locked, and remains locked throughout the function,
6377 * even as we iterate down the shadow chain. Locks on intermediate objects
6378 * will be dropped, but not the original object.
6379 *
6380 * NOTE: this function used to use recursion, rather than iteration.
6381 */
6382
6383 __private_extern__ void
6384 vm_object_res_reference(
6385 vm_object_t object)
6386 {
6387 vm_object_t orig_object = object;
6388 /*
6389 * Object is locked, so this can be called directly
6390 * from vm_object_reference. This lock is never released.
6391 */
6392 while ((++object->res_count == 1) &&
6393 (object->shadow != VM_OBJECT_NULL)) {
6394 vm_object_t tmp_object = object->shadow;
6395
6396 assert(object->ref_count >= object->res_count);
6397 vm_object_lock(tmp_object);
6398 if (object != orig_object)
6399 vm_object_unlock(object);
6400 object = tmp_object;
6401 }
6402 if (object != orig_object)
6403 vm_object_unlock(object);
6404 assert(orig_object->ref_count >= orig_object->res_count);
6405 }
6406 #endif /* TASK_SWAPPER */
6407
6408 /*
6409 * vm_object_reference:
6410 *
6411 * Gets another reference to the given object.
6412 */
6413 #ifdef vm_object_reference
6414 #undef vm_object_reference
6415 #endif
6416 __private_extern__ void
6417 vm_object_reference(
6418 register vm_object_t object)
6419 {
6420 if (object == VM_OBJECT_NULL)
6421 return;
6422
6423 vm_object_lock(object);
6424 assert(object->ref_count > 0);
6425 vm_object_reference_locked(object);
6426 vm_object_unlock(object);
6427 }
6428
6429 #ifdef MACH_BSD
6430 /*
6431 * Scale the vm_object_cache
6432 * This is required to make sure that the vm_object_cache is big
6433 * enough to effectively cache the mapped file.
6434 * This is really important with UBC as all the regular file vnodes
6435 * have a memory object associated with them. Having this cache too
6436 * small results in rapid reclaim of vnodes and hurts performance a LOT!
6437 *
6438 * This is also needed as the number of vnodes can be dynamically scaled.
6439 */
6440 kern_return_t
6441 adjust_vm_object_cache(
6442 __unused vm_size_t oval,
6443 __unused vm_size_t nval)
6444 {
6445 #if VM_OBJECT_CACHE
6446 vm_object_cached_max = nval;
6447 vm_object_cache_trim(FALSE);
6448 #endif
6449 return (KERN_SUCCESS);
6450 }
6451 #endif /* MACH_BSD */
6452
6453
6454 /*
6455 * vm_object_transpose
6456 *
6457 * This routine takes two VM objects of the same size and exchanges
6458 * their backing store.
6459 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6460 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6461 *
6462 * The VM objects must not be locked by caller.
6463 */
6464 unsigned int vm_object_transpose_count = 0;
6465 kern_return_t
6466 vm_object_transpose(
6467 vm_object_t object1,
6468 vm_object_t object2,
6469 vm_object_size_t transpose_size)
6470 {
6471 vm_object_t tmp_object;
6472 kern_return_t retval;
6473 boolean_t object1_locked, object2_locked;
6474 vm_page_t page;
6475 vm_object_offset_t page_offset;
6476 lck_mtx_t *hash_lck;
6477 vm_object_hash_entry_t hash_entry;
6478
6479 tmp_object = VM_OBJECT_NULL;
6480 object1_locked = FALSE; object2_locked = FALSE;
6481
6482 if (object1 == object2 ||
6483 object1 == VM_OBJECT_NULL ||
6484 object2 == VM_OBJECT_NULL) {
6485 /*
6486 * If the 2 VM objects are the same, there's
6487 * no point in exchanging their backing store.
6488 */
6489 retval = KERN_INVALID_VALUE;
6490 goto done;
6491 }
6492
6493 /*
6494 * Since we need to lock both objects at the same time,
6495 * make sure we always lock them in the same order to
6496 * avoid deadlocks.
6497 */
6498 if (object1 > object2) {
6499 tmp_object = object1;
6500 object1 = object2;
6501 object2 = tmp_object;
6502 }
6503
6504 /*
6505 * Allocate a temporary VM object to hold object1's contents
6506 * while we copy object2 to object1.
6507 */
6508 tmp_object = vm_object_allocate(transpose_size);
6509 vm_object_lock(tmp_object);
6510 tmp_object->can_persist = FALSE;
6511
6512
6513 /*
6514 * Grab control of the 1st VM object.
6515 */
6516 vm_object_lock(object1);
6517 object1_locked = TRUE;
6518 if (!object1->alive || object1->terminating ||
6519 object1->copy || object1->shadow || object1->shadowed ||
6520 object1->purgable != VM_PURGABLE_DENY) {
6521 /*
6522 * We don't deal with copy or shadow objects (yet).
6523 */
6524 retval = KERN_INVALID_VALUE;
6525 goto done;
6526 }
6527 /*
6528 * We're about to mess with the object's backing store and
6529 * taking a "paging_in_progress" reference wouldn't be enough
6530 * to prevent any paging activity on this object, so the caller should
6531 * have "quiesced" the objects beforehand, via a UPL operation with
6532 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6533 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6534 *
6535 * Wait for any paging operation to complete (but only paging, not
6536 * other kinds of activity not linked to the pager). After we're
6537 * satisfied that there's no more paging in progress, we keep the
6538 * object locked, to guarantee that no one tries to access its pager.
6539 */
6540 vm_object_paging_only_wait(object1, THREAD_UNINT);
6541
6542 /*
6543 * Same as above for the 2nd object...
6544 */
6545 vm_object_lock(object2);
6546 object2_locked = TRUE;
6547 if (! object2->alive || object2->terminating ||
6548 object2->copy || object2->shadow || object2->shadowed ||
6549 object2->purgable != VM_PURGABLE_DENY) {
6550 retval = KERN_INVALID_VALUE;
6551 goto done;
6552 }
6553 vm_object_paging_only_wait(object2, THREAD_UNINT);
6554
6555
6556 if (object1->size != object2->size ||
6557 object1->size != transpose_size) {
6558 /*
6559 * If the 2 objects don't have the same size, we can't
6560 * exchange their backing stores or one would overflow.
6561 * If their size doesn't match the caller's
6562 * "transpose_size", we can't do it either because the
6563 * transpose operation will affect the entire span of
6564 * the objects.
6565 */
6566 retval = KERN_INVALID_VALUE;
6567 goto done;
6568 }
6569
6570
6571 /*
6572 * Transpose the lists of resident pages.
6573 * This also updates the resident_page_count and the memq_hint.
6574 */
6575 if (object1->phys_contiguous || queue_empty(&object1->memq)) {
6576 /*
6577 * No pages in object1, just transfer pages
6578 * from object2 to object1. No need to go through
6579 * an intermediate object.
6580 */
6581 while (!queue_empty(&object2->memq)) {
6582 page = (vm_page_t) queue_first(&object2->memq);
6583 vm_page_rename(page, object1, page->offset, FALSE);
6584 }
6585 assert(queue_empty(&object2->memq));
6586 } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
6587 /*
6588 * No pages in object2, just transfer pages
6589 * from object1 to object2. No need to go through
6590 * an intermediate object.
6591 */
6592 while (!queue_empty(&object1->memq)) {
6593 page = (vm_page_t) queue_first(&object1->memq);
6594 vm_page_rename(page, object2, page->offset, FALSE);
6595 }
6596 assert(queue_empty(&object1->memq));
6597 } else {
6598 /* transfer object1's pages to tmp_object */
6599 while (!queue_empty(&object1->memq)) {
6600 page = (vm_page_t) queue_first(&object1->memq);
6601 page_offset = page->offset;
6602 vm_page_remove(page, TRUE);
6603 page->offset = page_offset;
6604 queue_enter(&tmp_object->memq, page, vm_page_t, listq);
6605 }
6606 assert(queue_empty(&object1->memq));
6607 /* transfer object2's pages to object1 */
6608 while (!queue_empty(&object2->memq)) {
6609 page = (vm_page_t) queue_first(&object2->memq);
6610 vm_page_rename(page, object1, page->offset, FALSE);
6611 }
6612 assert(queue_empty(&object2->memq));
6613 /* transfer tmp_object's pages to object1 */
6614 while (!queue_empty(&tmp_object->memq)) {
6615 page = (vm_page_t) queue_first(&tmp_object->memq);
6616 queue_remove(&tmp_object->memq, page,
6617 vm_page_t, listq);
6618 vm_page_insert(page, object2, page->offset);
6619 }
6620 assert(queue_empty(&tmp_object->memq));
6621 }
6622
6623 #define __TRANSPOSE_FIELD(field) \
6624 MACRO_BEGIN \
6625 tmp_object->field = object1->field; \
6626 object1->field = object2->field; \
6627 object2->field = tmp_object->field; \
6628 MACRO_END
6629
6630 /* "Lock" refers to the object not its contents */
6631 /* "size" should be identical */
6632 assert(object1->size == object2->size);
6633 /* "memq_hint" was updated above when transposing pages */
6634 /* "ref_count" refers to the object not its contents */
6635 #if TASK_SWAPPER
6636 /* "res_count" refers to the object not its contents */
6637 #endif
6638 /* "resident_page_count" was updated above when transposing pages */
6639 /* "wired_page_count" was updated above when transposing pages */
6640 /* "reusable_page_count" was updated above when transposing pages */
6641 /* there should be no "copy" */
6642 assert(!object1->copy);
6643 assert(!object2->copy);
6644 /* there should be no "shadow" */
6645 assert(!object1->shadow);
6646 assert(!object2->shadow);
6647 __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
6648 __TRANSPOSE_FIELD(pager);
6649 __TRANSPOSE_FIELD(paging_offset);
6650 __TRANSPOSE_FIELD(pager_control);
6651 /* update the memory_objects' pointers back to the VM objects */
6652 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6653 memory_object_control_collapse(object1->pager_control,
6654 object1);
6655 }
6656 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6657 memory_object_control_collapse(object2->pager_control,
6658 object2);
6659 }
6660 __TRANSPOSE_FIELD(copy_strategy);
6661 /* "paging_in_progress" refers to the object not its contents */
6662 assert(!object1->paging_in_progress);
6663 assert(!object2->paging_in_progress);
6664 assert(object1->activity_in_progress);
6665 assert(object2->activity_in_progress);
6666 /* "all_wanted" refers to the object not its contents */
6667 __TRANSPOSE_FIELD(pager_created);
6668 __TRANSPOSE_FIELD(pager_initialized);
6669 __TRANSPOSE_FIELD(pager_ready);
6670 __TRANSPOSE_FIELD(pager_trusted);
6671 __TRANSPOSE_FIELD(can_persist);
6672 __TRANSPOSE_FIELD(internal);
6673 __TRANSPOSE_FIELD(temporary);
6674 __TRANSPOSE_FIELD(private);
6675 __TRANSPOSE_FIELD(pageout);
6676 /* "alive" should be set */
6677 assert(object1->alive);
6678 assert(object2->alive);
6679 /* "purgeable" should be non-purgeable */
6680 assert(object1->purgable == VM_PURGABLE_DENY);
6681 assert(object2->purgable == VM_PURGABLE_DENY);
6682 /* "shadowed" refers to the object not its contents */
6683 __TRANSPOSE_FIELD(silent_overwrite);
6684 __TRANSPOSE_FIELD(advisory_pageout);
6685 __TRANSPOSE_FIELD(true_share);
6686 /* "terminating" should not be set */
6687 assert(!object1->terminating);
6688 assert(!object2->terminating);
6689 __TRANSPOSE_FIELD(named);
6690 /* "shadow_severed" refers to the object not its contents */
6691 __TRANSPOSE_FIELD(phys_contiguous);
6692 __TRANSPOSE_FIELD(nophyscache);
6693 /* "cached_list.next" points to transposed object */
6694 object1->cached_list.next = (queue_entry_t) object2;
6695 object2->cached_list.next = (queue_entry_t) object1;
6696 /* "cached_list.prev" should be NULL */
6697 assert(object1->cached_list.prev == NULL);
6698 assert(object2->cached_list.prev == NULL);
6699 /* "msr_q" is linked to the object not its contents */
6700 assert(queue_empty(&object1->msr_q));
6701 assert(queue_empty(&object2->msr_q));
6702 __TRANSPOSE_FIELD(last_alloc);
6703 __TRANSPOSE_FIELD(sequential);
6704 __TRANSPOSE_FIELD(pages_created);
6705 __TRANSPOSE_FIELD(pages_used);
6706 #if MACH_PAGEMAP
6707 __TRANSPOSE_FIELD(existence_map);
6708 #endif
6709 __TRANSPOSE_FIELD(cow_hint);
6710 #if MACH_ASSERT
6711 __TRANSPOSE_FIELD(paging_object);
6712 #endif
6713 __TRANSPOSE_FIELD(wimg_bits);
6714 __TRANSPOSE_FIELD(code_signed);
6715 if (object1->hashed) {
6716 hash_lck = vm_object_hash_lock_spin(object2->pager);
6717 hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
6718 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
6719 hash_entry->object = object2;
6720 vm_object_hash_unlock(hash_lck);
6721 }
6722 if (object2->hashed) {
6723 hash_lck = vm_object_hash_lock_spin(object1->pager);
6724 hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
6725 assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
6726 hash_entry->object = object1;
6727 vm_object_hash_unlock(hash_lck);
6728 }
6729 __TRANSPOSE_FIELD(hashed);
6730 object1->transposed = TRUE;
6731 object2->transposed = TRUE;
6732 __TRANSPOSE_FIELD(mapping_in_progress);
6733 __TRANSPOSE_FIELD(volatile_empty);
6734 __TRANSPOSE_FIELD(volatile_fault);
6735 __TRANSPOSE_FIELD(all_reusable);
6736 assert(object1->blocked_access);
6737 assert(object2->blocked_access);
6738 assert(object1->__object2_unused_bits == 0);
6739 assert(object2->__object2_unused_bits == 0);
6740 #if UPL_DEBUG
6741 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6742 #endif
6743 assert(object1->objq.next == NULL);
6744 assert(object1->objq.prev == NULL);
6745 assert(object2->objq.next == NULL);
6746 assert(object2->objq.prev == NULL);
6747
6748 #undef __TRANSPOSE_FIELD
6749
6750 retval = KERN_SUCCESS;
6751
6752 done:
6753 /*
6754 * Cleanup.
6755 */
6756 if (tmp_object != VM_OBJECT_NULL) {
6757 vm_object_unlock(tmp_object);
6758 /*
6759 * Re-initialize the temporary object to avoid
6760 * deallocating a real pager.
6761 */
6762 _vm_object_allocate(transpose_size, tmp_object);
6763 vm_object_deallocate(tmp_object);
6764 tmp_object = VM_OBJECT_NULL;
6765 }
6766
6767 if (object1_locked) {
6768 vm_object_unlock(object1);
6769 object1_locked = FALSE;
6770 }
6771 if (object2_locked) {
6772 vm_object_unlock(object2);
6773 object2_locked = FALSE;
6774 }
6775
6776 vm_object_transpose_count++;
6777
6778 return retval;
6779 }
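/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the expected calling pattern for vm_object_transpose().  Both
 * objects are assumed to be the same size and to have been quiesced
 * beforehand via a UPL with UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS, as the
 * comment above requires.  The example_* identifier is hypothetical.
 */
#if 0
static kern_return_t
example_swap_backing_store(vm_object_t a, vm_object_t b, vm_object_size_t size)
{
	/* neither object may be locked by the caller */
	return vm_object_transpose(a, b, size);
}
#endif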
6780
6781
6782 /*
6783 * vm_object_cluster_size
6784 *
6785 * Determine how big a cluster we should issue an I/O for...
6786 *
6787 * Inputs: *start == offset of page needed
6788 * *length == maximum cluster pager can handle
6789 * Outputs: *start == beginning offset of cluster
6790 * *length == length of cluster to try
6791 *
6792 * The original *start will be encompassed by the cluster
6793 *
6794 */
6795 extern int speculative_reads_disabled;
6796 #if CONFIG_EMBEDDED
6797 unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
6798 unsigned int preheat_pages_min = 8;
6799 unsigned int preheat_pages_mult = 4;
6800 #else
6801 unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
6802 unsigned int preheat_pages_min = 8;
6803 unsigned int preheat_pages_mult = 4;
6804 #endif
6805
6806 uint32_t pre_heat_scaling[MAX_UPL_TRANSFER + 1];
6807 uint32_t pre_heat_cluster[MAX_UPL_TRANSFER + 1];
6808
6809
6810 __private_extern__ void
6811 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6812 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6813 {
6814 vm_size_t pre_heat_size;
6815 vm_size_t tail_size;
6816 vm_size_t head_size;
6817 vm_size_t max_length;
6818 vm_size_t cluster_size;
6819 vm_object_offset_t object_size;
6820 vm_object_offset_t orig_start;
6821 vm_object_offset_t target_start;
6822 vm_object_offset_t offset;
6823 vm_behavior_t behavior;
6824 boolean_t look_behind = TRUE;
6825 boolean_t look_ahead = TRUE;
6826 uint32_t throttle_limit;
6827 int sequential_run;
6828 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6829 unsigned int max_ph_size;
6830 unsigned int min_ph_size;
6831 unsigned int ph_mult;
6832
6833 assert( !(*length & PAGE_MASK));
6834 assert( !(*start & PAGE_MASK_64));
6835
6836 if ( (ph_mult = preheat_pages_mult) < 1 )
6837 ph_mult = 1;
6838 if ( (min_ph_size = preheat_pages_min) < 1 )
6839 min_ph_size = 1;
6840 if ( (max_ph_size = preheat_pages_max) > MAX_UPL_TRANSFER )
6841 max_ph_size = MAX_UPL_TRANSFER;
6842
6843 if ( (max_length = *length) > (max_ph_size * PAGE_SIZE) )
6844 max_length = (max_ph_size * PAGE_SIZE);
6845
6846 /*
6847 * we'll always return a cluster size of at least
6848 * 1 page, since the original fault must always
6849 * be processed
6850 */
6851 *length = PAGE_SIZE;
6852 *io_streaming = 0;
6853
6854 if (speculative_reads_disabled || fault_info == NULL || max_length == 0) {
6855 /*
6856 * no cluster... just fault the page in
6857 */
6858 return;
6859 }
6860 orig_start = *start;
6861 target_start = orig_start;
6862 cluster_size = round_page(fault_info->cluster_size);
6863 behavior = fault_info->behavior;
6864
6865 vm_object_lock(object);
6866
6867 if (object->internal)
6868 object_size = object->size;
6869 else if (object->pager != MEMORY_OBJECT_NULL)
6870 vnode_pager_get_object_size(object->pager, &object_size);
6871 else
6872 goto out; /* pager is gone for this object, nothing more to do */
6873
6874 object_size = round_page_64(object_size);
6875
6876 if (orig_start >= object_size) {
6877 /*
6878 * fault occurred beyond the EOF...
6879 * we need to punt w/o changing the
6880 * starting offset
6881 */
6882 goto out;
6883 }
6884 if (object->pages_used > object->pages_created) {
6885 /*
6886 * must have wrapped our 32 bit counters
6887 * so reset
6888 */
6889 object->pages_used = object->pages_created = 0;
6890 }
6891 if ((sequential_run = object->sequential)) {
6892 if (sequential_run < 0) {
6893 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
6894 sequential_run = 0 - sequential_run;
6895 } else {
6896 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6897 }
6898
6899 }
6900 switch(behavior) {
6901
6902 default:
6903 behavior = VM_BEHAVIOR_DEFAULT; /* fall through */
6904
6905 case VM_BEHAVIOR_DEFAULT:
6906 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
6907 goto out;
6908
6909 if (sequential_run >= (3 * PAGE_SIZE)) {
6910 pre_heat_size = sequential_run + PAGE_SIZE;
6911
6912 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
6913 look_behind = FALSE;
6914 else
6915 look_ahead = FALSE;
6916
6917 *io_streaming = 1;
6918 } else {
6919
6920 if (object->pages_created < 32 * ph_mult) {
6921 /*
6922 * prime the pump
6923 */
6924 pre_heat_size = PAGE_SIZE * 8 * ph_mult;
6925 break;
6926 }
6927 /*
6928 * Linear growth in PH size: The maximum size is max_length...
6929 * this calculation will result in a size that is neither a
6930 * power of 2 nor a multiple of PAGE_SIZE... so round
6931 * it up to the nearest PAGE_SIZE boundary
6932 */
6933 pre_heat_size = (ph_mult * (max_length * object->pages_used) / object->pages_created);
6934
6935 if (pre_heat_size < PAGE_SIZE * min_ph_size)
6936 pre_heat_size = PAGE_SIZE * min_ph_size;
6937 else
6938 pre_heat_size = round_page(pre_heat_size);
6939 }
6940 break;
6941
6942 case VM_BEHAVIOR_RANDOM:
6943 if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
6944 goto out;
6945 break;
6946
6947 case VM_BEHAVIOR_SEQUENTIAL:
6948 if ((pre_heat_size = cluster_size) == 0)
6949 pre_heat_size = sequential_run + PAGE_SIZE;
6950 look_behind = FALSE;
6951 *io_streaming = 1;
6952
6953 break;
6954
6955 case VM_BEHAVIOR_RSEQNTL:
6956 if ((pre_heat_size = cluster_size) == 0)
6957 pre_heat_size = sequential_run + PAGE_SIZE;
6958 look_ahead = FALSE;
6959 *io_streaming = 1;
6960
6961 break;
6962
6963 }
6964 throttle_limit = (uint32_t) max_length;
6965 assert(throttle_limit == max_length);
6966
6967 if (vnode_pager_check_hard_throttle(object->pager, &throttle_limit, *io_streaming) == KERN_SUCCESS) {
6968 if (max_length > throttle_limit)
6969 max_length = throttle_limit;
6970 }
6971 if (pre_heat_size > max_length)
6972 pre_heat_size = max_length;
6973
6974 if (behavior == VM_BEHAVIOR_DEFAULT) {
6975 if (vm_page_free_count < vm_page_throttle_limit)
6976 pre_heat_size = trunc_page(pre_heat_size / 8);
6977 else if (vm_page_free_count < vm_page_free_target)
6978 pre_heat_size = trunc_page(pre_heat_size / 2);
6979
6980 if (pre_heat_size <= PAGE_SIZE)
6981 goto out;
6982 }
6983 if (look_ahead == TRUE) {
6984 if (look_behind == TRUE) {
6985 /*
6986 * if we get here it's due to a random access...
6987 * so we want to center the original fault address
6988 * within the cluster we will issue... make sure
6989 * to calculate 'head_size' as a multiple of PAGE_SIZE...
6990 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
6991 * necessarily an even number of pages so we need to truncate
6992 * the result to a PAGE_SIZE boundary
6993 */
6994 head_size = trunc_page(pre_heat_size / 2);
6995
6996 if (target_start > head_size)
6997 target_start -= head_size;
6998 else
6999 target_start = 0;
7000
7001 /*
7002 * 'target_start' at this point represents the beginning offset
7003 * of the cluster we are considering... 'orig_start' will be in
7004 * the center of this cluster if we didn't have to clip the start
7005 * due to running into the start of the file
7006 */
7007 }
7008 if ((target_start + pre_heat_size) > object_size)
7009 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
7010 /*
7011 * at this point calculate the number of pages beyond the original fault
7012 * address that we want to consider... this is guaranteed not to extend beyond
7013 * the current EOF...
7014 */
7015 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
7016 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
7017 } else {
7018 if (pre_heat_size > target_start)
7019 pre_heat_size = (vm_size_t) target_start; /* XXX: 32-bit vs 64-bit ? Joe ? */
7020 tail_size = 0;
7021 }
7022 assert( !(target_start & PAGE_MASK_64));
7023 assert( !(pre_heat_size & PAGE_MASK));
7024
7025 pre_heat_scaling[pre_heat_size / PAGE_SIZE]++;
7026
7027 if (pre_heat_size <= PAGE_SIZE)
7028 goto out;
7029
7030 if (look_behind == TRUE) {
7031 /*
7032 * take a look at the pages before the original
7033 * faulting offset... recalculate this in case
7034 * we had to clip 'pre_heat_size' above to keep
7035 * from running past the EOF.
7036 */
7037 head_size = pre_heat_size - tail_size - PAGE_SIZE;
7038
7039 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7040 /*
7041 * don't poke below the lowest offset
7042 */
7043 if (offset < fault_info->lo_offset)
7044 break;
7045 /*
7046 * for external objects and internal objects w/o an existence map
7047 * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7048 */
7049 #if MACH_PAGEMAP
7050 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7051 /*
7052 * we know for a fact that the pager can't provide the page
7053 * so don't include it or any pages beyond it in this cluster
7054 */
7055 break;
7056 }
7057 #endif
7058 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7059 /*
7060 * don't bridge resident pages
7061 */
7062 break;
7063 }
7064 *start = offset;
7065 *length += PAGE_SIZE;
7066 }
7067 }
7068 if (look_ahead == TRUE) {
7069 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7070 /*
7071 * don't poke above the highest offset
7072 */
7073 if (offset >= fault_info->hi_offset)
7074 break;
7075 assert(offset < object_size);
7076
7077 /*
7078 * for external objects and internal objects w/o an existence map
7079 * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7080 */
7081 #if MACH_PAGEMAP
7082 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7083 /*
7084 * we know for a fact that the pager can't provide the page
7085 * so don't include it or any pages beyond it in this cluster
7086 */
7087 break;
7088 }
7089 #endif
7090 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7091 /*
7092 * don't bridge resident pages
7093 */
7094 break;
7095 }
7096 *length += PAGE_SIZE;
7097 }
7098 }
7099 out:
7100 if (*length > max_length)
7101 *length = max_length;
7102
7103 pre_heat_cluster[*length / PAGE_SIZE]++;
7104
7105 vm_object_unlock(object);
7106 }
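/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): how a fault path might size its read-ahead cluster with the
 * routine above.  "fault_offset" and "fault_info" are assumed to come
 * from the fault handler; the example_* identifier is hypothetical.
 */
#if 0
static void
example_size_cluster(vm_object_t object, vm_object_offset_t fault_offset,
	vm_object_fault_info_t fault_info)
{
	vm_object_offset_t	start = fault_offset & ~PAGE_MASK_64;
	vm_size_t		length = MAX_UPL_TRANSFER * PAGE_SIZE;
	uint32_t		io_streaming = 0;

	/* on return, [start, start + length) spans the cluster to request */
	vm_object_cluster_size(object, &start, &length, fault_info, &io_streaming);
}
#endif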
7107
7108
7109 /*
7110 * Allow manipulation of individual page state. This is actually part of
7111 * the UPL regimen but takes place on the VM object rather than on a UPL
7112 */
7113
7114 kern_return_t
7115 vm_object_page_op(
7116 vm_object_t object,
7117 vm_object_offset_t offset,
7118 int ops,
7119 ppnum_t *phys_entry,
7120 int *flags)
7121 {
7122 vm_page_t dst_page;
7123
7124 vm_object_lock(object);
7125
7126 if(ops & UPL_POP_PHYSICAL) {
7127 if(object->phys_contiguous) {
7128 if (phys_entry) {
7129 *phys_entry = (ppnum_t)
7130 (object->shadow_offset >> PAGE_SHIFT);
7131 }
7132 vm_object_unlock(object);
7133 return KERN_SUCCESS;
7134 } else {
7135 vm_object_unlock(object);
7136 return KERN_INVALID_OBJECT;
7137 }
7138 }
7139 if(object->phys_contiguous) {
7140 vm_object_unlock(object);
7141 return KERN_INVALID_OBJECT;
7142 }
7143
7144 while(TRUE) {
7145 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
7146 vm_object_unlock(object);
7147 return KERN_FAILURE;
7148 }
7149
7150 /* Sync up on getting the busy bit */
7151 if((dst_page->busy || dst_page->cleaning) &&
7152 (((ops & UPL_POP_SET) &&
7153 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7154 /* someone else is playing with the page, we will */
7155 /* have to wait */
7156 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7157 continue;
7158 }
7159
7160 if (ops & UPL_POP_DUMP) {
7161 if (dst_page->pmapped == TRUE)
7162 pmap_disconnect(dst_page->phys_page);
7163
7164 VM_PAGE_FREE(dst_page);
7165 break;
7166 }
7167
7168 if (flags) {
7169 *flags = 0;
7170
7171 /* Get the condition of flags before requested ops */
7172 /* are undertaken */
7173
7174 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
7175 if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
7176 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
7177 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
7178 if(dst_page->busy) *flags |= UPL_POP_BUSY;
7179 }
7180
7181 /* The caller should have made a call either contingent with */
7182 /* or prior to this call to set UPL_POP_BUSY */
7183 if(ops & UPL_POP_SET) {
7184 /* The protection granted with this assert will */
7185 /* not be complete. If the caller violates the */
7186 /* convention and attempts to change page state */
7187 /* without first setting busy we may not see it */
7188 /* because the page may already be busy. However */
7189 /* if such violations occur we will assert sooner */
7190 /* or later. */
7191 assert(dst_page->busy || (ops & UPL_POP_BUSY));
7192 if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
7193 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
7194 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
7195 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
7196 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
7197 }
7198
7199 if(ops & UPL_POP_CLR) {
7200 assert(dst_page->busy);
7201 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
7202 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
7203 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
7204 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
7205 if (ops & UPL_POP_BUSY) {
7206 dst_page->busy = FALSE;
7207 PAGE_WAKEUP(dst_page);
7208 }
7209 }
7210
7211 if (dst_page->encrypted) {
7212 /*
7213 * ENCRYPTED SWAP:
7214 * We need to decrypt this encrypted page before the
7215 * caller can access its contents.
7216 * But if the caller really wants to access the page's
7217 * contents, they have to keep the page "busy".
7218 * Otherwise, the page could get recycled or re-encrypted
7219 * at any time.
7220 */
7221 if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
7222 dst_page->busy) {
7223 /*
7224 * The page is stable enough to be accessed by
7225 * the caller, so make sure its contents are
7226 * not encrypted.
7227 */
7228 vm_page_decrypt(dst_page, 0);
7229 } else {
7230 /*
7231 * The page is not busy, so don't bother
7232 * decrypting it, since anything could
7233 * happen to it between now and when the
7234 * caller wants to access it.
7235 * We should not give the caller access
7236 * to this page.
7237 */
7238 assert(!phys_entry);
7239 }
7240 }
7241
7242 if (phys_entry) {
7243 /*
7244 * The physical page number will remain valid
7245 * only if the page is kept busy.
7246 * ENCRYPTED SWAP: make sure we don't let the
7247 * caller access an encrypted page.
7248 */
7249 assert(dst_page->busy);
7250 assert(!dst_page->encrypted);
7251 *phys_entry = dst_page->phys_page;
7252 }
7253
7254 break;
7255 }
7256
7257 vm_object_unlock(object);
7258 return KERN_SUCCESS;
7259
7260 }
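/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): pinning down a page's physical page number with the routine
 * above.  Setting UPL_POP_BUSY keeps the returned ppnum valid until the
 * caller clears the busy bit again.  The example_* identifier is
 * hypothetical.
 */
#if 0
static kern_return_t
example_get_phys_page(vm_object_t object, vm_object_offset_t offset,
	ppnum_t *ppnum)
{
	int	flags;

	return vm_object_page_op(object, offset,
	    UPL_POP_SET | UPL_POP_BUSY, ppnum, &flags);
}
#endif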
7261
7262 /*
7263 * vm_object_range_op offers performance enhancement over
7264 * vm_object_page_op for page_op functions which do not require page
7265 * level state to be returned from the call. Page_op was created to provide
7266 * a low-cost alternative to page manipulation via UPLs when only a single
7267 * page was involved. The range_op call establishes the ability in the _op
7268 * family of functions to work on multiple pages where the lack of page level
7269 * state handling allows the caller to avoid the overhead of the upl structures.
7270 */
7271
7272 kern_return_t
7273 vm_object_range_op(
7274 vm_object_t object,
7275 vm_object_offset_t offset_beg,
7276 vm_object_offset_t offset_end,
7277 int ops,
7278 uint32_t *range)
7279 {
7280 vm_object_offset_t offset;
7281 vm_page_t dst_page;
7282
7283 if (offset_end - offset_beg > (uint32_t) -1) {
7284 /* range is too big and would overflow "*range" */
7285 return KERN_INVALID_ARGUMENT;
7286 }
7287 if (object->resident_page_count == 0) {
7288 if (range) {
7289 if (ops & UPL_ROP_PRESENT) {
7290 *range = 0;
7291 } else {
7292 *range = (uint32_t) (offset_end - offset_beg);
7293 assert(*range == (offset_end - offset_beg));
7294 }
7295 }
7296 return KERN_SUCCESS;
7297 }
7298 vm_object_lock(object);
7299
7300 if (object->phys_contiguous) {
7301 vm_object_unlock(object);
7302 return KERN_INVALID_OBJECT;
7303 }
7304
7305 offset = offset_beg & ~PAGE_MASK_64;
7306
7307 while (offset < offset_end) {
7308 dst_page = vm_page_lookup(object, offset);
7309 if (dst_page != VM_PAGE_NULL) {
7310 if (ops & UPL_ROP_DUMP) {
7311 if (dst_page->busy || dst_page->cleaning) {
7312 /*
7313 * someone else is playing with the
7314 * page, we will have to wait
7315 */
7316 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
7317 /*
7318 * need to look the page up again since its
7319 * state may have changed while we slept;
7320 * it might even belong to a different object
7321 * at this point
7322 */
7323 continue;
7324 }
7325 if (dst_page->pmapped == TRUE)
7326 pmap_disconnect(dst_page->phys_page);
7327
7328 VM_PAGE_FREE(dst_page);
7329
7330 } else if ((ops & UPL_ROP_ABSENT) && !dst_page->absent)
7331 break;
7332 } else if (ops & UPL_ROP_PRESENT)
7333 break;
7334
7335 offset += PAGE_SIZE;
7336 }
7337 vm_object_unlock(object);
7338
7339 if (range) {
7340 if (offset > offset_end)
7341 offset = offset_end;
7342 if(offset > offset_beg) {
7343 *range = (uint32_t) (offset - offset_beg);
7344 assert(*range == (offset - offset_beg));
7345 } else {
7346 *range = 0;
7347 }
7348 }
7349 return KERN_SUCCESS;
7350 }
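/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): discarding all resident pages in a range without the overhead
 * of building a UPL.  The example_* identifier is hypothetical.
 */
#if 0
static kern_return_t
example_dump_range(vm_object_t object, vm_object_offset_t beg,
	vm_object_offset_t end)
{
	uint32_t	range;

	/* "range" reports the number of bytes covered before stopping */
	return vm_object_range_op(object, beg, end, UPL_ROP_DUMP, &range);
}
#endif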
7351
7352
7353 uint32_t scan_object_collision = 0;
7354
7355 void
7356 vm_object_lock(vm_object_t object)
7357 {
7358 if (object == vm_pageout_scan_wants_object) {
7359 scan_object_collision++;
7360 mutex_pause(2);
7361 }
7362 lck_rw_lock_exclusive(&object->Lock);
7363 }
7364
7365 boolean_t
7366 vm_object_lock_avoid(vm_object_t object)
7367 {
7368 if (object == vm_pageout_scan_wants_object) {
7369 scan_object_collision++;
7370 return TRUE;
7371 }
7372 return FALSE;
7373 }
7374
7375 boolean_t
7376 _vm_object_lock_try(vm_object_t object)
7377 {
7378 return (lck_rw_try_lock_exclusive(&object->Lock));
7379 }
7380
7381 boolean_t
7382 vm_object_lock_try(vm_object_t object)
7383 {
7384 if (vm_object_lock_avoid(object)) {
7385 mutex_pause(2);
7386 }
7387 return _vm_object_lock_try(object);
7388 }
7389 void
7390 vm_object_lock_shared(vm_object_t object)
7391 {
7392 if (vm_object_lock_avoid(object)) {
7393 mutex_pause(2);
7394 }
7395 lck_rw_lock_shared(&object->Lock);
7396 }
7397
7398 boolean_t
7399 vm_object_lock_try_shared(vm_object_t object)
7400 {
7401 if (vm_object_lock_avoid(object)) {
7402 mutex_pause(2);
7403 }
7404 return (lck_rw_try_lock_shared(&object->Lock));
7405 }