1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * File: vm/vm_object.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Virtual memory object module.
65 */
66
67 #include <mach_pagemap.h>
68 #include <task_swapper.h>
69
70 #include <mach/mach_types.h>
71 #include <mach/memory_object.h>
72 #include <mach/memory_object_default.h>
73 #include <mach/memory_object_control_server.h>
74 #include <mach/vm_param.h>
75
76 #include <ipc/ipc_types.h>
77 #include <ipc/ipc_port.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/assert.h>
81 #include <kern/lock.h>
82 #include <kern/queue.h>
83 #include <kern/xpr.h>
84 #include <kern/zalloc.h>
85 #include <kern/host.h>
86 #include <kern/host_statistics.h>
87 #include <kern/processor.h>
88 #include <kern/misc_protos.h>
89
90 #include <vm/memory_object.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_protos.h>
97
98 /*
99 * Virtual memory objects maintain the actual data
100 * associated with allocated virtual memory. A given
101 * page of memory exists within exactly one object.
102 *
103 * An object is only deallocated when all "references"
104 * are given up.
105 *
106 * Associated with each object is a list of all resident
107 * memory pages belonging to that object; this list is
108 * maintained by the "vm_page" module, but locked by the object's
109 * lock.
110 *
111 * Each object also records the memory object reference
112 * that is used by the kernel to request and write
113 * back data (the memory object, field "pager"), etc...
114 *
115 * Virtual memory objects are allocated to provide
116 * zero-filled memory (vm_allocate) or map a user-defined
117 * memory object into a virtual address space (vm_map).
118 *
119 * Virtual memory objects that refer to a user-defined
120 * memory object are called "permanent", because all changes
121 * made in virtual memory are reflected back to the
122 * memory manager, which may then store it permanently.
123 * Other virtual memory objects are called "temporary",
124 * meaning that changes need be written back only when
125 * necessary to reclaim pages, and that storage associated
126 * with the object can be discarded once it is no longer
127 * mapped.
128 *
129 * A permanent memory object may be mapped into more
130 * than one virtual address space. Moreover, two threads
131 * may attempt to make the first mapping of a memory
132 * object concurrently. Only one thread is allowed to
133 * complete this mapping; all others wait until the
134 * "pager_initialized" field is asserted, indicating
135 * that the first thread has initialized all of the
136 * necessary fields in the virtual memory object structure.
137 *
138 * The kernel relies on a *default memory manager* to
139 * provide backing storage for the zero-filled virtual
140 * memory objects. The pager memory objects associated
141 * with these temporary virtual memory objects are only
142 * requested from the default memory manager when it
143 * becomes necessary. Virtual memory objects
144 * that depend on the default memory manager are called
145 * "internal". The "pager_created" field is provided to
146 * indicate whether these ports have ever been allocated.
147 *
148 * The kernel may also create virtual memory objects to
149 * hold changed pages after a copy-on-write operation.
150 * In this case, the virtual memory object (and its
151 * backing storage -- its memory object) only contain
152 * those pages that have been changed. The "shadow"
153 * field refers to the virtual memory object that contains
154 * the remainder of the contents. The "shadow_offset"
155 * field indicates where in the "shadow" these contents begin.
156 * The "copy" field refers to a virtual memory object
157 * to which changed pages must be copied before changing
158 * this object, in order to implement another form
159 * of copy-on-write optimization.
160 *
161 * The virtual memory object structure also records
162 * the attributes associated with its memory object.
163 * The "pager_ready", "can_persist" and "copy_strategy"
164 * fields represent those attributes. The "cached_list"
165 * field is used in the implementation of the persistence
166 * attribute.
167 *
168 * ZZZ Continue this comment.
169 */
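/*
 * Illustrative sketch (not part of the build): how the "shadow" and
 * "shadow_offset" fields described above are used to find the object
 * that actually holds a given page.  The real lookup is performed by
 * vm_fault_page() with proper locking and copy-on-write handling; this
 * only shows the conceptual walk down the shadow chain.
 */
#if 0
static vm_page_t
example_shadow_chain_lookup(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL)
			return m;		/* resident in this object */
		/* not resident here; translate the offset into the shadow */
		offset += object->shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;			/* zero-fill or must be paged in */
}
#endif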
170
171 /* Forward declarations for internal functions. */
172 static kern_return_t vm_object_terminate(
173 vm_object_t object);
174
175 extern void vm_object_remove(
176 vm_object_t object);
177
178 static vm_object_t vm_object_cache_trim(
179 boolean_t called_from_vm_object_deallocate);
180
181 static void vm_object_deactivate_all_pages(
182 vm_object_t object);
183
184 static kern_return_t vm_object_copy_call(
185 vm_object_t src_object,
186 vm_object_offset_t src_offset,
187 vm_object_size_t size,
188 vm_object_t *_result_object);
189
190 static void vm_object_do_collapse(
191 vm_object_t object,
192 vm_object_t backing_object);
193
194 static void vm_object_do_bypass(
195 vm_object_t object,
196 vm_object_t backing_object);
197
198 static void vm_object_release_pager(
199 memory_object_t pager);
200
201 static zone_t vm_object_zone; /* vm backing store zone */
202
203 /*
204 * All wired-down kernel memory belongs to a single virtual
205 * memory object (kernel_object) to avoid wasting data structures.
206 */
207 static struct vm_object kernel_object_store;
208 __private_extern__ vm_object_t kernel_object = &kernel_object_store;
209
210 /*
211 * The submap object is used as a placeholder for vm_map_submap
212 * operations. The object is declared in vm_map.c because it
213 * is exported by the vm_map module. The storage is declared
214 * here because it must be initialized here.
215 */
216 static struct vm_object vm_submap_object_store;
217
218 /*
219 * Virtual memory objects are initialized from
220 * a template (see vm_object_allocate).
221 *
222 * When adding a new field to the virtual memory
223 * object structure, be sure to add initialization
224 * (see _vm_object_allocate()).
225 */
226 static struct vm_object vm_object_template;
227
228 /*
229 * Virtual memory objects that are not referenced by
230 * any address maps, but that are allowed to persist
231 * (an attribute specified by the associated memory manager),
232 * are kept in a queue (vm_object_cached_list).
233 *
234 * When an object from this queue is referenced again,
235 * for example to make another address space mapping,
236 * it must be removed from the queue. That is, the
237 * queue contains *only* objects with zero references.
238 *
239 * The kernel may choose to terminate objects from this
240 * queue in order to reclaim storage. The current policy
241 * is to permit a fixed maximum number of unreferenced
242 * objects (vm_object_cached_max).
243 *
244 * A spin lock (accessed by routines
245 * vm_object_cache_{lock,lock_try,unlock}) governs the
246 * object cache. It must be held when objects are
247 * added to or removed from the cache (in vm_object_terminate).
248 * The routines that acquire a reference to a virtual
249 * memory object based on one of the memory object ports
250 * must also lock the cache.
251 *
252 * Ideally, the object cache should be more isolated
253 * from the reference mechanism, so that the lock need
254 * not be held to make simple references.
255 */
256 static queue_head_t vm_object_cached_list;
257 static int vm_object_cached_count=0;
258 static int vm_object_cached_high; /* highest # cached objects */
259 static int vm_object_cached_max = 512; /* may be patched */
260
261 static decl_mutex_data(,vm_object_cached_lock_data)
262
263 #define vm_object_cache_lock() \
264 mutex_lock(&vm_object_cached_lock_data)
265 #define vm_object_cache_lock_try() \
266 mutex_try(&vm_object_cached_lock_data)
267 #define vm_object_cache_unlock() \
268 mutex_unlock(&vm_object_cached_lock_data)
269
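/*
 * Sketch of the lock ordering described above (illustrative only; "object"
 * stands for the object being deallocated): the cache lock is taken first
 * and the object lock is only try-locked underneath it, exactly as
 * vm_object_deallocate() does below, so that we never deadlock against a
 * thread that holds an object lock and wants the cache lock.
 */
#if 0
	for (;;) {
		vm_object_cache_lock();
		if (vm_object_lock_try(object))
			break;			/* holding both locks */
		vm_object_cache_unlock();
		mutex_pause();			/* back off, then retry */
	}
#endif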
270 #define VM_OBJECT_HASH_COUNT 1024
271 static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
272 static struct zone *vm_object_hash_zone;
273
274 struct vm_object_hash_entry {
275 queue_chain_t hash_link; /* hash chain link */
276 memory_object_t pager; /* pager we represent */
277 vm_object_t object; /* corresponding object */
278 boolean_t waiting; /* someone waiting for
279 * termination */
280 };
281
282 typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
283 #define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
284
285 #define VM_OBJECT_HASH_SHIFT 8
286 #define vm_object_hash(pager) \
287 ((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)
288
289 void vm_object_hash_entry_free(
290 vm_object_hash_entry_t entry);
291
292 /*
293 * vm_object_hash_lookup looks up a pager in the hashtable
294 * and returns the corresponding entry, with optional removal.
295 */
296
297 static vm_object_hash_entry_t
298 vm_object_hash_lookup(
299 memory_object_t pager,
300 boolean_t remove_entry)
301 {
302 register queue_t bucket;
303 register vm_object_hash_entry_t entry;
304
305 bucket = &vm_object_hashtable[vm_object_hash(pager)];
306
307 entry = (vm_object_hash_entry_t)queue_first(bucket);
308 while (!queue_end(bucket, (queue_entry_t)entry)) {
309 if (entry->pager == pager && !remove_entry)
310 return(entry);
311 else if (entry->pager == pager) {
312 queue_remove(bucket, entry,
313 vm_object_hash_entry_t, hash_link);
314 return(entry);
315 }
316
317 entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
318 }
319
320 return(VM_OBJECT_HASH_ENTRY_NULL);
321 }
322
323 /*
324 * vm_object_hash_insert enters the specified
325 * pager / cache object association in the hashtable.
326 */
327
328 static void
329 vm_object_hash_insert(
330 vm_object_hash_entry_t entry)
331 {
332 register queue_t bucket;
333
334 bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
335
336 queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
337 }
338
339 static vm_object_hash_entry_t
340 vm_object_hash_entry_alloc(
341 memory_object_t pager)
342 {
343 vm_object_hash_entry_t entry;
344
345 entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
346 entry->pager = pager;
347 entry->object = VM_OBJECT_NULL;
348 entry->waiting = FALSE;
349
350 return(entry);
351 }
352
353 void
354 vm_object_hash_entry_free(
355 vm_object_hash_entry_t entry)
356 {
357 zfree(vm_object_hash_zone, entry);
358 }
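/*
 * Usage sketch (illustrative, not part of the build): how the helpers
 * above pair up to map a pager to its vm_object.  The real users
 * (vm_object_enter() and friends) do this while holding the object
 * cache lock, which is assumed here as well.
 */
#if 0
static vm_object_t
example_object_for_pager(
	memory_object_t		pager)
{
	vm_object_hash_entry_t	entry;

	entry = vm_object_hash_lookup(pager, FALSE);	/* look up, don't remove */
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		return entry->object;	/* may still be VM_OBJECT_NULL while initializing */

	/* no association yet: record one so later lookups find it */
	entry = vm_object_hash_entry_alloc(pager);
	vm_object_hash_insert(entry);
	return VM_OBJECT_NULL;
}
#endif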
359
360 /*
361 * vm_object_allocate:
362 *
363 * Returns a new object with the given size.
364 */
365
366 __private_extern__ void
367 _vm_object_allocate(
368 vm_object_size_t size,
369 vm_object_t object)
370 {
371 XPR(XPR_VM_OBJECT,
372 "vm_object_allocate, object 0x%X size 0x%X\n",
373 (integer_t)object, size, 0,0,0);
374
375 *object = vm_object_template;
376 queue_init(&object->memq);
377 queue_init(&object->msr_q);
378 #ifdef UPL_DEBUG
379 queue_init(&object->uplq);
380 #endif /* UPL_DEBUG */
381 vm_object_lock_init(object);
382 object->size = size;
383 }
384
385 __private_extern__ vm_object_t
386 vm_object_allocate(
387 vm_object_size_t size)
388 {
389 register vm_object_t object;
390
391 object = (vm_object_t) zalloc(vm_object_zone);
392
393 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
394
395 if (object != VM_OBJECT_NULL)
396 _vm_object_allocate(size, object);
397
398 return object;
399 }
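/*
 * Usage sketch (illustrative, not part of the build): a typical client
 * allocates a temporary object sized in bytes and later gives up its
 * reference with vm_object_deallocate(); whether the object is then
 * cached or terminated depends on "can_persist".
 */
#if 0
static void
example_allocate_and_release(void)
{
	vm_object_t	object;

	object = vm_object_allocate((vm_object_size_t)(4 * PAGE_SIZE));
	if (object == VM_OBJECT_NULL)
		return;				/* zone allocation failed */

	/* ... insert pages, map it, etc. ... */

	vm_object_deallocate(object);		/* drop the allocation reference */
}
#endif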
400
401 /*
402 * vm_object_bootstrap:
403 *
404 * Initialize the VM objects module.
405 */
406 __private_extern__ void
407 vm_object_bootstrap(void)
408 {
409 register int i;
410
411 vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
412 round_page_32(512*1024),
413 round_page_32(12*1024),
414 "vm objects");
415
416 queue_init(&vm_object_cached_list);
417 mutex_init(&vm_object_cached_lock_data, 0);
418
419 vm_object_hash_zone =
420 zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
421 round_page_32(512*1024),
422 round_page_32(12*1024),
423 "vm object hash entries");
424
425 for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
426 queue_init(&vm_object_hashtable[i]);
427
428 /*
429 * Fill in a template object, for quick initialization
430 */
431
432 /* memq; Lock; init after allocation */
433 vm_object_template.size = 0;
434 vm_object_template.memq_hint = VM_PAGE_NULL;
435 vm_object_template.ref_count = 1;
436 #if TASK_SWAPPER
437 vm_object_template.res_count = 1;
438 #endif /* TASK_SWAPPER */
439 vm_object_template.resident_page_count = 0;
440 vm_object_template.copy = VM_OBJECT_NULL;
441 vm_object_template.shadow = VM_OBJECT_NULL;
442 vm_object_template.shadow_offset = (vm_object_offset_t) 0;
443 vm_object_template.cow_hint = ~(vm_offset_t)0;
444 vm_object_template.true_share = FALSE;
445
446 vm_object_template.pager = MEMORY_OBJECT_NULL;
447 vm_object_template.paging_offset = 0;
448 vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
449 /* msr_q; init after allocation */
450
451 vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
452 vm_object_template.absent_count = 0;
453 vm_object_template.paging_in_progress = 0;
454
455 /* Begin bitfields */
456 vm_object_template.all_wanted = 0; /* all bits FALSE */
457 vm_object_template.pager_created = FALSE;
458 vm_object_template.pager_initialized = FALSE;
459 vm_object_template.pager_ready = FALSE;
460 vm_object_template.pager_trusted = FALSE;
461 vm_object_template.can_persist = FALSE;
462 vm_object_template.internal = TRUE;
463 vm_object_template.temporary = TRUE;
464 vm_object_template.private = FALSE;
465 vm_object_template.pageout = FALSE;
466 vm_object_template.alive = TRUE;
467 vm_object_template.purgable = VM_OBJECT_NONPURGABLE;
468 vm_object_template.silent_overwrite = FALSE;
469 vm_object_template.advisory_pageout = FALSE;
470 vm_object_template.shadowed = FALSE;
471 vm_object_template.terminating = FALSE;
472 vm_object_template.shadow_severed = FALSE;
473 vm_object_template.phys_contiguous = FALSE;
474 vm_object_template.nophyscache = FALSE;
475 /* End bitfields */
476
477 /* cache bitfields */
478 vm_object_template.wimg_bits = VM_WIMG_DEFAULT;
479
480 /* cached_list; init after allocation */
481 vm_object_template.last_alloc = (vm_object_offset_t) 0;
482 vm_object_template.cluster_size = 0;
483 #if MACH_PAGEMAP
484 vm_object_template.existence_map = VM_EXTERNAL_NULL;
485 #endif /* MACH_PAGEMAP */
486 #if MACH_ASSERT
487 vm_object_template.paging_object = VM_OBJECT_NULL;
488 #endif /* MACH_ASSERT */
489
490 /*
491 * Initialize the "kernel object"
492 */
493
494 kernel_object = &kernel_object_store;
495
496 /*
497 * Note that in the following size specifications, we need to add 1 because
498 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
499 */
500
501 #ifdef ppc
502 _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
503 kernel_object);
504 #else
505 _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
506 kernel_object);
507 #endif
508 kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
509
510 /*
511 * Initialize the "submap object". Make it as large as the
512 * kernel object so that no limit is imposed on submap sizes.
513 */
514
515 vm_submap_object = &vm_submap_object_store;
516 #ifdef ppc
517 _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
518 vm_submap_object);
519 #else
520 _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
521 vm_submap_object);
522 #endif
523 vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
524
525 /*
526 * Create an "extra" reference to this object so that we never
527 * try to deallocate it; zfree doesn't like to be called with
528 * non-zone memory.
529 */
530 vm_object_reference(vm_submap_object);
531
532 #if MACH_PAGEMAP
533 vm_external_module_initialize();
534 #endif /* MACH_PAGEMAP */
535 }
536
537 __private_extern__ void
538 vm_object_init(void)
539 {
540 /*
541 * Finish initializing the kernel object.
542 */
543 }
544
545 /* remove the typedef below when emergency work-around is taken out */
546 typedef struct vnode_pager {
547 memory_object_t pager;
548 memory_object_t pager_handle; /* pager */
549 memory_object_control_t control_handle; /* memory object's control handle */
550 void *vnode_handle; /* vnode handle */
551 } *vnode_pager_t;
552
553 #define MIGHT_NOT_CACHE_SHADOWS 1
554 #if MIGHT_NOT_CACHE_SHADOWS
555 static int cache_shadows = TRUE;
556 #endif /* MIGHT_NOT_CACHE_SHADOWS */
557
558 /*
559 * vm_object_deallocate:
560 *
561 * Release a reference to the specified object,
562 * gained either through a vm_object_allocate
563 * or a vm_object_reference call. When all references
564 * are gone, storage associated with this object
565 * may be relinquished.
566 *
567 * No object may be locked.
568 */
569 __private_extern__ void
570 vm_object_deallocate(
571 register vm_object_t object)
572 {
573 boolean_t retry_cache_trim = FALSE;
574 vm_object_t shadow = VM_OBJECT_NULL;
575
576 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
577 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
578
579
580 while (object != VM_OBJECT_NULL) {
581
582 /*
583 * The cache holds a reference (uncounted) to
584 * the object; we must lock it before removing
585 * the object.
586 */
587 for (;;) {
588 vm_object_cache_lock();
589
590 /*
591 * if we try to take a regular lock here
592 * we risk deadlocking against someone
593 * holding a lock on this object while
594 * trying to vm_object_deallocate a different
595 * object
596 */
597 if (vm_object_lock_try(object))
598 break;
599 vm_object_cache_unlock();
600 mutex_pause(); /* wait a bit */
601 }
602 assert(object->ref_count > 0);
603
604 /*
605 * If the object has a named reference, and only
606 * that reference would remain, inform the pager
607 * about the last "mapping" reference going away.
608 */
609 if ((object->ref_count == 2) && (object->named)) {
610 memory_object_t pager = object->pager;
611
612 /* Notify the Pager that there are no */
613 /* more mappers for this object */
614
615 if (pager != MEMORY_OBJECT_NULL) {
616 vm_object_unlock(object);
617 vm_object_cache_unlock();
618
619 memory_object_unmap(pager);
620
621 for (;;) {
622 vm_object_cache_lock();
623
624 /*
625 * if we try to take a regular lock here
626 * we risk deadlocking against someone
627 * holding a lock on this object while
628 * trying to vm_object_deallocate a different
629 * object
630 */
631 if (vm_object_lock_try(object))
632 break;
633 vm_object_cache_unlock();
634 mutex_pause(); /* wait a bit */
635 }
636 assert(object->ref_count > 0);
637 }
638 }
639
640 /*
641 * Lose the reference. If other references
642 * remain, then we are done, unless we need
643 * to retry a cache trim.
644 * If it is the last reference, then keep it
645 * until any pending initialization is completed.
646 */
647
648 /* if the object is terminating, it cannot go into */
649 /* the cache and we obviously should not call */
650 /* terminate again. */
651
652 if ((object->ref_count > 1) || object->terminating) {
653 object->ref_count--;
654 vm_object_res_deallocate(object);
655 vm_object_cache_unlock();
656
657 if (object->ref_count == 1 &&
658 object->shadow != VM_OBJECT_NULL) {
659 /*
660 * We don't use this VM object anymore. We
661 * would like to collapse it into its parent(s),
662 * but we don't have any pointers back to these
663 * parent object(s).
664 * But we can try and collapse this object with
665 * its own shadows, in case these are useless
666 * too...
667 */
668 vm_object_collapse(object, 0);
669 }
670
671 vm_object_unlock(object);
672 if (retry_cache_trim &&
673 ((object = vm_object_cache_trim(TRUE)) !=
674 VM_OBJECT_NULL)) {
675 continue;
676 }
677 return;
678 }
679
680 /*
681 * We have to wait for initialization
682 * before destroying or caching the object.
683 */
684
685 if (object->pager_created && ! object->pager_initialized) {
686 assert(! object->can_persist);
687 vm_object_assert_wait(object,
688 VM_OBJECT_EVENT_INITIALIZED,
689 THREAD_UNINT);
690 vm_object_unlock(object);
691 vm_object_cache_unlock();
692 thread_block(THREAD_CONTINUE_NULL);
693 continue;
694 }
695
696 /*
697 * If this object can persist, then enter it in
698 * the cache. Otherwise, terminate it.
699 *
700 * NOTE: Only permanent objects are cached, and
701 * permanent objects cannot have shadows. This
702 * affects the residence counting logic in a minor
703 * way (can do it in-line, mostly).
704 */
705
706 if ((object->can_persist) && (object->alive)) {
707 /*
708 * Now it is safe to decrement reference count,
709 * and to return if reference count is > 0.
710 */
711 if (--object->ref_count > 0) {
712 vm_object_res_deallocate(object);
713 vm_object_unlock(object);
714 vm_object_cache_unlock();
715 if (retry_cache_trim &&
716 ((object = vm_object_cache_trim(TRUE)) !=
717 VM_OBJECT_NULL)) {
718 continue;
719 }
720 return;
721 }
722
723 #if MIGHT_NOT_CACHE_SHADOWS
724 /*
725 * Remove shadow now if we don't
726 * want to cache shadows.
727 */
728 if (! cache_shadows) {
729 shadow = object->shadow;
730 object->shadow = VM_OBJECT_NULL;
731 }
732 #endif /* MIGHT_NOT_CACHE_SHADOWS */
733
734 /*
735 * Enter the object onto the queue of
736 * cached objects, and deactivate
737 * all of its pages.
738 */
739 assert(object->shadow == VM_OBJECT_NULL);
740 VM_OBJ_RES_DECR(object);
741 XPR(XPR_VM_OBJECT,
742 "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
743 (integer_t)object,
744 (integer_t)vm_object_cached_list.next,
745 (integer_t)vm_object_cached_list.prev,0,0);
746
747 vm_object_cached_count++;
748 if (vm_object_cached_count > vm_object_cached_high)
749 vm_object_cached_high = vm_object_cached_count;
750 queue_enter(&vm_object_cached_list, object,
751 vm_object_t, cached_list);
752 vm_object_cache_unlock();
753 vm_object_deactivate_all_pages(object);
754 vm_object_unlock(object);
755
756 #if MIGHT_NOT_CACHE_SHADOWS
757 /*
758 * If we have a shadow that we need
759 * to deallocate, do so now, remembering
760 * to trim the cache later.
761 */
762 if (! cache_shadows && shadow != VM_OBJECT_NULL) {
763 object = shadow;
764 retry_cache_trim = TRUE;
765 continue;
766 }
767 #endif /* MIGHT_NOT_CACHE_SHADOWS */
768
769 /*
770 * Trim the cache. If the cache trim
771 * returns with a shadow for us to deallocate,
772 * then remember to retry the cache trim
773 * when we are done deallocating the shadow.
774 * Otherwise, we are done.
775 */
776
777 object = vm_object_cache_trim(TRUE);
778 if (object == VM_OBJECT_NULL) {
779 return;
780 }
781 retry_cache_trim = TRUE;
782
783 } else {
784 /*
785 * This object is not cachable; terminate it.
786 */
787 XPR(XPR_VM_OBJECT,
788 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
789 (integer_t)object, object->resident_page_count,
790 object->paging_in_progress,
791 (void *)current_thread(),object->ref_count);
792
793 VM_OBJ_RES_DECR(object); /* XXX ? */
794 /*
795 * Terminate this object. If it had a shadow,
796 * then deallocate it; otherwise, if we need
797 * to retry a cache trim, do so now; otherwise,
798 * we are done. "pageout" objects have a shadow,
799 * but maintain a "paging reference" rather than
800 * a normal reference.
801 */
802 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
803 if(vm_object_terminate(object) != KERN_SUCCESS) {
804 return;
805 }
806 if (shadow != VM_OBJECT_NULL) {
807 object = shadow;
808 continue;
809 }
810 if (retry_cache_trim &&
811 ((object = vm_object_cache_trim(TRUE)) !=
812 VM_OBJECT_NULL)) {
813 continue;
814 }
815 return;
816 }
817 }
818 assert(! retry_cache_trim);
819 }
820
821 /*
822 * Check to see whether we really need to trim
823 * down the cache. If so, remove an object from
824 * the cache, terminate it, and repeat.
825 *
826 * Called with, and returns with, cache lock unlocked.
827 */
828 vm_object_t
829 vm_object_cache_trim(
830 boolean_t called_from_vm_object_deallocate)
831 {
832 register vm_object_t object = VM_OBJECT_NULL;
833 vm_object_t shadow;
834
835 for (;;) {
836
837 /*
838 * If we no longer need to trim the cache,
839 * then we are done.
840 */
841
842 vm_object_cache_lock();
843 if (vm_object_cached_count <= vm_object_cached_max) {
844 vm_object_cache_unlock();
845 return VM_OBJECT_NULL;
846 }
847
848 /*
849 * We must trim down the cache, so remove
850 * the first object in the cache.
851 */
852 XPR(XPR_VM_OBJECT,
853 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
854 (integer_t)vm_object_cached_list.next,
855 (integer_t)vm_object_cached_list.prev, 0, 0, 0);
856
857 object = (vm_object_t) queue_first(&vm_object_cached_list);
858 if(object == (vm_object_t) &vm_object_cached_list) {
859 /* something's wrong with the calling parameter or */
860 /* the value of vm_object_cached_count, just fix */
861 /* and return */
862 if(vm_object_cached_max < 0)
863 vm_object_cached_max = 0;
864 vm_object_cached_count = 0;
865 vm_object_cache_unlock();
866 return VM_OBJECT_NULL;
867 }
868 vm_object_lock(object);
869 queue_remove(&vm_object_cached_list, object, vm_object_t,
870 cached_list);
871 vm_object_cached_count--;
872
873 /*
874 * Since this object is in the cache, we know
875 * that it is initialized and has no references.
876 * Take a reference to avoid recursive deallocations.
877 */
878
879 assert(object->pager_initialized);
880 assert(object->ref_count == 0);
881 object->ref_count++;
882
883 /*
884 * Terminate the object.
885 * If the object had a shadow, we let vm_object_deallocate
886 * deallocate it. "pageout" objects have a shadow, but
887 * maintain a "paging reference" rather than a normal
888 * reference.
889 * (We are careful here to limit recursion.)
890 */
891 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
892 if(vm_object_terminate(object) != KERN_SUCCESS)
893 continue;
894 if (shadow != VM_OBJECT_NULL) {
895 if (called_from_vm_object_deallocate) {
896 return shadow;
897 } else {
898 vm_object_deallocate(shadow);
899 }
900 }
901 }
902 }
903
904 boolean_t vm_object_terminate_remove_all = FALSE;
905
906 /*
907 * Routine: vm_object_terminate
908 * Purpose:
909 * Free all resources associated with a vm_object.
910 * In/out conditions:
911 * Upon entry, the object must be locked,
912 * and the object must have exactly one reference.
913 *
914 * The shadow object reference is left alone.
915 *
916 * The object must be unlocked if it is found that pages
917 * must be flushed to a backing object. If someone
918 * manages to map the object while it is being flushed,
919 * the object is returned unlocked and unchanged. Otherwise,
920 * upon exit, the cache will be unlocked, and the
921 * object will cease to exist.
922 */
923 static kern_return_t
924 vm_object_terminate(
925 register vm_object_t object)
926 {
927 memory_object_t pager;
928 register vm_page_t p;
929 vm_object_t shadow_object;
930
931 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
932 (integer_t)object, object->ref_count, 0, 0, 0);
933
934 if (!object->pageout && (!object->temporary || object->can_persist)
935 && (object->pager != NULL || object->shadow_severed)) {
936 vm_object_cache_unlock();
937 while (!queue_empty(&object->memq)) {
938 /*
939 * Clear pager_trusted bit so that the pages get yanked
940 * out of the object instead of cleaned in place. This
941 * prevents a deadlock in XMM and makes more sense anyway.
942 */
943 object->pager_trusted = FALSE;
944
945 p = (vm_page_t) queue_first(&object->memq);
946
947 VM_PAGE_CHECK(p);
948
949 if (p->busy || p->cleaning) {
950 if(p->cleaning || p->absent) {
951 vm_object_paging_wait(object, THREAD_UNINT);
952 continue;
953 } else {
954 panic("vm_object_terminate.3 0x%x 0x%x", object, p);
955 }
956 }
957
958 vm_page_lock_queues();
959 p->busy = TRUE;
960 VM_PAGE_QUEUES_REMOVE(p);
961 vm_page_unlock_queues();
962
963 if (p->absent || p->private) {
964
965 /*
966 * For private pages, VM_PAGE_FREE just
967 * leaves the page structure around for
968 * its owner to clean up. For absent
969 * pages, the structure is returned to
970 * the appropriate pool.
971 */
972
973 goto free_page;
974 }
975
976 if (p->fictitious)
977 panic("vm_object_terminate.4 0x%x 0x%x", object, p);
978
979 if (!p->dirty)
980 p->dirty = pmap_is_modified(p->phys_page);
981
982 if ((p->dirty || p->precious) && !p->error && object->alive) {
983 vm_pageout_cluster(p); /* flush page */
984 vm_object_paging_wait(object, THREAD_UNINT);
985 XPR(XPR_VM_OBJECT,
986 "vm_object_terminate restart, object 0x%X ref %d\n",
987 (integer_t)object, object->ref_count, 0, 0, 0);
988 } else {
989 free_page:
990 VM_PAGE_FREE(p);
991 }
992 }
993 vm_object_unlock(object);
994 vm_object_cache_lock();
995 vm_object_lock(object);
996 }
997
998 /*
999 * Make sure the object isn't already being terminated
1000 */
1001 if(object->terminating) {
1002 object->ref_count -= 1;
1003 assert(object->ref_count > 0);
1004 vm_object_cache_unlock();
1005 vm_object_unlock(object);
1006 return KERN_FAILURE;
1007 }
1008
1009 /*
1010 * Did somebody get a reference to the object while we were
1011 * cleaning it?
1012 */
1013 if(object->ref_count != 1) {
1014 object->ref_count -= 1;
1015 assert(object->ref_count > 0);
1016 vm_object_res_deallocate(object);
1017 vm_object_cache_unlock();
1018 vm_object_unlock(object);
1019 return KERN_FAILURE;
1020 }
1021
1022 /*
1023 * Make sure no one can look us up now.
1024 */
1025
1026 object->terminating = TRUE;
1027 object->alive = FALSE;
1028 vm_object_remove(object);
1029
1030 /*
1031 * Detach the object from its shadow if we are the shadow's
1032 * copy. The reference we hold on the shadow must be dropped
1033 * by our caller.
1034 */
1035 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1036 !(object->pageout)) {
1037 vm_object_lock(shadow_object);
1038 if (shadow_object->copy == object)
1039 shadow_object->copy = VM_OBJECT_NULL;
1040 vm_object_unlock(shadow_object);
1041 }
1042
1043 /*
1044 * The pageout daemon might be playing with our pages.
1045 * Now that the object is dead, it won't touch any more
1046 * pages, but some pages might already be on their way out.
1047 * Hence, we wait until the active paging activities have ceased
1048 * before we break the association with the pager itself.
1049 */
1050 while (object->paging_in_progress != 0) {
1051 vm_object_cache_unlock();
1052 vm_object_wait(object,
1053 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1054 THREAD_UNINT);
1055 vm_object_cache_lock();
1056 vm_object_lock(object);
1057 }
1058
1059 pager = object->pager;
1060 object->pager = MEMORY_OBJECT_NULL;
1061
1062 if (pager != MEMORY_OBJECT_NULL)
1063 memory_object_control_disable(object->pager_control);
1064 vm_object_cache_unlock();
1065
1066 object->ref_count--;
1067 #if TASK_SWAPPER
1068 assert(object->res_count == 0);
1069 #endif /* TASK_SWAPPER */
1070
1071 assert (object->ref_count == 0);
1072
1073 /*
1074 * Clean or free the pages, as appropriate.
1075 * It is possible for us to find busy/absent pages,
1076 * if some faults on this object were aborted.
1077 */
1078 if (object->pageout) {
1079 assert(shadow_object != VM_OBJECT_NULL);
1080 assert(shadow_object == object->shadow);
1081
1082 vm_pageout_object_terminate(object);
1083
1084 } else if ((object->temporary && !object->can_persist) ||
1085 (pager == MEMORY_OBJECT_NULL)) {
1086 while (!queue_empty(&object->memq)) {
1087 p = (vm_page_t) queue_first(&object->memq);
1088
1089 VM_PAGE_CHECK(p);
1090 VM_PAGE_FREE(p);
1091 }
1092 } else if (!queue_empty(&object->memq)) {
1093 panic("vm_object_terminate: queue just emptied isn't");
1094 }
1095
1096 assert(object->paging_in_progress == 0);
1097 assert(object->ref_count == 0);
1098
1099 /*
1100 * If the pager has not already been released by
1101 * vm_object_destroy, we need to terminate it and
1102 * release our reference to it here.
1103 */
1104 if (pager != MEMORY_OBJECT_NULL) {
1105 vm_object_unlock(object);
1106 vm_object_release_pager(pager);
1107 vm_object_lock(object);
1108 }
1109
1110 /* kick off anyone waiting on terminating */
1111 object->terminating = FALSE;
1112 vm_object_paging_begin(object);
1113 vm_object_paging_end(object);
1114 vm_object_unlock(object);
1115
1116 #if MACH_PAGEMAP
1117 vm_external_destroy(object->existence_map, object->size);
1118 #endif /* MACH_PAGEMAP */
1119
1120 /*
1121 * Free the space for the object.
1122 */
1123 zfree(vm_object_zone, object);
1124 return KERN_SUCCESS;
1125 }
1126
1127 /*
1128 * Routine: vm_object_pager_wakeup
1129 * Purpose: Wake up anyone waiting for termination of a pager.
1130 */
1131
1132 static void
1133 vm_object_pager_wakeup(
1134 memory_object_t pager)
1135 {
1136 vm_object_hash_entry_t entry;
1137 boolean_t waiting = FALSE;
1138
1139 /*
1140 * If anyone was waiting for the memory_object_terminate
1141 * to be queued, wake them up now.
1142 */
1143 vm_object_cache_lock();
1144 entry = vm_object_hash_lookup(pager, TRUE);
1145 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
1146 waiting = entry->waiting;
1147 vm_object_cache_unlock();
1148 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
1149 if (waiting)
1150 thread_wakeup((event_t) pager);
1151 vm_object_hash_entry_free(entry);
1152 }
1153 }
1154
1155 /*
1156 * Routine: vm_object_release_pager
1157 * Purpose: Terminate the pager and, upon completion,
1158 * release our last reference to it.
1159 * just like memory_object_terminate, except
1160 * that we wake up anyone blocked in vm_object_enter
1161 * This is just like memory_object_terminate, except
1162 * that we wake up anyone blocked in vm_object_enter
1163 * waiting for the termination message to be queued
1164 static void
1165 vm_object_release_pager(
1166 memory_object_t pager)
1167 {
1168
1169 /*
1170 * Terminate the pager.
1171 */
1172
1173 (void) memory_object_terminate(pager);
1174
1175 /*
1176 * Wakeup anyone waiting for this terminate
1177 */
1178 vm_object_pager_wakeup(pager);
1179
1180 /*
1181 * Release reference to pager.
1182 */
1183 memory_object_deallocate(pager);
1184 }
1185
1186 /*
1187 * Routine: vm_object_destroy
1188 * Purpose:
1189 * Shut down a VM object, despite the
1190 * presence of address map (or other) references
1191 * to the vm_object.
1192 */
1193 kern_return_t
1194 vm_object_destroy(
1195 vm_object_t object,
1196 __unused kern_return_t reason)
1197 {
1198 memory_object_t old_pager;
1199
1200 if (object == VM_OBJECT_NULL)
1201 return(KERN_SUCCESS);
1202
1203 /*
1204 * Remove the pager association immediately.
1205 *
1206 * This will prevent the memory manager from further
1207 * meddling. [If it wanted to flush data or make
1208 * other changes, it should have done so before performing
1209 * the destroy call.]
1210 */
1211
1212 vm_object_cache_lock();
1213 vm_object_lock(object);
1214 object->can_persist = FALSE;
1215 object->named = FALSE;
1216 object->alive = FALSE;
1217
1218 /*
1219 * Rip out the pager from the vm_object now...
1220 */
1221
1222 vm_object_remove(object);
1223 old_pager = object->pager;
1224 object->pager = MEMORY_OBJECT_NULL;
1225 if (old_pager != MEMORY_OBJECT_NULL)
1226 memory_object_control_disable(object->pager_control);
1227 vm_object_cache_unlock();
1228
1229 /*
1230 * Wait for the existing paging activity (that got
1231 * through before we nulled out the pager) to subside.
1232 */
1233
1234 vm_object_paging_wait(object, THREAD_UNINT);
1235 vm_object_unlock(object);
1236
1237 /*
1238 * Terminate the object now.
1239 */
1240 if (old_pager != MEMORY_OBJECT_NULL) {
1241 vm_object_release_pager(old_pager);
1242
1243 /*
1244 * JMM - Release the caller's reference. This assumes the
1245 * caller had a reference to release, which is a big (but
1246 * currently valid) assumption if this is driven from the
1247 * vnode pager (it is holding a named reference when making
1248 * this call).
1249 */
1250 vm_object_deallocate(object);
1251
1252 }
1253 return(KERN_SUCCESS);
1254 }
1255
1256 /*
1257 * vm_object_deactivate_pages
1258 *
1259 * Deactivate all pages in the specified object. (Keep its pages
1260 * in memory even though it is no longer referenced.)
1261 *
1262 * The object must be locked.
1263 */
1264 static void
1265 vm_object_deactivate_all_pages(
1266 register vm_object_t object)
1267 {
1268 register vm_page_t p;
1269
1270 queue_iterate(&object->memq, p, vm_page_t, listq) {
1271 vm_page_lock_queues();
1272 if (!p->busy)
1273 vm_page_deactivate(p);
1274 vm_page_unlock_queues();
1275 }
1276 }
1277
1278 __private_extern__ void
1279 vm_object_deactivate_pages(
1280 vm_object_t object,
1281 vm_object_offset_t offset,
1282 vm_object_size_t size,
1283 boolean_t kill_page)
1284 {
1285 vm_object_t orig_object;
1286 int pages_moved = 0;
1287 int pages_found = 0;
1288
1289 /*
1290 * entered with object lock held, acquire a paging reference to
1291 * prevent the memory_object and control ports from
1292 * being destroyed.
1293 */
1294 orig_object = object;
1295
1296 for (;;) {
1297 register vm_page_t m;
1298 vm_object_offset_t toffset;
1299 vm_object_size_t tsize;
1300
1301 vm_object_paging_begin(object);
1302 vm_page_lock_queues();
1303
1304 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1305
1306 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1307
1308 pages_found++;
1309
1310 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1311
1312 assert(!m->laundry);
1313
1314 m->reference = FALSE;
1315 pmap_clear_reference(m->phys_page);
1316
1317 if ((kill_page) && (object->internal)) {
1318 m->precious = FALSE;
1319 m->dirty = FALSE;
1320 pmap_clear_modify(m->phys_page);
1321 vm_external_state_clr(object->existence_map, offset);
1322 }
1323 VM_PAGE_QUEUES_REMOVE(m);
1324
1325 assert(!m->laundry);
1326 assert(m->object != kernel_object);
1327 assert(m->pageq.next == NULL &&
1328 m->pageq.prev == NULL);
1329 if(m->zero_fill) {
1330 queue_enter_first(
1331 &vm_page_queue_zf,
1332 m, vm_page_t, pageq);
1333 } else {
1334 queue_enter_first(
1335 &vm_page_queue_inactive,
1336 m, vm_page_t, pageq);
1337 }
1338
1339 m->inactive = TRUE;
1340 if (!m->fictitious)
1341 vm_page_inactive_count++;
1342
1343 pages_moved++;
1344 }
1345 }
1346 }
1347 vm_page_unlock_queues();
1348 vm_object_paging_end(object);
1349
1350 if (object->shadow) {
1351 vm_object_t tmp_object;
1352
1353 kill_page = 0;
1354
1355 offset += object->shadow_offset;
1356
1357 tmp_object = object->shadow;
1358 vm_object_lock(tmp_object);
1359
1360 if (object != orig_object)
1361 vm_object_unlock(object);
1362 object = tmp_object;
1363 } else
1364 break;
1365 }
1366 if (object != orig_object)
1367 vm_object_unlock(object);
1368 }
1369
1370 /*
1371 * Routine: vm_object_pmap_protect
1372 *
1373 * Purpose:
1374 * Reduces the permission for all physical
1375 * pages in the specified object range.
1376 *
1377 * If removing write permission only, it is
1378 * sufficient to protect only the pages in
1379 * the top-level object; only those pages may
1380 * have write permission.
1381 *
1382 * If removing all access, we must follow the
1383 * shadow chain from the top-level object to
1384 * remove access to all pages in shadowed objects.
1385 *
1386 * The object must *not* be locked. The object must
1387 * be temporary/internal.
1388 *
1389 * If pmap is not NULL, this routine assumes that
1390 * the only mappings for the pages are in that
1391 * pmap.
1392 */
1393
1394 __private_extern__ void
1395 vm_object_pmap_protect(
1396 register vm_object_t object,
1397 register vm_object_offset_t offset,
1398 vm_object_size_t size,
1399 pmap_t pmap,
1400 vm_map_offset_t pmap_start,
1401 vm_prot_t prot)
1402 {
1403 if (object == VM_OBJECT_NULL)
1404 return;
1405 size = vm_object_round_page(size);
1406 offset = vm_object_trunc_page(offset);
1407
1408 vm_object_lock(object);
1409
1410 assert(object->internal);
1411
1412 while (TRUE) {
1413 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
1414 vm_object_unlock(object);
1415 pmap_protect(pmap, pmap_start, pmap_start + size, prot);
1416 return;
1417 }
1418
1419 /* if we are doing large ranges with respect to resident */
1420 /* page count then we should iterate over pages; otherwise */
1421 /* inverse page look-up will be faster */
1422 if (ptoa_64(object->resident_page_count / 4) < size) {
1423 vm_page_t p;
1424 vm_object_offset_t end;
1425
1426 end = offset + size;
1427
1428 if (pmap != PMAP_NULL) {
1429 queue_iterate(&object->memq, p, vm_page_t, listq) {
1430 if (!p->fictitious &&
1431 (offset <= p->offset) && (p->offset < end)) {
1432 vm_map_offset_t start;
1433
1434 start = pmap_start + p->offset - offset;
1435 pmap_protect(pmap, start, start + PAGE_SIZE_64, prot);
1436 }
1437 }
1438 } else {
1439 queue_iterate(&object->memq, p, vm_page_t, listq) {
1440 if (!p->fictitious &&
1441 (offset <= p->offset) && (p->offset < end)) {
1442
1443 pmap_page_protect(p->phys_page,
1444 prot & ~p->page_lock);
1445 }
1446 }
1447 }
1448 } else {
1449 vm_page_t p;
1450 vm_object_offset_t end;
1451 vm_object_offset_t target_off;
1452
1453 end = offset + size;
1454
1455 if (pmap != PMAP_NULL) {
1456 for(target_off = offset;
1457 target_off < end;
1458 target_off += PAGE_SIZE) {
1459 p = vm_page_lookup(object, target_off);
1460 if (p != VM_PAGE_NULL) {
1461 vm_offset_t start;
1462 start = pmap_start +
1463 (vm_offset_t)(p->offset - offset);
1464 pmap_protect(pmap, start,
1465 start + PAGE_SIZE, prot);
1466 }
1467 }
1468 } else {
1469 for(target_off = offset;
1470 target_off < end; target_off += PAGE_SIZE) {
1471 p = vm_page_lookup(object, target_off);
1472 if (p != VM_PAGE_NULL) {
1473 pmap_page_protect(p->phys_page,
1474 prot & ~p->page_lock);
1475 }
1476 }
1477 }
1478 }
1479
1480 if (prot == VM_PROT_NONE) {
1481 /*
1482 * Must follow shadow chain to remove access
1483 * to pages in shadowed objects.
1484 */
1485 register vm_object_t next_object;
1486
1487 next_object = object->shadow;
1488 if (next_object != VM_OBJECT_NULL) {
1489 offset += object->shadow_offset;
1490 vm_object_lock(next_object);
1491 vm_object_unlock(object);
1492 object = next_object;
1493 }
1494 else {
1495 /*
1496 * End of chain - we are done.
1497 */
1498 break;
1499 }
1500 }
1501 else {
1502 /*
1503 * Pages in shadowed objects may never have
1504 * write permission - we may stop here.
1505 */
1506 break;
1507 }
1508 }
1509
1510 vm_object_unlock(object);
1511 }
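/*
 * Usage sketch (illustrative; "entry_*" and "map" are assumptions about
 * a caller, not code from this file): a copy-on-write setup only needs
 * to remove write permission, so protecting the top-level object with
 * VM_PROT_READ is sufficient; passing VM_PROT_NONE would also walk the
 * shadow chain as described above.
 */
#if 0
	vm_object_pmap_protect(entry_object,
			       entry_offset,
			       (vm_object_size_t)(entry_end - entry_start),
			       map->pmap,
			       entry_start,
			       VM_PROT_READ);
#endif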
1512
1513 /*
1514 * Routine: vm_object_copy_slowly
1515 *
1516 * Description:
1517 * Copy the specified range of the source
1518 * virtual memory object without using
1519 * protection-based optimizations (such
1520 * as copy-on-write). The pages in the
1521 * region are actually copied.
1522 *
1523 * In/out conditions:
1524 * The caller must hold a reference and a lock
1525 * for the source virtual memory object. The source
1526 * object will be returned *unlocked*.
1527 *
1528 * Results:
1529 * If the copy is completed successfully, KERN_SUCCESS is
1530 * returned. If the caller asserted the interruptible
1531 * argument, and an interruption occurred while waiting
1532 * for a user-generated event, MACH_SEND_INTERRUPTED is
1533 * returned. Other values may be returned to indicate
1534 * hard errors during the copy operation.
1535 *
1536 * A new virtual memory object is returned in a
1537 * parameter (_result_object). The contents of this
1538 * new object, starting at a zero offset, are a copy
1539 * of the source memory region. In the event of
1540 * an error, this parameter will contain the value
1541 * VM_OBJECT_NULL.
1542 */
1543 __private_extern__ kern_return_t
1544 vm_object_copy_slowly(
1545 register vm_object_t src_object,
1546 vm_object_offset_t src_offset,
1547 vm_object_size_t size,
1548 boolean_t interruptible,
1549 vm_object_t *_result_object) /* OUT */
1550 {
1551 vm_object_t new_object;
1552 vm_object_offset_t new_offset;
1553
1554 vm_object_offset_t src_lo_offset = src_offset;
1555 vm_object_offset_t src_hi_offset = src_offset + size;
1556
1557 XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
1558 src_object, src_offset, size, 0, 0);
1559
1560 if (size == 0) {
1561 vm_object_unlock(src_object);
1562 *_result_object = VM_OBJECT_NULL;
1563 return(KERN_INVALID_ARGUMENT);
1564 }
1565
1566 /*
1567 * Prevent destruction of the source object while we copy.
1568 */
1569
1570 assert(src_object->ref_count > 0);
1571 src_object->ref_count++;
1572 VM_OBJ_RES_INCR(src_object);
1573 vm_object_unlock(src_object);
1574
1575 /*
1576 * Create a new object to hold the copied pages.
1577 * A few notes:
1578 * We fill the new object starting at offset 0,
1579 * regardless of the input offset.
1580 * We don't bother to lock the new object within
1581 * this routine, since we have the only reference.
1582 */
1583
1584 new_object = vm_object_allocate(size);
1585 new_offset = 0;
1586 vm_object_lock(new_object);
1587
1588 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
1589
1590 for ( ;
1591 size != 0 ;
1592 src_offset += PAGE_SIZE_64,
1593 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
1594 ) {
1595 vm_page_t new_page;
1596 vm_fault_return_t result;
1597
1598 while ((new_page = vm_page_alloc(new_object, new_offset))
1599 == VM_PAGE_NULL) {
1600 if (!vm_page_wait(interruptible)) {
1601 vm_object_unlock(new_object);
1602 vm_object_deallocate(new_object);
1603 vm_object_deallocate(src_object);
1604 *_result_object = VM_OBJECT_NULL;
1605 return(MACH_SEND_INTERRUPTED);
1606 }
1607 }
1608
1609 do {
1610 vm_prot_t prot = VM_PROT_READ;
1611 vm_page_t _result_page;
1612 vm_page_t top_page;
1613 register
1614 vm_page_t result_page;
1615 kern_return_t error_code;
1616
1617 vm_object_lock(src_object);
1618 vm_object_paging_begin(src_object);
1619
1620 XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
1621 result = vm_fault_page(src_object, src_offset,
1622 VM_PROT_READ, FALSE, interruptible,
1623 src_lo_offset, src_hi_offset,
1624 VM_BEHAVIOR_SEQUENTIAL,
1625 &prot, &_result_page, &top_page,
1626 (int *)0,
1627 &error_code, FALSE, FALSE, NULL, 0);
1628
1629 switch(result) {
1630 case VM_FAULT_SUCCESS:
1631 result_page = _result_page;
1632
1633 /*
1634 * We don't need to hold the object
1635 * lock -- the busy page will be enough.
1636 * [We don't care about picking up any
1637 * new modifications.]
1638 *
1639 * Copy the page to the new object.
1640 *
1641 * POLICY DECISION:
1642 * If result_page is clean,
1643 * we could steal it instead
1644 * of copying.
1645 */
1646
1647 vm_object_unlock(result_page->object);
1648 vm_page_copy(result_page, new_page);
1649
1650 /*
1651 * Let go of both pages (make them
1652 * not busy, perform wakeup, activate).
1653 */
1654
1655 new_page->busy = FALSE;
1656 new_page->dirty = TRUE;
1657 vm_object_lock(result_page->object);
1658 PAGE_WAKEUP_DONE(result_page);
1659
1660 vm_page_lock_queues();
1661 if (!result_page->active &&
1662 !result_page->inactive)
1663 vm_page_activate(result_page);
1664 vm_page_activate(new_page);
1665 vm_page_unlock_queues();
1666
1667 /*
1668 * Release paging references and
1669 * top-level placeholder page, if any.
1670 */
1671
1672 vm_fault_cleanup(result_page->object,
1673 top_page);
1674
1675 break;
1676
1677 case VM_FAULT_RETRY:
1678 break;
1679
1680 case VM_FAULT_FICTITIOUS_SHORTAGE:
1681 vm_page_more_fictitious();
1682 break;
1683
1684 case VM_FAULT_MEMORY_SHORTAGE:
1685 if (vm_page_wait(interruptible))
1686 break;
1687 /* fall thru */
1688
1689 case VM_FAULT_INTERRUPTED:
1690 vm_page_free(new_page);
1691 vm_object_unlock(new_object);
1692 vm_object_deallocate(new_object);
1693 vm_object_deallocate(src_object);
1694 *_result_object = VM_OBJECT_NULL;
1695 return(MACH_SEND_INTERRUPTED);
1696
1697 case VM_FAULT_MEMORY_ERROR:
1698 /*
1699 * A policy choice:
1700 * (a) ignore pages that we can't
1701 * copy
1702 * (b) return the null object if
1703 * any page fails [chosen]
1704 */
1705
1706 vm_page_lock_queues();
1707 vm_page_free(new_page);
1708 vm_page_unlock_queues();
1709 vm_object_unlock(new_object);
1710 vm_object_deallocate(new_object);
1711 vm_object_deallocate(src_object);
1712 *_result_object = VM_OBJECT_NULL;
1713 return(error_code ? error_code:
1714 KERN_MEMORY_ERROR);
1715 }
1716 } while (result != VM_FAULT_SUCCESS);
1717 }
1718
1719 /*
1720 * Lose the extra reference, and return our object.
1721 */
1722
1723 vm_object_unlock(new_object);
1724 vm_object_deallocate(src_object);
1725 *_result_object = new_object;
1726 return(KERN_SUCCESS);
1727 }
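/*
 * Usage sketch (illustrative; "src_object" is assumed to be an object on
 * which the caller already holds a reference): the source must be locked
 * on entry and is returned unlocked; on success the caller owns one
 * reference on the newly allocated copy.
 */
#if 0
	vm_object_t	copy_object;
	kern_return_t	kr;

	vm_object_lock(src_object);
	kr = vm_object_copy_slowly(src_object,
				   0,			/* src_offset */
				   src_object->size,
				   FALSE,		/* not interruptible */
				   &copy_object);
	if (kr != KERN_SUCCESS)
		assert(copy_object == VM_OBJECT_NULL);
#endif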
1728
1729 /*
1730 * Routine: vm_object_copy_quickly
1731 *
1732 * Purpose:
1733 * Copy the specified range of the source virtual
1734 * memory object, if it can be done without waiting
1735 * for user-generated events.
1736 *
1737 * Results:
1738 * If the copy is successful, the copy is returned in
1739 * the arguments; otherwise, the arguments are not
1740 * affected.
1741 *
1742 * In/out conditions:
1743 * The object should be unlocked on entry and exit.
1744 */
1745
1746 /*ARGSUSED*/
1747 __private_extern__ boolean_t
1748 vm_object_copy_quickly(
1749 vm_object_t *_object, /* INOUT */
1750 __unused vm_object_offset_t offset, /* IN */
1751 __unused vm_object_size_t size, /* IN */
1752 boolean_t *_src_needs_copy, /* OUT */
1753 boolean_t *_dst_needs_copy) /* OUT */
1754 {
1755 vm_object_t object = *_object;
1756 memory_object_copy_strategy_t copy_strategy;
1757
1758 XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
1759 *_object, offset, size, 0, 0);
1760 if (object == VM_OBJECT_NULL) {
1761 *_src_needs_copy = FALSE;
1762 *_dst_needs_copy = FALSE;
1763 return(TRUE);
1764 }
1765
1766 vm_object_lock(object);
1767
1768 copy_strategy = object->copy_strategy;
1769
1770 switch (copy_strategy) {
1771 case MEMORY_OBJECT_COPY_SYMMETRIC:
1772
1773 /*
1774 * Symmetric copy strategy.
1775 * Make another reference to the object.
1776 * Leave object/offset unchanged.
1777 */
1778
1779 assert(object->ref_count > 0);
1780 object->ref_count++;
1781 vm_object_res_reference(object);
1782 object->shadowed = TRUE;
1783 vm_object_unlock(object);
1784
1785 /*
1786 * Both source and destination must make
1787 * shadows, and the source must be made
1788 * read-only if not already.
1789 */
1790
1791 *_src_needs_copy = TRUE;
1792 *_dst_needs_copy = TRUE;
1793
1794 break;
1795
1796 case MEMORY_OBJECT_COPY_DELAY:
1797 vm_object_unlock(object);
1798 return(FALSE);
1799
1800 default:
1801 vm_object_unlock(object);
1802 return(FALSE);
1803 }
1804 return(TRUE);
1805 }
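/*
 * Usage sketch (illustrative; the map-entry handling is an assumption
 * about the caller, not code from this file): with the symmetric
 * strategy the same object is returned with an extra reference and both
 * flags are set, so each side must shadow the object on its first write.
 */
#if 0
	boolean_t	src_needs_copy, dst_needs_copy;

	if (vm_object_copy_quickly(&object, offset, size,
				   &src_needs_copy, &dst_needs_copy)) {
		if (src_needs_copy) {
			/* caller write-protects the source mapping here */
		}
		/* "object" now carries the reference used for the copy */
	}
#endif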
1806
1807 static int copy_call_count = 0;
1808 static int copy_call_sleep_count = 0;
1809 static int copy_call_restart_count = 0;
1810
1811 /*
1812 * Routine: vm_object_copy_call [internal]
1813 *
1814 * Description:
1815 * Copy the source object (src_object), using the
1816 * user-managed copy algorithm.
1817 *
1818 * In/out conditions:
1819 * The source object must be locked on entry. It
1820 * will be *unlocked* on exit.
1821 *
1822 * Results:
1823 * If the copy is successful, KERN_SUCCESS is returned.
1824 * A new object that represents the copied virtual
1825 * memory is returned in a parameter (*_result_object).
1826 * If the return value indicates an error, this parameter
1827 * is not valid.
1828 */
1829 static kern_return_t
1830 vm_object_copy_call(
1831 vm_object_t src_object,
1832 vm_object_offset_t src_offset,
1833 vm_object_size_t size,
1834 vm_object_t *_result_object) /* OUT */
1835 {
1836 kern_return_t kr;
1837 vm_object_t copy;
1838 boolean_t check_ready = FALSE;
1839
1840 /*
1841 * If a copy is already in progress, wait and retry.
1842 *
1843 * XXX
1844 * Consider making this call interruptible, as Mike
1845 * intended it to be.
1846 *
1847 * XXXO
1848 * Need a counter or version or something to allow
1849 * us to use the copy that the currently requesting
1850 * thread is obtaining -- is it worth adding to the
1851 * vm object structure? Depends on how common this case is.
1852 */
1853 copy_call_count++;
1854 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
1855 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1856 THREAD_UNINT);
1857 copy_call_restart_count++;
1858 }
1859
1860 /*
1861 * Indicate (for the benefit of memory_object_create_copy)
1862 * that we want a copy for src_object. (Note that we cannot
1863 * do a real assert_wait before calling memory_object_copy,
1864 * so we simply set the flag.)
1865 */
1866
1867 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
1868 vm_object_unlock(src_object);
1869
1870 /*
1871 * Ask the memory manager to give us a memory object
1872 * which represents a copy of the src object.
1873 * The memory manager may give us a memory object
1874 * which we already have, or it may give us a
1875 * new memory object. This memory object will arrive
1876 * via memory_object_create_copy.
1877 */
1878
1879 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
1880 if (kr != KERN_SUCCESS) {
1881 return kr;
1882 }
1883
1884 /*
1885 * Wait for the copy to arrive.
1886 */
1887 vm_object_lock(src_object);
1888 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
1889 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1890 THREAD_UNINT);
1891 copy_call_sleep_count++;
1892 }
1893 Retry:
1894 assert(src_object->copy != VM_OBJECT_NULL);
1895 copy = src_object->copy;
1896 if (!vm_object_lock_try(copy)) {
1897 vm_object_unlock(src_object);
1898 mutex_pause(); /* wait a bit */
1899 vm_object_lock(src_object);
1900 goto Retry;
1901 }
1902 if (copy->size < src_offset+size)
1903 copy->size = src_offset+size;
1904
1905 if (!copy->pager_ready)
1906 check_ready = TRUE;
1907
1908 /*
1909 * Return the copy.
1910 */
1911 *_result_object = copy;
1912 vm_object_unlock(copy);
1913 vm_object_unlock(src_object);
1914
1915 /* Wait for the copy to be ready. */
1916 if (check_ready == TRUE) {
1917 vm_object_lock(copy);
1918 while (!copy->pager_ready) {
1919 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
1920 }
1921 vm_object_unlock(copy);
1922 }
1923
1924 return KERN_SUCCESS;
1925 }
1926
1927 static int copy_delayed_lock_collisions = 0;
1928 static int copy_delayed_max_collisions = 0;
1929 static int copy_delayed_lock_contention = 0;
1930 static int copy_delayed_protect_iterate = 0;
1931
1932 /*
1933 * Routine: vm_object_copy_delayed [internal]
1934 *
1935 * Description:
1936 * Copy the specified virtual memory object, using
1937 * the asymmetric copy-on-write algorithm.
1938 *
1939 * In/out conditions:
1940 * The src_object must be locked on entry. It will be unlocked
1941 * on exit - so the caller must also hold a reference to it.
1942 *
1943 * This routine will not block waiting for user-generated
1944 * events. It is not interruptible.
1945 */
1946 __private_extern__ vm_object_t
1947 vm_object_copy_delayed(
1948 vm_object_t src_object,
1949 vm_object_offset_t src_offset,
1950 vm_object_size_t size)
1951 {
1952 vm_object_t new_copy = VM_OBJECT_NULL;
1953 vm_object_t old_copy;
1954 vm_page_t p;
1955 vm_object_size_t copy_size = src_offset + size;
1956
1957 int collisions = 0;
1958 /*
1959 * The user-level memory manager wants to see all of the changes
1960 * to this object, but it has promised not to make any changes on
1961 * its own.
1962 *
1963 * Perform an asymmetric copy-on-write, as follows:
1964 * Create a new object, called a "copy object" to hold
1965 * pages modified by the new mapping (i.e., the copy,
1966 * not the original mapping).
1967 * Record the original object as the backing object for
1968 * the copy object. If the original mapping does not
1969 * change a page, it may be used read-only by the copy.
1970 * Record the copy object in the original object.
1971 * When the original mapping causes a page to be modified,
1972 * it must be copied to a new page that is "pushed" to
1973 * the copy object.
1974 * Mark the new mapping (the copy object) copy-on-write.
1975 * This makes the copy object itself read-only, allowing
1976 * it to be reused if the original mapping makes no
1977 * changes, and simplifying the synchronization required
1978 * in the "push" operation described above.
1979 *
1980 * The copy-on-write is said to be asymmetric because the original
1981 * object is *not* marked copy-on-write. A copied page is pushed
1982 * to the copy object, regardless of which party attempted to modify
1983 * the page.
1984 *
1985 * Repeated asymmetric copy operations may be done. If the
1986 * original object has not been changed since the last copy, its
1987 * copy object can be reused. Otherwise, a new copy object can be
1988 * inserted between the original object and its previous copy
1989 * object. Since any copy object is read-only, this cannot
1990 * affect the contents of the previous copy object.
1991 *
1992 * Note that a copy object is higher in the object tree than the
1993 * original object; therefore, use of the copy object recorded in
1994 * the original object must be done carefully, to avoid deadlock.
1995 */
1996
1997 Retry:
1998
1999 /*
2000 * Wait for paging in progress.
2001 */
2002 if (!src_object->true_share)
2003 vm_object_paging_wait(src_object, THREAD_UNINT);
2004
2005 /*
2006 * See whether we can reuse the result of a previous
2007 * copy operation.
2008 */
2009
2010 old_copy = src_object->copy;
2011 if (old_copy != VM_OBJECT_NULL) {
2012 /*
2013 * Try to get the locks (out of order)
2014 */
2015 if (!vm_object_lock_try(old_copy)) {
2016 vm_object_unlock(src_object);
2017 mutex_pause();
2018
2019 /* Heisenberg Rules */
2020 copy_delayed_lock_collisions++;
2021 if (collisions++ == 0)
2022 copy_delayed_lock_contention++;
2023
2024 if (collisions > copy_delayed_max_collisions)
2025 copy_delayed_max_collisions = collisions;
2026
2027 vm_object_lock(src_object);
2028 goto Retry;
2029 }
2030
2031 /*
2032 * Determine whether the old copy object has
2033 * been modified.
2034 */
2035
2036 if (old_copy->resident_page_count == 0 &&
2037 !old_copy->pager_created) {
2038 /*
2039 * It has not been modified.
2040 *
2041 * Return another reference to
2042 * the existing copy-object if
2043 * we can safely grow it (if
2044 * needed).
2045 */
2046
2047 if (old_copy->size < copy_size) {
2048 /*
2049 * We can't perform a delayed copy if any of the
2050 * pages in the extended range are wired (because
2051 * we can't safely take write permission away from
2052 * wired pages). If the pages aren't wired, then
2053 * go ahead and protect them.
2054 */
2055 copy_delayed_protect_iterate++;
2056 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2057 if (!p->fictitious &&
2058 p->offset >= old_copy->size &&
2059 p->offset < copy_size) {
2060 if (p->wire_count > 0) {
2061 vm_object_unlock(old_copy);
2062 vm_object_unlock(src_object);
2063
2064 if (new_copy != VM_OBJECT_NULL) {
2065 vm_object_unlock(new_copy);
2066 vm_object_deallocate(new_copy);
2067 }
2068
2069 return VM_OBJECT_NULL;
2070 } else {
2071 pmap_page_protect(p->phys_page,
2072 (VM_PROT_ALL & ~VM_PROT_WRITE &
2073 ~p->page_lock));
2074 }
2075 }
2076 }
2077 old_copy->size = copy_size;
2078 }
2079
2080 vm_object_reference_locked(old_copy);
2081 vm_object_unlock(old_copy);
2082 vm_object_unlock(src_object);
2083
2084 if (new_copy != VM_OBJECT_NULL) {
2085 vm_object_unlock(new_copy);
2086 vm_object_deallocate(new_copy);
2087 }
2088
2089 return(old_copy);
2090 }
2091
2092 /*
2093 * Adjust the size argument so that the newly-created
2094 * copy object will be large enough to back either the
2095 * old copy object or the new mapping.
2096 */
2097 if (old_copy->size > copy_size)
2098 copy_size = old_copy->size;
2099
2100 if (new_copy == VM_OBJECT_NULL) {
2101 vm_object_unlock(old_copy);
2102 vm_object_unlock(src_object);
2103 new_copy = vm_object_allocate(copy_size);
2104 vm_object_lock(src_object);
2105 vm_object_lock(new_copy);
2106 goto Retry;
2107 }
2108 new_copy->size = copy_size;
2109
2110 /*
2111 * The copy-object is always made large enough to
2112 * completely shadow the original object, since
2113 * it may have several users who want to shadow
2114 * the original object at different points.
2115 */
2116
2117 assert((old_copy->shadow == src_object) &&
2118 (old_copy->shadow_offset == (vm_object_offset_t) 0));
2119
2120 } else if (new_copy == VM_OBJECT_NULL) {
2121 vm_object_unlock(src_object);
2122 new_copy = vm_object_allocate(copy_size);
2123 vm_object_lock(src_object);
2124 vm_object_lock(new_copy);
2125 goto Retry;
2126 }
2127
2128 /*
2129 * We now have the src object locked, and the new copy object
2130 * allocated and locked (and potentially the old copy locked).
2131 * Before we go any further, make sure we can still perform
2132 * a delayed copy, as the situation may have changed.
2133 *
2134 * Specifically, we can't perform a delayed copy if any of the
2135 * pages in the range are wired (because we can't safely take
2136 * write permission away from wired pages). If the pages aren't
2137 * wired, then go ahead and protect them.
2138 */
2139 copy_delayed_protect_iterate++;
2140 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2141 if (!p->fictitious && p->offset < copy_size) {
2142 if (p->wire_count > 0) {
2143 if (old_copy)
2144 vm_object_unlock(old_copy);
2145 vm_object_unlock(src_object);
2146 vm_object_unlock(new_copy);
2147 vm_object_deallocate(new_copy);
2148 return VM_OBJECT_NULL;
2149 } else {
2150 pmap_page_protect(p->phys_page,
2151 (VM_PROT_ALL & ~VM_PROT_WRITE &
2152 ~p->page_lock));
2153 }
2154 }
2155 }
2156
2157 if (old_copy != VM_OBJECT_NULL) {
2158 /*
2159 * Make the old copy-object shadow the new one.
2160 * It will receive no more pages from the original
2161 * object.
2162 */
2163
2164 src_object->ref_count--; /* remove ref. from old_copy */
2165 assert(src_object->ref_count > 0);
2166 old_copy->shadow = new_copy;
2167 assert(new_copy->ref_count > 0);
2168 new_copy->ref_count++; /* for old_copy->shadow ref. */
2169
2170 #if TASK_SWAPPER
2171 if (old_copy->res_count) {
2172 VM_OBJ_RES_INCR(new_copy);
2173 VM_OBJ_RES_DECR(src_object);
2174 }
2175 #endif
2176
2177 vm_object_unlock(old_copy); /* done with old_copy */
2178 }
2179
2180 /*
2181 * Point the new copy at the existing object.
2182 */
2183 new_copy->shadow = src_object;
2184 new_copy->shadow_offset = 0;
2185 new_copy->shadowed = TRUE; /* caller must set needs_copy */
2186 assert(src_object->ref_count > 0);
2187 src_object->ref_count++;
2188 VM_OBJ_RES_INCR(src_object);
2189 src_object->copy = new_copy;
2190 vm_object_unlock(src_object);
2191 vm_object_unlock(new_copy);
2192
2193 XPR(XPR_VM_OBJECT,
2194 "vm_object_copy_delayed: used copy object %X for source %X\n",
2195 (integer_t)new_copy, (integer_t)src_object, 0, 0, 0);
2196
2197 return(new_copy);
2198 }
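/*
 * Illustrative sketch only: the object relationships that hold when
 * vm_object_copy_delayed() returns new_copy (offsets omitted):
 *
 *	new_copy->shadow == src_object	(the copy reads through)
 *	src_object->copy == new_copy	(modified pages are pushed here)
 *	old_copy->shadow == new_copy	(if a previous copy existed)
 *
 * The caller is expected to mark its mapping needs_copy, as noted for
 * the shadowed flag above.
 */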
2199
2200 /*
2201 * Routine: vm_object_copy_strategically
2202 *
2203 * Purpose:
2204 * Perform a copy according to the source object's
2205 * declared strategy. This operation may block,
2206 * and may be interrupted.
2207 */
2208 __private_extern__ kern_return_t
2209 vm_object_copy_strategically(
2210 register vm_object_t src_object,
2211 vm_object_offset_t src_offset,
2212 vm_object_size_t size,
2213 vm_object_t *dst_object, /* OUT */
2214 vm_object_offset_t *dst_offset, /* OUT */
2215 boolean_t *dst_needs_copy) /* OUT */
2216 {
2217 kern_return_t result;
2218 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
2219 memory_object_copy_strategy_t copy_strategy;
2220
2221 assert(src_object != VM_OBJECT_NULL);
2222
2223 vm_object_lock(src_object);
2224
2225 /*
2226 * The copy strategy is only valid if the memory manager
2227 * is "ready". Internal objects are always ready.
2228 */
2229
2230 while (!src_object->internal && !src_object->pager_ready) {
2231 wait_result_t wait_result;
2232
2233 wait_result = vm_object_sleep( src_object,
2234 VM_OBJECT_EVENT_PAGER_READY,
2235 interruptible);
2236 if (wait_result != THREAD_AWAKENED) {
2237 vm_object_unlock(src_object);
2238 *dst_object = VM_OBJECT_NULL;
2239 *dst_offset = 0;
2240 *dst_needs_copy = FALSE;
2241 return(MACH_SEND_INTERRUPTED);
2242 }
2243 }
2244
2245 copy_strategy = src_object->copy_strategy;
2246
2247 /*
2248 * Use the appropriate copy strategy.
2249 */
2250
2251 switch (copy_strategy) {
2252 case MEMORY_OBJECT_COPY_DELAY:
2253 *dst_object = vm_object_copy_delayed(src_object,
2254 src_offset, size);
2255 if (*dst_object != VM_OBJECT_NULL) {
2256 *dst_offset = src_offset;
2257 *dst_needs_copy = TRUE;
2258 result = KERN_SUCCESS;
2259 break;
2260 }
2261 vm_object_lock(src_object);
2262 /* fall thru when delayed copy not allowed */
2263
2264 case MEMORY_OBJECT_COPY_NONE:
2265 result = vm_object_copy_slowly(src_object, src_offset, size,
2266 interruptible, dst_object);
2267 if (result == KERN_SUCCESS) {
2268 *dst_offset = 0;
2269 *dst_needs_copy = FALSE;
2270 }
2271 break;
2272
2273 case MEMORY_OBJECT_COPY_CALL:
2274 result = vm_object_copy_call(src_object, src_offset, size,
2275 dst_object);
2276 if (result == KERN_SUCCESS) {
2277 *dst_offset = src_offset;
2278 *dst_needs_copy = TRUE;
2279 }
2280 break;
2281
2282 case MEMORY_OBJECT_COPY_SYMMETRIC:
2283 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0);
2284 vm_object_unlock(src_object);
2285 result = KERN_MEMORY_RESTART_COPY;
2286 break;
2287
2288 default:
2289 panic("copy_strategically: bad strategy");
2290 result = KERN_INVALID_ARGUMENT;
2291 }
2292 return(result);
2293 }
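/*
 * Illustrative usage sketch only; the variable names are placeholders.
 * A caller wanting a copy of [src_offset, src_offset + size) that
 * honors the source object's declared strategy might do:
 *
 *	vm_object_t		dst;
 *	vm_object_offset_t	dst_off;
 *	boolean_t		needs_copy;
 *	kern_return_t		kr;
 *
 *	kr = vm_object_copy_strategically(src_object, src_offset, size,
 *					  &dst, &dst_off, &needs_copy);
 *	if (kr == KERN_MEMORY_RESTART_COPY)
 *		... fall back to a symmetric copy path ...
 *	else if (kr == KERN_SUCCESS && needs_copy)
 *		... mark the new mapping copy-on-write ...
 */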
2294
2295 /*
2296 * vm_object_shadow:
2297 *
2298 * Create a new object which is backed by the
2299 * specified existing object range. The source
2300 * object reference is deallocated.
2301 *
2302 * The new object and offset into that object
2303 * are returned in the source parameters.
2304 */
2305 boolean_t vm_object_shadow_check = FALSE;
2306
2307 __private_extern__ boolean_t
2308 vm_object_shadow(
2309 vm_object_t *object, /* IN/OUT */
2310 vm_object_offset_t *offset, /* IN/OUT */
2311 vm_object_size_t length)
2312 {
2313 register vm_object_t source;
2314 register vm_object_t result;
2315
2316 source = *object;
2317 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
2318
2319 /*
2320 * Determine if we really need a shadow.
2321 */
2322
2323 if (vm_object_shadow_check && source->ref_count == 1 &&
2324 (source->shadow == VM_OBJECT_NULL ||
2325 source->shadow->copy == VM_OBJECT_NULL))
2326 {
2327 source->shadowed = FALSE;
2328 return FALSE;
2329 }
2330
2331 /*
2332 * Allocate a new object with the given length
2333 */
2334
2335 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
2336 panic("vm_object_shadow: no object for shadowing");
2337
2338 /*
2339 * The new object shadows the source object, adding
2340 * a reference to it. Our caller changes his reference
2341 * to point to the new object, removing a reference to
2342 * the source object. Net result: no change of reference
2343 * count.
2344 */
2345 result->shadow = source;
2346
2347 /*
2348 * Store the offset into the source object,
2349 * and fix up the offset into the new object.
2350 */
2351
2352 result->shadow_offset = *offset;
2353
2354 /*
2355 * Return the new things
2356 */
2357
2358 *offset = 0;
2359 *object = result;
2360 return TRUE;
2361 }
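/*
 * Illustrative sketch only (names are hypothetical): typical use when
 * a symmetric-copy object must be shadowed before a write.  The
 * caller's object/offset pair is updated in place; a TRUE return means
 * a new shadow object now backs the mapping.
 *
 *	vm_object_t		obj = entry_object;
 *	vm_object_offset_t	off = entry_offset;
 *
 *	if (vm_object_shadow(&obj, &off, entry_size)) {
 *		... obj is the new shadow object, off is now 0 ...
 *	}
 */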
2362
2363 /*
2364 * The relationship between vm_object structures and
2365 * the memory_object requires careful synchronization.
2366 *
2367 * All associations are created by memory_object_create_named
2368 * for external pagers and vm_object_pager_create for internal
2369 * objects as follows:
2370 *
2371 * pager: the memory_object itself, supplied by
2372 * the user requesting a mapping (or the kernel,
2373 * when initializing internal objects); the
2374 * kernel simulates holding send rights by keeping
2375 * a port reference;
2376 *
2377 * pager_request:
2378 * the memory object control port,
2379 * created by the kernel; the kernel holds
2380 * receive (and ownership) rights to this
2381 * port, but no other references.
2382 *
2383 * When initialization is complete, the "initialized" field
2384 * is asserted. Other mappings using a particular memory object,
2385 * and any references to the vm_object gained through the
2386 * port association must wait for this initialization to occur.
2387 *
2388 * In order to allow the memory manager to set attributes before
2389 * requests (notably virtual copy operations, but also data or
2390 * unlock requests) are made, a "ready" attribute is made available.
2391 * Only the memory manager may affect the value of this attribute.
2392 * Its value does not affect critical kernel functions, such as
2393 * internal object initialization or destruction. [Furthermore,
2394 * memory objects created by the kernel are assumed to be ready
2395 * immediately; the default memory manager need not explicitly
2396 * set the "ready" attribute.]
2397 *
2398 * [Both the "initialized" and "ready" attribute wait conditions
2399 * use the "pager" field as the wait event.]
2400 *
2401 * The port associations can be broken down by any of the
2402 * following routines:
2403 * vm_object_terminate:
2404 * No references to the vm_object remain, and
2405 * the object cannot (or will not) be cached.
2406 * This is the normal case, and is done even
2407 * though one of the other cases has already been
2408 * done.
2409 * memory_object_destroy:
2410 * The memory manager has requested that the
2411 * kernel relinquish references to the memory
2412 * object. [The memory manager may not want to
2413 * destroy the memory object, but may wish to
2414 * refuse or tear down existing memory mappings.]
2415 *
2416 * Each routine that breaks an association must break all of
2417 * them at once. At some later time, that routine must clear
2418 * the pager field and release the memory object references.
2419 * [Furthermore, each routine must cope with the simultaneous
2420 * or previous operations of the others.]
2421 *
2422 * In addition to the lock on the object, the vm_object_cache_lock
2423 * governs the associations. References gained through the
2424 * association require use of the cache lock.
2425 *
2426 * Because the pager field may be cleared spontaneously, it
2427 * cannot be used to determine whether a memory object has
2428 * ever been associated with a particular vm_object. [This
2429 * knowledge is important to the shadow object mechanism.]
2430 * For this reason, an additional "created" attribute is
2431 * provided.
2432 *
2433 * During various paging operations, the pager reference found in the
2434 * vm_object must be valid. To prevent this from being released,
2435 * (other than being removed, i.e., made null), routines may use
2436 * the vm_object_paging_begin/end routines [actually, macros].
2437 * The implementation uses the "paging_in_progress" and "wanted" fields.
2438 * [Operations that alter the validity of the pager values include the
2439 * termination routines and vm_object_collapse.]
2440 */
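/*
 * Illustrative sketch of the wait idiom described above; the real
 * instances are in vm_object_enter() and vm_object_copy_strategically():
 *
 *	vm_object_lock(object);
 *	while (!object->pager_initialized)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_INITIALIZED,
 *				THREAD_UNINT);
 *	while (!object->internal && !object->pager_ready)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	vm_object_unlock(object);
 */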
2441
2442 #if 0
2443 static void vm_object_abort_activity(
2444 vm_object_t object);
2445
2446 /*
2447 * Routine: vm_object_abort_activity [internal use only]
2448 * Purpose:
2449 * Abort paging requests pending on this object.
2450 * In/out conditions:
2451 * The object is locked on entry and exit.
2452 */
2453 static void
2454 vm_object_abort_activity(
2455 vm_object_t object)
2456 {
2457 register
2458 vm_page_t p;
2459 vm_page_t next;
2460
2461 XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
2462 (integer_t)object, 0, 0, 0, 0);
2463
2464 /*
2465 * Abort all activity that would be waiting
2466 * for a result on this memory object.
2467 *
2468 * We could also choose to destroy all pages
2469 * that we have in memory for this object, but
2470 * we don't.
2471 */
2472
2473 p = (vm_page_t) queue_first(&object->memq);
2474 while (!queue_end(&object->memq, (queue_entry_t) p)) {
2475 next = (vm_page_t) queue_next(&p->listq);
2476
2477 /*
2478 * If it's being paged in, destroy it.
2479 * If an unlock has been requested, start it again.
2480 */
2481
2482 if (p->busy && p->absent) {
2483 VM_PAGE_FREE(p);
2484 }
2485 else {
2486 if (p->unlock_request != VM_PROT_NONE)
2487 p->unlock_request = VM_PROT_NONE;
2488 PAGE_WAKEUP(p);
2489 }
2490
2491 p = next;
2492 }
2493
2494 /*
2495 * Wake up threads waiting for the memory object to
2496 * become ready.
2497 */
2498
2499 object->pager_ready = TRUE;
2500 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
2501 }
2502
2503 /*
2504 * Routine: vm_object_pager_dead
2505 *
2506 * Purpose:
2507 * A port is being destroyed, and the IPC kobject code
2508 * can't tell if it represents a pager port or not.
2509 * So this function is called each time it sees a port
2510 * die.
2511 * THIS IS HORRIBLY INEFFICIENT. We should only call
2512 * this routine if we had requested a notification on
2513 * the port.
2514 */
2515
2516 __private_extern__ void
2517 vm_object_pager_dead(
2518 ipc_port_t pager)
2519 {
2520 vm_object_t object;
2521 vm_object_hash_entry_t entry;
2522
2523 /*
2524 * Perform essentially the same operations as in vm_object_lookup,
2525 * except that this time we look up based on the memory_object
2526 * port, not the control port.
2527 */
2528 vm_object_cache_lock();
2529 entry = vm_object_hash_lookup(pager, FALSE);
2530 if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
2531 entry->object == VM_OBJECT_NULL) {
2532 vm_object_cache_unlock();
2533 return;
2534 }
2535
2536 object = entry->object;
2537 entry->object = VM_OBJECT_NULL;
2538
2539 vm_object_lock(object);
2540 if (object->ref_count == 0) {
2541 XPR(XPR_VM_OBJECT_CACHE,
2542 "vm_object_destroy: removing %x from cache, head (%x, %x)\n",
2543 (integer_t)object,
2544 (integer_t)vm_object_cached_list.next,
2545 (integer_t)vm_object_cached_list.prev, 0,0);
2546
2547 queue_remove(&vm_object_cached_list, object,
2548 vm_object_t, cached_list);
2549 vm_object_cached_count--;
2550 }
2551 object->ref_count++;
2552 vm_object_res_reference(object);
2553
2554 object->can_persist = FALSE;
2555
2556 assert(object->pager == pager);
2557
2558 /*
2559 * Remove the pager association.
2560 *
2561 * Note that the memory_object itself is dead, so
2562 * we don't bother with it.
2563 */
2564
2565 object->pager = MEMORY_OBJECT_NULL;
2566
2567 vm_object_unlock(object);
2568 vm_object_cache_unlock();
2569
2570 vm_object_pager_wakeup(pager);
2571
2572 /*
2573 * Release the pager reference. Note that there's no
2574 * point in trying the memory_object_terminate call
2575 * because the memory_object itself is dead. Also
2576 * release the memory_object_control reference, since
2577 * the pager didn't do that either.
2578 */
2579
2580 memory_object_deallocate(pager);
2581 memory_object_control_deallocate(object->pager_request);
2582
2583
2584 /*
2585 * Restart pending page requests
2586 */
2587 vm_object_lock(object);
2588 vm_object_abort_activity(object);
2589 vm_object_unlock(object);
2590
2591 /*
2592 * Lose the object reference.
2593 */
2594
2595 vm_object_deallocate(object);
2596 }
2597 #endif
2598
2599 /*
2600 * Routine: vm_object_enter
2601 * Purpose:
2602 * Find a VM object corresponding to the given
2603 * pager; if no such object exists, create one,
2604 * and initialize the pager.
2605 */
2606 vm_object_t
2607 vm_object_enter(
2608 memory_object_t pager,
2609 vm_object_size_t size,
2610 boolean_t internal,
2611 boolean_t init,
2612 boolean_t named)
2613 {
2614 register vm_object_t object;
2615 vm_object_t new_object;
2616 boolean_t must_init;
2617 vm_object_hash_entry_t entry, new_entry;
2618
2619 if (pager == MEMORY_OBJECT_NULL)
2620 return(vm_object_allocate(size));
2621
2622 new_object = VM_OBJECT_NULL;
2623 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2624 must_init = init;
2625
2626 /*
2627 * Look for an object associated with this port.
2628 */
2629
2630 vm_object_cache_lock();
2631 do {
2632 entry = vm_object_hash_lookup(pager, FALSE);
2633
2634 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
2635 if (new_object == VM_OBJECT_NULL) {
2636 /*
2637 * We must unlock to create a new object;
2638 * if we do so, we must try the lookup again.
2639 */
2640 vm_object_cache_unlock();
2641 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
2642 new_entry = vm_object_hash_entry_alloc(pager);
2643 new_object = vm_object_allocate(size);
2644 vm_object_cache_lock();
2645 } else {
2646 /*
2647 * Lookup failed twice, and we have something
2648 * to insert; set the object.
2649 */
2650 vm_object_hash_insert(new_entry);
2651 entry = new_entry;
2652 entry->object = new_object;
2653 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2654 new_object = VM_OBJECT_NULL;
2655 must_init = TRUE;
2656 }
2657 } else if (entry->object == VM_OBJECT_NULL) {
2658 /*
2659 * If a previous object is being terminated,
2660 * we must wait for the termination message
2661 * to be queued (and look up the entry again).
2662 */
2663 entry->waiting = TRUE;
2664 entry = VM_OBJECT_HASH_ENTRY_NULL;
2665 assert_wait((event_t) pager, THREAD_UNINT);
2666 vm_object_cache_unlock();
2667 thread_block(THREAD_CONTINUE_NULL);
2668 vm_object_cache_lock();
2669 }
2670 } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
2671
2672 object = entry->object;
2673 assert(object != VM_OBJECT_NULL);
2674
2675 if (!must_init) {
2676 vm_object_lock(object);
2677 assert(!internal || object->internal);
2678 if (named) {
2679 assert(!object->named);
2680 object->named = TRUE;
2681 }
2682 if (object->ref_count == 0) {
2683 XPR(XPR_VM_OBJECT_CACHE,
2684 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
2685 (integer_t)object,
2686 (integer_t)vm_object_cached_list.next,
2687 (integer_t)vm_object_cached_list.prev, 0,0);
2688 queue_remove(&vm_object_cached_list, object,
2689 vm_object_t, cached_list);
2690 vm_object_cached_count--;
2691 }
2692 object->ref_count++;
2693 vm_object_res_reference(object);
2694 vm_object_unlock(object);
2695
2696 VM_STAT(hits++);
2697 }
2698 assert(object->ref_count > 0);
2699
2700 VM_STAT(lookups++);
2701
2702 vm_object_cache_unlock();
2703
2704 XPR(XPR_VM_OBJECT,
2705 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
2706 (integer_t)pager, (integer_t)object, must_init, 0, 0);
2707
2708 /*
2709 * If we raced to create a vm_object but lost, let's
2710 * throw away ours.
2711 */
2712
2713 if (new_object != VM_OBJECT_NULL)
2714 vm_object_deallocate(new_object);
2715
2716 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
2717 vm_object_hash_entry_free(new_entry);
2718
2719 if (must_init) {
2720 memory_object_control_t control;
2721
2722 /*
2723 * Allocate request port.
2724 */
2725
2726 control = memory_object_control_allocate(object);
2727 assert (control != MEMORY_OBJECT_CONTROL_NULL);
2728
2729 vm_object_lock(object);
2730 assert(object != kernel_object);
2731
2732 /*
2733 * Copy the reference we were given.
2734 */
2735
2736 memory_object_reference(pager);
2737 object->pager_created = TRUE;
2738 object->pager = pager;
2739 object->internal = internal;
2740 object->pager_trusted = internal;
2741 if (!internal) {
2742 /* copy strategy invalid until set by memory manager */
2743 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
2744 }
2745 object->pager_control = control;
2746 object->pager_ready = FALSE;
2747
2748 vm_object_unlock(object);
2749
2750 /*
2751 * Let the pager know we're using it.
2752 */
2753
2754 (void) memory_object_init(pager,
2755 object->pager_control,
2756 PAGE_SIZE);
2757
2758 vm_object_lock(object);
2759 if (named)
2760 object->named = TRUE;
2761 if (internal) {
2762 object->pager_ready = TRUE;
2763 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
2764 }
2765
2766 object->pager_initialized = TRUE;
2767 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
2768 } else {
2769 vm_object_lock(object);
2770 }
2771
2772 /*
2773 * [At this point, the object must be locked]
2774 */
2775
2776 /*
2777 * Wait for the work above to be done by the first
2778 * thread to map this object.
2779 */
2780
2781 while (!object->pager_initialized) {
2782 vm_object_sleep(object,
2783 VM_OBJECT_EVENT_INITIALIZED,
2784 THREAD_UNINT);
2785 }
2786 vm_object_unlock(object);
2787
2788 XPR(XPR_VM_OBJECT,
2789 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
2790 (integer_t)object, (integer_t)object->pager, internal, 0,0);
2791 return(object);
2792 }
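/*
 * Illustrative sketch only of the two paths into this routine.  For an
 * internal object acquiring a default pager, vm_object_pager_create()
 * below calls:
 *
 *	vm_object_enter(pager, object->size, TRUE, TRUE, FALSE);
 *
 * A hypothetical external mapping path would instead look up (or
 * create) the object for a user-supplied memory object with something
 * like:
 *
 *	object = vm_object_enter(pager, size, FALSE, FALSE, TRUE);
 */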
2793
2794 /*
2795 * Routine: vm_object_pager_create
2796 * Purpose:
2797 * Create a memory object for an internal object.
2798 * In/out conditions:
2799 * The object is locked on entry and exit;
2800 * it may be unlocked within this call.
2801 * Limitations:
2802 * Only one thread may be performing a
2803 * vm_object_pager_create on an object at
2804 * a time. Presumably, only the pageout
2805 * daemon will be using this routine.
2806 */
2807
2808 void
2809 vm_object_pager_create(
2810 register vm_object_t object)
2811 {
2812 memory_object_t pager;
2813 vm_object_hash_entry_t entry;
2814 #if MACH_PAGEMAP
2815 vm_object_size_t size;
2816 vm_external_map_t map;
2817 #endif /* MACH_PAGEMAP */
2818
2819 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
2820 (integer_t)object, 0,0,0,0);
2821
2822 assert(object != kernel_object);
2823
2824 if (memory_manager_default_check() != KERN_SUCCESS)
2825 return;
2826
2827 /*
2828 * Prevent collapse or termination by holding a paging reference
2829 */
2830
2831 vm_object_paging_begin(object);
2832 if (object->pager_created) {
2833 /*
2834 * Someone else got to it first...
2835 * wait for them to finish initializing the ports
2836 */
2837 while (!object->pager_initialized) {
2838 vm_object_sleep(object,
2839 VM_OBJECT_EVENT_INITIALIZED,
2840 THREAD_UNINT);
2841 }
2842 vm_object_paging_end(object);
2843 return;
2844 }
2845
2846 /*
2847 * Indicate that a memory object has been assigned
2848 * before dropping the lock, to prevent a race.
2849 */
2850
2851 object->pager_created = TRUE;
2852 object->paging_offset = 0;
2853
2854 #if MACH_PAGEMAP
2855 size = object->size;
2856 #endif /* MACH_PAGEMAP */
2857 vm_object_unlock(object);
2858
2859 #if MACH_PAGEMAP
2860 map = vm_external_create(size);
2861 vm_object_lock(object);
2862 assert(object->size == size);
2863 object->existence_map = map;
2864 vm_object_unlock(object);
2865 #endif /* MACH_PAGEMAP */
2866
2867 /*
2868 * Create the [internal] pager, and associate it with this object.
2869 *
2870 * We make the association here so that vm_object_enter()
2871 * can look up the object to complete initializing it. No
2872 * user will ever map this object.
2873 */
2874 {
2875 memory_object_default_t dmm;
2876 vm_size_t cluster_size;
2877
2878 /* acquire a reference for the default memory manager */
2879 dmm = memory_manager_default_reference(&cluster_size);
2880 assert(cluster_size >= PAGE_SIZE);
2881
2882 object->cluster_size = cluster_size; /* XXX ??? */
2883 assert(object->temporary);
2884
2885 /* create our new memory object */
2886 (void) memory_object_create(dmm, object->size, &pager);
2887
2888 memory_object_default_deallocate(dmm);
2889 }
2890
2891 entry = vm_object_hash_entry_alloc(pager);
2892
2893 vm_object_cache_lock();
2894 vm_object_hash_insert(entry);
2895
2896 entry->object = object;
2897 vm_object_cache_unlock();
2898
2899 /*
2900 * A reference was returned by
2901 * memory_object_create(), and it is
2902 * copied by vm_object_enter().
2903 */
2904
2905 if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
2906 panic("vm_object_pager_create: mismatch");
2907
2908 /*
2909 * Drop the reference we were passed.
2910 */
2911 memory_object_deallocate(pager);
2912
2913 vm_object_lock(object);
2914
2915 /*
2916 * Release the paging reference
2917 */
2918 vm_object_paging_end(object);
2919 }
2920
2921 /*
2922 * Routine: vm_object_remove
2923 * Purpose:
2924 * Eliminate the pager/object association
2925 * for this pager.
2926 * Conditions:
2927 * The object cache must be locked.
2928 */
2929 __private_extern__ void
2930 vm_object_remove(
2931 vm_object_t object)
2932 {
2933 memory_object_t pager;
2934
2935 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
2936 vm_object_hash_entry_t entry;
2937
2938 entry = vm_object_hash_lookup(pager, FALSE);
2939 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
2940 entry->object = VM_OBJECT_NULL;
2941 }
2942
2943 }
2944
2945 /*
2946 * Global variables for vm_object_collapse():
2947 *
2948 * Counts for normal collapses and bypasses.
2949 * Debugging variables, to watch or disable collapse.
2950 */
2951 static long object_collapses = 0;
2952 static long object_bypasses = 0;
2953
2954 static boolean_t vm_object_collapse_allowed = TRUE;
2955 static boolean_t vm_object_bypass_allowed = TRUE;
2956
2957 static int vm_external_discarded;
2958 static int vm_external_collapsed;
2959
2960 unsigned long vm_object_collapse_encrypted = 0;
2961
2962 /*
2963 * Routine: vm_object_do_collapse
2964 * Purpose:
2965 * Collapse an object with the object backing it.
2966 * Pages in the backing object are moved into the
2967 * parent, and the backing object is deallocated.
2968 * Conditions:
2969 * Both objects and the cache are locked; the page
2970 * queues are unlocked.
2971 *
2972 */
2973 static void
2974 vm_object_do_collapse(
2975 vm_object_t object,
2976 vm_object_t backing_object)
2977 {
2978 vm_page_t p, pp;
2979 vm_object_offset_t new_offset, backing_offset;
2980 vm_object_size_t size;
2981
2982 backing_offset = object->shadow_offset;
2983 size = object->size;
2984
2985 /*
2986 * Move all in-memory pages from backing_object
2987 * to the parent. Pages that have been paged out
2988 * will be overwritten by any of the parent's
2989 * pages that shadow them.
2990 */
2991
2992 while (!queue_empty(&backing_object->memq)) {
2993
2994 p = (vm_page_t) queue_first(&backing_object->memq);
2995
2996 new_offset = (p->offset - backing_offset);
2997
2998 assert(!p->busy || p->absent);
2999
3000 /*
3001 * If the parent has a page here, or if
3002 * this page falls outside the parent,
3003 * dispose of it.
3004 *
3005 * Otherwise, move it as planned.
3006 */
3007
3008 if (p->offset < backing_offset || new_offset >= size) {
3009 VM_PAGE_FREE(p);
3010 } else {
3011 /*
3012 * ENCRYPTED SWAP:
3013 * The encryption key includes the "pager" and the
3014 * "paging_offset". These might not be the same in
3015 * the new object, so we can't just move an encrypted
3016 * page from one object to the other. We can't just
3017 * decrypt the page here either, because that would drop
3018 * the object lock.
3019 * The caller should check for encrypted pages before
3020 * attempting to collapse.
3021 */
3022 ASSERT_PAGE_DECRYPTED(p);
3023
3024 pp = vm_page_lookup(object, new_offset);
3025 if (pp == VM_PAGE_NULL) {
3026
3027 /*
3028 * Parent now has no page.
3029 * Move the backing object's page up.
3030 */
3031
3032 vm_page_rename(p, object, new_offset);
3033 #if MACH_PAGEMAP
3034 } else if (pp->absent) {
3035
3036 /*
3037 * Parent has an absent page...
3038 * it's not being paged in, so
3039 * it must really be missing from
3040 * the parent.
3041 *
3042 * Throw out the absent page...
3043 * any faults looking for that
3044 * page will restart with the new
3045 * one.
3046 */
3047
3048 VM_PAGE_FREE(pp);
3049 vm_page_rename(p, object, new_offset);
3050 #endif /* MACH_PAGEMAP */
3051 } else {
3052 assert(! pp->absent);
3053
3054 /*
3055 * Parent object has a real page.
3056 * Throw away the backing object's
3057 * page.
3058 */
3059 VM_PAGE_FREE(p);
3060 }
3061 }
3062 }
3063
3064 #if !MACH_PAGEMAP
3065 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL
3066 || (!backing_object->pager_created
3067 && backing_object->pager == MEMORY_OBJECT_NULL));
3068 #else
3069 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
3070 #endif /* !MACH_PAGEMAP */
3071
3072 if (backing_object->pager != MEMORY_OBJECT_NULL) {
3073 vm_object_hash_entry_t entry;
3074
3075 /*
3076 * Move the pager from backing_object to object.
3077 *
3078 * XXX We're only using part of the paging space
3079 * for keeps now... we ought to discard the
3080 * unused portion.
3081 */
3082
3083 assert(!object->paging_in_progress);
3084 object->pager = backing_object->pager;
3085 entry = vm_object_hash_lookup(object->pager, FALSE);
3086 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
3087 entry->object = object;
3088 object->pager_created = backing_object->pager_created;
3089 object->pager_control = backing_object->pager_control;
3090 object->pager_ready = backing_object->pager_ready;
3091 object->pager_initialized = backing_object->pager_initialized;
3092 object->cluster_size = backing_object->cluster_size;
3093 object->paging_offset =
3094 backing_object->paging_offset + backing_offset;
3095 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
3096 memory_object_control_collapse(object->pager_control,
3097 object);
3098 }
3099 }
3100
3101 vm_object_cache_unlock();
3102
3103 #if MACH_PAGEMAP
3104 /*
3105 * If the shadow offset is 0, use the existence map from
3106 * the backing object if there is one. If the shadow offset is
3107 * not zero, toss it.
3108 *
3109 * XXX - If the shadow offset is not 0 then a bit copy is needed
3110 * if the map is to be salvaged. For now, we just toss the
3111 * old map, giving the collapsed object no map. This means that
3112 * the pager is invoked for zero fill pages. If analysis shows
3113 * that this happens frequently and is a performance hit, then
3114 * this code should be fixed to salvage the map.
3115 */
3116 assert(object->existence_map == VM_EXTERNAL_NULL);
3117 if (backing_offset || (size != backing_object->size)) {
3118 vm_external_discarded++;
3119 vm_external_destroy(backing_object->existence_map,
3120 backing_object->size);
3121 }
3122 else {
3123 vm_external_collapsed++;
3124 object->existence_map = backing_object->existence_map;
3125 }
3126 backing_object->existence_map = VM_EXTERNAL_NULL;
3127 #endif /* MACH_PAGEMAP */
3128
3129 /*
3130 * Object now shadows whatever backing_object did.
3131 * Note that the reference to backing_object->shadow
3132 * moves from within backing_object to within object.
3133 */
3134
3135 assert(!object->phys_contiguous);
3136 assert(!backing_object->phys_contiguous);
3137 object->shadow = backing_object->shadow;
3138 if (object->shadow) {
3139 object->shadow_offset += backing_object->shadow_offset;
3140 } else {
3141 /* no shadow, therefore no shadow offset... */
3142 object->shadow_offset = 0;
3143 }
3144 assert((object->shadow == VM_OBJECT_NULL) ||
3145 (object->shadow->copy != backing_object));
3146
3147 /*
3148 * Discard backing_object.
3149 *
3150 * Since the backing object has no pages, no
3151 * pager left, and no object references within it,
3152 * all that is necessary is to dispose of it.
3153 */
3154
3155 assert((backing_object->ref_count == 1) &&
3156 (backing_object->resident_page_count == 0) &&
3157 (backing_object->paging_in_progress == 0));
3158
3159 backing_object->alive = FALSE;
3160 vm_object_unlock(backing_object);
3161
3162 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
3163 (integer_t)backing_object, 0,0,0,0);
3164
3165 zfree(vm_object_zone, backing_object);
3166
3167 object_collapses++;
3168 }
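/*
 * Illustrative sketch only: the shadow chain before and after a
 * collapse of backing_object into object.
 *
 *	before:	object -> backing_object -> grandparent
 *	after:	object -----------------> grandparent
 *
 * backing_object's resident pages, pager association and paging offset
 * have been absorbed by object, and backing_object itself is freed.
 */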
3169
3170 static void
3171 vm_object_do_bypass(
3172 vm_object_t object,
3173 vm_object_t backing_object)
3174 {
3175 /*
3176 * Make the parent shadow the next object
3177 * in the chain.
3178 */
3179
3180 #if TASK_SWAPPER
3181 /*
3182 * Do object reference in-line to
3183 * conditionally increment shadow's
3184 * residence count. If object is not
3185 * resident, leave residence count
3186 * on shadow alone.
3187 */
3188 if (backing_object->shadow != VM_OBJECT_NULL) {
3189 vm_object_lock(backing_object->shadow);
3190 backing_object->shadow->ref_count++;
3191 if (object->res_count != 0)
3192 vm_object_res_reference(backing_object->shadow);
3193 vm_object_unlock(backing_object->shadow);
3194 }
3195 #else /* TASK_SWAPPER */
3196 vm_object_reference(backing_object->shadow);
3197 #endif /* TASK_SWAPPER */
3198
3199 assert(!object->phys_contiguous);
3200 assert(!backing_object->phys_contiguous);
3201 object->shadow = backing_object->shadow;
3202 if (object->shadow) {
3203 object->shadow_offset += backing_object->shadow_offset;
3204 } else {
3205 /* no shadow, therefore no shadow offset... */
3206 object->shadow_offset = 0;
3207 }
3208
3209 /*
3210 * Backing object might have had a copy pointer
3211 * to us. If it did, clear it.
3212 */
3213 if (backing_object->copy == object) {
3214 backing_object->copy = VM_OBJECT_NULL;
3215 }
3216
3217 /*
3218 * Drop the reference count on backing_object.
3219 #if TASK_SWAPPER
3220 * Since its ref_count was at least 2, it
3221 * will not vanish; so we don't need to call
3222 * vm_object_deallocate.
3223 * [FBDP: that doesn't seem to be true any more]
3224 *
3225 * The res_count on the backing object is
3226 * conditionally decremented. It's possible
3227 * (via vm_pageout_scan) to get here with
3228 * a "swapped" object, which has a 0 res_count,
3229 * in which case, the backing object res_count
3230 * is already down by one.
3231 #else
3232 * Don't call vm_object_deallocate unless
3233 * ref_count drops to zero.
3234 *
3235 * The ref_count can drop to zero here if the
3236 * backing object could be bypassed but not
3237 * collapsed, such as when the backing object
3238 * is temporary and cachable.
3239 #endif
3240 */
3241 if (backing_object->ref_count > 1) {
3242 backing_object->ref_count--;
3243 #if TASK_SWAPPER
3244 if (object->res_count != 0)
3245 vm_object_res_deallocate(backing_object);
3246 assert(backing_object->ref_count > 0);
3247 #endif /* TASK_SWAPPER */
3248 vm_object_unlock(backing_object);
3249 } else {
3250
3251 /*
3252 * Drop locks so that we can deallocate
3253 * the backing object.
3254 */
3255
3256 #if TASK_SWAPPER
3257 if (object->res_count == 0) {
3258 /* XXX get a reference for the deallocate below */
3259 vm_object_res_reference(backing_object);
3260 }
3261 #endif /* TASK_SWAPPER */
3262 vm_object_unlock(object);
3263 vm_object_unlock(backing_object);
3264 vm_object_deallocate(backing_object);
3265
3266 /*
3267 * Relock object. We don't have to reverify
3268 * its state since vm_object_collapse will
3269 * do that for us as it starts at the
3270 * top of its loop.
3271 */
3272
3273 vm_object_lock(object);
3274 }
3275
3276 object_bypasses++;
3277 }
3278
3279
3280 /*
3281 * vm_object_collapse:
3282 *
3283 * Perform an object collapse or an object bypass if appropriate.
3284 * The real work of collapsing and bypassing is performed in
3285 * the routines vm_object_do_collapse and vm_object_do_bypass.
3286 *
3287 * Requires that the object be locked and the page queues be unlocked.
3288 *
3289 */
3290 static unsigned long vm_object_collapse_calls = 0;
3291 static unsigned long vm_object_collapse_objects = 0;
3292 static unsigned long vm_object_collapse_do_collapse = 0;
3293 static unsigned long vm_object_collapse_do_bypass = 0;
3294 __private_extern__ void
3295 vm_object_collapse(
3296 register vm_object_t object,
3297 register vm_object_offset_t hint_offset)
3298 {
3299 register vm_object_t backing_object;
3300 register unsigned int rcount;
3301 register unsigned int size;
3302 vm_object_offset_t collapse_min_offset;
3303 vm_object_offset_t collapse_max_offset;
3304 vm_page_t page;
3305 vm_object_t original_object;
3306
3307 vm_object_collapse_calls++;
3308
3309 if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) {
3310 return;
3311 }
3312
3313 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
3314 (integer_t)object, 0,0,0,0);
3315
3316 if (object == VM_OBJECT_NULL)
3317 return;
3318
3319 original_object = object;
3320
3321 while (TRUE) {
3322 vm_object_collapse_objects++;
3323 /*
3324 * Verify that the conditions are right for either
3325 * collapse or bypass:
3326 */
3327
3328 /*
3329 * There is a backing object, and
3330 */
3331
3332 backing_object = object->shadow;
3333 if (backing_object == VM_OBJECT_NULL) {
3334 if (object != original_object) {
3335 vm_object_unlock(object);
3336 }
3337 return;
3338 }
3339
3340 /*
3341 * No pages in the object are currently
3342 * being paged out, and
3343 */
3344 if (object->paging_in_progress != 0 ||
3345 object->absent_count != 0) {
3346 /* try and collapse the rest of the shadow chain */
3347 vm_object_lock(backing_object);
3348 if (object != original_object) {
3349 vm_object_unlock(object);
3350 }
3351 object = backing_object;
3352 continue;
3353 }
3354
3355 vm_object_lock(backing_object);
3356
3357 /*
3358 * ...
3359 * The backing object is not read_only,
3360 * and no pages in the backing object are
3361 * currently being paged out.
3362 * The backing object is internal.
3363 *
3364 */
3365
3366 if (!backing_object->internal ||
3367 backing_object->paging_in_progress != 0) {
3368 /* try and collapse the rest of the shadow chain */
3369 if (object != original_object) {
3370 vm_object_unlock(object);
3371 }
3372 object = backing_object;
3373 continue;
3374 }
3375
3376 /*
3377 * The backing object can't be a copy-object:
3378 * the shadow_offset for the copy-object must stay
3379 * as 0. Furthermore (for the 'we have all the
3380 * pages' case), if we bypass backing_object and
3381 * just shadow the next object in the chain, old
3382 * pages from that object would then have to be copied
3383 * BOTH into the (former) backing_object and into the
3384 * parent object.
3385 */
3386 if (backing_object->shadow != VM_OBJECT_NULL &&
3387 backing_object->shadow->copy == backing_object) {
3388 /* try and collapse the rest of the shadow chain */
3389 if (object != original_object) {
3390 vm_object_unlock(object);
3391 }
3392 object = backing_object;
3393 continue;
3394 }
3395
3396 /*
3397 * We can now try to either collapse the backing
3398 * object (if the parent is the only reference to
3399 * it) or (perhaps) remove the parent's reference
3400 * to it.
3401 *
3402 * If there is exactly one reference to the backing
3403 * object, we may be able to collapse it into the
3404 * parent.
3405 *
3406 * If MACH_PAGEMAP is defined:
3407 * The parent must not have a pager created for it,
3408 * since collapsing a backing_object dumps new pages
3409 * into the parent that its pager doesn't know about
3410 * (and the collapse code can't merge the existence
3411 * maps).
3412 * Otherwise:
3413 * As long as one of the objects is still not known
3414 * to the pager, we can collapse them.
3415 */
3416 if (backing_object->ref_count == 1 &&
3417 (!object->pager_created
3418 #if !MACH_PAGEMAP
3419 || !backing_object->pager_created
3420 #endif /*!MACH_PAGEMAP */
3421 ) && vm_object_collapse_allowed) {
3422
3423 XPR(XPR_VM_OBJECT,
3424 "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
3425 (integer_t)backing_object, (integer_t)object,
3426 (integer_t)backing_object->pager,
3427 (integer_t)backing_object->pager_control, 0);
3428
3429 /*
3430 * We need the cache lock for collapsing,
3431 * but we must not deadlock.
3432 */
3433
3434 if (! vm_object_cache_lock_try()) {
3435 if (object != original_object) {
3436 vm_object_unlock(object);
3437 }
3438 vm_object_unlock(backing_object);
3439 return;
3440 }
3441
3442 /*
3443 * ENCRYPTED SWAP
3444 * We can't collapse the object if it contains
3445 * any encrypted page, because the encryption key
3446 * includes the <object,offset> info. We can't
3447 * drop the object lock in vm_object_do_collapse()
3448 * so we can't decrypt the page there either.
3449 */
3450 if (vm_pages_encrypted) {
3451 collapse_min_offset = object->shadow_offset;
3452 collapse_max_offset =
3453 object->shadow_offset + object->size;
3454 queue_iterate(&backing_object->memq,
3455 page, vm_page_t, listq) {
3456 if (page->encrypted &&
3457 (page->offset >=
3458 collapse_min_offset) &&
3459 (page->offset <
3460 collapse_max_offset)) {
3461 /*
3462 * We found an encrypted page
3463 * in the backing object,
3464 * within the range covered
3465 * by the parent object: we can
3466 * not collapse them.
3467 */
3468 vm_object_collapse_encrypted++;
3469 vm_object_cache_unlock();
3470 goto try_bypass;
3471 }
3472 }
3473 }
3474
3475 /*
3476 * Collapse the object with its backing
3477 * object, and try again with the object's
3478 * new backing object.
3479 */
3480
3481 vm_object_do_collapse(object, backing_object);
3482 vm_object_collapse_do_collapse++;
3483 continue;
3484 }
3485
3486 try_bypass:
3487 /*
3488 * Collapsing the backing object was not possible
3489 * or permitted, so let's try bypassing it.
3490 */
3491
3492 if (! vm_object_bypass_allowed) {
3493 /* try and collapse the rest of the shadow chain */
3494 if (object != original_object) {
3495 vm_object_unlock(object);
3496 }
3497 object = backing_object;
3498 continue;
3499 }
3500
3501
3502 /*
3503 * If the object doesn't have all its pages present,
3504 * we have to make sure no pages in the backing object
3505 * "show through" before bypassing it.
3506 */
3507 size = atop(object->size);
3508 rcount = object->resident_page_count;
3509 if (rcount != size) {
3510 vm_object_offset_t offset;
3511 vm_object_offset_t backing_offset;
3512 unsigned int backing_rcount;
3513 unsigned int lookups = 0;
3514
3515 /*
3516 * If the backing object has a pager but no pagemap,
3517 * then we cannot bypass it, because we don't know
3518 * what pages it has.
3519 */
3520 if (backing_object->pager_created
3521 #if MACH_PAGEMAP
3522 && (backing_object->existence_map == VM_EXTERNAL_NULL)
3523 #endif /* MACH_PAGEMAP */
3524 ) {
3525 /* try and collapse the rest of the shadow chain */
3526 if (object != original_object) {
3527 vm_object_unlock(object);
3528 }
3529 object = backing_object;
3530 continue;
3531 }
3532
3533 /*
3534 * If the object has a pager but no pagemap,
3535 * then we cannot bypass it, because we don't know
3536 * what pages it has.
3537 */
3538 if (object->pager_created
3539 #if MACH_PAGEMAP
3540 && (object->existence_map == VM_EXTERNAL_NULL)
3541 #endif /* MACH_PAGEMAP */
3542 ) {
3543 /* try and collapse the rest of the shadow chain */
3544 if (object != original_object) {
3545 vm_object_unlock(object);
3546 }
3547 object = backing_object;
3548 continue;
3549 }
3550
3551 /*
3552 * If all of the pages in the backing object are
3553 * shadowed by the parent object, the parent
3554 * object no longer has to shadow the backing
3555 * object; it can shadow the next one in the
3556 * chain.
3557 *
3558 * If the backing object has existence info,
3559 * we must examine its existence info
3560 * as well.
3561 *
3562 */
3563
3564 backing_offset = object->shadow_offset;
3565 backing_rcount = backing_object->resident_page_count;
3566
3567 #define EXISTS_IN_OBJECT(obj, off, rc) \
3568 (vm_external_state_get((obj)->existence_map, \
3569 (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
3570 ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
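/*
 * Note on the macro above (descriptive only): "off" is considered
 * backed in "obj" if the existence map says the page is on backing
 * store, or if a resident page is found by vm_page_lookup() while
 * "rc" uncounted resident pages remain.  Each successful lookup
 * decrements the caller's residual count and bumps "lookups", which
 * drives the delay() throttle in the scans below.
 */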
3571
3572 /*
3573 * Check the hint location first
3574 * (since it is often the quickest way out of here).
3575 */
3576 if (object->cow_hint != ~(vm_offset_t)0)
3577 hint_offset = (vm_object_offset_t)object->cow_hint;
3578 else
3579 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
3580 (hint_offset - 8 * PAGE_SIZE_64) : 0;
3581
3582 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
3583 backing_offset, backing_rcount) &&
3584 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
3585 /* dependency right at the hint */
3586 object->cow_hint = (vm_offset_t)hint_offset;
3587 /* try and collapse the rest of the shadow chain */
3588 if (object != original_object) {
3589 vm_object_unlock(object);
3590 }
3591 object = backing_object;
3592 continue;
3593 }
3594
3595 /*
3596 * If the object's window onto the backing_object
3597 * is large compared to the number of resident
3598 * pages in the backing object, it makes sense to
3599 * walk the backing_object's resident pages first.
3600 *
3601 * NOTE: Pages may be in both the existence map and
3602 * resident. So, we can't permanently decrement
3603 * the rcount here because the second loop may
3604 * find the same pages in the backing object's
3605 * existence map that we found here and we would
3606 * double-decrement the rcount. We also may or
3607 * may not have found the
3608 */
3609 if (backing_rcount && size >
3610 ((backing_object->existence_map) ?
3611 backing_rcount : (backing_rcount >> 1))) {
3612 unsigned int rc = rcount;
3613 vm_page_t p;
3614
3615 backing_rcount = backing_object->resident_page_count;
3616 p = (vm_page_t)queue_first(&backing_object->memq);
3617 do {
3618 /* Until we get more than one lookup lock */
3619 if (lookups > 256) {
3620 lookups = 0;
3621 delay(1);
3622 }
3623
3624 offset = (p->offset - backing_offset);
3625 if (offset < object->size &&
3626 offset != hint_offset &&
3627 !EXISTS_IN_OBJECT(object, offset, rc)) {
3628 /* found a dependency */
3629 object->cow_hint = (vm_offset_t)offset;
3630 break;
3631 }
3632 p = (vm_page_t) queue_next(&p->listq);
3633
3634 } while (--backing_rcount);
3635 if (backing_rcount != 0 ) {
3636 /* try and collapse the rest of the shadow chain */
3637 if (object != original_object) {
3638 vm_object_unlock(object);
3639 }
3640 object = backing_object;
3641 continue;
3642 }
3643 }
3644
3645 /*
3646 * Walk through the offsets looking for pages in the
3647 * backing object that show through to the object.
3648 */
3649 if (backing_rcount || backing_object->existence_map) {
3650 offset = hint_offset;
3651
3652 while((offset =
3653 (offset + PAGE_SIZE_64 < object->size) ?
3654 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
3655
3656 /* Until we get more than one lookup lock */
3657 if (lookups > 256) {
3658 lookups = 0;
3659 delay(1);
3660 }
3661
3662 if (EXISTS_IN_OBJECT(backing_object, offset +
3663 backing_offset, backing_rcount) &&
3664 !EXISTS_IN_OBJECT(object, offset, rcount)) {
3665 /* found a dependency */
3666 object->cow_hint = (vm_offset_t)offset;
3667 break;
3668 }
3669 }
3670 if (offset != hint_offset) {
3671 /* try and collapse the rest of the shadow chain */
3672 if (object != original_object) {
3673 vm_object_unlock(object);
3674 }
3675 object = backing_object;
3676 continue;
3677 }
3678 }
3679 }
3680
3681 /* reset the offset hint for any objects deeper in the chain */
3682 object->cow_hint = (vm_offset_t)0;
3683
3684 /*
3685 * All interesting pages in the backing object
3686 * already live in the parent or its pager.
3687 * Thus we can bypass the backing object.
3688 */
3689
3690 vm_object_do_bypass(object, backing_object);
3691 vm_object_collapse_do_bypass++;
3692
3693 /*
3694 * Try again with this object's new backing object.
3695 */
3696
3697 continue;
3698 }
3699
3700 if (object != original_object) {
3701 vm_object_unlock(object);
3702 }
3703 }
3704
3705 /*
3706 * Routine: vm_object_page_remove: [internal]
3707 * Purpose:
3708 * Removes all physical pages in the specified
3709 * object range from the object's list of pages.
3710 *
3711 * In/out conditions:
3712 * The object must be locked.
3713 * The object must not have paging_in_progress, usually
3714 * guaranteed by not having a pager.
3715 */
3716 unsigned int vm_object_page_remove_lookup = 0;
3717 unsigned int vm_object_page_remove_iterate = 0;
3718
3719 __private_extern__ void
3720 vm_object_page_remove(
3721 register vm_object_t object,
3722 register vm_object_offset_t start,
3723 register vm_object_offset_t end)
3724 {
3725 register vm_page_t p, next;
3726
3727 /*
3728 * One and two page removals are most popular.
3729 * The factor of 16 here is somewhat arbitrary.
3730 * It balances vm_object_lookup vs iteration.
3731 */
3732
3733 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
3734 vm_object_page_remove_lookup++;
3735
3736 for (; start < end; start += PAGE_SIZE_64) {
3737 p = vm_page_lookup(object, start);
3738 if (p != VM_PAGE_NULL) {
3739 assert(!p->cleaning && !p->pageout);
3740 if (!p->fictitious)
3741 pmap_disconnect(p->phys_page);
3742 VM_PAGE_FREE(p);
3743 }
3744 }
3745 } else {
3746 vm_object_page_remove_iterate++;
3747
3748 p = (vm_page_t) queue_first(&object->memq);
3749 while (!queue_end(&object->memq, (queue_entry_t) p)) {
3750 next = (vm_page_t) queue_next(&p->listq);
3751 if ((start <= p->offset) && (p->offset < end)) {
3752 assert(!p->cleaning && !p->pageout);
3753 if (!p->fictitious)
3754 pmap_disconnect(p->phys_page);
3755 VM_PAGE_FREE(p);
3756 }
3757 p = next;
3758 }
3759 }
3760 }
3761
3762
3763 /*
3764 * Routine: vm_object_coalesce
3765 * Function: Coalesces two objects backing up adjoining
3766 * regions of memory into a single object.
3767 *
3768 * returns TRUE if objects were combined.
3769 *
3770 * NOTE: Only works at the moment if the second object is NULL -
3771 * if it's not, which object do we lock first?
3772 *
3773 * Parameters:
3774 * prev_object First object to coalesce
3775 * prev_offset Offset into prev_object
3776 * next_object Second object to coalesce
3777 * next_offset Offset into next_object
3778 *
3779 * prev_size Size of reference to prev_object
3780 * next_size Size of reference to next_object
3781 *
3782 * Conditions:
3783 * The object(s) must *not* be locked. The map must be locked
3784 * to preserve the reference to the object(s).
3785 */
3786 static int vm_object_coalesce_count = 0;
3787
3788 __private_extern__ boolean_t
3789 vm_object_coalesce(
3790 register vm_object_t prev_object,
3791 vm_object_t next_object,
3792 vm_object_offset_t prev_offset,
3793 __unused vm_object_offset_t next_offset,
3794 vm_object_size_t prev_size,
3795 vm_object_size_t next_size)
3796 {
3797 vm_object_size_t newsize;
3798
3799 #ifdef lint
3800 next_offset++;
3801 #endif /* lint */
3802
3803 if (next_object != VM_OBJECT_NULL) {
3804 return(FALSE);
3805 }
3806
3807 if (prev_object == VM_OBJECT_NULL) {
3808 return(TRUE);
3809 }
3810
3811 XPR(XPR_VM_OBJECT,
3812 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
3813 (integer_t)prev_object, prev_offset, prev_size, next_size, 0);
3814
3815 vm_object_lock(prev_object);
3816
3817 /*
3818 * Try to collapse the object first
3819 */
3820 vm_object_collapse(prev_object, prev_offset);
3821
3822 /*
3823 * Can't coalesce if pages not mapped to
3824 * prev_entry may be in use in any way:
3825 * . more than one reference
3826 * . paged out
3827 * . shadows another object
3828 * . has a copy elsewhere
3829 * . is purgable
3830 * . paging references (pages might be in page-list)
3831 */
3832
3833 if ((prev_object->ref_count > 1) ||
3834 prev_object->pager_created ||
3835 (prev_object->shadow != VM_OBJECT_NULL) ||
3836 (prev_object->copy != VM_OBJECT_NULL) ||
3837 (prev_object->true_share != FALSE) ||
3838 (prev_object->purgable != VM_OBJECT_NONPURGABLE) ||
3839 (prev_object->paging_in_progress != 0)) {
3840 vm_object_unlock(prev_object);
3841 return(FALSE);
3842 }
3843
3844 vm_object_coalesce_count++;
3845
3846 /*
3847 * Remove any pages that may still be in the object from
3848 * a previous deallocation.
3849 */
3850 vm_object_page_remove(prev_object,
3851 prev_offset + prev_size,
3852 prev_offset + prev_size + next_size);
3853
3854 /*
3855 * Extend the object if necessary.
3856 */
3857 newsize = prev_offset + prev_size + next_size;
3858 if (newsize > prev_object->size) {
3859 #if MACH_PAGEMAP
3860 /*
3861 * We cannot extend an object that has existence info,
3862 * since the existence info might then fail to cover
3863 * the entire object.
3864 *
3865 * This assertion must be true because the object
3866 * has no pager, and we only create existence info
3867 * for objects with pagers.
3868 */
3869 assert(prev_object->existence_map == VM_EXTERNAL_NULL);
3870 #endif /* MACH_PAGEMAP */
3871 prev_object->size = newsize;
3872 }
3873
3874 vm_object_unlock(prev_object);
3875 return(TRUE);
3876 }
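/*
 * Illustrative sketch only (names are placeholders): map code extends
 * an existing allocation by asking whether prev_object can simply grow
 * to cover the adjoining range:
 *
 *	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
 *			       prev_offset, (vm_object_offset_t) 0,
 *			       prev_size, next_size)) {
 *		... reuse prev_object for the combined range ...
 *	}
 */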
3877
3878 /*
3879 * Attach a set of physical pages to an object, so that they can
3880 * be mapped by mapping the object. Typically used to map IO memory.
3881 *
3882 * The mapping function and its private data are used to obtain the
3883 * physical addresses for each page to be mapped.
3884 */
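/*
 * Hedged sketch (comment only, not compiled): a minimal map_fn callback
 * and call. The names "example_map_fn" and "io_base" are hypothetical;
 * the callback simply adds the object offset to a physical base address
 * handed in through map_fn_data:
 *
 *	static vm_object_offset_t
 *	example_map_fn(void *map_fn_data, vm_object_offset_t offset)
 *	{
 *		return *(vm_object_offset_t *) map_fn_data + offset;
 *	}
 *
 *	vm_object_offset_t io_base = 0x80000000ULL;	hypothetical device base
 *
 *	vm_object_page_map(object, 0, size, example_map_fn, (void *) &io_base);
 *
 * Each page in the range ends up as a wired, private page whose physical
 * address was supplied by the callback.
 */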
3885 void
3886 vm_object_page_map(
3887 vm_object_t object,
3888 vm_object_offset_t offset,
3889 vm_object_size_t size,
3890 vm_object_offset_t (*map_fn)(void *map_fn_data,
3891 vm_object_offset_t offset),
3892 void *map_fn_data) /* private to map_fn */
3893 {
3894 int num_pages;
3895 int i;
3896 vm_page_t m;
3897 vm_page_t old_page;
3898 vm_object_offset_t addr;
3899
3900 num_pages = atop_64(size);
3901
3902 for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
3903
3904 addr = (*map_fn)(map_fn_data, offset);
3905
3906 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
3907 vm_page_more_fictitious();
3908
3909 vm_object_lock(object);
3910 if ((old_page = vm_page_lookup(object, offset))
3911 != VM_PAGE_NULL)
3912 {
3913 vm_page_lock_queues();
3914 vm_page_free(old_page);
3915 vm_page_unlock_queues();
3916 }
3917
3918 vm_page_init(m, addr);
3919 /* private normally requires lock_queues but since we */
3920 /* are initializing the page, it's not necessary here */
3921 m->private = TRUE; /* don't free page */
3922 m->wire_count = 1;
3923 vm_page_insert(m, object, offset);
3924
3925 PAGE_WAKEUP_DONE(m);
3926 vm_object_unlock(object);
3927 }
3928 }
3929
3930 #include <mach_kdb.h>
3931
3932 #if MACH_KDB
3933 #include <ddb/db_output.h>
3934 #include <vm/vm_print.h>
3935
3936 #define printf kdbprintf
3937
3938 extern boolean_t vm_object_cached(
3939 vm_object_t object);
3940
3941 extern void print_bitstring(
3942 char byte);
3943
3944 boolean_t vm_object_print_pages = FALSE;
3945
3946 void
3947 print_bitstring(
3948 char byte)
3949 {
3950 printf("%c%c%c%c%c%c%c%c",
3951 ((byte & (1 << 0)) ? '1' : '0'),
3952 ((byte & (1 << 1)) ? '1' : '0'),
3953 ((byte & (1 << 2)) ? '1' : '0'),
3954 ((byte & (1 << 3)) ? '1' : '0'),
3955 ((byte & (1 << 4)) ? '1' : '0'),
3956 ((byte & (1 << 5)) ? '1' : '0'),
3957 ((byte & (1 << 6)) ? '1' : '0'),
3958 ((byte & (1 << 7)) ? '1' : '0'));
3959 }
3960
3961 boolean_t
3962 vm_object_cached(
3963 register vm_object_t object)
3964 {
3965 register vm_object_t o;
3966
3967 queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
3968 if (object == o) {
3969 return TRUE;
3970 }
3971 }
3972 return FALSE;
3973 }
3974
3975 #if MACH_PAGEMAP
3976 /*
3977 * vm_external_print: [ debug ]
3978 */
3979 void
3980 vm_external_print(
3981 vm_external_map_t emap,
3982 vm_size_t size)
3983 {
3984 if (emap == VM_EXTERNAL_NULL) {
3985 printf("0 ");
3986 } else {
3987 vm_size_t existence_size = stob(size);
3988 printf("{ size=%d, map=[", existence_size);
3989 if (existence_size > 0) {
3990 print_bitstring(emap[0]);
3991 }
3992 if (existence_size > 1) {
3993 print_bitstring(emap[1]);
3994 }
3995 if (existence_size > 2) {
3996 printf("...");
3997 print_bitstring(emap[existence_size-1]);
3998 }
3999 printf("] }\n");
4000 }
4001 return;
4002 }
4003 #endif /* MACH_PAGEMAP */
4004
4005 int
4006 vm_follow_object(
4007 vm_object_t object)
4008 {
4009 int count = 0;
4010 int orig_db_indent = db_indent;
4011
4012 while (TRUE) {
4013 if (object == VM_OBJECT_NULL) {
4014 db_indent = orig_db_indent;
4015 return count;
4016 }
4017
4018 count += 1;
4019
4020 iprintf("object 0x%x", object);
4021 printf(", shadow=0x%x", object->shadow);
4022 printf(", copy=0x%x", object->copy);
4023 printf(", pager=0x%x", object->pager);
4024 printf(", ref=%d\n", object->ref_count);
4025
4026 db_indent += 2;
4027 object = object->shadow;
4028 }
4029
4030 }
4031
4032 /*
4033 * vm_object_print: [ debug ]
4034 */
4035 void
4036 vm_object_print(
4037 db_addr_t db_addr,
4038 __unused boolean_t have_addr,
4039 __unused int arg_count,
4040 __unused char *modif)
4041 {
4042 vm_object_t object;
4043 register vm_page_t p;
4044 const char *s;
4045
4046 register int count;
4047
4048 object = (vm_object_t) (long) db_addr;
4049 if (object == VM_OBJECT_NULL)
4050 return;
4051
4052 iprintf("object 0x%x\n", object);
4053
4054 db_indent += 2;
4055
4056 iprintf("size=0x%x", object->size);
4057 printf(", cluster=0x%x", object->cluster_size);
4058 printf(", memq_hint=%p", object->memq_hint);
4059 printf(", ref_count=%d\n", object->ref_count);
4060 iprintf("");
4061 #if TASK_SWAPPER
4062 printf("res_count=%d, ", object->res_count);
4063 #endif /* TASK_SWAPPER */
4064 printf("resident_page_count=%d\n", object->resident_page_count);
4065
4066 iprintf("shadow=0x%x", object->shadow);
4067 if (object->shadow) {
4068 register int i = 0;
4069 vm_object_t shadow = object;
4070 while((shadow = shadow->shadow))
4071 i++;
4072 printf(" (depth %d)", i);
4073 }
4074 printf(", copy=0x%x", object->copy);
4075 printf(", shadow_offset=0x%x", object->shadow_offset);
4076 printf(", last_alloc=0x%x\n", object->last_alloc);
4077
4078 iprintf("pager=0x%x", object->pager);
4079 printf(", paging_offset=0x%x", object->paging_offset);
4080 printf(", pager_control=0x%x\n", object->pager_control);
4081
4082 iprintf("copy_strategy=%d[", object->copy_strategy);
4083 switch (object->copy_strategy) {
4084 case MEMORY_OBJECT_COPY_NONE:
4085 printf("copy_none");
4086 break;
4087
4088 case MEMORY_OBJECT_COPY_CALL:
4089 printf("copy_call");
4090 break;
4091
4092 case MEMORY_OBJECT_COPY_DELAY:
4093 printf("copy_delay");
4094 break;
4095
4096 case MEMORY_OBJECT_COPY_SYMMETRIC:
4097 printf("copy_symmetric");
4098 break;
4099
4100 case MEMORY_OBJECT_COPY_INVALID:
4101 printf("copy_invalid");
4102 break;
4103
4104 default:
4105 printf("?");
4106 }
4107 printf("]");
4108 printf(", absent_count=%d\n", object->absent_count);
4109
4110 iprintf("all_wanted=0x%x<", object->all_wanted);
4111 s = "";
4112 if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
4113 printf("%sinit", s);
4114 s = ",";
4115 }
4116 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
4117 printf("%sready", s);
4118 s = ",";
4119 }
4120 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
4121 printf("%spaging", s);
4122 s = ",";
4123 }
4124 if (vm_object_wanted(object, VM_OBJECT_EVENT_ABSENT_COUNT)) {
4125 printf("%sabsent", s);
4126 s = ",";
4127 }
4128 if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
4129 printf("%slock", s);
4130 s = ",";
4131 }
4132 if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
4133 printf("%suncaching", s);
4134 s = ",";
4135 }
4136 if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
4137 printf("%scopy_call", s);
4138 s = ",";
4139 }
4140 if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
4141 printf("%scaching", s);
4142 s = ",";
4143 }
4144 printf(">");
4145 printf(", paging_in_progress=%d\n", object->paging_in_progress);
4146
4147 iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
4148 (object->pager_created ? "" : "!"),
4149 (object->pager_initialized ? "" : "!"),
4150 (object->pager_ready ? "" : "!"),
4151 (object->can_persist ? "" : "!"),
4152 (object->pager_trusted ? "" : "!"),
4153 (object->pageout ? "" : "!"),
4154 (object->internal ? "internal" : "external"),
4155 (object->temporary ? "temporary" : "permanent"));
4156 iprintf("%salive, %spurgable, %spurgable_volatile, %spurgable_empty, %sshadowed, %scached, %sprivate\n",
4157 (object->alive ? "" : "!"),
4158 ((object->purgable != VM_OBJECT_NONPURGABLE) ? "" : "!"),
4159 ((object->purgable == VM_OBJECT_PURGABLE_VOLATILE) ? "" : "!"),
4160 ((object->purgable == VM_OBJECT_PURGABLE_EMPTY) ? "" : "!"),
4161 (object->shadowed ? "" : "!"),
4162 (vm_object_cached(object) ? "" : "!"),
4163 (object->private ? "" : "!"));
4164 iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
4165 (object->advisory_pageout ? "" : "!"),
4166 (object->silent_overwrite ? "" : "!"));
4167
4168 #if MACH_PAGEMAP
4169 iprintf("existence_map=");
4170 vm_external_print(object->existence_map, object->size);
4171 #endif /* MACH_PAGEMAP */
4172 #if MACH_ASSERT
4173 iprintf("paging_object=0x%x\n", object->paging_object);
4174 #endif /* MACH_ASSERT */
4175
4176 if (vm_object_print_pages) {
4177 count = 0;
4178 p = (vm_page_t) queue_first(&object->memq);
4179 while (!queue_end(&object->memq, (queue_entry_t) p)) {
4180 if (count == 0) {
4181 iprintf("memory:=");
4182 } else if (count == 2) {
4183 printf("\n");
4184 iprintf(" ...");
4185 count = 0;
4186 } else {
4187 printf(",");
4188 }
4189 count++;
4190
4191 printf("(off=0x%llX,page=%p)", p->offset, p);
4192 p = (vm_page_t) queue_next(&p->listq);
4193 }
4194 if (count != 0) {
4195 printf("\n");
4196 }
4197 }
4198 db_indent -= 2;
4199 }
4200
4201
4202 /*
4203 * vm_object_find [ debug ]
4204 *
4205 * Find all tasks which reference the given vm_object.
4206 */
4207
4208 boolean_t vm_object_find(vm_object_t object);
4209 boolean_t vm_object_print_verbose = FALSE;
4210
4211 boolean_t
4212 vm_object_find(
4213 vm_object_t object)
4214 {
4215 task_t task;
4216 vm_map_t map;
4217 vm_map_entry_t entry;
4218 processor_set_t pset = &default_pset;
4219 boolean_t found = FALSE;
4220
4221 queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
4222 map = task->map;
4223 for (entry = vm_map_first_entry(map);
4224 entry && entry != vm_map_to_entry(map);
4225 entry = entry->vme_next) {
4226
4227 vm_object_t obj;
4228
4229 /*
4230 * For the time being, skip submaps.
4231 * Only the kernel can have submaps,
4232 * so unless we are interested in
4233 * kernel objects, we can simply skip
4234 * them. See sb/dejan/nmk18b7/src/mach_kernel/vm
4235 * for a full solution.
4236 */
4237 if (entry->is_sub_map)
4238 continue;
4239 if (entry)
4240 obj = entry->object.vm_object;
4241 else
4242 continue;
4243
4244 while (obj != VM_OBJECT_NULL) {
4245 if (obj == object) {
4246 if (!found) {
4247 printf("TASK\t\tMAP\t\tENTRY\n");
4248 found = TRUE;
4249 }
4250 printf("0x%x\t0x%x\t0x%x\n",
4251 task, map, entry);
4252 }
4253 obj = obj->shadow;
4254 }
4255 }
4256 }
4257
4258 return(found);
4259 }
4260
4261 #endif /* MACH_KDB */
4262
4263 kern_return_t
4264 vm_object_populate_with_private(
4265 vm_object_t object,
4266 vm_object_offset_t offset,
4267 ppnum_t phys_page,
4268 vm_size_t size)
4269 {
4270 ppnum_t base_page;
4271 vm_object_offset_t base_offset;
4272
4273
4274 if(!object->private)
4275 return KERN_FAILURE;
4276
4277 base_page = phys_page;
4278
4279 vm_object_lock(object);
4280 if(!object->phys_contiguous) {
4281 vm_page_t m;
4282 if((base_offset = trunc_page_64(offset)) != offset) {
4283 vm_object_unlock(object);
4284 return KERN_FAILURE;
4285 }
4286 base_offset += object->paging_offset;
4287 while(size) {
4288 m = vm_page_lookup(object, base_offset);
4289 if(m != VM_PAGE_NULL) {
4290 if(m->fictitious) {
4291 vm_page_lock_queues();
4292 m->fictitious = FALSE;
4293 m->private = TRUE;
4294 m->phys_page = base_page;
4295 if(!m->busy) {
4296 m->busy = TRUE;
4297 }
4298 if(!m->absent) {
4299 m->absent = TRUE;
4300 object->absent_count++;
4301 }
4302 m->list_req_pending = TRUE;
4303 vm_page_unlock_queues();
4304 } else if (m->phys_page != base_page) {
4305 /* pmap call to clear old mapping */
4306 pmap_disconnect(m->phys_page);
4307 m->phys_page = base_page;
4308 }
4309
4310 /*
4311 * ENCRYPTED SWAP:
4312 * We're not pointing to the same
4313 * physical page any longer and the
4314 * contents of the new one are not
4315 * supposed to be encrypted.
4316 * XXX What happens to the original
4317 * physical page? Is it lost?
4318 */
4319 m->encrypted = FALSE;
4320
4321 } else {
4322 while ((m = vm_page_grab_fictitious())
4323 == VM_PAGE_NULL)
4324 vm_page_more_fictitious();
4325 vm_page_lock_queues();
4326 m->fictitious = FALSE;
4327 m->private = TRUE;
4328 m->phys_page = base_page;
4329 m->list_req_pending = TRUE;
4330 m->absent = TRUE;
4331 m->unusual = TRUE;
4332 object->absent_count++;
4333 vm_page_unlock_queues();
4334 vm_page_insert(m, object, base_offset);
4335 }
4336 base_page++; /* Go to the next physical page */
4337 base_offset += PAGE_SIZE;
4338 size -= PAGE_SIZE;
4339 }
4340 } else {
4341 /* NOTE: we should check the original settings here */
4342 /* if we have a size > zero a pmap call should be made */
4343 /* to disable the range */
4344
4345 /* pmap_? */
4346
4347 /* shadows on contiguous memory are not allowed */
4348 /* we therefore can use the offset field */
4349 object->shadow_offset = (vm_object_offset_t)(phys_page << 12);
4350 object->size = size;
4351 }
4352 vm_object_unlock(object);
4353 return KERN_SUCCESS;
4354 }
4355
4356 /*
4357 * memory_object_free_from_cache:
4358 *
4359 * Walk the vm_object cache list, removing and freeing vm_objects
4360 * which are backed by the pager identified by the caller (pager_id).
4361 * Remove up to "count" objects, if that many are available
4362 * in the cache.
4363 *
4364 * Walk the list at most once, return the number of vm_objects
4365 * actually freed.
4366 */
4367
4368 __private_extern__ kern_return_t
4369 memory_object_free_from_cache(
4370 __unused host_t host,
4371 int *pager_id,
4372 int *count)
4373 {
4374
4375 int object_released = 0;
4376
4377 register vm_object_t object = VM_OBJECT_NULL;
4378 vm_object_t shadow;
4379
4380 /*
4381 if(host == HOST_NULL)
4382 return(KERN_INVALID_ARGUMENT);
4383 */
4384
4385 try_again:
4386 vm_object_cache_lock();
4387
4388 queue_iterate(&vm_object_cached_list, object,
4389 vm_object_t, cached_list) {
4390 if (object->pager && (pager_id == object->pager->pager)) {
4391 vm_object_lock(object);
4392 queue_remove(&vm_object_cached_list, object,
4393 vm_object_t, cached_list);
4394 vm_object_cached_count--;
4395
4396 /*
4397 * Since this object is in the cache, we know
4398 * that it is initialized and has only a pager's
4399 * (implicit) reference. Take a reference to avoid
4400 * recursive deallocations.
4401 */
4402
4403 assert(object->pager_initialized);
4404 assert(object->ref_count == 0);
4405 object->ref_count++;
4406
4407 /*
4408 * Terminate the object.
4409 * If the object had a shadow, we let
4410 * vm_object_deallocate deallocate it.
4411 * "pageout" objects have a shadow, but
4412 * maintain a "paging reference" rather
4413 * than a normal reference.
4414 * (We are careful here to limit recursion.)
4415 */
4416 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4417 if ((vm_object_terminate(object) == KERN_SUCCESS)
4418 && (shadow != VM_OBJECT_NULL)) {
4419 vm_object_deallocate(shadow);
4420 }
4421
4422 if(object_released++ == *count)
4423 return KERN_SUCCESS;
4424 goto try_again;
4425 }
4426 }
4427 vm_object_cache_unlock();
4428 *count = object_released;
4429 return KERN_SUCCESS;
4430 }
4431
4432
4433
4434 kern_return_t
4435 memory_object_create_named(
4436 memory_object_t pager,
4437 memory_object_offset_t size,
4438 memory_object_control_t *control)
4439 {
4440 vm_object_t object;
4441 vm_object_hash_entry_t entry;
4442
4443 *control = MEMORY_OBJECT_CONTROL_NULL;
4444 if (pager == MEMORY_OBJECT_NULL)
4445 return KERN_INVALID_ARGUMENT;
4446
4447 vm_object_cache_lock();
4448 entry = vm_object_hash_lookup(pager, FALSE);
4449 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
4450 (entry->object != VM_OBJECT_NULL)) {
4451 if (entry->object->named == TRUE)
4452 panic("memory_object_create_named: caller already holds the right");
4453 }
4454 vm_object_cache_unlock();
4455 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
4456 == VM_OBJECT_NULL) {
4457 return(KERN_INVALID_OBJECT);
4458 }
4459
4460 /* wait for object (if any) to be ready */
4461 if (object != VM_OBJECT_NULL) {
4462 vm_object_lock(object);
4463 object->named = TRUE;
4464 while (!object->pager_ready) {
4465 vm_object_sleep(object,
4466 VM_OBJECT_EVENT_PAGER_READY,
4467 THREAD_UNINT);
4468 }
4469 *control = object->pager_control;
4470 vm_object_unlock(object);
4471 }
4472 return (KERN_SUCCESS);
4473 }
4474
4475
4476 /*
4477 * Routine: memory_object_recover_named [user interface]
4478 * Purpose:
4479 * Attempt to recover a named reference for a VM object.
4480 * VM will verify that the object has not already started
4481 * down the termination path, and if it has, will optionally
4482 * wait for that to finish.
4483 * Returns:
4484 * KERN_SUCCESS - we recovered a named reference on the object
4485 * KERN_FAILURE - we could not recover a reference (object dead)
4486 * KERN_INVALID_ARGUMENT - bad memory object control
4487 */
4488 kern_return_t
4489 memory_object_recover_named(
4490 memory_object_control_t control,
4491 boolean_t wait_on_terminating)
4492 {
4493 vm_object_t object;
4494
4495 vm_object_cache_lock();
4496 object = memory_object_control_to_vm_object(control);
4497 if (object == VM_OBJECT_NULL) {
4498 vm_object_cache_unlock();
4499 return (KERN_INVALID_ARGUMENT);
4500 }
4501
4502 restart:
4503 vm_object_lock(object);
4504
4505 if (object->terminating && wait_on_terminating) {
4506 vm_object_cache_unlock();
4507 vm_object_wait(object,
4508 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
4509 THREAD_UNINT);
4510 vm_object_cache_lock();
4511 goto restart;
4512 }
4513
4514 if (!object->alive) {
4515 vm_object_cache_unlock();
4516 vm_object_unlock(object);
4517 return KERN_FAILURE;
4518 }
4519
4520 if (object->named == TRUE) {
4521 vm_object_cache_unlock();
4522 vm_object_unlock(object);
4523 return KERN_SUCCESS;
4524 }
4525
4526 if((object->ref_count == 0) && (!object->terminating)){
4527 queue_remove(&vm_object_cached_list, object,
4528 vm_object_t, cached_list);
4529 vm_object_cached_count--;
4530 XPR(XPR_VM_OBJECT_CACHE,
4531 "memory_object_recover_named: removing %X, head (%X, %X)\n",
4532 (integer_t)object,
4533 (integer_t)vm_object_cached_list.next,
4534 (integer_t)vm_object_cached_list.prev, 0,0);
4535 }
4536
4537 vm_object_cache_unlock();
4538
4539 object->named = TRUE;
4540 object->ref_count++;
4541 vm_object_res_reference(object);
4542 while (!object->pager_ready) {
4543 vm_object_sleep(object,
4544 VM_OBJECT_EVENT_PAGER_READY,
4545 THREAD_UNINT);
4546 }
4547 vm_object_unlock(object);
4548 return (KERN_SUCCESS);
4549 }
4550
4551
4552 /*
4553 * vm_object_release_name:
4554 *
4555 * Enforces the name semantic on a memory_object reference count decrement.
4556 * This routine should not be called unless the caller holds a name
4557 * reference gained through memory_object_create_named.
4558 *
4559 * If the TERMINATE_IDLE flag is set, the call will return KERN_FAILURE if
4560 * the reference count is not 1, i.e. if the object is not idle with the
4561 * name being the only remaining reference.
4562 * If the decision is made to proceed, the named flag is cleared and the
4563 * reference count is decremented. If the RESPECT_CACHE flag is set and
4564 * the reference count has gone to zero, the memory_object is checked to
4565 * see if it is cacheable; otherwise, when the reference count is zero,
4566 * it is simply terminated.
4567 */
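/*
 * Hedged usage sketch (comment only; "object" stands for a named object
 * the caller obtained earlier): release the name only if the object is
 * idle, and let vm_object_deallocate decide whether to cache or terminate
 * it when the last reference goes away:
 *
 *	kr = vm_object_release_name(object,
 *				    MEMORY_OBJECT_TERMINATE_IDLE |
 *				    MEMORY_OBJECT_RESPECT_CACHE);
 *	if (kr == KERN_FAILURE)
 *		the object was still in use (or terminating); the name
 *		reference is left intact
 */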
4568
4569 __private_extern__ kern_return_t
4570 vm_object_release_name(
4571 vm_object_t object,
4572 int flags)
4573 {
4574 vm_object_t shadow;
4575 boolean_t original_object = TRUE;
4576
4577 while (object != VM_OBJECT_NULL) {
4578
4579 /*
4580 * The cache holds a reference (uncounted) to
4581 * the object. We must lock it before removing
4582 * the object.
4583 *
4584 */
4585
4586 vm_object_cache_lock();
4587 vm_object_lock(object);
4588 assert(object->alive);
4589 if(original_object)
4590 assert(object->named);
4591 assert(object->ref_count > 0);
4592
4593 /*
4594 * We have to wait for initialization before
4595 * destroying or caching the object.
4596 */
4597
4598 if (object->pager_created && !object->pager_initialized) {
4599 assert(!object->can_persist);
4600 vm_object_assert_wait(object,
4601 VM_OBJECT_EVENT_INITIALIZED,
4602 THREAD_UNINT);
4603 vm_object_unlock(object);
4604 vm_object_cache_unlock();
4605 thread_block(THREAD_CONTINUE_NULL);
4606 continue;
4607 }
4608
4609 if (((object->ref_count > 1)
4610 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
4611 || (object->terminating)) {
4612 vm_object_unlock(object);
4613 vm_object_cache_unlock();
4614 return KERN_FAILURE;
4615 } else {
4616 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
4617 vm_object_unlock(object);
4618 vm_object_cache_unlock();
4619 return KERN_SUCCESS;
4620 }
4621 }
4622
4623 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
4624 (object->ref_count == 1)) {
4625 if(original_object)
4626 object->named = FALSE;
4627 vm_object_unlock(object);
4628 vm_object_cache_unlock();
4629 /* let vm_object_deallocate push this thing into */
4630 /* the cache, if that is where it is bound */
4631 vm_object_deallocate(object);
4632 return KERN_SUCCESS;
4633 }
4634 VM_OBJ_RES_DECR(object);
4635 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4636 if(object->ref_count == 1) {
4637 if(vm_object_terminate(object) != KERN_SUCCESS) {
4638 if(original_object) {
4639 return KERN_FAILURE;
4640 } else {
4641 return KERN_SUCCESS;
4642 }
4643 }
4644 if (shadow != VM_OBJECT_NULL) {
4645 original_object = FALSE;
4646 object = shadow;
4647 continue;
4648 }
4649 return KERN_SUCCESS;
4650 } else {
4651 object->ref_count--;
4652 assert(object->ref_count > 0);
4653 if(original_object)
4654 object->named = FALSE;
4655 vm_object_unlock(object);
4656 vm_object_cache_unlock();
4657 return KERN_SUCCESS;
4658 }
4659 }
4660 /*NOTREACHED*/
4661 assert(0);
4662 return KERN_FAILURE;
4663 }
4664
4665
4666 __private_extern__ kern_return_t
4667 vm_object_lock_request(
4668 vm_object_t object,
4669 vm_object_offset_t offset,
4670 vm_object_size_t size,
4671 memory_object_return_t should_return,
4672 int flags,
4673 vm_prot_t prot)
4674 {
4675 __unused boolean_t should_flush;
4676
4677 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
4678
4679 XPR(XPR_MEMORY_OBJECT,
4680 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
4681 (integer_t)object, offset, size,
4682 (((should_return&1)<<1)|should_flush), prot);
4683
4684 /*
4685 * Check for bogus arguments.
4686 */
4687 if (object == VM_OBJECT_NULL)
4688 return (KERN_INVALID_ARGUMENT);
4689
4690 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
4691 return (KERN_INVALID_ARGUMENT);
4692
4693 size = round_page_64(size);
4694
4695 /*
4696 * Lock the object, and acquire a paging reference to
4697 * prevent the memory_object reference from being released.
4698 */
4699 vm_object_lock(object);
4700 vm_object_paging_begin(object);
4701
4702 (void)vm_object_update(object,
4703 offset, size, NULL, NULL, should_return, flags, prot);
4704
4705 vm_object_paging_end(object);
4706 vm_object_unlock(object);
4707
4708 return (KERN_SUCCESS);
4709 }
4710
4711 /*
4712 * Empty a purgable object by grabbing the physical pages assigned to it and
4713 * putting them on the free queue without writing them to backing store, etc.
4714 * When the pages are next touched they will be demand zero-fill pages. We
4715 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
4716 * skip referenced/dirty pages, pages on the active queue, etc. We're more
4717 * than happy to grab these since this is a purgable object. We mark the
4718 * object as "empty" after reaping its pages.
4719 *
4720 * On entry the object and page queues are locked, the object must be a
4721 * purgable object with no delayed copies pending.
4722 */
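/*
 * Hedged sketch of the locking expected around a call (comment only;
 * "num_reclaimed" is a hypothetical variable):
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	num_reclaimed = vm_object_purge(object);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 *
 * vm_object_purge may briefly drop and retake the page queues lock while
 * it works, but it returns with both locks still held by the caller.
 */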
4723 unsigned int
4724 vm_object_purge(vm_object_t object)
4725 {
4726 vm_page_t p, next;
4727 unsigned int num_purged_pages;
4728 vm_page_t local_freeq;
4729 unsigned long local_freed;
4730 int purge_loop_quota;
4731 /* free pages as soon as we gather PURGE_BATCH_FREE_LIMIT pages to free */
4732 #define PURGE_BATCH_FREE_LIMIT 50
4733 /* release page queues lock every PURGE_LOOP_QUOTA iterations */
4734 #define PURGE_LOOP_QUOTA 100
4735
4736 num_purged_pages = 0;
4737 if (object->purgable == VM_OBJECT_NONPURGABLE)
4738 return num_purged_pages;
4739
4740 object->purgable = VM_OBJECT_PURGABLE_EMPTY;
4741
4742 assert(object->copy == VM_OBJECT_NULL);
4743 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
4744 purge_loop_quota = PURGE_LOOP_QUOTA;
4745
4746 local_freeq = VM_PAGE_NULL;
4747 local_freed = 0;
4748
4749 /*
4750 * Go through the object's resident pages and try and discard them.
4751 */
4752 next = (vm_page_t)queue_first(&object->memq);
4753 while (!queue_end(&object->memq, (queue_entry_t)next)) {
4754 p = next;
4755 next = (vm_page_t)queue_next(&next->listq);
4756
4757 if (purge_loop_quota-- == 0) {
4758 /*
4759 * Avoid holding the page queues lock for too long.
4760 * Let someone else take it for a while if needed.
4761 * Keep holding the object's lock to guarantee that
4762 * the object's page list doesn't change under us
4763 * while we yield.
4764 */
4765 if (local_freeq != VM_PAGE_NULL) {
4766 /*
4767 * Flush our queue of pages to free.
4768 */
4769 vm_page_free_list(local_freeq);
4770 local_freeq = VM_PAGE_NULL;
4771 local_freed = 0;
4772 }
4773 vm_page_unlock_queues();
4774 mutex_pause();
4775 vm_page_lock_queues();
4776
4777 /* resume with the current page and a new quota */
4778 purge_loop_quota = PURGE_LOOP_QUOTA;
4779 }
4780
4781
4782 if (p->busy || p->cleaning || p->laundry ||
4783 p->list_req_pending) {
4784 /* page is being acted upon, so don't mess with it */
4785 continue;
4786 }
4787 if (p->wire_count) {
4788 /* don't discard a wired page */
4789 continue;
4790 }
4791
4792 if (p->tabled) {
4793 /* clean up the object/offset table */
4794 vm_page_remove(p);
4795 }
4796 if (p->absent) {
4797 /* update the object's count of absent pages */
4798 vm_object_absent_release(object);
4799 }
4800
4801 /* we can discard this page */
4802
4803 /* advertise that this page is in a transition state */
4804 p->busy = TRUE;
4805
4806 if (p->no_isync == TRUE) {
4807 /* the page hasn't been mapped yet */
4808 /* (optimization to delay the i-cache sync) */
4809 } else {
4810 /* unmap the page */
4811 int refmod_state;
4812
4813 refmod_state = pmap_disconnect(p->phys_page);
4814 if (refmod_state & VM_MEM_MODIFIED) {
4815 p->dirty = TRUE;
4816 }
4817 }
4818
4819 if (p->dirty || p->precious) {
4820 /* we saved the cost of cleaning this page ! */
4821 num_purged_pages++;
4822 vm_page_purged_count++;
4823 }
4824
4825 /* remove page from active or inactive queue... */
4826 VM_PAGE_QUEUES_REMOVE(p);
4827
4828 /* ... and put it on our queue of pages to free */
4829 assert(!p->laundry);
4830 assert(p->object != kernel_object);
4831 assert(p->pageq.next == NULL &&
4832 p->pageq.prev == NULL);
4833 p->pageq.next = (queue_entry_t) local_freeq;
4834 local_freeq = p;
4835 if (++local_freed >= PURGE_BATCH_FREE_LIMIT) {
4836 /* flush our queue of pages to free */
4837 vm_page_free_list(local_freeq);
4838 local_freeq = VM_PAGE_NULL;
4839 local_freed = 0;
4840 }
4841 }
4842
4843 /* flush our local queue of pages to free one last time */
4844 if (local_freeq != VM_PAGE_NULL) {
4845 vm_page_free_list(local_freeq);
4846 local_freeq = VM_PAGE_NULL;
4847 local_freed = 0;
4848 }
4849
4850 return num_purged_pages;
4851 }
4852
4853 /*
4854 * vm_object_purgable_control() allows the caller to control and investigate the
4855 * state of a purgable object. A purgable object is created via a call to
4856 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgable object will
4857 * never be coalesced with any other object -- even other purgable objects --
4858 * and will thus always remain a distinct object. A purgable object has
4859 * special semantics when its reference count is exactly 1. If its reference
4860 * count is greater than 1, then a purgable object will behave like a normal
4861 * object and attempts to use this interface will result in an error return
4862 * of KERN_INVALID_ARGUMENT.
4863 *
4864 * A purgable object may be put into a "volatile" state which will make the
4865 * object's pages eligible for being reclaimed without paging to backing
4866 * store if the system runs low on memory. If the pages in a volatile
4867 * purgable object are reclaimed, the purgable object is said to have been
4868 * "emptied." When a purgable object is emptied the system will reclaim as
4869 * many pages from the object as it can in a convenient manner (pages already
4870 * en route to backing store or busy for other reasons are left as is). When
4871 * a purgable object is made volatile, its pages will generally be reclaimed
4872 * before other pages in the application's working set. This semantic is
4873 * generally used by applications which can recreate the data in the object
4874 * faster than it can be paged in. One such example might be media assets
4875 * which can be reread from a much faster RAID volume.
4876 *
4877 * A purgable object may be designated as "non-volatile" which means it will
4878 * behave like all other objects in the system with pages being written to and
4879 * read from backing store as needed to satisfy system memory needs. If the
4880 * object was emptied before the object was made non-volatile, that fact will
4881 * be returned as the old state of the purgable object (see
4882 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
4883 * were reclaimed as part of emptying the object will be refaulted in as
4884 * zero-fill on demand. It is up to the application to note that an object
4885 * was emptied and recreate the object's contents if necessary. When a
4886 * purgable object is made non-volatile, its pages will generally not be paged
4887 * out to backing store in the immediate future. A purgable object may also
4888 * be manually emptied.
4889 *
4890 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
4891 * volatile purgable object may be queried at any time. This information may
4892 * be used as a control input to let the application know when the system is
4893 * experiencing memory pressure and is reclaiming memory.
4894 *
4895 * The specified address may be any address within the purgable object. If
4896 * the specified address does not represent any object in the target task's
4897 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
4898 * object containing the specified address is not a purgable object, then
4899 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
4900 * returned.
4901 *
4902 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
4903 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
4904 * state is used to set the new state of the purgable object and return its
4905 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgable
4906 * object is returned in the parameter state.
4907 *
4908 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
4909 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
4910 * the non-volatile, volatile and volatile/empty states described above.
4911 * Setting the state of a purgable object to VM_PURGABLE_EMPTY will
4912 * immediately reclaim as many pages in the object as can be conveniently
4913 * collected (some may have already been written to backing store or be
4914 * otherwise busy).
4915 *
4916 * The process of making a purgable object non-volatile and determining its
4917 * previous state is atomic. Thus, if a purgable object is made
4918 * VM_PURGABLE_NONVOLATILE and the old state is returned as
4919 * VM_PURGABLE_VOLATILE, then the purgable object's previous contents are
4920 * completely intact and will remain so until the object is made volatile
4921 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
4922 * was reclaimed while it was in a volatile state and its previous contents
4923 * have been lost.
4924 */
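/*
 * Hedged user-level sketch (comment only), assuming the vm_allocate() and
 * vm_purgable_control() interfaces described above; "addr", "len" and
 * "state" are hypothetical caller variables:
 *
 *	vm_allocate(mach_task_self(), &addr, len,
 *		    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *	(fill the region with data that can be recreated, e.g. a decoded asset)
 *
 *	state = VM_PURGABLE_VOLATILE;		let the system reclaim it
 *	vm_purgable_control(mach_task_self(), addr,
 *			    VM_PURGABLE_SET_STATE, &state);
 *
 *	state = VM_PURGABLE_NONVOLATILE;	take it back before reusing it
 *	vm_purgable_control(mach_task_self(), addr,
 *			    VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY)
 *		(the contents were reclaimed and must be regenerated)
 */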
4925 /*
4926 * The object must be locked.
4927 */
4928 kern_return_t
4929 vm_object_purgable_control(
4930 vm_object_t object,
4931 vm_purgable_t control,
4932 int *state)
4933 {
4934 int old_state;
4935 vm_page_t p;
4936
4937 if (object == VM_OBJECT_NULL) {
4938 /*
4939 * Object must already be present or it can't be purgable.
4940 */
4941 return KERN_INVALID_ARGUMENT;
4942 }
4943
4944 /*
4945 * Get current state of the purgable object.
4946 */
4947 switch (object->purgable) {
4948 case VM_OBJECT_NONPURGABLE:
4949 return KERN_INVALID_ARGUMENT;
4950
4951 case VM_OBJECT_PURGABLE_NONVOLATILE:
4952 old_state = VM_PURGABLE_NONVOLATILE;
4953 break;
4954
4955 case VM_OBJECT_PURGABLE_VOLATILE:
4956 old_state = VM_PURGABLE_VOLATILE;
4957 break;
4958
4959 case VM_OBJECT_PURGABLE_EMPTY:
4960 old_state = VM_PURGABLE_EMPTY;
4961 break;
4962
4963 default:
4964 old_state = VM_PURGABLE_NONVOLATILE;
4965 panic("Bad state (%d) for purgable object!\n",
4966 object->purgable);
4967 /*NOTREACHED*/
4968 }
4969
4970 /* purgable can't have delayed copies - now or in the future */
4971 assert(object->copy == VM_OBJECT_NULL);
4972 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
4973
4974 /*
4975 * Execute the desired operation.
4976 */
4977 if (control == VM_PURGABLE_GET_STATE) {
4978 *state = old_state;
4979 return KERN_SUCCESS;
4980 }
4981
4982 switch (*state) {
4983 case VM_PURGABLE_NONVOLATILE:
4984 vm_page_lock_queues();
4985 if (object->purgable != VM_OBJECT_PURGABLE_NONVOLATILE) {
4986 assert(vm_page_purgeable_count >=
4987 object->resident_page_count);
4988 vm_page_purgeable_count -= object->resident_page_count;
4989 }
4990
4991 object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
4992
4993 /*
4994 * If the object wasn't emptied, then mark all pages of the
4995 * object as referenced in order to give them a complete turn
4996 * of the virtual memory "clock" before becoming candidates
4997 * for paging out (if the system is suffering from memory
4998 * pressure). We don't really need to set the pmap reference
4999 * bits (which would be expensive) since the software copies
5000 * are believed when they are set to TRUE.
5001 */
5002 if (old_state != VM_PURGABLE_EMPTY) {
5003 for (p = (vm_page_t)queue_first(&object->memq);
5004 !queue_end(&object->memq, (queue_entry_t)p);
5005 p = (vm_page_t)queue_next(&p->listq))
5006 p->reference = TRUE;
5007 }
5008
5009 vm_page_unlock_queues();
5010
5011 break;
5012
5013 case VM_PURGABLE_VOLATILE:
5014 vm_page_lock_queues();
5015
5016 if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
5017 object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
5018 vm_page_purgeable_count += object->resident_page_count;
5019 }
5020
5021 object->purgable = VM_OBJECT_PURGABLE_VOLATILE;
5022
5023 /*
5024 * We want the newly volatile purgable object to be a
5025 * candidate for the pageout scan before other pages in the
5026 * application if the system is suffering from memory
5027 * pressure. To do this, we move a page of the object from
5028 * the active queue onto the inactive queue in order to
5029 * promote the object for early reclaim. We only need to move
5030 * a single page since the pageout scan will reap the entire
5031 * purgable object if it finds a single page in a volatile
5032 * state. Obviously we don't do this if there are no pages
5033 * associated with the object or we find a page of the object
5034 * already on the inactive queue.
5035 */
5036 for (p = (vm_page_t)queue_first(&object->memq);
5037 !queue_end(&object->memq, (queue_entry_t)p);
5038 p = (vm_page_t)queue_next(&p->listq)) {
5039 if (p->inactive) {
5040 /* already a page on the inactive queue */
5041 break;
5042 }
5043 if (p->active && !p->busy) {
5044 /* found one we can move */
5045 vm_page_deactivate(p);
5046 break;
5047 }
5048 }
5049 vm_page_unlock_queues();
5050
5051 break;
5052
5053
5054 case VM_PURGABLE_EMPTY:
5055 vm_page_lock_queues();
5056 if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
5057 object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
5058 vm_page_purgeable_count += object->resident_page_count;
5059 }
5060 (void) vm_object_purge(object);
5061 vm_page_unlock_queues();
5062 break;
5063
5064 }
5065 *state = old_state;
5066
5067 return KERN_SUCCESS;
5068 }
5069
5070 #if TASK_SWAPPER
5071 /*
5072 * vm_object_res_deallocate
5073 *
5074 * (recursively) decrement residence counts on vm objects and their shadows.
5075 * Called from vm_object_deallocate and when swapping out an object.
5076 *
5077 * The object is locked, and remains locked throughout the function,
5078 * even as we iterate down the shadow chain. Locks on intermediate objects
5079 * will be dropped, but not the original object.
5080 *
5081 * NOTE: this function used to use recursion, rather than iteration.
5082 */
5083
5084 __private_extern__ void
5085 vm_object_res_deallocate(
5086 vm_object_t object)
5087 {
5088 vm_object_t orig_object = object;
5089 /*
5090 * Object is locked so it can be called directly
5091 * from vm_object_deallocate. Original object is never
5092 * unlocked.
5093 */
5094 assert(object->res_count > 0);
5095 while (--object->res_count == 0) {
5096 assert(object->ref_count >= object->res_count);
5097 vm_object_deactivate_all_pages(object);
5098 /* iterate on shadow, if present */
5099 if (object->shadow != VM_OBJECT_NULL) {
5100 vm_object_t tmp_object = object->shadow;
5101 vm_object_lock(tmp_object);
5102 if (object != orig_object)
5103 vm_object_unlock(object);
5104 object = tmp_object;
5105 assert(object->res_count > 0);
5106 } else
5107 break;
5108 }
5109 if (object != orig_object)
5110 vm_object_unlock(object);
5111 }
5112
5113 /*
5114 * vm_object_res_reference
5115 *
5116 * Internal function to increment residence count on a vm object
5117 * and its shadows. It is called only from vm_object_reference, and
5118 * when swapping in a vm object, via vm_map_swap.
5119 *
5120 * The object is locked, and remains locked throughout the function,
5121 * even as we iterate down the shadow chain. Locks on intermediate objects
5122 * will be dropped, but not the original object.
5123 *
5124 * NOTE: this function used to use recursion, rather than iteration.
5125 */
5126
5127 __private_extern__ void
5128 vm_object_res_reference(
5129 vm_object_t object)
5130 {
5131 vm_object_t orig_object = object;
5132 /*
5133 * Object is locked, so this can be called directly
5134 * from vm_object_reference. This lock is never released.
5135 */
5136 while ((++object->res_count == 1) &&
5137 (object->shadow != VM_OBJECT_NULL)) {
5138 vm_object_t tmp_object = object->shadow;
5139
5140 assert(object->ref_count >= object->res_count);
5141 vm_object_lock(tmp_object);
5142 if (object != orig_object)
5143 vm_object_unlock(object);
5144 object = tmp_object;
5145 }
5146 if (object != orig_object)
5147 vm_object_unlock(object);
5148 assert(orig_object->ref_count >= orig_object->res_count);
5149 }
5150 #endif /* TASK_SWAPPER */
5151
5152 /*
5153 * vm_object_reference:
5154 *
5155 * Gets another reference to the given object.
5156 */
5157 #ifdef vm_object_reference
5158 #undef vm_object_reference
5159 #endif
5160 __private_extern__ void
5161 vm_object_reference(
5162 register vm_object_t object)
5163 {
5164 if (object == VM_OBJECT_NULL)
5165 return;
5166
5167 vm_object_lock(object);
5168 assert(object->ref_count > 0);
5169 vm_object_reference_locked(object);
5170 vm_object_unlock(object);
5171 }
5172
5173 #ifdef MACH_BSD
5174 /*
5175 * Scale the vm_object_cache
5176 * This is required to make sure that the vm_object_cache is big
5177 * enough to effectively cache the mapped file.
5178 * This is really important with UBC as all the regular file vnodes
5179 * have a memory object associated with them. Having this cache too
5180 * small results in rapid reclaim of vnodes and hurts performance a LOT!
5181 *
5182 * This is also needed as number of vnodes can be dynamically scaled.
5183 */
5184 kern_return_t
5185 adjust_vm_object_cache(
5186 __unused vm_size_t oval,
5187 vm_size_t nval)
5188 {
5189 vm_object_cached_max = nval;
5190 vm_object_cache_trim(FALSE);
5191 return (KERN_SUCCESS);
5192 }
5193 #endif /* MACH_BSD */
5194
5195
5196 /*
5197 * vm_object_transpose
5198 *
5199 * This routine takes two VM objects of the same size and exchanges
5200 * their backing store.
5201 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
5202 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
5203 *
5204 * The VM objects must not be locked by caller.
5205 */
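/*
 * Hedged sketch of the expected call sequence (comment only, UPL setup
 * elided):
 *
 *	(build a UPL over each object with UPL_SET_IO_WIRE and
 *	 UPL_BLOCK_ACCESS so every page is resident, wired and busy)
 *
 *	kr = vm_object_transpose(object1, object2,
 *				 (vm_object_size_t) transpose_size);
 *
 *	(commit or abort the UPLs to unblock access to the pages)
 */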
5206 kern_return_t
5207 vm_object_transpose(
5208 vm_object_t object1,
5209 vm_object_t object2,
5210 vm_object_size_t transpose_size)
5211 {
5212 vm_object_t tmp_object;
5213 kern_return_t retval;
5214 boolean_t object1_locked, object2_locked;
5215 boolean_t object1_paging, object2_paging;
5216 vm_page_t page;
5217 vm_object_offset_t page_offset;
5218
5219 tmp_object = VM_OBJECT_NULL;
5220 object1_locked = FALSE; object2_locked = FALSE;
5221 object1_paging = FALSE; object2_paging = FALSE;
5222
5223 if (object1 == object2 ||
5224 object1 == VM_OBJECT_NULL ||
5225 object2 == VM_OBJECT_NULL) {
5226 /*
5227 * If the 2 VM objects are the same, there's
5228 * no point in exchanging their backing store.
5229 */
5230 retval = KERN_INVALID_VALUE;
5231 goto done;
5232 }
5233
5234 vm_object_lock(object1);
5235 object1_locked = TRUE;
5236 if (object1->copy || object1->shadow || object1->shadowed ||
5237 object1->purgable != VM_OBJECT_NONPURGABLE) {
5238 /*
5239 * We don't deal with copy or shadow objects (yet).
5240 */
5241 retval = KERN_INVALID_VALUE;
5242 goto done;
5243 }
5244 /*
5245 * Since we're about to mess with the object's backing store,
5246 * mark it as "paging_in_progress". Note that this is not enough
5247 * to prevent any paging activity on this object, so the caller should
5248 * have "quiesced" the objects beforehand, via a UPL operation with
5249 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
5250 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
5251 */
5252 vm_object_paging_begin(object1);
5253 object1_paging = TRUE;
5254 vm_object_unlock(object1);
5255 object1_locked = FALSE;
5256
5257 /*
5258 * Same as above for the 2nd object...
5259 */
5260 vm_object_lock(object2);
5261 object2_locked = TRUE;
5262 if (object2->copy || object2->shadow || object2->shadowed ||
5263 object2->purgable != VM_OBJECT_NONPURGABLE) {
5264 retval = KERN_INVALID_VALUE;
5265 goto done;
5266 }
5267 vm_object_paging_begin(object2);
5268 object2_paging = TRUE;
5269 vm_object_unlock(object2);
5270 object2_locked = FALSE;
5271
5272 /*
5273 * Allocate a temporary VM object to hold object1's contents
5274 * while we copy object2 to object1.
5275 */
5276 tmp_object = vm_object_allocate(transpose_size);
5277 vm_object_lock(tmp_object);
5278 vm_object_paging_begin(tmp_object);
5279 tmp_object->can_persist = FALSE;
5280
5281 /*
5282 * Since we need to lock both objects at the same time,
5283 * make sure we always lock them in the same order to
5284 * avoid deadlocks.
5285 */
5286 if (object1 < object2) {
5287 vm_object_lock(object1);
5288 vm_object_lock(object2);
5289 } else {
5290 vm_object_lock(object2);
5291 vm_object_lock(object1);
5292 }
5293 object1_locked = TRUE;
5294 object2_locked = TRUE;
5295
5296 if (object1->size != object2->size ||
5297 object1->size != transpose_size) {
5298 /*
5299 * If the 2 objects don't have the same size, we can't
5300 * exchange their backing stores or one would overflow.
5301 * If their size doesn't match the caller's
5302 * "transpose_size", we can't do it either because the
5303 * transpose operation will affect the entire span of
5304 * the objects.
5305 */
5306 retval = KERN_INVALID_VALUE;
5307 goto done;
5308 }
5309
5310
5311 /*
5312 * Transpose the lists of resident pages.
5313 */
5314 if (object1->phys_contiguous || queue_empty(&object1->memq)) {
5315 /*
5316 * No pages in object1, just transfer pages
5317 * from object2 to object1. No need to go through
5318 * an intermediate object.
5319 */
5320 while (!queue_empty(&object2->memq)) {
5321 page = (vm_page_t) queue_first(&object2->memq);
5322 vm_page_rename(page, object1, page->offset);
5323 }
5324 assert(queue_empty(&object2->memq));
5325 } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
5326 /*
5327 * No pages in object2, just transfer pages
5328 * from object1 to object2. No need to go through
5329 * an intermediate object.
5330 */
5331 while (!queue_empty(&object1->memq)) {
5332 page = (vm_page_t) queue_first(&object1->memq);
5333 vm_page_rename(page, object2, page->offset);
5334 }
5335 assert(queue_empty(&object1->memq));
5336 } else {
5337 /* transfer object1's pages to tmp_object */
5338 vm_page_lock_queues();
5339 while (!queue_empty(&object1->memq)) {
5340 page = (vm_page_t) queue_first(&object1->memq);
5341 page_offset = page->offset;
5342 vm_page_remove(page);
5343 page->offset = page_offset;
5344 queue_enter(&tmp_object->memq, page, vm_page_t, listq);
5345 }
5346 vm_page_unlock_queues();
5347 assert(queue_empty(&object1->memq));
5348 /* transfer object2's pages to object1 */
5349 while (!queue_empty(&object2->memq)) {
5350 page = (vm_page_t) queue_first(&object2->memq);
5351 vm_page_rename(page, object1, page->offset);
5352 }
5353 assert(queue_empty(&object2->memq));
5354 /* transfer tmp_object's pages to object1 */
5355 while (!queue_empty(&tmp_object->memq)) {
5356 page = (vm_page_t) queue_first(&tmp_object->memq);
5357 queue_remove(&tmp_object->memq, page,
5358 vm_page_t, listq);
5359 vm_page_insert(page, object2, page->offset);
5360 }
5361 assert(queue_empty(&tmp_object->memq));
5362 }
5363
5364 /* no need to transpose the size: they should be identical */
5365 assert(object1->size == object2->size);
5366
5367 #define __TRANSPOSE_FIELD(field) \
5368 MACRO_BEGIN \
5369 tmp_object->field = object1->field; \
5370 object1->field = object2->field; \
5371 object2->field = tmp_object->field; \
5372 MACRO_END
5373
5374 assert(!object1->copy);
5375 assert(!object2->copy);
5376
5377 assert(!object1->shadow);
5378 assert(!object2->shadow);
5379
5380 __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
5381 __TRANSPOSE_FIELD(pager);
5382 __TRANSPOSE_FIELD(paging_offset);
5383
5384 __TRANSPOSE_FIELD(pager_control);
5385 /* update the memory_objects' pointers back to the VM objects */
5386 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5387 memory_object_control_collapse(object1->pager_control,
5388 object1);
5389 }
5390 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5391 memory_object_control_collapse(object2->pager_control,
5392 object2);
5393 }
5394
5395 __TRANSPOSE_FIELD(absent_count);
5396
5397 assert(object1->paging_in_progress);
5398 assert(object2->paging_in_progress);
5399
5400 __TRANSPOSE_FIELD(pager_created);
5401 __TRANSPOSE_FIELD(pager_initialized);
5402 __TRANSPOSE_FIELD(pager_ready);
5403 __TRANSPOSE_FIELD(pager_trusted);
5404 __TRANSPOSE_FIELD(internal);
5405 __TRANSPOSE_FIELD(temporary);
5406 __TRANSPOSE_FIELD(private);
5407 __TRANSPOSE_FIELD(pageout);
5408 __TRANSPOSE_FIELD(true_share);
5409 __TRANSPOSE_FIELD(phys_contiguous);
5410 __TRANSPOSE_FIELD(nophyscache);
5411 __TRANSPOSE_FIELD(last_alloc);
5412 __TRANSPOSE_FIELD(sequential);
5413 __TRANSPOSE_FIELD(cluster_size);
5414 __TRANSPOSE_FIELD(existence_map);
5415 __TRANSPOSE_FIELD(cow_hint);
5416 __TRANSPOSE_FIELD(wimg_bits);
5417
5418 #undef __TRANSPOSE_FIELD
5419
5420 retval = KERN_SUCCESS;
5421
5422 done:
5423 /*
5424 * Cleanup.
5425 */
5426 if (tmp_object != VM_OBJECT_NULL) {
5427 vm_object_paging_end(tmp_object);
5428 vm_object_unlock(tmp_object);
5429 /*
5430 * Re-initialize the temporary object to avoid
5431 * deallocating a real pager.
5432 */
5433 _vm_object_allocate(transpose_size, tmp_object);
5434 vm_object_deallocate(tmp_object);
5435 tmp_object = VM_OBJECT_NULL;
5436 }
5437
5438 if (object1_locked) {
5439 vm_object_unlock(object1);
5440 object1_locked = FALSE;
5441 }
5442 if (object2_locked) {
5443 vm_object_unlock(object2);
5444 object2_locked = FALSE;
5445 }
5446 if (object1_paging) {
5447 vm_object_lock(object1);
5448 vm_object_paging_end(object1);
5449 vm_object_unlock(object1);
5450 object1_paging = FALSE;
5451 }
5452 if (object2_paging) {
5453 vm_object_lock(object2);
5454 vm_object_paging_end(object2);
5455 vm_object_unlock(object2);
5456 object2_paging = FALSE;
5457 }
5458
5459 return retval;
5460 }