/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */
/* remove as part of component support merge */
extern int vnode_pager_workaround;
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>
#include <ipc/ipc_port.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	needed.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait until the
 *	"pager_initialized" field is asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	of objects.
 *
 *	ZZZ Continue this comment.
 */
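/*
 * Illustrative sketch (not compiled): how a page lookup conceptually walks
 * a shadow chain, per the comment above.  The helper name is hypothetical;
 * the real logic lives in vm_fault_page() and the collapse code.
 */
#if 0
static vm_page_t
shadow_chain_lookup_sketch(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		/* a page resident in this object supersedes the shadow */
		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL)
			return m;
		/* otherwise look in the backing object, biased by shadow_offset */
		offset += object->shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif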
/*	Forward declarations for internal functions.	*/
static void		_vm_object_allocate(
				vm_object_size_t	size,
				vm_object_t		object);

static kern_return_t	vm_object_terminate(
				vm_object_t		object);

extern void		vm_object_remove(
				vm_object_t		object);

static vm_object_t	vm_object_cache_trim(
				boolean_t called_from_vm_object_deallocate);

static void		vm_object_deactivate_all_pages(
				vm_object_t		object);

static void		vm_object_abort_activity(
				vm_object_t		object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_do_bypass(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_release_pager(
				memory_object_t	pager);

static zone_t		vm_object_zone;		/* vm backing store zone */
/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object			kernel_object_store;
__private_extern__ vm_object_t		kernel_object = &kernel_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object			vm_submap_object_store;

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object			vm_object_template;
/*
 *	Virtual memory objects that are not referenced by
 *	any address maps, but that are allowed to persist
 *	(an attribute specified by the associated memory manager),
 *	are kept in a queue (vm_object_cached_list).
 *
 *	When an object from this queue is referenced again,
 *	for example to make another address space mapping,
 *	it must be removed from the queue.  That is, the
 *	queue contains *only* objects with zero references.
 *
 *	The kernel may choose to terminate objects from this
 *	queue in order to reclaim storage.  The current policy
 *	is to permit a fixed maximum number of unreferenced
 *	objects (vm_object_cached_max).
 *
 *	A spin lock (accessed by routines
 *	vm_object_cache_{lock,lock_try,unlock}) governs the
 *	object cache.  It must be held when objects are
 *	added to or removed from the cache (in vm_object_terminate).
 *	The routines that acquire a reference to a virtual
 *	memory object based on one of the memory object ports
 *	must also lock the cache.
 *
 *	Ideally, the object cache should be more isolated
 *	from the reference mechanism, so that the lock need
 *	not be held to make simple references.
 */
static queue_head_t	vm_object_cached_list;
static int		vm_object_cached_count = 0;
static int		vm_object_cached_high;	/* highest # cached objects */
static int		vm_object_cached_max = 512;	/* may be patched */
static decl_mutex_data(,vm_object_cached_lock_data)
#define vm_object_cache_lock()		\
		mutex_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try()	\
		mutex_try(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()	\
		mutex_unlock(&vm_object_cached_lock_data)
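/*
 * Typical usage (illustrative only): the cache lock is taken first, and the
 * object lock is then only *tried* (never blocked on) while the cache lock
 * is held, to avoid deadlock -- see vm_object_deallocate() below.
 */
#if 0
	for (;;) {
		vm_object_cache_lock();
		if (vm_object_lock_try(object))
			break;			/* got both locks */
		vm_object_cache_unlock();
		mutex_pause();			/* back off and retry */
	}
#endif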
#define	VM_OBJECT_HASH_COUNT		1024
static queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone	*vm_object_hash_zone;
struct vm_object_hash_entry {
	queue_chain_t		hash_link;	/* hash chain link */
	memory_object_t		pager;		/* pager we represent */
	vm_object_t		object;		/* corresponding object */
	boolean_t		waiting;	/* someone waiting for
						 * termination */
};

typedef struct vm_object_hash_entry	*vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL	((vm_object_hash_entry_t) 0)
#define	VM_OBJECT_HASH_SHIFT	8
#define vm_object_hash(pager) \
	((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)
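/*
 * Worked example (illustrative): the low VM_OBJECT_HASH_SHIFT bits of a
 * zone-allocated pager address are mostly alignment, so they are shifted
 * off before the modulus; a pager at 0x12345600 hashes to
 * ((0x12345600 >> 8) % 1024) = 0x123456 % 1024 = bucket 0x56.
 */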
/*
 *	vm_object_hash_lookup looks up a pager in the hashtable
 *	and returns the corresponding entry, with optional removal.
 */

static vm_object_hash_entry_t
vm_object_hash_lookup(
	memory_object_t	pager,
	boolean_t	remove_entry)
{
	register queue_t			bucket;
	register vm_object_hash_entry_t		entry;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t)queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t)entry)) {
		if (entry->pager == pager && !remove_entry)
			return(entry);
		else if (entry->pager == pager) {
			queue_remove(bucket, entry,
					vm_object_hash_entry_t, hash_link);
			return(entry);
		}

		entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
	}

	return(VM_OBJECT_HASH_ENTRY_NULL);
}
/*
 *	vm_object_hash_insert enters the specified
 *	pager / cache object association in the hashtable.
 */

static void
vm_object_hash_insert(
	vm_object_hash_entry_t	entry)
{
	register queue_t	bucket;

	bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];

	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
}
static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;

	entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
	entry->pager = pager;
	entry->object = VM_OBJECT_NULL;
	entry->waiting = FALSE;

	return(entry);
}

void
vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry)
{
	zfree(vm_object_hash_zone, (vm_offset_t)entry);
}
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

static void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
		"vm_object_allocate, object 0x%X size 0x%X\n",
		(integer_t)object, size, 0,0,0);

	*object = vm_object_template;
	queue_init(&object->memq);
	queue_init(&object->msr_q);
#ifdef UBC_DEBUG
	queue_init(&object->uplq);
#endif /* UBC_DEBUG */
	vm_object_lock_init(object);
	object->size = size;
}
__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	register vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	register int	i;

	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
				round_page_32(512*1024),
				round_page_32(12*1024),
				"vm objects");

	queue_init(&vm_object_cached_list);
	mutex_init(&vm_object_cached_lock_data, ETAP_VM_OBJ_CACHE);

	vm_object_hash_zone =
		zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
		      round_page_32(512*1024),
		      round_page_32(12*1024),
		      "vm object hash entries");

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);
	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */
	vm_object_template.size = 0;
	vm_object_template.frozen_size = 0;
	vm_object_template.ref_count = 1;
#if	TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif	/* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.cow_hint = ~(vm_offset_t)0;
	vm_object_template.true_share = FALSE;

	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_request = PAGER_REQUEST_NULL;
	/* msr_q; init after allocation */

	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.absent_count = 0;
	vm_object_template.paging_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.temporary = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.lock_in_progress = FALSE;
	vm_object_template.lock_restart = FALSE;
	vm_object_template.silent_overwrite = FALSE;
	vm_object_template.advisory_pageout = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;
	/* End bitfields */

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_DEFAULT;

	/* cached_list; init after allocation */
	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.cluster_size = 0;
#if	MACH_PAGEMAP
	vm_object_template.existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */
#if	MACH_ASSERT
	vm_object_template.paging_object = VM_OBJECT_NULL;
#endif	/* MACH_ASSERT */
	/*
	 *	Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

/*
 * Note that in the following size specifications, we need to add 1 because
 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
 */

#ifdef ppc
	_vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
			kernel_object);
#else
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			kernel_object);
#endif
	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 *	Initialize the "submap object".  Make it as large as the
	 *	kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
#ifdef ppc
	_vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
			vm_submap_object);
#else
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			vm_submap_object);
#endif
	vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called
	 * with "zone" locks held.
	 */
	vm_object_reference(vm_submap_object);

#if	MACH_PAGEMAP
	vm_external_module_initialize();
#endif	/* MACH_PAGEMAP */
}
__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}

/* remove the typedef below when emergency work-around is taken out */
typedef struct vnode_pager {
	memory_object_t			pager;
	memory_object_t			pager_handle;	/* pager */
	memory_object_control_t		control_handle;	/* memory object's control handle */
	void				*vnode_handle;	/* vnode handle */
} *vnode_pager_t;
#define	MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
__private_extern__ void
vm_object_deallocate(
	register vm_object_t	object)
{
	boolean_t retry_cache_trim = FALSE;
	vm_object_t shadow;
//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */

	while (object != VM_OBJECT_NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */
		for (;;) {
			vm_object_cache_lock();

			/*
			 * if we try to take a regular lock here
			 * we risk deadlocking against someone
			 * holding a lock on this object while
			 * trying to vm_object_deallocate a different
			 * object
			 */
			if (vm_object_lock_try(object))
				break;
			vm_object_cache_unlock();
			mutex_pause();  /* wait a bit */
		}
		assert(object->ref_count > 0);
		/*
		 *	If the object has a named reference, and only
		 *	that reference would remain, inform the pager
		 *	about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_unlock(object);
				vm_object_cache_unlock();

				memory_object_unmap(pager);

				for (;;) {
					vm_object_cache_lock();

					/*
					 * if we try to take a regular lock here
					 * we risk deadlocking against someone
					 * holding a lock on this object while
					 * trying to vm_object_deallocate a different
					 * object
					 */
					if (vm_object_lock_try(object))
						break;
					vm_object_cache_unlock();
					mutex_pause();  /* wait a bit */
				}
				assert(object->ref_count > 0);
			}
		}
		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call      */
		/* terminate again.  */

		if ((object->ref_count > 1) || object->terminating) {
			object->ref_count--;
			vm_object_res_deallocate(object);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}
		/*
		 *	If this object can persist, then enter it in
		 *	the cache. Otherwise, terminate it.
		 *
		 *	NOTE:  Only permanent objects are cached, and
		 *	permanent objects cannot have shadows.  This
		 *	affects the residence counting logic in a minor
		 *	way (can do it in-line, mostly).
		 */

		if ((object->can_persist) && (object->alive)) {
			/*
			 *	Now it is safe to decrement reference count,
			 *	and to return if reference count is > 0.
			 */
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
				vm_object_cache_unlock();
				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				     VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}
#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	Remove shadow now if we don't
			 *	want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */
			/*
			 *	Enter the object onto the queue of
			 *	cached objects, and deactivate
			 *	all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
		      "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
				(integer_t)object,
				(integer_t)vm_object_cached_list.next,
				(integer_t)vm_object_cached_list.prev,0,0);

			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cache_unlock();
			vm_object_deactivate_all_pages(object);
			vm_object_unlock(object);
#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	If we have a shadow that we need
			 *	to deallocate, do so now, remembering
			 *	to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */
			/*
			 *	Trim the cache. If the cache trim
			 *	returns with a shadow for us to deallocate,
			 *	then remember to retry the cache trim
			 *	when we are done deallocating the shadow.
			 *	Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;

		} else {
			/*
			 *	This object is not cacheable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
	 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%lX ref %d\n",
				(integer_t)object, object->resident_page_count,
				object->paging_in_progress,
				(natural_t)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */
			/*
			 *	Terminate this object. If it had a shadow,
			 *	then deallocate it; otherwise, if we need
			 *	to retry a cache trim, do so now; otherwise,
			 *	we are done. "pageout" objects have a shadow,
			 *	but maintain a "paging reference" rather than
			 *	a normal reference.
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
			if(vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
	}
	assert(! retry_cache_trim);
}
/*
 *	Check to see whether we really need to trim
 *	down the cache. If so, remove an object from
 *	the cache, terminate it, and repeat.
 *
 *	Called with, and returns with, cache lock unlocked.
 */
vm_object_t
vm_object_cache_trim(
	boolean_t called_from_vm_object_deallocate)
{
	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	for (;;) {
		/*
		 *	If we no longer need to trim the cache,
		 *	then we are done.
		 */

		vm_object_cache_lock();
		if (vm_object_cached_count <= vm_object_cached_max) {
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}
		/*
		 *	We must trim down the cache, so remove
		 *	the first object in the cache.
		 */
		XPR(XPR_VM_OBJECT,
		"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0, 0, 0);

		object = (vm_object_t) queue_first(&vm_object_cached_list);
		if(object == (vm_object_t) &vm_object_cached_list) {
			/* something's wrong with the calling parameter or */
			/* the value of vm_object_cached_count, just fix   */
			/* and return */
			if(vm_object_cached_max < 0)
				vm_object_cached_max = 0;
			vm_object_cached_count = 0;
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}
		vm_object_lock(object);
		queue_remove(&vm_object_cached_list, object, vm_object_t,
			     cached_list);
		vm_object_cached_count--;
		/*
		 *	Since this object is in the cache, we know
		 *	that it is initialized and has no references.
		 *	Take a reference to avoid recursive deallocations.
		 */

		assert(object->pager_initialized);
		assert(object->ref_count == 0);
		object->ref_count++;

		/*
		 *	Terminate the object.
		 *	If the object had a shadow, we let vm_object_deallocate
		 *	deallocate it. "pageout" objects have a shadow, but
		 *	maintain a "paging reference" rather than a normal
		 *	reference.
		 *	(We are careful here to limit recursion.)
		 */
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
		if(vm_object_terminate(object) != KERN_SUCCESS)
			continue;
		if (shadow != VM_OBJECT_NULL) {
			if (called_from_vm_object_deallocate) {
				return shadow;
			} else {
				vm_object_deallocate(shadow);
			}
		}
	}
}

boolean_t	vm_object_terminate_remove_all = FALSE;
/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if it's found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	register vm_object_t	object)
{
	memory_object_t		pager;
	register vm_page_t	p;
	vm_object_t		shadow_object;

	XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
		(integer_t)object, object->ref_count, 0, 0, 0);

	if (!object->pageout && (!object->temporary || object->can_persist)
			&& (object->pager != NULL || object->shadow_severed)) {
	   vm_object_cache_unlock();
	   while (!queue_empty(&object->memq)) {
		/*
		 * Clear pager_trusted bit so that the pages get yanked
		 * out of the object instead of cleaned in place.  This
		 * prevents a deadlock in XMM and makes more sense anyway.
		 */
		object->pager_trusted = FALSE;

		p = (vm_page_t) queue_first(&object->memq);

		VM_PAGE_CHECK(p);

		if (p->busy || p->cleaning) {
			if(p->cleaning || p->absent) {
				vm_object_paging_wait(object, THREAD_UNINT);
				continue;
			} else {
				panic("vm_object_terminate.3 0x%x 0x%x", object, p);
			}
		}
		vm_page_lock_queues();
		p->busy = TRUE;
		VM_PAGE_QUEUES_REMOVE(p);
		vm_page_unlock_queues();

		if (p->absent || p->private) {

			/*
			 *	For private pages, VM_PAGE_FREE just
			 *	leaves the page structure around for
			 *	its owner to clean up.  For absent
			 *	pages, the structure is returned to
			 *	the appropriate pool.
			 */

			goto free_page;
		}

		if (p->fictitious)
			panic("vm_object_terminate.4 0x%x 0x%x", object, p);
		if (!p->dirty)
			p->dirty = pmap_is_modified(p->phys_page);

		if ((p->dirty || p->precious) && !p->error && object->alive) {
			p->busy = FALSE;
			vm_pageout_cluster(p);	/* flush page */
			vm_object_paging_wait(object, THREAD_UNINT);
			XPR(XPR_VM_OBJECT,
			    "vm_object_terminate restart, object 0x%X ref %d\n",
			    (integer_t)object, object->ref_count, 0, 0, 0);
		} else {
		    free_page:
			VM_PAGE_FREE(p);
		}
	   }
	   vm_object_unlock(object);
	   vm_object_cache_lock();
	   vm_object_lock(object);
	}
	/*
	 *	Make sure the object isn't already being terminated
	 */
	if(object->terminating) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}
	/*
	 *	Did somebody get a reference to the object while we were
	 *	cleaning it?
	 */
	if(object->ref_count != 1) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_res_deallocate(object);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}
	/*
	 *	Make sure no one can look us up now.
	 */

	object->terminating = TRUE;
	object->alive = FALSE;
	vm_object_remove(object);
	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy. The reference we hold on the shadow must be dropped
	 *	by our caller.
	 */
	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
	    !(object->pageout)) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = VM_OBJECT_NULL;
		vm_object_unlock(shadow_object);
	}
	/*
	 *	The pageout daemon might be playing with our pages.
	 *	Now that the object is dead, it won't touch any more
	 *	pages, but some pages might already be on their way out.
	 *	Hence, we wait until the active paging activities have ceased
	 *	before we break the association with the pager itself.
	 */
	while (object->paging_in_progress != 0) {
		vm_object_cache_unlock();
		vm_object_wait(object,
			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			       THREAD_UNINT);
		vm_object_cache_lock();
		vm_object_lock(object);
	}
	pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;

	if (pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_request);
	vm_object_cache_unlock();

	object->ref_count--;
#if	TASK_SWAPPER
	assert(object->res_count == 0);
#endif	/* TASK_SWAPPER */

	assert (object->ref_count == 0);
	/*
	 *	Clean or free the pages, as appropriate.
	 *	It is possible for us to find busy/absent pages,
	 *	if some faults on this object were aborted.
	 */
	if (object->pageout) {
		assert(shadow_object != VM_OBJECT_NULL);
		assert(shadow_object == object->shadow);

		vm_pageout_object_terminate(object);

	} else if ((object->temporary && !object->can_persist) ||
		   (pager == MEMORY_OBJECT_NULL)) {
		while (!queue_empty(&object->memq)) {
			p = (vm_page_t) queue_first(&object->memq);

			VM_PAGE_CHECK(p);
			VM_PAGE_FREE(p);
		}
	} else if (!queue_empty(&object->memq)) {
		panic("vm_object_terminate: queue just emptied isn't");
	}
	assert(object->paging_in_progress == 0);
	assert(object->ref_count == 0);
	/*
	 *	If the pager has not already been released by
	 *	vm_object_destroy, we need to terminate it and
	 *	release our reference to it here.
	 */
	if (pager != MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		vm_object_release_pager(pager);
		vm_object_lock(object);
	}
	/* kick off anyone waiting on terminating */
	object->terminating = FALSE;
	vm_object_paging_begin(object);
	vm_object_paging_end(object);
	vm_object_unlock(object);
#if	MACH_PAGEMAP
	vm_external_destroy(object->existence_map, object->size);
#endif	/* MACH_PAGEMAP */
	/*
	 *	Free the space for the object.
	 */
	zfree(vm_object_zone, (vm_offset_t) object);
	return KERN_SUCCESS;
}
/*
 *	Routine:	vm_object_pager_wakeup
 *	Purpose:	Wake up anyone waiting for termination of a pager.
 */

static void
vm_object_pager_wakeup(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;
	boolean_t		waiting = FALSE;

	/*
	 *	If anyone was waiting for the memory_object_terminate
	 *	to be queued, wake them up now.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, TRUE);
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		waiting = entry->waiting;
	vm_object_cache_unlock();
	if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
		if (waiting)
			thread_wakeup((event_t) pager);
		vm_object_hash_entry_free(entry);
	}
}
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 *			just like memory_object_terminate, except
 *			that we wake up anyone blocked in vm_object_enter
 *			waiting for termination message to be queued
 *			before calling memory_object_init.
 */
static void
vm_object_release_pager(
	memory_object_t	pager)
{

	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	/*
	 *	Wakeup anyone waiting for this terminate
	 */

	vm_object_pager_wakeup(pager);

	/*
	 *	Release reference to pager.
	 */

	memory_object_deallocate(pager);
}
/*
 *	Routine:	vm_object_abort_activity [internal use only]
 *	Purpose:
 *		Abort paging requests pending on this object.
 *	In/out conditions:
 *		The object is locked on entry and exit.
 */
static void
vm_object_abort_activity(
	vm_object_t	object)
{
	register vm_page_t	p;
	vm_page_t		next;

	XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
		(integer_t)object, 0, 0, 0, 0);

	/*
	 *	Abort all activity that would be waiting
	 *	for a result on this memory object.
	 *
	 *	We could also choose to destroy all pages
	 *	that we have in memory for this object, but
	 *	we don't.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);

		/*
		 *	If it's being paged in, destroy it.
		 *	If an unlock has been requested, start it again.
		 */

		if (p->busy && p->absent) {
			VM_PAGE_FREE(p);
		} else {
			if (p->unlock_request != VM_PROT_NONE)
				p->unlock_request = VM_PROT_NONE;
			PAGE_WAKEUP(p);
		}

		p = next;
	}

	/*
	 *	Wake up threads waiting for the memory object to
	 *	become ready.
	 */

	object->pager_ready = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	kern_return_t		reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);
	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;
	/*
	 *	Rip out the pager from the vm_object now...
	 */

	vm_object_remove(object);
	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_request);
	vm_object_cache_unlock();
	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);
	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call)..
		 */
		vm_object_deallocate(object);

	}
	return(KERN_SUCCESS);
}
/*
 *	vm_object_deactivate_all_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_all_pages(
	register vm_object_t	object)
{
	register vm_page_t	p;

	queue_iterate(&object->memq, p, vm_page_t, listq) {
		vm_page_lock_queues();
		if (!p->busy)
			vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}
__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page)
{
	vm_object_t		orig_object;
	int pages_moved = 0;
	int pages_found = 0;
	/*
	 * entered with object lock held, acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	orig_object = object;

	for (;;) {
		register vm_page_t	m;
		vm_object_offset_t	toffset;
		vm_object_size_t	tsize;

		vm_object_paging_begin(object);
		vm_page_lock_queues();
		for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

			if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

				pages_found++;

				if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

					m->reference = FALSE;
					pmap_clear_reference(m->phys_page);

					if ((kill_page) && (object->internal)) {
						m->precious = FALSE;
						m->dirty = FALSE;
						pmap_clear_modify(m->phys_page);
						vm_external_state_clr(object->existence_map, offset);
					}
					VM_PAGE_QUEUES_REMOVE(m);

					if(m->zero_fill) {
						queue_enter_first(
							&vm_page_queue_zf,
							m, vm_page_t, pageq);
					} else {
						queue_enter_first(
							&vm_page_queue_inactive,
							m, vm_page_t, pageq);
					}

					m->inactive = TRUE;
					if (!m->fictitious)
						vm_page_inactive_count++;

					pages_moved++;
				}
			}
		}
		vm_page_unlock_queues();
		vm_object_paging_end(object);
		if (object->shadow) {
			vm_object_t	tmp_object;

			kill_page = 0;

			offset += object->shadow_offset;

			tmp_object = object->shadow;
			vm_object_lock(tmp_object);

			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}
/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be temporary/internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */
__private_extern__ void
vm_object_pmap_protect(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_offset_t			pmap_start,
	vm_prot_t			prot)
{
	if (object == VM_OBJECT_NULL)
		return;
	size = round_page_64(size);
	offset = trunc_page_64(offset);

	vm_object_lock(object);

	assert(object->internal);
	while (TRUE) {
	   if (object->resident_page_count > atop_32(size) / 2 &&
		   pmap != PMAP_NULL) {
		vm_object_unlock(object);
		pmap_protect(pmap, pmap_start, pmap_start + size, prot);
		return;
	   }
	   /* if we are doing large ranges with respect to resident */
	   /* page count then we should iterate over pages otherwise */
	   /* inverse page look-up will be faster */
	   if ((object->resident_page_count / 4) < atop_32(size)) {
		vm_page_t		p;
		vm_object_offset_t	end;

		end = offset + size;

		if (pmap != PMAP_NULL) {
			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (!p->fictitious &&
				    (offset <= p->offset) && (p->offset < end)) {

					vm_offset_t start = pmap_start +
						(vm_offset_t)(p->offset - offset);

					pmap_protect(pmap, start, start + PAGE_SIZE, prot);
				}
			}
		} else {
			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (!p->fictitious &&
				    (offset <= p->offset) && (p->offset < end)) {

					pmap_page_protect(p->phys_page,
							  prot & ~p->page_lock);
				}
			}
		}
	   } else {
		vm_page_t		p;
		vm_object_offset_t	end;
		vm_object_offset_t	target_off;

		end = offset + size;

		if (pmap != PMAP_NULL) {
			for(target_off = offset;
			    target_off < end; target_off += PAGE_SIZE) {
				if(p = vm_page_lookup(object, target_off)) {
					vm_offset_t start = pmap_start +
						(vm_offset_t)(p->offset - offset);
					pmap_protect(pmap, start,
						     start + PAGE_SIZE, prot);
				}
			}
		} else {
			for(target_off = offset;
			    target_off < end; target_off += PAGE_SIZE) {
				if(p = vm_page_lookup(object, target_off)) {
					pmap_page_protect(p->phys_page,
							  prot & ~p->page_lock);
				}
			}
		}
	   }
	   if (prot == VM_PROT_NONE) {
		/*
		 * Must follow shadow chain to remove access
		 * to pages in shadowed objects.
		 */
		register vm_object_t	next_object;

		next_object = object->shadow;
		if (next_object != VM_OBJECT_NULL) {
			offset += object->shadow_offset;
			vm_object_lock(next_object);
			vm_object_unlock(object);
			object = next_object;
		} else {
			/*
			 * End of chain - we are done.
			 */
			break;
		}
	   } else {
		/*
		 * Pages in shadowed objects may never have
		 * write permission - we may stop here.
		 */
		break;
	   }
	}

	vm_object_unlock(object);
}
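/*
 * Usage sketch (illustrative, not compiled): the two calling modes of
 * vm_object_pmap_protect described above.  The object, map, vaddr, offset
 * and size values here are hypothetical.
 */
#if 0
	/* Mappings known to live in a single pmap: protect via that pmap. */
	vm_object_pmap_protect(object, offset, size,
			       map->pmap, vaddr, VM_PROT_READ);

	/* Mappings may exist in several pmaps: use inverse (physical) lookup. */
	vm_object_pmap_protect(object, offset, size,
			       PMAP_NULL, 0, VM_PROT_READ);
#endif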
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	vm_object_offset_t	src_lo_offset = src_offset;
	vm_object_offset_t	src_hi_offset = src_offset + size;

	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}
	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	VM_OBJ_RES_INCR(src_object);
	vm_object_unlock(src_object);
	/*
	 *	Create a new object to hold the copied pages.
	 *	We fill the new object starting at offset 0,
	 *	regardless of the input offset.
	 *	We don't bother to lock the new object within
	 *	this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */
	for ( ;
	    size != 0 ;
	    src_offset += PAGE_SIZE_64,
	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t	new_page;
		vm_fault_return_t result;

		while ((new_page = vm_page_alloc(new_object, new_offset))
				== VM_PAGE_NULL) {
			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			}
		}
		do {
			vm_prot_t	prot = VM_PROT_READ;
			vm_page_t	_result_page;
			vm_page_t	top_page;
			register
			vm_page_t	result_page;
			kern_return_t	error_code;

			vm_object_lock(src_object);
			vm_object_paging_begin(src_object);

			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
			result = vm_fault_page(src_object, src_offset,
				VM_PROT_READ, FALSE, interruptible,
				src_lo_offset, src_hi_offset,
				VM_BEHAVIOR_SEQUENTIAL,
				&prot, &_result_page, &top_page,
				(int *)0,
				&error_code, FALSE, FALSE, NULL, 0);
			switch(result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;

				/*
				 *	We don't need to hold the object
				 *	lock -- the busy page will be enough.
				 *	[We don't care about picking up any
				 *	new modifications.]
				 *
				 *	Copy the page to the new object.
				 *
				 *	POLICY DECISION:
				 *		If result_page is clean,
				 *		we could steal it instead
				 *		of copying.
				 */

				vm_object_unlock(result_page->object);
				vm_page_copy(result_page, new_page);

				/*
				 *	Let go of both pages (make them
				 *	not busy, perform wakeup, activate).
				 */

				new_page->busy = FALSE;
				new_page->dirty = TRUE;
				vm_object_lock(result_page->object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lock_queues();
				if (!result_page->active &&
				    !result_page->inactive)
					vm_page_activate(result_page);
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 *	Release paging references and
				 *	top-level placeholder page, if any.
				 */

				vm_fault_cleanup(result_page->object,
						 top_page);

				break;
			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible))
					break;
				/* fall thru */

			case VM_FAULT_INTERRUPTED:
				vm_page_free(new_page);
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			case VM_FAULT_MEMORY_ERROR:
				/*
				 * A policy choice:
				 *	(a) ignore pages that we can't
				 *	    copy
				 *	(b) return the null object if
				 *	    any page fails [chosen]
				 */

				vm_page_lock_queues();
				vm_page_free(new_page);
				vm_page_unlock_queues();
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(error_code ? error_code:
				       KERN_MEMORY_ERROR);
			}
		} while (result != VM_FAULT_SUCCESS);
	}
	/*
	 *	Lose the extra reference, and return our object.
	 */

	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}
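/*
 * Usage sketch (illustrative, not compiled): the caller holds a reference
 * and the lock on src_object; the routine returns it unlocked and hands
 * back a brand-new object whose pages start at offset 0.
 */
#if 0
	vm_object_t	copy_object;
	kern_return_t	kr;

	vm_object_lock(src_object);
	kr = vm_object_copy_slowly(src_object, src_offset, size,
				   FALSE,	/* not interruptible */
				   &copy_object);
	if (kr != KERN_SUCCESS)
		return kr;	/* copy_object is VM_OBJECT_NULL here */
#endif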
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */

__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t		*_object,		/* INOUT */
	vm_object_offset_t	offset,			/* IN */
	vm_object_size_t	size,			/* IN */
	boolean_t		*_src_needs_copy,	/* OUT */
	boolean_t		*_dst_needs_copy)	/* OUT */
{
	vm_object_t	object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
	    *_object, offset, size, 0, 0);
	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return(TRUE);
	}
	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:
		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		assert(object->ref_count > 0);
		object->ref_count++;
		vm_object_res_reference(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;
	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return(FALSE);

	default:
		vm_object_unlock(object);
		return(FALSE);
	}
	return(TRUE);
}
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;
/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;
	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptable, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case is.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_restart_count++;
	}
	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);
	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
				THREAD_UNINT);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);
		mutex_pause();	/* wait a bit */
		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->size < src_offset+size)
		copy->size = src_offset+size;

	if (!copy->pager_ready)
		check_ready = TRUE;

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);
	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;
static int copy_delayed_protect_lookup = 0;
static int copy_delayed_protect_lookup_wait = 0;
/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The src_object must be locked on entry.  It will be unlocked
 *		on exit - so the caller must also hold a reference to it.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size)
{
	vm_object_t		new_copy = VM_OBJECT_NULL;
	vm_object_t		old_copy;
	vm_page_t		p;
	vm_object_size_t	copy_size = src_offset + size;

	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		pages modified by the new mapping (i.e., the copy,
	 *		not the original mapping).
	 *		Record the original object as the backing object for
	 *		the copy object.  If the original mapping does not
	 *		change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		When the original mapping causes a page to be modified,
	 *		it must be copied to a new page that is "pushed" to
	 *		the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		This makes the copy object itself read-only, allowing
	 *		it to be reused if the original mapping makes no
	 *		changes, and simplifying the synchronization required
	 *		in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be asymmetric because the original
	 *	object is *not* marked copy-on-write. A copied page is pushed
	 *	to the copy object, regardless which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done. If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused. Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot affect
	 *	the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */
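/*
 * Illustrative picture: after a second delayed copy of a source that was
 * modified in between, the copy objects chain above the source:
 *
 *	copy_1 --shadow--> copy_2 --shadow--> src_object
 *	                     ^                     |
 *	                     '-------- copy ------'
 *
 * Each copy object is read-only; a source page is pushed to the most
 * recent copy object before the original mapping modifies it.
 */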
 Retry:

	/*
	 * Wait for paging in progress.
	 */
	if (!src_object->true_share)
		vm_object_paging_wait(src_object, THREAD_UNINT);
	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			mutex_pause();

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;
			if (collisions++ == 0)
				copy_delayed_lock_contention++;

			if (collisions > copy_delayed_max_collisions)
				copy_delayed_max_collisions = collisions;

			vm_object_lock(src_object);
			goto Retry;
		}
		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object if
			 *	we can safely grow it (if
			 *	needed).
			 */
			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}
			if (old_copy->size < copy_size) {
				/*
				 * We can't perform a delayed copy if any of the
				 * pages in the extended range are wired (because
				 * we can't safely take write permission away from
				 * wired pages).  If the pages aren't wired, then
				 * go ahead and protect them.
				 */
				copy_delayed_protect_iterate++;
				queue_iterate(&src_object->memq, p, vm_page_t, listq) {
					if (!p->fictitious &&
					    p->offset >= old_copy->size &&
					    p->offset < copy_size) {
						if (p->wire_count > 0) {
							vm_object_unlock(old_copy);
							vm_object_unlock(src_object);
							return VM_OBJECT_NULL;
						} else {
							pmap_page_protect(p->phys_page,
								(VM_PROT_ALL & ~VM_PROT_WRITE &
								 ~p->page_lock));
						}
					}
				}
				old_copy->size = copy_size;
			}
			vm_object_reference_locked(old_copy);
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			return(old_copy);
		}
		/*
		 *	Adjust the size argument so that the newly-created
		 *	copy object will be large enough to back either the
		 *	old copy object or the new mapping.
		 */
		if (old_copy->size > copy_size)
			copy_size = old_copy->size;
		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(copy_size);
			vm_object_lock(src_object);
			vm_object_lock(new_copy);
			goto Retry;
		}
		new_copy->size = copy_size;
		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		       (old_copy->shadow_offset == (vm_object_offset_t) 0));
	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(copy_size);
		vm_object_lock(src_object);
		vm_object_lock(new_copy);
		goto Retry;
	}
	/*
	 * We now have the src object locked, and the new copy object
	 * allocated and locked (and potentially the old copy locked).
	 * Before we go any further, make sure we can still perform
	 * a delayed copy, as the situation may have changed.
	 *
	 * Specifically, we can't perform a delayed copy if any of the
	 * pages in the range are wired (because we can't safely take
	 * write permission away from wired pages).  If the pages aren't
	 * wired, then go ahead and protect them.
	 */
	copy_delayed_protect_iterate++;
	queue_iterate(&src_object->memq, p, vm_page_t, listq) {
		if (!p->fictitious && p->offset < copy_size) {
			if (p->wire_count > 0) {
				if (old_copy)
					vm_object_unlock(old_copy);
				vm_object_unlock(src_object);
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
				return VM_OBJECT_NULL;
			} else {
				pmap_page_protect(p->phys_page,
					(VM_PROT_ALL & ~VM_PROT_WRITE &
					 ~p->page_lock));
			}
		}
	}
	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		assert(src_object->ref_count > 0);
		old_copy->shadow = new_copy;
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;		/* for old_copy->shadow ref. */

#if TASK_SWAPPER
		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}
#endif

		vm_object_unlock(old_copy);	/* done with old_copy */
	}
2203 * Point the new copy at the existing object.
2205 new_copy
->shadow
= src_object
;
2206 new_copy
->shadow_offset
= 0;
2207 new_copy
->shadowed
= TRUE
; /* caller must set needs_copy */
2208 assert(src_object
->ref_count
> 0);
2209 src_object
->ref_count
++;
2210 VM_OBJ_RES_INCR(src_object
);
2211 src_object
->copy
= new_copy
;
2212 vm_object_unlock(src_object
);
2213 vm_object_unlock(new_copy
);
2216 "vm_object_copy_delayed: used copy object %X for source %X\n",
2217 (integer_t
)new_copy
, (integer_t
)src_object
, 0, 0, 0);
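
/*
 * Illustrative note (not part of the original source): the protect
 * loops above are what make the delayed strategy safe.  Any page left
 * writable in the pmap could be modified without faulting, so the
 * loops strip VM_PROT_WRITE from every covered page; a wired page
 * cannot have write permission revoked, so its presence forces the
 * VM_OBJECT_NULL return, and the caller (vm_object_copy_strategically
 * below) falls back to an immediate physical copy instead.
 */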
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*dst_object,	/* OUT */
	vm_object_offset_t	*dst_offset,	/* OUT */
	boolean_t		*dst_needs_copy) /* OUT */
{
	kern_return_t		result;
	boolean_t		interruptible = THREAD_ABORTSAFE; /* XXX */
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	vm_object_lock(src_object);

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready".  Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {
		wait_result_t wait_result;

		wait_result = vm_object_sleep(	src_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
		if (wait_result != THREAD_AWAKENED) {
			vm_object_unlock(src_object);
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return(MACH_SEND_INTERRUPTED);
		}
	}

	copy_strategy = src_object->copy_strategy;

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	    case MEMORY_OBJECT_COPY_DELAY:
		*dst_object = vm_object_copy_delayed(src_object,
						     src_offset, size);
		if (*dst_object != VM_OBJECT_NULL) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
			result = KERN_SUCCESS;
			break;
		}
		vm_object_lock(src_object);
		/* fall thru when delayed copy not allowed */

	    case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
					       interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	    case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
				dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	    case MEMORY_OBJECT_COPY_SYMMETRIC:
		XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", (natural_t)src_object, src_offset, size, 0, 0);
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	    default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	return(result);
}
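
/*
 * Illustrative sketch (not part of the original source): one way a
 * caller might drive vm_object_copy_strategically().  The helper name
 * and its locals are hypothetical; only the copy routine itself is
 * real.  Not compiled.
 */
#if 0	/* example only */
static kern_return_t
example_copy_region(
	vm_object_t		src,
	vm_object_offset_t	off,
	vm_object_size_t	len,
	vm_object_t		*out)
{
	vm_object_offset_t	dst_off;
	boolean_t		needs_copy;
	kern_return_t		kr;

	kr = vm_object_copy_strategically(src, off, len,
					  out, &dst_off, &needs_copy);
	/*
	 * KERN_MEMORY_RESTART_COPY means the object is symmetric and
	 * the caller must perform the symmetric copy itself; if
	 * needs_copy came back TRUE, the new mapping must be marked
	 * copy-on-write by the caller.
	 */
	return kr;
}
#endif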
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = FALSE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t		*object,	/* IN/OUT */
	vm_object_offset_t	*offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);

	/*
	 *	Determine if we really need a shadow.
	 */

	if (vm_object_shadow_check && source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	     source->shadow->copy == VM_OBJECT_NULL))
	{
		source->shadowed = FALSE;
		return FALSE;
	}

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */
	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */
	*offset = 0;
	*object = result;
	return TRUE;
}
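
/*
 * Illustrative sketch (not part of the original source): the IN/OUT
 * protocol above, seen from a hypothetical caller that holds one
 * reference on the object in *objp, mapped at entry offset *offp.
 * Not compiled.
 */
#if 0	/* example only */
static void
example_shadow_setup(
	vm_object_t		*objp,
	vm_object_offset_t	*offp,
	vm_object_size_t	len)
{
	if (vm_object_shadow(objp, offp, len)) {
		/*
		 * *objp is now a fresh shadow whose shadow_offset is
		 * the old *offp; *offp has been reset to 0, and the
		 * caller's reference now lives in (*objp)->shadow.
		 */
	}
}
#endif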
/*
 *	The relationship between vm_object structures and
 *	the memory_object requires careful synchronization.
 *
 *	All associations are created by memory_object_create_named
 *	for external pagers and vm_object_pager_create for internal
 *	objects as follows:
 *
 *		pager:	the memory_object itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a port reference;
 *
 *		pager_request:
 *			the memory object control port,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to this
 *			port, but no other references.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish references to the memory
 *			object. [The memory manager may not want to
 *			destroy the memory object, but may wish to
 *			refuse or tear down existing memory mappings.]
 *
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the pager field and release the memory object references.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	In addition to the lock on the object, the vm_object_cache_lock
 *	governs the associations.  References gained through the
 *	association require use of the cache lock.
 *
 *	Because the pager field may be cleared spontaneously, it
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the pager reference found in the
 *	vm_object must be valid.  To prevent this from being released,
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the pager values include the
 *	termination routines and vm_object_collapse.]
 */
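
/*
 * Illustrative sketch (not part of the original source): the canonical
 * paging-reference pattern described above, as used later in this file
 * (see vm_object_lock_request).  The helper name is hypothetical.
 */
#if 0	/* example only */
static void
example_with_paging_reference(
	vm_object_t	object)
{
	vm_object_lock(object);
	vm_object_paging_begin(object);	/* pins the pager association */
	/* ... use object->pager; termination/collapse must wait ... */
	vm_object_paging_end(object);	/* wakes threads in paging_wait */
	vm_object_unlock(object);
}
#endif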
/*
 *	Routine:	vm_object_pager_dead
 *
 *	Purpose:
 *		A port is being destroyed, and the IPC kobject code
 *		can't tell if it represents a pager port or not.
 *		So this function is called each time it sees a port
 *		die.
 *		THIS IS HORRIBLY INEFFICIENT.  We should only call
 *		this routine if we had requested a notification on
 *		this port.
 */

__private_extern__ void
vm_object_pager_dead(
	memory_object_t	pager)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;

	/*
	 *	Perform essentially the same operations as in vm_object_lookup,
	 *	except that this time we look up based on the memory_object
	 *	port, not the control port.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, FALSE);
	if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
	    entry->object == VM_OBJECT_NULL) {
		vm_object_cache_unlock();
		return;
	}

	object = entry->object;
	entry->object = VM_OBJECT_NULL;

	vm_object_lock(object);
	if (object->ref_count == 0) {
		XPR(XPR_VM_OBJECT_CACHE,
			"vm_object_destroy: removing %x from cache, head (%x, %x)\n",
			(integer_t)object,
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0,0);

		queue_remove(&vm_object_cached_list, object,
				vm_object_t, cached_list);
		vm_object_cached_count--;
	}
	object->ref_count++;
	vm_object_res_reference(object);

	object->can_persist = FALSE;

	assert(object->pager == pager);

	/*
	 *	Remove the pager association.
	 *
	 *	Note that the memory_object itself is dead, so
	 *	we don't bother with it.
	 */

	object->pager = MEMORY_OBJECT_NULL;

	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_pager_wakeup(pager);

	/*
	 *	Release the pager reference.  Note that there's no
	 *	point in trying the memory_object_terminate call
	 *	because the memory_object itself is dead.  Also
	 *	release the memory_object_control reference, since
	 *	the pager didn't do that either.
	 */

	memory_object_deallocate(pager);
	memory_object_control_deallocate(object->pager_request);

	/*
	 *	Restart pending page requests
	 */
	vm_object_lock(object);
	vm_object_abort_activity(object);
	vm_object_unlock(object);

	/*
	 *	Lose the object reference.
	 */

	vm_object_deallocate(object);
}
/*
 *	Routine:	vm_object_enter
 *	Purpose:
 *		Find a VM object corresponding to the given
 *		pager; if no such object exists, create one,
 *		and initialize the pager.
 */
vm_object_t
vm_object_enter(
	memory_object_t		pager,
	vm_object_size_t	size,
	boolean_t		internal,
	boolean_t		init,
	boolean_t		named)
{
	register vm_object_t	object;
	vm_object_t		new_object;
	boolean_t		must_init;
	vm_object_hash_entry_t	entry, new_entry;

	if (pager == MEMORY_OBJECT_NULL)
		return(vm_object_allocate(size));

	new_object = VM_OBJECT_NULL;
	new_entry = VM_OBJECT_HASH_ENTRY_NULL;
	must_init = init;

	/*
	 *	Look for an object associated with this port.
	 */

	vm_object_cache_lock();
	do {
		entry = vm_object_hash_lookup(pager, FALSE);

		if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
			if (new_object == VM_OBJECT_NULL) {
				/*
				 *	We must unlock to create a new object;
				 *	if we do so, we must try the lookup again.
				 */
				vm_object_cache_unlock();
				assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
				new_entry = vm_object_hash_entry_alloc(pager);
				new_object = vm_object_allocate(size);
				vm_object_cache_lock();
			} else {
				/*
				 *	Lookup failed twice, and we have something
				 *	to insert; set the object.
				 */
				vm_object_hash_insert(new_entry);
				entry = new_entry;
				entry->object = new_object;
				new_entry = VM_OBJECT_HASH_ENTRY_NULL;
				new_object = VM_OBJECT_NULL;
				must_init = TRUE;
			}
		} else if (entry->object == VM_OBJECT_NULL) {
			/*
			 *	If a previous object is being terminated,
			 *	we must wait for the termination message
			 *	to be queued (and lookup the entry again).
			 */
			entry->waiting = TRUE;
			entry = VM_OBJECT_HASH_ENTRY_NULL;
			assert_wait((event_t) pager, THREAD_UNINT);
			vm_object_cache_unlock();
			thread_block((void (*)(void))0);
			vm_object_cache_lock();
		}
	} while (entry == VM_OBJECT_HASH_ENTRY_NULL);

	object = entry->object;
	assert(object != VM_OBJECT_NULL);

	if (!must_init) {
		vm_object_lock(object);
		assert(!internal || object->internal);
		if (named) {
			assert(!object->named);
			object->named = TRUE;
		}
		if (object->ref_count == 0) {
			XPR(XPR_VM_OBJECT_CACHE,
			    "vm_object_enter: removing %x from cache, head (%x, %x)\n",
			    (integer_t)object,
			    (integer_t)vm_object_cached_list.next,
			    (integer_t)vm_object_cached_list.prev, 0,0);
			queue_remove(&vm_object_cached_list, object,
				     vm_object_t, cached_list);
			vm_object_cached_count--;
		}
		object->ref_count++;
		vm_object_res_reference(object);
		vm_object_unlock(object);

		VM_STAT(hits++);
	}
	assert(object->ref_count > 0);

	VM_STAT(lookups++);

	vm_object_cache_unlock();

	XPR(XPR_VM_OBJECT,
		"vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
		(integer_t)pager, (integer_t)object, must_init, 0, 0);

	/*
	 *	If we raced to create a vm_object but lost, let's
	 *	throw away ours.
	 */

	if (new_object != VM_OBJECT_NULL)
		vm_object_deallocate(new_object);

	if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
		vm_object_hash_entry_free(new_entry);

	if (must_init) {
		pager_request_t pager_request;

		/*
		 *	Allocate request port.
		 */

		pager_request = memory_object_control_allocate(object);
		assert (pager_request != PAGER_REQUEST_NULL);

		vm_object_lock(object);

		/*
		 *	Copy the reference we were given.
		 */

		memory_object_reference(pager);
		object->pager_created = TRUE;
		object->pager = pager;
		object->internal = internal;
		object->pager_trusted = internal;
		if (!internal) {
			/* copy strategy invalid until set by memory manager */
			object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
		}
		object->pager_request = pager_request;
		object->pager_ready = FALSE;

		vm_object_unlock(object);

		/*
		 *	Let the pager know we're using it.
		 */

		(void) memory_object_init(pager,
			object->pager_request,
			PAGE_SIZE);

		vm_object_lock(object);
		if (named)
			object->named = TRUE;
		if (internal) {
			object->pager_ready = TRUE;
			vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
		}

		object->pager_initialized = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
	} else {
		vm_object_lock(object);
	}

	/*
	 *	[At this point, the object must be locked]
	 */

	/*
	 *	Wait for the work above to be done by the first
	 *	thread to map this object.
	 */

	while (!object->pager_initialized) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_INITIALIZED,
				THREAD_UNINT);
	}
	vm_object_unlock(object);

	XPR(XPR_VM_OBJECT,
	    "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
	    (integer_t)object, (integer_t)object->pager, internal, 0,0);
	return(object);
}
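
/*
 * Illustrative sketch (not part of the original source): the shape of
 * the optimistic allocate-and-retry idiom used by vm_object_enter()
 * above, reduced to its locking skeleton.  All names here are
 * hypothetical pseudocode; not compiled.
 */
#if 0	/* example only */
	lock(cache);
	do {
		e = lookup(key);
		if (e == NULL) {
			if (candidate == NULL) {
				unlock(cache);	/* can't allocate locked */
				candidate = alloc();
				lock(cache);	/* state may have changed */
			} else {
				e = insert(key, candidate);
				candidate = NULL;
			}
		}
	} while (e == NULL);
	unlock(cache);
	if (candidate != NULL)
		free(candidate);	/* we raced and lost */
#endif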
/*
 *	Routine:	vm_object_pager_create
 *	Purpose:
 *		Create a memory object for an internal object.
 *	In/out conditions:
 *		The object is locked on entry and exit;
 *		it may be unlocked within this call.
 *	Limitations:
 *		Only one thread may be performing a
 *		vm_object_pager_create on an object at
 *		a time.  Presumably, only the pageout
 *		daemon will be using this routine.
 */

__private_extern__ void
vm_object_pager_create(
	register vm_object_t	object)
{
	memory_object_t		pager;
	vm_object_hash_entry_t	entry;
#if	MACH_PAGEMAP
	vm_object_size_t	size;
	vm_external_map_t	map;
#endif	/* MACH_PAGEMAP */

	XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
		(integer_t)object, 0,0,0,0);

	if (memory_manager_default_check() != KERN_SUCCESS)
		return;

	/*
	 *	Prevent collapse or termination by holding a paging reference
	 */

	vm_object_paging_begin(object);
	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing the ports
		 */
		while (!object->pager_initialized) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
		}
		vm_object_paging_end(object);
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */

	object->pager_created = TRUE;
	object->paging_offset = 0;

#if	MACH_PAGEMAP
	size = object->size;
#endif	/* MACH_PAGEMAP */
	vm_object_unlock(object);

#if	MACH_PAGEMAP
	map = vm_external_create(size);
	vm_object_lock(object);
	assert(object->size == size);
	object->existence_map = map;
	vm_object_unlock(object);
#endif	/* MACH_PAGEMAP */

	/*
	 *	Create the [internal] pager, and associate it with this object.
	 *
	 *	We make the association here so that vm_object_enter()
	 *	can look up the object to complete initializing it.  No
	 *	user will ever map this object.
	 */
	{
		memory_object_default_t	dmm;
		vm_size_t	cluster_size;

		/* acquire a reference for the default memory manager */
		dmm = memory_manager_default_reference(&cluster_size);
		assert(cluster_size >= PAGE_SIZE);

		object->cluster_size = cluster_size; /* XXX ??? */
		assert(object->temporary);

		/* create our new memory object */
		(void) memory_object_create(dmm, object->size, &pager);

		memory_object_default_deallocate(dmm);
	}

	entry = vm_object_hash_entry_alloc(pager);

	vm_object_cache_lock();
	vm_object_hash_insert(entry);

	entry->object = object;
	vm_object_cache_unlock();

	/*
	 *	A reference was returned by
	 *	memory_object_create(), and it is
	 *	copied by vm_object_enter().
	 */

	if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
		panic("vm_object_pager_create: mismatch");

	/*
	 *	Drop the reference we were passed.
	 */
	memory_object_deallocate(pager);

	vm_object_lock(object);

	/*
	 *	Release the paging reference
	 */
	vm_object_paging_end(object);
}
/*
 *	Routine:	vm_object_remove
 *	Purpose:
 *		Eliminate the pager/object association
 *		for this pager.
 *	Conditions:
 *		The object cache must be locked.
 */
__private_extern__ void
vm_object_remove(
	vm_object_t	object)
{
	memory_object_t pager;
	pager_request_t pager_request;

	if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t	entry;

		entry = vm_object_hash_lookup(pager, FALSE);
		if (entry != VM_OBJECT_HASH_ENTRY_NULL)
			entry->object = VM_OBJECT_NULL;
	}
}

/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
static long	object_collapses = 0;
static long	object_bypasses  = 0;

static boolean_t	vm_object_collapse_allowed = TRUE;
static boolean_t	vm_object_bypass_allowed = TRUE;

static int	vm_external_discarded;
static int	vm_external_collapsed;
/*
 *	Routine:	vm_object_do_collapse
 *	Purpose:
 *		Collapse an object with the object backing it.
 *		Pages in the backing object are moved into the
 *		parent, and the backing object is deallocated.
 *	Conditions:
 *		Both objects and the cache are locked; the page
 *		queues are unlocked.
 */
static void
vm_object_do_collapse(
	vm_object_t object,
	vm_object_t backing_object)
{
	vm_page_t p, pp;
	vm_object_offset_t new_offset, backing_offset;
	vm_object_size_t size;

	backing_offset = object->shadow_offset;
	size = object->size;

	/*
	 *	Move all in-memory pages from backing_object
	 *	to the parent.  Pages that have been paged out
	 *	will be overwritten by any of the parent's
	 *	pages that shadow them.
	 */

	while (!queue_empty(&backing_object->memq)) {

		p = (vm_page_t) queue_first(&backing_object->memq);

		new_offset = (p->offset - backing_offset);

		assert(!p->busy || p->absent);

		/*
		 *	If the parent has a page here, or if
		 *	this page falls outside the parent,
		 *	dispose of it.
		 *
		 *	Otherwise, move it as planned.
		 */

		if (p->offset < backing_offset || new_offset >= size) {
			VM_PAGE_FREE(p);
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp == VM_PAGE_NULL) {

				/*
				 *	Parent now has no page.
				 *	Move the backing object's page up.
				 */

				vm_page_rename(p, object, new_offset);
#if	MACH_PAGEMAP
			} else if (pp->absent) {

				/*
				 *	Parent has an absent page...
				 *	it's not being paged in, so
				 *	it must really be missing from
				 *	the parent.
				 *
				 *	Throw out the absent page...
				 *	any faults looking for that
				 *	page will restart with the new
				 *	one.
				 */

				VM_PAGE_FREE(pp);
				vm_page_rename(p, object, new_offset);
#endif	/* MACH_PAGEMAP */
			} else {
				assert(! pp->absent);

				/*
				 *	Parent object has a real page.
				 *	Throw away the backing object's
				 *	page.
				 */
				VM_PAGE_FREE(p);
			}
		}
	}

#if	!MACH_PAGEMAP
	assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL
		|| (!backing_object->pager_created
			&& backing_object->pager == MEMORY_OBJECT_NULL));
#else
	assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
#endif	/* !MACH_PAGEMAP */

	if (backing_object->pager != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t	entry;

		/*
		 *	Move the pager from backing_object to object.
		 *
		 *	XXX We're only using part of the paging space
		 *	for keeps now... we ought to discard the
		 *	unused portion.
		 */

		assert(!object->paging_in_progress);
		object->pager = backing_object->pager;
		entry = vm_object_hash_lookup(object->pager, FALSE);
		assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
		entry->object = object;
		object->pager_created = backing_object->pager_created;
		object->pager_request = backing_object->pager_request;
		object->pager_ready = backing_object->pager_ready;
		object->pager_initialized = backing_object->pager_initialized;
		object->cluster_size = backing_object->cluster_size;
		object->paging_offset =
		    backing_object->paging_offset + backing_offset;
		if (object->pager_request != PAGER_REQUEST_NULL) {
			memory_object_control_collapse(object->pager_request,
						       object);
		}
	}

	vm_object_cache_unlock();

#if	MACH_PAGEMAP
	/*
	 *	If the shadow offset is 0, then use the existence map from
	 *	the backing object if there is one. If the shadow offset is
	 *	not zero, toss it.
	 *
	 *	XXX - If the shadow offset is not 0 then a bit copy is needed
	 *	if the map is to be salvaged.  For now, we just toss the
	 *	old map, giving the collapsed object no map. This means that
	 *	the pager is invoked for zero fill pages.  If analysis shows
	 *	that this happens frequently and is a performance hit, then
	 *	this code should be fixed to salvage the map.
	 */
	assert(object->existence_map == VM_EXTERNAL_NULL);
	if (backing_offset || (size != backing_object->size)) {
		vm_external_discarded++;
		vm_external_destroy(backing_object->existence_map,
			backing_object->size);
	}
	else {
		vm_external_collapsed++;
		object->existence_map = backing_object->existence_map;
	}
	backing_object->existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */

	/*
	 *	Object now shadows whatever backing_object did.
	 *	Note that the reference to backing_object->shadow
	 *	moves from within backing_object to within object.
	 */

	object->shadow = backing_object->shadow;
	object->shadow_offset += backing_object->shadow_offset;
	assert((object->shadow == VM_OBJECT_NULL) ||
	       (object->shadow->copy != backing_object));

	/*
	 *	Discard backing_object.
	 *
	 *	Since the backing object has no pages, no
	 *	pager left, and no object references within it,
	 *	all that is necessary is to dispose of it.
	 */

	assert((backing_object->ref_count == 1) &&
	       (backing_object->resident_page_count == 0) &&
	       (backing_object->paging_in_progress == 0));

	backing_object->alive = FALSE;
	vm_object_unlock(backing_object);

	XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
		(integer_t)backing_object, 0,0,0,0);

	zfree(vm_object_zone, (vm_offset_t) backing_object);

	object_collapses++;
}
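
/*
 * Illustrative worked example (not part of the original source) of the
 * page-migration rule above, with hypothetical numbers: if the parent
 * maps the backing object at shadow_offset 0x2000 and has size 0x3000,
 * a backing page at offset 0x1000 satisfies p->offset < backing_offset
 * and is freed; a backing page at offset 0x6000 has new_offset
 * 0x4000 >= 0x3000 and is also freed; one at offset 0x3000 renames to
 * parent offset 0x1000, unless the parent already has a real page
 * there, in which case the backing page is discarded instead.
 */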
static void
vm_object_do_bypass(
	vm_object_t object,
	vm_object_t backing_object)
{
	/*
	 *	Make the parent shadow the next object
	 *	in the chain.
	 */

#if	TASK_SWAPPER
	/*
	 *	Do object reference in-line to
	 *	conditionally increment shadow's
	 *	residence count.  If object is not
	 *	resident, leave residence count
	 *	on shadow alone.
	 */
	if (backing_object->shadow != VM_OBJECT_NULL) {
		vm_object_lock(backing_object->shadow);
		backing_object->shadow->ref_count++;
		if (object->res_count != 0)
			vm_object_res_reference(backing_object->shadow);
		vm_object_unlock(backing_object->shadow);
	}
#else	/* TASK_SWAPPER */
	vm_object_reference(backing_object->shadow);
#endif	/* TASK_SWAPPER */

	object->shadow = backing_object->shadow;
	object->shadow_offset += backing_object->shadow_offset;

	/*
	 *	Backing object might have had a copy pointer
	 *	to us.  If it did, clear it.
	 */
	if (backing_object->copy == object) {
		backing_object->copy = VM_OBJECT_NULL;
	}

	/*
	 *	Drop the reference count on backing_object.
#if	TASK_SWAPPER
	 *	Since its ref_count was at least 2, it
	 *	will not vanish; so we don't need to call
	 *	vm_object_deallocate.
	 *	[FBDP: that doesn't seem to be true any more]
	 *
	 *	The res_count on the backing object is
	 *	conditionally decremented.  It's possible
	 *	(via vm_pageout_scan) to get here with
	 *	a "swapped" object, which has a 0 res_count,
	 *	in which case, the backing object res_count
	 *	is already down by one.
#else
	 *	Don't call vm_object_deallocate unless
	 *	ref_count drops to zero.
	 *
	 *	The ref_count can drop to zero here if the
	 *	backing object could be bypassed but not
	 *	collapsed, such as when the backing object
	 *	is temporary and cachable.
#endif
	 */
	if (backing_object->ref_count > 1) {
		backing_object->ref_count--;
#if	TASK_SWAPPER
		if (object->res_count != 0)
			vm_object_res_deallocate(backing_object);
		assert(backing_object->ref_count > 0);
#endif	/* TASK_SWAPPER */
		vm_object_unlock(backing_object);
	} else {

		/*
		 *	Drop locks so that we can deallocate
		 *	the backing object.
		 */

#if	TASK_SWAPPER
		if (object->res_count == 0) {
			/* XXX get a reference for the deallocate below */
			vm_object_res_reference(backing_object);
		}
#endif	/* TASK_SWAPPER */
		vm_object_unlock(object);
		vm_object_unlock(backing_object);
		vm_object_deallocate(backing_object);

		/*
		 *	Relock object. We don't have to reverify
		 *	its state since vm_object_collapse will
		 *	do that for us as it starts at the
		 *	top of the chain again.
		 */

		vm_object_lock(object);
	}

	object_bypasses++;
}
/*
 *	vm_object_collapse:
 *
 *	Perform an object collapse or an object bypass if appropriate.
 *	The real work of collapsing and bypassing is performed in
 *	the routines vm_object_do_collapse and vm_object_do_bypass.
 *
 *	Requires that the object be locked and the page queues be unlocked.
 *
 */
__private_extern__ void
vm_object_collapse(
	register vm_object_t		object,
	register vm_object_offset_t	hint_offset)
{
	register vm_object_t	backing_object;
	register unsigned int	rcount;
	register unsigned int	size;

	if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) {
		return;
	}

	XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
		(integer_t)object, 0,0,0,0);

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for either
		 *	collapse or bypass:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out, and
		 */
		if (object == VM_OBJECT_NULL ||
		    object->paging_in_progress != 0 ||
		    object->absent_count != 0)
			return;

		/*
		 *	There is a backing object, and
		 */
		if ((backing_object = object->shadow) == VM_OBJECT_NULL)
			return;

		vm_object_lock(backing_object);

		/*
		 *	The backing object is not read_only,
		 *	and no pages in the backing object are
		 *	currently being paged out.
		 *	The backing object is internal.
		 */
		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != VM_OBJECT_NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We can now try to either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 *
		 *	If there is exactly one reference to the backing
		 *	object, we may be able to collapse it into the
		 *	parent.
		 *
		 *	If MACH_PAGEMAP is defined:
		 *	The parent must not have a pager created for it,
		 *	since collapsing a backing_object dumps new pages
		 *	into the parent that its pager doesn't know about
		 *	(and the collapse code can't merge the existence
		 *	maps).
		 *	Otherwise:
		 *	As long as one of the objects is still not known
		 *	to the pager, we can collapse them.
		 */
		if (backing_object->ref_count == 1 &&
		    (!object->pager_created
#if	!MACH_PAGEMAP
		     || !backing_object->pager_created
#endif	/*!MACH_PAGEMAP */
		    ) && vm_object_collapse_allowed) {

			XPR(XPR_VM_OBJECT,
		   "vm_object_collapse: %x to %x, pager %x, pager_request %x\n",
				(integer_t)backing_object, (integer_t)object,
				(integer_t)backing_object->pager,
				(integer_t)backing_object->pager_request, 0);

			/*
			 *	We need the cache lock for collapsing,
			 *	but we must not deadlock.
			 */
			if (! vm_object_cache_lock_try()) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Collapse the object with its backing
			 *	object, and try again with the object's
			 *	new backing object.
			 */
			vm_object_do_collapse(object, backing_object);
			continue;
		}

		/*
		 *	Collapsing the backing object was not possible
		 *	or permitted, so let's try bypassing it.
		 */
		if (! vm_object_bypass_allowed) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	If the object doesn't have all its pages present,
		 *	we have to make sure no pages in the backing object
		 *	"show through" before bypassing it.
		 */
		size = atop(object->size);
		rcount = object->resident_page_count;
		if (rcount != size) {
			vm_object_size_t	size;
			vm_object_offset_t	offset;
			vm_object_offset_t	backing_offset;
			unsigned int		backing_rcount;
			unsigned int		lookups = 0;

			/*
			 *	If the backing object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (backing_object->pager_created
#if	MACH_PAGEMAP
			    && (backing_object->existence_map == VM_EXTERNAL_NULL)
#endif	/* MACH_PAGEMAP */
			    ) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	If the object has a pager but no pagemap,
			 *	then we cannot bypass it, because we don't know
			 *	what pages it has.
			 */
			if (object->pager_created
#if	MACH_PAGEMAP
			    && (object->existence_map == VM_EXTERNAL_NULL)
#endif	/* MACH_PAGEMAP */
			    ) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	If the backing object has existence info,
			 *	we must examine its existence info
			 *	as well.
			 */

			backing_offset = object->shadow_offset;
			backing_rcount = backing_object->resident_page_count;

#define EXISTS_IN_OBJECT(obj, off, rc) \
	(vm_external_state_get((obj)->existence_map, \
	 (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
	 ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
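
/*
 * Note added for exposition (not part of the original source):
 * EXISTS_IN_OBJECT reports TRUE if the page at `off' is known to the
 * pager (the existence map says EXISTS) or is resident.  The side
 * effects are deliberate: ++lookups feeds the lookup throttle in the
 * loops below, and (rc)-- keeps a running count of resident pages not
 * yet seen, so a scan can stop early once every resident page has
 * been accounted for.  E.g. with rc == 1, the first successful
 * vm_page_lookup() drops rc to 0 and later invocations skip the page
 * hash lookup entirely.
 */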
			/*
			 *	Check the hint location first
			 *	(since it is often the quickest way out of here).
			 */
			if (object->cow_hint != ~(vm_offset_t)0)
				hint_offset = (vm_object_offset_t)object->cow_hint;
			else
				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
					      (hint_offset - 8 * PAGE_SIZE_64) : 0;

			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
					     backing_offset, backing_rcount) &&
			    !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
				/* dependency right at the hint */
				object->cow_hint = (vm_offset_t)hint_offset;
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	If the object's window onto the backing_object
			 *	is large compared to the number of resident
			 *	pages in the backing object, it makes sense to
			 *	walk the backing_object's resident pages first.
			 *
			 *	NOTE: Pages may be in both the existence map and
			 *	resident.  So, we can't permanently decrement
			 *	the rcount here because the second loop may
			 *	find the same pages in the backing object'
			 *	existence map that we found here and we would
			 *	double-decrement the rcount.  We also may or
			 *	may not have found the
			 */
			if (backing_rcount && size >
			    ((backing_object->existence_map) ?
			     backing_rcount : (backing_rcount >> 1))) {
				unsigned int rc = rcount;
				vm_page_t p;

				backing_rcount = backing_object->resident_page_count;
				p = (vm_page_t)queue_first(&backing_object->memq);
				do {
					/* Until we get more than one lookup lock */
					if (lookups > 256) {
						lookups = 0;
						delay(1);
					}

					offset = (p->offset - backing_offset);
					if (offset < object->size &&
					    offset != hint_offset &&
					    !EXISTS_IN_OBJECT(object, offset, rc)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t)offset;
						vm_object_unlock(backing_object);
						return;
					}
					p = (vm_page_t) queue_next(&p->listq);

				} while (--backing_rcount);
			}

			/*
			 *	Walk through the offsets looking for pages in the
			 *	backing object that show through to the object.
			 */
			if (backing_rcount || backing_object->existence_map) {
				offset = hint_offset;

				while((offset =
				      (offset + PAGE_SIZE_64 < object->size) ?
				      (offset + PAGE_SIZE_64) : 0) != hint_offset) {

					/* Until we get more than one lookup lock */
					if (lookups > 256) {
						lookups = 0;
						delay(1);
					}

					if (EXISTS_IN_OBJECT(backing_object, offset +
					    backing_offset, backing_rcount) &&
					    !EXISTS_IN_OBJECT(object, offset, rcount)) {
						/* found a dependency */
						object->cow_hint = (vm_offset_t)offset;
						vm_object_unlock(backing_object);
						return;
					}
				}
			}
		}

		/* reset the offset hint for any objects deeper in the chain */
		object->cow_hint = (vm_offset_t)0;

		/*
		 *	All interesting pages in the backing object
		 *	already live in the parent or its pager.
		 *	Thus we can bypass the backing object.
		 */

		vm_object_do_bypass(object, backing_object);

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 *		The object must not have paging_in_progress, usually
 *		guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
	register vm_object_t		object,
	register vm_object_offset_t	start,
	register vm_object_offset_t	end)
{
	register vm_page_t	p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */
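
	/*
	 * Illustrative arithmetic (not part of the original source):
	 * with resident_page_count == 64 the threshold below is
	 * 64/16 == 4 pages, so a request covering one to three pages
	 * takes the per-page vm_page_lookup() path, while a larger
	 * request walks the whole memq once instead.
	 */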
	if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->cleaning && !p->pageout);
				if (!p->fictitious)
					pmap_page_protect(p->phys_page,
							  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				assert(!p->cleaning && !p->pageout);
				if (!p->fictitious)
					pmap_page_protect(p->phys_page,
							  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object(s) must *not* be locked. The map must be locked
 *	to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
	register vm_object_t	prev_object,
	vm_object_t		next_object,
	vm_object_offset_t	prev_offset,
	vm_object_offset_t	next_offset,
	vm_object_size_t	prev_size,
	vm_object_size_t	next_size)
{
	vm_object_size_t	newsize;

#ifdef	lint
	next_offset++;
#endif	/* lint */

	if (next_object != VM_OBJECT_NULL) {
		return(FALSE);
	}

	if (prev_object == VM_OBJECT_NULL) {
		return(TRUE);
	}

	XPR(XPR_VM_OBJECT,
	 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
		(integer_t)prev_object, prev_offset, prev_size, next_size, 0);

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object, prev_offset);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use in any way:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. paging references (pages might be in page-list)
	 */

	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->true_share != FALSE) ||
	    (prev_object->paging_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	vm_object_coalesce_count++;

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
		prev_offset + prev_size,
		prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size) {
#if	MACH_PAGEMAP
		/*
		 *	We cannot extend an object that has existence info,
		 *	since the existence info might then fail to cover
		 *	the entire object.
		 *
		 *	This assertion must be true because the object
		 *	has no pager, and we only create existence info
		 *	for objects with pagers.
		 */
		assert(prev_object->existence_map == VM_EXTERNAL_NULL);
#endif	/* MACH_PAGEMAP */
		prev_object->size = newsize;
	}

	vm_object_unlock(prev_object);
	return(TRUE);
}
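
/*
 * Illustrative sketch (not part of the original source): the intended
 * map-layer use.  If a map entry covers [prev_offset, prev_offset +
 * prev_size) of prev_object and a new allocation of next_size bytes
 * lands immediately after it, a TRUE return means the entry can
 * simply grow: prev_object->size has already been extended to
 * prev_offset + prev_size + next_size when needed.  Not compiled.
 */
#if 0	/* example only */
	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
			       prev_offset, (vm_object_offset_t) 0,
			       prev_size, next_size)) {
		/* extend the existing map entry by next_size */
	}
#endif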
/*
 *	Attach a set of physical pages to an object, so that they can
 *	be mapped by mapping the object.  Typically used to map IO memory.
 *
 *	The mapping function and its private data are used to obtain the
 *	physical addresses for each page to be mapped.
 */
void
vm_object_page_map(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	vm_object_offset_t	(*map_fn)(void *map_fn_data,
		vm_object_offset_t offset),
	void		*map_fn_data)	/* private to map_fn */
{
	int			num_pages;
	int			i;
	vm_page_t		m;
	vm_page_t		old_page;
	vm_object_offset_t	addr;

	num_pages = atop_64(size);

	for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {

		addr = (*map_fn)(map_fn_data, offset);

		while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_object_lock(object);
		if ((old_page = vm_page_lookup(object, offset))
				!= VM_PAGE_NULL)
		{
			vm_page_lock_queues();
			vm_page_free(old_page);
			vm_page_unlock_queues();
		}

		vm_page_init(m, addr);
		/* private normally requires lock_queues but since we */
		/* are initializing the page, its not necessary here  */
		m->private = TRUE;		/* don`t free page */
		m->wire_count = 1;
		vm_page_insert(m, object, offset);

		PAGE_WAKEUP_DONE(m);
		vm_object_unlock(object);
	}
}
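
/*
 * Illustrative sketch (not part of the original source): a trivial
 * map_fn for a physically contiguous device window.  The helper name
 * and the base-address argument are hypothetical.  Not compiled.
 */
#if 0	/* example only */
static vm_object_offset_t
example_contig_map_fn(void *map_fn_data, vm_object_offset_t offset)
{
	/* map_fn_data carries the window's physical base address */
	return *(vm_object_offset_t *)map_fn_data + offset;
}

	/* vm_object_page_map(object, 0, win_size,
				example_contig_map_fn, &phys_base); */
#endif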
#include <mach_kdb.h>

#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define printf	kdbprintf

extern boolean_t	vm_object_cached(
				vm_object_t object);

extern void		print_bitstring(
				char byte);

boolean_t	vm_object_print_pages = FALSE;

void
print_bitstring(
	char byte)
{
	printf("%c%c%c%c%c%c%c%c",
	       ((byte & (1 << 0)) ? '1' : '0'),
	       ((byte & (1 << 1)) ? '1' : '0'),
	       ((byte & (1 << 2)) ? '1' : '0'),
	       ((byte & (1 << 3)) ? '1' : '0'),
	       ((byte & (1 << 4)) ? '1' : '0'),
	       ((byte & (1 << 5)) ? '1' : '0'),
	       ((byte & (1 << 6)) ? '1' : '0'),
	       ((byte & (1 << 7)) ? '1' : '0'));
}

boolean_t
vm_object_cached(
	register vm_object_t object)
{
	register vm_object_t o;

	queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
		if (object == o) {
			return TRUE;
		}
	}
	return FALSE;
}

#if	MACH_PAGEMAP
/*
 *	vm_external_print:	[ debug ]
 */
void
vm_external_print(
	vm_external_map_t map,
	vm_size_t size)
{
	if (map == VM_EXTERNAL_NULL) {
		printf("0  ");
	} else {
		vm_size_t existence_size = stob(size);
		printf("{ size=%d, map=[", existence_size);
		if (existence_size > 0) {
			print_bitstring(map[0]);
		}
		if (existence_size > 1) {
			print_bitstring(map[1]);
		}
		if (existence_size > 2) {
			printf("...");
			print_bitstring(map[existence_size-1]);
		}
		printf("] }\n");
	}
	return;
}
#endif	/* MACH_PAGEMAP */

int
vm_follow_object(
	vm_object_t object)
{
	extern int	db_indent;
	int		count = 0;
	int		orig_db_indent = db_indent;

	while (TRUE) {
		if (object == VM_OBJECT_NULL) {
			db_indent = orig_db_indent;
			return count;
		}

		count += 1;

		iprintf("object 0x%x", object);
		printf(", shadow=0x%x", object->shadow);
		printf(", copy=0x%x", object->copy);
		printf(", pager=0x%x", object->pager);
		printf(", ref=%d\n", object->ref_count);

		db_indent += 2;
		object = object->shadow;
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
void
vm_object_print(
	vm_object_t	object,
	boolean_t	have_addr,
	int		arg_count,
	char		*modif)
{
	register vm_page_t	p;
	const char		*s;
	register int		count;

	if (object == VM_OBJECT_NULL)
		return;

	iprintf("object 0x%x\n", object);

	db_indent += 2;

	iprintf("size=0x%x", object->size);
	printf(", cluster=0x%x", object->cluster_size);
	printf(", frozen=0x%x", object->frozen_size);
	printf(", ref_count=%d\n", object->ref_count);
	iprintf("");
#if	TASK_SWAPPER
	printf("res_count=%d, ", object->res_count);
#endif	/* TASK_SWAPPER */
	printf("resident_page_count=%d\n", object->resident_page_count);

	iprintf("shadow=0x%x", object->shadow);
	if (object->shadow) {
		register int i = 0;
		vm_object_t shadow = object;
		while (shadow = shadow->shadow)
			i++;
		printf(" (depth %d)", i);
	}
	printf(", copy=0x%x", object->copy);
	printf(", shadow_offset=0x%x", object->shadow_offset);
	printf(", last_alloc=0x%x\n", object->last_alloc);

	iprintf("pager=0x%x", object->pager);
	printf(", paging_offset=0x%x", object->paging_offset);
	printf(", pager_request=0x%x\n", object->pager_request);

	iprintf("copy_strategy=%d[", object->copy_strategy);
	switch (object->copy_strategy) {
	case MEMORY_OBJECT_COPY_NONE:
		printf("copy_none");
		break;

	case MEMORY_OBJECT_COPY_CALL:
		printf("copy_call");
		break;

	case MEMORY_OBJECT_COPY_DELAY:
		printf("copy_delay");
		break;

	case MEMORY_OBJECT_COPY_SYMMETRIC:
		printf("copy_symmetric");
		break;

	case MEMORY_OBJECT_COPY_INVALID:
		printf("copy_invalid");
		break;

	default:
		printf("?");
	}
	printf("]");
	printf(", absent_count=%d\n", object->absent_count);

	iprintf("all_wanted=0x%x<", object->all_wanted);
	s = "";
	if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
		printf("%sinit", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
		printf("%sready", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
		printf("%spaging", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_ABSENT_COUNT)) {
		printf("%sabsent", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
		printf("%slock", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
		printf("%suncaching", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
		printf("%scopy_call", s);
		s = ",";
	}
	if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
		printf("%scaching", s);
		s = ",";
	}
	printf(">");
	printf(", paging_in_progress=%d\n", object->paging_in_progress);

	iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
		(object->pager_created ? "" : "!"),
		(object->pager_initialized ? "" : "!"),
		(object->pager_ready ? "" : "!"),
		(object->can_persist ? "" : "!"),
		(object->pager_trusted ? "" : "!"),
		(object->pageout ? "" : "!"),
		(object->internal ? "internal" : "external"),
		(object->temporary ? "temporary" : "permanent"));
	iprintf("%salive, %slock_in_progress, %slock_restart, %sshadowed, %scached, %sprivate\n",
		(object->alive ? "" : "!"),
		(object->lock_in_progress ? "" : "!"),
		(object->lock_restart ? "" : "!"),
		(object->shadowed ? "" : "!"),
		(vm_object_cached(object) ? "" : "!"),
		(object->private ? "" : "!"));
	iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
		(object->advisory_pageout ? "" : "!"),
		(object->silent_overwrite ? "" : "!"));

#if	MACH_PAGEMAP
	iprintf("existence_map=");
	vm_external_print(object->existence_map, object->size);
#endif	/* MACH_PAGEMAP */
#if	MACH_ASSERT
	iprintf("paging_object=0x%x\n", object->paging_object);
#endif	/* MACH_ASSERT */

	if (vm_object_print_pages) {
		count = 0;
		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			if (count == 0) {
				iprintf("memory:=");
			} else if (count == 2) {
				printf("\n");
				iprintf(" ...");
				count = 0;
			} else {
				printf(",");
			}
			count++;

			printf("(off=0x%X,page=0x%X)", p->offset, (integer_t) p);
			p = (vm_page_t) queue_next(&p->listq);
		}
		if (count != 0) {
			printf("\n");
		}
	}
	db_indent -= 2;
}


/*
 *	vm_object_find		[ debug ]
 *
 *	Find all tasks which reference the given vm_object.
 */

boolean_t	vm_object_find(vm_object_t object);
boolean_t	vm_object_print_verbose = FALSE;

boolean_t
vm_object_find(
	vm_object_t	object)
{
	task_t		task;
	vm_map_t	map;
	vm_map_entry_t	entry;
	processor_set_t	pset = &default_pset;
	boolean_t	found = FALSE;

	queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
		map = task->map;
		for (entry = vm_map_first_entry(map);
		     entry && entry != vm_map_to_entry(map);
		     entry = entry->vme_next) {

			vm_object_t obj;

			/*
			 * For the time being skip submaps,
			 * only the kernel can have submaps,
			 * and unless we are interested in
			 * kernel objects, we can simply skip
			 * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm
			 * for a full solution.
			 */
			if (entry->is_sub_map)
				continue;
			if (entry)
				obj = entry->object.vm_object;
			else
				continue;

			while (obj != VM_OBJECT_NULL) {
				if (obj == object) {
					if (!found) {
						printf("TASK\t\tMAP\t\tENTRY\n");
						found = TRUE;
					}
					printf("0x%x\t0x%x\t0x%x\n",
						task, map, entry);
				}
				obj = obj->shadow;
			}
		}
	}

	return(found);
}

#endif	/* MACH_KDB */
kern_return_t
vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size)
{
	ppnum_t			base_page;
	vm_object_offset_t	base_offset;

	if (!object->private)
		return KERN_FAILURE;

	base_page = phys_page;

	vm_object_lock(object);
	if (!object->phys_contiguous) {
		vm_page_t	m;
		if ((base_offset = trunc_page_64(offset)) != offset) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		base_offset += object->paging_offset;
		while (size) {
			m = vm_page_lookup(object, base_offset);
			if (m != VM_PAGE_NULL) {
				if (m->fictitious) {
					vm_page_lock_queues();
					m->fictitious = FALSE;
					m->private = TRUE;
					m->phys_page = base_page;
					if (!m->busy) {
						m->busy = TRUE;
					}
					if (!m->absent) {
						m->absent = TRUE;
						object->absent_count++;
					}
					m->list_req_pending = TRUE;
					vm_page_unlock_queues();
				} else if (m->phys_page != base_page) {
					/* pmap call to clear old mapping */
					pmap_page_protect(m->phys_page,
							  VM_PROT_NONE);
					m->phys_page = base_page;
				}
			} else {
				while ((m = vm_page_grab_fictitious())
							== VM_PAGE_NULL)
					vm_page_more_fictitious();
				vm_page_lock_queues();
				m->fictitious = FALSE;
				m->private = TRUE;
				m->phys_page = base_page;
				m->list_req_pending = TRUE;
				m->absent = TRUE;
				m->unusual = TRUE;
				object->absent_count++;
				vm_page_unlock_queues();
				vm_page_insert(m, object, base_offset);
			}
			base_page++;	/* Go to the next physical page */
			base_offset += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		/* NOTE: we should check the original settings here */
		/* if we have a size > zero a pmap call should be made */
		/* to disable the range */

		/* pmap_? */

		/* shadows on contiguous memory are not allowed */
		/* we therefore can use the offset field */
		object->shadow_offset = (vm_object_offset_t)(phys_page << 12);
		object->size = size;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
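
/*
 * Illustrative arithmetic (not part of the original source): in the
 * phys_contiguous case above, shadow_offset caches the base physical
 * address, assuming 4K pages (hence the << 12).  A hypothetical
 * phys_page of 0x1234 yields shadow_offset 0x1234000, from which the
 * physical page for any object offset can later be recomputed.
 */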
/*
 *	memory_object_free_from_cache:
 *
 *	Walk the vm_object cache list, removing and freeing vm_objects
 *	which are backed by the pager identified by the caller, (pager_id).
 *	Remove up to "count" objects, if there are that many available
 *	in the cache.
 *
 *	Walk the list at most once, return the number of vm_objects
 *	actually freed.
 */

__private_extern__ kern_return_t
memory_object_free_from_cache(
	host_t		host,
	int		*pager_id,
	int		*count)
{
	int	object_released = 0;

	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	if(host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);

 try_again:
	vm_object_cache_lock();

	queue_iterate(&vm_object_cached_list, object,
			vm_object_t, cached_list) {
		if (object->pager && (pager_id == object->pager->pager)) {
			vm_object_lock(object);
			queue_remove(&vm_object_cached_list, object,
					vm_object_t, cached_list);
			vm_object_cached_count--;

			/*
			 *	Since this object is in the cache, we know
			 *	that it is initialized and has only a pager's
			 *	(implicit) reference. Take a reference to avoid
			 *	recursive deallocations.
			 */

			assert(object->pager_initialized);
			assert(object->ref_count == 0);
			object->ref_count++;

			/*
			 *	Terminate the object.
			 *	If the object had a shadow, we let
			 *	vm_object_deallocate deallocate it.
			 *	"pageout" objects have a shadow, but
			 *	maintain a "paging reference" rather
			 *	than a normal reference.
			 *	(We are careful here to limit recursion.)
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
			if ((vm_object_terminate(object) == KERN_SUCCESS)
					&& (shadow != VM_OBJECT_NULL)) {
				vm_object_deallocate(shadow);
			}

			if (object_released++ == *count)
				return KERN_SUCCESS;
			goto try_again;
		}
	}
	vm_object_cache_unlock();
	*count = object_released;
	return KERN_SUCCESS;
}
kern_return_t
memory_object_create_named(
	memory_object_t		pager,
	memory_object_offset_t	size,
	memory_object_control_t	*control)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;

	*control = MEMORY_OBJECT_CONTROL_NULL;
	if (pager == MEMORY_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, FALSE);
	if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
	    (entry->object != VM_OBJECT_NULL)) {
		if (entry->object->named == TRUE)
			panic("memory_object_create_named: caller already holds the right");
	}

	vm_object_cache_unlock();
	if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
	    == VM_OBJECT_NULL) {
		return(KERN_INVALID_OBJECT);
	}

	/* wait for object (if any) to be ready */
	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->named = TRUE;
		while (!object->pager_ready) {
			vm_object_sleep(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
		}
		*control = object->pager_request;
		vm_object_unlock(object);
	}
	return (KERN_SUCCESS);
}
/*
 *	Routine:	memory_object_recover_named [user interface]
 *	Purpose:
 *		Attempt to recover a named reference for a VM object.
 *		VM will verify that the object has not already started
 *		down the termination path, and if it has, will optionally
 *		wait for that to finish.
 *	Returns:
 *		KERN_SUCCESS - we recovered a named reference on the object
 *		KERN_FAILURE - we could not recover a reference (object dead)
 *		KERN_INVALID_ARGUMENT - bad memory object control
 */
kern_return_t
memory_object_recover_named(
	memory_object_control_t	control,
	boolean_t		wait_on_terminating)
{
	vm_object_t		object;

	vm_object_cache_lock();
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		vm_object_cache_unlock();
		return (KERN_INVALID_ARGUMENT);
	}

restart:
	vm_object_lock(object);

	if (object->terminating && wait_on_terminating) {
		vm_object_cache_unlock();
		vm_object_wait(object,
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			THREAD_UNINT);
		vm_object_cache_lock();
		goto restart;
	}

	if (!object->alive) {
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	if (object->named == TRUE) {
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}

	if((object->ref_count == 0) && (!object->terminating)){
		queue_remove(&vm_object_cached_list, object,
				vm_object_t, cached_list);
		vm_object_cached_count--;
		XPR(XPR_VM_OBJECT_CACHE,
		    "memory_object_recover_named: removing %X, head (%X, %X)\n",
		    (integer_t)object,
		    (integer_t)vm_object_cached_list.next,
		    (integer_t)vm_object_cached_list.prev, 0,0);
	}

	vm_object_cache_unlock();

	object->named = TRUE;
	object->ref_count++;
	vm_object_res_reference(object);
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
	return (KERN_SUCCESS);
}
/*
 *	vm_object_release_name:
 *
 *	Enforces name semantic on memory_object reference count decrement
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_create_named.
 *
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1. i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable otherwise when
 *	the reference count is zero, it is simply terminated.
 */

__private_extern__ kern_return_t
vm_object_release_name(
	vm_object_t	object,
	int		flags)
{
	vm_object_t	shadow;
	boolean_t	original_object = TRUE;

	while (object != VM_OBJECT_NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object.  We must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();
		vm_object_lock(object);
		assert(object->alive);
		if(original_object)
			assert(object->named);
		assert(object->ref_count > 0);

		/*
		 *	We have to wait for initialization before
		 *	destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		if (((object->ref_count > 1)
			&& (flags & MEMORY_OBJECT_TERMINATE_IDLE))
			|| (object->terminating)) {
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return KERN_FAILURE;
		} else {
			if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
				vm_object_unlock(object);
				vm_object_cache_unlock();
				return KERN_SUCCESS;
			}
		}

		if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
					(object->ref_count == 1)) {
			if(original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			/* let vm_object_deallocate push this thing into */
			/* the cache, if that is where it is bound */
			vm_object_deallocate(object);
			return KERN_SUCCESS;
		}
		VM_OBJ_RES_DECR(object);
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
		if(object->ref_count == 1) {
			if(vm_object_terminate(object) != KERN_SUCCESS) {
				if(original_object) {
					return KERN_FAILURE;
				} else {
					return KERN_SUCCESS;
				}
			}
			if (shadow != VM_OBJECT_NULL) {
				original_object = FALSE;
				object = shadow;
				continue;
			}
			return KERN_SUCCESS;
		} else {
			object->ref_count--;
			assert(object->ref_count > 0);
			if(original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return KERN_SUCCESS;
		}
	}
}
__private_extern__ kern_return_t
vm_object_lock_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		prot)
{
	vm_object_offset_t	original_offset = offset;
	boolean_t		should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    (integer_t)object, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	(void)vm_object_update(object,
		offset, size, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
#if	TASK_SWAPPER
/*
 * vm_object_res_deallocate
 *
 * (recursively) decrement residence counts on vm objects and their shadows.
 * Called from vm_object_deallocate and when swapping out an object.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_deallocate(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_all_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			vm_object_lock(tmp_object);
			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
			assert(object->res_count > 0);
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/*
 * vm_object_res_reference
 *
 * Internal function to increment residence count on a vm object
 * and its shadows.  It is called only from vm_object_reference, and
 * when swapping in a vm object, via vm_map_swap.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_reference(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	while ((++object->res_count == 1) &&
	       (object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object)
			vm_object_unlock(object);
		object = tmp_object;
	}
	if (object != orig_object)
		vm_object_unlock(object);
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif	/* TASK_SWAPPER */
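
/*
 * Illustrative note (not part of the original source): both residence
 * routines above traverse the shadow chain with hand-over-hand
 * (lock-coupling) locking -- take the shadow's lock, then drop the
 * current object's lock (except the caller's original) -- so the
 * chain cannot be unlinked out from under the walk, while never
 * holding more than two chain locks at once.
 */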
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
#ifdef vm_object_reference
#undef vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
	register vm_object_t	object)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	assert(object->ref_count > 0);
	vm_object_reference_locked(object);
	vm_object_unlock(object);
}

#ifdef MACH_BSD
/*
 * Scale the vm_object_cache
 * This is required to make sure that the vm_object_cache is big
 * enough to effectively cache the mapped file.
 * This is really important with UBC as all the regular file vnodes
 * have a memory object associated with them. Having this cache too
 * small results in rapid reclaim of vnodes and hurts performance a LOT!
 *
 * This is also needed as number of vnodes can be dynamically scaled.
 */
kern_return_t
adjust_vm_object_cache(vm_size_t oval, vm_size_t nval)
{
	vm_object_cached_max = nval;
	vm_object_cache_trim(FALSE);
	return (KERN_SUCCESS);
}
#endif /* MACH_BSD */