/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */
/* remove as part of component support merge */
extern int vnode_pager_workaround;
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>
#include <ipc/ipc_port.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store them permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	mapped.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait for the
 *	"pager_initialized" field to be asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	property.
 *
 *	ZZZ Continue this comment.
 */
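
/*
 * NOTE (added commentary): a typical arrangement after a copy-on-write
 * fault is
 *
 *	mapping --> shadow object --> original (backing) object
 *
 * where the shadow holds only the pages modified through that mapping,
 * lookups that miss in it fall through to the backing object starting at
 * "shadow_offset", and the backing object's "copy" field points at the
 * object that must receive pushed pages before the backer may be changed.
 */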
/* Forward declarations for internal functions. */
static void		_vm_object_allocate(
				vm_object_size_t	size,
				vm_object_t		object);

static kern_return_t	vm_object_terminate(
				vm_object_t		object);

extern void		vm_object_remove(
				vm_object_t		object);

static vm_object_t	vm_object_cache_trim(
				boolean_t called_from_vm_object_deallocate);

static void		vm_object_deactivate_all_pages(
				vm_object_t		object);

static void		vm_object_abort_activity(
				vm_object_t		object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t		object,
				vm_object_t		backing_object);

static void		vm_object_do_bypass(
				vm_object_t		object,
				vm_object_t		backing_object);

static void		vm_object_release_pager(
				memory_object_t		pager);

static zone_t		vm_object_zone;		/* vm backing store zone */
/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object			kernel_object_store;
__private_extern__ vm_object_t		kernel_object = &kernel_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object			vm_submap_object_store;

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object			vm_object_template;
/*
 *	Virtual memory objects that are not referenced by
 *	any address maps, but that are allowed to persist
 *	(an attribute specified by the associated memory manager),
 *	are kept in a queue (vm_object_cached_list).
 *
 *	When an object from this queue is referenced again,
 *	for example to make another address space mapping,
 *	it must be removed from the queue.  That is, the
 *	queue contains *only* objects with zero references.
 *
 *	The kernel may choose to terminate objects from this
 *	queue in order to reclaim storage.  The current policy
 *	is to permit a fixed maximum number of unreferenced
 *	objects (vm_object_cached_max).
 *
 *	A spin lock (accessed by routines
 *	vm_object_cache_{lock,lock_try,unlock}) governs the
 *	object cache.  It must be held when objects are
 *	added to or removed from the cache (in vm_object_terminate).
 *	The routines that acquire a reference to a virtual
 *	memory object based on one of the memory object ports
 *	must also lock the cache.
 *
 *	Ideally, the object cache should be more isolated
 *	from the reference mechanism, so that the lock need
 *	not be held to make simple references.
 */
static queue_head_t	vm_object_cached_list;
static int		vm_object_cached_count;
static int		vm_object_cached_high;	/* highest # cached objects */
static int		vm_object_cached_max = 512;	/* may be patched */

static decl_mutex_data(,vm_object_cached_lock_data)

#define vm_object_cache_lock()		\
		mutex_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try()	\
		mutex_try(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()	\
		mutex_unlock(&vm_object_cached_lock_data)
#define VM_OBJECT_HASH_COUNT		1024
static queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone	*vm_object_hash_zone;

struct vm_object_hash_entry {
	queue_chain_t		hash_link;	/* hash chain link */
	memory_object_t		pager;		/* pager we represent */
	vm_object_t		object;		/* corresponding object */
	boolean_t		waiting;	/* someone waiting for
						 * termination */
};

typedef struct vm_object_hash_entry	*vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL	((vm_object_hash_entry_t) 0)

#define VM_OBJECT_HASH_SHIFT	8
#define vm_object_hash(pager) \
	((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)
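
/*
 * NOTE (added commentary): the hash simply folds the pager pointer into a
 * bucket index -- e.g. a pager at address 0x12345600 lands in bucket
 * ((0x12345600 >> 8) % 1024) -- and collisions are handled by chaining
 * vm_object_hash_entry structures on the bucket's queue.
 */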
/*
 *	vm_object_hash_lookup looks up a pager in the hashtable
 *	and returns the corresponding entry, with optional removal.
 */

static vm_object_hash_entry_t
vm_object_hash_lookup(
	memory_object_t	pager,
	boolean_t	remove_entry)
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t)queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t)entry)) {
		if (entry->pager == pager && !remove_entry)
			return(entry);
		else if (entry->pager == pager) {
			queue_remove(bucket, entry,
					vm_object_hash_entry_t, hash_link);
			return(entry);
		}

		entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
	}

	return(VM_OBJECT_HASH_ENTRY_NULL);
}
/*
 *	vm_object_hash_enter enters the specified
 *	pager / cache object association in the hashtable.
 */

static void
vm_object_hash_insert(
	vm_object_hash_entry_t	entry)
{
	register queue_t	bucket;

	bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];

	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
}
static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;

	entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
	entry->pager = pager;
	entry->object = VM_OBJECT_NULL;
	entry->waiting = FALSE;

	return(entry);
}

void
vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry)
{
	zfree(vm_object_hash_zone, (vm_offset_t)entry);
}
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

static void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
		"vm_object_allocate, object 0x%X size 0x%X\n",
		(integer_t)object, size, 0,0,0);

	*object = vm_object_template;
	queue_init(&object->memq);
	queue_init(&object->msr_q);
#ifdef UBC_DEBUG
	queue_init(&object->uplq);
#endif /* UBC_DEBUG */
	vm_object_lock_init(object);
	object->size = size;
}
__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	register vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);			/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}
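
/*
 * Illustrative usage (sketch, not part of the original source): callers
 * that need an anonymous zero-fill object pair the calls as
 *
 *	vm_object_t obj = vm_object_allocate(size);
 *	...use obj...
 *	vm_object_deallocate(obj);
 *
 * vm_object_allocate() hands back the object with a single reference;
 * vm_object_deallocate() drops it and either caches or terminates the
 * object.
 */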
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	register int	i;

	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
				round_page(512*1024),
				round_page(12*1024),
				"vm objects");

	queue_init(&vm_object_cached_list);
	mutex_init(&vm_object_cached_lock_data, ETAP_VM_OBJ_CACHE);

	vm_object_hash_zone =
		zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
		      round_page(512*1024),
		      round_page(12*1024),
		      "vm object hash entries");

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);
	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */
	vm_object_template.size = 0;
	vm_object_template.frozen_size = 0;
	vm_object_template.ref_count = 1;
#if	TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif	/* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.cow_hint = 0;
	vm_object_template.true_share = FALSE;

	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_request = PAGER_REQUEST_NULL;
	/* msr_q; init after allocation */

	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.absent_count = 0;
	vm_object_template.paging_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.temporary = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.lock_in_progress = FALSE;
	vm_object_template.lock_restart = FALSE;
	vm_object_template.silent_overwrite = FALSE;
	vm_object_template.advisory_pageout = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;

	/* cached_list; init after allocation */
	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.cluster_size = 0;
#if	MACH_PAGEMAP
	vm_object_template.existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */
#if	MACH_ASSERT
	vm_object_template.paging_object = VM_OBJECT_NULL;
#endif	/* MACH_ASSERT */
	/*
	 *	Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

	/*
	 *	Note that in the following size specifications, we need to add 1 because
	 *	VM_MAX_KERNEL_ADDRESS is a maximum address, not a size.
	 */
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			kernel_object);

	/*
	 *	Initialize the "submap object".  Make it as large as the
	 *	kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			vm_submap_object);

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called with
	 * non-zone memory.
	 */
	vm_object_reference(vm_submap_object);

#if	MACH_PAGEMAP
	vm_external_module_initialize();
#endif	/* MACH_PAGEMAP */
}
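
/*
 * NOTE (added commentary): after vm_object_bootstrap() the module has a
 * zone for vm_object structures, an empty object cache, an empty
 * pager-to-object hash table, a fully initialized template, and the two
 * statically allocated objects (kernel_object and vm_submap_object).
 */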
__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}
/* remove the typedef below when emergency work-around is taken out */
typedef struct vnode_pager {
	memory_object_t		pager;
	memory_object_t		pager_handle;	/* pager */
	memory_object_control_t	control_handle;	/* memory object's control handle */
	void			*vnode_handle;	/* vnode handle */
} *vnode_pager_t;
#define	MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */
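
/*
 * NOTE (added commentary): cache_shadows is a tuning/debugging knob; when
 * it is cleared, vm_object_deallocate() strips an object's shadow
 * reference before caching the object, so shadow chains are not kept
 * alive by the object cache.
 */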
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
__private_extern__ void
vm_object_deallocate(
	register vm_object_t	object)
{
	boolean_t retry_cache_trim = FALSE;
	vm_object_t shadow;
//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */
	while (object != VM_OBJECT_NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();
		vm_object_lock(object);

		assert(object->ref_count > 0);

		/*
		 *	If the object has a named reference, and only
		 *	that reference would remain, inform the pager
		 *	about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_unlock(object);
				vm_object_cache_unlock();

				memory_object_unmap(pager);

				vm_object_cache_lock();
				vm_object_lock(object);
				assert(object->ref_count > 0);
			}
		}

		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call      */
		/* terminate again.  */

		if ((object->ref_count > 1) || object->terminating) {
			object->ref_count--;
			vm_object_res_deallocate(object);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			      VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			thread_block((void (*)(void))0);
			continue;
		}
		/*
		 *	If this object can persist, then enter it in
		 *	the cache. Otherwise, terminate it.
		 *
		 * 	NOTE:  Only permanent objects are cached, and
		 *	permanent objects cannot have shadows.  This
		 *	affects the residence counting logic in a minor
		 *	way (can do it in-line, mostly).
		 */

		if ((object->can_persist) && (object->alive)) {
			/*
			 *	Now it is safe to decrement reference count,
			 *	and to return if reference count is > 0.
			 */
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
				vm_object_cache_unlock();
				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				      VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	Remove shadow now if we don't
			 *	want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Enter the object onto the queue of
			 *	cached objects, and deactivate
			 *	all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
		      "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
				(integer_t)object,
				(integer_t)vm_object_cached_list.next,
				(integer_t)vm_object_cached_list.prev,0,0);

			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cache_unlock();
			vm_object_deactivate_all_pages(object);
			vm_object_unlock(object);

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	If we have a shadow that we need
			 *	to deallocate, do so now, remembering
			 *	to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Trim the cache. If the cache trim
			 *	returns with a shadow for us to deallocate,
			 *	then remember to retry the cache trim
			 *	when we are done deallocating the shadow.
			 *	Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;

		} else {
			/*
			 *	This object is not cachable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
	 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%lX ref %d\n",
			    (integer_t)object, object->resident_page_count,
			    object->paging_in_progress,
			    (natural_t)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */

			/*
			 *	Terminate this object. If it had a shadow,
			 *	then deallocate it; otherwise, if we need
			 *	to retry a cache trim, do so now; otherwise,
			 *	we are done. "pageout" objects have a shadow,
			 *	but maintain a "paging reference" rather
			 *	than a normal reference.
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
			if(vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			      VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
	}
	assert(! retry_cache_trim);
}
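
/*
 * NOTE (added commentary): vm_object_deallocate() is written as a loop
 * rather than recursion so that dropping the last reference on a long
 * shadow chain does not consume kernel stack; the shadow (or an object
 * handed back by vm_object_cache_trim()) simply becomes the next
 * iteration's target.
 */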
/*
 *	Check to see whether we really need to trim
 *	down the cache. If so, remove an object from
 *	the cache, terminate it, and repeat.
 *
 *	Called with, and returns with, cache lock unlocked.
 */
vm_object_t
vm_object_cache_trim(
	boolean_t called_from_vm_object_deallocate)
{
	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	for (;;) {

		/*
		 *	If we no longer need to trim the cache,
		 *	then we are done.
		 */

		vm_object_cache_lock();
		if (vm_object_cached_count <= vm_object_cached_max) {
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}

		/*
		 *	We must trim down the cache, so remove
		 *	the first object in the cache.
		 */
		XPR(XPR_VM_OBJECT,
		"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0, 0, 0);

		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_lock(object);
		queue_remove(&vm_object_cached_list, object, vm_object_t,
			     cached_list);
		vm_object_cached_count--;

		/*
		 *	Since this object is in the cache, we know
		 *	that it is initialized and has no references.
		 *	Take a reference to avoid recursive deallocations.
		 */

		assert(object->pager_initialized);
		assert(object->ref_count == 0);
		object->ref_count++;

		/*
		 *	Terminate the object.
		 *	If the object had a shadow, we let vm_object_deallocate
		 *	deallocate it. "pageout" objects have a shadow, but
		 *	maintain a "paging reference" rather than a normal
		 *	reference.
		 *	(We are careful here to limit recursion.)
		 */
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
		if(vm_object_terminate(object) != KERN_SUCCESS)
			continue;

		if (shadow != VM_OBJECT_NULL) {
			if (called_from_vm_object_deallocate) {
				return shadow;
			} else {
				vm_object_deallocate(shadow);
			}
		}
	}
}
boolean_t	vm_object_terminate_remove_all = FALSE;
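
/*
 * NOTE (added commentary): vm_object_cache_trim() takes a reference on the
 * cached object before terminating it precisely so that termination does
 * not recurse back into the deallocation path; a surviving shadow is
 * either handed back to vm_object_deallocate() or deallocated here.
 */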
/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if it is found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	register vm_object_t	object)
{
	memory_object_t		pager;
	register vm_page_t	p;
	vm_object_t		shadow_object;

	XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
		(integer_t)object, object->ref_count, 0, 0, 0);
	if (!object->pageout && (!object->temporary || object->can_persist)
			&& (object->pager != NULL || object->shadow_severed)) {
		vm_object_cache_unlock();
		while (!queue_empty(&object->memq)) {
			/*
			 * Clear pager_trusted bit so that the pages get yanked
			 * out of the object instead of cleaned in place.  This
			 * prevents a deadlock in XMM and makes more sense anyway.
			 */
			object->pager_trusted = FALSE;

			p = (vm_page_t) queue_first(&object->memq);

			if (p->busy || p->cleaning) {
				if(p->cleaning || p->absent) {
					vm_object_paging_wait(object, THREAD_UNINT);
					continue;
				} else {
					panic("vm_object_terminate.3 0x%x 0x%x", object, p);
				}
			}

			vm_page_lock_queues();
			VM_PAGE_QUEUES_REMOVE(p);
			vm_page_unlock_queues();

			if (p->absent || p->private) {

				/*
				 *	For private pages, VM_PAGE_FREE just
				 *	leaves the page structure around for
				 *	its owner to clean up.  For absent
				 *	pages, the structure is returned to
				 *	the appropriate pool.
				 */

				goto free_page;
			}

			if (p->fictitious)
				panic("vm_object_terminate.4 0x%x 0x%x", object, p);

			if (!p->dirty)
				p->dirty = pmap_is_modified(p->phys_addr);

			if ((p->dirty || p->precious) && !p->error && object->alive) {
				p->busy = TRUE;
				vm_object_paging_begin(object);
				/* protect the object from re-use/caching while it */
				/* is unlocked */
				vm_object_unlock(object);
				vm_pageout_cluster(p); /* flush page */
				vm_object_lock(object);
				vm_object_paging_wait(object, THREAD_UNINT);
				XPR(XPR_VM_OBJECT,
				  "vm_object_terminate restart, object 0x%X ref %d\n",
				  (integer_t)object, object->ref_count, 0, 0, 0);
			} else {
			    free_page:
				VM_PAGE_FREE(p);
			}
		}
		vm_object_unlock(object);
		vm_object_cache_lock();
		vm_object_lock(object);
	}
	/*
	 * Make sure the object isn't already being terminated
	 */
	if(object->terminating) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 * Did somebody get a reference to the object while we were
	 * cleaning it?
	 */
	if(object->ref_count != 1) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_res_deallocate(object);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Make sure no one can look us up now.
	 */

	object->terminating = TRUE;
	object->alive = FALSE;
	vm_object_remove(object);
	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
	    !(object->pageout)) {
		vm_object_lock(shadow_object);
		assert((shadow_object->copy == object) ||
		       (shadow_object->copy == VM_OBJECT_NULL));
		shadow_object->copy = VM_OBJECT_NULL;
		vm_object_unlock(shadow_object);
	}
	/*
	 *	The pageout daemon might be playing with our pages.
	 *	Now that the object is dead, it won't touch any more
	 *	pages, but some pages might already be on their way out.
	 *	Hence, we wait until the active paging activities have ceased
	 *	before we break the association with the pager itself.
	 */
	while (object->paging_in_progress != 0) {
		vm_object_cache_unlock();
		vm_object_wait(object,
			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			       THREAD_UNINT);
		vm_object_cache_lock();
		vm_object_lock(object);
	}

	pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;

	if (pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_request);
	vm_object_cache_unlock();

	object->ref_count--;
#if	TASK_SWAPPER
	assert(object->res_count == 0);
#endif	/* TASK_SWAPPER */
	assert (object->ref_count == 0);

	/*
	 *	Clean or free the pages, as appropriate.
	 *	It is possible for us to find busy/absent pages,
	 *	if some faults on this object were aborted.
	 */
	if (object->pageout) {
		assert(shadow_object != VM_OBJECT_NULL);
		assert(shadow_object == object->shadow);

		vm_pageout_object_terminate(object);

	} else if ((object->temporary && !object->can_persist) ||
		   (pager == MEMORY_OBJECT_NULL)) {
		while (!queue_empty(&object->memq)) {
			p = (vm_page_t) queue_first(&object->memq);

			VM_PAGE_FREE(p);
		}
	} else if (!queue_empty(&object->memq)) {
		panic("vm_object_terminate: queue just emptied isn't");
	}

	assert(object->paging_in_progress == 0);
	assert(object->ref_count == 0);

	/*
	 *	If the pager has not already been released by
	 *	vm_object_destroy, we need to terminate it and
	 *	release our reference to it here.
	 */
	if (pager != MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		vm_object_release_pager(pager);
		vm_object_lock(object);
	}

	/* kick off anyone waiting on terminating */
	object->terminating = FALSE;
	vm_object_paging_begin(object);
	vm_object_paging_end(object);
	vm_object_unlock(object);

#if	MACH_PAGEMAP
	vm_external_destroy(object->existence_map, object->size);
#endif	/* MACH_PAGEMAP */

	/*
	 *	Free the space for the object.
	 */
	zfree(vm_object_zone, (vm_offset_t) object);
	return KERN_SUCCESS;
}
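
/*
 * NOTE (added commentary): by the time vm_object_terminate() frees the
 * structure it has flushed or freed every resident page, severed the
 * shadow's back-pointer, waited out in-flight paging operations, and (if
 * necessary) terminated the pager, so no other thread can still reach the
 * object.
 */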
/*
 *	Routine:	vm_object_pager_wakeup
 *	Purpose:	Wake up anyone waiting for termination of a pager.
 */
static void
vm_object_pager_wakeup(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;
	boolean_t		waiting = FALSE;

	/*
	 *	If anyone was waiting for the memory_object_terminate
	 *	to be queued, wake them up now.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, TRUE);
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		waiting = entry->waiting;
	vm_object_cache_unlock();
	if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
		if (waiting)
			thread_wakeup((event_t) pager);
		vm_object_hash_entry_free(entry);
	}
}
/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 *			just like memory_object_terminate, except
 *			that we wake up anyone blocked in vm_object_enter
 *			waiting for termination message to be queued
 *			before calling memory_object_init.
 */
static void
vm_object_release_pager(
	memory_object_t	pager)
{

	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	/*
	 *	Wakeup anyone waiting for this terminate
	 */

	vm_object_pager_wakeup(pager);

	/*
	 *	Release reference to pager.
	 */

	memory_object_deallocate(pager);
}
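
/*
 * NOTE (added commentary): the ordering above matters: the terminate
 * request is queued to the pager first, any thread blocked in
 * vm_object_enter is woken second, and only then is the kernel's own
 * reference on the pager dropped.
 */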
/*
 *	Routine:	vm_object_abort_activity [internal use only]
 *	Purpose:
 *		Abort paging requests pending on this object.
 *	In/out conditions:
 *		The object is locked on entry and exit.
 */
static void
vm_object_abort_activity(
	vm_object_t	object)
{
	register vm_page_t	p;
	vm_page_t		next;

	XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
		(integer_t)object, 0, 0, 0, 0);

	/*
	 *	Abort all activity that would be waiting
	 *	for a result on this memory object.
	 *
	 *	We could also choose to destroy all pages
	 *	that we have in memory for this object, but
	 *	we don't.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);

		/*
		 *	If it's being paged in, destroy it.
		 *	If an unlock has been requested, start it again.
		 */

		if (p->busy && p->absent) {
			VM_PAGE_FREE(p);
		} else {
			if (p->unlock_request != VM_PROT_NONE)
				p->unlock_request = VM_PROT_NONE;
			PAGE_WAKEUP(p);
		}

		p = next;
	}

	/*
	 *	Wake up threads waiting for the memory object to
	 *	become ready.
	 */

	object->pager_ready = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
}
/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t	object,
	kern_return_t	reason)
{
	memory_object_t	old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	/*
	 *	Rip out the pager from the vm_object now...
	 */

	vm_object_remove(object);
	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_request);
	vm_object_cache_unlock();

	/*
	 * Wait for the existing paging activity (that got
	 * through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call).
		 */
		vm_object_deallocate(object);

	}
	return(KERN_SUCCESS);
}
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_all_pages(
	register vm_object_t	object)
{
	register vm_page_t	p;

	queue_iterate(&object->memq, p, vm_page_t, listq) {
		vm_page_lock_queues();
		if (!p->busy)
			vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}
__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page)
{
	vm_object_t	orig_object;
	int pages_moved = 0;
	int pages_found = 0;

	/*
	 * entered with object lock held, acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	orig_object = object;

	for (;;) {
		register vm_page_t	m;
		vm_object_offset_t	toffset;
		vm_object_size_t	tsize;

		vm_object_paging_begin(object);
		vm_page_lock_queues();

		for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

			if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

				pages_found++;

				if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

					m->reference = FALSE;
					pmap_clear_reference(m->phys_addr);

					if ((kill_page) && (object->internal)) {
						m->precious = FALSE;
						m->dirty = FALSE;
						pmap_clear_modify(m->phys_addr);
						vm_external_state_clr(object->existence_map, offset);
					}
					VM_PAGE_QUEUES_REMOVE(m);

					queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);

					m->inactive = TRUE;
					if (!m->fictitious)
						vm_page_inactive_count++;

					pages_moved++;
				}
			}
		}
		vm_page_unlock_queues();
		vm_object_paging_end(object);

		if (object->shadow) {
			vm_object_t	tmp_object;

			kill_page = 0;

			offset += object->shadow_offset;

			tmp_object = object->shadow;
			vm_object_lock(tmp_object);

			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}
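
/*
 * NOTE (added commentary): vm_object_deactivate_pages() walks the shadow
 * chain top-down, deactivating any resident page of the requested range at
 * each level; only the top object arrives locked from the caller, so the
 * intermediate objects are locked and unlocked as the walk descends.
 */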
/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be temporary/internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */
__private_extern__ void
vm_object_pmap_protect(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_offset_t			pmap_start,
	vm_prot_t			prot)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);

	assert(object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);

	while (TRUE) {
	    if (object->resident_page_count > atop(size) / 2 &&
		    pmap != PMAP_NULL) {
		vm_object_unlock(object);
		pmap_protect(pmap, pmap_start, pmap_start + size, prot);
		return;
	    }

	    {
		register vm_page_t		p;
		register vm_object_offset_t	end;

		end = offset + size;

		if (pmap != PMAP_NULL) {
			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (!p->fictitious &&
				    (offset <= p->offset) && (p->offset < end)) {

					vm_offset_t start = pmap_start +
						(vm_offset_t)(p->offset - offset);

					pmap_protect(pmap, start, start + PAGE_SIZE, prot);
				}
			}
		} else {
			queue_iterate(&object->memq, p, vm_page_t, listq) {
				if (!p->fictitious &&
				    (offset <= p->offset) && (p->offset < end)) {

					pmap_page_protect(p->phys_addr,
							  prot & ~p->page_lock);
				}
			}
		}
	    }

	    if (prot == VM_PROT_NONE) {
		/*
		 * Must follow shadow chain to remove access
		 * to pages in shadowed objects.
		 */
		register vm_object_t	next_object;

		next_object = object->shadow;
		if (next_object != VM_OBJECT_NULL) {
			offset += object->shadow_offset;
			vm_object_lock(next_object);
			vm_object_unlock(object);
			object = next_object;
		} else {
			/*
			 * End of chain - we are done.
			 */
			break;
		}
	    } else {
		/*
		 * Pages in shadowed objects may never have
		 * write permission - we may stop here.
		 */
		break;
	    }
	}

	vm_object_unlock(object);
}
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	vm_object_offset_t	src_lo_offset = src_offset;
	vm_object_offset_t	src_hi_offset = src_offset + size;

	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	VM_OBJ_RES_INCR(src_object);
	vm_object_unlock(src_object);

	/*
	 *	Create a new object to hold the copied pages.
	 *	We fill the new object starting at offset 0,
	 *	regardless of the input offset.
	 *	We don't bother to lock the new object within
	 *	this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */

	for ( ;
	    size != 0 ;
	    src_offset += PAGE_SIZE_64,
			new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t	new_page;
		vm_fault_return_t result;

		while ((new_page = vm_page_alloc(new_object, new_offset))
				== VM_PAGE_NULL) {
			if (!vm_page_wait(interruptible)) {
				vm_object_deallocate(new_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			}
		}

		do {
			vm_prot_t	prot = VM_PROT_READ;
			vm_page_t	_result_page;
			vm_page_t	top_page;
			register vm_page_t	result_page;
			kern_return_t	error_code;

			vm_object_lock(src_object);
			vm_object_paging_begin(src_object);

			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
			result = vm_fault_page(src_object, src_offset,
				VM_PROT_READ, FALSE, interruptible,
				src_lo_offset, src_hi_offset,
				VM_BEHAVIOR_SEQUENTIAL,
				&prot, &_result_page, &top_page,
				(int *)0,
				&error_code, FALSE, FALSE, NULL, 0);

			switch(result) {
			case VM_FAULT_SUCCESS:
				result_page = _result_page;

				/*
				 *	We don't need to hold the object
				 *	lock -- the busy page will be enough.
				 *	[We don't care about picking up any
				 *	new modifications.]
				 *
				 *	Copy the page to the new object.
				 *
				 *	If result_page is clean,
				 *	we could steal it instead
				 *	of copying.
				 */

				vm_object_unlock(result_page->object);
				vm_page_copy(result_page, new_page);

				/*
				 *	Let go of both pages (make them
				 *	not busy, perform wakeup, activate).
				 */

				new_page->busy = FALSE;
				new_page->dirty = TRUE;
				vm_object_lock(result_page->object);
				PAGE_WAKEUP_DONE(result_page);

				vm_page_lock_queues();
				if (!result_page->active &&
				    !result_page->inactive)
					vm_page_activate(result_page);
				vm_page_activate(new_page);
				vm_page_unlock_queues();

				/*
				 *	Release paging references and
				 *	top-level placeholder page, if any.
				 */

				vm_fault_cleanup(result_page->object,
						 top_page);
				break;

			case VM_FAULT_RETRY:
				break;

			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				break;

			case VM_FAULT_MEMORY_SHORTAGE:
				if (vm_page_wait(interruptible))
					break;
				/* fall thru */

			case VM_FAULT_INTERRUPTED:
				vm_page_free(new_page);
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);

			case VM_FAULT_MEMORY_ERROR:
				/*
				 *	A policy choice:
				 *	  (a) ignore pages that we can't
				 *	      copy
				 *	  (b) return the null object if
				 *	      any page fails [chosen]
				 */

				vm_page_lock_queues();
				vm_page_free(new_page);
				vm_page_unlock_queues();
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(error_code ? error_code :
				       KERN_MEMORY_ERROR);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 *	Lose the extra reference, and return our object.
	 */

	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}
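
/*
 * NOTE (added commentary): vm_object_copy_slowly() is the fallback copy
 * path -- it allocates a fresh object and physically copies every page,
 * faulting each source page in through vm_fault_page() as needed, so it
 * never depends on copy-on-write protections being available.
 */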
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */
__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t		*_object,		/* INOUT */
	vm_object_offset_t	offset,			/* IN */
	vm_object_size_t	size,			/* IN */
	boolean_t		*_src_needs_copy,	/* OUT */
	boolean_t		*_dst_needs_copy)	/* OUT */
{
	vm_object_t	object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
	    *_object, offset, size, 0, 0);
	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return(TRUE);
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		assert(object->ref_count > 0);
		object->ref_count++;
		vm_object_res_reference(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return(FALSE);

	default:
		vm_object_unlock(object);
		return(FALSE);
	}
	return(TRUE);
}

static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;
/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	Consider making this call interruptible, as Mike
	 *	intended it to be.
	 *
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case is.
	 */
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
			       THREAD_UNINT);
		vm_object_lock(src_object);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 */

	kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
			       THREAD_UNINT);
		vm_object_lock(src_object);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		vm_object_unlock(src_object);
		mutex_pause();	/* wait a bit */
		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->size < src_offset+size)
		copy->size = src_offset+size;

	if (!copy->pager_ready)
		check_ready = TRUE;

	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_wait(copy, VM_OBJECT_EVENT_PAGER_READY,
				       THREAD_UNINT);
			vm_object_lock(copy);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}
static int copy_delayed_lock_collisions = 0;
static int copy_delayed_max_collisions = 0;
static int copy_delayed_lock_contention = 0;
static int copy_delayed_protect_iterate = 0;
static int copy_delayed_protect_lookup = 0;
static int copy_delayed_protect_lookup_wait = 0;

/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The object must be unlocked on entry.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
__private_extern__ vm_object_t
vm_object_copy_delayed(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size)
{
	vm_object_t		new_copy = VM_OBJECT_NULL;
	vm_object_t		old_copy;
	vm_page_t		p;
	vm_object_size_t	copy_size;

	int collisions = 0;

	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
	 *
	 *	Perform an asymmetric copy-on-write, as follows:
	 *		Create a new object, called a "copy object" to hold
	 *		pages modified by the new mapping (i.e., the copy,
	 *		not the original mapping).
	 *		Record the original object as the backing object for
	 *		the copy object.  If the original mapping does not
	 *		change a page, it may be used read-only by the copy.
	 *		Record the copy object in the original object.
	 *		When the original mapping causes a page to be modified,
	 *		it must be copied to a new page that is "pushed" to
	 *		the copy object.
	 *		Mark the new mapping (the copy object) copy-on-write.
	 *		This makes the copy object itself read-only, allowing
	 *		it to be reused if the original mapping makes no
	 *		changes, and simplifying the synchronization required
	 *		in the "push" operation described above.
	 *
	 *	The copy-on-write is said to be asymmetric because the original
	 *	object is *not* marked copy-on-write. A copied page is pushed
	 *	to the copy object, regardless which party attempted to modify
	 *	the page.
	 *
	 *	Repeated asymmetric copy operations may be done. If the
	 *	original object has not been changed since the last copy, its
	 *	copy object can be reused. Otherwise, a new copy object can be
	 *	inserted between the original object and its previous copy
	 *	object.  Since any copy object is read-only, this cannot
	 *	affect the contents of the previous copy object.
	 *
	 *	Note that a copy object is higher in the object tree than the
	 *	original object; therefore, use of the copy object recorded in
	 *	the original object must be done carefully, to avoid deadlock.
	 */

 Retry:
	vm_object_lock(src_object);

	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;
			if (collisions++ == 0)
				copy_delayed_lock_contention++;

			if (collisions > copy_delayed_max_collisions)
				copy_delayed_max_collisions = collisions;

			mutex_pause();

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			assert(old_copy->ref_count > 0);
			old_copy->ref_count++;

			if (old_copy->size < src_offset+size)
				old_copy->size = src_offset+size;

#if	TASK_SWAPPER
			/*
			 * We have to reproduce some of the code from
			 * vm_object_res_reference because we've taken
			 * the locks out of order here, and deadlock
			 * would result if we simply called that function.
			 */
			if (++old_copy->res_count == 1) {
				assert(old_copy->shadow == src_object);
				vm_object_res_reference(src_object);
			}
#endif	/* TASK_SWAPPER */

			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}

			return(old_copy);
		}
		if (new_copy == VM_OBJECT_NULL) {
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(src_offset + size);
			vm_object_lock(new_copy);
			goto Retry;
		}

		/*
		 *	Adjust the size argument so that the newly-created
		 *	copy object will be large enough to back either the
		 *	new old copy object or the new mapping.
		 */
		if (old_copy->size > src_offset+size)
			size = old_copy->size - src_offset;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		       (old_copy->shadow_offset == (vm_object_offset_t) 0));

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		assert(src_object->ref_count > 0);
		old_copy->shadow = new_copy;
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;		/* for old_copy->shadow ref. */

#if	TASK_SWAPPER
		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}
#endif	/* TASK_SWAPPER */

		vm_object_unlock(old_copy);	/* done with old_copy */
	} else if (new_copy == VM_OBJECT_NULL) {
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(src_offset + size);
		vm_object_lock(new_copy);
		goto Retry;
	}

	/*
	 *	Readjust the copy-object size if necessary.
	 */
	copy_size = new_copy->size;
	if (copy_size < src_offset+size) {
		copy_size = src_offset+size;
		new_copy->size = copy_size;
	}

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = 0;
	new_copy->shadowed = TRUE;	/* caller must set needs_copy */
	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	VM_OBJ_RES_INCR(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(new_copy);

	/*
	 *	Mark all (current) pages of the existing object copy-on-write.
	 *	This object may have a shadow chain below it, but
	 *	those pages will already be marked copy-on-write.
	 */

	vm_object_paging_wait(src_object, THREAD_UNINT);
	copy_delayed_protect_iterate++;
	queue_iterate(&src_object->memq, p, vm_page_t, listq) {
		if (!p->fictitious)
			pmap_page_protect(p->phys_addr,
					  (VM_PROT_ALL & ~VM_PROT_WRITE &
					   ~p->page_lock));
	}
	vm_object_unlock(src_object);
	XPR(XPR_VM_OBJECT,
	    "vm_object_copy_delayed: used copy object %X for source %X\n",
	    (integer_t)new_copy, (integer_t)src_object, 0, 0, 0);

	return(new_copy);
}
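
/*
 * NOTE (added commentary): the "asymmetric" part of the scheme is visible
 * above -- only the pages of src_object are write-protected; the copy
 * object itself is never touched, so an unmodified source lets the same
 * copy object be handed out again by the next vm_object_copy_delayed()
 * call.
 */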
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
__private_extern__ kern_return_t
vm_object_copy_strategically(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*dst_object,	/* OUT */
	vm_object_offset_t	*dst_offset,	/* OUT */
	boolean_t		*dst_needs_copy) /* OUT */
{
	kern_return_t	result;
	boolean_t	interruptible = THREAD_ABORTSAFE; /* XXX */
	memory_object_copy_strategy_t copy_strategy;

	assert(src_object != VM_OBJECT_NULL);

	vm_object_lock(src_object);

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready". Internal objects are always ready.
	 */

	while (!src_object->internal && !src_object->pager_ready) {

		vm_object_wait(	src_object,
				VM_OBJECT_EVENT_PAGER_READY,
				interruptible);
		if (interruptible &&
		    (current_thread()->wait_result != THREAD_AWAKENED)) {
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return(MACH_SEND_INTERRUPTED);
		}
		vm_object_lock(src_object);
	}

	copy_strategy = src_object->copy_strategy;

	/*
	 *	Use the appropriate copy strategy.
	 */

	switch (copy_strategy) {
	    case MEMORY_OBJECT_COPY_NONE:
		result = vm_object_copy_slowly(src_object, src_offset, size,
					       interruptible, dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	    case MEMORY_OBJECT_COPY_CALL:
		result = vm_object_copy_call(src_object, src_offset, size,
				dst_object);
		if (result == KERN_SUCCESS) {
			*dst_offset = src_offset;
			*dst_needs_copy = TRUE;
		}
		break;

	    case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(src_object);
		*dst_object = vm_object_copy_delayed(src_object,
						     src_offset, size);
		*dst_offset = src_offset;
		*dst_needs_copy = TRUE;
		result = KERN_SUCCESS;
		break;

	    case MEMORY_OBJECT_COPY_SYMMETRIC:
		XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0);
		vm_object_unlock(src_object);
		result = KERN_MEMORY_RESTART_COPY;
		break;

	    default:
		panic("copy_strategically: bad strategy");
		result = KERN_INVALID_ARGUMENT;
	}
	return(result);
}
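
/*
 * NOTE (added commentary): this is the single dispatch point for object
 * copies: COPY_NONE takes the slow physical-copy path, COPY_CALL defers to
 * the external memory manager, COPY_DELAY uses the asymmetric
 * copy-on-write path, and COPY_SYMMETRIC is bounced back to the caller as
 * KERN_MEMORY_RESTART_COPY.
 */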
/*
 *	Routine:	vm_object_shadow
 *	Purpose:
 *		Create a new object which is backed by the
 *		specified existing object range.  The source
 *		object reference is deallocated.
 *
 *		The new object and offset into that object
 *		are returned in the source parameters.
 */
boolean_t vm_object_shadow_check = FALSE;

__private_extern__ boolean_t
vm_object_shadow(
	vm_object_t		*object,	/* IN/OUT */
	vm_object_offset_t	*offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;
	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);

	/*
	 *	Determine if we really need a shadow.
	 */

	if (vm_object_shadow_check && source->ref_count == 1 &&
	    (source->shadow == VM_OBJECT_NULL ||
	     source->shadow->copy == VM_OBJECT_NULL))
	{
		source->shadowed = FALSE;
		return FALSE;
	}

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
	return TRUE;
}
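
/*
 * Illustrative sketch (not part of the original source): because the
 * object and offset parameters are IN/OUT, a typical caller swaps its
 * own reference over to the shadow in place.  The entry_* names are
 * hypothetical.
 *
 *	vm_object_t		obj = entry_object;
 *	vm_object_offset_t	off = entry_offset;
 *
 *	vm_object_shadow(&obj, &off, entry_size);
 *	entry_object = obj;	// now the new shadow object
 *	entry_offset = off;	// now relative to the shadow
 */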
/*
 *	The relationship between vm_object structures and
 *	the memory_object requires careful synchronization.
 *
 *	All associations are created by memory_object_create_named
 *	for external pagers and vm_object_pager_create for internal
 *	objects as follows:
 *
 *		pager:	the memory_object itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a reference;
 *
 *		pager_request:
 *			the memory object control port,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to this
 *			port, but no other references.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish references to the memory
 *			object. [The memory manager may not want to
 *			destroy the memory object, but may wish to
 *			refuse or tear down existing memory mappings.]
 *
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the pager field and release the memory object references.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	In addition to the lock on the object, the vm_object_cache_lock
 *	governs the associations.  References gained through the
 *	association require use of the cache lock.
 *
 *	Because the pager field may be cleared spontaneously, it
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the pager reference found in the
 *	vm_object must be valid.  To prevent this from being released,
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the pager values include the
 *	termination routines and vm_object_collapse.]
 */
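
/*
 * A minimal sketch (illustrative, not part of the original source) of
 * the vm_object_paging_begin/end protocol described above, for a
 * routine that must keep the pager association valid while it may
 * block:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);	// bump paging_in_progress
 *	...				// may unlock and block here;
 *					// the pager can be nulled but
 *					// not torn down underneath us
 *	vm_object_paging_end(object);	// wakes waiters (e.g.
 *					// vm_object_paging_wait)
 *	vm_object_unlock(object);
 */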
/*
 *	Routine:	vm_object_pager_dead
 *
 *	Purpose:
 *		A port is being destroyed, and the IPC kobject code
 *		can't tell if it represents a pager port or not.
 *		So this function is called each time it sees a port
 *		die.
 *
 *		THIS IS HORRIBLY INEFFICIENT.  We should only call
 *		this routine if we had requested a notification on
 *		this port.
 */

__private_extern__ void
vm_object_pager_dead(
	memory_object_t	pager)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;

	/*
	 *	Perform essentially the same operations as in vm_object_lookup,
	 *	except that this time we look up based on the memory_object
	 *	port, not the control port.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, FALSE);
	if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
	    entry->object == VM_OBJECT_NULL) {
		vm_object_cache_unlock();
		return;
	}

	object = entry->object;
	entry->object = VM_OBJECT_NULL;

	vm_object_lock(object);
	if (object->ref_count == 0) {
		XPR(XPR_VM_OBJECT_CACHE,
		    "vm_object_destroy: removing %x from cache, head (%x, %x)\n",
		    (integer_t)object,
		    (integer_t)vm_object_cached_list.next,
		    (integer_t)vm_object_cached_list.prev, 0,0);
		queue_remove(&vm_object_cached_list, object,
			     vm_object_t, cached_list);
		vm_object_cached_count--;
	}
	object->ref_count++;
	vm_object_res_reference(object);

	object->can_persist = FALSE;

	assert(object->pager == pager);

	/*
	 *	Remove the pager association.
	 *
	 *	Note that the memory_object itself is dead, so
	 *	we don't bother with it.
	 */

	object->pager = MEMORY_OBJECT_NULL;

	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_pager_wakeup(pager);

	/*
	 *	Release the pager reference.  Note that there's no
	 *	point in trying the memory_object_terminate call
	 *	because the memory_object itself is dead.  Also
	 *	release the memory_object_control reference, since
	 *	the pager didn't do that either.
	 */

	memory_object_deallocate(pager);
	memory_object_control_deallocate(object->pager_request);

	/*
	 *	Restart pending page requests
	 */
	vm_object_lock(object);
	vm_object_abort_activity(object);
	vm_object_unlock(object);

	/*
	 *	Lose the object reference.
	 */

	vm_object_deallocate(object);
}
2447 * Routine: vm_object_enter
2449 * Find a VM object corresponding to the given
2450 * pager; if no such object exists, create one,
2451 * and initialize the pager.
2455 memory_object_t pager
,
2456 vm_object_size_t size
,
2461 register vm_object_t object
;
2462 vm_object_t new_object
;
2463 boolean_t must_init
;
2464 vm_object_hash_entry_t entry
, new_entry
;
2466 if (pager
== MEMORY_OBJECT_NULL
)
2467 return(vm_object_allocate(size
));
2469 new_object
= VM_OBJECT_NULL
;
2470 new_entry
= VM_OBJECT_HASH_ENTRY_NULL
;
2474 * Look for an object associated with this port.
2478 vm_object_cache_lock();
2480 entry
= vm_object_hash_lookup(pager
, FALSE
);
2483 * If a previous object is being terminated,
2484 * we must wait for the termination message
2487 * We set kobject to a non-null value to let the
2488 * terminator know that someone is waiting.
2489 * Among the possibilities is that the port
2490 * could die while we're waiting. Must restart
2491 * instead of continuing the loop.
2494 if (entry
!= VM_OBJECT_HASH_ENTRY_NULL
) {
2495 if (entry
->object
!= VM_OBJECT_NULL
)
2498 entry
->waiting
= TRUE
;
2499 assert_wait((event_t
) pager
, THREAD_UNINT
);
2500 vm_object_cache_unlock();
2501 thread_block((void (*)(void))0);
2506 * We must unlock to create a new object;
2507 * if we do so, we must try the lookup again.
2510 if (new_object
== VM_OBJECT_NULL
) {
2511 vm_object_cache_unlock();
2512 assert(new_entry
== VM_OBJECT_HASH_ENTRY_NULL
);
2513 new_entry
= vm_object_hash_entry_alloc(pager
);
2514 new_object
= vm_object_allocate(size
);
2515 vm_object_cache_lock();
2518 * Lookup failed twice, and we have something
2519 * to insert; set the object.
2522 if (entry
== VM_OBJECT_HASH_ENTRY_NULL
) {
2523 vm_object_hash_insert(new_entry
);
2525 new_entry
= VM_OBJECT_HASH_ENTRY_NULL
;
2528 entry
->object
= new_object
;
2529 new_object
= VM_OBJECT_NULL
;
2534 object
= entry
->object
;
2535 assert(object
!= VM_OBJECT_NULL
);
2538 vm_object_lock(object
);
2539 assert(object
->pager_created
);
2540 assert(!internal
|| object
->internal
);
2542 assert(!object
->named
);
2543 object
->named
= TRUE
;
2545 if (object
->ref_count
== 0) {
2546 XPR(XPR_VM_OBJECT_CACHE
,
2547 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
2549 (integer_t
)vm_object_cached_list
.next
,
2550 (integer_t
)vm_object_cached_list
.prev
, 0,0);
2551 queue_remove(&vm_object_cached_list
, object
,
2552 vm_object_t
, cached_list
);
2553 vm_object_cached_count
--;
2555 object
->ref_count
++;
2556 vm_object_res_reference(object
);
2557 vm_object_unlock(object
);
2561 assert(object
->ref_count
> 0);
2565 vm_object_cache_unlock();
2568 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
2569 (integer_t
)pager
, (integer_t
)object
, must_init
, 0, 0);
2572 * If we raced to create a vm_object but lost, let's
2576 if (new_object
!= VM_OBJECT_NULL
)
2577 vm_object_deallocate(new_object
);
2579 if (new_entry
!= VM_OBJECT_HASH_ENTRY_NULL
)
2580 vm_object_hash_entry_free(new_entry
);
2583 pager_request_t pager_request
;
2586 * Allocate request port.
2589 pager_request
= memory_object_control_allocate(object
);
2590 assert (pager_request
!= PAGER_REQUEST_NULL
);
2592 vm_object_lock(object
);
2595 * Copy the reference we were given.
2598 memory_object_reference(pager
);
2599 object
->pager_created
= TRUE
;
2600 object
->pager
= pager
;
2601 object
->internal
= internal
;
2602 object
->pager_trusted
= internal
;
2604 /* copy strategy invalid until set by memory manager */
2605 object
->copy_strategy
= MEMORY_OBJECT_COPY_INVALID
;
2607 object
->pager_request
= pager_request
;
2608 object
->pager_ready
= FALSE
;
2610 vm_object_unlock(object
);
2613 * Let the pager know we're using it.
2616 (void) memory_object_init(pager
,
2617 object
->pager_request
,
2620 vm_object_lock(object
);
2622 object
->named
= TRUE
;
2624 object
->pager_ready
= TRUE
;
2625 vm_object_wakeup(object
, VM_OBJECT_EVENT_PAGER_READY
);
2628 object
->pager_initialized
= TRUE
;
2629 vm_object_wakeup(object
, VM_OBJECT_EVENT_INITIALIZED
);
2631 vm_object_lock(object
);
2635 * [At this point, the object must be locked]
2639 * Wait for the work above to be done by the first
2640 * thread to map this object.
2643 while (!object
->pager_initialized
) {
2644 vm_object_wait( object
,
2645 VM_OBJECT_EVENT_INITIALIZED
,
2647 vm_object_lock(object
);
2649 vm_object_unlock(object
);
2652 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
2653 (integer_t
)object
, (integer_t
)object
->pager
, internal
, 0,0);
2658 * Routine: vm_object_pager_create
2660 * Create a memory object for an internal object.
2661 * In/out conditions:
2662 * The object is locked on entry and exit;
2663 * it may be unlocked within this call.
2665 * Only one thread may be performing a
2666 * vm_object_pager_create on an object at
2667 * a time. Presumably, only the pageout
2668 * daemon will be using this routine.
2672 vm_object_pager_create(
2673 register vm_object_t object
)
2675 memory_object_t pager
;
2676 vm_object_hash_entry_t entry
;
2678 vm_object_size_t size
;
2679 vm_external_map_t map
;
2680 #endif /* MACH_PAGEMAP */
2682 XPR(XPR_VM_OBJECT
, "vm_object_pager_create, object 0x%X\n",
2683 (integer_t
)object
, 0,0,0,0);
2685 if (memory_manager_default_check() != KERN_SUCCESS
)
2689 * Prevent collapse or termination by holding a paging reference
2692 vm_object_paging_begin(object
);
2693 if (object
->pager_created
) {
2695 * Someone else got to it first...
2696 * wait for them to finish initializing the ports
2698 while (!object
->pager_initialized
) {
2699 vm_object_wait( object
,
2700 VM_OBJECT_EVENT_INITIALIZED
,
2702 vm_object_lock(object
);
2704 vm_object_paging_end(object
);
2709 * Indicate that a memory object has been assigned
2710 * before dropping the lock, to prevent a race.
2713 object
->pager_created
= TRUE
;
2714 object
->paging_offset
= 0;
2717 size
= object
->size
;
2718 #endif /* MACH_PAGEMAP */
2719 vm_object_unlock(object
);
2722 map
= vm_external_create(size
);
2723 vm_object_lock(object
);
2724 assert(object
->size
== size
);
2725 object
->existence_map
= map
;
2726 vm_object_unlock(object
);
2727 #endif /* MACH_PAGEMAP */
2730 * Create the [internal] pager, and associate it with this object.
2732 * We make the association here so that vm_object_enter()
2733 * can look up the object to complete initializing it. No
2734 * user will ever map this object.
2737 memory_object_default_t dmm
;
2738 vm_size_t cluster_size
;
2740 /* acquire a reference for the default memory manager */
2741 dmm
= memory_manager_default_reference(&cluster_size
);
2742 assert(cluster_size
>= PAGE_SIZE
);
2744 object
->cluster_size
= cluster_size
; /* XXX ??? */
2745 assert(object
->temporary
);
2747 /* create our new memory object */
2748 (void) memory_object_create(dmm
, object
->size
, &pager
);
2750 memory_object_default_deallocate(dmm
);
2753 entry
= vm_object_hash_entry_alloc(pager
);
2755 vm_object_cache_lock();
2756 vm_object_hash_insert(entry
);
2758 entry
->object
= object
;
2759 vm_object_cache_unlock();
2762 * A reference was returned by
2763 * memory_object_create(), and it is
2764 * copied by vm_object_enter().
2767 if (vm_object_enter(pager
, object
->size
, TRUE
, TRUE
, FALSE
) != object
)
2768 panic("vm_object_pager_create: mismatch");
2771 * Drop the reference we were passed.
2773 memory_object_deallocate(pager
);
2775 vm_object_lock(object
);
2778 * Release the paging reference
2780 vm_object_paging_end(object
);
/*
 *	Routine:	vm_object_remove
 *	Purpose:
 *		Eliminate the pager/object association
 *		for this pager.
 *	Conditions:
 *		The object cache must be locked.
 */
__private_extern__ void
vm_object_remove(
	vm_object_t	object)
{
	memory_object_t pager;
	pager_request_t pager_request;

	if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
		vm_object_hash_entry_t	entry;

		entry = vm_object_hash_lookup(pager, FALSE);
		if (entry != VM_OBJECT_HASH_ENTRY_NULL)
			entry->object = VM_OBJECT_NULL;
	}
}
/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
static long	object_collapses = 0;
static long	object_bypasses  = 0;

static boolean_t	vm_object_collapse_allowed = TRUE;
static boolean_t	vm_object_bypass_allowed = TRUE;

static int	vm_external_discarded;
static int	vm_external_collapsed;
2824 * Routine: vm_object_do_collapse
2826 * Collapse an object with the object backing it.
2827 * Pages in the backing object are moved into the
2828 * parent, and the backing object is deallocated.
2830 * Both objects and the cache are locked; the page
2831 * queues are unlocked.
2835 vm_object_do_collapse(
2837 vm_object_t backing_object
)
2840 vm_object_offset_t new_offset
, backing_offset
;
2841 vm_object_size_t size
;
2843 backing_offset
= object
->shadow_offset
;
2844 size
= object
->size
;
2847 * Move all in-memory pages from backing_object
2848 * to the parent. Pages that have been paged out
2849 * will be overwritten by any of the parent's
2850 * pages that shadow them.
2853 while (!queue_empty(&backing_object
->memq
)) {
2855 p
= (vm_page_t
) queue_first(&backing_object
->memq
);
2857 new_offset
= (p
->offset
- backing_offset
);
2859 assert(!p
->busy
|| p
->absent
);
2862 * If the parent has a page here, or if
2863 * this page falls outside the parent,
2866 * Otherwise, move it as planned.
2869 if (p
->offset
< backing_offset
|| new_offset
>= size
) {
2872 pp
= vm_page_lookup(object
, new_offset
);
2873 if (pp
== VM_PAGE_NULL
) {
2876 * Parent now has no page.
2877 * Move the backing object's page up.
2880 vm_page_rename(p
, object
, new_offset
);
2882 } else if (pp
->absent
) {
2885 * Parent has an absent page...
2886 * it's not being paged in, so
2887 * it must really be missing from
2890 * Throw out the absent page...
2891 * any faults looking for that
2892 * page will restart with the new
2897 vm_page_rename(p
, object
, new_offset
);
2898 #endif /* MACH_PAGEMAP */
2900 assert(! pp
->absent
);
2903 * Parent object has a real page.
2904 * Throw away the backing object's
2912 assert(object
->pager
== MEMORY_OBJECT_NULL
||
2913 backing_object
->pager
== MEMORY_OBJECT_NULL
);
2915 if (backing_object
->pager
!= MEMORY_OBJECT_NULL
) {
2916 vm_object_hash_entry_t entry
;
2919 * Move the pager from backing_object to object.
2921 * XXX We're only using part of the paging space
2922 * for keeps now... we ought to discard the
2926 object
->pager
= backing_object
->pager
;
2927 entry
= vm_object_hash_lookup(object
->pager
, FALSE
);
2928 assert(entry
!= VM_OBJECT_HASH_ENTRY_NULL
);
2929 entry
->object
= object
;
2930 object
->pager_created
= backing_object
->pager_created
;
2931 object
->pager_request
= backing_object
->pager_request
;
2932 object
->pager_ready
= backing_object
->pager_ready
;
2933 object
->pager_initialized
= backing_object
->pager_initialized
;
2934 object
->cluster_size
= backing_object
->cluster_size
;
2935 object
->paging_offset
=
2936 backing_object
->paging_offset
+ backing_offset
;
2937 if (object
->pager_request
!= PAGER_REQUEST_NULL
) {
2938 memory_object_control_collapse(object
->pager_request
,
2943 vm_object_cache_unlock();
2945 object
->paging_offset
= backing_object
->paging_offset
+ backing_offset
;
2949 * If the shadow offset is 0, the use the existence map from
2950 * the backing object if there is one. If the shadow offset is
2951 * not zero, toss it.
2953 * XXX - If the shadow offset is not 0 then a bit copy is needed
2954 * if the map is to be salvaged. For now, we just just toss the
2955 * old map, giving the collapsed object no map. This means that
2956 * the pager is invoked for zero fill pages. If analysis shows
2957 * that this happens frequently and is a performance hit, then
2958 * this code should be fixed to salvage the map.
2960 assert(object
->existence_map
== VM_EXTERNAL_NULL
);
2961 if (backing_offset
|| (size
!= backing_object
->size
)) {
2962 vm_external_discarded
++;
2963 vm_external_destroy(backing_object
->existence_map
,
2964 backing_object
->size
);
2967 vm_external_collapsed
++;
2968 object
->existence_map
= backing_object
->existence_map
;
2970 backing_object
->existence_map
= VM_EXTERNAL_NULL
;
2971 #endif /* MACH_PAGEMAP */
2974 * Object now shadows whatever backing_object did.
2975 * Note that the reference to backing_object->shadow
2976 * moves from within backing_object to within object.
2979 object
->shadow
= backing_object
->shadow
;
2980 object
->shadow_offset
+= backing_object
->shadow_offset
;
2981 assert((object
->shadow
== VM_OBJECT_NULL
) ||
2982 (object
->shadow
->copy
== VM_OBJECT_NULL
));
2985 * Discard backing_object.
2987 * Since the backing object has no pages, no
2988 * pager left, and no object references within it,
2989 * all that is necessary is to dispose of it.
2992 assert((backing_object
->ref_count
== 1) &&
2993 (backing_object
->resident_page_count
== 0) &&
2994 (backing_object
->paging_in_progress
== 0));
2996 backing_object
->alive
= FALSE
;
2997 vm_object_unlock(backing_object
);
2999 XPR(XPR_VM_OBJECT
, "vm_object_collapse, collapsed 0x%X\n",
3000 (integer_t
)backing_object
, 0,0,0,0);
3002 zfree(vm_object_zone
, (vm_offset_t
) backing_object
);
3008 vm_object_do_bypass(
3010 vm_object_t backing_object
)
3013 * Make the parent shadow the next object
3019 * Do object reference in-line to
3020 * conditionally increment shadow's
3021 * residence count. If object is not
3022 * resident, leave residence count
3025 if (backing_object
->shadow
!= VM_OBJECT_NULL
) {
3026 vm_object_lock(backing_object
->shadow
);
3027 backing_object
->shadow
->ref_count
++;
3028 if (object
->res_count
!= 0)
3029 vm_object_res_reference(backing_object
->shadow
);
3030 vm_object_unlock(backing_object
->shadow
);
3032 #else /* TASK_SWAPPER */
3033 vm_object_reference(backing_object
->shadow
);
3034 #endif /* TASK_SWAPPER */
3036 object
->shadow
= backing_object
->shadow
;
3037 object
->shadow_offset
+= backing_object
->shadow_offset
;
3040 * Backing object might have had a copy pointer
3041 * to us. If it did, clear it.
3043 if (backing_object
->copy
== object
) {
3044 backing_object
->copy
= VM_OBJECT_NULL
;
3048 * Drop the reference count on backing_object.
3050 * Since its ref_count was at least 2, it
3051 * will not vanish; so we don't need to call
3052 * vm_object_deallocate.
3053 * [FBDP: that doesn't seem to be true any more]
3055 * The res_count on the backing object is
3056 * conditionally decremented. It's possible
3057 * (via vm_pageout_scan) to get here with
3058 * a "swapped" object, which has a 0 res_count,
3059 * in which case, the backing object res_count
3060 * is already down by one.
3062 * Don't call vm_object_deallocate unless
3063 * ref_count drops to zero.
3065 * The ref_count can drop to zero here if the
3066 * backing object could be bypassed but not
3067 * collapsed, such as when the backing object
3068 * is temporary and cachable.
3071 if (backing_object
->ref_count
> 1) {
3072 backing_object
->ref_count
--;
3074 if (object
->res_count
!= 0)
3075 vm_object_res_deallocate(backing_object
);
3076 assert(backing_object
->ref_count
> 0);
3077 #endif /* TASK_SWAPPER */
3078 vm_object_unlock(backing_object
);
3082 * Drop locks so that we can deallocate
3083 * the backing object.
3087 if (object
->res_count
== 0) {
3088 /* XXX get a reference for the deallocate below */
3089 vm_object_res_reference(backing_object
);
3091 #endif /* TASK_SWAPPER */
3092 vm_object_unlock(object
);
3093 vm_object_unlock(backing_object
);
3094 vm_object_deallocate(backing_object
);
3097 * Relock object. We don't have to reverify
3098 * its state since vm_object_collapse will
3099 * do that for us as it starts at the
3103 vm_object_lock(object
);
3111 * vm_object_collapse:
3113 * Perform an object collapse or an object bypass if appropriate.
3114 * The real work of collapsing and bypassing is performed in
3115 * the routines vm_object_do_collapse and vm_object_do_bypass.
3117 * Requires that the object be locked and the page queues be unlocked.
3120 __private_extern__
void
3122 register vm_object_t object
)
3124 register vm_object_t backing_object
;
3125 register vm_object_offset_t backing_offset
;
3126 register vm_object_size_t size
;
3127 register vm_object_offset_t new_offset
;
3128 register vm_page_t p
;
3130 vm_offset_t current_offset
;
3132 if (! vm_object_collapse_allowed
&& ! vm_object_bypass_allowed
) {
3136 XPR(XPR_VM_OBJECT
, "vm_object_collapse, obj 0x%X\n",
3137 (integer_t
)object
, 0,0,0,0);
3141 * Verify that the conditions are right for either
3142 * collapse or bypass:
3144 * The object exists and no pages in it are currently
3145 * being paged out, and
3147 if (object
== VM_OBJECT_NULL
||
3148 object
->paging_in_progress
!= 0 ||
3149 object
->absent_count
!= 0)
3153 * There is a backing object, and
3156 if ((backing_object
= object
->shadow
) == VM_OBJECT_NULL
)
3159 vm_object_lock(backing_object
);
3163 * The backing object is not read_only,
3164 * and no pages in the backing object are
3165 * currently being paged out.
3166 * The backing object is internal.
3170 if (!backing_object
->internal
||
3171 backing_object
->paging_in_progress
!= 0) {
3172 vm_object_unlock(backing_object
);
3177 * The backing object can't be a copy-object:
3178 * the shadow_offset for the copy-object must stay
3179 * as 0. Furthermore (for the 'we have all the
3180 * pages' case), if we bypass backing_object and
3181 * just shadow the next object in the chain, old
3182 * pages from that object would then have to be copied
3183 * BOTH into the (former) backing_object and into the
3186 if (backing_object
->shadow
!= VM_OBJECT_NULL
&&
3187 backing_object
->shadow
->copy
!= VM_OBJECT_NULL
) {
3188 vm_object_unlock(backing_object
);
3193 * We can now try to either collapse the backing
3194 * object (if the parent is the only reference to
3195 * it) or (perhaps) remove the parent's reference
3198 * If there is exactly one reference to the backing
3199 * object, we may be able to collapse it into the
3202 * The backing object must not have a pager
3203 * created for it, since collapsing an object
3204 * into a backing_object dumps new pages into
3205 * the backing_object that its pager doesn't
3209 if (backing_object
->ref_count
== 1 &&
3210 ! object
->pager_created
&&
3211 vm_object_collapse_allowed
) {
3214 "vm_object_collapse: %x to %x, pager %x, pager_request %x\n",
3215 (integer_t
)backing_object
, (integer_t
)object
,
3216 (integer_t
)backing_object
->pager
,
3217 (integer_t
)backing_object
->pager_request
, 0);
3220 * We need the cache lock for collapsing,
3221 * but we must not deadlock.
3224 if (! vm_object_cache_lock_try()) {
3225 vm_object_unlock(backing_object
);
3230 * Collapse the object with its backing
3231 * object, and try again with the object's
3232 * new backing object.
3235 vm_object_do_collapse(object
, backing_object
);
3241 * Collapsing the backing object was not possible
3242 * or permitted, so let's try bypassing it.
3245 if (! vm_object_bypass_allowed
) {
3246 vm_object_unlock(backing_object
);
3252 * If the backing object has a pager but no pagemap,
3253 * then we cannot bypass it, because we don't know
3254 * what pages it has.
3256 if (backing_object
->pager_created
3258 && (backing_object
->existence_map
== VM_EXTERNAL_NULL
)
3259 #endif /* MACH_PAGEMAP */
3261 vm_object_unlock(backing_object
);
3266 * If the object has a pager but no pagemap,
3267 * then we cannot bypass it, because we don't know
3268 * what pages it has.
3270 if (object
->pager_created
3272 && (object
->existence_map
== VM_EXTERNAL_NULL
)
3273 #endif /* MACH_PAGEMAP */
3275 vm_object_unlock(backing_object
);
3279 backing_offset
= object
->shadow_offset
;
3280 size
= object
->size
;
3283 * If all of the pages in the backing object are
3284 * shadowed by the parent object, the parent
3285 * object no longer has to shadow the backing
3286 * object; it can shadow the next one in the
3289 * If the backing object has existence info,
3290 * we must check examine its existence info
3295 if(object
->cow_hint
>= size
)
3296 object
->cow_hint
= 0;
3297 current_offset
= object
->cow_hint
;
3299 if (vm_page_lookup(object
,
3300 (vm_object_offset_t
)current_offset
)
3302 current_offset
+=PAGE_SIZE
;
3303 } else if ((object
->pager_created
) &&
3304 (object
->existence_map
!= NULL
) &&
3305 (vm_external_state_get(object
->existence_map
,
3307 != VM_EXTERNAL_STATE_ABSENT
)) {
3308 current_offset
+=PAGE_SIZE
;
3309 } else if (vm_page_lookup(backing_object
,
3310 (vm_object_offset_t
)current_offset
3311 + backing_offset
)!= VM_PAGE_NULL
) {
3312 /* found a dependency */
3313 object
->cow_hint
= current_offset
;
3314 vm_object_unlock(backing_object
);
3316 } else if ((backing_object
->pager_created
) &&
3317 (backing_object
->existence_map
!= NULL
) &&
3318 (vm_external_state_get(
3319 backing_object
->existence_map
,
3320 current_offset
+ backing_offset
)
3321 != VM_EXTERNAL_STATE_ABSENT
)) {
3322 /* found a dependency */
3323 object
->cow_hint
= current_offset
;
3324 vm_object_unlock(backing_object
);
3327 current_offset
+=PAGE_SIZE
;
3329 if(current_offset
>= size
) {
3330 /* wrap at end of object */
3333 if(current_offset
== object
->cow_hint
) {
3334 /* we are free of shadow influence */
3338 /* reset the cow_hint for any objects deeper in the chain */
3339 object
->cow_hint
= 0;
3344 * All interesting pages in the backing object
3345 * already live in the parent or its pager.
3346 * Thus we can bypass the backing object.
3349 vm_object_do_bypass(object
, backing_object
);
3352 * Try again with this object's new backing object.
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 *		The object must not have paging_in_progress, usually
 *		guaranteed by not having a pager.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

__private_extern__ void
vm_object_page_remove(
	register vm_object_t		object,
	register vm_object_offset_t	start,
	register vm_object_offset_t	end)
{
	register vm_page_t	p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */

	if (atop(end - start) < (unsigned)object->resident_page_count/16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->cleaning && !p->pageout);
				pmap_page_protect(p->phys_addr,
						  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				assert(!p->cleaning && !p->pageout);
				pmap_page_protect(p->phys_addr,
						  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
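
/*
 * Worked example of the heuristic above (illustrative numbers only):
 * removing a 2-page range from an object with 64 resident pages gives
 * atop(end - start) == 2 and 64/16 == 4, so 2 < 4 and the per-page
 * vm_page_lookup() path is taken; removing a 16-page range from the
 * same object (16 >= 4) walks the whole memq instead.
 */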
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object into coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *		The object(s) must *not* be locked. The map must be locked
 *		to preserve the reference to the object(s).
 */
static int vm_object_coalesce_count = 0;

__private_extern__ boolean_t
vm_object_coalesce(
	register vm_object_t		prev_object,
	vm_object_t			next_object,
	vm_object_offset_t		prev_offset,
	vm_object_offset_t		next_offset,
	vm_object_size_t		prev_size,
	vm_object_size_t		next_size)
{
	vm_object_size_t	newsize;

	if (next_object != VM_OBJECT_NULL) {
		return(FALSE);
	}

	if (prev_object == VM_OBJECT_NULL) {
		return(TRUE);
	}

	XPR(XPR_VM_OBJECT,
	    "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
	    (integer_t)prev_object, prev_offset, prev_size, next_size, 0);

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use any way:
	 *	. more than one reference
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. paging references (pages might be in page-list)
	 */

	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->true_share != FALSE) ||
	    (prev_object->paging_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	vm_object_coalesce_count++;

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
			      prev_offset + prev_size,
			      prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size) {
#if	MACH_PAGEMAP
		/*
		 *	We cannot extend an object that has existence info,
		 *	since the existence info might then fail to cover
		 *	the entire object.
		 *
		 *	This assertion must be true because the object
		 *	has no pager, and we only create existence info
		 *	for objects with pagers.
		 */
		assert(prev_object->existence_map == VM_EXTERNAL_NULL);
#endif	/* MACH_PAGEMAP */
		prev_object->size = newsize;
	}

	vm_object_unlock(prev_object);
	return(TRUE);
}
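
/*
 * Illustrative sketch (not part of the original source): a vm_map-level
 * caller would use vm_object_coalesce() when a new allocation lands
 * immediately after an existing entry.  The prev_entry_* and new_size
 * names are hypothetical.
 *
 *	if (vm_object_coalesce(prev_entry_object, VM_OBJECT_NULL,
 *			       prev_entry_offset,
 *			       (vm_object_offset_t) 0,
 *			       prev_entry_size, new_size)) {
 *		// extend the previous entry by new_size instead of
 *		// creating a new entry/object pair
 *	}
 */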
/*
 *	Attach a set of physical pages to an object, so that they can
 *	be mapped by mapping the object.  Typically used to map IO memory.
 *
 *	The mapping function and its private data are used to obtain the
 *	physical addresses for each page to be mapped.
 */
void
vm_object_page_map(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	vm_object_offset_t	(*map_fn)(void *map_fn_data,
					  vm_object_offset_t offset),
	void			*map_fn_data)	/* private to map_fn */
{
	int			num_pages;
	int			i;
	vm_page_t		m;
	vm_page_t		old_page;
	vm_object_offset_t	addr;

	num_pages = atop(size);

	for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {

		addr = (*map_fn)(map_fn_data, offset);

		while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_object_lock(object);
		if ((old_page = vm_page_lookup(object, offset))
		    != VM_PAGE_NULL) {
			vm_page_lock_queues();
			vm_page_free(old_page);
			vm_page_unlock_queues();
		}

		vm_page_init(m, addr);
		/* private normally requires lock_queues but since we */
		/* are initializing the page, its not necessary here */
		m->private = TRUE;		/* don`t free page */

		vm_page_insert(m, object, offset);

		PAGE_WAKEUP_DONE(m);
		vm_object_unlock(object);
	}
}
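
/*
 * Illustrative sketch (not part of the original source): a map_fn
 * simply translates an object offset into a physical address.  A
 * hypothetical driver mapping a physically contiguous window might
 * pass:
 *
 *	static vm_object_offset_t
 *	dev_map_fn(void *data, vm_object_offset_t offset)
 *	{
 *		return ((vm_object_offset_t)(vm_offset_t)data + offset);
 *	}
 *
 *	vm_object_page_map(dev_object, 0, dev_size,
 *			   dev_map_fn, (void *) dev_phys_base);
 */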
3584 #include <mach_kdb.h>
3587 #include <ddb/db_output.h>
3588 #include <vm/vm_print.h>
3590 #define printf kdbprintf
3592 extern boolean_t
vm_object_cached(
3593 vm_object_t object
);
3595 extern void print_bitstring(
3598 boolean_t vm_object_print_pages
= FALSE
;
3604 printf("%c%c%c%c%c%c%c%c",
3605 ((byte
& (1 << 0)) ? '1' : '0'),
3606 ((byte
& (1 << 1)) ? '1' : '0'),
3607 ((byte
& (1 << 2)) ? '1' : '0'),
3608 ((byte
& (1 << 3)) ? '1' : '0'),
3609 ((byte
& (1 << 4)) ? '1' : '0'),
3610 ((byte
& (1 << 5)) ? '1' : '0'),
3611 ((byte
& (1 << 6)) ? '1' : '0'),
3612 ((byte
& (1 << 7)) ? '1' : '0'));
3617 register vm_object_t object
)
3619 register vm_object_t o
;
3621 queue_iterate(&vm_object_cached_list
, o
, vm_object_t
, cached_list
) {
3631 * vm_external_print: [ debug ]
3635 vm_external_map_t map
,
3638 if (map
== VM_EXTERNAL_NULL
) {
3641 vm_size_t existence_size
= stob(size
);
3642 printf("{ size=%d, map=[", existence_size
);
3643 if (existence_size
> 0) {
3644 print_bitstring(map
[0]);
3646 if (existence_size
> 1) {
3647 print_bitstring(map
[1]);
3649 if (existence_size
> 2) {
3651 print_bitstring(map
[existence_size
-1]);
3657 #endif /* MACH_PAGEMAP */
3666 int orig_db_indent
= db_indent
;
3669 if (object
== VM_OBJECT_NULL
) {
3670 db_indent
= orig_db_indent
;
3676 iprintf("object 0x%x", object
);
3677 printf(", shadow=0x%x", object
->shadow
);
3678 printf(", copy=0x%x", object
->copy
);
3679 printf(", pager=0x%x", object
->pager
);
3680 printf(", ref=%d\n", object
->ref_count
);
3683 object
= object
->shadow
;
3689 * vm_object_print: [ debug ]
3694 boolean_t have_addr
,
3698 register vm_page_t p
;
3704 if (object
== VM_OBJECT_NULL
)
3707 iprintf("object 0x%x\n", object
);
3711 iprintf("size=0x%x", object
->size
);
3712 printf(", cluster=0x%x", object
->cluster_size
);
3713 printf(", frozen=0x%x", object
->frozen_size
);
3714 printf(", ref_count=%d\n", object
->ref_count
);
3717 printf("res_count=%d, ", object
->res_count
);
3718 #endif /* TASK_SWAPPER */
3719 printf("resident_page_count=%d\n", object
->resident_page_count
);
3721 iprintf("shadow=0x%x", object
->shadow
);
3722 if (object
->shadow
) {
3724 vm_object_t shadow
= object
;
3725 while(shadow
= shadow
->shadow
)
3727 printf(" (depth %d)", i
);
3729 printf(", copy=0x%x", object
->copy
);
3730 printf(", shadow_offset=0x%x", object
->shadow_offset
);
3731 printf(", last_alloc=0x%x\n", object
->last_alloc
);
3733 iprintf("pager=0x%x", object
->pager
);
3734 printf(", paging_offset=0x%x", object
->paging_offset
);
3735 printf(", pager_request=0x%x\n", object
->pager_request
);
3737 iprintf("copy_strategy=%d[", object
->copy_strategy
);
3738 switch (object
->copy_strategy
) {
3739 case MEMORY_OBJECT_COPY_NONE
:
3740 printf("copy_none");
3743 case MEMORY_OBJECT_COPY_CALL
:
3744 printf("copy_call");
3747 case MEMORY_OBJECT_COPY_DELAY
:
3748 printf("copy_delay");
3751 case MEMORY_OBJECT_COPY_SYMMETRIC
:
3752 printf("copy_symmetric");
3755 case MEMORY_OBJECT_COPY_INVALID
:
3756 printf("copy_invalid");
3763 printf(", absent_count=%d\n", object
->absent_count
);
3765 iprintf("all_wanted=0x%x<", object
->all_wanted
);
3767 if (vm_object_wanted(object
, VM_OBJECT_EVENT_INITIALIZED
)) {
3768 printf("%sinit", s
);
3771 if (vm_object_wanted(object
, VM_OBJECT_EVENT_PAGER_READY
)) {
3772 printf("%sready", s
);
3775 if (vm_object_wanted(object
, VM_OBJECT_EVENT_PAGING_IN_PROGRESS
)) {
3776 printf("%spaging", s
);
3779 if (vm_object_wanted(object
, VM_OBJECT_EVENT_ABSENT_COUNT
)) {
3780 printf("%sabsent", s
);
3783 if (vm_object_wanted(object
, VM_OBJECT_EVENT_LOCK_IN_PROGRESS
)) {
3784 printf("%slock", s
);
3787 if (vm_object_wanted(object
, VM_OBJECT_EVENT_UNCACHING
)) {
3788 printf("%suncaching", s
);
3791 if (vm_object_wanted(object
, VM_OBJECT_EVENT_COPY_CALL
)) {
3792 printf("%scopy_call", s
);
3795 if (vm_object_wanted(object
, VM_OBJECT_EVENT_CACHING
)) {
3796 printf("%scaching", s
);
3800 printf(", paging_in_progress=%d\n", object
->paging_in_progress
);
3802 iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
3803 (object
->pager_created
? "" : "!"),
3804 (object
->pager_initialized
? "" : "!"),
3805 (object
->pager_ready
? "" : "!"),
3806 (object
->can_persist
? "" : "!"),
3807 (object
->pager_trusted
? "" : "!"),
3808 (object
->pageout
? "" : "!"),
3809 (object
->internal
? "internal" : "external"),
3810 (object
->temporary
? "temporary" : "permanent"));
3811 iprintf("%salive, %slock_in_progress, %slock_restart, %sshadowed, %scached, %sprivate\n",
3812 (object
->alive
? "" : "!"),
3813 (object
->lock_in_progress
? "" : "!"),
3814 (object
->lock_restart
? "" : "!"),
3815 (object
->shadowed
? "" : "!"),
3816 (vm_object_cached(object
) ? "" : "!"),
3817 (object
->private ? "" : "!"));
3818 iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
3819 (object
->advisory_pageout
? "" : "!"),
3820 (object
->silent_overwrite
? "" : "!"));
3823 iprintf("existence_map=");
3824 vm_external_print(object
->existence_map
, object
->size
);
3825 #endif /* MACH_PAGEMAP */
3827 iprintf("paging_object=0x%x\n", object
->paging_object
);
3828 #endif /* MACH_ASSERT */
3830 if (vm_object_print_pages
) {
3832 p
= (vm_page_t
) queue_first(&object
->memq
);
3833 while (!queue_end(&object
->memq
, (queue_entry_t
) p
)) {
3835 iprintf("memory:=");
3836 } else if (count
== 2) {
3845 printf("(off=0x%X,page=0x%X)", p
->offset
, (integer_t
) p
);
3846 p
= (vm_page_t
) queue_next(&p
->listq
);
3857 * vm_object_find [ debug ]
3859 * Find all tasks which reference the given vm_object.
3862 boolean_t
vm_object_find(vm_object_t object
);
3863 boolean_t vm_object_print_verbose
= FALSE
;
3871 vm_map_entry_t entry
;
3872 processor_set_t pset
= &default_pset
;
3873 boolean_t found
= FALSE
;
3875 queue_iterate(&pset
->tasks
, task
, task_t
, pset_tasks
) {
3877 for (entry
= vm_map_first_entry(map
);
3878 entry
&& entry
!= vm_map_to_entry(map
);
3879 entry
= entry
->vme_next
) {
3884 * For the time being skip submaps,
3885 * only the kernel can have submaps,
3886 * and unless we are interested in
3887 * kernel objects, we can simply skip
3888 * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm
3889 * for a full solution.
3891 if (entry
->is_sub_map
)
3894 obj
= entry
->object
.vm_object
;
3898 while (obj
!= VM_OBJECT_NULL
) {
3899 if (obj
== object
) {
3901 printf("TASK\t\tMAP\t\tENTRY\n");
3904 printf("0x%x\t0x%x\t0x%x\n",
3915 #endif /* MACH_KDB */
3918 vm_object_populate_with_private(
3920 vm_object_offset_t offset
,
3921 vm_offset_t phys_addr
,
3924 vm_offset_t base_addr
;
3925 vm_object_offset_t base_offset
;
3928 if(!object
->private)
3929 return KERN_FAILURE
;
3931 if((base_addr
= trunc_page(phys_addr
)) != phys_addr
) {
3932 return KERN_FAILURE
;
3936 vm_object_lock(object
);
3937 if(!object
->phys_contiguous
) {
3939 if((base_offset
= trunc_page(offset
)) != offset
) {
3940 vm_object_unlock(object
);
3941 return KERN_FAILURE
;
3943 base_offset
+= object
->paging_offset
;
3945 m
= vm_page_lookup(object
, base_offset
);
3946 if(m
!= VM_PAGE_NULL
) {
3948 vm_page_lock_queues();
3949 m
->fictitious
= FALSE
;
3951 m
->phys_addr
= base_addr
;
3957 object
->absent_count
++;
3959 m
->list_req_pending
= TRUE
;
3960 vm_page_unlock_queues();
3961 } else if (m
->phys_addr
!= base_addr
) {
3962 /* pmap call to clear old mapping */
3963 pmap_page_protect(m
->phys_addr
,
3965 m
->phys_addr
= base_addr
;
3968 while ((m
= vm_page_grab_fictitious())
3970 vm_page_more_fictitious();
3971 vm_page_lock_queues();
3972 m
->fictitious
= FALSE
;
3974 m
->phys_addr
= base_addr
;
3975 m
->list_req_pending
= TRUE
;
3978 object
->absent_count
++;
3979 vm_page_unlock_queues();
3980 vm_page_insert(m
, object
, base_offset
);
3982 base_addr
+= PAGE_SIZE
;
3983 base_offset
+= PAGE_SIZE
;
3987 /* NOTE: we should check the original settings here */
3988 /* if we have a size > zero a pmap call should be made */
3989 /* to disable the range */
3993 /* shadows on contiguous memory are not allowed */
3994 /* we therefore can use the offset field */
3995 object
->shadow_offset
= (vm_object_offset_t
)phys_addr
;
3996 object
->size
= size
;
3998 vm_object_unlock(object
);
3999 return KERN_SUCCESS
;
4003 * memory_object_free_from_cache:
4005 * Walk the vm_object cache list, removing and freeing vm_objects
4006 * which are backed by the pager identified by the caller, (pager_id).
4007 * Remove up to "count" objects, if there are that may available
4010 * Walk the list at most once, return the number of vm_objects
4014 __private_extern__ kern_return_t
4015 memory_object_free_from_cache(
4021 int object_released
= 0;
4024 register vm_object_t object
= VM_OBJECT_NULL
;
4028 if(host == HOST_NULL)
4029 return(KERN_INVALID_ARGUMENT);
4033 vm_object_cache_lock();
4035 queue_iterate(&vm_object_cached_list
, object
,
4036 vm_object_t
, cached_list
) {
4037 if (object
->pager
&& (pager_id
== object
->pager
->pager
)) {
4038 vm_object_lock(object
);
4039 queue_remove(&vm_object_cached_list
, object
,
4040 vm_object_t
, cached_list
);
4041 vm_object_cached_count
--;
4044 * Since this object is in the cache, we know
4045 * that it is initialized and has only a pager's
4046 * (implicit) reference. Take a reference to avoid
4047 * recursive deallocations.
4050 assert(object
->pager_initialized
);
4051 assert(object
->ref_count
== 0);
4052 object
->ref_count
++;
4055 * Terminate the object.
4056 * If the object had a shadow, we let
4057 * vm_object_deallocate deallocate it.
4058 * "pageout" objects have a shadow, but
4059 * maintain a "paging reference" rather
4060 * than a normal reference.
4061 * (We are careful here to limit recursion.)
4063 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
4064 if ((vm_object_terminate(object
) == KERN_SUCCESS
)
4065 && (shadow
!= VM_OBJECT_NULL
)) {
4066 vm_object_deallocate(shadow
);
4069 if(object_released
++ == *count
)
4070 return KERN_SUCCESS
;
4074 vm_object_cache_unlock();
4075 *count
= object_released
;
4076 return KERN_SUCCESS
;
kern_return_t
memory_object_create_named(
	memory_object_t	pager,
	memory_object_offset_t	size,
	memory_object_control_t		*control)
{
	vm_object_t 		object;
	vm_object_hash_entry_t	entry;

	*control = MEMORY_OBJECT_CONTROL_NULL;
	if (pager == MEMORY_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, FALSE);
	if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
	    (entry->object != VM_OBJECT_NULL)) {
		if (entry->object->named == TRUE)
			panic("memory_object_create_named: caller already holds the right");
	}

	vm_object_cache_unlock();
	if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
	    == VM_OBJECT_NULL) {
		return(KERN_INVALID_OBJECT);
	}

	/* wait for object (if any) to be ready */
	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->named = TRUE;
		while (!object->pager_ready) {
			vm_object_wait(object,
				       VM_OBJECT_EVENT_PAGER_READY,
				       THREAD_UNINT);
			vm_object_lock(object);
		}
		*control = object->pager_request;
		vm_object_unlock(object);
	}
	return (KERN_SUCCESS);
}
4125 * Routine: memory_object_recover_named [user interface]
4127 * Attempt to recover a named reference for a VM object.
4128 * VM will verify that the object has not already started
4129 * down the termination path, and if it has, will optionally
4130 * wait for that to finish.
4132 * KERN_SUCCESS - we recovered a named reference on the object
4133 * KERN_FAILURE - we could not recover a reference (object dead)
4134 * KERN_INVALID_ARGUMENT - bad memory object control
4137 memory_object_recover_named(
4138 memory_object_control_t control
,
4139 boolean_t wait_on_terminating
)
4143 vm_object_cache_lock();
4144 object
= memory_object_control_to_vm_object(control
);
4145 if (object
== VM_OBJECT_NULL
) {
4146 vm_object_cache_unlock();
4147 return (KERN_INVALID_ARGUMENT
);
4151 vm_object_lock(object
);
4153 if (object
->terminating
&& wait_on_terminating
) {
4154 vm_object_cache_unlock();
4155 vm_object_wait(object
,
4156 VM_OBJECT_EVENT_PAGING_IN_PROGRESS
,
4158 vm_object_cache_lock();
4162 if (!object
->alive
) {
4163 vm_object_cache_unlock();
4164 vm_object_unlock(object
);
4165 return KERN_FAILURE
;
4168 if (object
->named
== TRUE
) {
4169 vm_object_cache_unlock();
4170 vm_object_unlock(object
);
4171 return KERN_SUCCESS
;
4174 if((object
->ref_count
== 0) && (!object
->terminating
)){
4175 queue_remove(&vm_object_cached_list
, object
,
4176 vm_object_t
, cached_list
);
4177 vm_object_cached_count
--;
4178 XPR(XPR_VM_OBJECT_CACHE
,
4179 "memory_object_recover_named: removing %X, head (%X, %X)\n",
4181 (integer_t
)vm_object_cached_list
.next
,
4182 (integer_t
)vm_object_cached_list
.prev
, 0,0);
4185 vm_object_cache_unlock();
4187 object
->named
= TRUE
;
4188 object
->ref_count
++;
4189 vm_object_res_reference(object
);
4190 while (!object
->pager_ready
) {
4191 vm_object_wait(object
,
4192 VM_OBJECT_EVENT_PAGER_READY
,
4194 vm_object_lock(object
);
4196 vm_object_unlock(object
);
4197 return (KERN_SUCCESS
);
4202 * vm_object_release_name:
4204 * Enforces name semantic on memory_object reference count decrement
4205 * This routine should not be called unless the caller holds a name
4206 * reference gained through the memory_object_create_named.
4208 * If the TERMINATE_IDLE flag is set, the call will return if the
4209 * reference count is not 1. i.e. idle with the only remaining reference
4211 * If the decision is made to proceed the name field flag is set to
4212 * false and the reference count is decremented. If the RESPECT_CACHE
4213 * flag is set and the reference count has gone to zero, the
4214 * memory_object is checked to see if it is cacheable otherwise when
4215 * the reference count is zero, it is simply terminated.
4218 __private_extern__ kern_return_t
4219 vm_object_release_name(
4224 boolean_t original_object
= TRUE
;
4226 while (object
!= VM_OBJECT_NULL
) {
4229 * The cache holds a reference (uncounted) to
4230 * the object. We must locke it before removing
4235 vm_object_cache_lock();
4236 vm_object_lock(object
);
4237 assert(object
->alive
);
4239 assert(object
->named
);
4240 assert(object
->ref_count
> 0);
4243 * We have to wait for initialization before
4244 * destroying or caching the object.
4247 if (object
->pager_created
&& !object
->pager_initialized
) {
4248 assert(!object
->can_persist
);
4249 vm_object_assert_wait(object
,
4250 VM_OBJECT_EVENT_INITIALIZED
,
4252 vm_object_unlock(object
);
4253 vm_object_cache_unlock();
4254 thread_block((void (*)(void)) 0);
4258 if (((object
->ref_count
> 1)
4259 && (flags
& MEMORY_OBJECT_TERMINATE_IDLE
))
4260 || (object
->terminating
)) {
4261 vm_object_unlock(object
);
4262 vm_object_cache_unlock();
4263 return KERN_FAILURE
;
4265 if (flags
& MEMORY_OBJECT_RELEASE_NO_OP
) {
4266 vm_object_unlock(object
);
4267 vm_object_cache_unlock();
4268 return KERN_SUCCESS
;
4272 if ((flags
& MEMORY_OBJECT_RESPECT_CACHE
) &&
4273 (object
->ref_count
== 1)) {
4275 object
->named
= FALSE
;
4276 vm_object_unlock(object
);
4277 vm_object_cache_unlock();
4278 /* let vm_object_deallocate push this thing into */
4279 /* the cache, if that it is where it is bound */
4280 vm_object_deallocate(object
);
4281 return KERN_SUCCESS
;
4283 VM_OBJ_RES_DECR(object
);
4284 shadow
= object
->pageout
?VM_OBJECT_NULL
:object
->shadow
;
4285 if(object
->ref_count
== 1) {
4286 if(vm_object_terminate(object
) != KERN_SUCCESS
) {
4287 if(original_object
) {
4288 return KERN_FAILURE
;
4290 return KERN_SUCCESS
;
4293 if (shadow
!= VM_OBJECT_NULL
) {
4294 original_object
= FALSE
;
4298 return KERN_SUCCESS
;
4300 object
->ref_count
--;
4301 assert(object
->ref_count
> 0);
4303 object
->named
= FALSE
;
4304 vm_object_unlock(object
);
4305 vm_object_cache_unlock();
4306 return KERN_SUCCESS
;
__private_extern__ kern_return_t
vm_object_lock_request(
	vm_object_t			object,
	vm_object_offset_t		offset,
	vm_object_size_t		size,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			prot)
{
	vm_object_offset_t	original_offset = offset;
	boolean_t		should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    (integer_t)object, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);
	offset -= object->paging_offset;

	(void)vm_object_update(object,
			       offset, size, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
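
/*
 * Illustrative sketch (not part of the original source): flushing and
 * returning a dirty range through vm_object_lock_request().  The
 * object/offset/length values are hypothetical.
 *
 *	(void) vm_object_lock_request(obj,
 *			trunc_page(off), round_page(len),
 *			MEMORY_OBJECT_RETURN_DIRTY,
 *			MEMORY_OBJECT_DATA_FLUSH,
 *			VM_PROT_NO_CHANGE);
 */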
#if	TASK_SWAPPER
/*
 *	vm_object_res_deallocate
 *
 *	(recursively) decrement residence counts on vm objects and their shadows.
 *	Called from vm_object_deallocate and when swapping out an object.
 *
 *	The object is locked, and remains locked throughout the function,
 *	even as we iterate down the shadow chain.  Locks on intermediate objects
 *	will be dropped, but not the original object.
 *
 *	NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_deallocate(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_all_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			vm_object_lock(tmp_object);
			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
			assert(object->res_count > 0);
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/*
 *	vm_object_res_reference
 *
 *	Internal function to increment residence count on a vm object
 *	and its shadows.  It is called only from vm_object_reference, and
 *	when swapping in a vm object, via vm_map_swap.
 *
 *	The object is locked, and remains locked throughout the function,
 *	even as we iterate down the shadow chain.  Locks on intermediate objects
 *	will be dropped, but not the original object.
 *
 *	NOTE: this function used to use recursion, rather than iteration.
 */

__private_extern__ void
vm_object_res_reference(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	while ((++object->res_count == 1) &&
	       (object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object)
			vm_object_unlock(object);
		object = tmp_object;
	}
	if (object != orig_object)
		vm_object_unlock(object);
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif	/* TASK_SWAPPER */
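
/*
 * Illustrative note (not part of the original source): the invariant
 * maintained by the two routines above is ref_count >= res_count on
 * every object in a shadow chain.  For example, swapping in a map that
 * uses an object with a two-deep shadow chain raises res_count from 0
 * to 1 on each object in turn, stopping at the first object that was
 * already resident (res_count != 1 after the increment).
 */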
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
#ifdef	vm_object_reference
#undef	vm_object_reference
#endif
__private_extern__ void
vm_object_reference(
	register vm_object_t	object)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	assert(object->ref_count > 0);
	vm_object_reference_locked(object);
	vm_object_unlock(object);
}

#ifdef	MACH_BSD
/*
 *	Scale the vm_object_cache
 *	This is required to make sure that the vm_object_cache is big
 *	enough to effectively cache the mapped file.
 *	This is really important with UBC as all the regular file vnodes
 *	have memory object associated with them. Having this cache too
 *	small results in rapid reclaim of vnodes and hurts performance a LOT!
 *
 *	This is also needed as the number of vnodes can be dynamically scaled.
 */
kern_return_t
adjust_vm_object_cache(vm_size_t oval, vm_size_t nval)
{
	vm_object_cached_max = nval;
	vm_object_cache_trim(FALSE);
	return (KERN_SUCCESS);
}
#endif /* MACH_BSD */
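
/*
 * Illustrative sketch (not part of the original source): when the BSD
 * layer rescales its vnode pool it can keep the object cache in step.
 * new_limit here is a hypothetical value derived from the new vnode
 * count.
 *
 *	old_limit = vm_object_cached_max;
 *	(void) adjust_vm_object_cache(old_limit, new_limit);
 */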