/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* Virtual memory object module.
*/
-#ifdef MACH_BSD
-/* remove as part of compoenent support merge */
-extern int vnode_pager_workaround;
-#endif
-
#include <mach_pagemap.h>
#include <task_swapper.h>
+#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>
+
+#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>
-#include <ipc/ipc_space.h>
+
+#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
+#include <kern/misc_protos.h>
+
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
-#include <kern/misc_protos.h>
-
-
+#include <vm/vm_protos.h>
/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
*
* An object is only deallocated when all "references"
- * are given up. Only one "reference" to a given
- * region of an object should be writeable.
+ * are given up.
*
* Associated with each object is a list of all resident
* memory pages belonging to that object; this list is
* maintained by the "vm_page" module, but locked by the object's
* lock.
*
- * Each object also records the memory object port
+ * Each object also records the memory object reference
* that is used by the kernel to request and write
- * back data (the memory object port, field "pager"),
- * and the ports provided to the memory manager, the server that
- * manages that data, to return data and control its
- * use (the memory object control port, field "pager_request")
- * and for naming (the memory object name port, field "pager_name").
+ * back data (the memory object, field "pager"), etc...
*
* Virtual memory objects are allocated to provide
 * zero-filled memory (vm_allocate) or map a user-defined
 * memory object into a virtual address space (vm_map).
 *
* The kernel relies on a *default memory manager* to
* provide backing storage for the zero-filled virtual
- * memory objects. The memory object ports associated
+ * memory objects. The pager memory objects associated
* with these temporary virtual memory objects are only
- * generated and passed to the default memory manager
- * when it becomes necessary. Virtual memory objects
+ * requested from the default memory manager when it
+ * becomes necessary. Virtual memory objects
* that depend on the default memory manager are called
* "internal". The "pager_created" field is provided to
* indicate whether these ports have ever been allocated.
*/
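/*
 * A minimal sketch of the "internal" object life cycle described
 * above; the trigger shown for pager creation is simplified, and in
 * practice lives in the pageout path:
 *
 *	vm_object_t object = vm_object_allocate(size);
 *	assert(object->internal);	// backed by the default pager
 *	assert(!object->pager_created);	// no backing store requested yet
 *	...
 *	// Only when a page must be evicted does the kernel ask the
 *	// default memory manager for backing store:
 *	if (!object->pager_created)
 *		vm_object_pager_create(object);	// sets pager_created
 */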
/* Forward declarations for internal functions. */
-extern void _vm_object_allocate(
- vm_object_size_t size,
- vm_object_t object);
-
-extern kern_return_t vm_object_terminate(
+static kern_return_t vm_object_terminate(
vm_object_t object);
extern void vm_object_remove(
vm_object_t object);
-extern vm_object_t vm_object_cache_trim(
+static vm_object_t vm_object_cache_trim(
boolean_t called_from_vm_object_deallocate);
-extern void vm_object_deactivate_pages(
- vm_object_t object);
-
-extern void vm_object_abort_activity(
+static void vm_object_deactivate_all_pages(
vm_object_t object);
-extern kern_return_t vm_object_copy_call(
+static kern_return_t vm_object_copy_call(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_t *_result_object);
-extern void vm_object_do_collapse(
+static void vm_object_do_collapse(
vm_object_t object,
vm_object_t backing_object);
-extern void vm_object_do_bypass(
+static void vm_object_do_bypass(
vm_object_t object,
vm_object_t backing_object);
-extern void memory_object_release(
- ipc_port_t pager,
- pager_request_t pager_request);
+static void vm_object_release_pager(
+ memory_object_t pager);
-zone_t vm_object_zone; /* vm backing store zone */
+static zone_t vm_object_zone; /* vm backing store zone */
/*
* All wired-down kernel memory belongs to a single virtual
* memory object (kernel_object) to avoid wasting data structures.
*/
-struct vm_object kernel_object_store;
-vm_object_t kernel_object = &kernel_object_store;
+static struct vm_object kernel_object_store;
+vm_object_t kernel_object;
/*
* The submap object is used as a placeholder for vm_map_submap
* is exported by the vm_map module. The storage is declared
* here because it must be initialized here.
*/
-struct vm_object vm_submap_object_store;
+static struct vm_object vm_submap_object_store;
/*
 * Virtual memory objects are initialized from
 * a template (see vm_object_template), when they
 * are created.
 *
* When adding a new field to the virtual memory
* object structure, be sure to add initialization
- * (see vm_object_init).
+ * (see _vm_object_allocate()).
*/
-struct vm_object vm_object_template;
+static struct vm_object vm_object_template;
/*
* Virtual memory objects that are not referenced by
* from the reference mechanism, so that the lock need
* not be held to make simple references.
*/
-queue_head_t vm_object_cached_list;
-int vm_object_cached_count;
-int vm_object_cached_high; /* highest # of cached objects */
-int vm_object_cached_max = 500; /* may be patched*/
+static queue_head_t vm_object_cached_list;
+static int vm_object_cached_count=0;
+static int vm_object_cached_high; /* highest # cached objects */
+static int vm_object_cached_max = 512; /* may be patched */
-decl_mutex_data(,vm_object_cached_lock_data)
+static decl_mutex_data(,vm_object_cached_lock_data)
#define vm_object_cache_lock() \
mutex_lock(&vm_object_cached_lock_data)
mutex_unlock(&vm_object_cached_lock_data)
#define VM_OBJECT_HASH_COUNT 1024
-queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
-struct zone *vm_object_hash_zone;
+static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
+static struct zone *vm_object_hash_zone;
struct vm_object_hash_entry {
queue_chain_t hash_link; /* hash chain link */
- ipc_port_t pager; /* pager we represent */
+ memory_object_t pager; /* pager we represent */
vm_object_t object; /* corresponding object */
boolean_t waiting; /* someone waiting for
* termination */
#define vm_object_hash(pager) \
((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)
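/*
 * For example, vm_object_hash_lookup() below picks its bucket with
 *
 *	bucket = &vm_object_hashtable[vm_object_hash(pager)];
 *
 * i.e. the pager address is shifted right to discard low-order bits
 * (which are the same for all suitably aligned pagers) and reduced
 * modulo the table size.
 */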
+void vm_object_hash_entry_free(
+ vm_object_hash_entry_t entry);
+
+static void vm_object_reap(vm_object_t object);
+static void vm_object_reap_async(vm_object_t object);
+static void vm_object_reaper_thread(void);
+static queue_head_t vm_object_reaper_queue; /* protected by vm_object_cache_lock() */
+unsigned int vm_object_reap_count = 0;
+unsigned int vm_object_reap_count_async = 0;
+
/*
* vm_object_hash_lookup looks up a pager in the hashtable
* and returns the corresponding entry, with optional removal.
*/
-vm_object_hash_entry_t
+static vm_object_hash_entry_t
vm_object_hash_lookup(
- ipc_port_t pager,
+ memory_object_t pager,
boolean_t remove_entry)
{
register queue_t bucket;
* pager / cache object association in the hashtable.
*/
-void
+static void
vm_object_hash_insert(
vm_object_hash_entry_t entry)
{
queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
}
-vm_object_hash_entry_t
+static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
- ipc_port_t pager)
+ memory_object_t pager)
{
vm_object_hash_entry_t entry;
vm_object_hash_entry_free(
vm_object_hash_entry_t entry)
{
- zfree(vm_object_hash_zone, (vm_offset_t)entry);
+ zfree(vm_object_hash_zone, entry);
}
/*
* Returns a new object with the given size.
*/
-void
+__private_extern__ void
_vm_object_allocate(
vm_object_size_t size,
vm_object_t object)
*object = vm_object_template;
queue_init(&object->memq);
queue_init(&object->msr_q);
-#ifdef UBC_DEBUG
+#ifdef UPL_DEBUG
queue_init(&object->uplq);
-#endif /* UBC_DEBUG */
+#endif /* UPL_DEBUG */
vm_object_lock_init(object);
object->size = size;
}
-vm_object_t
+__private_extern__ vm_object_t
vm_object_allocate(
vm_object_size_t size)
{
register vm_object_t object;
- register ipc_port_t port;
object = (vm_object_t) zalloc(vm_object_zone);
-// dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
-
- _vm_object_allocate(size, object);
+// dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
+
+ if (object != VM_OBJECT_NULL)
+ _vm_object_allocate(size, object);
return object;
}
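/*
 * With the VM_OBJECT_NULL check above, zone exhaustion is now
 * reported to the caller instead of having _vm_object_allocate()
 * write the template through a null pointer; a hypothetical caller
 * would check for it, e.g.:
 *
 *	object = vm_object_allocate(size);
 *	if (object == VM_OBJECT_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 */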
*
* Initialize the VM objects module.
*/
-void
+__private_extern__ void
vm_object_bootstrap(void)
{
- register i;
+ register int i;
vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
- round_page(512*1024),
- round_page(12*1024),
+ round_page_32(512*1024),
+ round_page_32(12*1024),
"vm objects");
queue_init(&vm_object_cached_list);
- mutex_init(&vm_object_cached_lock_data, ETAP_VM_OBJ_CACHE);
+ mutex_init(&vm_object_cached_lock_data, 0);
vm_object_hash_zone =
zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
- round_page(512*1024),
- round_page(12*1024),
+ round_page_32(512*1024),
+ round_page_32(12*1024),
"vm object hash entries");
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
/* memq; Lock; init after allocation */
vm_object_template.size = 0;
- vm_object_template.frozen_size = 0;
+ vm_object_template.memq_hint = VM_PAGE_NULL;
vm_object_template.ref_count = 1;
#if TASK_SWAPPER
vm_object_template.res_count = 1;
vm_object_template.copy = VM_OBJECT_NULL;
vm_object_template.shadow = VM_OBJECT_NULL;
vm_object_template.shadow_offset = (vm_object_offset_t) 0;
+ vm_object_template.cow_hint = ~(vm_offset_t)0;
vm_object_template.true_share = FALSE;
- vm_object_template.pager = IP_NULL;
+ vm_object_template.pager = MEMORY_OBJECT_NULL;
vm_object_template.paging_offset = 0;
- vm_object_template.pager_request = PAGER_REQUEST_NULL;
+ vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
/* msr_q; init after allocation */
vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
vm_object_template.private = FALSE;
vm_object_template.pageout = FALSE;
vm_object_template.alive = TRUE;
- vm_object_template.lock_in_progress = FALSE;
- vm_object_template.lock_restart = FALSE;
+ vm_object_template.purgable = VM_OBJECT_NONPURGABLE;
vm_object_template.silent_overwrite = FALSE;
vm_object_template.advisory_pageout = FALSE;
vm_object_template.shadowed = FALSE;
vm_object_template.terminating = FALSE;
vm_object_template.shadow_severed = FALSE;
vm_object_template.phys_contiguous = FALSE;
+ vm_object_template.nophyscache = FALSE;
/* End bitfields */
+ /* cache bitfields */
+ vm_object_template.wimg_bits = VM_WIMG_DEFAULT;
+
/* cached_list; init after allocation */
vm_object_template.last_alloc = (vm_object_offset_t) 0;
vm_object_template.cluster_size = 0;
/*
* Note that in the following size specifications, we need to add 1 because
- * VM_MAX_KERNEL_ADDRESS is a maximum address, not a size.
+ * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
*/
+
+#ifdef ppc
+ _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
+ kernel_object);
+#else
_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
kernel_object);
+#endif
+ kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
/*
* Initialize the "submap object". Make it as large as the
*/
vm_submap_object = &vm_submap_object_store;
+#ifdef ppc
+ _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
+ vm_submap_object);
+#else
_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
vm_submap_object);
+#endif
+ vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+
/*
* Create an "extra" reference to this object so that we never
* try to deallocate it; zfree doesn't like to be called with
}
void
-vm_object_init(void)
-{
- /*
- * Finish initializing the kernel object.
- */
-}
-
-#if TASK_SWAPPER
-/*
- * vm_object_res_deallocate
- *
- * (recursively) decrement residence counts on vm objects and their shadows.
- * Called from vm_object_deallocate and when swapping out an object.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain. Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-void
-vm_object_res_deallocate(
- vm_object_t object)
+vm_object_reaper_init(void)
{
- vm_object_t orig_object = object;
- /*
- * Object is locked so it can be called directly
- * from vm_object_deallocate. Original object is never
- * unlocked.
- */
- assert(object->res_count > 0);
- while (--object->res_count == 0) {
- assert(object->ref_count >= object->res_count);
- vm_object_deactivate_pages(object);
- /* iterate on shadow, if present */
- if (object->shadow != VM_OBJECT_NULL) {
- vm_object_t tmp_object = object->shadow;
- vm_object_lock(tmp_object);
- if (object != orig_object)
- vm_object_unlock(object);
- object = tmp_object;
- assert(object->res_count > 0);
- } else
- break;
+ kern_return_t kr;
+ thread_t thread;
+
+ queue_init(&vm_object_reaper_queue);
+ kr = kernel_thread_start_priority(
+ (thread_continue_t) vm_object_reaper_thread,
+ NULL,
+ BASEPRI_PREEMPT - 1,
+ &thread);
+ if (kr != KERN_SUCCESS) {
+ panic("failed to launch vm_object_reaper_thread kr=0x%x\n", kr);
}
- if (object != orig_object)
- vm_object_unlock(object);
+ thread_deallocate(thread);
}
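/*
 * Note that kernel_thread_start_priority() hands back a reference
 * on the new thread along with starting it; since the reaper runs
 * for the lifetime of the system and is never joined, that
 * reference is dropped immediately via thread_deallocate() above.
 */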
-/*
- * vm_object_res_reference
- *
- * Internal function to increment residence count on a vm object
- * and its shadows. It is called only from vm_object_reference, and
- * when swapping in a vm object, via vm_map_swap.
- *
- * The object is locked, and remains locked throughout the function,
- * even as we iterate down the shadow chain. Locks on intermediate objects
- * will be dropped, but not the original object.
- *
- * NOTE: this function used to use recursion, rather than iteration.
- */
-
-void
-vm_object_res_reference(
- vm_object_t object)
+__private_extern__ void
+vm_object_init(void)
{
- vm_object_t orig_object = object;
- /*
- * Object is locked, so this can be called directly
- * from vm_object_reference. This lock is never released.
+ /*
+ * Finish initializing the kernel object.
*/
- while ((++object->res_count == 1) &&
- (object->shadow != VM_OBJECT_NULL)) {
- vm_object_t tmp_object = object->shadow;
-
- assert(object->ref_count >= object->res_count);
- vm_object_lock(tmp_object);
- if (object != orig_object)
- vm_object_unlock(object);
- object = tmp_object;
- }
- if (object != orig_object)
- vm_object_unlock(object);
- assert(orig_object->ref_count >= orig_object->res_count);
-}
-#endif /* TASK_SWAPPER */
-
-#if MACH_ASSERT
-/*
- * vm_object_reference:
- *
- * Gets another reference to the given object.
- */
-void
-vm_object_reference(
- register vm_object_t object)
-{
- if (object == VM_OBJECT_NULL)
- return;
-
- vm_object_lock(object);
- assert(object->ref_count > 0);
- object->ref_count++;
- vm_object_res_reference(object);
- vm_object_unlock(object);
}
-#endif /* MACH_ASSERT */
/* remove the typedef below when emergency work-around is taken out */
typedef struct vnode_pager {
- ipc_port_t pager; /* pager */
- ipc_port_t pager_handle; /* pager handle */
- ipc_port_t vm_obj_handle; /* memory object's control handle */
- void *vnode_handle; /* vnode handle */
+ memory_object_t pager;
+ memory_object_t pager_handle; /* pager */
+ memory_object_control_t control_handle; /* memory object's control handle */
+ void *vnode_handle; /* vnode handle */
} *vnode_pager_t;
#define MIGHT_NOT_CACHE_SHADOWS 1
#if MIGHT_NOT_CACHE_SHADOWS
-int cache_shadows = TRUE;
+static int cache_shadows = TRUE;
#endif /* MIGHT_NOT_CACHE_SHADOWS */
/*
*
* No object may be locked.
*/
-void
+__private_extern__ void
vm_object_deallocate(
register vm_object_t object)
{
boolean_t retry_cache_trim = FALSE;
- vm_object_t shadow;
+ vm_object_t shadow = VM_OBJECT_NULL;
// if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
// else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
* the object; we must lock it before removing
* the object.
*/
+ for (;;) {
+ vm_object_cache_lock();
- vm_object_cache_lock();
- vm_object_lock(object);
- assert(object->alive);
+ /*
+ * if we try to take a regular lock here
+ * we risk deadlocking against someone
+ * holding a lock on this object while
+ * trying to vm_object_deallocate a different
+ * object
+ */
+ if (vm_object_lock_try(object))
+ break;
+ vm_object_cache_unlock();
+ mutex_pause(); /* wait a bit */
+ }
+ assert(object->ref_count > 0);
+
+ /*
+ * If the object has a named reference, and only
+ * that reference would remain, inform the pager
+ * about the last "mapping" reference going away.
+ */
+ if ((object->ref_count == 2) && (object->named)) {
+ memory_object_t pager = object->pager;
+
+ /* Notify the Pager that there are no */
+ /* more mappers for this object */
+
+ if (pager != MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+
+ memory_object_unmap(pager);
+
+ for (;;) {
+ vm_object_cache_lock();
+
+ /*
+ * if we try to take a regular lock here
+ * we risk deadlocking against someone
+ * holding a lock on this object while
+ * trying to vm_object_deallocate a different
+ * object
+ */
+ if (vm_object_lock_try(object))
+ break;
+ vm_object_cache_unlock();
+ mutex_pause(); /* wait a bit */
+ }
+ assert(object->ref_count > 0);
+ }
+ }
/*
* Lose the reference. If other references
* until any pending initialization is completed.
*/
- assert(object->ref_count > 0);
- if ((object->ref_count > 1) || (object->terminating)) {
- /* if the object is terminating, it cannot go into */
- /* the cache and we obviously should not call */
- /* terminate again. */
+ /* if the object is terminating, it cannot go into */
+ /* the cache and we obviously should not call */
+ /* terminate again. */
+
+ if ((object->ref_count > 1) || object->terminating) {
object->ref_count--;
- {
- /* The following is an emergency work-around for */
- /* no-mappings left notification to UBC. This fix */
- /* violates numerous layering boundaries, is not */
- /* provable with respect to races for new mappings */
- /* from the UBC layer and is just plain ugly. The */
- /* proper fix requires a guarantee of state */
- /* between the vnode and the memory object and a */
- /* sequenced delivery of empty status. This can */
- /* be provided by the object_named interface and */
- /* the effort to convert over should be undertaken */
- /* at the earliest possible moment. */
- if(object->ref_count == 1) {
- vnode_pager_t vnode_pager;
- if(object->pager) {
- vnode_pager = (vnode_pager_t)
- vnode_port_hash_lookup(
- object->pager);
- if(vnode_pager) {
- extern void ubc_unmap(void *);
- ubc_unmap(vnode_pager->vnode_handle);
- }
- }
- }
- }
vm_object_res_deallocate(object);
- vm_object_unlock(object);
vm_object_cache_unlock();
+
+ if (object->ref_count == 1 &&
+ object->shadow != VM_OBJECT_NULL) {
+ /*
+ * There's only one reference left on this
+ * VM object. We can't tell if it's a valid
+ * one (from a mapping for example) or if this
+ * object is just part of a possibly stale and
+ * useless shadow chain.
+ * We would like to try and collapse it into
+ * its parent, but we don't have any pointers
+ * back to this parent object.
+ * But we can try and collapse this object with
+ * its own shadows, in case these are useless
+ * too...
+ * We can't bypass this object though, since we
+ * don't know if this last reference on it is
+ * meaningful or not.
+ */
+ vm_object_collapse(object, 0, FALSE);
+ }
+
+ vm_object_unlock(object);
if (retry_cache_trim &&
((object = vm_object_cache_trim(TRUE)) !=
VM_OBJECT_NULL)) {
THREAD_UNINT);
vm_object_unlock(object);
vm_object_cache_unlock();
- thread_block((void (*)(void))0);
+ thread_block(THREAD_CONTINUE_NULL);
continue;
}
* way (can do it in-line, mostly).
*/
- if (object->can_persist) {
+ if ((object->can_persist) && (object->alive)) {
/*
* Now it is safe to decrement reference count,
* and to return if reference count is > 0.
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
vm_object_cache_unlock();
- vm_object_deactivate_pages(object);
+ vm_object_deactivate_all_pages(object);
vm_object_unlock(object);
#if MIGHT_NOT_CACHE_SHADOWS
* This object is not cachable; terminate it.
*/
XPR(XPR_VM_OBJECT,
- "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%lX ref %d\n",
- (integer_t)object, object->resident_page_count,
- object->paging_in_progress,
- (natural_t)current_thread(),object->ref_count);
+ "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
+ (integer_t)object, object->resident_page_count,
+ object->paging_in_progress,
+ (void *)current_thread(),object->ref_count);
VM_OBJ_RES_DECR(object); /* XXX ? */
/*
(integer_t)vm_object_cached_list.prev, 0, 0, 0);
object = (vm_object_t) queue_first(&vm_object_cached_list);
+ if(object == (vm_object_t) &vm_object_cached_list) {
+ /* something's wrong with the calling parameter or */
+ /* the value of vm_object_cached_count, just fix */
+ /* and return */
+ if(vm_object_cached_max < 0)
+ vm_object_cached_max = 0;
+ vm_object_cached_count = 0;
+ vm_object_cache_unlock();
+ return VM_OBJECT_NULL;
+ }
vm_object_lock(object);
queue_remove(&vm_object_cached_list, object, vm_object_t,
cached_list);
* Purpose:
* Free all resources associated with a vm_object.
* In/out conditions:
- * Upon entry, the object and the cache must be locked,
+ * Upon entry, the object must be locked,
* and the object must have exactly one reference.
*
* The shadow object reference is left alone.
* upon exit, the cache will be unlocked, and the
* object will cease to exist.
*/
-kern_return_t
+static kern_return_t
vm_object_terminate(
register vm_object_t object)
{
XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
(integer_t)object, object->ref_count, 0, 0, 0);
- /*
- * Make sure the object isn't already being terminated
- */
-
- assert(object->alive);
- if(object->terminating) {
- vm_object_cache_unlock();
- object->ref_count -= 1;
- vm_object_unlock(object);
- return KERN_FAILURE;
- }
- object->terminating = TRUE;
-
- vm_object_cache_unlock();
if (!object->pageout && (!object->temporary || object->can_persist)
&& (object->pager != NULL || object->shadow_severed)) {
+ vm_object_cache_unlock();
while (!queue_empty(&object->memq)) {
/*
* Clear pager_trusted bit so that the pages get yanked
}
vm_page_lock_queues();
+ p->busy = TRUE;
VM_PAGE_QUEUES_REMOVE(p);
vm_page_unlock_queues();
panic("vm_object_terminate.4 0x%x 0x%x", object, p);
if (!p->dirty)
- p->dirty = pmap_is_modified(p->phys_addr);
+ p->dirty = pmap_is_modified(p->phys_page);
- if (p->dirty || p->precious) {
- p->busy = TRUE;
- vm_object_paging_begin(object);
- /* protect the object from re-use/caching while it */
- /* is unlocked */
- vm_object_unlock(object);
+ if ((p->dirty || p->precious) && !p->error && object->alive) {
vm_pageout_cluster(p); /* flush page */
- vm_object_lock(object);
vm_object_paging_wait(object, THREAD_UNINT);
XPR(XPR_VM_OBJECT,
"vm_object_terminate restart, object 0x%X ref %d\n",
VM_PAGE_FREE(p);
}
}
+ vm_object_unlock(object);
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ }
+
+ /*
+ * Make sure the object isn't already being terminated
+ */
+ if(object->terminating) {
+ object->ref_count -= 1;
+ assert(object->ref_count > 0);
+ vm_object_cache_unlock();
+ vm_object_unlock(object);
+ return KERN_FAILURE;
}
+
+ /*
+ * Did somebody get a reference to the object while we were
+ * cleaning it?
+ */
if(object->ref_count != 1) {
object->ref_count -= 1;
+ assert(object->ref_count > 0);
vm_object_res_deallocate(object);
- object->terminating = FALSE;
- /* kick off anyone waiting on terminating */
- vm_object_paging_begin(object);
- vm_object_paging_end(object);
+ vm_object_cache_unlock();
vm_object_unlock(object);
return KERN_FAILURE;
}
- object->alive = FALSE;
-
/*
* Make sure no one can look us up now.
*/
- vm_object_cache_lock();
-
- if(object->pager != IP_NULL) {
- vm_object_hash_entry_t entry;
-
- entry = vm_object_hash_lookup(object->pager, FALSE);
- if (entry != VM_OBJECT_HASH_ENTRY_NULL)
- entry->object = VM_OBJECT_NULL;
- }
-
- vm_object_cache_unlock();
+ object->terminating = TRUE;
+ object->alive = FALSE;
+ vm_object_remove(object);
/*
* Detach the object from its shadow if we are the shadow's
- * copy.
+ * copy. The reference we hold on the shadow must be dropped
+ * by our caller.
*/
if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
!(object->pageout)) {
vm_object_lock(shadow_object);
- assert((shadow_object->copy == object) ||
- (shadow_object->copy == VM_OBJECT_NULL));
- shadow_object->copy = VM_OBJECT_NULL;
+ if (shadow_object->copy == object)
+ shadow_object->copy = VM_OBJECT_NULL;
vm_object_unlock(shadow_object);
}
- /*
- * The pageout daemon might be playing with our pages.
- * Now that the object is dead, it won't touch any more
- * pages, but some pages might already be on their way out.
- * Hence, we wait until the active paging activities have ceased.
- */
- vm_object_paging_wait(object, THREAD_UNINT);
- object->ref_count--;
-#if TASK_SWAPPER
- assert(object->res_count == 0);
-#endif /* TASK_SWAPPER */
-
-Restart:
- assert (object->ref_count == 0);
-
- /*
- * Clean or free the pages, as appropriate.
- * It is possible for us to find busy/absent pages,
- * if some faults on this object were aborted.
- */
- if (object->pageout) {
- assert(shadow_object != VM_OBJECT_NULL);
- assert(shadow_object == object->shadow);
+ if (FALSE && object->paging_in_progress != 0) {
+ /*
+ * There are still some paging_in_progress references
+ * on this object, meaning that there are some paging
+ * or other I/O operations in progress for this VM object.
+ * Such operations take some paging_in_progress references
+ * up front to ensure that the object doesn't go away, but
+ * they may also need to acquire a reference on the VM object,
+ * to map it in kernel space, for example. That means that
+ * they may end up releasing the last reference on the VM
+ * object, triggering its termination, while still holding
+ * paging_in_progress references. Waiting for these
+ * pending paging_in_progress references to go away here would
+ * deadlock.
+ *
+ * To avoid deadlocking, we'll let the vm_object_reaper_thread
+ * complete the VM object termination if it still holds
+ * paging_in_progress references at this point.
+ *
+ * No new paging_in_progress should appear now that the
+ * VM object is "terminating" and not "alive".
+ */
+ vm_object_reap_async(object);
+ vm_object_cache_unlock();
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
- vm_pageout_object_terminate(object);
+ /* complete the VM object termination */
+ vm_object_reap(object);
+ object = VM_OBJECT_NULL;
+ /* cache lock and object lock were released by vm_object_reap() */
- } else if (object->temporary && ! object->can_persist ||
- object->pager == IP_NULL) {
- while (!queue_empty(&object->memq)) {
- p = (vm_page_t) queue_first(&object->memq);
+ return KERN_SUCCESS;
+}
- VM_PAGE_CHECK(p);
+/*
+ * vm_object_reap():
+ *
+ * Complete the termination of a VM object after it's been marked
+ * as "terminating" and "!alive" by vm_object_terminate().
+ *
+ * The VM object cache and the VM object must be locked by caller.
+ * The locks will be released on return and the VM object is no longer valid.
+ */
+void
+vm_object_reap(
+ vm_object_t object)
+{
+ memory_object_t pager;
+ vm_page_t p;
+
+#if DEBUG
+ mutex_assert(&vm_object_cached_lock_data, MA_OWNED);
+ mutex_assert(&object->Lock, MA_OWNED);
+#endif /* DEBUG */
+
+ vm_object_reap_count++;
+
+ /*
+ * The pageout daemon might be playing with our pages.
+ * Now that the object is dead, it won't touch any more
+ * pages, but some pages might already be on their way out.
+ * Hence, we wait until the active paging activities have
+ * ceased before we break the association with the pager
+ * itself.
+ */
+ while (object->paging_in_progress != 0) {
+ vm_object_cache_unlock();
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+ THREAD_UNINT);
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ }
+
+ assert(object->paging_in_progress == 0);
+ pager = object->pager;
+ object->pager = MEMORY_OBJECT_NULL;
+
+ if (pager != MEMORY_OBJECT_NULL)
+ memory_object_control_disable(object->pager_control);
+ vm_object_cache_unlock();
+
+ object->ref_count--;
+#if TASK_SWAPPER
+ assert(object->res_count == 0);
+#endif /* TASK_SWAPPER */
+
+ assert (object->ref_count == 0);
+
+ /*
+ * Clean or free the pages, as appropriate.
+ * It is possible for us to find busy/absent pages,
+ * if some faults on this object were aborted.
+ */
+ if (object->pageout) {
+ assert(object->shadow != VM_OBJECT_NULL);
+
+ vm_pageout_object_terminate(object);
+
+ } else if ((object->temporary && !object->can_persist) ||
+ (pager == MEMORY_OBJECT_NULL)) {
+ while (!queue_empty(&object->memq)) {
+ p = (vm_page_t) queue_first(&object->memq);
+
+ VM_PAGE_CHECK(p);
VM_PAGE_FREE(p);
}
} else if (!queue_empty(&object->memq)) {
- panic("vm_object_terminate: queue just emptied isn't");
+ panic("vm_object_reap: queue just emptied isn't");
}
assert(object->paging_in_progress == 0);
assert(object->ref_count == 0);
- vm_object_remove(object);
-
/*
- * Throw away port rights... note that they may
- * already have been thrown away (by vm_object_destroy
- * or memory_object_destroy).
- *
- * Instead of destroying the control port,
- * we send all rights off to the memory manager,
- * using memory_object_terminate.
+ * If the pager has not already been released by
+ * vm_object_destroy, we need to terminate it and
+ * release our reference to it here.
*/
-
- vm_object_unlock(object);
- if (object->pager != IP_NULL) {
- /* consumes our rights for pager, pager_request */
- memory_object_release(object->pager, object->pager_request);
+ if (pager != MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ vm_object_release_pager(pager);
+ vm_object_lock(object);
}
+
/* kick off anyone waiting on terminating */
- vm_object_lock(object);
+ object->terminating = FALSE;
vm_object_paging_begin(object);
vm_object_paging_end(object);
vm_object_unlock(object);
/*
* Free the space for the object.
*/
+ zfree(vm_object_zone, object);
+ object = VM_OBJECT_NULL;
+}
- zfree(vm_object_zone, (vm_offset_t) object);
- return KERN_SUCCESS;
+void
+vm_object_reap_async(
+ vm_object_t object)
+{
+#if DEBUG
+ mutex_assert(&vm_object_cached_lock_data, MA_OWNED);
+ mutex_assert(&object->Lock, MA_OWNED);
+#endif /* DEBUG */
+
+ vm_object_reap_count_async++;
+
+ /* enqueue the VM object... */
+ queue_enter(&vm_object_reaper_queue, object,
+ vm_object_t, cached_list);
+ /* ... and wake up the reaper thread */
+ thread_wakeup((event_t) &vm_object_reaper_queue);
+}
+
+void
+vm_object_reaper_thread(void)
+{
+ vm_object_t object;
+
+ vm_object_cache_lock();
+
+ while (!queue_empty(&vm_object_reaper_queue)) {
+ queue_remove_first(&vm_object_reaper_queue,
+ object,
+ vm_object_t,
+ cached_list);
+ vm_object_lock(object);
+ assert(object->terminating);
+ assert(!object->alive);
+
+ vm_object_reap(object);
+ /* cache is unlocked and object is no longer valid */
+ object = VM_OBJECT_NULL;
+
+ vm_object_cache_lock();
+ }
+
+ /* wait for more work... */
+ assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
+ vm_object_cache_unlock();
+ thread_block((thread_continue_t) vm_object_reaper_thread);
+ /*NOTREACHED*/
}
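/*
 * The assert_wait()/thread_block() pair above is the usual Mach
 * pattern: declare the event to sleep on while still holding the
 * cache lock, drop the lock, then block with the function itself as
 * the continuation, so the thread restarts at the top of
 * vm_object_reaper_thread() (on a fresh stack) when
 * vm_object_reap_async() does its thread_wakeup().
 */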
/*
* Purpose: Wake up anyone waiting for termination of a pager.
*/
-void
+static void
vm_object_pager_wakeup(
- ipc_port_t pager)
+ memory_object_t pager)
{
vm_object_hash_entry_t entry;
boolean_t waiting = FALSE;
}
/*
- * memory_object_release_name:
- * Enforces name semantic on memory_object reference count decrement
- * This routine should not be called unless the caller holds a name
- * reference gained through the memory_object_named_create or the
- * memory_object_rename call.
- * If the TERMINATE_IDLE flag is set, the call will return if the
- * reference count is not 1. i.e. idle with the only remaining reference
- * being the name.
- * If the decision is made to proceed the name field flag is set to
- * false and the reference count is decremented. If the RESPECT_CACHE
- * flag is set and the reference count has gone to zero, the
- * memory_object is checked to see if it is cacheable otherwise when
- * the reference count is zero, it is simply terminated.
- */
-
-kern_return_t
-memory_object_release_name(
- vm_object_t object,
- int flags)
-{
- vm_object_t shadow;
- boolean_t original_object = TRUE;
-
- while (object != VM_OBJECT_NULL) {
-
- /*
- * The cache holds a reference (uncounted) to
- * the object. We must locke it before removing
- * the object.
- *
- */
-
- vm_object_cache_lock();
- vm_object_lock(object);
- assert(object->alive);
- if(original_object)
- assert(object->named);
- assert(object->ref_count > 0);
-
- /*
- * We have to wait for initialization before
- * destroying or caching the object.
- */
-
- if (object->pager_created && !object->pager_initialized) {
- assert(!object->can_persist);
- vm_object_assert_wait(object,
- VM_OBJECT_EVENT_INITIALIZED,
- THREAD_UNINT);
- vm_object_unlock(object);
- vm_object_cache_unlock();
- thread_block((void (*)(void)) 0);
- continue;
- }
-
- if (((object->ref_count > 1)
- && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
- || (object->terminating)) {
- vm_object_unlock(object);
- vm_object_cache_unlock();
- return KERN_FAILURE;
- } else {
- if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
- vm_object_unlock(object);
- vm_object_cache_unlock();
- return KERN_SUCCESS;
- }
- }
-
- if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
- (object->ref_count == 1)) {
- if(original_object)
- object->named = FALSE;
- vm_object_unlock(object);
- vm_object_cache_unlock();
- /* let vm_object_deallocate push this thing into */
- /* the cache, if that it is where it is bound */
- vm_object_deallocate(object);
- return KERN_SUCCESS;
- }
- VM_OBJ_RES_DECR(object);
- shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
- if(object->ref_count == 1) {
- if(vm_object_terminate(object) != KERN_SUCCESS) {
- if(original_object) {
- return KERN_FAILURE;
- } else {
- return KERN_SUCCESS;
- }
- }
- if (shadow != VM_OBJECT_NULL) {
- original_object = FALSE;
- object = shadow;
- continue;
- }
- return KERN_SUCCESS;
- } else {
- object->ref_count--;
- if(original_object)
- object->named = FALSE;
- vm_object_unlock(object);
- vm_object_cache_unlock();
- return KERN_SUCCESS;
- }
- }
-}
-
-/*
- * Routine: memory_object_release
- * Purpose: Terminate the pager and release port rights,
+ * Routine: vm_object_release_pager
+ * Purpose: Terminate the pager and, upon completion,
+ * release our last reference to it.
* just like memory_object_terminate, except
* that we wake up anyone blocked in vm_object_enter
* waiting for termination message to be queued
* before calling memory_object_init.
*/
-void
-memory_object_release(
- ipc_port_t pager,
- pager_request_t pager_request)
+static void
+vm_object_release_pager(
+ memory_object_t pager)
{
-#ifdef MACH_BSD
- kern_return_t vnode_pager_terminate(ipc_port_t, ipc_port_t);
-#endif
-
- /*
- * Keep a reference to pager port;
- * the terminate might otherwise release all references.
- */
- ipc_port_copy_send(pager);
/*
* Terminate the pager.
*/
-#ifdef MACH_BSD
- if(((rpc_subsystem_t)pager_mux_hash_lookup(pager)) ==
- ((rpc_subsystem_t) &vnode_pager_workaround)) {
- (void) vnode_pager_terminate(pager, pager_request);
- } else {
- (void) memory_object_terminate(pager, pager_request);
- }
-#else
- (void) memory_object_terminate(pager, pager_request);
-#endif
+ (void) memory_object_terminate(pager);
/*
* Wakeup anyone waiting for this terminate
vm_object_pager_wakeup(pager);
/*
- * Release reference to pager port.
- */
- ipc_port_release_send(pager);
-}
-
-/*
- * Routine: vm_object_abort_activity [internal use only]
- * Purpose:
- * Abort paging requests pending on this object.
- * In/out conditions:
- * The object is locked on entry and exit.
- */
-void
-vm_object_abort_activity(
- vm_object_t object)
-{
- register
- vm_page_t p;
- vm_page_t next;
-
- XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
- (integer_t)object, 0, 0, 0, 0);
-
- /*
- * Abort all activity that would be waiting
- * for a result on this memory object.
- *
- * We could also choose to destroy all pages
- * that we have in memory for this object, but
- * we don't.
- */
-
- p = (vm_page_t) queue_first(&object->memq);
- while (!queue_end(&object->memq, (queue_entry_t) p)) {
- next = (vm_page_t) queue_next(&p->listq);
-
- /*
- * If it's being paged in, destroy it.
- * If an unlock has been requested, start it again.
- */
-
- if (p->busy && p->absent) {
- VM_PAGE_FREE(p);
- }
- else {
- if (p->unlock_request != VM_PROT_NONE)
- p->unlock_request = VM_PROT_NONE;
- PAGE_WAKEUP(p);
- }
-
- p = next;
- }
-
- /*
- * Wake up threads waiting for the memory object to
- * become ready.
+ * Release reference to pager.
*/
-
- object->pager_ready = TRUE;
- vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ memory_object_deallocate(pager);
}
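/*
 * The ordering above is deliberate: memory_object_terminate() must
 * be queued before vm_object_pager_wakeup() runs, so that a thread
 * blocked in vm_object_enter() sees the termination already queued
 * before it goes on to send memory_object_init().
 */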
/*
- * Routine: memory_object_destroy [user interface]
+ * Routine: vm_object_destroy
* Purpose:
- * Shut down a memory object, despite the
+ * Shut down a VM object, despite the
* presence of address map (or other) references
* to the vm_object.
*/
kern_return_t
-memory_object_destroy(
- register vm_object_t object,
- kern_return_t reason)
+vm_object_destroy(
+ vm_object_t object,
+ __unused kern_return_t reason)
{
- ipc_port_t old_object;
- pager_request_t old_pager_request;
-
-#ifdef lint
- reason++;
-#endif /* lint */
+ memory_object_t old_pager;
if (object == VM_OBJECT_NULL)
return(KERN_SUCCESS);
/*
- * Remove the port associations immediately.
+ * Remove the pager association immediately.
*
* This will prevent the memory manager from further
* meddling. [If it wanted to flush data or make
vm_object_cache_lock();
vm_object_lock(object);
- vm_object_remove(object);
object->can_persist = FALSE;
object->named = FALSE;
- vm_object_cache_unlock();
+ object->alive = FALSE;
/*
- * Rip out the ports from the vm_object now... this
- * will prevent new memory_object calls from succeeding.
+ * Rip out the pager from the vm_object now...
*/
- old_object = object->pager;
- old_pager_request = object->pager_request;
-
- object->pager = IP_NULL;
- object->pager_request = PAGER_REQUEST_NULL;
+ vm_object_remove(object);
+ old_pager = object->pager;
+ object->pager = MEMORY_OBJECT_NULL;
+ if (old_pager != MEMORY_OBJECT_NULL)
+ memory_object_control_disable(object->pager_control);
+ vm_object_cache_unlock();
/*
- * Wait for existing paging activity (that might
- * have the old ports) to subside.
+ * Wait for the existing paging activity (that got
+ * through before we nulled out the pager) to subside.
*/
vm_object_paging_wait(object, THREAD_UNINT);
vm_object_unlock(object);
/*
- * Shut down the ports now.
- *
- * [Paging operations may be proceeding concurrently --
- * they'll get the null values established above.]
+ * Terminate the object now.
*/
+ if (old_pager != MEMORY_OBJECT_NULL) {
+ vm_object_release_pager(old_pager);
+
+ /*
+ * JMM - Release the caller's reference. This assumes the
+ * caller had a reference to release, which is a big (but
+ * currently valid) assumption if this is driven from the
+ * vnode pager (it is holding a named reference when making
+ * this call).
+ */
+ vm_object_deallocate(object);
- if (old_object != IP_NULL) {
- /* consumes our rights for object, control */
- memory_object_release(old_object, old_pager_request);
}
-
- /*
- * Lose the reference that was donated for this routine
- */
-
- vm_object_deallocate(object);
-
return(KERN_SUCCESS);
}
*
* The object must be locked.
*/
-void
-vm_object_deactivate_pages(
+static void
+vm_object_deactivate_all_pages(
register vm_object_t object)
{
register vm_page_t p;
}
}
+__private_extern__ void
+vm_object_deactivate_pages(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ boolean_t kill_page)
+{
+ vm_object_t orig_object;
+ int pages_moved = 0;
+ int pages_found = 0;
+
+ /*
+ * entered with object lock held, acquire a paging reference to
+ * prevent the memory_object and control ports from
+ * being destroyed.
+ */
+ orig_object = object;
+
+ for (;;) {
+ register vm_page_t m;
+ vm_object_offset_t toffset;
+ vm_object_size_t tsize;
+
+ vm_object_paging_begin(object);
+ vm_page_lock_queues();
+
+ for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
+
+ if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
+
+ pages_found++;
+
+ if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
+
+ assert(!m->laundry);
+
+ m->reference = FALSE;
+ pmap_clear_reference(m->phys_page);
+
+ if ((kill_page) && (object->internal)) {
+ m->precious = FALSE;
+ m->dirty = FALSE;
+ pmap_clear_modify(m->phys_page);
+ vm_external_state_clr(object->existence_map, offset);
+ }
+ VM_PAGE_QUEUES_REMOVE(m);
+
+ assert(!m->laundry);
+ assert(m->object != kernel_object);
+ assert(m->pageq.next == NULL &&
+ m->pageq.prev == NULL);
+ if(m->zero_fill) {
+ queue_enter_first(
+ &vm_page_queue_zf,
+ m, vm_page_t, pageq);
+ } else {
+ queue_enter_first(
+ &vm_page_queue_inactive,
+ m, vm_page_t, pageq);
+ }
+
+ m->inactive = TRUE;
+ if (!m->fictitious)
+ vm_page_inactive_count++;
+
+ pages_moved++;
+ }
+ }
+ }
+ vm_page_unlock_queues();
+ vm_object_paging_end(object);
+
+ if (object->shadow) {
+ vm_object_t tmp_object;
+
+ kill_page = 0;
+
+ offset += object->shadow_offset;
+
+ tmp_object = object->shadow;
+ vm_object_lock(tmp_object);
+
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ } else
+ break;
+ }
+ if (object != orig_object)
+ vm_object_unlock(object);
+}
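/*
 * A minimal usage sketch for the routine above; the caller is
 * assumed to hold a reference and the object lock, and the offset
 * and size values are hypothetical:
 *
 *	vm_object_lock(object);
 *	vm_object_deactivate_pages(object, offset, size,
 *				   TRUE);	// kill_page
 *	vm_object_unlock(object);
 *
 * With kill_page TRUE on an internal object, the pages are not just
 * moved to the inactive queue: they are marked clean and their
 * existence-map state is cleared, so their contents are effectively
 * discarded rather than paged out.
 */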
/*
* Routine: vm_object_pmap_protect
* pmap.
*/
-void
+__private_extern__ void
vm_object_pmap_protect(
register vm_object_t object,
register vm_object_offset_t offset,
- vm_size_t size,
+ vm_object_size_t size,
pmap_t pmap,
- vm_offset_t pmap_start,
+ vm_map_offset_t pmap_start,
vm_prot_t prot)
{
if (object == VM_OBJECT_NULL)
return;
+ size = vm_object_round_page(size);
+ offset = vm_object_trunc_page(offset);
vm_object_lock(object);
- assert(object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
+ assert(object->internal);
while (TRUE) {
- if (object->resident_page_count > atop(size) / 2 &&
- pmap != PMAP_NULL) {
+ if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
vm_object_unlock(object);
pmap_protect(pmap, pmap_start, pmap_start + size, prot);
return;
}
- {
- register vm_page_t p;
- register vm_object_offset_t end;
+ /* if we are doing large ranges with respect to resident */
+ /* page count then we should iterate over pages; otherwise */
+ /* inverse page look-up will be faster */
+ if (ptoa_64(object->resident_page_count / 4) < size) {
+ vm_page_t p;
+ vm_object_offset_t end;
end = offset + size;
queue_iterate(&object->memq, p, vm_page_t, listq) {
if (!p->fictitious &&
(offset <= p->offset) && (p->offset < end)) {
+ vm_map_offset_t start;
- vm_offset_t start = pmap_start +
- (vm_offset_t)(p->offset - offset);
-
- pmap_protect(pmap, start, start + PAGE_SIZE, prot);
+ start = pmap_start + p->offset - offset;
+ pmap_protect(pmap, start, start + PAGE_SIZE_64, prot);
}
}
} else {
if (!p->fictitious &&
(offset <= p->offset) && (p->offset < end)) {
- pmap_page_protect(p->phys_addr,
+ pmap_page_protect(p->phys_page,
prot & ~p->page_lock);
}
}
}
- }
+ } else {
+ vm_page_t p;
+ vm_object_offset_t end;
+ vm_object_offset_t target_off;
+
+ end = offset + size;
+
+ if (pmap != PMAP_NULL) {
+ for(target_off = offset;
+ target_off < end;
+ target_off += PAGE_SIZE) {
+ p = vm_page_lookup(object, target_off);
+ if (p != VM_PAGE_NULL) {
+ vm_offset_t start;
+ start = pmap_start +
+ (vm_offset_t)(p->offset - offset);
+ pmap_protect(pmap, start,
+ start + PAGE_SIZE, prot);
+ }
+ }
+ } else {
+ for(target_off = offset;
+ target_off < end; target_off += PAGE_SIZE) {
+ p = vm_page_lookup(object, target_off);
+ if (p != VM_PAGE_NULL) {
+ pmap_page_protect(p->phys_page,
+ prot & ~p->page_lock);
+ }
+ }
+ }
+ }
if (prot == VM_PROT_NONE) {
/*
* an error, this parameter will contain the value
* VM_OBJECT_NULL.
*/
-kern_return_t
+__private_extern__ kern_return_t
vm_object_copy_slowly(
register vm_object_t src_object,
vm_object_offset_t src_offset,
new_object = vm_object_allocate(size);
new_offset = 0;
+ vm_object_lock(new_object);
assert(size == trunc_page_64(size)); /* Will the loop terminate? */
while ((new_page = vm_page_alloc(new_object, new_offset))
== VM_PAGE_NULL) {
if (!vm_page_wait(interruptible)) {
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
+ vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
return(MACH_SEND_INTERRUPTED);
}
VM_BEHAVIOR_SEQUENTIAL,
&prot, &_result_page, &top_page,
(int *)0,
- &error_code, FALSE, FALSE);
+ &error_code, FALSE, FALSE, NULL, 0);
switch(result) {
case VM_FAULT_SUCCESS:
case VM_FAULT_INTERRUPTED:
vm_page_free(new_page);
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
vm_page_lock_queues();
vm_page_free(new_page);
vm_page_unlock_queues();
+ vm_object_unlock(new_object);
vm_object_deallocate(new_object);
vm_object_deallocate(src_object);
*_result_object = VM_OBJECT_NULL;
* Lose the extra reference, and return our object.
*/
+ vm_object_unlock(new_object);
vm_object_deallocate(src_object);
*_result_object = new_object;
return(KERN_SUCCESS);
*/
/*ARGSUSED*/
-boolean_t
+__private_extern__ boolean_t
vm_object_copy_quickly(
vm_object_t *_object, /* INOUT */
- vm_object_offset_t offset, /* IN */
- vm_object_size_t size, /* IN */
+ __unused vm_object_offset_t offset, /* IN */
+ __unused vm_object_size_t size, /* IN */
boolean_t *_src_needs_copy, /* OUT */
boolean_t *_dst_needs_copy) /* OUT */
{
return(TRUE);
}
-int copy_call_count = 0;
-int copy_call_sleep_count = 0;
-int copy_call_restart_count = 0;
+static int copy_call_count = 0;
+static int copy_call_sleep_count = 0;
+static int copy_call_restart_count = 0;
/*
* Routine: vm_object_copy_call [internal]
* If the return value indicates an error, this parameter
* is not valid.
*/
-kern_return_t
+static kern_return_t
vm_object_copy_call(
vm_object_t src_object,
vm_object_offset_t src_offset,
*/
copy_call_count++;
while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
- vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
+ vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
THREAD_UNINT);
- vm_object_lock(src_object);
copy_call_restart_count++;
}
*/
vm_object_lock(src_object);
while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
- vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
+ vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
THREAD_UNINT);
- vm_object_lock(src_object);
copy_call_sleep_count++;
}
Retry:
if (check_ready == TRUE) {
vm_object_lock(copy);
while (!copy->pager_ready) {
- vm_object_wait(copy, VM_OBJECT_EVENT_PAGER_READY,
- FALSE);
- vm_object_lock(copy);
+ vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
}
vm_object_unlock(copy);
}
return KERN_SUCCESS;
}
-int copy_delayed_lock_collisions = 0;
-int copy_delayed_max_collisions = 0;
-int copy_delayed_lock_contention = 0;
-int copy_delayed_protect_iterate = 0;
-int copy_delayed_protect_lookup = 0;
-int copy_delayed_protect_lookup_wait = 0;
+static int copy_delayed_lock_collisions = 0;
+static int copy_delayed_max_collisions = 0;
+static int copy_delayed_lock_contention = 0;
+static int copy_delayed_protect_iterate = 0;
/*
* Routine: vm_object_copy_delayed [internal]
* the asymmetric copy-on-write algorithm.
*
* In/out conditions:
- * The object must be unlocked on entry.
+ * The src_object must be locked on entry. It will be unlocked
+ * on exit - so the caller must also hold a reference to it.
*
* This routine will not block waiting for user-generated
* events. It is not interruptible.
*/
-vm_object_t
+__private_extern__ vm_object_t
vm_object_copy_delayed(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_t new_copy = VM_OBJECT_NULL;
vm_object_t old_copy;
vm_page_t p;
- vm_object_size_t copy_size;
+ vm_object_size_t copy_size = src_offset + size;
int collisions = 0;
/*
*/
Retry:
- vm_object_lock(src_object);
+ /*
+ * Wait for paging in progress.
+ */
+ if (!src_object->true_share)
+ vm_object_paging_wait(src_object, THREAD_UNINT);
+
/*
* See whether we can reuse the result of a previous
* copy operation.
if (collisions > copy_delayed_max_collisions)
copy_delayed_max_collisions = collisions;
+ vm_object_lock(src_object);
goto Retry;
}
* It has not been modified.
*
* Return another reference to
- * the existing copy-object.
+ * the existing copy-object if
+ * we can safely grow it (if
+ * needed).
*/
- assert(old_copy->ref_count > 0);
- old_copy->ref_count++;
-
- if (old_copy->size < src_offset+size)
- old_copy->size = src_offset+size;
-#if TASK_SWAPPER
- /*
- * We have to reproduce some of the code from
- * vm_object_res_reference because we've taken
- * the locks out of order here, and deadlock
- * would result if we simply called that function.
- */
- if (++old_copy->res_count == 1) {
- assert(old_copy->shadow == src_object);
- vm_object_res_reference(src_object);
+ if (old_copy->size < copy_size) {
+ /*
+ * We can't perform a delayed copy if any of the
+ * pages in the extended range are wired (because
+ * we can't safely take write permission away from
+ * wired pages). If the pages aren't wired, then
+ * go ahead and protect them.
+ */
+ copy_delayed_protect_iterate++;
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious &&
+ p->offset >= old_copy->size &&
+ p->offset < copy_size) {
+ if (p->wire_count > 0) {
+ vm_object_unlock(old_copy);
+ vm_object_unlock(src_object);
+
+ if (new_copy != VM_OBJECT_NULL) {
+ vm_object_unlock(new_copy);
+ vm_object_deallocate(new_copy);
+ }
+
+ return VM_OBJECT_NULL;
+ } else {
+ pmap_page_protect(p->phys_page,
+ (VM_PROT_ALL & ~VM_PROT_WRITE &
+ ~p->page_lock));
+ }
+ }
+ }
+ old_copy->size = copy_size;
}
-#endif /* TASK_SWAPPER */
-
+
+ vm_object_reference_locked(old_copy);
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
return(old_copy);
}
+
+ /*
+ * Adjust the size argument so that the newly-created
+ * copy object will be large enough to back either the
+ * old copy object or the new mapping.
+ */
+ if (old_copy->size > copy_size)
+ copy_size = old_copy->size;
+
if (new_copy == VM_OBJECT_NULL) {
vm_object_unlock(old_copy);
vm_object_unlock(src_object);
- new_copy = vm_object_allocate(src_offset + size);
+ new_copy = vm_object_allocate(copy_size);
+ vm_object_lock(src_object);
vm_object_lock(new_copy);
goto Retry;
}
-
- /*
- * Adjust the size argument so that the newly-created
- * copy object will be large enough to back either the
- * new old copy object or the new mapping.
- */
- if (old_copy->size > src_offset+size)
- size = old_copy->size - src_offset;
+ new_copy->size = copy_size;
/*
* The copy-object is always made large enough to
assert((old_copy->shadow == src_object) &&
(old_copy->shadow_offset == (vm_object_offset_t) 0));
+ } else if (new_copy == VM_OBJECT_NULL) {
+ vm_object_unlock(src_object);
+ new_copy = vm_object_allocate(copy_size);
+ vm_object_lock(src_object);
+ vm_object_lock(new_copy);
+ goto Retry;
+ }
+
+ /*
+ * We now have the src object locked, and the new copy object
+ * allocated and locked (and potentially the old copy locked).
+ * Before we go any further, make sure we can still perform
+ * a delayed copy, as the situation may have changed.
+ *
+ * Specifically, we can't perform a delayed copy if any of the
+ * pages in the range are wired (because we can't safely take
+ * write permission away from wired pages). If the pages aren't
+ * wired, then go ahead and protect them.
+ */
+ copy_delayed_protect_iterate++;
+ queue_iterate(&src_object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious && p->offset < copy_size) {
+ if (p->wire_count > 0) {
+ if (old_copy)
+ vm_object_unlock(old_copy);
+ vm_object_unlock(src_object);
+ vm_object_unlock(new_copy);
+ vm_object_deallocate(new_copy);
+ return VM_OBJECT_NULL;
+ } else {
+ pmap_page_protect(p->phys_page,
+ (VM_PROT_ALL & ~VM_PROT_WRITE &
+ ~p->page_lock));
+ }
+ }
+ }
+
+ if (old_copy != VM_OBJECT_NULL) {
/*
* Make the old copy-object shadow the new one.
* It will receive no more pages from the original
#endif
vm_object_unlock(old_copy); /* done with old_copy */
- } else if (new_copy == VM_OBJECT_NULL) {
- vm_object_unlock(src_object);
- new_copy = vm_object_allocate(src_offset + size);
- vm_object_lock(new_copy);
- goto Retry;
- }
-
- /*
- * Readjust the copy-object size if necessary.
- */
- copy_size = new_copy->size;
- if (copy_size < src_offset+size) {
- copy_size = src_offset+size;
- new_copy->size = copy_size;
}
/*
* Point the new copy at the existing object.
*/
-
new_copy->shadow = src_object;
new_copy->shadow_offset = 0;
new_copy->shadowed = TRUE; /* caller must set needs_copy */
src_object->ref_count++;
VM_OBJ_RES_INCR(src_object);
src_object->copy = new_copy;
+ vm_object_unlock(src_object);
vm_object_unlock(new_copy);
- /*
- * Mark all (current) pages of the existing object copy-on-write.
- * This object may have a shadow chain below it, but
- * those pages will already be marked copy-on-write.
- */
-
- vm_object_paging_wait(src_object, THREAD_UNINT);
- copy_delayed_protect_iterate++;
- queue_iterate(&src_object->memq, p, vm_page_t, listq) {
- if (!p->fictitious)
- pmap_page_protect(p->phys_addr,
- (VM_PROT_ALL & ~VM_PROT_WRITE &
- ~p->page_lock));
- }
- vm_object_unlock(src_object);
XPR(XPR_VM_OBJECT,
"vm_object_copy_delayed: used copy object %X for source %X\n",
(integer_t)new_copy, (integer_t)src_object, 0, 0, 0);
* declared strategy. This operation may block,
* and may be interrupted.
*/
-kern_return_t
+__private_extern__ kern_return_t
vm_object_copy_strategically(
register vm_object_t src_object,
vm_object_offset_t src_offset,
*/
while (!src_object->internal && !src_object->pager_ready) {
+ wait_result_t wait_result;
- vm_object_wait( src_object,
- VM_OBJECT_EVENT_PAGER_READY,
- interruptible);
- if (interruptible &&
- (current_thread()->wait_result != THREAD_AWAKENED)) {
+ wait_result = vm_object_sleep( src_object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ interruptible);
+ if (wait_result != THREAD_AWAKENED) {
+ vm_object_unlock(src_object);
*dst_object = VM_OBJECT_NULL;
*dst_offset = 0;
*dst_needs_copy = FALSE;
return(MACH_SEND_INTERRUPTED);
}
- vm_object_lock(src_object);
}
copy_strategy = src_object->copy_strategy;
*/
switch (copy_strategy) {
+ case MEMORY_OBJECT_COPY_DELAY:
+ *dst_object = vm_object_copy_delayed(src_object,
+ src_offset, size);
+ if (*dst_object != VM_OBJECT_NULL) {
+ *dst_offset = src_offset;
+ *dst_needs_copy = TRUE;
+ result = KERN_SUCCESS;
+ break;
+ }
+ vm_object_lock(src_object);
+ /* fall thru when delayed copy not allowed */
+
case MEMORY_OBJECT_COPY_NONE:
result = vm_object_copy_slowly(src_object, src_offset, size,
interruptible, dst_object);
}
break;
- case MEMORY_OBJECT_COPY_DELAY:
- vm_object_unlock(src_object);
- *dst_object = vm_object_copy_delayed(src_object,
- src_offset, size);
- *dst_offset = src_offset;
- *dst_needs_copy = TRUE;
- result = KERN_SUCCESS;
- break;
-
case MEMORY_OBJECT_COPY_SYMMETRIC:
XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0);
vm_object_unlock(src_object);
*/
boolean_t vm_object_shadow_check = FALSE;
-boolean_t
+__private_extern__ boolean_t
vm_object_shadow(
vm_object_t *object, /* IN/OUT */
vm_object_offset_t *offset, /* IN/OUT */
/*
* The relationship between vm_object structures and
- * the memory_object ports requires careful synchronization.
+ * the memory_object requires careful synchronization.
*
- * All associations are created by vm_object_enter. All three
- * port fields are filled in, as follows:
- * pager: the memory_object port itself, supplied by
+ * All associations are created by memory_object_create_named
+ * for external pagers and vm_object_pager_create for internal
+ * objects as follows:
+ *
+ * pager: the memory_object itself, supplied by
* the user requesting a mapping (or the kernel,
* when initializing internal objects); the
* kernel simulates holding send rights by keeping
* a port reference;
+ *
- *	pager_request:
- *		the memory object control port,
- *		created by the kernel; the kernel holds
- *		receive (and ownership) rights to this
- *		port, but no other references.
+ *	pager_control:
+ *		the memory object control reference,
+ *		created and held by the kernel.
- * All of the ports are referenced by their global names.
*
* When initialization is complete, the "initialized" field
* is asserted. Other mappings using a particular memory object,
* This is the normal case, and is done even
* though one of the other cases has already been
* done.
- * vm_object_destroy:
- * The memory_object port has been destroyed,
- * meaning that the kernel cannot flush dirty
- * pages or request new data or unlock existing
- * data.
* memory_object_destroy:
* The memory manager has requested that the
- * kernel relinquish rights to the memory object
- * port. [The memory manager may not want to
- * destroy the port, but may wish to refuse or
- * tear down existing memory mappings.]
+ * kernel relinquish references to the memory
+ * object. [The memory manager may not want to
+ * destroy the memory object, but may wish to
+ * refuse or tear down existing memory mappings.]
+ *
* Each routine that breaks an association must break all of
* them at once. At some later time, that routine must clear
- * the vm_object port fields and release the port rights.
+ * the pager field and release the memory object references.
* [Furthermore, each routine must cope with the simultaneous
* or previous operations of the others.]
*
* In addition to the lock on the object, the vm_object_cache_lock
- * governs the port associations. References gained through the
- * port association require use of the cache lock.
+ * governs the associations. References gained through the
+ * association require use of the cache lock.
*
- * Because the port fields may be cleared spontaneously, they
+ * Because the pager field may be cleared spontaneously, it
* cannot be used to determine whether a memory object has
* ever been associated with a particular vm_object. [This
* knowledge is important to the shadow object mechanism.]
* For this reason, an additional "created" attribute is
* provided.
*
- * During various paging operations, the port values found in the
- * vm_object must be valid. To prevent these port rights from being
- * released, and to prevent the port associations from changing
+ * During various paging operations, the pager reference found in the
+ * vm_object must be valid.  To prevent this reference from being released
* (other than being removed, i.e., made null), routines may use
* the vm_object_paging_begin/end routines [actually, macros].
* The implementation uses the "paging_in_progress" and "wanted" fields.
- * [Operations that alter the validity of the port values include the
+ * [Operations that alter the validity of the pager values include the
* termination routines and vm_object_collapse.]
*/
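+
+/*
+ *	For example, callers of these macros typically bracket an
+ *	operation that may block or drop the object lock roughly as
+ *	follows (an illustrative sketch of the discipline described
+ *	above, not new code):
+ *
+ *		vm_object_lock(object);
+ *		vm_object_paging_begin(object);
+ *		... operation that may block or drop the object lock ...
+ *		vm_object_paging_end(object);
+ *		vm_object_unlock(object);
+ *
+ *	vm_object_paging_end() wakes any thread that set the "wanted"
+ *	bit while "paging_in_progress" was nonzero.
+ */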
-#define IKOT_PAGER_LOOKUP_TYPE IKOT_PAGING_REQUEST
+#if 0
+static void vm_object_abort_activity(
+ vm_object_t object);
-vm_object_t
-vm_object_lookup(
- ipc_port_t port)
+/*
+ * Routine: vm_object_abort_activity [internal use only]
+ * Purpose:
+ * Abort paging requests pending on this object.
+ * In/out conditions:
+ * The object is locked on entry and exit.
+ */
+static void
+vm_object_abort_activity(
+ vm_object_t object)
{
- vm_object_t object;
+ register
+ vm_page_t p;
+ vm_page_t next;
-start_over:
- object = VM_OBJECT_NULL;
+ XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
+ (integer_t)object, 0, 0, 0, 0);
- if (IP_VALID(port)) {
- vm_object_cache_lock();
- ip_lock(port);
- if (ip_active(port) &&
- (ip_kotype(port) == IKOT_PAGER_LOOKUP_TYPE)) {
- object = (vm_object_t) port->ip_kobject;
- if (!vm_object_lock_try(object)) {
- /*
- * failed to acquire object lock. Drop the
- * other two locks and wait for it, then go
- * back and start over in case the port
- * associations changed in the interim.
- */
- ip_unlock(port);
- vm_object_cache_unlock();
- vm_object_lock(object);
- vm_object_unlock(object);
- goto start_over;
- }
+ /*
+ * Abort all activity that would be waiting
+ * for a result on this memory object.
+ *
+ * We could also choose to destroy all pages
+ * that we have in memory for this object, but
+ * we don't.
+ */
- assert(object->alive);
-
- if((object->ref_count == 0) && (!object->terminating)){
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- XPR(XPR_VM_OBJECT_CACHE,
- "vm_object_lookup: removing %X, head (%X, %X)\n",
- (integer_t)object,
- (integer_t)vm_object_cached_list.next,
- (integer_t)vm_object_cached_list.prev, 0,0);
- }
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ next = (vm_page_t) queue_next(&p->listq);
- object->ref_count++;
- vm_object_res_reference(object);
- vm_object_unlock(object);
+ /*
+ * If it's being paged in, destroy it.
+ * If an unlock has been requested, start it again.
+ */
+
+ if (p->busy && p->absent) {
+ VM_PAGE_FREE(p);
}
- ip_unlock(port);
- vm_object_cache_unlock();
+ else {
+ if (p->unlock_request != VM_PROT_NONE)
+ p->unlock_request = VM_PROT_NONE;
+ PAGE_WAKEUP(p);
+ }
+
+ p = next;
}
- return object;
-}
+ /*
+ * Wake up threads waiting for the memory object to
+ * become ready.
+ */
+ object->pager_ready = TRUE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+}
+/*
+ * Routine: vm_object_pager_dead
+ *
+ * Purpose:
+ *		A port is being destroyed, and the IPC kobject code
+ * can't tell if it represents a pager port or not.
+ * So this function is called each time it sees a port
+ * die.
+ * THIS IS HORRIBLY INEFFICIENT. We should only call
+ * this routine if we had requested a notification on
+ * the port.
+ */
-void
-vm_object_destroy(
+__private_extern__ void
+vm_object_pager_dead(
ipc_port_t pager)
{
vm_object_t object;
vm_object_hash_entry_t entry;
- pager_request_t old_pager_request;
/*
* Perform essentially the same operations as in vm_object_lookup,
assert(object->pager == pager);
/*
- * Remove the port associations.
+ * Remove the pager association.
*
* Note that the memory_object itself is dead, so
* we don't bother with it.
*/
- object->pager = IP_NULL;
- vm_object_remove(object);
-
- old_pager_request = object->pager_request;
-
- object->pager_request = PAGER_REQUEST_NULL;
+ object->pager = MEMORY_OBJECT_NULL;
vm_object_unlock(object);
vm_object_cache_unlock();
vm_object_pager_wakeup(pager);
/*
- * Clean up the port references. Note that there's no
+ * Release the pager reference. Note that there's no
* point in trying the memory_object_terminate call
- * because the memory_object itself is dead.
+ * because the memory_object itself is dead. Also
+ * release the memory_object_control reference, since
+ * the pager didn't do that either.
*/
- ipc_port_release_send(pager);
-
- if ((ipc_port_t)old_pager_request != IP_NULL)
- ipc_port_dealloc_kernel((ipc_port_t)old_pager_request);
+ memory_object_deallocate(pager);
+ memory_object_control_deallocate(object->pager_request);
+
/*
* Restart pending page requests
*/
vm_object_lock(object);
-
vm_object_abort_activity(object);
-
vm_object_unlock(object);
/*
vm_object_deallocate(object);
}
+#endif
/*
* Routine: vm_object_enter
*/
vm_object_t
vm_object_enter(
- ipc_port_t pager,
+ memory_object_t pager,
vm_object_size_t size,
boolean_t internal,
boolean_t init,
- boolean_t check_named)
+ boolean_t named)
{
register vm_object_t object;
vm_object_t new_object;
boolean_t must_init;
- ipc_port_t pager_request;
vm_object_hash_entry_t entry, new_entry;
-#ifdef MACH_BSD
-kern_return_t vnode_pager_init( ipc_port_t, ipc_port_t, vm_object_size_t);
-#endif
- if (!IP_VALID(pager))
+ if (pager == MEMORY_OBJECT_NULL)
return(vm_object_allocate(size));
new_object = VM_OBJECT_NULL;
- *	Look for an object associated with this port.
+ *	Look for an object associated with this pager.
*/
-restart:
vm_object_cache_lock();
- for (;;) {
+ do {
entry = vm_object_hash_lookup(pager, FALSE);
- /*
- * If a previous object is being terminated,
- * we must wait for the termination message
- * to be queued.
- *
- * We set kobject to a non-null value to let the
- * terminator know that someone is waiting.
- * Among the possibilities is that the port
- * could die while we're waiting. Must restart
- * instead of continuing the loop.
- */
-
- if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
- if (entry->object != VM_OBJECT_NULL) {
- if(check_named) {
- if(entry->object->named) {
- vm_object_cache_unlock();
- return(entry->object);
- }
- }
- break;
+ if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
+ if (new_object == VM_OBJECT_NULL) {
+ /*
+ * We must unlock to create a new object;
+ * if we do so, we must try the lookup again.
+ */
+ vm_object_cache_unlock();
+ assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
+ new_entry = vm_object_hash_entry_alloc(pager);
+ new_object = vm_object_allocate(size);
+ vm_object_cache_lock();
+ } else {
+ /*
+ * Lookup failed twice, and we have something
+ * to insert; set the object.
+ */
+ vm_object_hash_insert(new_entry);
+ entry = new_entry;
+ entry->object = new_object;
+ new_entry = VM_OBJECT_HASH_ENTRY_NULL;
+ new_object = VM_OBJECT_NULL;
+ must_init = TRUE;
}
-
+ } else if (entry->object == VM_OBJECT_NULL) {
+ /*
+ * If a previous object is being terminated,
+ * we must wait for the termination message
+			 * to be queued (and look up the entry again).
+ */
entry->waiting = TRUE;
+ entry = VM_OBJECT_HASH_ENTRY_NULL;
assert_wait((event_t) pager, THREAD_UNINT);
vm_object_cache_unlock();
- thread_block((void (*)(void))0);
- goto restart;
+ thread_block(THREAD_CONTINUE_NULL);
+ vm_object_cache_lock();
}
-
- /*
- * We must unlock to create a new object;
- * if we do so, we must try the lookup again.
- */
-
- if (new_object == VM_OBJECT_NULL) {
- vm_object_cache_unlock();
- assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
- new_entry = vm_object_hash_entry_alloc(pager);
- new_object = vm_object_allocate(size);
- vm_object_cache_lock();
- } else {
- /*
- * Lookup failed twice, and we have something
- * to insert; set the object.
- */
-
- if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
- vm_object_hash_insert(new_entry);
- entry = new_entry;
- new_entry = VM_OBJECT_HASH_ENTRY_NULL;
- }
-
- entry->object = new_object;
- new_object = VM_OBJECT_NULL;
- must_init = TRUE;
- }
- }
+ } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
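+
+	/*
+	 *	On exit, "entry" names either a preexisting object for
+	 *	this pager, or the object we just allocated and inserted
+	 *	(in which case "must_init" is TRUE).
+	 */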
object = entry->object;
assert(object != VM_OBJECT_NULL);
if (!must_init) {
vm_object_lock(object);
- assert(object->pager_created);
assert(!internal || object->internal);
- if (check_named)
+ if (named) {
+ assert(!object->named);
object->named = TRUE;
+ }
if (object->ref_count == 0) {
XPR(XPR_VM_OBJECT_CACHE,
"vm_object_enter: removing %x from cache, head (%x, %x)\n",
vm_object_hash_entry_free(new_entry);
if (must_init) {
+ memory_object_control_t control;
/*
-		 * Allocate request port.
+		 * Allocate the memory object control.
*/
- pager_request = ipc_port_alloc_kernel();
- assert (pager_request != IP_NULL);
- ipc_kobject_set(pager_request, (ipc_kobject_t) object,
- IKOT_PAGING_REQUEST);
+ control = memory_object_control_allocate(object);
+ assert (control != MEMORY_OBJECT_CONTROL_NULL);
vm_object_lock(object);
+ assert(object != kernel_object);
/*
- * Copy the naked send right we were given.
+ * Copy the reference we were given.
*/
- pager = ipc_port_copy_send(pager);
- if (!IP_VALID(pager))
- panic("vm_object_enter: port died"); /* XXX */
-
+ memory_object_reference(pager);
object->pager_created = TRUE;
object->pager = pager;
object->internal = internal;
/* copy strategy invalid until set by memory manager */
object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
}
- object->pager_request = pager_request;
+ object->pager_control = control;
object->pager_ready = FALSE;
- if (check_named)
- object->named = TRUE;
vm_object_unlock(object);
/*
* Let the pager know we're using it.
*/
-#ifdef MACH_BSD
- if(((rpc_subsystem_t)pager_mux_hash_lookup(pager)) ==
- ((rpc_subsystem_t) &vnode_pager_workaround)) {
- (void) vnode_pager_init(pager,
- object->pager_request,
- PAGE_SIZE);
- } else {
- (void) memory_object_init(pager,
- object->pager_request,
- PAGE_SIZE);
- }
-#else
- (void) memory_object_init(pager,
- object->pager_request,
- PAGE_SIZE);
-#endif
+ (void) memory_object_init(pager,
+ object->pager_control,
+ PAGE_SIZE);
vm_object_lock(object);
+ if (named)
+ object->named = TRUE;
if (internal) {
object->pager_ready = TRUE;
vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
*/
while (!object->pager_initialized) {
- vm_object_wait( object,
+ vm_object_sleep(object,
VM_OBJECT_EVENT_INITIALIZED,
THREAD_UNINT);
- vm_object_lock(object);
}
vm_object_unlock(object);
vm_object_pager_create(
register vm_object_t object)
{
- ipc_port_t pager;
+ memory_object_t pager;
vm_object_hash_entry_t entry;
#if MACH_PAGEMAP
vm_object_size_t size;
XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
(integer_t)object, 0,0,0,0);
+ assert(object != kernel_object);
+
if (memory_manager_default_check() != KERN_SUCCESS)
return;
-	 * wait for them to finish initializing the ports
+	 * wait for them to finish initializing the pager
*/
while (!object->pager_initialized) {
- vm_object_wait( object,
- VM_OBJECT_EVENT_INITIALIZED,
- THREAD_UNINT);
- vm_object_lock(object);
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
}
vm_object_paging_end(object);
return;
#endif /* MACH_PAGEMAP */
/*
- * Create the pager ports, and associate them with this object.
+ * Create the [internal] pager, and associate it with this object.
*
- * We make the port association here so that vm_object_enter()
+ * We make the association here so that vm_object_enter()
* can look up the object to complete initializing it. No
* user will ever map this object.
*/
{
- ipc_port_t DMM;
+ memory_object_default_t dmm;
vm_size_t cluster_size;
- /* acquire a naked send right for the DMM */
- DMM = memory_manager_default_reference(&cluster_size);
+ /* acquire a reference for the default memory manager */
+ dmm = memory_manager_default_reference(&cluster_size);
assert(cluster_size >= PAGE_SIZE);
object->cluster_size = cluster_size; /* XXX ??? */
assert(object->temporary);
- /* consumes the naked send right for DMM */
- (void) memory_object_create(DMM, &pager, object->size);
- assert(IP_VALID(pager));
+ /* create our new memory object */
+ (void) memory_object_create(dmm, object->size, &pager);
+
+ memory_object_default_deallocate(dmm);
}
entry = vm_object_hash_entry_alloc(pager);
vm_object_cache_unlock();
/*
- * A naked send right was returned by
+ * A reference was returned by
* memory_object_create(), and it is
* copied by vm_object_enter().
*/
panic("vm_object_pager_create: mismatch");
/*
- * Drop the naked send right.
+ * Drop the reference we were passed.
*/
- ipc_port_release_send(pager);
+ memory_object_deallocate(pager);
vm_object_lock(object);
* Conditions:
* The object cache must be locked.
*/
-void
+__private_extern__ void
vm_object_remove(
vm_object_t object)
{
- ipc_port_t port;
+ memory_object_t pager;
- if ((port = object->pager) != IP_NULL) {
+ if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
vm_object_hash_entry_t entry;
- entry = vm_object_hash_lookup(port, FALSE);
+ entry = vm_object_hash_lookup(pager, FALSE);
if (entry != VM_OBJECT_HASH_ENTRY_NULL)
entry->object = VM_OBJECT_NULL;
}
- if ((port = object->pager_request) != IP_NULL) {
- if (ip_kotype(port) == IKOT_PAGING_REQUEST)
- ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
- else if (ip_kotype(port) != IKOT_NONE)
- panic("vm_object_remove: bad request port");
- }
}
/*
* Counts for normal collapses and bypasses.
* Debugging variables, to watch or disable collapse.
*/
-long object_collapses = 0;
-long object_bypasses = 0;
+static long object_collapses = 0;
+static long object_bypasses = 0;
+
+static boolean_t vm_object_collapse_allowed = TRUE;
+static boolean_t vm_object_bypass_allowed = TRUE;
-boolean_t vm_object_collapse_allowed = TRUE;
-boolean_t vm_object_bypass_allowed = TRUE;
+static int vm_external_discarded;
+static int vm_external_collapsed;
+
+unsigned long vm_object_collapse_encrypted = 0;
-int vm_external_discarded;
-int vm_external_collapsed;
/*
- * vm_object_do_collapse:
- *
- * Collapse an object with the object backing it.
- * Pages in the backing object are moved into the
- * parent, and the backing object is deallocated.
- *
- * Both objects and the cache are locked; the page
- * queues are unlocked.
+ * Routine: vm_object_do_collapse
+ * Purpose:
+ * Collapse an object with the object backing it.
+ * Pages in the backing object are moved into the
+ * parent, and the backing object is deallocated.
+ * Conditions:
+ * Both objects and the cache are locked; the page
+ * queues are unlocked.
*
*/
-void
+static void
vm_object_do_collapse(
vm_object_t object,
vm_object_t backing_object)
backing_offset = object->shadow_offset;
size = object->size;
-
/*
* Move all in-memory pages from backing_object
* to the parent. Pages that have been paged out
new_offset = (p->offset - backing_offset);
assert(!p->busy || p->absent);
-
+
/*
* If the parent has a page here, or if
* this page falls outside the parent,
if (p->offset < backing_offset || new_offset >= size) {
VM_PAGE_FREE(p);
} else {
+ /*
+ * ENCRYPTED SWAP:
+ * The encryption key includes the "pager" and the
+ * "paging_offset". These might not be the same in
+ * the new object, so we can't just move an encrypted
+ * page from one object to the other. We can't just
+ * decrypt the page here either, because that would drop
+ * the object lock.
+ * The caller should check for encrypted pages before
+ * attempting to collapse.
+ */
+ ASSERT_PAGE_DECRYPTED(p);
+
pp = vm_page_lookup(object, new_offset);
if (pp == VM_PAGE_NULL) {
}
}
- assert(object->pager == IP_NULL || backing_object->pager == IP_NULL);
-
- if (backing_object->pager != IP_NULL) {
+#if !MACH_PAGEMAP
+	assert((!object->pager_created && object->pager == MEMORY_OBJECT_NULL)
+	       || (!backing_object->pager_created
+		   && backing_object->pager == MEMORY_OBJECT_NULL));
+#else
+ assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
+#endif /* !MACH_PAGEMAP */
+
+ if (backing_object->pager != MEMORY_OBJECT_NULL) {
vm_object_hash_entry_t entry;
/*
* unused portion.
*/
+ assert(!object->paging_in_progress);
object->pager = backing_object->pager;
entry = vm_object_hash_lookup(object->pager, FALSE);
assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
entry->object = object;
object->pager_created = backing_object->pager_created;
- object->pager_request = backing_object->pager_request;
+ object->pager_control = backing_object->pager_control;
object->pager_ready = backing_object->pager_ready;
object->pager_initialized = backing_object->pager_initialized;
object->cluster_size = backing_object->cluster_size;
object->paging_offset =
backing_object->paging_offset + backing_offset;
- if (object->pager_request != IP_NULL) {
- ipc_kobject_set(object->pager_request,
- (ipc_kobject_t) object,
- IKOT_PAGING_REQUEST);
+ if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object->pager_control,
+ object);
}
}
vm_object_cache_unlock();
- object->paging_offset = backing_object->paging_offset + backing_offset;
-
#if MACH_PAGEMAP
/*
-	 * If the shadow offset is 0, the use the existence map from
+	 * If the shadow offset is 0, then use the existence map from
* moves from within backing_object to within object.
*/
+ assert(!object->phys_contiguous);
+ assert(!backing_object->phys_contiguous);
object->shadow = backing_object->shadow;
- object->shadow_offset += backing_object->shadow_offset;
+ if (object->shadow) {
+ object->shadow_offset += backing_object->shadow_offset;
+ } else {
+ /* no shadow, therefore no shadow offset... */
+ object->shadow_offset = 0;
+ }
assert((object->shadow == VM_OBJECT_NULL) ||
- (object->shadow->copy == VM_OBJECT_NULL));
+ (object->shadow->copy != backing_object));
/*
* Discard backing_object.
(backing_object->resident_page_count == 0) &&
(backing_object->paging_in_progress == 0));
- assert(backing_object->alive);
backing_object->alive = FALSE;
vm_object_unlock(backing_object);
XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
(integer_t)backing_object, 0,0,0,0);
- zfree(vm_object_zone, (vm_offset_t) backing_object);
+ zfree(vm_object_zone, backing_object);
object_collapses++;
}
-void
+static void
vm_object_do_bypass(
vm_object_t object,
vm_object_t backing_object)
vm_object_reference(backing_object->shadow);
#endif /* TASK_SWAPPER */
+ assert(!object->phys_contiguous);
+ assert(!backing_object->phys_contiguous);
object->shadow = backing_object->shadow;
- object->shadow_offset += backing_object->shadow_offset;
+ if (object->shadow) {
+ object->shadow_offset += backing_object->shadow_offset;
+ } else {
+ /* no shadow, therefore no shadow offset... */
+ object->shadow_offset = 0;
+ }
/*
* Backing object might have had a copy pointer
object_bypasses++;
}
+
/*
* vm_object_collapse:
* Requires that the object be locked and the page queues be unlocked.
*
*/
-void
+static unsigned long vm_object_collapse_calls = 0;
+static unsigned long vm_object_collapse_objects = 0;
+static unsigned long vm_object_collapse_do_collapse = 0;
+static unsigned long vm_object_collapse_do_bypass = 0;
+__private_extern__ void
vm_object_collapse(
- register vm_object_t object)
+ register vm_object_t object,
+ register vm_object_offset_t hint_offset,
+ boolean_t can_bypass)
{
register vm_object_t backing_object;
- register vm_object_offset_t backing_offset;
- register vm_object_size_t size;
- register vm_object_offset_t new_offset;
- register vm_page_t p;
+ register unsigned int rcount;
+ register unsigned int size;
+ vm_object_offset_t collapse_min_offset;
+ vm_object_offset_t collapse_max_offset;
+ vm_page_t page;
+ vm_object_t original_object;
- if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) {
+ vm_object_collapse_calls++;
+
+ if (! vm_object_collapse_allowed &&
+ ! (can_bypass && vm_object_bypass_allowed)) {
return;
}
XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
(integer_t)object, 0,0,0,0);
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ original_object = object;
+
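+	/*
+	 *	Walk down the shadow chain.  The caller's object stays
+	 *	locked for the duration; each intermediate object is locked
+	 *	only while it is the current "object" and is unlocked once
+	 *	we move on to its backing object.
+	 */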
while (TRUE) {
+ vm_object_collapse_objects++;
/*
* Verify that the conditions are right for either
* collapse or bypass:
- *
- * The object exists and no pages in it are currently
- * being paged out, and
*/
- if (object == VM_OBJECT_NULL ||
- object->paging_in_progress != 0 ||
- object->absent_count != 0)
- return;
/*
* There is a backing object, and
*/
- if ((backing_object = object->shadow) == VM_OBJECT_NULL)
+ backing_object = object->shadow;
+ if (backing_object == VM_OBJECT_NULL) {
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
return;
+ }
+ /*
+ * No pages in the object are currently
+ * being paged out, and
+ */
+ if (object->paging_in_progress != 0 ||
+ object->absent_count != 0) {
+ /* try and collapse the rest of the shadow chain */
+ vm_object_lock(backing_object);
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
+ }
+
vm_object_lock(backing_object);
/*
if (!backing_object->internal ||
backing_object->paging_in_progress != 0) {
- vm_object_unlock(backing_object);
- return;
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
}
/*
* parent object.
*/
if (backing_object->shadow != VM_OBJECT_NULL &&
- backing_object->shadow->copy != VM_OBJECT_NULL) {
- vm_object_unlock(backing_object);
- return;
+ backing_object->shadow->copy == backing_object) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
}
/*
* object (if the parent is the only reference to
* it) or (perhaps) remove the parent's reference
* to it.
- */
-
- /*
- * If there is exactly one reference to the backing
- * object, we may be able to collapse it into the parent.
- *
- * XXXO (norma vm):
*
- * The backing object must not have a pager
- * created for it, since collapsing an object
- * into a backing_object dumps new pages into
- * the backing_object that its pager doesn't
- * know about, and we've already declared pages.
- * This page dumping is deadly if other kernels
- * are shadowing this object; this is the
- * distributed equivalent of the ref_count == 1
- * condition.
+ * If there is exactly one reference to the backing
+ * object, we may be able to collapse it into the
+ * parent.
*
- * With some work, we could downgrade this
- * restriction to the backing object must not
- * be cachable, since when a temporary object
- * is uncachable we are allowed to do anything
- * to it. We would have to do something like
- * call declare_pages again, and we would have
- * to be prepared for the memory manager
- * disabling temporary termination, which right
- * now is a difficult race to deal with, since
- * the memory manager currently assumes that
- * termination is the only possible failure
- * for disabling temporary termination.
+ * If MACH_PAGEMAP is defined:
+ * The parent must not have a pager created for it,
+ * since collapsing a backing_object dumps new pages
+ * into the parent that its pager doesn't know about
+ * (and the collapse code can't merge the existence
+ * maps).
+ * Otherwise:
+ * As long as one of the objects is still not known
+ * to the pager, we can collapse them.
*/
-
if (backing_object->ref_count == 1 &&
- ! object->pager_created &&
- vm_object_collapse_allowed) {
+ (!object->pager_created
+#if !MACH_PAGEMAP
+ || !backing_object->pager_created
+#endif /*!MACH_PAGEMAP */
+ ) && vm_object_collapse_allowed) {
XPR(XPR_VM_OBJECT,
- "vm_object_collapse: %x to %x, pager %x, pager_request %x\n",
+ "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
(integer_t)backing_object, (integer_t)object,
(integer_t)backing_object->pager,
- (integer_t)backing_object->pager_request, 0);
+ (integer_t)backing_object->pager_control, 0);
/*
* We need the cache lock for collapsing,
*/
if (! vm_object_cache_lock_try()) {
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
vm_object_unlock(backing_object);
return;
}
+ /*
+ * ENCRYPTED SWAP
+ * We can't collapse the object if it contains
+			 * any encrypted page, because the encryption key
+ * includes the <object,offset> info. We can't
+ * drop the object lock in vm_object_do_collapse()
+ * so we can't decrypt the page there either.
+ */
+ if (vm_pages_encrypted) {
+ collapse_min_offset = object->shadow_offset;
+ collapse_max_offset =
+ object->shadow_offset + object->size;
+ queue_iterate(&backing_object->memq,
+ page, vm_page_t, listq) {
+ if (page->encrypted &&
+ (page->offset >=
+ collapse_min_offset) &&
+ (page->offset <
+ collapse_max_offset)) {
+ /*
+ * We found an encrypted page
+ * in the backing object,
+ * within the range covered
+ * by the parent object: we can
+ * not collapse them.
+ */
+ vm_object_collapse_encrypted++;
+ vm_object_cache_unlock();
+ goto try_bypass;
+ }
+ }
+ }
+
/*
* Collapse the object with its backing
* object, and try again with the object's
*/
vm_object_do_collapse(object, backing_object);
+ vm_object_collapse_do_collapse++;
continue;
}
-
+ try_bypass:
/*
* Collapsing the backing object was not possible
* or permitted, so let's try bypassing it.
*/
- if (! vm_object_bypass_allowed) {
- vm_object_unlock(backing_object);
- return;
- }
-
- /*
- * If the backing object has a pager but no pagemap,
- * then we cannot bypass it, because we don't know
- * what pages it has.
- */
- if (backing_object->pager_created
-#if MACH_PAGEMAP
- && (backing_object->existence_map == VM_EXTERNAL_NULL)
-#endif /* MACH_PAGEMAP */
- ) {
- vm_object_unlock(backing_object);
- return;
+ if (! (can_bypass && vm_object_bypass_allowed)) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
}
- backing_offset = object->shadow_offset;
- size = object->size;
-
- /*
- * If all of the pages in the backing object are
- * shadowed by the parent object, the parent
- * object no longer has to shadow the backing
- * object; it can shadow the next one in the
- * chain.
- *
- * If the backing object has existence info,
- * we must check examine its existence info
- * as well.
- *
- * XXX
- * Should have a check for a 'small' number
- * of pages here.
- */
/*
- * First, check pages resident in the backing object.
+ * If the object doesn't have all its pages present,
+ * we have to make sure no pages in the backing object
+ * "show through" before bypassing it.
*/
+ size = atop(object->size);
+ rcount = object->resident_page_count;
+ if (rcount != size) {
+ vm_object_offset_t offset;
+ vm_object_offset_t backing_offset;
+ unsigned int backing_rcount;
+ unsigned int lookups = 0;
- queue_iterate(&backing_object->memq, p, vm_page_t, listq) {
-
/*
- * If the parent has a page here, or if
- * this page falls outside the parent,
- * keep going.
- *
- * Otherwise, the backing_object must be
- * left in the chain.
+ * If the backing object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
*/
-
- new_offset = (p->offset - backing_offset);
- if (p->offset < backing_offset || new_offset >= size) {
-
- /*
- * Page falls outside of parent.
- * Keep going.
- */
-
+ if (backing_object->pager_created
+#if MACH_PAGEMAP
+ && (backing_object->existence_map == VM_EXTERNAL_NULL)
+#endif /* MACH_PAGEMAP */
+ ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
continue;
}
- if ((vm_page_lookup(object, new_offset) == VM_PAGE_NULL)
+ /*
+ * If the object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (object->pager_created
#if MACH_PAGEMAP
- &&
- (vm_external_state_get(object->existence_map,
- new_offset)
- != VM_EXTERNAL_STATE_EXISTS)
+ && (object->existence_map == VM_EXTERNAL_NULL)
#endif /* MACH_PAGEMAP */
- ) {
+ ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
+ }
- /*
- * Page still needed.
- * Can't go any further.
- */
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+		 * If the backing object has existence info,
+		 * we must examine it as well.
+ *
+ */
- vm_object_unlock(backing_object);
- return;
- }
- }
+ backing_offset = object->shadow_offset;
+ backing_rcount = backing_object->resident_page_count;
-#if MACH_PAGEMAP
- /*
- * Next, if backing object has been paged out,
- * we must check its existence info for pages
- * that the parent doesn't have.
- */
+#define EXISTS_IN_OBJECT(obj, off, rc) \
+ (vm_external_state_get((obj)->existence_map, \
+ (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
+ ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
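+
+	/*
+	 *	EXISTS_IN_OBJECT is TRUE when the page at "off" is either
+	 *	paged out (according to the existence map) or resident.
+	 *	"rc" counts resident pages not yet accounted for: it is
+	 *	decremented on each successful lookup, and once it reaches
+	 *	zero the (expensive) vm_page_lookup() is skipped entirely.
+	 *	"lookups" counts the hash lookups so the loops below can
+	 *	throttle themselves with delay().
+	 */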
+
+ /*
+ * Check the hint location first
+ * (since it is often the quickest way out of here).
+ */
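+	/*
+	 *	A cow_hint of ~0 means no copy-on-write dependency has
+	 *	been recorded yet, so start probing a few pages below the
+	 *	fault offset the caller hinted at.
+	 */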
+ if (object->cow_hint != ~(vm_offset_t)0)
+ hint_offset = (vm_object_offset_t)object->cow_hint;
+ else
+ hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
+ (hint_offset - 8 * PAGE_SIZE_64) : 0;
+
+ if (EXISTS_IN_OBJECT(backing_object, hint_offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
+ /* dependency right at the hint */
+ object->cow_hint = (vm_offset_t)hint_offset;
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
+ }
- if (backing_object->pager_created) {
- assert(backing_object->existence_map
- != VM_EXTERNAL_NULL);
- for (new_offset = 0; new_offset < object->size;
- new_offset += PAGE_SIZE_64) {
- vm_object_offset_t
- offset = new_offset + backing_offset;
+ /*
+ * If the object's window onto the backing_object
+ * is large compared to the number of resident
+ * pages in the backing object, it makes sense to
+ * walk the backing_object's resident pages first.
+ *
+ * NOTE: Pages may be in both the existence map and
+ * resident. So, we can't permanently decrement
+ * the rcount here because the second loop may
+		 * find the same pages in the backing object's
+		 * existence map that we found here and we would
+		 * double-decrement the rcount.
+ */
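+		/*
+		 *	Heuristic: walk the backing object's resident page
+		 *	list only when it is shorter than the range of
+		 *	offsets we would otherwise probe.  Without an
+		 *	existence map every probe costs a hash lookup, so
+		 *	the resident walk is chosen even more eagerly (at
+		 *	half the threshold).
+		 */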
+ if (backing_rcount && size >
+ ((backing_object->existence_map) ?
+ backing_rcount : (backing_rcount >> 1))) {
+ unsigned int rc = rcount;
+ vm_page_t p;
+
+ backing_rcount = backing_object->resident_page_count;
+ p = (vm_page_t)queue_first(&backing_object->memq);
+ do {
+				/* we have only one lookup lock; yield periodically to limit contention */
+ if (lookups > 256) {
+ lookups = 0;
+ delay(1);
+ }
- /*
- * If this page doesn't exist in
- * the backing object's existence
- * info, then continue.
- */
+ offset = (p->offset - backing_offset);
+ if (offset < object->size &&
+ offset != hint_offset &&
+ !EXISTS_IN_OBJECT(object, offset, rc)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t)offset;
+ break;
+ }
+ p = (vm_page_t) queue_next(&p->listq);
- if (vm_external_state_get(
- backing_object->existence_map,
- offset) == VM_EXTERNAL_STATE_ABSENT) {
+ } while (--backing_rcount);
+			if (backing_rcount != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
continue;
}
+ }
- /*
- * If this page is neither resident
- * in the parent nor paged out to
- * the parent's pager, then we cannot
- * bypass the backing object.
- */
+ /*
+ * Walk through the offsets looking for pages in the
+ * backing object that show through to the object.
+ */
+ if (backing_rcount || backing_object->existence_map) {
+ offset = hint_offset;
+
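+			/*
+			 *	Scan one page at a time starting just past
+			 *	the hint, wrapping around to offset 0 at the
+			 *	end of the object, and stop once we come
+			 *	back around to the hint itself.
+			 */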
+ while((offset =
+ (offset + PAGE_SIZE_64 < object->size) ?
+ (offset + PAGE_SIZE_64) : 0) != hint_offset) {
+
+				/* we have only one lookup lock; yield periodically to limit contention */
+ if (lookups > 256) {
+ lookups = 0;
+ delay(1);
+ }
- if ((vm_page_lookup(object, new_offset) ==
- VM_PAGE_NULL) &&
- ((object->existence_map == VM_EXTERNAL_NULL)
- || (vm_external_state_get(
- object->existence_map, new_offset)
- == VM_EXTERNAL_STATE_ABSENT))) {
- vm_object_unlock(backing_object);
- return;
+ if (EXISTS_IN_OBJECT(backing_object, offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, offset, rcount)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t)offset;
+ break;
+ }
+ }
+ if (offset != hint_offset) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ continue;
}
}
}
-#else /* MACH_PAGEMAP */
- assert(! backing_object->pager_created);
-#endif /* MACH_PAGEMAP */
+
+ /* reset the offset hint for any objects deeper in the chain */
+ object->cow_hint = (vm_offset_t)0;
/*
* All interesting pages in the backing object
*/
vm_object_do_bypass(object, backing_object);
+ vm_object_collapse_do_bypass++;
/*
* Try again with this object's new backing object.
continue;
}
+
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
}
/*
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;
-void
+__private_extern__ void
vm_object_page_remove(
register vm_object_t object,
register vm_object_offset_t start,
* It balances vm_object_lookup vs iteration.
*/
- if (atop(end - start) < (unsigned)object->resident_page_count/16) {
+ if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
vm_object_page_remove_lookup++;
for (; start < end; start += PAGE_SIZE_64) {
if (p != VM_PAGE_NULL) {
assert(!p->cleaning && !p->pageout);
if (!p->fictitious)
- pmap_page_protect(p->phys_addr,
- VM_PROT_NONE);
+ pmap_disconnect(p->phys_page);
VM_PAGE_FREE(p);
}
}
if ((start <= p->offset) && (p->offset < end)) {
assert(!p->cleaning && !p->pageout);
if (!p->fictitious)
- pmap_page_protect(p->phys_addr,
- VM_PROT_NONE);
+ pmap_disconnect(p->phys_page);
VM_PAGE_FREE(p);
}
p = next;
}
}
+
/*
* Routine: vm_object_coalesce
* Function: Coalesces two objects backing up adjoining
* The object(s) must *not* be locked. The map must be locked
* to preserve the reference to the object(s).
*/
-int vm_object_coalesce_count = 0;
+static int vm_object_coalesce_count = 0;
-boolean_t
+__private_extern__ boolean_t
vm_object_coalesce(
register vm_object_t prev_object,
vm_object_t next_object,
vm_object_offset_t prev_offset,
- vm_object_offset_t next_offset,
+ __unused vm_object_offset_t next_offset,
vm_object_size_t prev_size,
vm_object_size_t next_size)
{
/*
* Try to collapse the object first
*/
- vm_object_collapse(prev_object);
+ vm_object_collapse(prev_object, prev_offset, TRUE);
/*
* Can't coalesce if pages not mapped to
* . paged out
* . shadows another object
* . has a copy elsewhere
+ * . is purgable
* . paging references (pages might be in page-list)
*/
(prev_object->shadow != VM_OBJECT_NULL) ||
(prev_object->copy != VM_OBJECT_NULL) ||
(prev_object->true_share != FALSE) ||
+ (prev_object->purgable != VM_OBJECT_NONPURGABLE) ||
(prev_object->paging_in_progress != 0)) {
vm_object_unlock(prev_object);
return(FALSE);
vm_page_t old_page;
vm_object_offset_t addr;
- num_pages = atop(size);
+ num_pages = atop_64(size);
for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
}
vm_page_init(m, addr);
+ /* private normally requires lock_queues but since we */
+		/* are initializing the page, it's not necessary here */
m->private = TRUE; /* don`t free page */
m->wire_count = 1;
vm_page_insert(m, object, offset);
*/
void
vm_external_print(
- vm_external_map_t map,
- vm_size_t size)
+ vm_external_map_t emap,
+ vm_size_t size)
{
- if (map == VM_EXTERNAL_NULL) {
+ if (emap == VM_EXTERNAL_NULL) {
printf("0 ");
} else {
vm_size_t existence_size = stob(size);
printf("{ size=%d, map=[", existence_size);
if (existence_size > 0) {
- print_bitstring(map[0]);
+ print_bitstring(emap[0]);
}
if (existence_size > 1) {
- print_bitstring(map[1]);
+ print_bitstring(emap[1]);
}
if (existence_size > 2) {
printf("...");
- print_bitstring(map[existence_size-1]);
+ print_bitstring(emap[existence_size-1]);
}
printf("] }\n");
}
vm_follow_object(
vm_object_t object)
{
- extern db_indent;
+ int count = 0;
+ int orig_db_indent = db_indent;
- int count = 1;
+ while (TRUE) {
+ if (object == VM_OBJECT_NULL) {
+ db_indent = orig_db_indent;
+ return count;
+ }
- if (object == VM_OBJECT_NULL)
- return 0;
+ count += 1;
- iprintf("object 0x%x", object);
- printf(", shadow=0x%x", object->shadow);
- printf(", copy=0x%x", object->copy);
- printf(", pager=0x%x", object->pager);
- printf(", ref=%d\n", object->ref_count);
+ iprintf("object 0x%x", object);
+ printf(", shadow=0x%x", object->shadow);
+ printf(", copy=0x%x", object->copy);
+ printf(", pager=0x%x", object->pager);
+ printf(", ref=%d\n", object->ref_count);
- db_indent += 2;
- if (object->shadow)
- count += vm_follow_object(object->shadow);
+ db_indent += 2;
+ object = object->shadow;
+ }
- db_indent -= 2;
- return count;
}
/*
*/
void
vm_object_print(
- vm_object_t object,
- boolean_t have_addr,
- int arg_count,
- char *modif)
+ db_addr_t db_addr,
+ __unused boolean_t have_addr,
+ __unused int arg_count,
+ __unused char *modif)
{
+ vm_object_t object;
register vm_page_t p;
- extern db_indent;
- char *s;
+ const char *s;
register int count;
+ object = (vm_object_t) (long) db_addr;
if (object == VM_OBJECT_NULL)
return;
iprintf("size=0x%x", object->size);
printf(", cluster=0x%x", object->cluster_size);
- printf(", frozen=0x%x", object->frozen_size);
+ printf(", memq_hint=%p", object->memq_hint);
printf(", ref_count=%d\n", object->ref_count);
iprintf("");
#if TASK_SWAPPER
if (object->shadow) {
register int i = 0;
vm_object_t shadow = object;
- while(shadow = shadow->shadow)
+ while((shadow = shadow->shadow))
i++;
printf(" (depth %d)", i);
}
iprintf("pager=0x%x", object->pager);
printf(", paging_offset=0x%x", object->paging_offset);
- printf(", pager_request=0x%x\n", object->pager_request);
+ printf(", pager_control=0x%x\n", object->pager_control);
iprintf("copy_strategy=%d[", object->copy_strategy);
switch (object->copy_strategy) {
(object->pageout ? "" : "!"),
(object->internal ? "internal" : "external"),
(object->temporary ? "temporary" : "permanent"));
- iprintf("%salive, %slock_in_progress, %slock_restart, %sshadowed, %scached, %sprivate\n",
+ iprintf("%salive, %spurgable, %spurgable_volatile, %spurgable_empty, %sshadowed, %scached, %sprivate\n",
(object->alive ? "" : "!"),
- (object->lock_in_progress ? "" : "!"),
- (object->lock_restart ? "" : "!"),
+ ((object->purgable != VM_OBJECT_NONPURGABLE) ? "" : "!"),
+ ((object->purgable == VM_OBJECT_PURGABLE_VOLATILE) ? "" : "!"),
+ ((object->purgable == VM_OBJECT_PURGABLE_EMPTY) ? "" : "!"),
(object->shadowed ? "" : "!"),
(vm_object_cached(object) ? "" : "!"),
(object->private ? "" : "!"));
}
count++;
- printf("(off=0x%X,page=0x%X)", p->offset, (integer_t) p);
+ printf("(off=0x%llX,page=%p)", p->offset, p);
p = (vm_page_t) queue_next(&p->listq);
}
if (count != 0) {
#endif /* MACH_KDB */
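+
+/*
+ *	Routine:	vm_object_populate_with_private
+ *	Purpose:
+ *		Attach a range of caller-supplied physical pages to a
+ *		"private" object, converting or replacing any fictitious
+ *		pages found in the range.  For physically contiguous
+ *		objects, the base physical address is simply recorded in
+ *		the (otherwise unused) shadow_offset field.
+ *	Conditions:
+ *		"object" must be marked private, and "offset" must be
+ *		page aligned in the non-contiguous case.
+ */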
+kern_return_t
+vm_object_populate_with_private(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ ppnum_t phys_page,
+ vm_size_t size)
+{
+ ppnum_t base_page;
+ vm_object_offset_t base_offset;
+
+
+ if(!object->private)
+ return KERN_FAILURE;
+
+ base_page = phys_page;
+
+ vm_object_lock(object);
+ if(!object->phys_contiguous) {
+ vm_page_t m;
+ if((base_offset = trunc_page_64(offset)) != offset) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+ base_offset += object->paging_offset;
+ while(size) {
+ m = vm_page_lookup(object, base_offset);
+ if(m != VM_PAGE_NULL) {
+ if(m->fictitious) {
+ vm_page_lock_queues();
+ m->fictitious = FALSE;
+ m->private = TRUE;
+ m->phys_page = base_page;
+ if(!m->busy) {
+ m->busy = TRUE;
+ }
+ if(!m->absent) {
+ m->absent = TRUE;
+ object->absent_count++;
+ }
+ m->list_req_pending = TRUE;
+ vm_page_unlock_queues();
+ } else if (m->phys_page != base_page) {
+ /* pmap call to clear old mapping */
+ pmap_disconnect(m->phys_page);
+ m->phys_page = base_page;
+ }
+
+ /*
+ * ENCRYPTED SWAP:
+ * We're not pointing to the same
+ * physical page any longer and the
+ * contents of the new one are not
+ * supposed to be encrypted.
+ * XXX What happens to the original
+				 * physical page?  Is it lost?
+ */
+ m->encrypted = FALSE;
+
+ } else {
+ while ((m = vm_page_grab_fictitious())
+ == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+ vm_page_lock_queues();
+ m->fictitious = FALSE;
+ m->private = TRUE;
+ m->phys_page = base_page;
+ m->list_req_pending = TRUE;
+ m->absent = TRUE;
+ m->unusual = TRUE;
+ object->absent_count++;
+ vm_page_unlock_queues();
+ vm_page_insert(m, object, base_offset);
+ }
+ base_page++; /* Go to the next physical page */
+ base_offset += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ } else {
+ /* NOTE: we should check the original settings here */
+ /* if we have a size > zero a pmap call should be made */
+ /* to disable the range */
+
+ /* pmap_? */
+
+ /* shadows on contiguous memory are not allowed */
+ /* we therefore can use the offset field */
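+		/* the shift converts the page number to a byte address */
+		/* and assumes 4K (1 << 12) pages */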
+ object->shadow_offset = (vm_object_offset_t)(phys_page << 12);
+ object->size = size;
+ }
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+}
+
/*
* memory_object_free_from_cache:
*
* Walk the vm_object cache list, removing and freeing vm_objects
- * which are backed by the pager identified by the caller, (pager_id).
+ * which are backed by the pager identified by the caller (pager_ops).
- * Remove up to "count" objects, if there are that may available
+ * Remove up to "count" objects, if there are that many available
* in the cache.
+ *
* Walk the list at most once, return the number of vm_objects
* actually freed.
- *
*/
-kern_return_t
+__private_extern__ kern_return_t
memory_object_free_from_cache(
- host_t host,
- int pager_id,
+ __unused host_t host,
+ memory_object_pager_ops_t pager_ops,
int *count)
{
int object_released = 0;
- int i;
register vm_object_t object = VM_OBJECT_NULL;
vm_object_t shadow;
queue_iterate(&vm_object_cached_list, object,
vm_object_t, cached_list) {
- if (pager_id == (int) pager_mux_hash_lookup(
- (ipc_port_t)object->pager)) {
+ if (object->pager &&
+ (pager_ops == object->pager->mo_pager_ops)) {
vm_object_lock(object);
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
/*
* Since this object is in the cache, we know
- * that it is initialized and has no references.
- * Take a reference to avoid recursive
- * deallocations.
+ * that it is initialized and has only a pager's
+ * (implicit) reference. Take a reference to avoid
+ * recursive deallocations.
*/
assert(object->pager_initialized);
return KERN_SUCCESS;
}
-/*
- * memory_object_remove_cached_object:
- *
- * Check for the existance of a memory object represented by the
- * supplied port. If one exists and it is not in use, remove the
- * memory object from the vm_object cache.
- * If the memory object is in use, turn off the the "can_persist"
- * property so that it will not go in the cache when the last user
- * gives it up.
- *
- */
+
kern_return_t
-memory_object_remove_cached_object(
- ipc_port_t port)
+memory_object_create_named(
+ memory_object_t pager,
+ memory_object_offset_t size,
+ memory_object_control_t *control)
{
- vm_object_t object;
- vm_object_t shadow;
+ vm_object_t object;
+ vm_object_hash_entry_t entry;
-repeat_lock_acquire:
- object = VM_OBJECT_NULL;
+ *control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager == MEMORY_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
- if (IP_VALID(port)) {
- vm_object_cache_lock();
- ip_lock(port);
- if (ip_active(port) &&
- (ip_kotype(port) == IKOT_PAGER_LOOKUP_TYPE)) {
- object = (vm_object_t) port->ip_kobject;
- if (!vm_object_lock_try(object)) {
- /*
- * failed to acquire object lock. Drop the
- * other two locks and wait for it, then go
- * back and start over in case the port
- * associations changed in the interim.
- */
- ip_unlock(port);
- vm_object_cache_unlock();
- vm_object_lock(object);
- vm_object_unlock(object);
- goto repeat_lock_acquire;
- }
+ vm_object_cache_lock();
+ entry = vm_object_hash_lookup(pager, FALSE);
+ if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
+ (entry->object != VM_OBJECT_NULL)) {
+ if (entry->object->named == TRUE)
+			panic("memory_object_create_named: caller already holds the right");
+	}
- if(object->terminating) {
- ip_unlock(port);
- vm_object_unlock(object);
- vm_object_cache_unlock();
- return KERN_RIGHT_EXISTS;
- }
+ vm_object_cache_unlock();
+ if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
+ == VM_OBJECT_NULL) {
+ return(KERN_INVALID_OBJECT);
+ }
+
+ /* wait for object (if any) to be ready */
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ object->named = TRUE;
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ *control = object->pager_control;
+ vm_object_unlock(object);
+ }
+ return (KERN_SUCCESS);
+}
- assert(object->alive);
- ip_unlock(port);
- if (object->ref_count == 0) {
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- object->ref_count++;
- /*
- * Terminate the object.
- * If the object had a shadow, we let
- * vm_object_deallocate deallocate it.
- * "pageout" objects have a shadow, but
- * maintain a "paging reference" rather
- * than a normal reference.
- * (We are careful here to limit
- * recursion.)
- */
- shadow = object->pageout?
- VM_OBJECT_NULL:object->shadow;
- /* will do the vm_object_cache_unlock */
- if((vm_object_terminate(object)
- == KERN_SUCCESS)
- && (shadow != VM_OBJECT_NULL)) {
- /* will lock and unlock cache_lock */
- vm_object_deallocate(shadow);
- }
- }
- else {
- /*
- * We cannot free object but we can
- * make sure it doesn't go into the
- * cache when it is no longer in
- * use.
- */
- object->can_persist = FALSE;
+/*
+ * Routine: memory_object_recover_named [user interface]
+ * Purpose:
+ * Attempt to recover a named reference for a VM object.
+ * VM will verify that the object has not already started
+ * down the termination path, and if it has, will optionally
+ * wait for that to finish.
+ * Returns:
+ * KERN_SUCCESS - we recovered a named reference on the object
+ * KERN_FAILURE - we could not recover a reference (object dead)
+ * KERN_INVALID_ARGUMENT - bad memory object control
+ */
+kern_return_t
+memory_object_recover_named(
+ memory_object_control_t control,
+ boolean_t wait_on_terminating)
+{
+ vm_object_t object;
+
+ vm_object_cache_lock();
+ object = memory_object_control_to_vm_object(control);
+ if (object == VM_OBJECT_NULL) {
+ vm_object_cache_unlock();
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+restart:
+ vm_object_lock(object);
+
+ if (object->terminating && wait_on_terminating) {
+ vm_object_cache_unlock();
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+ THREAD_UNINT);
+ vm_object_cache_lock();
+ goto restart;
+ }
+
+ if (!object->alive) {
+ vm_object_cache_unlock();
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ if (object->named == TRUE) {
+ vm_object_cache_unlock();
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+
+ if((object->ref_count == 0) && (!object->terminating)){
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ XPR(XPR_VM_OBJECT_CACHE,
+ "memory_object_recover_named: removing %X, head (%X, %X)\n",
+ (integer_t)object,
+ (integer_t)vm_object_cached_list.next,
+ (integer_t)vm_object_cached_list.prev, 0,0);
+ }
+
+ vm_object_cache_unlock();
+
+ object->named = TRUE;
+ object->ref_count++;
+ vm_object_res_reference(object);
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ vm_object_unlock(object);
+ return (KERN_SUCCESS);
+}
+
+
+/*
+ * vm_object_release_name:
+ *
+ * Enforces the name semantic on memory_object reference count
+ * decrement.  This routine should not be called unless the caller
+ * holds a name reference gained through memory_object_create_named.
+ *
+ * If the TERMINATE_IDLE flag is set, the call returns failure unless
+ * the reference count is exactly 1, i.e. the object is idle with the
+ * name being its only remaining reference.
+ *
+ * If the decision is made to proceed, the "named" flag is cleared and
+ * the reference count is decremented.  If the RESPECT_CACHE flag is
+ * set and the reference count has gone to zero, the memory_object is
+ * checked to see if it is cacheable; otherwise, when the reference
+ * count reaches zero, it is simply terminated.
+ */
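+/*
+ *	For example (an illustrative call, not mandated by this code),
+ *	a caller that wants to drop an idle named reference while
+ *	honoring the cache semantics above might pass:
+ *
+ *		vm_object_release_name(object,
+ *			MEMORY_OBJECT_TERMINATE_IDLE |
+ *			MEMORY_OBJECT_RESPECT_CACHE);
+ */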
+
+__private_extern__ kern_return_t
+vm_object_release_name(
+ vm_object_t object,
+ int flags)
+{
+ vm_object_t shadow;
+ boolean_t original_object = TRUE;
+
+ while (object != VM_OBJECT_NULL) {
+
+ /*
+ * The cache holds a reference (uncounted) to
+	 * the object.  We must lock it before removing
+ * the object.
+ *
+ */
+
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ assert(object->alive);
+ if(original_object)
+ assert(object->named);
+ assert(object->ref_count > 0);
+
+ /*
+ * We have to wait for initialization before
+ * destroying or caching the object.
+ */
+
+ if (object->pager_created && !object->pager_initialized) {
+ assert(!object->can_persist);
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ thread_block(THREAD_CONTINUE_NULL);
+ continue;
+ }
+ if (((object->ref_count > 1)
+ && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
+ || (object->terminating)) {
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return KERN_FAILURE;
+ } else {
+ if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
vm_object_unlock(object);
vm_object_cache_unlock();
- return KERN_RIGHT_EXISTS;
+ return KERN_SUCCESS;
+ }
+ }
+
+ if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
+ (object->ref_count == 1)) {
+ if(original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ /* let vm_object_deallocate push this thing into */
+			/* the cache, if that is where it is bound */
+ vm_object_deallocate(object);
+ return KERN_SUCCESS;
+ }
+ VM_OBJ_RES_DECR(object);
+		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
+ if(object->ref_count == 1) {
+ if(vm_object_terminate(object) != KERN_SUCCESS) {
+ if(original_object) {
+ return KERN_FAILURE;
+ } else {
+ return KERN_SUCCESS;
+ }
+ }
+ if (shadow != VM_OBJECT_NULL) {
+ original_object = FALSE;
+ object = shadow;
+ continue;
+ }
+ return KERN_SUCCESS;
+ } else {
+ object->ref_count--;
+ assert(object->ref_count > 0);
+ if(original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return KERN_SUCCESS;
+ }
+ }
+ /*NOTREACHED*/
+ assert(0);
+ return KERN_FAILURE;
+}
+
+
+__private_extern__ kern_return_t
+vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot)
+{
+ __unused boolean_t should_flush;
+
+ should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
+
+ XPR(XPR_MEMORY_OBJECT,
+ "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
+ (integer_t)object, offset, size,
+ (((should_return&1)<<1)|should_flush), prot);
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page_64(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object reference from being released.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ (void)vm_object_update(object,
+ offset, size, NULL, NULL, should_return, flags, prot);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Empty a purgable object by grabbing the physical pages assigned to it and
+ * putting them on the free queue without writing them to backing store, etc.
+ * When the pages are next touched they will be demand zero-fill pages. We
+ * skip pages which are busy, being paged in/out, wired, etc. We do _not_
+ * skip referenced/dirty pages, pages on the active queue, etc. We're more
+ * than happy to grab these since this is a purgable object. We mark the
+ * object as "empty" after reaping its pages.
+ *
+ * On entry the object and page queues are locked; the object must be a
+ * purgable object with no delayed copies pending.
+ */
+unsigned int
+vm_object_purge(vm_object_t object)
+{
+ vm_page_t p, next;
+ unsigned int num_purged_pages;
+ vm_page_t local_freeq;
+ unsigned long local_freed;
+ int purge_loop_quota;
+/* free pages as soon as we gather PURGE_BATCH_FREE_LIMIT pages to free */
+#define PURGE_BATCH_FREE_LIMIT 50
+/* release page queues lock every PURGE_LOOP_QUOTA iterations */
+#define PURGE_LOOP_QUOTA 100
+
+ num_purged_pages = 0;
+ if (object->purgable == VM_OBJECT_NONPURGABLE)
+ return num_purged_pages;
+
+ object->purgable = VM_OBJECT_PURGABLE_EMPTY;
+
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+ purge_loop_quota = PURGE_LOOP_QUOTA;
+
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+
+ /*
+ * Go through the object's resident pages and try and discard them.
+ */
+ next = (vm_page_t)queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ if (purge_loop_quota-- == 0) {
+ /*
+ * Avoid holding the page queues lock for too long.
+ * Let someone else take it for a while if needed.
+ * Keep holding the object's lock to guarantee that
+ * the object's page list doesn't change under us
+ * while we yield.
+ */
+ if (local_freeq != VM_PAGE_NULL) {
+ /*
+ * Flush our queue of pages to free.
+ */
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+ vm_page_unlock_queues();
+ mutex_pause();
+ vm_page_lock_queues();
+
+ /* resume with the current page and a new quota */
+ purge_loop_quota = PURGE_LOOP_QUOTA;
+ }
+
+
+ if (p->busy || p->cleaning || p->laundry ||
+ p->list_req_pending) {
+ /* page is being acted upon, so don't mess with it */
+ continue;
+ }
+ if (p->wire_count) {
+ /* don't discard a wired page */
+ continue;
+ }
+
+ if (p->tabled) {
+ /* clean up the object/offset table */
+ vm_page_remove(p);
+ }
+ if (p->absent) {
+ /* update the object's count of absent pages */
+ vm_object_absent_release(object);
+ }
+
+ /* we can discard this page */
+
+		/* advertise that this page is in a transition state */
+ p->busy = TRUE;
+
+ if (p->no_isync == TRUE) {
+ /* the page hasn't been mapped yet */
+ /* (optimization to delay the i-cache sync) */
+ } else {
+ /* unmap the page */
+ int refmod_state;
+
+ refmod_state = pmap_disconnect(p->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ p->dirty = TRUE;
+ }
+ }
+
+ if (p->dirty || p->precious) {
+			/* we saved the cost of cleaning this page! */
+ num_purged_pages++;
+ vm_page_purged_count++;
+ }
+
+ /* remove page from active or inactive queue... */
+ VM_PAGE_QUEUES_REMOVE(p);
+
+ /* ... and put it on our queue of pages to free */
+ assert(!p->laundry);
+ assert(p->object != kernel_object);
+ assert(p->pageq.next == NULL &&
+ p->pageq.prev == NULL);
+ p->pageq.next = (queue_entry_t) local_freeq;
+ local_freeq = p;
+ if (++local_freed >= PURGE_BATCH_FREE_LIMIT) {
+ /* flush our queue of pages to free */
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+ }
+
+ /* flush our local queue of pages to free one last time */
+ if (local_freeq != VM_PAGE_NULL) {
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+
+ return num_purged_pages;
+}
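+
+/*
+ * A minimal caller sketch of the contract stated above -- a hypothetical
+ * helper, not part of this file's interface: the object and the page
+ * queues must both be locked, and the object must be a purgable object
+ * with no delayed copies pending.
+ */
+#if 0
+static unsigned int
+vm_object_purge_example(vm_object_t object)
+{
+	unsigned int	num_purged;
+
+	vm_object_lock(object);
+	vm_page_lock_queues();
+	assert(object->copy == VM_OBJECT_NULL);
+	num_purged = vm_object_purge(object);	/* reaps resident pages */
+	vm_page_unlock_queues();
+	vm_object_unlock(object);
+	return num_purged;
+}
+#endif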
+
+/*
+ * vm_object_purgable_control() allows the caller to control and investigate the
+ * state of a purgable object. A purgable object is created via a call to
+ * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgable object will
+ * never be coalesced with any other object -- even other purgable objects --
+ * and will thus always remain a distinct object. A purgable object has
+ * special semantics when its reference count is exactly 1. If its reference
+ * count is greater than 1, then a purgable object will behave like a normal
+ * object and attempts to use this interface will result in an error return
+ * of KERN_INVALID_ARGUMENT.
+ *
+ * A purgable object may be put into a "volatile" state which will make the
+ * object's pages eligible to be reclaimed without paging to backing
+ * store if the system runs low on memory. If the pages in a volatile
+ * purgable object are reclaimed, the purgable object is said to have been
+ * "emptied." When a purgable object is emptied the system will reclaim as
+ * many pages from the object as it can in a convenient manner (pages already
+ * en route to backing store or busy for other reasons are left as is). When
+ * a purgable object is made volatile, its pages will generally be reclaimed
+ * before other pages in the application's working set. This semantic is
+ * generally used by applications which can recreate the data in the object
+ * faster than it can be paged in. One such example might be media assets
+ * which can be reread from a much faster RAID volume.
+ *
+ * A purgable object may be designated as "non-volatile" which means it will
+ * behave like all other objects in the system with pages being written to and
+ * read from backing store as needed to satisfy system memory needs. If the
+ * object was emptied before the object was made non-volatile, that fact will
+ * be returned as the old state of the purgable object (see
+ * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
+ * were reclaimed as part of emptying the object will be refaulted in as
+ * zero-fill on demand. It is up to the application to note that an object
+ * was emptied and recreate the object's contents if necessary.  When a
+ * purgable object is made non-volatile, its pages will generally not be paged
+ * out to backing store in the immediate future. A purgable object may also
+ * be manually emptied.
+ *
+ * Finally, the current state (non-volatile, volatile, volatile & empty) of
+ * a purgable object may be queried at any time.  This information may
+ * be used as a control input to let the application know when the system is
+ * experiencing memory pressure and is reclaiming memory.
+ *
+ * The specified address may be any address within the purgable object. If
+ * the specified address does not represent any object in the target task's
+ * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
+ * object containing the specified address is not a purgable object, then
+ * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
+ * returned.
+ *
+ * The control parameter may be either VM_PURGABLE_SET_STATE or
+ * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
+ * state is used to set the new state of the purgable object and return its
+ * old state. For VM_PURGABLE_GET_STATE, the current state of the purgable
+ * object is returned in the parameter state.
+ *
+ * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
+ * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
+ * the non-volatile, volatile and volatile/empty states described above.
+ * Setting the state of a purgable object to VM_PURGABLE_EMPTY will
+ * immediately reclaim as many pages in the object as can be conveniently
+ * collected (some may have already been written to backing store or be
+ * otherwise busy).
+ *
+ * The process of making a purgable object non-volatile and determining its
+ * previous state is atomic. Thus, if a purgable object is made
+ * VM_PURGABLE_NONVOLATILE and the old state is returned as
+ * VM_PURGABLE_VOLATILE, then the purgable object's previous contents are
+ * completely intact and will remain so until the object is made volatile
+ * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
+ * was reclaimed while it was in a volatile state and its previous contents
+ * have been lost.
+ */
+/*
+ * The object must be locked.
+ */
+kern_return_t
+vm_object_purgable_control(
+ vm_object_t object,
+ vm_purgable_t control,
+ int *state)
+{
+ int old_state;
+ vm_page_t p;
+
+ if (object == VM_OBJECT_NULL) {
+ /*
+ * Object must already be present or it can't be purgable.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Get current state of the purgable object.
+ */
+ switch (object->purgable) {
+ case VM_OBJECT_NONPURGABLE:
+ return KERN_INVALID_ARGUMENT;
+
+ case VM_OBJECT_PURGABLE_NONVOLATILE:
+ old_state = VM_PURGABLE_NONVOLATILE;
+ break;
+
+ case VM_OBJECT_PURGABLE_VOLATILE:
+ old_state = VM_PURGABLE_VOLATILE;
+ break;
+
+ case VM_OBJECT_PURGABLE_EMPTY:
+ old_state = VM_PURGABLE_EMPTY;
+ break;
+
+ default:
+ old_state = VM_PURGABLE_NONVOLATILE;
+ panic("Bad state (%d) for purgable object!\n",
+ object->purgable);
+ /*NOTREACHED*/
+ }
+
+	/* purgable objects can't have delayed copies - now or in the future */
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+
+ /*
+ * Execute the desired operation.
+ */
+ if (control == VM_PURGABLE_GET_STATE) {
+ *state = old_state;
+ return KERN_SUCCESS;
+ }
+
+ switch (*state) {
+ case VM_PURGABLE_NONVOLATILE:
+ vm_page_lock_queues();
+ if (object->purgable != VM_OBJECT_PURGABLE_NONVOLATILE) {
+ assert(vm_page_purgeable_count >=
+ object->resident_page_count);
+ vm_page_purgeable_count -= object->resident_page_count;
+ }
+
+ object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
+
+ /*
+ * If the object wasn't emptied, then mark all pages of the
+ * object as referenced in order to give them a complete turn
+ * of the virtual memory "clock" before becoming candidates
+ * for paging out (if the system is suffering from memory
+ * pressure). We don't really need to set the pmap reference
+ * bits (which would be expensive) since the software copies
+		 * are believed whenever they are set to TRUE.
+ */
+ if (old_state != VM_PURGABLE_EMPTY) {
+ for (p = (vm_page_t)queue_first(&object->memq);
+ !queue_end(&object->memq, (queue_entry_t)p);
+ p = (vm_page_t)queue_next(&p->listq))
+ p->reference = TRUE;
+ }
+
+ vm_page_unlock_queues();
+
+ break;
+
+ case VM_PURGABLE_VOLATILE:
+ vm_page_lock_queues();
+
+ if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
+ object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
+ vm_page_purgeable_count += object->resident_page_count;
+ }
+
+ object->purgable = VM_OBJECT_PURGABLE_VOLATILE;
+
+ /*
+ * We want the newly volatile purgable object to be a
+ * candidate for the pageout scan before other pages in the
+ * application if the system is suffering from memory
+ * pressure. To do this, we move a page of the object from
+ * the active queue onto the inactive queue in order to
+ * promote the object for early reclaim. We only need to move
+ * a single page since the pageout scan will reap the entire
+ * purgable object if it finds a single page in a volatile
+ * state. Obviously we don't do this if there are no pages
+ * associated with the object or we find a page of the object
+ * already on the inactive queue.
+ */
+ for (p = (vm_page_t)queue_first(&object->memq);
+ !queue_end(&object->memq, (queue_entry_t)p);
+ p = (vm_page_t)queue_next(&p->listq)) {
+ if (p->inactive) {
+ /* already a page on the inactive queue */
+ break;
+ }
+ if (p->active && !p->busy) {
+ /* found one we can move */
+ vm_page_deactivate(p);
+ break;
}
+ }
+ vm_page_unlock_queues();
+
+ break;
+
+
+ case VM_PURGABLE_EMPTY:
+ vm_page_lock_queues();
+ if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
+ object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
+ vm_page_purgeable_count += object->resident_page_count;
+ }
+ (void) vm_object_purge(object);
+ vm_page_unlock_queues();
+ break;
+
+ }
+ *state = old_state;
+
+ return KERN_SUCCESS;
+}
+
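+/*
+ * A minimal user-level sketch of the semantics described above, using
+ * the standard Mach interfaces (vm_allocate() with VM_FLAGS_PURGABLE
+ * and vm_purgable_control()).  Illustrative only; not part of this
+ * file and not compiled here.
+ */
+#if 0
+#include <mach/mach.h>
+
+kern_return_t
+purgable_example(void)
+{
+	vm_address_t	addr = 0;
+	int		state;
+	kern_return_t	kr;
+
+	/* create a purgable region */
+	kr = vm_allocate(mach_task_self(), &addr, vm_page_size,
+			 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	/* ... fill the region with data that can be recreated ... */
+
+	/* make it volatile: the system may now reclaim its pages */
+	state = VM_PURGABLE_VOLATILE;
+	kr = vm_purgable_control(mach_task_self(), addr,
+				 VM_PURGABLE_SET_STATE, &state);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	/* later: take it back and learn whether it was emptied */
+	state = VM_PURGABLE_NONVOLATILE;
+	kr = vm_purgable_control(mach_task_self(), addr,
+				 VM_PURGABLE_SET_STATE, &state);
+	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
+		/* contents were reclaimed; recreate the data */
+	}
+	return kr;
+}
+#endif
+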
+#if TASK_SWAPPER
+/*
+ * vm_object_res_deallocate
+ *
+ * (recursively) decrement residence counts on vm objects and their shadows.
+ * Called from vm_object_deallocate and when swapping out an object.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
+
+__private_extern__ void
+vm_object_res_deallocate(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked so it can be called directly
+ * from vm_object_deallocate. Original object is never
+ * unlocked.
+ */
+ assert(object->res_count > 0);
+ while (--object->res_count == 0) {
+ assert(object->ref_count >= object->res_count);
+ vm_object_deactivate_all_pages(object);
+ /* iterate on shadow, if present */
+ if (object->shadow != VM_OBJECT_NULL) {
+ vm_object_t tmp_object = object->shadow;
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ assert(object->res_count > 0);
+ } else
+ break;
+ }
+ if (object != orig_object)
+ vm_object_unlock(object);
+}
+/*
+ * vm_object_res_reference
+ *
+ * Internal function to increment residence count on a vm object
+ * and its shadows. It is called only from vm_object_reference, and
+ * when swapping in a vm object, via vm_map_swap.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
- }
- else {
- ip_unlock(port);
- vm_object_cache_unlock();
- }
- } else {
- return KERN_INVALID_ARGUMENT;
+__private_extern__ void
+vm_object_res_reference(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked, so this can be called directly
+ * from vm_object_reference. This lock is never released.
+ */
+ while ((++object->res_count == 1) &&
+ (object->shadow != VM_OBJECT_NULL)) {
+ vm_object_t tmp_object = object->shadow;
+
+ assert(object->ref_count >= object->res_count);
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
}
+ if (object != orig_object)
+ vm_object_unlock(object);
+ assert(orig_object->ref_count >= orig_object->res_count);
+}
+#endif /* TASK_SWAPPER */
+
+/*
+ * vm_object_reference:
+ *
+ * Gets another reference to the given object.
+ */
+#ifdef vm_object_reference
+#undef vm_object_reference
+#endif
+__private_extern__ void
+vm_object_reference(
+ register vm_object_t object)
+{
+ if (object == VM_OBJECT_NULL)
+ return;
+ vm_object_lock(object);
+ assert(object->ref_count > 0);
+ vm_object_reference_locked(object);
+ vm_object_unlock(object);
+}
- return KERN_SUCCESS;
+#ifdef MACH_BSD
+/*
+ * Scale the vm_object_cache
+ * This is required to make sure that the vm_object_cache is big
+ * enough to effectively cache the mapped file.
+ * This is really important with UBC as all the regular file vnodes
+ * have a memory object associated with them. Having this cache too
+ * small results in rapid reclaim of vnodes and hurts performance a LOT!
+ *
+ * This is also needed as number of vnodes can be dynamically scaled.
+ */
+kern_return_t
+adjust_vm_object_cache(
+ __unused vm_size_t oval,
+ vm_size_t nval)
+{
+ vm_object_cached_max = nval;
+ vm_object_cache_trim(FALSE);
+ return (KERN_SUCCESS);
}
+#endif /* MACH_BSD */
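+
+/*
+ * A minimal caller sketch, assuming a (hypothetical) hook that runs
+ * whenever the vnode limit is rescaled; adjust_vm_object_cache()
+ * simply tracks the new limit and trims any excess entries.
+ */
+#if 0
+static void
+vnode_limit_changed(vm_size_t old_limit, vm_size_t new_limit)
+{
+	(void) adjust_vm_object_cache(old_limit, new_limit);
+}
+#endif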
+
+/*
+ * vm_object_transpose
+ *
+ * This routine takes two VM objects of the same size and exchanges
+ * their backing store.
+ * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
+ * and UPL_BLOCK_ACCESS if they are referenced anywhere.
+ *
+ * The VM objects must not be locked by caller.
+ */
kern_return_t
-memory_object_create_named(
- ipc_port_t port,
- vm_object_size_t size,
- vm_object_t *object_ptr)
+vm_object_transpose(
+ vm_object_t object1,
+ vm_object_t object2,
+ vm_object_size_t transpose_size)
{
- vm_object_t object;
- vm_object_hash_entry_t entry;
+ vm_object_t tmp_object;
+ kern_return_t retval;
+ boolean_t object1_locked, object2_locked;
+ boolean_t object1_paging, object2_paging;
+ vm_page_t page;
+ vm_object_offset_t page_offset;
+
+ tmp_object = VM_OBJECT_NULL;
+ object1_locked = FALSE; object2_locked = FALSE;
+ object1_paging = FALSE; object2_paging = FALSE;
+
+ if (object1 == object2 ||
+ object1 == VM_OBJECT_NULL ||
+ object2 == VM_OBJECT_NULL) {
+		/*
+		 * If the 2 VM objects are the same, there's no point
+		 * in exchanging their backing store; and a null object
+		 * has no backing store to exchange.
+		 */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
- *object_ptr = (vm_object_t)NULL;
- if (IP_VALID(port)) {
+ vm_object_lock(object1);
+ object1_locked = TRUE;
+ if (object1->copy || object1->shadow || object1->shadowed ||
+ object1->purgable != VM_OBJECT_NONPURGABLE) {
+ /*
+ * We don't deal with copy or shadow objects (yet).
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ /*
+ * Since we're about to mess with the object's backing store,
+ * mark it as "paging_in_progress". Note that this is not enough
+ * to prevent any paging activity on this object, so the caller should
+ * have "quiesced" the objects beforehand, via a UPL operation with
+ * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
+ * and UPL_BLOCK_ACCESS (to mark the pages "busy").
+ */
+ vm_object_paging_begin(object1);
+ object1_paging = TRUE;
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
- vm_object_cache_lock();
- entry = vm_object_hash_lookup(port, FALSE);
- if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
- (entry->object != VM_OBJECT_NULL)) {
- if (entry->object->named == TRUE)
- panic("memory_object_create_named: caller already holds the right");
- }
+ /*
+ * Same as above for the 2nd object...
+ */
+ vm_object_lock(object2);
+ object2_locked = TRUE;
+ if (object2->copy || object2->shadow || object2->shadowed ||
+ object2->purgable != VM_OBJECT_NONPURGABLE) {
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ vm_object_paging_begin(object2);
+ object2_paging = TRUE;
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
- vm_object_cache_unlock();
- if ((object = vm_object_enter(port, size, FALSE, FALSE, TRUE))
- == VM_OBJECT_NULL)
- return(KERN_INVALID_OBJECT);
-
- /* wait for object (if any) to be ready */
- if (object != VM_OBJECT_NULL) {
- vm_object_lock(object);
- object->named = TRUE;
- while (!object->pager_ready) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGER_READY,
- FALSE);
- vm_object_lock(object);
- }
- vm_object_unlock(object);
+ /*
+ * Allocate a temporary VM object to hold object1's contents
+ * while we copy object2 to object1.
+ */
+ tmp_object = vm_object_allocate(transpose_size);
+ vm_object_lock(tmp_object);
+ vm_object_paging_begin(tmp_object);
+ tmp_object->can_persist = FALSE;
+
+ /*
+ * Since we need to lock both objects at the same time,
+ * make sure we always lock them in the same order to
+ * avoid deadlocks.
+ */
+ if (object1 < object2) {
+ vm_object_lock(object1);
+ vm_object_lock(object2);
+ } else {
+ vm_object_lock(object2);
+ vm_object_lock(object1);
+ }
+ object1_locked = TRUE;
+ object2_locked = TRUE;
+
+ if (object1->size != object2->size ||
+ object1->size != transpose_size) {
+ /*
+ * If the 2 objects don't have the same size, we can't
+ * exchange their backing stores or one would overflow.
+ * If their size doesn't match the caller's
+ * "transpose_size", we can't do it either because the
+ * transpose operation will affect the entire span of
+ * the objects.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+
+ /*
+ * Transpose the lists of resident pages.
+ */
+ if (object1->phys_contiguous || queue_empty(&object1->memq)) {
+ /*
+ * No pages in object1, just transfer pages
+ * from object2 to object1. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset);
+ }
+ assert(queue_empty(&object2->memq));
+ } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
+ /*
+ * No pages in object2, just transfer pages
+ * from object1 to object2. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
+ vm_page_rename(page, object2, page->offset);
}
- *object_ptr = object;
- return (KERN_SUCCESS);
+ assert(queue_empty(&object1->memq));
} else {
- return (KERN_INVALID_ARGUMENT);
+ /* transfer object1's pages to tmp_object */
+ vm_page_lock_queues();
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
+ page_offset = page->offset;
+ vm_page_remove(page);
+ page->offset = page_offset;
+ queue_enter(&tmp_object->memq, page, vm_page_t, listq);
+ }
+ vm_page_unlock_queues();
+ assert(queue_empty(&object1->memq));
+ /* transfer object2's pages to object1 */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset);
+ }
+ assert(queue_empty(&object2->memq));
+		/* transfer tmp_object's pages to object2 */
+ while (!queue_empty(&tmp_object->memq)) {
+ page = (vm_page_t) queue_first(&tmp_object->memq);
+ queue_remove(&tmp_object->memq, page,
+ vm_page_t, listq);
+ vm_page_insert(page, object2, page->offset);
+ }
+ assert(queue_empty(&tmp_object->memq));
+ }
+
+ /* no need to transpose the size: they should be identical */
+ assert(object1->size == object2->size);
+
+#define __TRANSPOSE_FIELD(field) \
+MACRO_BEGIN \
+ tmp_object->field = object1->field; \
+ object1->field = object2->field; \
+ object2->field = tmp_object->field; \
+MACRO_END
+
+ assert(!object1->copy);
+ assert(!object2->copy);
+
+ assert(!object1->shadow);
+ assert(!object2->shadow);
+
+ __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
+ __TRANSPOSE_FIELD(pager);
+ __TRANSPOSE_FIELD(paging_offset);
+
+ __TRANSPOSE_FIELD(pager_control);
+ /* update the memory_objects' pointers back to the VM objects */
+ if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object1->pager_control,
+ object1);
+ }
+ if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object2->pager_control,
+ object2);
+ }
+
+ __TRANSPOSE_FIELD(absent_count);
+
+ assert(object1->paging_in_progress);
+ assert(object2->paging_in_progress);
+
+ __TRANSPOSE_FIELD(pager_created);
+ __TRANSPOSE_FIELD(pager_initialized);
+ __TRANSPOSE_FIELD(pager_ready);
+ __TRANSPOSE_FIELD(pager_trusted);
+ __TRANSPOSE_FIELD(internal);
+ __TRANSPOSE_FIELD(temporary);
+ __TRANSPOSE_FIELD(private);
+ __TRANSPOSE_FIELD(pageout);
+ __TRANSPOSE_FIELD(true_share);
+ __TRANSPOSE_FIELD(phys_contiguous);
+ __TRANSPOSE_FIELD(nophyscache);
+ __TRANSPOSE_FIELD(last_alloc);
+ __TRANSPOSE_FIELD(sequential);
+ __TRANSPOSE_FIELD(cluster_size);
+ __TRANSPOSE_FIELD(existence_map);
+ __TRANSPOSE_FIELD(cow_hint);
+ __TRANSPOSE_FIELD(wimg_bits);
+
+#undef __TRANSPOSE_FIELD
+
+ retval = KERN_SUCCESS;
+
+done:
+ /*
+ * Cleanup.
+ */
+ if (tmp_object != VM_OBJECT_NULL) {
+ vm_object_paging_end(tmp_object);
+ vm_object_unlock(tmp_object);
+ /*
+ * Re-initialize the temporary object to avoid
+ * deallocating a real pager.
+ */
+ _vm_object_allocate(transpose_size, tmp_object);
+ vm_object_deallocate(tmp_object);
+ tmp_object = VM_OBJECT_NULL;
+ }
+
+ if (object1_locked) {
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+ }
+ if (object2_locked) {
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
}
+ if (object1_paging) {
+ vm_object_lock(object1);
+ vm_object_paging_end(object1);
+ vm_object_unlock(object1);
+ object1_paging = FALSE;
+ }
+ if (object2_paging) {
+ vm_object_lock(object2);
+ vm_object_paging_end(object2);
+ vm_object_unlock(object2);
+ object2_paging = FALSE;
+ }
+
+ return retval;
}
+
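+/*
+ * A minimal caller sketch (hypothetical helper): both objects must be
+ * the same size and must already be "quiesced" as described above; the
+ * UPL setup with UPL_SET_IO_WIRE and UPL_BLOCK_ACCESS is elided since
+ * it depends on how the objects are mapped.
+ */
+#if 0
+static kern_return_t
+vm_object_transpose_example(
+	vm_object_t		object1,
+	vm_object_t		object2,
+	vm_object_size_t	size)
+{
+	/* neither object may be locked by the caller */
+	return vm_object_transpose(object1, object2, size);
+}
+#endif
+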
+/* Allow manipulation of individual page state. This is actually part of */
+/* the UPL regimen but takes place on the VM object rather than on a UPL */
+
kern_return_t
-memory_object_recover_named(
- ipc_port_t pager,
- boolean_t wait_on_terminating,
- vm_object_t *object_ptr)
+vm_object_page_op(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags)
{
- vm_object_t object;
- vm_object_hash_entry_t entry;
+ vm_page_t dst_page;
- *object_ptr = (vm_object_t)NULL;
-lookup_entry:
- if (IP_VALID(pager)) {
+ vm_object_lock(object);
- vm_object_cache_lock();
- entry = vm_object_hash_lookup(pager, FALSE);
- if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
- (entry->object != VM_OBJECT_NULL)) {
- if (entry->object->named == TRUE)
- panic("memory_object_recover_named: caller already holds the right");
- object = entry->object;
- vm_object_lock(object);
- vm_object_cache_unlock();
- if (object->terminating && wait_on_terminating) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
- THREAD_UNINT);
- vm_object_unlock(object);
- goto lookup_entry;
+ if(ops & UPL_POP_PHYSICAL) {
+ if(object->phys_contiguous) {
+ if (phys_entry) {
+ *phys_entry = (ppnum_t)
+ (object->shadow_offset >> 12);
}
- } else {
- vm_object_cache_unlock();
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ } else {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+ }
+ if(object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ while(TRUE) {
+ if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
+ vm_object_unlock(object);
return KERN_FAILURE;
}
- if((object->ref_count == 0) && (!object->terminating)){
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- XPR(XPR_VM_OBJECT_CACHE,
- "memory_object_recover_named: removing %X, head (%X, %X)\n",
- (integer_t)object,
- (integer_t)vm_object_cached_list.next,
- (integer_t)vm_object_cached_list.prev, 0,0);
+ /* Sync up on getting the busy bit */
+ if((dst_page->busy || dst_page->cleaning) &&
+ (((ops & UPL_POP_SET) &&
+ (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
+ /* someone else is playing with the page, we will */
+ /* have to wait */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
}
- object->named = TRUE;
- object->ref_count++;
- vm_object_res_reference(object);
- while (!object->pager_ready) {
- vm_object_wait(object,
- VM_OBJECT_EVENT_PAGER_READY,
- FALSE);
- vm_object_lock(object);
+ if (ops & UPL_POP_DUMP) {
+ vm_page_lock_queues();
+
+ if (dst_page->no_isync == FALSE)
+ pmap_disconnect(dst_page->phys_page);
+ vm_page_free(dst_page);
+
+ vm_page_unlock_queues();
+ break;
}
- vm_object_unlock(object);
- *object_ptr = object;
- return (KERN_SUCCESS);
- } else {
- return (KERN_INVALID_ARGUMENT);
+
+ if (flags) {
+ *flags = 0;
+
+ /* Get the condition of flags before requested ops */
+ /* are undertaken */
+
+ if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
+ if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
+ if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
+ if(dst_page->absent) *flags |= UPL_POP_ABSENT;
+ if(dst_page->busy) *flags |= UPL_POP_BUSY;
+ }
+
+	/* The caller should have set UPL_POP_BUSY, either in this */
+	/* call or in a prior one, before changing page state */
+ if(ops & UPL_POP_SET) {
+ /* The protection granted with this assert will */
+ /* not be complete. If the caller violates the */
+ /* convention and attempts to change page state */
+ /* without first setting busy we may not see it */
+ /* because the page may already be busy. However */
+ /* if such violations occur we will assert sooner */
+ /* or later. */
+ assert(dst_page->busy || (ops & UPL_POP_BUSY));
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
+ if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
+ }
+
+ if(ops & UPL_POP_CLR) {
+ assert(dst_page->busy);
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
+ if (ops & UPL_POP_BUSY) {
+ dst_page->busy = FALSE;
+ PAGE_WAKEUP(dst_page);
+ }
+ }
+
+ if (dst_page->encrypted) {
+ /*
+ * ENCRYPTED SWAP:
+ * We need to decrypt this encrypted page before the
+ * caller can access its contents.
+ * But if the caller really wants to access the page's
+ * contents, they have to keep the page "busy".
+ * Otherwise, the page could get recycled or re-encrypted
+ * at any time.
+ */
+ if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
+ dst_page->busy) {
+ /*
+ * The page is stable enough to be accessed by
+ * the caller, so make sure its contents are
+ * not encrypted.
+ */
+ vm_page_decrypt(dst_page, 0);
+ } else {
+ /*
+ * The page is not busy, so don't bother
+ * decrypting it, since anything could
+ * happen to it between now and when the
+ * caller wants to access it.
+ * We should not give the caller access
+ * to this page.
+ */
+ assert(!phys_entry);
+ }
+ }
+
+ if (phys_entry) {
+ /*
+ * The physical page number will remain valid
+ * only if the page is kept busy.
+ * ENCRYPTED SWAP: make sure we don't let the
+ * caller access an encrypted page.
+ */
+ assert(dst_page->busy);
+ assert(!dst_page->encrypted);
+ *phys_entry = dst_page->phys_page;
+ }
+
+ break;
}
+
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+
}
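+
+/*
+ * A minimal caller sketch (hypothetical helper) of the convention the
+ * asserts above enforce: request UPL_POP_BUSY in the same call that
+ * asks for the physical page number, and clear it when done so any
+ * waiters are woken.
+ */
+#if 0
+static kern_return_t
+vm_object_page_op_example(
+	vm_object_t		object,
+	vm_object_offset_t	offset)
+{
+	ppnum_t		phys;
+	int		flags;
+	kern_return_t	kr;
+
+	/* mark the page busy and get its physical page number */
+	kr = vm_object_page_op(object, offset,
+			       UPL_POP_SET | UPL_POP_BUSY, &phys, &flags);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	/* ... the page is stable while its busy bit is held ... */
+
+	/* clear the busy bit, waking anyone waiting on the page */
+	return vm_object_page_op(object, offset,
+			       UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
+}
+#endif
+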
-#ifdef MACH_BSD
+
/*
- * Scale the vm_object_cache
- * This is required to make sure that the vm_object_cache is big
- * enough to effectively cache the mapped file.
- * This is really important with UBC as all the regular file vnodes
- * have memory object associated with them. Havving this cache too
- * small results in rapid reclaim of vnodes and hurts performance a LOT!
- *
- * This is also needed as number of vnodes can be dynamically scaled.
+ * vm_object_range_op offers a performance enhancement over
+ * vm_object_page_op for page_op operations which do not require
+ * page-level state to be returned from the call.  Page_op was created
+ * to provide a low-cost alternative to page manipulation via UPLs when
+ * only a single page was involved.  The range_op call establishes the
+ * ability in the _op family of functions to work on multiple pages
+ * where the lack of page-level state handling allows the caller to
+ * avoid the overhead of the UPL structures.
*/
+
kern_return_t
-adjust_vm_object_cache(vm_size_t oval, vm_size_t nval)
+vm_object_range_op(
+ vm_object_t object,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ int *range)
{
- vm_object_cached_max = nval;
- vm_object_cache_trim(FALSE);
- return (KERN_SUCCESS);
-}
-#endif /* MACH_BSD */
+ vm_object_offset_t offset;
+ vm_page_t dst_page;
+
+ if (object->resident_page_count == 0) {
+ if (range) {
+ if (ops & UPL_ROP_PRESENT)
+ *range = 0;
+ else
+ *range = offset_end - offset_beg;
+ }
+ return KERN_SUCCESS;
+ }
+ vm_object_lock(object);
+
+ if (object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ offset = offset_beg;
+
+ while (offset < offset_end) {
+ dst_page = vm_page_lookup(object, offset);
+ if (dst_page != VM_PAGE_NULL) {
+ if (ops & UPL_ROP_DUMP) {
+ if (dst_page->busy || dst_page->cleaning) {
+ /*
+ * someone else is playing with the
+ * page, we will have to wait
+ */
+ PAGE_SLEEP(object,
+ dst_page, THREAD_UNINT);
+ /*
+				 * need to look the page up again since its
+				 * state may have changed while we slept;
+ * it might even belong to a different object
+ * at this point
+ */
+ continue;
+ }
+ vm_page_lock_queues();
+ if (dst_page->no_isync == FALSE)
+ pmap_disconnect(dst_page->phys_page);
+ vm_page_free(dst_page);
+
+ vm_page_unlock_queues();
+ } else if (ops & UPL_ROP_ABSENT)
+ break;
+ } else if (ops & UPL_ROP_PRESENT)
+ break;
+
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ if (range)
+ *range = offset - offset_beg;
+
+ return KERN_SUCCESS;
+}
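+
+/*
+ * A minimal caller sketch (hypothetical helper): with UPL_ROP_PRESENT,
+ * the scan stops at the first non-resident page, so "range" comes back
+ * as the length, in bytes, of the run of resident pages starting at
+ * offset_beg.
+ */
+#if 0
+static int
+vm_object_resident_run(
+	vm_object_t		object,
+	vm_object_offset_t	offset_beg,
+	vm_object_offset_t	offset_end)
+{
+	int	range = 0;
+
+	(void) vm_object_range_op(object, offset_beg, offset_end,
+				  UPL_ROP_PRESENT, &range);
+	return range;
+}
+#endif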