/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
#include "default_pager_internal.h"
+#include <default_pager/default_pager_object_server.h>
+#include <mach/memory_object_default_server.h>
+#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
+#include <mach/upl.h>
+#include <mach/vm_map.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_map.h>
+#include <vm/vm_protos.h>
+/* forward declaration */
+vstruct_t vs_object_create(dp_size_t size);
/*
* List of all vstructs. A specific vstruct is
static unsigned int default_pager_wait_seqno = 0; /* debugging */
static unsigned int default_pager_wait_read = 0; /* debugging */
static unsigned int default_pager_wait_write = 0; /* debugging */
-static unsigned int default_pager_wait_refs = 0; /* debugging */
__private_extern__ void
vs_async_wait(
vstruct_t
vs_object_create(
- vm_size_t size)
+ dp_size_t size)
{
vstruct_t vs;
pset = default_pager_external_set;
}
- ipc_port_make_sonce(mem_obj);
ip_lock(mem_obj); /* unlocked in nsrequest below */
+ ipc_port_make_sonce_locked(mem_obj);
ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
#endif
+const struct memory_object_pager_ops default_pager_ops = {
+ dp_memory_object_reference,
+ dp_memory_object_deallocate,
+ dp_memory_object_init,
+ dp_memory_object_terminate,
+ dp_memory_object_data_request,
+ dp_memory_object_data_return,
+ dp_memory_object_data_initialize,
+ dp_memory_object_data_unlock,
+ dp_memory_object_synchronize,
+ dp_memory_object_map,
+ dp_memory_object_last_unmap,
+ dp_memory_object_data_reclaim,
+ "default pager"
+};
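/*
 * Illustration only: a minimal, self-contained sketch of how a pager-ops
 * table like default_pager_ops lets generic VM code dispatch to whichever
 * pager backs an object.  The types below are simplified stand-ins, not
 * the kernel's real memory_object glue.
 */
#include <stdio.h>

typedef int kr_t;                       /* stand-in for kern_return_t */

struct mo;                              /* simplified memory object */
struct mo_pager_ops {                   /* simplified pager-ops vtable */
	kr_t (*data_request)(struct mo *obj, unsigned long offset);
	const char *pager_name;
};
struct mo {
	const struct mo_pager_ops *mo_ops;  /* plays the role of vs_pager_ops */
};

static kr_t
dp_data_request_sketch(struct mo *obj, unsigned long offset)
{
	(void)obj;
	printf("default pager: page-in at 0x%lx\n", offset);
	return 0;
}

static const struct mo_pager_ops sketch_default_pager_ops = {
	dp_data_request_sketch,
	"default pager"
};

/* Generic layer: no knowledge of which pager is behind the object. */
static kr_t
memory_object_data_request_sketch(struct mo *obj, unsigned long offset)
{
	return obj->mo_ops->data_request(obj, offset);
}
/* usage: struct mo obj = { &sketch_default_pager_ops };
 *        memory_object_data_request_sketch(&obj, 0x1000);          */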
+
kern_return_t
dp_memory_object_init(
memory_object_t mem_obj,
memory_object_control_t control,
- vm_size_t pager_page_size)
+ __unused memory_object_cluster_size_t pager_page_size)
{
vstruct_t vs;
dp_memory_object_synchronize(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
- vm_sync_t flags)
+ memory_object_size_t length,
+ __unused vm_sync_t flags)
{
vstruct_t vs;
}
kern_return_t
-dp_memory_object_unmap(
- memory_object_t mem_obj)
+dp_memory_object_map(
+ __unused memory_object_t mem_obj,
+ __unused vm_prot_t prot)
{
- panic("dp_memory_object_unmap");
+ panic("dp_memory_object_map");
+ return KERN_FAILURE;
+}
+kern_return_t
+dp_memory_object_last_unmap(
+ __unused memory_object_t mem_obj)
+{
+ panic("dp_memory_object_last_unmap");
return KERN_FAILURE;
}
+kern_return_t
+dp_memory_object_data_reclaim(
+ memory_object_t mem_obj,
+ boolean_t reclaim_backing_store)
+{
+ vstruct_t vs;
+
+ vs_lookup(mem_obj, vs);
+ for (;;) {
+ vs_lock(vs);
+ vs_async_wait(vs);
+ if (!vs->vs_xfer_pending) {
+ break;
+ }
+ vs_unlock(vs); /* drop the lock before retrying, or we would self-deadlock */
+ }
+ vs->vs_xfer_pending = TRUE;
+ vs_unlock(vs);
+
+ ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);
+
+ vs_lock(vs);
+ vs->vs_xfer_pending = FALSE;
+ vs_unlock(vs);
+
+ return KERN_SUCCESS;
+}
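/*
 * The vs_xfer_pending dance above is a claim/release handshake: wait until
 * no transfer is pending, claim the vstruct, do the work without holding
 * the lock, then release the claim.  A user-space analogue, assuming a
 * mutex/condvar in place of vs_lock()/vs_async_wait() (sketch, not kernel
 * code):
 */
#include <pthread.h>
#include <stdbool.h>

struct xfer_gate {
	pthread_mutex_t lock;
	pthread_cond_t  idle;          /* signaled when xfer_pending clears */
	bool            xfer_pending;
};

static void
gate_run_exclusive(struct xfer_gate *g, void (*work)(void))
{
	pthread_mutex_lock(&g->lock);
	while (g->xfer_pending)        /* plays the role of the retry loop */
		pthread_cond_wait(&g->idle, &g->lock);
	g->xfer_pending = true;        /* claim, like vs_xfer_pending = TRUE */
	pthread_mutex_unlock(&g->lock);

	work();                        /* e.g. the ps_vstruct_reclaim() step */

	pthread_mutex_lock(&g->lock);
	g->xfer_pending = false;       /* release the claim */
	pthread_cond_broadcast(&g->idle);
	pthread_mutex_unlock(&g->lock);
}
/* usage: struct xfer_gate g =
 *   { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }; */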
+
kern_return_t
dp_memory_object_terminate(
memory_object_t mem_obj)
{
memory_object_control_t control;
vstruct_t vs;
- kern_return_t kr;
/*
* control port is a receive right, not a send right.
VS_UNLOCK(vs);
}
-extern ipc_port_t max_pages_trigger_port;
-extern int dp_pages_free;
-extern int maximum_pages_free;
void
dp_memory_object_deallocate(
memory_object_t mem_obj)
{
vstruct_t vs;
mach_port_seqno_t seqno;
- ipc_port_t trigger;
/*
* Because we don't give out multiple first references
thread_wakeup((event_t)&backing_store_release_trigger_disable);
}
VSL_UNLOCK();
-
- PSL_LOCK();
- if(max_pages_trigger_port
- && (backing_store_release_trigger_disable == 0)
- && (dp_pages_free > maximum_pages_free)) {
- trigger = max_pages_trigger_port;
- max_pages_trigger_port = NULL;
- } else
- trigger = IP_NULL;
- PSL_UNLOCK();
-
- if (trigger != IP_NULL) {
- default_pager_space_alert(trigger, LO_WAT_ALERT);
- ipc_port_release_send(trigger);
- }
-
}
kern_return_t
dp_memory_object_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
- vm_prot_t protection_required)
+ memory_object_cluster_size_t length,
+ __unused vm_prot_t protection_required,
+ memory_object_fault_info_t fault_info)
{
vstruct_t vs;
+ kern_return_t kr = KERN_SUCCESS;
GSTAT(global_stats.gs_pagein_calls++);
if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
Panic("bad alignment");
- pvs_cluster_read(vs, (vm_offset_t)offset, length);
-
+ assert((dp_offset_t) offset == offset);
+ kr = pvs_cluster_read(vs, (dp_offset_t) offset, length, fault_info);
+
+ /*
+ * Regular data requests have a non-zero length and always return
+ * KERN_SUCCESS.  Their actual success is determined by whether the
+ * pager provides the page, i.e., whether we call upl_commit() or
+ * upl_abort().  A length of 0 means the caller is only asking whether
+ * the pager has a copy of that page; the answer is given by the return
+ * value, and KERN_SUCCESS means the pager does have the page.
+ */
+ if (length) {
+ kr = KERN_SUCCESS;
+ }
+
vs_finish_read(vs);
- return KERN_SUCCESS;
+ return kr;
}
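/*
 * Caller-side sketch of the zero-length probe convention described in the
 * comment above.  pager_has_page() is a hypothetical helper, not a kernel
 * routine, and passing a null fault-info is assumed to be tolerated for a
 * probe in this sketch:
 */
static boolean_t
pager_has_page(memory_object_t mem_obj, memory_object_offset_t offset)
{
	kern_return_t kr;

	kr = dp_memory_object_data_request(mem_obj, offset,
		0,               /* length 0: only ask, do not page in */
		VM_PROT_READ,    /* ignored for a probe */
		NULL);           /* assumed-acceptable null fault info */
	return kr == KERN_SUCCESS;   /* KERN_SUCCESS: pager holds the page */
}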
/*
dp_memory_object_data_initialize(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t size)
+ memory_object_cluster_size_t size)
{
vstruct_t vs;
- DEBUG(DEBUG_MO_EXTERNAL,
- ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
- (int)mem_obj, (int)offset, (int)size));
- GSTAT(global_stats.gs_pages_init += atop(size));
+ DP_DEBUG(DEBUG_MO_EXTERNAL,
+ ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
+ (int)mem_obj, (int)offset, (int)size));
+ GSTAT(global_stats.gs_pages_init += atop_32(size));
vs_lookup(mem_obj, vs);
vs_lock(vs);
* loop if the address range specified crosses cluster
* boundaries.
*/
- vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+ assert((upl_offset_t) offset == offset);
+ vs_cluster_write(vs, 0, (upl_offset_t)offset, size, FALSE, 0);
vs_finish_write(vs);
kern_return_t
dp_memory_object_data_unlock(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- vm_size_t size,
- vm_prot_t desired_access)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t size,
+ __unused vm_prot_t desired_access)
{
Panic("dp_memory_object_data_unlock: illegal");
return KERN_FAILURE;
}
+/*ARGSUSED8*/
kern_return_t
dp_memory_object_data_return(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t size,
- boolean_t dirty,
- boolean_t kernel_copy)
+ memory_object_cluster_size_t size,
+ __unused memory_object_offset_t *resid_offset,
+ __unused int *io_error,
+ __unused boolean_t dirty,
+ __unused boolean_t kernel_copy,
+ __unused int upl_flags)
{
vstruct_t vs;
- DEBUG(DEBUG_MO_EXTERNAL,
- ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
- (int)mem_obj, (int)offset, (int)size));
+ DP_DEBUG(DEBUG_MO_EXTERNAL,
+ ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
+ (int)mem_obj, (int)offset, (int)size));
GSTAT(global_stats.gs_pageout_calls++);
/* This routine is called by the pageout thread. The pageout thread */
/* a synchronous interface */
/* return KERN_LOCK_OWNED; */
upl_t upl;
- int page_list_count = 0;
+ unsigned int page_list_count = 0;
memory_object_super_upl_request(vs->vs_control,
(memory_object_offset_t)offset,
size, size,
if ((vs->vs_seqno != vs->vs_next_seqno++)
|| (vs->vs_readers)
|| (vs->vs_xfer_pending)) {
- upl_t upl;
- int page_list_count = 0;
+ upl_t upl;
+ unsigned int page_list_count = 0;
vs->vs_next_seqno--;
VS_UNLOCK(vs);
* loop if the address range specified crosses cluster
* boundaries.
*/
- vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+ assert((upl_offset_t) offset == offset);
+ vs_cluster_write(vs, 0, (upl_offset_t) offset, size, FALSE, 0);
vs_finish_write(vs);
*/
kern_return_t
default_pager_memory_object_create(
- memory_object_default_t dmm,
+ __unused memory_object_default_t dmm,
vm_size_t new_size,
memory_object_t *new_mem_obj)
{
assert(dmm == default_pager_object);
- vs = vs_object_create(new_size);
+ if ((dp_size_t) new_size != new_size) {
+ /* 32-bit overflow */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vs = vs_object_create((dp_size_t) new_size);
if (vs == VSTRUCT_NULL)
return KERN_RESOURCE_SHORTAGE;
* and this default_pager structure
*/
- vs->vs_mem_obj = ISVS;
- vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;
+ vs->vs_pager_ops = &default_pager_ops;
+ vs->vs_pager_header.io_bits = IKOT_MEMORY_OBJECT;
/*
* After this, other threads might receive requests
*/
kern_return_t
default_pager_object_create(
- default_pager_t pager,
+ default_pager_t default_pager,
vm_size_t size,
memory_object_t *mem_objp)
{
vstruct_t vs;
- kern_return_t result;
- struct vstruct_alias *alias_struct;
+ if (default_pager != default_pager_object)
+ return KERN_INVALID_ARGUMENT;
- if (pager != default_pager_object)
+ if ((dp_size_t) size != size) {
+ /* 32-bit overflow */
return KERN_INVALID_ARGUMENT;
+ }
- vs = vs_object_create(size);
+ vs = vs_object_create((dp_size_t) size);
if (vs == VSTRUCT_NULL)
return KERN_RESOURCE_SHORTAGE;
* Set up associations between the default pager
* and this vstruct structure
*/
- vs->vs_mem_obj = ISVS;
+ vs->vs_pager_ops = &default_pager_ops;
vstruct_list_insert(vs);
*mem_objp = vs_to_mem_obj(vs);
return KERN_SUCCESS;
kern_return_t
default_pager_objects(
- default_pager_t pager,
+ default_pager_t default_pager,
default_pager_object_array_t *objectsp,
mach_msg_type_number_t *ocountp,
- memory_object_array_t *pagersp,
+ mach_port_array_t *portsp,
mach_msg_type_number_t *pcountp)
{
vm_offset_t oaddr = 0; /* memory for objects */
vm_size_t osize = 0; /* current size */
default_pager_object_t * objects;
- unsigned int opotential;
+ unsigned int opotential = 0;
- vm_offset_t paddr = 0; /* memory for pagers */
+ vm_map_copy_t pcopy = 0; /* copy handle for pagers */
vm_size_t psize = 0; /* current size */
memory_object_t * pagers;
- unsigned int ppotential;
+ unsigned int ppotential = 0;
unsigned int actual;
unsigned int num_objects;
kern_return_t kr;
vstruct_t entry;
-/*
- if (pager != default_pager_default_port)
- return KERN_INVALID_ARGUMENT;
-*/
-
- /* start with the inline memory */
-
- kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&objects,
- (vm_map_copy_t) *objectsp);
-
- if (kr != KERN_SUCCESS)
- return kr;
-
- osize = round_page(*ocountp * sizeof * objects);
- kr = vm_map_wire(ipc_kernel_map,
- trunc_page((vm_offset_t)objects),
- round_page(((vm_offset_t)objects) + osize),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- osize=0;
-
- *objectsp = objects;
- /* we start with the inline space */
-
- num_objects = 0;
- opotential = *ocountp;
-
- pagers = (memory_object_t *) *pagersp;
- ppotential = *pcountp;
-
- VSL_LOCK();
+ if (default_pager != default_pager_object)
+ return KERN_INVALID_ARGUMENT;
/*
* We will send no more than this many
*/
actual = vstruct_list.vsl_count;
- VSL_UNLOCK();
- if (opotential < actual) {
- vm_offset_t newaddr;
- vm_size_t newsize;
-
- newsize = 2 * round_page(actual * sizeof * objects);
-
- kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
- if (kr != KERN_SUCCESS)
- goto nomemory;
-
- oaddr = newaddr;
- osize = newsize;
- opotential = osize / sizeof * objects;
- objects = (default_pager_object_t *)oaddr;
+ /*
+ * Our out-of-line port arrays are simply kalloc'ed.
+ */
+ psize = round_page(actual * sizeof (*pagers));
+ ppotential = (unsigned int) (psize / sizeof (*pagers));
+ pagers = (memory_object_t *)kalloc(psize);
+ if (0 == pagers)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * Returned out-of-line data must be allocated out of
+ * the ipc_kernel_map, wired down, filled in, and
+ * then "copied in" as if it had been sent by a
+ * user process.
+ */
+ osize = round_page(actual * sizeof (*objects));
+ opotential = (unsigned int) (osize / sizeof (*objects));
+ kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
+ if (KERN_SUCCESS != kr) {
+ kfree(pagers, psize);
+ return KERN_RESOURCE_SHORTAGE;
}
+ objects = (default_pager_object_t *)oaddr;
- if (ppotential < actual) {
- vm_offset_t newaddr;
- vm_size_t newsize;
-
- newsize = 2 * round_page(actual * sizeof * pagers);
-
- kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
- if (kr != KERN_SUCCESS)
- goto nomemory;
-
- paddr = newaddr;
- psize = newsize;
- ppotential = psize / sizeof * pagers;
- pagers = (memory_object_t *)paddr;
- }
/*
* Now scan the list.
num_objects = 0;
queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) {
- memory_object_t pager;
- vm_size_t size;
+ memory_object_t pager;
+ vm_size_t size;
if ((num_objects >= opotential) ||
(num_objects >= ppotential)) {
VS_UNLOCK(entry);
goto not_this_one;
}
- dp_memory_object_reference(vs_to_mem_obj(entry));
+ pager = vs_to_mem_obj(entry);
+ dp_memory_object_reference(pager);
VS_UNLOCK(entry);
/* the arrays are wired, so no deadlock worries */
VSL_UNLOCK();
- /*
- * Deallocate and clear unused memory.
- * (Returned memory will automagically become pageable.)
- */
-
- if (objects == *objectsp) {
-
- /*
- * Our returned information fit inline.
- * Nothing to deallocate.
- */
- *ocountp = num_objects;
- } else if (actual == 0) {
- (void) vm_deallocate(kernel_map, oaddr, osize);
-
- /* return zero items inline */
- *ocountp = 0;
- } else {
- vm_offset_t used;
-
- used = round_page(actual * sizeof * objects);
-
- if (used != osize)
- (void) vm_deallocate(kernel_map,
- oaddr + used, osize - used);
-
- *objectsp = objects;
- *ocountp = num_objects;
- }
-
- if (pagers == (memory_object_t *)*pagersp) {
-
- /*
- * Our returned information fit inline.
- * Nothing to deallocate.
- */
-
- *pcountp = num_objects;
- } else if (actual == 0) {
- (void) vm_deallocate(kernel_map, paddr, psize);
-
- /* return zero items inline */
- *pcountp = 0;
- } else {
- vm_offset_t used;
-
- used = round_page(actual * sizeof * pagers);
-
- if (used != psize)
- (void) vm_deallocate(kernel_map,
- paddr + used, psize - used);
-
- *pagersp = (memory_object_array_t)pagers;
- *pcountp = num_objects;
+ /* clear out any excess allocation */
+ while (num_objects < opotential) {
+ objects[--opotential].dpo_object = (vm_offset_t) 0;
+ objects[opotential].dpo_size = 0;
}
- (void) vm_map_unwire(kernel_map, (vm_offset_t)objects,
- *ocountp + (vm_offset_t)objects, FALSE);
- (void) vm_map_copyin(kernel_map, (vm_offset_t)objects,
- *ocountp, TRUE, (vm_map_copy_t *)objectsp);
-
- return KERN_SUCCESS;
-
- nomemory:
- {
- register int i;
- for (i = 0; i < num_objects; i++)
- if (pagers[i] != MEMORY_OBJECT_NULL)
- memory_object_deallocate(pagers[i]);
+ while (num_objects < ppotential) {
+ pagers[--ppotential] = MEMORY_OBJECT_NULL;
}
- if (objects != *objectsp)
- (void) vm_deallocate(kernel_map, oaddr, osize);
+ kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(oaddr),
+ vm_map_round_page(oaddr + osize), FALSE);
+ assert(KERN_SUCCESS == kr);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
+ (vm_map_size_t)osize, TRUE, &pcopy);
+ assert(KERN_SUCCESS == kr);
- if (pagers != (memory_object_t *)*pagersp)
- (void) vm_deallocate(kernel_map, paddr, psize);
+ *objectsp = (default_pager_object_array_t)pcopy;
+ *ocountp = num_objects;
+ *portsp = (mach_port_array_t)pagers;
+ *pcountp = num_objects;
- return KERN_RESOURCE_SHORTAGE;
+ return KERN_SUCCESS;
}
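/*
 * The reply-buffer protocol used above, condensed as a sketch: OOL data is
 * built in wired ipc_kernel_map memory, then unwired and "copied in" so
 * MIG can send it as though a user process had.  fill_items() is a
 * hypothetical callback and error paths are trimmed:
 */
static kern_return_t
return_ool_array(unsigned int count, vm_size_t elem_size,
	void (*fill_items)(void *buf, unsigned int count),
	vm_map_copy_t *out_copy)
{
	vm_offset_t   addr;
	vm_size_t     size = round_page(count * elem_size);
	kern_return_t kr;

	kr = kmem_alloc(ipc_kernel_map, &addr, size);  /* wired kernel memory */
	if (kr != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;

	fill_items((void *)addr, count);               /* fill while wired */

	kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
		vm_map_round_page(addr + size), FALSE);
	assert(kr == KERN_SUCCESS);
	/* src_destroy == TRUE: the copy object consumes the allocation */
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		(vm_map_size_t)size, TRUE, out_copy);
	assert(kr == KERN_SUCCESS);
	return KERN_SUCCESS;
}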
kern_return_t
default_pager_object_pages(
- default_pager_t pager,
- memory_object_t object,
+ default_pager_t default_pager,
+ mach_port_t memory_object,
default_pager_page_array_t *pagesp,
mach_msg_type_number_t *countp)
{
- vm_offset_t addr; /* memory for page offsets */
+ vm_offset_t addr = 0; /* memory for page offsets */
vm_size_t size = 0; /* current memory size */
- default_pager_page_t * pages;
- unsigned int potential, actual;
+ vm_map_copy_t copy;
+ default_pager_page_t * pages = 0;
+ unsigned int potential;
+ unsigned int actual;
kern_return_t kr;
+ memory_object_t object;
-
- if (pager != default_pager_object)
+ if (default_pager != default_pager_object)
return KERN_INVALID_ARGUMENT;
- kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&pages,
- (vm_map_copy_t) *pagesp);
-
- if (kr != KERN_SUCCESS)
- return kr;
-
- size = round_page(*countp * sizeof * pages);
- kr = vm_map_wire(ipc_kernel_map,
- trunc_page((vm_offset_t)pages),
- round_page(((vm_offset_t)pages) + size),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- size=0;
-
- *pagesp = pages;
- /* we start with the inline space */
-
- addr = (vm_offset_t)pages;
- potential = *countp;
+ object = (memory_object_t) memory_object;
+ potential = 0;
for (;;) {
vstruct_t entry;
VSL_UNLOCK();
/* did not find the object */
+ if (0 != addr)
+ kmem_free(ipc_kernel_map, addr, size);
- if (pages != *pagesp)
- (void) vm_deallocate(kernel_map, addr, size);
return KERN_INVALID_ARGUMENT;
found_object:
VS_UNLOCK(entry);
- assert_wait_timeout( 1, THREAD_UNINT );
+ assert_wait_timeout((event_t)assert_wait_timeout, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
wresult = thread_block(THREAD_CONTINUE_NULL);
assert(wresult == THREAD_TIMED_OUT);
continue;
break;
/* allocate more memory */
+ if (0 != addr)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = round_page(actual * sizeof (*pages));
+ kr = kmem_alloc(ipc_kernel_map, &addr, size);
+ if (KERN_SUCCESS != kr)
+ return KERN_RESOURCE_SHORTAGE;
- if (pages != *pagesp)
- (void) vm_deallocate(kernel_map, addr, size);
- size = round_page(actual * sizeof * pages);
- kr = vm_allocate(kernel_map, &addr, size, TRUE);
- if (kr != KERN_SUCCESS)
- return kr;
pages = (default_pager_page_t *)addr;
- potential = size / sizeof * pages;
+ potential = (unsigned int) (size / sizeof (*pages));
}
/*
- * Deallocate and clear unused memory.
- * (Returned memory will automagically become pageable.)
+ * Clear unused memory.
*/
-
- if (pages == *pagesp) {
-
- /*
- * Our returned information fit inline.
- * Nothing to deallocate.
- */
-
- *countp = actual;
- } else if (actual == 0) {
- (void) vm_deallocate(kernel_map, addr, size);
-
- /* return zero items inline */
- *countp = 0;
- } else {
- vm_offset_t used;
-
- used = round_page(actual * sizeof * pages);
-
- if (used != size)
- (void) vm_deallocate(kernel_map,
- addr + used, size - used);
-
- *pagesp = pages;
- *countp = actual;
- }
- (void) vm_map_unwire(kernel_map, (vm_offset_t)pages,
- *countp + (vm_offset_t)pages, FALSE);
- (void) vm_map_copyin(kernel_map, (vm_offset_t)pages,
- *countp, TRUE, (vm_map_copy_t *)pagesp);
+ while (actual < potential)
+ pages[--potential].dpp_offset = 0;
+
+ kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
+ vm_map_round_page(addr + size), FALSE);
+ assert(KERN_SUCCESS == kr);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
+ (vm_map_size_t)size, TRUE, &copy);
+ assert(KERN_SUCCESS == kr);
+
+
+ *pagesp = (default_pager_page_array_t)copy;
+ *countp = actual;
return KERN_SUCCESS;
}