/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <vm/vm_protos.h>
/* forward declaration */
-vstruct_t vs_object_create(vm_size_t size);
+vstruct_t vs_object_create(dp_size_t size);
/*
* List of all vstructs. A specific vstruct is
vstruct_t
vs_object_create(
- vm_size_t size)
+ dp_size_t size)
{
vstruct_t vs;
pset = default_pager_external_set;
}
- ipc_port_make_sonce(mem_obj);
ip_lock(mem_obj); /* unlocked in nsrequest below */
+ ipc_port_make_sonce_locked(mem_obj);
ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
#endif
+/*
+ * Function vector ("pager ops") for the default pager's memory objects.
+ * The VM layer dispatches memory-object calls through this table, and
+ * vstructs now record a pointer to it (vs_pager_ops) in place of the
+ * old ISVS magic value, so a memory object can be recognized as
+ * belonging to the default pager by comparing pager-ops pointers.
+ * The entries are positional; they must stay in the order declared by
+ * struct memory_object_pager_ops.
+ */
+const struct memory_object_pager_ops default_pager_ops = {
+ dp_memory_object_reference,
+ dp_memory_object_deallocate,
+ dp_memory_object_init,
+ dp_memory_object_terminate,
+ dp_memory_object_data_request,
+ dp_memory_object_data_return,
+ dp_memory_object_data_initialize,
+ dp_memory_object_data_unlock,
+ dp_memory_object_synchronize,
+ dp_memory_object_map,
+ dp_memory_object_last_unmap,
+ dp_memory_object_data_reclaim,
+ /* human-readable pager name (last slot of the ops vector) */
+ "default pager"
+};
+
kern_return_t
dp_memory_object_init(
memory_object_t mem_obj,
memory_object_control_t control,
- __unused vm_size_t pager_page_size)
+ __unused memory_object_cluster_size_t pager_page_size)
{
vstruct_t vs;
dp_memory_object_synchronize(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
+ memory_object_size_t length,
__unused vm_sync_t flags)
{
vstruct_t vs;
}
+/*
+ * dp_memory_object_map / dp_memory_object_last_unmap
+ *
+ * Stubs replacing the former dp_memory_object_unmap().  These slots
+ * exist only to fill the default_pager_ops vector; they are never
+ * expected to be reached for the default pager, so both panic.  The
+ * return statements are unreachable but keep the compiler satisfied.
+ */
kern_return_t
-dp_memory_object_unmap(
- __unused memory_object_t mem_obj)
+dp_memory_object_map(
+ __unused memory_object_t mem_obj,
+ __unused vm_prot_t prot)
{
- panic("dp_memory_object_unmap");
+ panic("dp_memory_object_map");
+ return KERN_FAILURE;
+}
+kern_return_t
+dp_memory_object_last_unmap(
+ __unused memory_object_t mem_obj)
+{
+ panic("dp_memory_object_last_unmap");
 return KERN_FAILURE;
}
+/*
+ * dp_memory_object_data_reclaim
+ *
+ * Release backing store held by this memory object by handing the
+ * vstruct to ps_vstruct_reclaim(); reclaim_backing_store is passed
+ * through to control how the paging segments are returned.
+ * vs_xfer_pending is used as a single-threading gate so no other
+ * transfer runs concurrently with the reclaim.
+ */
+kern_return_t
+dp_memory_object_data_reclaim(
+ memory_object_t mem_obj,
+ boolean_t reclaim_backing_store)
+{
+ vstruct_t vs;
+
+ vs_lookup(mem_obj, vs);
+ /*
+ * Wait for any asynchronous I/O to drain, then claim the
+ * vs_xfer_pending gate.
+ * NOTE(review): when vs_xfer_pending is set, the loop re-enters
+ * vs_lock() with no visible unlock -- presumably vs_async_wait()
+ * drops the lock while sleeping; confirm against its definition.
+ */
+ for (;;) {
+ vs_lock(vs);
+ vs_async_wait(vs);
+ if (!vs->vs_xfer_pending) {
+ break;
+ }
+ }
+ vs->vs_xfer_pending = TRUE;
+ vs_unlock(vs);
+
+ ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);
+
+ /* release the single-threading gate */
+ vs_lock(vs);
+ vs->vs_xfer_pending = FALSE;
+ vs_unlock(vs);
+
+ return KERN_SUCCESS;
+}
+
kern_return_t
dp_memory_object_terminate(
memory_object_t mem_obj)
dp_memory_object_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
- __unused vm_prot_t protection_required)
+ memory_object_cluster_size_t length,
+ __unused vm_prot_t protection_required,
+ memory_object_fault_info_t fault_info)
{
vstruct_t vs;
+ kern_return_t kr = KERN_SUCCESS;
GSTAT(global_stats.gs_pagein_calls++);
if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
Panic("bad alignment");
- pvs_cluster_read(vs, (vm_offset_t)offset, length);
-
+ assert((dp_offset_t) offset == offset);
+ kr = pvs_cluster_read(vs, (dp_offset_t) offset, length, fault_info);
+
+ /* Regular data requests have a non-zero length and always return KERN_SUCCESS.
+ Their actual success is determined by the fact that they provide a page or not,
+ i.e. whether we call upl_commit() or upl_abort(). A length of 0 means that the
+ caller is only asking if the pager has a copy of that page or not. The answer to
+ that question is provided by the return value. KERN_SUCCESS means that the pager
+ does have that page.
+ */
+ if(length) {
+ kr = KERN_SUCCESS;
+ }
+
vs_finish_read(vs);
- return KERN_SUCCESS;
+ return kr;
}
/*
dp_memory_object_data_initialize(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t size)
+ memory_object_cluster_size_t size)
{
vstruct_t vs;
* loop if the address range specified crosses cluster
* boundaries.
*/
- vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+ assert((upl_offset_t) offset == offset);
+ vs_cluster_write(vs, 0, (upl_offset_t)offset, size, FALSE, 0);
vs_finish_write(vs);
dp_memory_object_data_unlock(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
- __unused vm_size_t size,
+ __unused memory_object_size_t size,
__unused vm_prot_t desired_access)
{
Panic("dp_memory_object_data_unlock: illegal");
dp_memory_object_data_return(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t size,
+ memory_object_cluster_size_t size,
__unused memory_object_offset_t *resid_offset,
__unused int *io_error,
__unused boolean_t dirty,
/* a synchronous interface */
/* return KERN_LOCK_OWNED; */
upl_t upl;
- int page_list_count = 0;
+ unsigned int page_list_count = 0;
memory_object_super_upl_request(vs->vs_control,
(memory_object_offset_t)offset,
size, size,
if ((vs->vs_seqno != vs->vs_next_seqno++)
|| (vs->vs_readers)
|| (vs->vs_xfer_pending)) {
- upl_t upl;
- int page_list_count = 0;
+ upl_t upl;
+ unsigned int page_list_count = 0;
vs->vs_next_seqno--;
VS_UNLOCK(vs);
* loop if the address range specified crosses cluster
* boundaries.
*/
- vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+ assert((upl_offset_t) offset == offset);
+ vs_cluster_write(vs, 0, (upl_offset_t) offset, size, FALSE, 0);
vs_finish_write(vs);
assert(dmm == default_pager_object);
- vs = vs_object_create(new_size);
+ if ((dp_size_t) new_size != new_size) {
+ /* 32-bit overflow */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vs = vs_object_create((dp_size_t) new_size);
if (vs == VSTRUCT_NULL)
return KERN_RESOURCE_SHORTAGE;
* and this default_pager structure
*/
- vs->vs_mem_obj = ISVS;
- vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;
+ vs->vs_pager_ops = &default_pager_ops;
+ vs->vs_pager_header.io_bits = IKOT_MEMORY_OBJECT;
/*
* After this, other threads might receive requests
if (default_pager != default_pager_object)
return KERN_INVALID_ARGUMENT;
- vs = vs_object_create(size);
+ if ((dp_size_t) size != size) {
+ /* 32-bit overflow */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vs = vs_object_create((dp_size_t) size);
if (vs == VSTRUCT_NULL)
return KERN_RESOURCE_SHORTAGE;
* Set up associations between the default pager
* and this vstruct structure
*/
- vs->vs_mem_obj = ISVS;
+ vs->vs_pager_ops = &default_pager_ops;
vstruct_list_insert(vs);
*mem_objp = vs_to_mem_obj(vs);
return KERN_SUCCESS;
/*
* Out out-of-line port arrays are simply kalloc'ed.
*/
- psize = round_page(actual * sizeof * pagers);
- ppotential = psize / sizeof * pagers;
+ psize = round_page(actual * sizeof (*pagers));
+ ppotential = (unsigned int) (psize / sizeof (*pagers));
pagers = (memory_object_t *)kalloc(psize);
if (0 == pagers)
return KERN_RESOURCE_SHORTAGE;
* then "copied in" as if it had been sent by a
* user process.
*/
- osize = round_page(actual * sizeof * objects);
- opotential = osize / sizeof * objects;
+ osize = round_page(actual * sizeof (*objects));
+ opotential = (unsigned int) (osize / sizeof (*objects));
kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
if (KERN_SUCCESS != kr) {
kfree(pagers, psize);
if (0 != addr)
kmem_free(ipc_kernel_map, addr, size);
- size = round_page(actual * sizeof * pages);
+ size = round_page(actual * sizeof (*pages));
kr = kmem_alloc(ipc_kernel_map, &addr, size);
if (KERN_SUCCESS != kr)
return KERN_RESOURCE_SHORTAGE;
pages = (default_pager_page_t *)addr;
- potential = size / sizeof * pages;
+ potential = (unsigned int) (size / sizeof (*pages));
}
/*