/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/upl.h>
#include <mach/thread_act.h>
+#include <kern/assert.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <default_pager/default_pager_object_server.h>
#include <vm/vm_map.h>
-#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
+#include <vm/vm_purgeable_internal.h>
+
/* BSD VM COMPONENT INTERFACES */
int
return( vm_map_last_entry(map)->vme_end);
}
-/*
- * Legacy routines to get the start and end for a vm_map_t. They
- * return them in the vm_offset_t format. So, they should only be
- * called on maps that are the same size as the kernel map for
- * accurate results.
- */
-vm_offset_t
-get_vm_start(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
-}
-
-vm_offset_t
-get_vm_end(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
-}
-
/*
* BSD VNODE PAGER
*/
-/* until component support available */
-int vnode_pager_workaround;
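+/* pager operations vector installed in each vnode pager object */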
+const struct memory_object_pager_ops vnode_pager_ops = {
+ vnode_pager_reference,
+ vnode_pager_deallocate,
+ vnode_pager_init,
+ vnode_pager_terminate,
+ vnode_pager_data_request,
+ vnode_pager_data_return,
+ vnode_pager_data_initialize,
+ vnode_pager_data_unlock,
+ vnode_pager_synchronize,
+ vnode_pager_map,
+ vnode_pager_last_unmap,
+ NULL, /* data_reclaim */
+ "vnode pager"
+};
typedef struct vnode_pager {
- int *pager; /* pager workaround pointer */
- unsigned int pager_ikot; /* JMM: fake ip_kotype() */
+ struct ipc_object_header pager_header; /* fake ip_kotype() */
+ memory_object_pager_ops_t pager_ops; /* == &vnode_pager_ops */
unsigned int ref_count; /* reference count */
memory_object_control_t control_handle; /* mem object control handle */
struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;
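+/* legacy field name; the fake ip_kotype now lives in the IPC object header */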
+#define pager_ikot pager_header.io_bits
ipc_port_t
trigger_name_to_port( /* forward */
kern_return_t
vnode_pager_cluster_read( /* forward */
vnode_pager_t,
- vm_object_offset_t,
+ vm_object_offset_t,
+ vm_object_offset_t,
+ uint32_t,
vm_size_t);
void
#define PAGER_DEBUG(LEVEL, A)
#endif
+extern int proc_resetpcontrol(int);
+
+#if DEVELOPMENT || DEBUG
+extern unsigned long vm_cs_validated_resets;
+#endif
+
/*
- * Routine: macx_triggers
+ * Routine: mach_macx_triggers
* Function:
* Syscall interface to set the call backs for low and
* high water marks.
*/
int
-macx_triggers(
+mach_macx_triggers(
struct macx_triggers_args *args)
{
int hi_water = args->hi_water;
return EINVAL;
}
- if ((flags & SWAP_ENCRYPT_ON) &&
- (flags & SWAP_ENCRYPT_OFF)) {
+ if (((flags & SWAP_ENCRYPT_ON) && (flags & SWAP_ENCRYPT_OFF)) ||
+ ((flags & SWAP_COMPACT_ENABLE) && (flags & SWAP_COMPACT_DISABLE))) {
/* can't have it both ways */
return EINVAL;
}
+ if (default_pager_init_flag == 0) {
+ start_def_pager(NULL);
+ default_pager_init_flag = 1;
+ }
+
if (flags & SWAP_ENCRYPT_ON) {
/* ENCRYPTED SWAP: tell default_pager to encrypt */
default_pager_triggers(default_pager,
IP_NULL);
}
+ if (flags & USE_EMERGENCY_SWAP_FILE_FIRST) {
+ /*
+ * Time to switch to the emergency segment.
+ */
+ return default_pager_triggers(default_pager,
+ 0, 0,
+ USE_EMERGENCY_SWAP_FILE_FIRST,
+ IP_NULL);
+ }
+
+ if (flags & SWAP_FILE_CREATION_ERROR) {
+ /*
+ * For some reason, the dynamic pager failed to create a swap file.
+ */
+ trigger_port = trigger_name_to_port(trigger_name);
+ if(trigger_port == NULL) {
+ return EINVAL;
+ }
+ /* trigger_port is locked and active */
+ ipc_port_make_send_locked(trigger_port);
+ /* now unlocked */
+ default_pager_triggers(default_pager,
+ 0, 0,
+ SWAP_FILE_CREATION_ERROR,
+ trigger_port);
+ }
+
if (flags & HI_WAT_ALERT) {
trigger_port = trigger_name_to_port(trigger_name);
if(trigger_port == NULL) {
LO_WAT_ALERT, trigger_port);
}
+
+ if (flags & PROC_RESUME) {
+
+ /*
+ * For this call, hi_water is used to pass in the pid of the process we want to resume
+ * or unthrottle. This is of course restricted to the superuser (checked inside of
+ * proc_resetpcontrol).
+ */
+
+ return proc_resetpcontrol(hi_water);
+ }
+
/*
* Set thread scheduling priority and policy for the current thread
* it is assumed for the time being that the thread setting the alert
*
* XXX This does not belong in the kernel XXX
*/
- {
+ if (flags & HI_WAT_ALERT) {
thread_precedence_policy_data_t pre;
thread_extended_policy_data_t ext;
THREAD_PRECEDENCE_POLICY,
(thread_policy_t)&pre,
THREAD_PRECEDENCE_POLICY_COUNT);
+
+ current_thread()->options |= TH_OPT_VMPRIV;
}
- current_thread()->options |= TH_OPT_VMPRIV;
+ if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) {
+ return macx_backing_store_compaction(flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE));
+ }
return 0;
}
return (NULL);
space = current_space();
- if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
+ if(ipc_port_translate_receive(space, CAST_MACH_PORT_TO_NAME(trigger_name),
&trigger_port) != KERN_SUCCESS)
return (NULL);
return trigger_port;
void * uio,
int start_offset,
int io_requested,
- int mark_dirty)
+ int mark_dirty,
+ int take_reference)
{
vm_object_t object;
vm_page_t dst_page;
int cur_run;
int cur_needed;
int i;
+ int orig_offset;
vm_page_t page_run[MAX_RUN];
-
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL) {
return (0);
vm_object_unlock(object);
return 0;
}
+ orig_offset = start_offset;
while (io_requested && retval == 0) {
if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
break;
+
/*
- * Sync up on getting the busy bit
+ * if we're in this routine, we are inside a filesystem's
+ * locking model, so we don't ever want to wait for pages that have
+ * list_req_pending == TRUE since it means that the
+ * page is a candidate for some type of I/O operation,
+ * but that it has not yet been gathered into a UPL...
+ * this implies that it is still outside the domain
+ * of the filesystem and that whoever is responsible for
+ * grabbing it into a UPL may be stuck behind the filesystem
+ * lock this thread owns, or trying to take a lock exclusively
+ * and waiting for the readers to drain from a rw lock...
+ * if we block in those cases, we will deadlock
*/
- if ((dst_page->busy || dst_page->cleaning)) {
- /*
+ if (dst_page->list_req_pending) {
+
+ if (dst_page->absent) {
+ /*
+ * this is the list_req_pending | absent | busy case
+ * which originates from vm_fault_page... we want
+ * to fall out of the fast path and go back
+ * to the caller which will gather this page
+ * into a UPL and issue the I/O if no one
+ * else beats us to it
+ */
+ break;
+ }
+ if (dst_page->pageout || dst_page->cleaning) {
+ /*
+ * this is the list_req_pending | pageout | busy case
+ * or the list_req_pending | cleaning case...
+ * which originate from the pageout_scan and
+ * msync worlds for the pageout case and the hibernate
+ * pre-cleaning world for the cleaning case...
+ * we need to reset the state of this page to indicate
+ * it should stay in the cache marked dirty... nothing else we
+ * can do at this point... we can't block on it, we can't busy
+ * it and we can't clean it from this routine.
+ */
+ vm_page_lockspin_queues();
+
+ vm_pageout_queue_steal(dst_page, TRUE);
+ vm_page_deactivate(dst_page);
+
+ vm_page_unlock_queues();
+ }
+ /*
+ * this is the list_req_pending | cleaning case...
+ * we can go ahead and deal with this page since
+ * it's ok for us to mark this page busy... if a UPL
+ * tries to gather this page, it will block until the
+ * busy is cleared, thus allowing us safe use of the page
+ * when we're done with it, we will clear busy and wake
+ * up anyone waiting on it, thus allowing the UPL creation
+ * to finish
+ */
+
+ } else if (dst_page->busy || dst_page->cleaning) {
+ /*
* someone else is playing with the page... if we've
* already collected pages into this run, go ahead
* and process now, we can't block on this
* page while holding other pages in the BUSY state
* otherwise we will wait
*/
- if (cur_run)
- break;
- PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ if (cur_run)
+ break;
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
+
/*
* this routine is only called when copying
* to/from real files... no need to consider
*/
assert(!dst_page->encrypted);
- if (mark_dirty)
+ if (mark_dirty) {
dst_page->dirty = TRUE;
+ if (dst_page->cs_validated &&
+ !dst_page->cs_tainted) {
+ /*
+ * CODE SIGNING:
+ * We're modifying a code-signed
+ * page: force revalidate
+ */
+ dst_page->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(dst_page->phys_page);
+ }
+ }
dst_page->busy = TRUE;
page_run[cur_run++] = dst_page;
}
if (cur_run == 0)
/*
- * we hit a 'hole' in the cache
- * we bail at this point
+ * we hit a 'hole' in the cache or
+ * a page we don't want to try to handle,
+ * so bail at this point
* we'll unlock the object below
*/
break;
}
vm_object_lock(object);
+ /*
+ * if we have more than 1 page to work on
+ * in the current run, or the original request
+ * started at offset 0 of the page, or we're
+ * processing multiple batches, we will move
+ * the pages to the tail of the inactive queue
+ * to implement an LRU for read/write accesses
+ *
+ * the check for orig_offset == 0 is there to
+ * mitigate the cost of small (< page_size) requests
+ * to the same page (this way we only move it once)
+ */
+ if (take_reference && (cur_run > 1 || orig_offset == 0)) {
+
+ vm_page_lockspin_queues();
+
+ for (i = 0; i < cur_run; i++)
+ vm_page_lru(page_run[i]);
+
+ vm_page_unlock_queues();
+ }
for (i = 0; i < cur_run; i++) {
dst_page = page_run[i];
+ /*
+ * someone is explicitly referencing this page...
+ * update clustered and speculative state
+ *
+ */
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
+
PAGE_WAKEUP_DONE(dst_page);
}
+ orig_offset = 0;
}
vm_object_unlock(object);
size = (vm_size_t) sizeof(struct vnode_pager);
vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
PAGE_SIZE, "vnode pager structures");
+ zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
+ zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);
+
+
+#if CONFIG_CODE_DECRYPTION
+ apple_protect_pager_bootstrap();
+#endif /* CONFIG_CODE_DECRYPTION */
+ swapfile_pager_bootstrap();
return;
}
#if !DEBUG
__unused
#endif
- vm_size_t pg_size)
+ memory_object_cluster_size_t pg_size)
{
vnode_pager_t vnode_object;
kern_return_t kr;
memory_object_attr_info_data_t attributes;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
vnode_pager_data_return(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t data_cnt,
+ memory_object_cluster_size_t data_cnt,
memory_object_offset_t *resid_offset,
int *io_error,
__unused boolean_t dirty,
vnode_pager_data_initialize(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
- __unused vm_size_t data_cnt)
+ __unused memory_object_cluster_size_t data_cnt)
{
panic("vnode_pager_data_initialize");
return KERN_FAILURE;
vnode_pager_data_unlock(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
- __unused vm_size_t size,
+ __unused memory_object_size_t size,
__unused vm_prot_t desired_access)
{
return KERN_FAILURE;
}
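+/*
+ * Report whether the vnode backing this memory object is still in use.
+ * Objects not managed by the vnode pager are reported as in use and the
+ * call fails.
+ */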
+kern_return_t
+vnode_pager_get_isinuse(
+ memory_object_t mem_obj,
+ uint32_t *isinuse)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ *isinuse = 1;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ *isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_check_hard_throttle(
+ memory_object_t mem_obj,
+ uint32_t *limit,
+ uint32_t hard_throttle)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+ return KERN_INVALID_ARGUMENT;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ (void)vnode_pager_return_hard_throttle_limit(vnode_object->vnode_handle, limit, hard_throttle);
+ return KERN_SUCCESS;
+}
+
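+/*
+ * Report whether the vnode backing this memory object resides on
+ * solid-state storage.
+ */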
+kern_return_t
+vnode_pager_get_isSSD(
+ memory_object_t mem_obj,
+ boolean_t *isSSD)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+ return KERN_INVALID_ARGUMENT;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
+ return KERN_SUCCESS;
+}
+
kern_return_t
vnode_pager_get_object_size(
memory_object_t mem_obj,
{
vnode_pager_t vnode_object;
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ *length = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+
vnode_object = vnode_pager_lookup(mem_obj);
*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
return KERN_SUCCESS;
}
+kern_return_t
+vnode_pager_get_object_pathname(
+ memory_object_t mem_obj,
+ char *pathname,
+ vm_size_t *length_p)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ return vnode_pager_get_pathname(vnode_object->vnode_handle,
+ pathname,
+ length_p);
+}
+
+kern_return_t
+vnode_pager_get_object_filename(
+ memory_object_t mem_obj,
+ const char **filename)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ return vnode_pager_get_filename(vnode_object->vnode_handle,
+ filename);
+}
+
+kern_return_t
+vnode_pager_get_object_cs_blobs(
+ memory_object_t mem_obj,
+ void **blobs)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj == MEMORY_OBJECT_NULL ||
+ mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ return vnode_pager_get_cs_blobs(vnode_object->vnode_handle,
+ blobs);
+}
+
+#if CHECK_CS_VALIDATION_BITMAP
+kern_return_t
+vnode_pager_cs_check_validation_bitmap(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ int optype )
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj == MEMORY_OBJECT_NULL ||
+ mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+ return ubc_cs_check_validation_bitmap( vnode_object->vnode_handle, offset, optype );
+}
+#endif /* CHECK_CS_VALIDATION_BITMAP */
+
/*
*
*/
vnode_pager_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
-#if !DEBUG
- __unused
-#endif
-vm_prot_t protection_required)
+ __unused memory_object_cluster_size_t length,
+ __unused vm_prot_t desired_access,
+ memory_object_fault_info_t fault_info)
{
- register vnode_pager_t vnode_object;
-
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));
+ vnode_pager_t vnode_object;
+ memory_object_offset_t base_offset;
+ vm_size_t size;
+ uint32_t io_streaming = 0;
vnode_object = vnode_pager_lookup(mem_obj);
- PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));
-
- return vnode_pager_cluster_read(vnode_object, offset, length);
+ size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ base_offset = offset;
+
+ if (memory_object_cluster_size(vnode_object->control_handle, &base_offset, &size, &io_streaming, fault_info) != KERN_SUCCESS)
+ size = PAGE_SIZE;
+
+ assert(offset >= base_offset &&
+ offset < base_offset + size);
+
+ return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}
/*
{
register vnode_pager_t vnode_object;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));
vnode_object = vnode_pager_lookup(mem_obj);
#endif
memory_object_t mem_obj)
{
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));
return(KERN_SUCCESS);
}
vnode_pager_synchronize(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t length,
+ memory_object_size_t length,
__unused vm_sync_t sync_flags)
{
register vnode_pager_t vnode_object;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %p\n", mem_obj));
vnode_object = vnode_pager_lookup(mem_obj);
*
*/
kern_return_t
-vnode_pager_unmap(
+vnode_pager_map(
+ memory_object_t mem_obj,
+ vm_prot_t prot)
+{
+ vnode_pager_t vnode_object;
+ int ret;
+ kern_return_t kr;
+
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ ret = ubc_map(vnode_object->vnode_handle, prot);
+
+ if (ret != 0) {
+ kr = KERN_FAILURE;
+ } else {
+ kr = KERN_SUCCESS;
+ }
+
+ return kr;
+}
+
+kern_return_t
+vnode_pager_last_unmap(
memory_object_t mem_obj)
{
register vnode_pager_t vnode_object;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));
vnode_object = vnode_pager_lookup(mem_obj);
}
+
/*
*
*/
int * io_error,
int upl_flags)
{
- vm_size_t size;
- upl_t upl = NULL;
- int request_flags;
+ vm_size_t size;
int errno;
if (upl_flags & UPL_MSYNC) {
upl_flags |= UPL_KEEPCACHED;
while (cnt) {
- kern_return_t kr;
-
size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */
- request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
- UPL_SET_INTERNAL | UPL_SET_LITE;
-
- kr = memory_object_upl_request(vnode_object->control_handle,
- offset, size, &upl, NULL, NULL, request_flags);
- if (kr != KERN_SUCCESS)
- panic("vnode_pager_cluster_write: upl request failed\n");
-
+ assert((upl_size_t) size == size);
vnode_pageout(vnode_object->vnode_handle,
- upl, (vm_offset_t)0, offset, size, upl_flags, &errno);
+ NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);
if ( (upl_flags & UPL_KEEPCACHED) ) {
if ( (*io_error = errno) )
} else {
vm_object_offset_t vnode_size;
vm_object_offset_t base_offset;
- vm_object_t object;
- vm_page_t target_page;
- int ticket;
/*
* this is the pageout path
base_offset = offset & ~((signed)(size - 1));
if ((base_offset + size) > vnode_size)
- size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
+ size = round_page(((vm_size_t)(vnode_size - base_offset)));
} else {
/*
* we've been requested to page out a page beyond the current
base_offset = offset;
size = PAGE_SIZE;
}
- object = memory_object_control_to_vm_object(vnode_object->control_handle);
-
- if (object == VM_OBJECT_NULL)
- panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");
-
- request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
- UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
- UPL_SET_INTERNAL | UPL_SET_LITE;
-
- vm_object_lock(object);
-
- if ((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
- /*
- * only pick up pages whose ticket number matches
- * the ticket number of the page orginally targeted
- * for pageout
- */
- ticket = target_page->page_ticket;
-
- request_flags |= ((ticket << UPL_PAGE_TICKET_SHIFT) & UPL_PAGE_TICKET_MASK);
- }
- vm_object_unlock(object);
-
- vm_object_upl_request(object, base_offset, size,
- &upl, NULL, NULL, request_flags);
- if (upl == NULL)
- panic("vnode_pager_cluster_write: upl request failed\n");
-
+ assert((upl_size_t) size == size);
vnode_pageout(vnode_object->vnode_handle,
- upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
+ NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size, UPL_VNODE_PAGER, NULL);
}
}
kern_return_t
vnode_pager_cluster_read(
vnode_pager_t vnode_object,
+ vm_object_offset_t base_offset,
vm_object_offset_t offset,
+ uint32_t io_streaming,
vm_size_t cnt)
{
int local_error = 0;
int kret;
+ int flags = 0;
assert(! (cnt & PAGE_MASK));
+ if (io_streaming)
+ flags |= UPL_IOSTREAMING;
+
+ assert((upl_size_t) cnt == cnt);
kret = vnode_pagein(vnode_object->vnode_handle,
(upl_t) NULL,
- (vm_offset_t) NULL,
- offset,
- cnt,
- 0,
+ (upl_offset_t) (offset - base_offset),
+ base_offset,
+ (upl_size_t) cnt,
+ flags,
&local_error);
/*
if(kret == PAGER_ABSENT) {
if (kret == 1) {
int uplflags;
upl_t upl = NULL;
- int count = 0;
+ unsigned int count = 0;
kern_return_t kr;
uplflags = (UPL_NO_SYNC |
UPL_CLEAN_IN_PLACE |
UPL_SET_INTERNAL);
count = 0;
+ assert((upl_size_t) cnt == cnt);
kr = memory_object_upl_request(vnode_object->control_handle,
- offset, cnt,
+ base_offset, (upl_size_t) cnt,
&upl, NULL, &count, uplflags);
if (kr == KERN_SUCCESS) {
upl_abort(upl, 0);
int *cnt)
{
memory_object_free_from_cache(
- &realhost, &vnode_pager_workaround, cnt);
+ &realhost, &vnode_pager_ops, cnt);
}
/*
* The vm_map call takes both named entry ports and raw memory
* objects in the same parameter. We need to make sure that
* vm_map does not see this object as a named entry port. So,
- * we reserve the second word in the object for a fake ip_kotype
+ * we reserve the first word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
- vnode_object->pager = &vnode_pager_workaround;
+ vnode_object->pager_ops = &vnode_pager_ops;
vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
vnode_object->ref_count = 1;
vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
vnode_pager_t vnode_object;
vnode_object = (vnode_pager_t)name;
- assert(vnode_object->pager == &vnode_pager_workaround);
+ assert(vnode_object->pager_ops == &vnode_pager_ops);
return (vnode_object);
}
+
+/*********************** proc_info implementation *************/
+
+#include <sys/bsdtask_info.h>
+
+static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);
+
+
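+/*
+ * Fill a proc_regioninfo_internal structure for the vm_map entry that
+ * contains (or is the first entry past) the address 'arg' in the task's map.
+ * For non-submap entries, also try to report the backing vnode and its vid.
+ * Returns 1 if a region was found, 0 otherwise.
+ */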
+int
+fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
+{
+
+ vm_map_t map;
+ vm_map_offset_t address = (vm_map_offset_t )arg;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+ vm_map_offset_t start;
+ vm_region_extended_info_data_t extended;
+ vm_region_top_info_data_t top;
+
+ task_lock(task);
+ map = task->map;
+ if (map == VM_MAP_NULL)
+ {
+ task_unlock(task);
+ return(0);
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ vm_map_lock_read(map);
+
+ start = address;
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(0);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ start = entry->vme_start;
+
+ pinfo->pri_offset = entry->offset;
+ pinfo->pri_protection = entry->protection;
+ pinfo->pri_max_protection = entry->max_protection;
+ pinfo->pri_inheritance = entry->inheritance;
+ pinfo->pri_behavior = entry->behavior;
+ pinfo->pri_user_wired_count = entry->user_wired_count;
+ pinfo->pri_user_tag = entry->alias;
+
+ if (entry->is_sub_map) {
+ pinfo->pri_flags |= PROC_REGION_SUBMAP;
+ } else {
+ if (entry->is_shared)
+ pinfo->pri_flags |= PROC_REGION_SHARED;
+ }
+
+
+ extended.protection = entry->protection;
+ extended.user_tag = entry->alias;
+ extended.pages_resident = 0;
+ extended.pages_swapped_out = 0;
+ extended.pages_shared_now_private = 0;
+ extended.pages_dirtied = 0;
+ extended.external_pager = 0;
+ extended.shadow_depth = 0;
+
+ vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);
+
+ if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
+ extended.share_mode = SM_PRIVATE;
+
+ top.private_pages_resident = 0;
+ top.shared_pages_resident = 0;
+ vm_map_region_top_walk(entry, &top);
+
+
+ pinfo->pri_pages_resident = extended.pages_resident;
+ pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
+ pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
+ pinfo->pri_pages_dirtied = extended.pages_dirtied;
+ pinfo->pri_ref_count = extended.ref_count;
+ pinfo->pri_shadow_depth = extended.shadow_depth;
+ pinfo->pri_share_mode = extended.share_mode;
+
+ pinfo->pri_private_pages_resident = top.private_pages_resident;
+ pinfo->pri_shared_pages_resident = top.shared_pages_resident;
+ pinfo->pri_obj_id = top.obj_id;
+
+ pinfo->pri_address = (uint64_t)start;
+ pinfo->pri_size = (uint64_t)(entry->vme_end - start);
+ pinfo->pri_depth = 0;
+
+ if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
+ *vnodeaddr = (uintptr_t)0;
+
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(1);
+ }
+ }
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(1);
+}
+
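+/*
+ * Walk the shadow chain of the entry's VM object down to the backing object;
+ * if that object is paged by the vnode pager, return the vnode's address and
+ * vid.  Returns 1 on success, 0 if there is no backing vnode.
+ */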
+static int
+fill_vnodeinfoforaddr(
+ vm_map_entry_t entry,
+ uintptr_t * vnodeaddr,
+ uint32_t * vid)
+{
+ vm_object_t top_object, object;
+ memory_object_t memory_object;
+ memory_object_pager_ops_t pager_ops;
+ kern_return_t kr;
+ int shadow_depth;
+
+
+ if (entry->is_sub_map) {
+ return(0);
+ } else {
+ /*
+ * The last object in the shadow chain has the
+ * relevant pager information.
+ */
+ top_object = entry->object.vm_object;
+ if (top_object == VM_OBJECT_NULL) {
+ object = VM_OBJECT_NULL;
+ shadow_depth = 0;
+ } else {
+ vm_object_lock(top_object);
+ for (object = top_object, shadow_depth = 0;
+ object->shadow != VM_OBJECT_NULL;
+ object = object->shadow, shadow_depth++) {
+ vm_object_lock(object->shadow);
+ vm_object_unlock(object);
+ }
+ }
+ }
+
+ if (object == VM_OBJECT_NULL) {
+ return(0);
+ } else if (object->internal) {
+ vm_object_unlock(object);
+ return(0);
+ } else if (! object->pager_ready ||
+ object->terminating ||
+ ! object->alive) {
+ vm_object_unlock(object);
+ return(0);
+ } else {
+ memory_object = object->pager;
+ pager_ops = memory_object->mo_pager_ops;
+ if (pager_ops == &vnode_pager_ops) {
+ kr = vnode_pager_get_object_vnode(
+ memory_object,
+ vnodeaddr, vid);
+ if (kr != KERN_SUCCESS) {
+ vm_object_unlock(object);
+ return(0);
+ }
+ } else {
+ vm_object_unlock(object);
+ return(0);
+ }
+ }
+ vm_object_unlock(object);
+ return(1);
+}
+
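+/*
+ * Return the vnode handle and vid backing the given vnode pager memory object.
+ */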
+kern_return_t
+vnode_pager_get_object_vnode (
+ memory_object_t mem_obj,
+ uintptr_t * vnodeaddr,
+ uint32_t * vid)
+{
+ vnode_pager_t vnode_object;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+ if (vnode_object->vnode_handle) {
+ *vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
+ *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);
+
+ return(KERN_SUCCESS);
+ }
+
+ return(KERN_FAILURE);
+}
+
+
+/*
+ * Find the underlying vnode object for the given vm_map_entry. If found, return with the
+ * object locked, otherwise return NULL with nothing locked.
+ */
+
+vm_object_t
+find_vnode_object(
+ vm_map_entry_t entry
+)
+{
+ vm_object_t top_object, object;
+ memory_object_t memory_object;
+ memory_object_pager_ops_t pager_ops;
+
+ if (!entry->is_sub_map) {
+
+ /*
+ * The last object in the shadow chain has the
+ * relevant pager information.
+ */
+
+ top_object = entry->object.vm_object;
+
+ if (top_object) {
+ vm_object_lock(top_object);
+
+ for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
+ vm_object_lock(object->shadow);
+ vm_object_unlock(object);
+ }
+
+ if (object && !object->internal && object->pager_ready && !object->terminating &&
+ object->alive) {
+ memory_object = object->pager;
+ pager_ops = memory_object->mo_pager_ops;
+
+ /*
+ * If this object points to the vnode_pager_ops, then we found what we're
+ * looking for. Otherwise, this vm_map_entry doesn't have an underlying
+ * vnode and so we fall through to the bottom and return NULL.
+ */
+
+ if (pager_ops == &vnode_pager_ops)
+ return object; /* we return with the object locked */
+ }
+
+ vm_object_unlock(object);
+ }
+
+ }
+
+ return(VM_OBJECT_NULL);
+}