#include <kern/assert.h>
#include <kern/host.h>
+#include <kern/ledger.h>
#include <kern/thread.h>
+#include <kern/ipc_kobject.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
-#include <default_pager/default_pager_types.h>
-#include <default_pager/default_pager_object_server.h>
-
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
return( vm_map_last_entry(map)->vme_end);
}
-/*
- * Legacy routines to get the start and end for a vm_map_t. They
- * return them in the vm_offset_t format. So, they should only be
- * called on maps that are the same size as the kernel map for
- * accurate results.
- */
-vm_offset_t
-get_vm_start(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
-}
-
-vm_offset_t
-get_vm_end(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
-}
-
/*
* BSD VNODE PAGER
*/
vnode_pager_data_initialize,
vnode_pager_data_unlock,
vnode_pager_synchronize,
- vnode_pager_unmap,
+ vnode_pager_map,
+ vnode_pager_last_unmap,
+ NULL, /* data_reclaim */
"vnode pager"
};
typedef struct vnode_pager {
- memory_object_pager_ops_t pager_ops; /* == &vnode_pager_ops */
- unsigned int pager_ikot; /* JMM: fake ip_kotype() */
+ /* mandatory generic header */
+ struct memory_object vn_pgr_hdr;
+
+ /* pager-specific */
unsigned int ref_count; /* reference count */
- memory_object_control_t control_handle; /* mem object control handle */
struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;
-ipc_port_t
-trigger_name_to_port( /* forward */
- mach_port_t);
-
kern_return_t
vnode_pager_cluster_read( /* forward */
vnode_pager_t,
- vm_object_offset_t,
+ vm_object_offset_t,
+ vm_object_offset_t,
+ uint32_t,
vm_size_t);
vnode_pager_t
vnode_pager_lookup( /* forward */
memory_object_t);
+struct vnode *
+vnode_pager_lookup_vnode( /* forward */
+ memory_object_t);
+
zone_t vnode_pager_zone;
#define PAGER_DEBUG(LEVEL, A)
#endif
-/*
- * Routine: macx_triggers
- * Function:
- * Syscall interface to set the call backs for low and
- * high water marks.
- */
-int
-macx_triggers(
- struct macx_triggers_args *args)
-{
- int hi_water = args->hi_water;
- int low_water = args->low_water;
- int flags = args->flags;
- mach_port_t trigger_name = args->alert_port;
- kern_return_t kr;
- memory_object_default_t default_pager;
- ipc_port_t trigger_port;
-
- default_pager = MEMORY_OBJECT_DEFAULT_NULL;
- kr = host_default_memory_manager(host_priv_self(),
- &default_pager, 0);
- if(kr != KERN_SUCCESS) {
- return EINVAL;
- }
-
- if ((flags & SWAP_ENCRYPT_ON) &&
- (flags & SWAP_ENCRYPT_OFF)) {
- /* can't have it both ways */
- return EINVAL;
- }
-
- if (default_pager_init_flag == 0) {
- start_def_pager(NULL);
- default_pager_init_flag = 1;
- }
-
- if (flags & SWAP_ENCRYPT_ON) {
- /* ENCRYPTED SWAP: tell default_pager to encrypt */
- default_pager_triggers(default_pager,
- 0, 0,
- SWAP_ENCRYPT_ON,
- IP_NULL);
- } else if (flags & SWAP_ENCRYPT_OFF) {
- /* ENCRYPTED SWAP: tell default_pager not to encrypt */
- default_pager_triggers(default_pager,
- 0, 0,
- SWAP_ENCRYPT_OFF,
- IP_NULL);
- }
+extern int proc_resetpcontrol(int);
- if (flags & HI_WAT_ALERT) {
- trigger_port = trigger_name_to_port(trigger_name);
- if(trigger_port == NULL) {
- return EINVAL;
- }
- /* trigger_port is locked and active */
- ipc_port_make_send_locked(trigger_port);
- /* now unlocked */
- default_pager_triggers(default_pager,
- hi_water, low_water,
- HI_WAT_ALERT, trigger_port);
- }
-
- if (flags & LO_WAT_ALERT) {
- trigger_port = trigger_name_to_port(trigger_name);
- if(trigger_port == NULL) {
- return EINVAL;
- }
- /* trigger_port is locked and active */
- ipc_port_make_send_locked(trigger_port);
- /* and now its unlocked */
- default_pager_triggers(default_pager,
- hi_water, low_water,
- LO_WAT_ALERT, trigger_port);
- }
-
- /*
- * Set thread scheduling priority and policy for the current thread
- * it is assumed for the time being that the thread setting the alert
- * is the same one which will be servicing it.
- *
- * XXX This does not belong in the kernel XXX
- */
- {
- thread_precedence_policy_data_t pre;
- thread_extended_policy_data_t ext;
-
- ext.timeshare = FALSE;
- pre.importance = INT32_MAX;
-
- thread_policy_set(current_thread(),
- THREAD_EXTENDED_POLICY,
- (thread_policy_t)&ext,
- THREAD_EXTENDED_POLICY_COUNT);
-
- thread_policy_set(current_thread(),
- THREAD_PRECEDENCE_POLICY,
- (thread_policy_t)&pre,
- THREAD_PRECEDENCE_POLICY_COUNT);
- }
-
- current_thread()->options |= TH_OPT_VMPRIV;
-
- return 0;
-}
-
-/*
- *
- */
-ipc_port_t
-trigger_name_to_port(
- mach_port_t trigger_name)
-{
- ipc_port_t trigger_port;
- ipc_space_t space;
-
- if (trigger_name == 0)
- return (NULL);
-
- space = current_space();
- if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
- &trigger_port) != KERN_SUCCESS)
- return (NULL);
- return trigger_port;
-}
+#if DEVELOPMENT || DEBUG
+extern unsigned long vm_cs_validated_resets;
+#endif
extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32
-unsigned long vm_cs_tainted_forces = 0;
-
int
memory_object_control_uiomove(
memory_object_control_t control,
int cur_needed;
int i;
int orig_offset;
- boolean_t make_lru = FALSE;
vm_page_t page_run[MAX_RUN];
+	int		dirty_count;	/* number of pages dirtied by this uiomove */
object = memory_object_control_to_vm_object(control);
if (object == VM_OBJECT_NULL) {
return 0;
}
orig_offset = start_offset;
-
+
+ dirty_count = 0;
while (io_requested && retval == 0) {
cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;
if (cur_needed > MAX_RUN)
cur_needed = MAX_RUN;
-
+
for (cur_run = 0; cur_run < cur_needed; ) {
if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
break;
- /*
- * Sync up on getting the busy bit
- */
- if ((dst_page->busy || dst_page->cleaning)) {
- /*
+
+
+ if (dst_page->busy || dst_page->cleaning) {
+ /*
				 * someone else is playing with the page...  if we've
				 * already collected pages into this run, go ahead
				 * and process them now since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise, wait for the page
*/
- if (cur_run)
- break;
- PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ if (cur_run)
+ break;
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
- /*
- * this routine is only called when copying
- * to/from real files... no need to consider
- * encrypted swap pages
- */
- assert(!dst_page->encrypted);
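+			/*
+			 * the page is queued for laundering; steal it back
+			 * from the pageout queues before touching it
+			 */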
+ if (dst_page->laundry)
+ vm_pageout_steal_laundry(dst_page, FALSE);
if (mark_dirty) {
- dst_page->dirty = TRUE;
- if (dst_page->cs_validated) {
+ if (dst_page->dirty == FALSE)
+ dirty_count++;
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ if (dst_page->cs_validated &&
+ !dst_page->cs_tainted) {
/*
* CODE SIGNING:
* We're modifying a code-signed
- * page: assume that it is now tainted.
+ * page: force revalidate
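+				 * (clearing cs_validated and disconnecting the
+				 * page from all pmaps forces the next access
+				 * to re-fault and re-validate it)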
*/
- dst_page->cs_tainted = TRUE;
- vm_cs_tainted_forces++;
+ dst_page->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
}
}
dst_page->busy = TRUE;
}
if (cur_run == 0)
/*
- * we hit a 'hole' in the cache
- * we bail at this point
+ * we hit a 'hole' in the cache or
+ * a page we don't want to try to handle,
+			 * so bail at this point;
* we'll unlock the object below
*/
break;
if ((xsize = PAGE_SIZE - start_offset) > io_requested)
xsize = io_requested;
- if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
+ if ( (retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio)) )
break;
io_requested -= xsize;
* to the same page (this way we only move it once)
*/
if (take_reference && (cur_run > 1 || orig_offset == 0)) {
+
vm_page_lockspin_queues();
- make_lru = TRUE;
+
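+			/* requeue the entire run under a single queues-lock hold */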
+ for (i = 0; i < cur_run; i++)
+ vm_page_lru(page_run[i]);
+
+ vm_page_unlock_queues();
}
for (i = 0; i < cur_run; i++) {
dst_page = page_run[i];
* update clustered and speculative state
*
*/
- VM_PAGE_CONSUME_CLUSTERED(dst_page);
-
- if (make_lru == TRUE)
- vm_page_lru(dst_page);
+ if (dst_page->clustered)
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
PAGE_WAKEUP_DONE(dst_page);
}
- if (make_lru == TRUE) {
- vm_page_unlock_queues();
- make_lru = FALSE;
- }
orig_offset = 0;
}
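+	/*
+	 * attribute the pages dirtied above to the calling task's
+	 * logical-write accounting for the backing vnode
+	 */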
+ if (object->pager)
+ task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
vm_object_unlock(object);
-
return (retval);
}
void
vnode_pager_bootstrap(void)
{
- register vm_size_t size;
+ vm_size_t size;
size = (vm_size_t) sizeof(struct vnode_pager);
vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
PAGE_SIZE, "vnode pager structures");
-#ifdef __i386__
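+	/* exempt this zone from caller accounting and from hibernation encryption */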
+ zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
+ zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);
+
+#if CONFIG_CODE_DECRYPTION
apple_protect_pager_bootstrap();
-#endif /* __i386__ */
+#endif /* CONFIG_CODE_DECRYPTION */
+ swapfile_pager_bootstrap();
+#if __arm64__
+ fourk_pager_bootstrap();
+#endif /* __arm64__ */
return;
}
#if !DEBUG
__unused
#endif
- vm_size_t pg_size)
+ memory_object_cluster_size_t pg_size)
{
vnode_pager_t vnode_object;
kern_return_t kr;
memory_object_attr_info_data_t attributes;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
memory_object_control_reference(control);
- vnode_object->control_handle = control;
+ vnode_object->vn_pgr_hdr.mo_control = control;
attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
vnode_pager_data_return(
memory_object_t mem_obj,
memory_object_offset_t offset,
- vm_size_t data_cnt,
+ memory_object_cluster_size_t data_cnt,
memory_object_offset_t *resid_offset,
int *io_error,
__unused boolean_t dirty,
__unused boolean_t kernel_copy,
int upl_flags)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
vnode_pager_data_initialize(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
- __unused vm_size_t data_cnt)
+ __unused memory_object_cluster_size_t data_cnt)
{
panic("vnode_pager_data_initialize");
return KERN_FAILURE;
vnode_pager_data_unlock(
__unused memory_object_t mem_obj,
__unused memory_object_offset_t offset,
- __unused vm_size_t size,
+ __unused memory_object_size_t size,
__unused vm_prot_t desired_access)
{
return KERN_FAILURE;
}
+kern_return_t
+vnode_pager_get_isinuse(
+ memory_object_t mem_obj,
+ uint32_t *isinuse)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
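+		/* not a vnode pager; conservatively report "in use" */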
+ *isinuse = 1;
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ *isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_get_throttle_io_limit(
+ memory_object_t mem_obj,
+ uint32_t *limit)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+ return KERN_INVALID_ARGUMENT;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+vnode_pager_get_isSSD(
+ memory_object_t mem_obj,
+ boolean_t *isSSD)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+ return KERN_INVALID_ARGUMENT;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
+ return KERN_SUCCESS;
+}
+
kern_return_t
vnode_pager_get_object_size(
memory_object_t mem_obj,
}
kern_return_t
-vnode_pager_get_object_pathname(
+vnode_pager_get_object_name(
memory_object_t mem_obj,
char *pathname,
- vm_size_t *length_p)
+ vm_size_t pathname_len,
+ char *filename,
+ vm_size_t filename_len,
+ boolean_t *truncated_path_p)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
- return vnode_pager_get_pathname(vnode_object->vnode_handle,
- pathname,
- length_p);
+ return vnode_pager_get_name(vnode_object->vnode_handle,
+ pathname,
+ pathname_len,
+ filename,
+ filename_len,
+ truncated_path_p);
}
kern_return_t
-vnode_pager_get_object_filename(
- memory_object_t mem_obj,
- const char **filename)
+vnode_pager_get_object_mtime(
+ memory_object_t mem_obj,
+ struct timespec *mtime,
+ struct timespec *cs_mtime)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
- return vnode_pager_get_filename(vnode_object->vnode_handle,
- filename);
+ return vnode_pager_get_mtime(vnode_object->vnode_handle,
+ mtime,
+ cs_mtime);
}
+#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
-vnode_pager_get_object_cs_blobs(
- memory_object_t mem_obj,
- void **blobs)
+vnode_pager_cs_check_validation_bitmap(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ int optype )
{
vnode_pager_t vnode_object;
}
vnode_object = vnode_pager_lookup(mem_obj);
-
- return vnode_pager_get_cs_blobs(vnode_object->vnode_handle,
- blobs);
+	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
+#endif /* CHECK_CS_VALIDATION_BITMAP */
/*
*
vnode_pager_data_request(
memory_object_t mem_obj,
memory_object_offset_t offset,
- __unused vm_size_t length,
+ __unused memory_object_cluster_size_t length,
__unused vm_prot_t desired_access,
memory_object_fault_info_t fault_info)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
+ memory_object_offset_t base_offset;
vm_size_t size;
-#if MACH_ASSERT
- memory_object_offset_t original_offset = offset;
-#endif /* MACH_ASSERT */
+ uint32_t io_streaming = 0;
vnode_object = vnode_pager_lookup(mem_obj);
- size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ size = MAX_UPL_TRANSFER_BYTES;
+ base_offset = offset;
- if (memory_object_cluster_size(vnode_object->control_handle, &offset, &size, fault_info) != KERN_SUCCESS)
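+	/*
+	 * ask the VM layer to size a cluster around the faulting offset;
+	 * if that fails, fall back to reading a single page
+	 */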
+ if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
+ &base_offset, &size, &io_streaming,
+ fault_info) != KERN_SUCCESS)
size = PAGE_SIZE;
- assert(original_offset >= offset &&
- original_offset < offset + size);
+ assert(offset >= base_offset &&
+ offset < base_offset + size);
- return vnode_pager_cluster_read(vnode_object, offset, size);
+ return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}
/*
vnode_pager_reference(
memory_object_t mem_obj)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
unsigned int new_ref_count;
vnode_object = vnode_pager_lookup(mem_obj);
vnode_pager_deallocate(
memory_object_t mem_obj)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));
*/
kern_return_t
vnode_pager_synchronize(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- vm_size_t length,
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t length,
__unused vm_sync_t sync_flags)
{
- register vnode_pager_t vnode_object;
+ panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
+ return (KERN_FAILURE);
+}
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %p\n", mem_obj));
+/*
+ *
+ */
+kern_return_t
+vnode_pager_map(
+ memory_object_t mem_obj,
+ vm_prot_t prot)
+{
+ vnode_pager_t vnode_object;
+ int ret;
+ kern_return_t kr;
+
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));
vnode_object = vnode_pager_lookup(mem_obj);
- memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
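+	/* let the UBC layer know this vnode is being memory-mapped */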
+ ret = ubc_map(vnode_object->vnode_handle, prot);
+
+ if (ret != 0) {
+ kr = KERN_FAILURE;
+ } else {
+ kr = KERN_SUCCESS;
+ }
- return (KERN_SUCCESS);
+ return kr;
}
-/*
- *
- */
kern_return_t
-vnode_pager_unmap(
+vnode_pager_last_unmap(
memory_object_t mem_obj)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
- PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %p\n", mem_obj));
+ PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));
vnode_object = vnode_pager_lookup(mem_obj);
}
+
/*
*
*/
int * io_error,
int upl_flags)
{
- vm_size_t size;
- upl_t upl = NULL;
- int request_flags;
+ vm_size_t size;
int errno;
if (upl_flags & UPL_MSYNC) {
upl_flags |= UPL_KEEPCACHED;
while (cnt) {
- kern_return_t kr;
-
- size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */
-
- request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
- UPL_SET_INTERNAL | UPL_SET_LITE;
-
- kr = memory_object_upl_request(vnode_object->control_handle,
- offset, size, &upl, NULL, NULL, request_flags);
- if (kr != KERN_SUCCESS)
- panic("vnode_pager_cluster_write: upl request failed\n");
+ size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */
+ assert((upl_size_t) size == size);
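+		/* pass a NULL upl: vnode_pageout() now creates the UPL itself */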
vnode_pageout(vnode_object->vnode_handle,
- upl, (vm_offset_t)0, offset, size, upl_flags, &errno);
+ NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);
if ( (upl_flags & UPL_KEEPCACHED) ) {
if ( (*io_error = errno) )
} else {
vm_object_offset_t vnode_size;
vm_object_offset_t base_offset;
- vm_object_t object;
/*
* this is the pageout path
		 * and then clip the size to ensure we
* don't request past the end of the underlying file
*/
- size = PAGE_SIZE * MAX_UPL_TRANSFER;
+ size = MAX_UPL_TRANSFER_BYTES;
base_offset = offset & ~((signed)(size - 1));
if ((base_offset + size) > vnode_size)
- size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
+ size = round_page(((vm_size_t)(vnode_size - base_offset)));
} else {
/*
* we've been requested to page out a page beyond the current
base_offset = offset;
size = PAGE_SIZE;
}
- object = memory_object_control_to_vm_object(vnode_object->control_handle);
-
- if (object == VM_OBJECT_NULL)
- panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");
-
- request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
- UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
- UPL_SET_INTERNAL | UPL_SET_LITE;
-
- vm_object_upl_request(object, base_offset, size,
- &upl, NULL, NULL, request_flags);
- if (upl == NULL)
- panic("vnode_pager_cluster_write: upl request failed\n");
-
+ assert((upl_size_t) size == size);
vnode_pageout(vnode_object->vnode_handle,
- upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
+ NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
+ (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
}
}
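+
+/*
+ * Page in the cluster [base_offset, base_offset + cnt).  "offset" is the
+ * original faulting offset within the cluster; io_streaming, when set,
+ * tags the I/O as streaming so the lower layers can optimize for it.
+ */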
kern_return_t
vnode_pager_cluster_read(
vnode_pager_t vnode_object,
+ vm_object_offset_t base_offset,
vm_object_offset_t offset,
+ uint32_t io_streaming,
vm_size_t cnt)
{
int local_error = 0;
int kret;
+ int flags = 0;
assert(! (cnt & PAGE_MASK));
+ if (io_streaming)
+ flags |= UPL_IOSTREAMING;
+
+ assert((upl_size_t) cnt == cnt);
kret = vnode_pagein(vnode_object->vnode_handle,
(upl_t) NULL,
- (vm_offset_t) NULL,
- offset,
- cnt,
- 0,
+ (upl_offset_t) (offset - base_offset),
+ base_offset,
+ (upl_size_t) cnt,
+ flags,
&local_error);
/*
if(kret == PAGER_ABSENT) {
UPL_CLEAN_IN_PLACE |
UPL_SET_INTERNAL);
count = 0;
- kr = memory_object_upl_request(vnode_object->control_handle,
- offset, cnt,
- &upl, NULL, &count, uplflags);
+ assert((upl_size_t) cnt == cnt);
+ kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
+ base_offset, (upl_size_t) cnt,
+ &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
if (kr == KERN_SUCCESS) {
upl_abort(upl, 0);
upl_deallocate(upl);
}
-
-/*
- *
- */
-void
-vnode_pager_release_from_cache(
- int *cnt)
-{
- memory_object_free_from_cache(
- &realhost, &vnode_pager_ops, cnt);
-}
-
/*
*
*/
vnode_object_create(
struct vnode *vp)
{
- register vnode_pager_t vnode_object;
+ vnode_pager_t vnode_object;
vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
if (vnode_object == VNODE_PAGER_NULL)
* The vm_map call takes both named entry ports and raw memory
* objects in the same parameter. We need to make sure that
* vm_map does not see this object as a named entry port. So,
- * we reserve the second word in the object for a fake ip_kotype
+ * we reserve the first word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
- vnode_object->pager_ops = &vnode_pager_ops;
- vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
+ vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+ vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
+ vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
vnode_object->ref_count = 1;
- vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
vnode_object->vnode_handle = vp;
return(vnode_object);
vnode_pager_t vnode_object;
vnode_object = (vnode_pager_t)name;
- assert(vnode_object->pager_ops == &vnode_pager_ops);
+ assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
return (vnode_object);
}
+struct vnode *
+vnode_pager_lookup_vnode(
+ memory_object_t name)
+{
+ vnode_pager_t vnode_object;
+ vnode_object = (vnode_pager_t)name;
+	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops)
+ return (vnode_object->vnode_handle);
+ else
+ return NULL;
+}
+
/*********************** proc_info implementation *************/
#include <sys/bsdtask_info.h>
-static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uint32_t * vnodeaddr, uint32_t * vid);
+static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);
int
-fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uint32_t *vnodeaddr, uint32_t *vid)
+fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
- vm_map_t map = task->map;
+ vm_map_t map;
vm_map_offset_t address = (vm_map_offset_t )arg;
vm_map_entry_t tmp_entry;
vm_map_entry_t entry;
vm_map_offset_t start;
vm_region_extended_info_data_t extended;
vm_region_top_info_data_t top;
+ boolean_t do_region_footprint;
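+
+	/*
+	 * take a reference on the task's map under the task lock so the
+	 * map can't be deallocated while we walk it
+	 */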
+ task_lock(task);
+ map = task->map;
+	if (map == VM_MAP_NULL) {
+ task_unlock(task);
+ return(0);
+ }
+ vm_map_reference(map);
+ task_unlock(task);
- if (map == VM_MAP_NULL)
- return(0);
+ do_region_footprint = task_self_region_footprint();
vm_map_lock_read(map);
start = address;
+
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ if (do_region_footprint &&
+ address == tmp_entry->vme_end) {
+ ledger_amount_t nonvol, nonvol_compressed;
+
+ /*
+ * This request is right after the last valid
+ * memory region; instead of reporting the
+ * end of the address space, report a fake
+ * memory region to account for non-volatile
+ * purgeable memory owned by this task.
+ */
+
+ ledger_get_balance(
+ task->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ &nonvol);
+ ledger_get_balance(
+ task->ledger,
+ task_ledgers.purgeable_nonvolatile_compressed,
+ &nonvol_compressed);
+ if (nonvol + nonvol_compressed == 0) {
+ /* nothing to report */
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 0;
+ }
+ /* provide fake region for purgeable */
+ pinfo->pri_offset = address;
+ pinfo->pri_protection = VM_PROT_DEFAULT;
+ pinfo->pri_max_protection = VM_PROT_DEFAULT;
+ pinfo->pri_inheritance = VM_INHERIT_NONE;
+ pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
+ pinfo->pri_user_wired_count = 0;
+ pinfo->pri_user_tag = -1;
+ pinfo->pri_pages_resident =
+ (uint32_t) (nonvol / PAGE_SIZE);
+ pinfo->pri_pages_shared_now_private = 0;
+ pinfo->pri_pages_swapped_out =
+ (uint32_t) (nonvol_compressed / PAGE_SIZE);
+ pinfo->pri_pages_dirtied =
+ (uint32_t) (nonvol / PAGE_SIZE);
+ pinfo->pri_ref_count = 1;
+ pinfo->pri_shadow_depth = 0;
+ pinfo->pri_share_mode = SM_PRIVATE;
+ pinfo->pri_private_pages_resident =
+ (uint32_t) (nonvol / PAGE_SIZE);
+ pinfo->pri_shared_pages_resident = 0;
+ pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile);
+ pinfo->pri_address = address;
+ pinfo->pri_size =
+ (uint64_t) (nonvol + nonvol_compressed);
+ pinfo->pri_depth = 0;
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 1;
+ }
vm_map_unlock_read(map);
- return(0);
+ vm_map_deallocate(map);
+ return 0;
}
} else {
entry = tmp_entry;
start = entry->vme_start;
- pinfo->pri_offset = entry->offset;
+ pinfo->pri_offset = VME_OFFSET(entry);
pinfo->pri_protection = entry->protection;
pinfo->pri_max_protection = entry->max_protection;
pinfo->pri_inheritance = entry->inheritance;
pinfo->pri_behavior = entry->behavior;
pinfo->pri_user_wired_count = entry->user_wired_count;
- pinfo->pri_user_tag = entry->alias;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
if (entry->is_sub_map) {
pinfo->pri_flags |= PROC_REGION_SUBMAP;
extended.protection = entry->protection;
- extended.user_tag = entry->alias;
+ extended.user_tag = VME_ALIAS(entry);
extended.pages_resident = 0;
extended.pages_swapped_out = 0;
extended.pages_shared_now_private = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
- vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);
+ vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);
if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
extended.share_mode = SM_PRIVATE;
pinfo->pri_depth = 0;
if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
- *vnodeaddr = (uint32_t)0;
+ *vnodeaddr = (uintptr_t)0;
if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) ==0) {
vm_map_unlock_read(map);
+ vm_map_deallocate(map);
return(1);
}
}
vm_map_unlock_read(map);
+ vm_map_deallocate(map);
return(1);
}
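+
+/*
+ * Variant of fill_procregioninfo() that scans forward from the given
+ * address and reports only the first vnode-backed mapping it finds.
+ */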
+int
+fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
+{
+
+ vm_map_t map;
+ vm_map_offset_t address = (vm_map_offset_t )arg;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+
+ task_lock(task);
+ map = task->map;
+	if (map == VM_MAP_NULL) {
+ task_unlock(task);
+ return(0);
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ vm_map_lock_read(map);
+
+ if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(0);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ while (entry != vm_map_to_entry(map)) {
+ *vnodeaddr = 0;
+ *vid = 0;
+
+ if (entry->is_sub_map == 0) {
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
+
+ pinfo->pri_offset = VME_OFFSET(entry);
+ pinfo->pri_protection = entry->protection;
+ pinfo->pri_max_protection = entry->max_protection;
+ pinfo->pri_inheritance = entry->inheritance;
+ pinfo->pri_behavior = entry->behavior;
+ pinfo->pri_user_wired_count = entry->user_wired_count;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
+
+ if (entry->is_shared)
+ pinfo->pri_flags |= PROC_REGION_SHARED;
+
+ pinfo->pri_pages_resident = 0;
+ pinfo->pri_pages_shared_now_private = 0;
+ pinfo->pri_pages_swapped_out = 0;
+ pinfo->pri_pages_dirtied = 0;
+ pinfo->pri_ref_count = 0;
+ pinfo->pri_shadow_depth = 0;
+ pinfo->pri_share_mode = 0;
+
+ pinfo->pri_private_pages_resident = 0;
+ pinfo->pri_shared_pages_resident = 0;
+ pinfo->pri_obj_id = 0;
+
+ pinfo->pri_address = (uint64_t)entry->vme_start;
+ pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
+ pinfo->pri_depth = 0;
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(1);
+ }
+ }
+
+ /* Keep searching for a vnode-backed mapping */
+ entry = entry->vme_next;
+ }
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(0);
+}
+
static int
fill_vnodeinfoforaddr(
vm_map_entry_t entry,
- uint32_t * vnodeaddr,
+ uintptr_t * vnodeaddr,
uint32_t * vid)
{
vm_object_t top_object, object;
* The last object in the shadow chain has the
* relevant pager information.
*/
- top_object = entry->object.vm_object;
+ top_object = VME_OBJECT(entry);
if (top_object == VM_OBJECT_NULL) {
object = VM_OBJECT_NULL;
shadow_depth = 0;
kern_return_t
vnode_pager_get_object_vnode (
memory_object_t mem_obj,
- uint32_t * vnodeaddr,
+ uintptr_t * vnodeaddr,
uint32_t * vid)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
if (vnode_object->vnode_handle) {
- *vnodeaddr = (uint32_t)vnode_object->vnode_handle;
+ *vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);
return(KERN_SUCCESS);
return(KERN_FAILURE);
}
+#if CONFIG_IOSCHED
+kern_return_t
+vnode_pager_get_object_devvp(
+ memory_object_t mem_obj,
+ uintptr_t *devvp)
+{
+ struct vnode *vp;
+ uint32_t vid;
+
+	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, &vid) != KERN_SUCCESS)
+ return (KERN_FAILURE);
+ *devvp = (uintptr_t)vnode_mountdevvp(vp);
+ if (*devvp)
+ return (KERN_SUCCESS);
+ return (KERN_FAILURE);
+}
+#endif
+
+/*
+ * Find the underlying vnode object for the given vm_map_entry.  If found,
+ * return with the object locked; otherwise return VM_OBJECT_NULL with
+ * nothing locked.
+ */
+
+vm_object_t
+find_vnode_object(
+ vm_map_entry_t entry
+)
+{
+ vm_object_t top_object, object;
+ memory_object_t memory_object;
+ memory_object_pager_ops_t pager_ops;
+
+ if (!entry->is_sub_map) {
+
+ /*
+ * The last object in the shadow chain has the
+ * relevant pager information.
+ */
+
+ top_object = VME_OBJECT(entry);
+
+ if (top_object) {
+ vm_object_lock(top_object);
+
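+			/*
+			 * walk the shadow chain, handing the lock down as we
+			 * go, so we end up holding only the bottom object's lock
+			 */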
+ for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
+ vm_object_lock(object->shadow);
+ vm_object_unlock(object);
+ }
+
+ if (object && !object->internal && object->pager_ready && !object->terminating &&
+ object->alive) {
+ memory_object = object->pager;
+ pager_ops = memory_object->mo_pager_ops;
+
+ /*
+ * If this object points to the vnode_pager_ops, then we found what we're
+ * looking for. Otherwise, this vm_map_entry doesn't have an underlying
+ * vnode and so we fall through to the bottom and return NULL.
+ */
+
+ if (pager_ops == &vnode_pager_ops)
+ return object; /* we return with the object locked */
+ }
+
+ vm_object_unlock(object);
+ }
+
+ }
+
+ return(VM_OBJECT_NULL);
+}