return( vm_map_last_entry(map)->vme_end);
}
-/*
- * Legacy routines to get the start and end for a vm_map_t. They
- * return them in the vm_offset_t format. So, they should only be
- * called on maps that are the same size as the kernel map for
- * accurate results.
- */
-vm_offset_t
-get_vm_start(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
-}
-
-vm_offset_t
-get_vm_end(
- vm_map_t map)
-{
- return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
-}
-
/*
* BSD VNODE PAGER
*/
vnode_pager_synchronize,
vnode_pager_map,
vnode_pager_last_unmap,
+ NULL, /* data_reclaim */
"vnode pager"
};
struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;
+
#define pager_ikot pager_header.io_bits
ipc_port_t
}
/* trigger_port is locked and active */
ipc_port_make_send_locked(trigger_port);
- /* now unlocked */
+ ip_unlock(trigger_port);
default_pager_triggers(default_pager,
0, 0,
SWAP_FILE_CREATION_ERROR,
}
/* trigger_port is locked and active */
ipc_port_make_send_locked(trigger_port);
- /* now unlocked */
+ ip_unlock(trigger_port);
default_pager_triggers(default_pager,
hi_water, low_water,
HI_WAT_ALERT, trigger_port);
}
/* trigger_port is locked and active */
ipc_port_make_send_locked(trigger_port);
- /* and now its unlocked */
+ ip_unlock(trigger_port);
default_pager_triggers(default_pager,
hi_water, low_water,
LO_WAT_ALERT, trigger_port);
if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
break;
- /*
- * if we're in this routine, we are inside a filesystem's
- * locking model, so we don't ever want to wait for pages that have
- * list_req_pending == TRUE since it means that the
- * page is a candidate for some type of I/O operation,
- * but that it has not yet been gathered into a UPL...
- * this implies that it is still outside the domain
- * of the filesystem and that whoever is responsible for
- * grabbing it into a UPL may be stuck behind the filesystem
- * lock this thread owns, or trying to take a lock exclusively
- * and waiting for the readers to drain from a rw lock...
- * if we block in those cases, we will deadlock
- */
- if (dst_page->list_req_pending) {
-
- if (dst_page->absent) {
- /*
- * this is the list_req_pending | absent | busy case
- * which originates from vm_fault_page... we want
- * to fall out of the fast path and go back
- * to the caller which will gather this page
- * into a UPL and issue the I/O if no one
- * else beats us to it
- */
- break;
- }
- if (dst_page->pageout) {
- /*
- * this is the list_req_pending | pageout | busy case
- * which can originate from both the pageout_scan and
- * msync worlds... we need to reset the state of this page to indicate
- * it should stay in the cache marked dirty... nothing else we
- * can do at this point... we can't block on it, we can't busy
- * it and we can't clean it from this routine.
- */
- vm_page_lockspin_queues();
-
- vm_pageout_queue_steal(dst_page, TRUE);
- vm_page_deactivate(dst_page);
- vm_page_unlock_queues();
- }
- /*
- * this is the list_req_pending | cleaning case...
- * we can go ahead and deal with this page since
- * its ok for us to mark this page busy... if a UPL
- * tries to gather this page, it will block until the
- * busy is cleared, thus allowing us safe use of the page
- * when we're done with it, we will clear busy and wake
- * up anyone waiting on it, thus allowing the UPL creation
- * to finish
- */
-
- } else if (dst_page->busy || dst_page->cleaning) {
+ if (dst_page->busy || dst_page->cleaning) {
/*
* someone else is playing with the page... if we've
* already collected pages into this run, go ahead
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
-
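+ /*
+ * the page is headed for the laundry... clear its pageout
+ * state and pull it back off the pageout queues so we can
+ * safely use it here
+ */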
+ if (dst_page->laundry) {
+ dst_page->pageout = FALSE;
+
+ vm_pageout_steal_laundry(dst_page, FALSE);
+ }
/*
* this routine is only called when copying
* to/from real files... no need to consider
assert(!dst_page->encrypted);
if (mark_dirty) {
- dst_page->dirty = TRUE;
+ SET_PAGE_DIRTY(dst_page, FALSE);
if (dst_page->cs_validated &&
!dst_page->cs_tainted) {
/*
if ((xsize = PAGE_SIZE - start_offset) > io_requested)
xsize = io_requested;
- if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
+ if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << PAGE_SHIFT) + start_offset), xsize, uio)) )
break;
io_requested -= xsize;
* update clustered and speculative state
*
*/
- VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ if (dst_page->clustered)
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
PAGE_WAKEUP_DONE(dst_page);
}
size = (vm_size_t) sizeof(struct vnode_pager);
vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
PAGE_SIZE, "vnode pager structures");
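+ /*
+ * vnode pager structures aren't charged to the caller and the
+ * zone doesn't need to be encrypted on hibernation
+ */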
+ zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
+ zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);
+
#if CONFIG_CODE_DECRYPTION
apple_protect_pager_bootstrap();
#endif /* CONFIG_CODE_DECRYPTION */
}
kern_return_t
-vnode_pager_check_hard_throttle(
+vnode_pager_get_throttle_io_limit(
memory_object_t mem_obj,
- uint32_t *limit,
- uint32_t hard_throttle)
+ uint32_t *limit)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
- (void)vnode_pager_return_hard_throttle_limit(vnode_object->vnode_handle, limit, hard_throttle);
+ (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
+ return KERN_SUCCESS;
+}
+
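+/*
+ * Report whether the vnode backing this memory object lives on a
+ * solid-state device.
+ */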
+kern_return_t
+vnode_pager_get_isSSD(
+ memory_object_t mem_obj,
+ boolean_t *isSSD)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj->mo_pager_ops != &vnode_pager_ops)
+ return KERN_INVALID_ARGUMENT;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+
+ *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
return KERN_SUCCESS;
}
}
kern_return_t
-vnode_pager_get_object_pathname(
+vnode_pager_get_object_name(
memory_object_t mem_obj,
char *pathname,
- vm_size_t *length_p)
+ vm_size_t pathname_len,
+ char *filename,
+ vm_size_t filename_len,
+ boolean_t *truncated_path_p)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
- return vnode_pager_get_pathname(vnode_object->vnode_handle,
- pathname,
- length_p);
+ return vnode_pager_get_name(vnode_object->vnode_handle,
+ pathname,
+ pathname_len,
+ filename,
+ filename_len,
+ truncated_path_p);
}
kern_return_t
-vnode_pager_get_object_filename(
- memory_object_t mem_obj,
- const char **filename)
+vnode_pager_get_object_mtime(
+ memory_object_t mem_obj,
+ struct timespec *mtime,
+ struct timespec *cs_mtime)
{
vnode_pager_t vnode_object;
vnode_object = vnode_pager_lookup(mem_obj);
- return vnode_pager_get_filename(vnode_object->vnode_handle,
- filename);
+ return vnode_pager_get_mtime(vnode_object->vnode_handle,
+ mtime,
+ cs_mtime);
}
kern_return_t
blobs);
}
+#if CHECK_CS_VALIDATION_BITMAP
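+/*
+ * Pass a code-signing validation-bitmap operation through to the UBC
+ * layer for the vnode backing this memory object.
+ */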
+kern_return_t
+vnode_pager_cs_check_validation_bitmap(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ int optype)
+{
+ vnode_pager_t vnode_object;
+
+ if (mem_obj == MEMORY_OBJECT_NULL ||
+ mem_obj->mo_pager_ops != &vnode_pager_ops) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+ return ubc_cs_check_validation_bitmap( vnode_object->vnode_handle, offset, optype );
+}
+#endif /* CHECK_CS_VALIDATION_BITMAP */
+
/*
*
*/
vnode_object = vnode_pager_lookup(mem_obj);
- size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ size = MAX_UPL_TRANSFER_BYTES;
base_offset = offset;
if (memory_object_cluster_size(vnode_object->control_handle, &base_offset, &size, &io_streaming, fault_info) != KERN_SUCCESS)
upl_flags |= UPL_KEEPCACHED;
while (cnt) {
- size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */
+ size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */
assert((upl_size_t) size == size);
vnode_pageout(vnode_object->vnode_handle,
* and then clip the size to ensure we
* don't request past the end of the underlying file
*/
- size = PAGE_SIZE * MAX_UPL_TRANSFER;
+ size = MAX_UPL_TRANSFER_BYTES;
base_offset = offset & ~((signed)(size - 1));
if ((base_offset + size) > vnode_size)
}
assert((upl_size_t) size == size);
vnode_pageout(vnode_object->vnode_handle,
- NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size, UPL_VNODE_PAGER, NULL);
+ NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
+ (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
}
}
start = entry->vme_start;
- pinfo->pri_offset = entry->offset;
+ pinfo->pri_offset = VME_OFFSET(entry);
pinfo->pri_protection = entry->protection;
pinfo->pri_max_protection = entry->max_protection;
pinfo->pri_inheritance = entry->inheritance;
pinfo->pri_behavior = entry->behavior;
pinfo->pri_user_wired_count = entry->user_wired_count;
- pinfo->pri_user_tag = entry->alias;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
if (entry->is_sub_map) {
pinfo->pri_flags |= PROC_REGION_SUBMAP;
extended.protection = entry->protection;
- extended.user_tag = entry->alias;
+ extended.user_tag = VME_ALIAS(entry);
extended.pages_resident = 0;
extended.pages_swapped_out = 0;
extended.pages_shared_now_private = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
- vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);
+ vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended);
if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
extended.share_mode = SM_PRIVATE;
return(1);
}
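+/*
+ * Variant of fill_procregioninfo() that scans forward from the given
+ * address and reports only the first mapping backed by a vnode.
+ */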
+int
+fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
+{
+
+ vm_map_t map;
+ vm_map_offset_t address = (vm_map_offset_t)arg;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+
+ task_lock(task);
+ map = task->map;
+ if (map == VM_MAP_NULL)
+ {
+ task_unlock(task);
+ return(0);
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ vm_map_lock_read(map);
+
+ if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(0);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ while (entry != vm_map_to_entry(map)) {
+ *vnodeaddr = 0;
+ *vid = 0;
+
+ if (entry->is_sub_map == 0) {
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
+
+ pinfo->pri_offset = VME_OFFSET(entry);
+ pinfo->pri_protection = entry->protection;
+ pinfo->pri_max_protection = entry->max_protection;
+ pinfo->pri_inheritance = entry->inheritance;
+ pinfo->pri_behavior = entry->behavior;
+ pinfo->pri_user_wired_count = entry->user_wired_count;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
+
+ if (entry->is_shared)
+ pinfo->pri_flags |= PROC_REGION_SHARED;
+
+ pinfo->pri_pages_resident = 0;
+ pinfo->pri_pages_shared_now_private = 0;
+ pinfo->pri_pages_swapped_out = 0;
+ pinfo->pri_pages_dirtied = 0;
+ pinfo->pri_ref_count = 0;
+ pinfo->pri_shadow_depth = 0;
+ pinfo->pri_share_mode = 0;
+
+ pinfo->pri_private_pages_resident = 0;
+ pinfo->pri_shared_pages_resident = 0;
+ pinfo->pri_obj_id = 0;
+
+ pinfo->pri_address = (uint64_t)entry->vme_start;
+ pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
+ pinfo->pri_depth = 0;
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(1);
+ }
+ }
+
+ /* Keep searching for a vnode-backed mapping */
+ entry = entry->vme_next;
+ }
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return(0);
+}
+
static int
fill_vnodeinfoforaddr(
vm_map_entry_t entry,
* The last object in the shadow chain has the
* relevant pager information.
*/
- top_object = entry->object.vm_object;
+ top_object = VME_OBJECT(entry);
if (top_object == VM_OBJECT_NULL) {
object = VM_OBJECT_NULL;
shadow_depth = 0;
return(KERN_FAILURE);
}
+#if CONFIG_IOSCHED
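+/*
+ * Return the device vnode (devvp) of the mount backing this memory
+ * object's vnode.
+ */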
+kern_return_t
+vnode_pager_get_object_devvp(
+ memory_object_t mem_obj,
+ uintptr_t *devvp)
+{
+ struct vnode *vp;
+ uint32_t vid;
+
+ if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS)
+ return (KERN_FAILURE);
+ *devvp = (uintptr_t)vnode_mountdevvp(vp);
+ if (*devvp)
+ return (KERN_SUCCESS);
+ return (KERN_FAILURE);
+}
+#endif
/*
* Find the underlying vnode object for the given vm_map_entry. If found, return with the
* relevant pager information.
*/
- top_object = entry->object.vm_object;
+ top_object = VME_OBJECT(entry);
if (top_object) {
vm_object_lock(top_object);