+ assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
+ return vnode_object;
+}
+
+
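+/*
+ * Return the vnode backing the given memory object, or NULL if the
+ * memory object is not managed by the vnode pager.
+ */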
+struct vnode *
+vnode_pager_lookup_vnode(
+ memory_object_t name)
+{
+ vnode_pager_t vnode_object;
+ vnode_object = (vnode_pager_t)name;
+ if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
+ return vnode_object->vnode_handle;
+ } else {
+ return NULL;
+ }
+}
+
+/*********************** proc_info implementation *************/
+
+#include <sys/bsdtask_info.h>
+
+static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);
+
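+/*
+ * Fill "pinfo" with region information for the map entry containing
+ * "arg" (or the first entry following it).  If "vnodeaddr" is non-NULL
+ * and the entry is backed by a vnode, also return the vnode address and
+ * vid.  Returns 1 if a region was reported, 0 otherwise.
+ */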
+int
+fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
+{
+ vm_map_t map;
+ vm_map_offset_t address = (vm_map_offset_t)arg;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+ vm_map_offset_t start;
+ vm_region_extended_info_data_t extended;
+ vm_region_top_info_data_t top;
+ boolean_t do_region_footprint;
+
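+ /* take a reference on the task's map so it stays valid after the task lock is dropped */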
+ task_lock(task);
+ map = task->map;
+ if (map == VM_MAP_NULL) {
+ task_unlock(task);
+ return 0;
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ do_region_footprint = task_self_region_footprint();
+
+ vm_map_lock_read(map);
+
+ start = address;
+
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ if (do_region_footprint &&
+ address == tmp_entry->vme_end) {
+ ledger_amount_t ledger_resident;
+ ledger_amount_t ledger_compressed;
+
+ /*
+ * This request is right after the last valid
+ * memory region; instead of reporting the
+ * end of the address space, report a fake
+ * memory region to account for non-volatile
+ * purgeable and/or ledger-tagged memory
+ * owned by this task.
+ */
+ task_ledgers_footprint(task->ledger,
+ &ledger_resident,
+ &ledger_compressed);
+ if (ledger_resident + ledger_compressed == 0) {
+ /* nothing to report */
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 0;
+ }
+
+ /* provide fake region for purgeable */
+ pinfo->pri_offset = address;
+ pinfo->pri_protection = VM_PROT_DEFAULT;
+ pinfo->pri_max_protection = VM_PROT_DEFAULT;
+ pinfo->pri_inheritance = VM_INHERIT_NONE;
+ pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
+ pinfo->pri_user_wired_count = 0;
+ pinfo->pri_user_tag = -1;
+ pinfo->pri_pages_resident =
+ (uint32_t) (ledger_resident / PAGE_SIZE);
+ pinfo->pri_pages_shared_now_private = 0;
+ pinfo->pri_pages_swapped_out =
+ (uint32_t) (ledger_compressed / PAGE_SIZE);
+ pinfo->pri_pages_dirtied =
+ (uint32_t) (ledger_resident / PAGE_SIZE);
+ pinfo->pri_ref_count = 1;
+ pinfo->pri_shadow_depth = 0;
+ pinfo->pri_share_mode = SM_PRIVATE;
+ pinfo->pri_private_pages_resident =
+ (uint32_t) (ledger_resident / PAGE_SIZE);
+ pinfo->pri_shared_pages_resident = 0;
+ pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile);
+ pinfo->pri_address = address;
+ pinfo->pri_size =
+ (uint64_t) (ledger_resident + ledger_compressed);
+ pinfo->pri_depth = 0;
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 1;
+ }
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 0;
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ start = entry->vme_start;
+
+ pinfo->pri_offset = VME_OFFSET(entry);
+ pinfo->pri_protection = entry->protection;
+ pinfo->pri_max_protection = entry->max_protection;
+ pinfo->pri_inheritance = entry->inheritance;
+ pinfo->pri_behavior = entry->behavior;
+ pinfo->pri_user_wired_count = entry->user_wired_count;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
+
+ if (entry->is_sub_map) {
+ pinfo->pri_flags |= PROC_REGION_SUBMAP;
+ } else {
+ if (entry->is_shared) {
+ pinfo->pri_flags |= PROC_REGION_SHARED;
+ }
+ }
+
+
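+ /* collect extended statistics (resident, dirtied, swapped pages) for this entry */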
+ extended.protection = entry->protection;
+ extended.user_tag = VME_ALIAS(entry);
+ extended.pages_resident = 0;
+ extended.pages_swapped_out = 0;
+ extended.pages_shared_now_private = 0;
+ extended.pages_dirtied = 0;
+ extended.external_pager = 0;
+ extended.shadow_depth = 0;
+
+ vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);
+
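+ /*
+ * If the only references to an externally paged object are this map
+ * and the pager itself, report the mapping as private rather than shared.
+ */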
+ if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) {
+ extended.share_mode = SM_PRIVATE;
+ }
+
+ top.private_pages_resident = 0;
+ top.shared_pages_resident = 0;
+ vm_map_region_top_walk(entry, &top);
+
+
+ pinfo->pri_pages_resident = extended.pages_resident;
+ pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
+ pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
+ pinfo->pri_pages_dirtied = extended.pages_dirtied;
+ pinfo->pri_ref_count = extended.ref_count;
+ pinfo->pri_shadow_depth = extended.shadow_depth;
+ pinfo->pri_share_mode = extended.share_mode;
+
+ pinfo->pri_private_pages_resident = top.private_pages_resident;
+ pinfo->pri_shared_pages_resident = top.shared_pages_resident;
+ pinfo->pri_obj_id = top.obj_id;
+
+ pinfo->pri_address = (uint64_t)start;
+ pinfo->pri_size = (uint64_t)(entry->vme_end - start);
+ pinfo->pri_depth = 0;
+
+ if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
+ *vnodeaddr = (uintptr_t)0;
+
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 1;
+ }
+ }
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 1;
+}
+
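+/*
+ * Like fill_procregioninfo(), but scan forward from "arg" for the first
+ * map entry backed by a vnode and report only that mapping.  Returns 1
+ * if a vnode-backed region was found, 0 otherwise.
+ */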
+int
+fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
+{
+ vm_map_t map;
+ vm_map_offset_t address = (vm_map_offset_t)arg;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+
+ task_lock(task);
+ map = task->map;
+ if (map == VM_MAP_NULL) {
+ task_unlock(task);
+ return 0;
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ vm_map_lock_read(map);
+
+ if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 0;
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ while (entry != vm_map_to_entry(map)) {
+ *vnodeaddr = 0;
+ *vid = 0;
+
+ if (entry->is_sub_map == 0) {
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
+ pinfo->pri_offset = VME_OFFSET(entry);
+ pinfo->pri_protection = entry->protection;
+ pinfo->pri_max_protection = entry->max_protection;
+ pinfo->pri_inheritance = entry->inheritance;
+ pinfo->pri_behavior = entry->behavior;
+ pinfo->pri_user_wired_count = entry->user_wired_count;
+ pinfo->pri_user_tag = VME_ALIAS(entry);
+
+ if (entry->is_shared) {
+ pinfo->pri_flags |= PROC_REGION_SHARED;
+ }
+
+ pinfo->pri_pages_resident = 0;
+ pinfo->pri_pages_shared_now_private = 0;
+ pinfo->pri_pages_swapped_out = 0;
+ pinfo->pri_pages_dirtied = 0;
+ pinfo->pri_ref_count = 0;
+ pinfo->pri_shadow_depth = 0;
+ pinfo->pri_share_mode = 0;
+
+ pinfo->pri_private_pages_resident = 0;
+ pinfo->pri_shared_pages_resident = 0;
+ pinfo->pri_obj_id = 0;
+
+ pinfo->pri_address = (uint64_t)entry->vme_start;
+ pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
+ pinfo->pri_depth = 0;
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 1;
+ }
+ }
+
+ /* Keep searching for a vnode-backed mapping */
+ entry = entry->vme_next;
+ }
+
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return 0;
+}
+
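+/*
+ * Starting at "offset", find the first vnode-backed map entry and return
+ * its vnode address, vid, start address and length.  Returns 1 on
+ * success, 0 if no such entry exists.
+ */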
+int
+find_region_details(task_t task, vm_map_offset_t offset,
+ uintptr_t *vnodeaddr, uint32_t *vid,
+ uint64_t *start, uint64_t *len)
+{
+ vm_map_t map;
+ vm_map_entry_t tmp_entry, entry;
+ int rc = 0;
+
+ task_lock(task);
+ map = task->map;
+ if (map == VM_MAP_NULL) {
+ task_unlock(task);
+ return 0;
+ }
+ vm_map_reference(map);
+ task_unlock(task);
+
+ vm_map_lock_read(map);
+ if (!vm_map_lookup_entry(map, offset, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ rc = 0;
+ goto ret;
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ while (entry != vm_map_to_entry(map)) {
+ *vnodeaddr = 0;
+ *vid = 0;
+ *start = 0;
+ *len = 0;
+
+ if (entry->is_sub_map == 0) {
+ if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
+ *start = entry->vme_start;
+ *len = entry->vme_end - entry->vme_start;
+ rc = 1;
+ goto ret;
+ }
+ }
+
+ entry = entry->vme_next;
+ }
+
+ret:
+ vm_map_unlock_read(map);
+ vm_map_deallocate(map);
+ return rc;
+}
+
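+/*
+ * If the given map entry is backed by a vnode, return the vnode address
+ * and vid through "vnodeaddr" and "vid".  Returns 1 on success, 0 if the
+ * entry is a submap or is not vnode-backed.
+ */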
+static int
+fill_vnodeinfoforaddr(
+ vm_map_entry_t entry,
+ uintptr_t * vnodeaddr,
+ uint32_t * vid)
+{
+ vm_object_t top_object, object;
+ memory_object_t memory_object;
+ memory_object_pager_ops_t pager_ops;
+ kern_return_t kr;
+ int shadow_depth;
+
+
+ if (entry->is_sub_map) {
+ return 0;
+ } else {
+ /*
+ * The last object in the shadow chain has the
+ * relevant pager information.
+ */
+ top_object = VME_OBJECT(entry);
+ if (top_object == VM_OBJECT_NULL) {
+ object = VM_OBJECT_NULL;
+ shadow_depth = 0;
+ } else {
+ vm_object_lock(top_object);
+ for (object = top_object, shadow_depth = 0;
+ object->shadow != VM_OBJECT_NULL;
+ object = object->shadow, shadow_depth++) {
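+ /* hand-over-hand locking: lock the shadow before releasing the current object */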
+ vm_object_lock(object->shadow);
+ vm_object_unlock(object);
+ }
+ }
+ }
+
+ if (object == VM_OBJECT_NULL) {
+ return 0;
+ } else if (object->internal) {
+ vm_object_unlock(object);
+ return 0;
+ } else if (!object->pager_ready ||
+ object->terminating ||
+ !object->alive) {
+ vm_object_unlock(object);
+ return 0;
+ } else {
+ memory_object = object->pager;
+ pager_ops = memory_object->mo_pager_ops;
+ if (pager_ops == &vnode_pager_ops) {
+ kr = vnode_pager_get_object_vnode(
+ memory_object,
+ vnodeaddr, vid);
+ if (kr != KERN_SUCCESS) {
+ vm_object_unlock(object);
+ return 0;
+ }
+ } else {
+ vm_object_unlock(object);
+ return 0;
+ }
+ }
+ vm_object_unlock(object);
+ return 1;
+}
+
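+/*
+ * Return the vnode (and its vid) associated with a vnode pager memory
+ * object.  Returns KERN_FAILURE if the pager has no vnode handle.
+ */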
+kern_return_t
+vnode_pager_get_object_vnode(
+ memory_object_t mem_obj,
+ uintptr_t * vnodeaddr,
+ uint32_t * vid)
+{
+ vnode_pager_t vnode_object;
+
+ vnode_object = vnode_pager_lookup(mem_obj);
+ if (vnode_object->vnode_handle) {
+ *vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
+ *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);
+
+ return KERN_SUCCESS;
+ }
+
+ return KERN_FAILURE;
+}
+
+#if CONFIG_IOSCHED
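+/*
+ * Return the device vnode of the mount containing the memory object's
+ * vnode.  Returns KERN_FAILURE if it cannot be obtained.
+ */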
+kern_return_t
+vnode_pager_get_object_devvp(
+ memory_object_t mem_obj,
+ uintptr_t *devvp)
+{
+ struct vnode *vp;
+ uint32_t vid;
+
+ if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, &vid) != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+ *devvp = (uintptr_t)vnode_mountdevvp(vp);
+ if (*devvp) {
+ return KERN_SUCCESS;
+ }
+ return KERN_FAILURE;