+
+
+@lldb_command("vm_page_lookup_in_map")
+def VmPageLookupInMap(cmd_args=None):
+ """Lookup up a page at a virtual address in a VM map
+ usage: vm_page_lookup_in_map <map> <vaddr>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInMap.__doc__
+ return
+ map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
+ vaddr = kern.GetValueFromAddress(cmd_args[1], 'vm_map_offset_t')
+ print "vaddr {:#018x} in map {: <#018x}".format(vaddr, map)
+ vm_page_lookup_in_map(map, vaddr)
+
+def vm_page_lookup_in_map(map, vaddr):
+ vaddr = unsigned(vaddr)
+ vme_list_head = map.hdr.links
+ vme_ptr_type = GetType('vm_map_entry *')
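+    # Map entries are kept sorted by start address: stop at the first entry
+    # that begins past vaddr, skip any entry that ends at or before it, then
+    # either recurse into a submap or look the page up in the backing object.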
+ for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
+ if unsigned(vme.links.start) > vaddr:
+ break
+ if unsigned(vme.links.end) <= vaddr:
+ continue
+ offset_in_vme = vaddr - unsigned(vme.links.start)
+ print " offset {:#018x} in map entry {: <#018x} [{:#018x}:{:#018x}] object {: <#018x} offset {:#018x}".format(offset_in_vme, vme, unsigned(vme.links.start), unsigned(vme.links.end), vme.vme_object.vmo_object, unsigned(vme.vme_offset) & ~0xFFF)
+ offset_in_object = offset_in_vme + (unsigned(vme.vme_offset) & ~0xFFF)
+ if vme.is_sub_map:
+ print "vaddr {:#018x} in map {: <#018x}".format(offset_in_object, vme.vme_object.vmo_submap)
+ vm_page_lookup_in_map(vme.vme_object.vmo_submap, offset_in_object)
+ else:
+ vm_page_lookup_in_object(vme.vme_object.vmo_object, offset_in_object)
+
+@lldb_command("vm_page_lookup_in_object")
+def VmPageLookupInObject(cmd_args=None):
+ """Lookup up a page at a given offset in a VM object
+ usage: vm_page_lookup_in_object <object> <offset>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInObject.__doc__
+ return
+ object = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')
+ offset = kern.GetValueFromAddress(cmd_args[1], 'vm_object_offset_t')
+ print "offset {:#018x} in object {: <#018x}".format(offset, object)
+ vm_page_lookup_in_object(object, offset)
+
+def vm_page_lookup_in_object(object, offset):
+ offset = unsigned(offset)
+ page_size = kern.globals.page_size
+ trunc_offset = offset & ~(page_size - 1)
+ print " offset {:#018x} in VM object {: <#018x}".format(offset, object)
+ hash_id = _calc_vm_page_hash(object, trunc_offset)
+ page_list = kern.globals.vm_page_buckets[hash_id].page_list
+ page = _vm_page_unpack_ptr(page_list)
+ while page != 0:
+ m = kern.GetValueFromAddress(page, 'vm_page_t')
+ m_object_val = _vm_page_unpack_ptr(m.vmp_object)
+ m_object = kern.GetValueFromAddress(m_object_val, 'vm_object_t')
+ if unsigned(m_object) != unsigned(object) or unsigned(m.vmp_offset) != unsigned(trunc_offset):
+ page = _vm_page_unpack_ptr(m.vmp_next_m)
+ continue
+ print " resident page {: <#018x} phys {:#010x}".format(m, _vm_page_get_phys_page(m))
+ return
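+    # Not resident: consult the pager next (internal objects use the compressor
+    # pager), then fall back to the shadow object chain, if any.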
+ if object.pager and object.pager_ready:
+ offset_in_pager = trunc_offset + unsigned(object.paging_offset)
+ if not object.internal:
+ print " offset {:#018x} in external '{:s}' {: <#018x}".format(offset_in_pager, object.pager.mo_pager_ops.memory_object_pager_name, object.pager)
+ return
+ pager = Cast(object.pager, 'compressor_pager *')
+ ret = vm_page_lookup_in_compressor_pager(pager, offset_in_pager)
+ if ret:
+ return
+ if object.shadow and not object.phys_contiguous:
+ offset_in_shadow = offset + unsigned(object.vo_un2.vou_shadow_offset)
+ vm_page_lookup_in_object(object.shadow, offset_in_shadow)
+ return
+ print " page is absent and will be zero-filled on demand"
+ return
+
+@lldb_command("vm_page_lookup_in_compressor_pager")
+def VmPageLookupInCompressorPager(cmd_args=None):
+ """Lookup up a page at a given offset in a compressor pager
+ usage: vm_page_lookup_in_compressor_pager <pager> <offset>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInCompressorPager.__doc__
+ return
+ pager = kern.GetValueFromAddress(cmd_args[0], 'compressor_pager_t')
+ offset = kern.GetValueFromAddress(cmd_args[1], 'memory_object_offset_t')
+ print "offset {:#018x} in compressor pager {: <#018x}".format(offset, pager)
+ vm_page_lookup_in_compressor_pager(pager, offset)
+
+def vm_page_lookup_in_compressor_pager(pager, offset):
+ offset = unsigned(offset)
+ page_size = unsigned(kern.globals.page_size)
+ page_num = unsigned(offset / page_size)
+    if page_num >= pager.cpgr_num_slots:
+        print " *** ERROR: vm_page_lookup_in_compressor_pager({: <#018x},{:#018x}): page_num {:#x} >= num_slots {:#x}".format(pager, offset, page_num, pager.cpgr_num_slots)
+ return 0
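+    # Slots are stored in one of three layouts depending on how many the pager
+    # needs: indirect chunk arrays (islots), a small direct array (dslots), or
+    # a few slots embedded directly in the pager structure (eslots).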
+ slots_per_chunk = 512 / sizeof ('compressor_slot_t')
+ num_chunks = unsigned((pager.cpgr_num_slots+slots_per_chunk-1) / slots_per_chunk)
+ if num_chunks > 1:
+ chunk_idx = unsigned(page_num / slots_per_chunk)
+ chunk = pager.cpgr_slots.cpgr_islots[chunk_idx]
+ slot_idx = unsigned(page_num % slots_per_chunk)
+ slot = GetObjectAtIndexFromArray(chunk, slot_idx)
+ slot_str = "islots[{:d}][{:d}]".format(chunk_idx, slot_idx)
+ elif pager.cpgr_num_slots > 2:
+ slot_idx = page_num
+ slot = GetObjectAtIndexFromArray(pager.cpgr_slots.cpgr_dslots, slot_idx)
+ slot_str = "dslots[{:d}]".format(slot_idx)
+ else:
+ slot_idx = page_num
+ slot = GetObjectAtIndexFromArray(pager.cpgr_slots.cpgr_eslots, slot_idx)
+ slot_str = "eslots[{:d}]".format(slot_idx)
+ print " offset {:#018x} in compressor pager {: <#018x} {:s} slot {: <#018x}".format(offset, pager, slot_str, slot)
+ if slot == 0:
+ return 0
+ slot_value = dereference(slot)
+ print " value {:#010x}".format(slot_value)
+ vm_page_lookup_in_compressor(Cast(slot, 'c_slot_mapping_t'))
+ return 1
+
+@lldb_command("vm_page_lookup_in_compressor")
+def VmPageLookupInCompressor(cmd_args=None):
+ """Lookup up a page in a given compressor slot
+ usage: vm_page_lookup_in_compressor <slot>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", VmPageLookupInCompressor.__doc__
+ return
+ slot = kern.GetValueFromAddress(cmd_args[0], 'compressor_slot_t *')
+ print "compressor slot {: <#018x}".format(slot)
+ vm_page_lookup_in_compressor(slot)
+
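+# Sentinel c_segment number marking a "single value" page; such a page's value
+# is kept in c_segment_sv_hash_table rather than in a compressor segment.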
+C_SV_CSEG_ID = ((1 << 22) - 1)
+
+def vm_page_lookup_in_compressor(slot_ptr):
+    slot_ptr = Cast(slot_ptr, 'compressor_slot_t *')
+    if slot_ptr == 0:
+        return
+    slot_value = dereference(slot_ptr)
+    slot = Cast(slot_value, 'c_slot_mapping')
+    print slot
+    print "compressor slot {: <#018x} -> {:#010x} cseg {:d} cindx {:d}".format(unsigned(slot_ptr), unsigned(slot_value), slot.s_cseg, slot.s_cindx)
+ if slot.s_cseg == C_SV_CSEG_ID:
+ sv = kern.globals.c_segment_sv_hash_table
+ print "single value[{:#d}]: ref {:d} value {:#010x}".format(slot.s_cindx, sv[slot.s_cindx].c_sv_he_un.c_sv_he.c_sv_he_ref, sv[slot.s_cindx].c_sv_he_un.c_sv_he.c_sv_he_data)
+ return
+ if slot.s_cseg == 0 or unsigned(slot.s_cseg) > unsigned(kern.globals.c_segments_available):
+ print "*** ERROR: s_cseg {:d} is out of bounds (1 - {:d})".format(slot.s_cseg, unsigned(kern.globals.c_segments_available))
+ return
+ c_segments = kern.globals.c_segments
+ c_segments_elt = GetObjectAtIndexFromArray(c_segments, slot.s_cseg-1)
+ c_seg = c_segments_elt.c_seg
+ c_no_data = 0
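+    # Translate c_seg.c_state into a name; states with no in-core data
+    # (empty, free, swapped out, sparse, bad) suppress the buffer dump below.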
+ if hasattr(c_seg, 'c_state'):
+ c_state = c_seg.c_state
+ if c_state == 0:
+ c_state_str = "C_IS_EMPTY"
+ c_no_data = 1
+ elif c_state == 1:
+ c_state_str = "C_IS_FREE"
+ c_no_data = 1
+ elif c_state == 2:
+ c_state_str = "C_IS_FILLING"
+ elif c_state == 3:
+ c_state_str = "C_ON_AGE_Q"
+ elif c_state == 4:
+ c_state_str = "C_ON_SWAPOUT_Q"
+ elif c_state == 5:
+ c_state_str = "C_ON_SWAPPEDOUT_Q"
+ c_no_data = 1
+ elif c_state == 6:
+ c_state_str = "C_ON_SWAPPEDOUTSPARSE_Q"
+ c_no_data = 1
+ elif c_state == 7:
+ c_state_str = "C_ON_SWAPPEDIN_Q"
+ elif c_state == 8:
+ c_state_str = "C_ON_MAJORCOMPACT_Q"
+ elif c_state == 9:
+ c_state_str = "C_ON_BAD_Q"
+ c_no_data = 1
+ else:
+ c_state_str = "<unknown>"
+ else:
+ c_state = -1
+ c_state_str = "<no c_state field>"
+ print "c_segments[{:d}] {: <#018x} c_seg {: <#018x} c_state {:#x}={:s}".format(slot.s_cseg-1, c_segments_elt, c_seg, c_state, c_state_str)
+ c_indx = unsigned(slot.s_cindx)
+ if hasattr(c_seg, 'c_slot_var_array'):
+ c_seg_fixed_array_len = kern.globals.c_seg_fixed_array_len
+ if c_indx < c_seg_fixed_array_len:
+ cs = c_seg.c_slot_fixed_array[c_indx]
+ else:
+ cs = GetObjectAtIndexFromArray(c_seg.c_slot_var_array, c_indx - c_seg_fixed_array_len)
+ else:
+ C_SEG_SLOT_ARRAY_SIZE = 64
+ C_SEG_SLOT_ARRAY_MASK = C_SEG_SLOT_ARRAY_SIZE - 1
+ cs = GetObjectAtIndexFromArray(c_seg.c_slots[c_indx / C_SEG_SLOT_ARRAY_SIZE], c_indx & C_SEG_SLOT_ARRAY_MASK)
+ print cs
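+    # c_packed_ptr is the slot's back-pointer to its c_slot_mapping, stored
+    # shifted right by 2 and relative to the kernel/kext base address.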
+ c_slot_unpacked_ptr = (unsigned(cs.c_packed_ptr) << 2) + vm_min_kernel_and_kext_address()
+ print "c_slot {: <#018x} c_offset {:#x} c_size {:#x} c_packed_ptr {:#x} (unpacked: {: <#018x})".format(cs, cs.c_offset, cs.c_size, cs.c_packed_ptr, unsigned(c_slot_unpacked_ptr))
+ if unsigned(slot_ptr) != unsigned(c_slot_unpacked_ptr):
+ print "*** ERROR: compressor slot {: <#018x} points back to {: <#018x} instead of itself".format(slot_ptr, c_slot_unpacked_ptr)
+ if c_no_data == 0:
+ c_data = c_seg.c_store.c_buffer + (4 * cs.c_offset)
+ c_size = cs.c_size
+ cmd = "memory read {: <#018x} {: <#018x} --force".format(c_data, c_data + c_size)
+ print cmd
+ print lldb_run_command(cmd)
+ else:
+ print "<no compressed data>"
+
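+# Base address used to unpack c_packed_ptr values: read the kernel symbol if it
+# exists, otherwise fall back to per-architecture constants.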
+def vm_min_kernel_and_kext_address(cmd_args=None):
+ if hasattr(kern.globals, 'vm_min_kernel_and_kext_address'):
+ return unsigned(kern.globals.vm_min_kernel_and_kext_address)
+ elif kern.arch == 'x86_64':
+ return unsigned(0xffffff7f80000000)
+ elif kern.arch == 'arm64':
+ return unsigned(0xffffff8000000000)
+ elif kern.arch == 'arm':
+ return unsigned(0x80000000)
+ else:
+ print "vm_min_kernel_and_kext_address(): unknown arch '{:s}'".format(kern.arch)
+ return unsigned(0)
+
+def print_hex_data(data, begin_offset=0, desc=""):
+ """ print on stdout "hexdump -C < data" like output
+ params:
+ data - bytearray or array of int where each int < 255
+ begin_offset - int offset that should be printed in left column
+ desc - str optional description to print on the first line to describe data
+ """
+ if desc:
+ print "{}:".format(desc)
+ index = 0
+ total_len = len(data)
+ hex_buf = ""
+ char_buf = ""
+ while index < total_len:
+ hex_buf += " {:02x}".format(data[index])
+ if data[index] < 0x20 or data[index] > 0x7e:
+ char_buf += "."
+ else:
+ char_buf += "{:c}".format(data[index])
+ index += 1
+ if index and index % 8 == 0:
+ hex_buf += " "
+ if index > 1 and (index % 16) == 0:
+ print "{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf)
+ hex_buf = ""
+ char_buf = ""
+ print "{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf)
+ return
+
+@lldb_command('vm_scan_all_pages')
+def VMScanAllPages(cmd_args=None):
+ """Scans the vm_pages[] array
+ """
+ vm_pages_count = kern.globals.vm_pages_count
+ vm_pages = kern.globals.vm_pages
+
+ free_count = 0
+ local_free_count = 0
+ active_count = 0
+ local_active_count = 0
+ inactive_count = 0
+ speculative_count = 0
+ throttled_count = 0
+ wired_count = 0
+ compressor_count = 0
+ pageable_internal_count = 0
+ pageable_external_count = 0
+ secluded_count = 0
+ secluded_free_count = 0
+ secluded_inuse_count = 0
+
+ i = 0
+ while i < vm_pages_count:
+
+ if i % 10000 == 0:
+ print "{:d}/{:d}...\n".format(i,vm_pages_count)
+
+ m = vm_pages[i]
+
+ internal = 0
+ external = 0
+        m_object_val = _vm_page_unpack_ptr(m.vmp_object)
+        m_object = 0
+        if m_object_val:
+            m_object = kern.GetValueFromAddress(m_object_val, 'vm_object_t')
+
+        if m_object:
+ if m_object.internal:
+ internal = 1
+ else:
+ external = 1
+
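+        # Classify the page by the first matching state; only active and
+        # inactive pages count as pageable for the internal/external totals.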
+ if m.vmp_wire_count != 0 and m.vmp_local == 0:
+ wired_count = wired_count + 1
+ pageable = 0
+ elif m.vmp_throttled:
+ throttled_count = throttled_count + 1
+ pageable = 0
+ elif m.vmp_active:
+ active_count = active_count + 1
+ pageable = 1
+ elif m.vmp_local:
+ local_active_count = local_active_count + 1
+ pageable = 0
+ elif m.vmp_inactive:
+ inactive_count = inactive_count + 1
+ pageable = 1
+ elif m.vmp_speculative:
+ speculative_count = speculative_count + 1
+ pageable = 0
+ elif m.vmp_free:
+ free_count = free_count + 1
+ pageable = 0
+ elif m.vmp_secluded:
+ secluded_count = secluded_count + 1
+ if m_object == 0:
+ secluded_free_count = secluded_free_count + 1
+ else:
+ secluded_inuse_count = secluded_inuse_count + 1
+ pageable = 0
+ elif m_object == 0 and m.vmp_busy:
+ local_free_count = local_free_count + 1
+ pageable = 0
+ elif m.vmp_compressor:
+ compressor_count = compressor_count + 1
+ pageable = 0
+ else:
+ print "weird page vm_pages[{:d}]?\n".format(i)
+ pageable = 0
+
+ if pageable:
+ if internal:
+ pageable_internal_count = pageable_internal_count + 1
+ else:
+ pageable_external_count = pageable_external_count + 1
+ i = i + 1
+
+ print "vm_pages_count = {:d}\n".format(vm_pages_count)
+
+ print "wired_count = {:d}\n".format(wired_count)
+ print "throttled_count = {:d}\n".format(throttled_count)
+ print "active_count = {:d}\n".format(active_count)
+ print "local_active_count = {:d}\n".format(local_active_count)
+ print "inactive_count = {:d}\n".format(inactive_count)
+ print "speculative_count = {:d}\n".format(speculative_count)
+ print "free_count = {:d}\n".format(free_count)
+ print "local_free_count = {:d}\n".format(local_free_count)
+ print "compressor_count = {:d}\n".format(compressor_count)
+
+ print "pageable_internal_count = {:d}\n".format(pageable_internal_count)
+ print "pageable_external_count = {:d}\n".format(pageable_external_count)
+ print "secluded_count = {:d}\n".format(secluded_count)
+ print "secluded_free_count = {:d}\n".format(secluded_free_count)
+ print "secluded_inuse_count = {:d}\n".format(secluded_inuse_count)
+
+
+@lldb_command('show_all_vm_named_entries')
+def ShowAllVMNamedEntries(cmd_args=None):
+ """ Routine to print a summary listing of all the VM named entries
+ """
+ queue_len = kern.globals.vm_named_entry_count
+ queue_head = kern.globals.vm_named_entry_list
+
+ print 'vm_named_entry_list:{: <#018x} vm_named_entry_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('vm_named_entry_list'),queue_len)
+
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>3s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process")
+ idx = 0
+ for entry in IterateQueue(queue_head, 'struct vm_named_entry *', 'named_entry_list'):
+ idx += 1
+ showmemoryentry(entry, idx, queue_len)
+
+@lldb_command('show_vm_named_entry')
+def ShowVMNamedEntry(cmd_args=None):
+ """ Routine to print a VM named entry
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", ShowMapVMNamedEntry.__doc__
+ return
+ named_entry = kern.GetValueFromAddress(cmd_args[0], 'vm_named_entry_t')
+ showmemoryentry(named_entry, 0, 0)
+
+def showmemoryentry(entry, idx=0, queue_len=0):
+ """ Routine to print out a summary a VM memory entry
+ params:
+        entry - core.value : an object of type 'struct vm_named_entry *'
+ returns:
+ None
+ """
+ show_pager_info = True
+ show_all_shadows = True
+
+ backing = ""
+ if entry.is_sub_map == 1:
+ backing += "SUBMAP"
+ if entry.is_copy == 1:
+ backing += "COPY"
+ if entry.is_sub_map == 0 and entry.is_copy == 0:
+ backing += "OBJECT"
+ prot=""
+ if entry.protection & 0x1:
+ prot += "r"
+ else:
+ prot += "-"
+ if entry.protection & 0x2:
+ prot += "w"
+ else:
+ prot += "-"
+ if entry.protection & 0x4:
+ prot += "x"
+ else:
+ prot += "-"
+ extra_str = ""
+ if hasattr(entry, 'named_entry_alias'):
+ extra_str += " alias={:d}".format(entry.named_entry_alias)
+ if hasattr(entry, 'named_entry_port'):
+ extra_str += " port={:#016x}".format(entry.named_entry_port)
+ print "{:>6d}/{:<6d} {: <#018x} ref={:d} prot={:d}/{:s} type={:s} backing={: <#018x} offset={:#016x} dataoffset={:#016x} size={:#016x}{:s}\n".format(idx,queue_len,entry,entry.ref_count,entry.protection,prot,backing,entry.backing.object,entry.offset,entry.data_offset,entry.size,extra_str)
+ if entry.is_sub_map == 1:
+ showmapvme(entry.backing.map, 0, 0, show_pager_info, show_all_shadows)
+ if entry.is_copy == 1:
+ showmapcopyvme(entry.backing.copy, 0, 0, 0, show_pager_info, show_all_shadows, 0)
+ if entry.is_sub_map == 0 and entry.is_copy == 0:
+ showvmobject(entry.backing.object, entry.offset, entry.size, show_pager_info, show_all_shadows)
+
+
+def IterateRBTreeEntry2(element, element_type, field_name1, field_name2):
+ """ iterate over a rbtree as defined with RB_HEAD in libkern/tree.h
+ element - value : Value object for rbh_root
+ element_type - str : Type of the link element
+ field_name - str : Name of the field in link element's structure
+ returns:
+ A generator does not return. It is used for iterating
+ value : an object thats of type (element_type) head->sle_next. Always a pointer object
+ """
+ elt = element.__getattr__('rbh_root')
+ if type(element_type) == str:
+ element_type = gettype(element_type)
+ charp_type = gettype('char *');
+
+ # Walk to find min
+ parent = elt
+ while unsigned(elt) != 0:
+ parent = elt
+ elt = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
+ elt = parent
+
+ # Now elt is min
+ while unsigned(elt) != 0:
+ yield elt
+ # implementation cribbed from RB_NEXT in libkern/tree.h
+        right = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)
+ if unsigned(right) != 0:
+ elt = right
+ left = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
+ while unsigned(left) != 0:
+ elt = left
+                left = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
+ else:
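+            # No right child: the successor is the nearest ancestor whose left
+            # subtree contains elt. The low bit of rbe_parent is used as a tag
+            # and is masked off before the value is used as a pointer.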
+
+ # avoid using GetValueFromAddress
+ addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
+ parent = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
+ parent = cast(parent, element_type)
+
+ if unsigned(parent) != 0:
+ left = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
+ if (unsigned(parent) != 0) and (unsigned(elt) == unsigned(left)):
+ elt = parent
+ else:
+ if unsigned(parent) != 0:
+ right = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)
+ while unsigned(parent) != 0 and (unsigned(elt) == unsigned(right)):
+ elt = parent
+
+ # avoid using GetValueFromAddress
+ addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
+ parent = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
+ parent = cast(parent, element_type)
+
+ right = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)
+
+ # avoid using GetValueFromAddress
+ addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
+ elt = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
+ elt = cast(elt, element_type)
+
+
+@lldb_command("showmaprb")
+def ShowMapRB(cmd_args=None):
+ """Routine to print out a VM map's RB tree
+ usage: showmaprb <vm_map>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", ShowMapRB.__doc__
+ return
+ map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
+ print GetVMMapSummary.header
+ print GetVMMapSummary(map_val)
+ vme_rb_root = map_val.hdr.rb_head_store
+ vme_ptr_type = GetType('struct vm_map_entry *')
+ print GetVMEntrySummary.header
+ for vme in IterateRBTreeEntry2(vme_rb_root, 'struct vm_map_entry *', 'store', 'entry'):
+ print GetVMEntrySummary(vme)
+ return None
+
+@lldb_command('show_all_owned_objects', 'T')
+def ShowAllOwnedObjects(cmd_args=None, cmd_options={}):
+ """ Routine to print the list of VM objects owned by each task
+ -T: show only ledger-tagged objects
+ """
+ showonlytagged = False
+ if "-T" in cmd_options:
+ showonlytagged = True
+ for task in kern.tasks:
+ ShowTaskOwnedVmObjects(task, showonlytagged)
+
+@lldb_command('show_task_owned_objects', 'T')
+def ShowTaskOwnedObjects(cmd_args=None, cmd_options={}):
+ """ Routine to print the list of VM objects owned by the specified task
+ -T: show only ledger-tagged objects
+ """
+ showonlytagged = False
+ if "-T" in cmd_options:
+ showonlytagged = True
+    if cmd_args == None or len(cmd_args) < 1:
+        print "Invalid argument.", ShowTaskOwnedObjects.__doc__
+        return
+    task = kern.GetValueFromAddress(cmd_args[0], 'task *')
+ ShowTaskOwnedVmObjects(task, showonlytagged)
+
+def ShowTaskOwnedVmObjects(task, showonlytagged=False):
+ """ Routine to print out a summary listing of all the entries in a vm_map
+ params:
+        task - core.value : an object of type 'task *'
+ returns:
+ None
+ """
+ taskobjq_total = lambda:None
+ taskobjq_total.objects = 0
+ taskobjq_total.vsize = 0
+ taskobjq_total.rsize = 0
+ taskobjq_total.wsize = 0
+ taskobjq_total.csize = 0
+ vmo_list_head = task.task_objq
+ vmo_ptr_type = GetType('vm_object *')
+ idx = 0
+ for vmo in IterateQueue(vmo_list_head, vmo_ptr_type, "task_objq"):
+ idx += 1
+ if not showonlytagged or vmo.vo_ledger_tag != 0:
+ if taskobjq_total.objects == 0:
+ print ' \n'
+ print GetTaskSummary.header + ' ' + GetProcSummary.header
+ print GetTaskSummary(task) + ' ' + GetProcSummary(Cast(task.bsd_info, 'proc *'))
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>2s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tg","owner","pid","process")
+ ShowOwnedVmObject(vmo, idx, 0, taskobjq_total)
+ if taskobjq_total.objects != 0:
+ print " total:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(taskobjq_total.objects, taskobjq_total.vsize, taskobjq_total.rsize, taskobjq_total.wsize, taskobjq_total.csize)
+ return None
+
+def ShowOwnedVmObject(object, idx, queue_len, taskobjq_total):
+ """ Routine to print out a VM object owned by a task
+ params:
+        object - core.value : an object of type 'struct vm_object *'
+ returns:
+ None
+ """
+ page_size = kern.globals.page_size
+ if object.purgable == 0:
+ purgable = "N"
+ elif object.purgable == 1:
+ purgable = "V"
+ elif object.purgable == 2:
+ purgable = "E"
+ elif object.purgable == 3:
+ purgable = "D"
+ else:
+ purgable = "?"
+ if object.pager == 0:
+ compressed_count = 0
+ else:
+ compressor_pager = Cast(object.pager, 'compressor_pager *')
+ compressed_count = compressor_pager.cpgr_num_slots_occupied
+
+ print "{:>6d}/{:<6d} {: <#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:>2d} {: <#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_ledger_tag, object.vo_un2.vou_owner,GetProcPIDForObjectOwner(object.vo_un2.vou_owner),GetProcNameForObjectOwner(object.vo_un2.vou_owner))
+
+ taskobjq_total.objects += 1
+ taskobjq_total.vsize += object.vo_un1.vou_size/page_size
+ taskobjq_total.rsize += object.resident_page_count
+ taskobjq_total.wsize += object.wired_page_count
+ taskobjq_total.csize += compressed_count
+
+def GetProcPIDForObjectOwner(owner):
+ """ same as GetProcPIDForTask() but deals with -1 for a disowned object
+ """
+ if unsigned(Cast(owner, 'int')) == unsigned(int(0xffffffff)):
+ return -1
+ return GetProcPIDForTask(owner)
+
+def GetProcNameForObjectOwner(owner):
+ """ same as GetProcNameForTask() but deals with -1 for a disowned object
+ """
+ if unsigned(Cast(owner, 'int')) == unsigned(int(0xffffffff)):
+ return "<disowned>"
+ return GetProcNameForTask(owner)
+
+def GetDescForNamedEntry(mem_entry):
+ out_str = "\n"
+ out_str += "\t\tmem_entry {:#08x} ref:{:d} offset:{:#08x} size:{:#08x} prot{:d} backing {:#08x}".format(mem_entry, mem_entry.ref_count, mem_entry.offset, mem_entry.size, mem_entry.protection, mem_entry.backing.object)
+ if mem_entry.is_sub_map:
+ out_str += " is_sub_map"
+ elif mem_entry.is_copy:
+ out_str += " is_copy"
+ else:
+ out_str += " is_object"
+ return out_str