@xnudebug_test('test_memstats')
def TestMemstats(kernel_target, config, lldb_obj, isConnected ):
""" Test the functionality of memstats command
- returns
+ returns
- False on failure
- - True on success
+ - True on success
"""
if not isConnected:
print "Target is not connected. Cannot test memstats"
res = lldb.SBCommandReturnObject()
lldb_obj.debugger.GetCommandInterpreter().HandleCommand("memstats", res)
result = res.GetOutput()
- if result.split(":")[1].strip().find('None') == -1 :
+ if result.split(":")[1].strip().find('None') == -1 :
return True
- else:
+ else:
return False
# EndMacro: memstats
# Macro: showmemorystatus
def CalculateLedgerPeak(phys_footprint_entry):
""" Internal function to calculate ledger peak value for the given phys footprint entry
- params: phys_footprint_entry - value representing struct ledger_entry *
+ params: phys_footprint_entry - value representing struct ledger_entry *
return: value - representing the ledger peak for the given phys footprint entry
"""
now = kern.globals.sched_tick / 20
ledger_peak = phys_footprint_entry.le_credit - phys_footprint_entry.le_debit
- if (now - phys_footprint_entry._le.le_peaks[0].le_time <= 1) and (phys_footprint_entry._le.le_peaks[0].le_max > ledger_peak):
- ledger_peak = phys_footprint_entry._le.le_peaks[0].le_max
- if (now - phys_footprint_entry._le.le_peaks[1].le_time <= 1) and (phys_footprint_entry._le.le_peaks[1].le_max > ledger_peak):
- ledger_peak = phys_footprint_entry._le.le_peaks[1].le_max
+ if hasattr(phys_footprint_entry._le._le_max, 'le_interval_max') and (phys_footprint_entry._le._le_max.le_interval_max > ledger_peak):
+ ledger_peak = phys_footprint_entry._le._le_max.le_interval_max
return ledger_peak
-@header("{: >8s} {: >22s} {: >22s} {: >11s} {: >11s} {: >12s} {: >10s} {: >13s} {: ^10s} {: >8s} {: <20s}\n".format(
-'pid', 'effective priority', 'requested priority', 'state', 'user_data', 'physical', 'iokit', 'footprint',
-'spike', 'limit', 'command'))
+@header("{: >8s} {: >12s} {: >12s} {: >10s} {: >12s} {: >14s} {: >10s} {: >12s} {: >10s} {: >10s} {: >10s} {: <20s}\n".format(
+'pid', 'effective', 'requested', 'state', 'user_data', 'physical', 'iokit', 'footprint',
+'recent peak', 'lifemax', 'limit', 'command'))
def GetMemoryStatusNode(proc_val):
""" Internal function to get memorystatus information from the given proc
params: proc - value representing struct proc *
task_iokit_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.iokit_mapped]
task_phys_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.phys_footprint]
page_size = kern.globals.page_size
-
+
phys_mem_footprint = (task_physmem_footprint_ledger_entry.le_credit - task_physmem_footprint_ledger_entry.le_debit) / page_size
iokit_footprint = (task_iokit_footprint_ledger_entry.le_credit - task_iokit_footprint_ledger_entry.le_debit) / page_size
phys_footprint = (task_phys_footprint_ledger_entry.le_credit - task_phys_footprint_ledger_entry.le_debit) / page_size
phys_footprint_limit = task_phys_footprint_ledger_entry.le_limit / page_size
ledger_peak = CalculateLedgerPeak(task_phys_footprint_ledger_entry)
phys_footprint_spike = ledger_peak / page_size
+ phys_footprint_lifetime_max = task_phys_footprint_ledger_entry._le._le_max.le_lifetime_max / page_size
- format_string = '{0: >8d} {1: >22d} {2: >22d} {3: #011x} {4: #011x} {5: >12d} {6: >10d} {7: >13d}'
+ format_string = '{0: >8d} {1: >12d} {2: >12d} {3: #011x} {4: #011x} {5: >12d} {6: >10d} {7: >13d}'
out_str += format_string.format(proc_val.p_pid, proc_val.p_memstat_effectivepriority,
proc_val.p_memstat_requestedpriority, proc_val.p_memstat_state, proc_val.p_memstat_userdata,
phys_mem_footprint, iokit_footprint, phys_footprint)
if phys_footprint != phys_footprint_spike:
- out_str += "{: ^12d}".format(phys_footprint_spike)
+ out_str += "{: >12d}".format(phys_footprint_spike)
else:
- out_str += "{: ^12s}".format('-')
- out_str += "{: 8d} {: <20s}\n".format(phys_footprint_limit, proc_val.p_comm)
- return out_str
+ out_str += "{: >12s}".format('-')
+
+ out_str += "{: >10d} ".format(phys_footprint_lifetime_max)
+ out_str += "{: >10d} {: <20s}\n".format(phys_footprint_limit, proc_val.p_comm)
+ return out_str
@lldb_command('showmemorystatus')
def ShowMemoryStatus(cmd_args=None):
bucket_index = 0
bucket_count = 20
print GetMemoryStatusNode.header
- print "{: >91s} {: >10s} {: >13s} {: ^10s} {: >8s}\n".format("(pages)", "(pages)", "(pages)",
- "(pages)", "(pages)")
+ print "{: >21s} {: >12s} {: >38s} {: >10s} {: >12s} {: >10s} {: >10s}\n".format("priority", "priority", "(pages)", "(pages)", "(pages)",
+ "(pages)", "(pages)", "(pages)")
while bucket_index < bucket_count:
current_bucket = kern.globals.memstat_bucket[bucket_index]
current_list = current_bucket.list
bucket_index += 1
print "\n\n"
Memstats()
-
+
# EndMacro: showmemorystatus
def GetRealMetadata(meta):
""" Get real metadata for a given metadata pointer
"""
try:
- if unsigned(meta.zindex) != 255:
+ if unsigned(meta.zindex) != 0x03FF:
return meta
else:
return kern.GetValueFromAddress(unsigned(meta) - unsigned(meta.real_metadata_offset), "struct zone_page_metadata *")
metadata_offset = (unsigned(addr) - unsigned(zone_metadata_region_min)) % sizeof('struct zone_page_metadata')
page_offset_str = "{:d}/{:d}".format((unsigned(addr) - (unsigned(addr) & ~(pagesize - 1))), pagesize)
out_str += WhatIs.header + '\n'
- out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Metadata", page_offset_str, unsigned(addr) - metadata_offset)
+ out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Metadata", page_offset_str, unsigned(addr) - metadata_offset)
out_str += GetZoneMetadataSummary((unsigned(addr) - metadata_offset)) + '\n\n'
else:
page_index = ((unsigned(addr) & ~(pagesize - 1)) - unsigned(zone_map_min_address)) / pagesize
pass
return
+# Macro: showzcache
+
+@lldb_type_summary(['zone','zone_t'])
+@header("{:^18s} {:<40s} {:>10s} {:>10s} {:>10s} {:>10s}".format(
+'ZONE', 'NAME', 'CACHE_ELTS', 'DEP_VALID', 'DEP_EMPTY','DEP_FULL'))
+
+def GetZoneCacheSummary(zone):
+ """ Summarize a zone's cache with important information.
+ params:
+ zone: value - obj representing a zone in kernel
+ returns:
+ str - summary of the zone's cache contents
+ """
+ out_string = ""
+ format_string = '{:#018x} {:<40s} {:>10d} {:>10s} {:>10d} {:>10d}'
+ cache_elem_count = 0
+ mag_capacity = kern.GetGlobalVariable('magazine_element_count')
+ depot_capacity = kern.GetGlobalVariable('depot_element_count')
+
+
+ if zone.__getattr__('cpu_cache_enabled') :
+ for i in range(0, kern.globals.machine_info.physical_cpu):
+ cache = zone.zcache[0].zcc_per_cpu_caches[i]
+ cache_elem_count += cache.current.zcc_magazine_index
+ cache_elem_count += cache.previous.zcc_magazine_index
+
+ if zone.zcache[0].zcc_depot_index != -1:
+ cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity
+ out_string += format_string.format(zone, zone.zone_name, cache_elem_count, "Y", depot_capacity - zone.zcache[0].zcc_depot_index, zone.zcache[0].zcc_depot_index)
+ else:
+ out_string += format_string.format(zone, zone.zone_name, cache_elem_count, "N", 0, 0)
+
+ return out_string
+
+@lldb_command('showzcache')
+def ZcachePrint(cmd_args=None):
+ """ Routine to print a summary listing of all the kernel zones cache contents
+ All columns are printed in decimal
+ """
+ global kern
+ print GetZoneCacheSummary.header
+ for zval in kern.zones:
+ if zval.__getattr__('cpu_cache_enabled') :
+ print GetZoneCacheSummary(zval)
+
+# EndMacro: showzcache
+
+# Macro: showzcachecpu
+
+@lldb_type_summary(['zone','zone_t'])
+@header("{:^18s} {:40s} {:>10s} {:>10s}".format(
+'ZONE', 'NAME', 'CACHE_ELTS', 'CPU_INFO'))
+
+def GetZoneCacheCPUSummary(zone):
+ """ Summarize a zone's cache broken up per cpu
+ params:
+ zone: value - obj representing a zone in kernel
+ returns:
+ str - summary of the zone's per CPU cache contents
+ """
+ out_string = ""
+ format_string = '{:#018x} {:40s} {:10d} {cpuinfo:s}'
+ cache_elem_count = 0
+ cpu_info = ""
+ per_cpu_count = 0
+ mag_capacity = kern.GetGlobalVariable('magazine_element_count')
+ depot_capacity = kern.GetGlobalVariable('depot_element_count')
+
+
+ if zone.__getattr__('cpu_cache_enabled') :
+ for i in range(0, kern.globals.machine_info.physical_cpu):
+ if i != 0:
+ cpu_info += ", "
+ cache = zone.zcache[0].zcc_per_cpu_caches[i]
+ per_cpu_count = cache.current.zcc_magazine_index
+ per_cpu_count += cache.previous.zcc_magazine_index
+ cache_elem_count += per_cpu_count
+ cpu_info += "CPU {:d}: {:5}".format(i,per_cpu_count)
+ if zone.zcache[0].zcc_depot_index != -1:
+ cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity
+
+ out_string += format_string.format(zone, zone.zone_name, cache_elem_count,cpuinfo = cpu_info)
+
+ return out_string
+
+@lldb_command('showzcachecpu')
+def ZcacheCPUPrint(cmd_args=None):
+ """ Routine to print a summary listing of all the kernel zones cache contents
+ All columns are printed in decimal
+ """
+ global kern
+ print GetZoneCacheCPUSummary.header
+ for zval in kern.zones:
+ if zval.__getattr__('cpu_cache_enabled') :
+ print GetZoneCacheCPUSummary(zval)
+
+# EndMacro: showzcachecpu
+
# Macro: zprint
@lldb_type_summary(['zone','zone_t'])
-@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}({:>6s} {:>6s} {:>6s}) {:^15s} {:<20s}".format(
-'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ALL_FREE_PGS', 'ELT_SZ', 'ALLOC', 'ELTS', 'PGS', 'WASTE', 'FLAGS', 'NAME'))
+@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:^6s} {:^6s} {:^6s} {:>10s} {:^15s} {:<20s}".format(
+'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ALL_FREE_PGS', 'ELT_SZ', 'ALLOC', '(ELTS', 'PGS', 'WASTE)', 'CACHE_ELTS', 'FLAGS', 'NAME'))
def GetZoneSummary(zone):
""" Summarize a zone with important information. See help zprint for description of each field
- params:
+ params:
zone: value - obj representing a zone in kernel
- returns:
+ returns:
str - summary of the zone
"""
out_string = ""
- format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d} {markings} {name:s} '
+ format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d} {:10d} {markings} {name:s} '
pagesize = kern.globals.page_size
-
+
free_elements = zone.countfree
free_size = free_elements * zone.elem_size
-
+ mag_capacity = kern.GetGlobalVariable('magazine_element_count')
+
alloc_pages = zone.alloc_size / pagesize
alloc_count = zone.alloc_size / zone.elem_size
alloc_waste = zone.alloc_size % zone.elem_size
["zleak_on", "L"],
["doing_alloc_without_vm_priv", "A"],
["doing_alloc_with_vm_priv", "S"],
- ["waiting", "W"]
+ ["waiting", "W"],
+ ["cpu_cache_enabled", "E"]
]
if kern.arch == 'x86_64':
marks.append(["gzalloc_exempt", "M"])
marks.append(["alignment_required", "N"])
-
+
markings=""
+ if not zone.__getattr__("zone_valid") :
+ markings+="I"
for mark in marks:
if zone.__getattr__(mark[0]) :
markings+=mark[1]
else:
markings+=" "
+ cache_elem_count = 0
+ if zone.__getattr__('cpu_cache_enabled') :
+ for i in range(0, kern.globals.machine_info.physical_cpu):
+ cache = zone.zcache[0].zcc_per_cpu_caches[i]
+ cache_elem_count += cache.current.zcc_magazine_index
+ cache_elem_count += cache.previous.zcc_magazine_index
+ if zone.zcache[0].zcc_depot_index != -1:
+ cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity
+
out_string += format_string.format(zone, zone.cur_size, zone.page_count,
- zone.count, free_elements, free_size, zone.count_all_free_pages,
+ zone.count, free_elements, free_size, zone.count_all_free_pages,
zone.elem_size, zone.alloc_size, alloc_count,
- alloc_pages, alloc_waste, name = zone.zone_name, markings=markings)
-
+ alloc_pages, alloc_waste, cache_elem_count, name = zone.zone_name, markings=markings)
+
if zone.exhaustible :
out_string += "(max: {:d})".format(zone.max_size)
-
+
return out_string
@lldb_command('zprint')
A - currently trying to allocate more backing memory from kernel_memory_allocate without VM priv
S - currently trying to allocate more backing memory from kernel_memory_allocate with VM priv
W - another thread is waiting for more memory
+ E - Per-cpu caching is enabled for this zone
L - zone is being monitored by zleaks
G - currently running GC
+ I - zone was destroyed and is no longer valid
"""
global kern
print GetZoneSummary.header
@xnudebug_test('test_zprint')
def TestZprint(kernel_target, config, lldb_obj, isConnected ):
""" Test the functionality of zprint command
- returns
+ returns
- False on failure
- - True on success
+ - True on success
"""
if not isConnected:
print "Target is not connected. Cannot test memstats"
result = res.GetOutput()
if len(result.split("\n")) > 2:
return True
- else:
+ else:
return False
returns:
None
"""
-
+
scaled_factor = (unsigned(kern.globals.zp_factor) +
(unsigned(zone.elem_size) >> unsigned(kern.globals.zp_scale)))
zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *')
if unsigned(zfirst) != 0:
ShowZfreeListChain(zone, zfirst, zlimit)
-
+
if ShowZfreeList.elts_found == zlimit:
print "Stopped at {0: <d} elements!".format(zlimit)
else:
@lldb_command('zstack_showzonesbeinglogged')
def ZstackShowZonesBeingLogged(cmd_args=None):
+ """ Show all zones which have BTLog enabled.
"""
- """
global kern
for zval in kern.zones:
if zval.zlog_btlog:
@lldb_command('zstack_findelem')
def ZStackFindElem(cmd_args=None):
""" Zone corruption debugging: search the zone log and print out the stack traces for all log entries that
- refer to the given zone element.
+ refer to the given zone element.
Usage: zstack_findelem <btlog addr> <elem addr>
When the kernel panics due to a corrupted zone element, get the
if int(kern.globals.log_records) == 0 or unsigned(kern.globals.corruption_debug_flag) == 0:
print "Zone logging with corruption detection not enabled. Add '-zc zlog=<zone name>' to boot-args."
return
-
+
btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
target_element = unsigned(kern.GetValueFromAddress(cmd_args[1], 'void *'))
# EndMacro: zstack_findelem
+@lldb_command('zstack_findtop', 'N:')
+def ShowZstackTop(cmd_args=None, cmd_options={}):
+ """ Zone leak debugging: search the log and print the stacks with the most active references
+ in the stack trace.
+
+ Usage: zstack_findtop [-N <n-stacks>] <btlog-addr>
+ """
+
+ if not cmd_args:
+ raise ArgumentError('Missing required btlog address argument')
+
+ n = 5
+ if '-N' in cmd_options:
+ n = int(cmd_options['-N'])
+
+ btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
+ btrecord_size = unsigned(btlog_ptr.btrecord_size)
+ btrecords = unsigned(btlog_ptr.btrecords)
+
+ cpcs_index = unsigned(btlog_ptr.head)
+ depth = unsigned(btlog_ptr.btrecord_btdepth)
+
+ records = []
+ while cpcs_index != 0xffffff:
+ cpcs_record_offset = cpcs_index * btrecord_size
+ cpcs_record = kern.GetValueFromAddress(btrecords + cpcs_record_offset, 'btlog_record_t *')
+ cpcs_record.index = cpcs_index
+ records.append(cpcs_record)
+ cpcs_index = cpcs_record.next
+
+ recs = sorted(records, key=lambda x: x.ref_count, reverse=True)
+
+ for rec in recs[:n]:
+ ShowZStackRecord(rec, rec.index, depth, unsigned(btlog_ptr.active_element_count))
+
+# EndMacro: zstack_findtop
+
# Macro: btlog_find
@lldb_command('btlog_find', "AS")
if len(cmd_args) >= 2:
trace_size = ArgumentStringToInt(cmd_args[1])
ShowZstackTraceHelper(trace, trace_size)
-
+
#EndMacro: showzstacktrace
def ShowZstackTraceHelper(stack, depth):
""" Helper routine for printing a zstack.
params:
stack: void *[] - An array of pointers representing the Zstack
- depth: int - The depth of the ztrace stack
+ depth: int - The depth of the ztrace stack
returns:
None
"""
@lldb_command('showtopztrace')
def ShowTopZtrace(cmd_args=None):
- """ Shows the ztrace with the biggest size.
+ """ Shows the ztrace with the biggest size.
(According to top_ztrace, not by iterating through the hash table)
"""
top_trace = kern.globals.top_ztrace
if unsigned(kern.globals.zallocations) == 0:
print "zallocations array not initialized!"
return
- print '{0: <5s} {1: <18s} {2: <5s} {3: <15s}'.format('INDEX','ADDRESS','TRACE','SIZE')
+ print '{0: <5s} {1: <18s} {2: <5s} {3: <15s}'.format('INDEX','ADDRESS','TRACE','SIZE')
current_index = 0
max_zallocation = unsigned(kern.globals.zleak_alloc_buckets)
allocation_count = 0
if not cmd_args:
print ShowZallocsForTrace.__doc__
return
- print '{0: <5s} {1: <18s} {2: <15s}'.format('INDEX','ADDRESS','SIZE')
+ print '{0: <5s} {1: <18s} {2: <15s}'.format('INDEX','ADDRESS','SIZE')
target_index = ArgumentStringToInt(cmd_args[0])
current_index = 0
max_zallocation = unsigned(kern.globals.zleak_alloc_buckets)
frame = 0
if not zstack_record:
return "Zstack record none!"
-
+
depth_val = unsigned(depth)
while frame < depth_val:
frame_pc = zstack_record.bt[frame]
print "Container allocation = {0: <#0x} = {1: d}K".format(kern.globals.debug_container_malloc_size, (kern.globals.debug_container_malloc_size / 1024))
print "IOMalloc allocation = {0: <#0x} = {1: d}K".format(kern.globals.debug_iomalloc_size, (kern.globals.debug_iomalloc_size / 1024))
print "Container allocation = {0: <#0x} = {1: d}K".format(kern.globals.debug_iomallocpageable_size, (kern.globals.debug_iomallocpageable_size / 1024))
-
-
-# EndMacro: showioalloc
+
+
+# EndMacro: showioalloc
# Macro: showselectmem
print '-'*40
print "Total: {:d} bytes ({:d} kbytes)".format(selmem, selmem/1024)
# Endmacro: showselectmem
-
-
+
+
# Macro: showtaskvme
@lldb_command('showtaskvme', "PS")
def ShowTaskVmeHelper(cmd_args=None, cmd_options={}):
vmstats.compressed_lifetime = 0
vmstats.error = ''
- hdr_format = "{0: >10s} {1: <20s} {2: >6s} {3: >10s} {4: >10s} {5: >10s} {6: >10s} {7: >10s} {8: >10s} {9: >10s} {10: >10s} {11: >10s} {12: >10s} {13: >10s} {14:}"
- print hdr_format.format('pid', 'command', '#ents', 'wired', 'vsize', 'rsize', 'NEW RSIZE', 'max rsize', 'internal', 'external', 'reusable', 'compressed', 'compressed', 'compressed', '')
- print hdr_format.format('', '', '', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(current)', '(peak)', '(lifetime)', '')
- entry_format = "{p.p_pid: >10d} {p.p_comm: <20s} {m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {s.error}"
+ hdr_format = "{:>6s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<20s} {:1s}"
+ print hdr_format.format('#ents', 'wired', 'vsize', 'rsize', 'NEW RSIZE', 'max rsize', 'internal', 'external', 'reusable', 'compressed', 'compressed', 'compressed', 'pid', 'command', '')
+ print hdr_format.format('', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(current)', '(peak)', '(lifetime)', '', '', '')
+ entry_format = "{m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {p.p_pid: >10d} {p.p_comm: <20s} {s.error}"
for task in kern.tasks:
proc = Cast(task.bsd_info, 'proc *')
vmstats.error += '*'
print entry_format.format(p=proc, m=vmmap, vsize=(unsigned(vmmap.size) / page_size), t=task, s=vmstats)
-
+
def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
""" Routine to print out a summary listing of all the entries in a vm_map
- params:
+ params:
task - core.value : a object of type 'task *'
returns:
None
usage: showmapvme <vm_map>
"""
if cmd_args == None or len(cmd_args) < 1:
- print "Invalid argument.", ShowMap.__doc__
+ print "Invalid argument.", ShowMapVME.__doc__
return
map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
print GetVMMapSummary.header
resident_pages = 0
if vmmap.pmap != 0: resident_pages = int(vmmap.pmap.stats.resident_count)
first_free = 0
- if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s.first_free
+ if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s._first_free
out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free)
return out_string
vme_protection = int(vme.protection)
vme_max_protection = int(vme.max_protection)
vme_extra_info_str ="SC-Ds"[int(vme.inheritance)]
- if int(vme.is_sub_map) != 0 :
+ if int(vme.is_sub_map) != 0 :
vme_extra_info_str +="s"
elif int(vme.needs_copy) != 0 :
vme_extra_info_str +="n"
@lldb_type_summary(['kmod_info_t *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: >3s} {4: >5s} {5: <20s} {6: <20s} {7: >20s} {8: <30s}".format('kmod_info', 'address', 'size', 'id', 'refs', 'TEXT exec', 'size', 'version', 'name'))
def GetKextSummary(kmod):
- """ returns a string representation of kext information
+ """ returns a string representation of kext information
"""
out_string = ""
format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >3d} {4: >5d} {5: <#020x} {6: <#020x} {7: >20s} {8: <30s}"
return out_string
@lldb_type_summary(['uuid_t'])
-@header("")
+@header("")
def GetUUIDSummary(uuid):
""" returns a string representation like CA50DA4C-CA10-3246-B8DC-93542489AA26
"""
if address == 0 or size == 0:
return ([defval], [defval])
- # if int(kern.globals.gLoadedKextSummaries.version) <= 2:
+ ## if int(kern.globals.gLoadedKextSummaries.version) <= 2:
# until we have separate version. we will pay penalty only on arm64 devices
- if kern.arch not in ('arm64',):
+ if not kern.arch.startswith('arm64'):
return ([defval], [defval])
restrict_size_to_read = 1536
'addr of macho header', [macho.MachOSegment,..], [MachoSection,...], kext, kmod_obj)
"""
cached_result = caching.GetDynamicCacheData("kern.kexts.loadinformation", [])
- # if specific addr is provided then ignore caching
+ ## if specific addr is provided then ignore caching
if cached_result and not addr:
return cached_result
return "invalid"
(MAJ_MULT, MIN_MULT, REV_MULT,STAGE_MULT) = (100000000, 1000000, 10000, 1000)
version = version_num
-
+
vers_major = version / MAJ_MULT
version = version - (vers_major * MAJ_MULT)
-
+
vers_minor = version / MIN_MULT
version = version - (vers_minor * MIN_MULT)
-
+
vers_revision = version / REV_MULT
version = version - (vers_revision * REV_MULT)
-
+
vers_stage = version / STAGE_MULT
version = version - (vers_stage * STAGE_MULT)
-
- vers_stage_level = version
-
+
+ vers_stage_level = version
+
out_str = "%d.%d" % (vers_major, vers_minor)
if vers_revision > 0: out_str += ".%d" % vers_revision
if vers_stage == 1 : out_str += "d%d" % vers_stage_level
if vers_stage == 3 : out_str += "a%d" % vers_stage_level
if vers_stage == 5 : out_str += "b%d" % vers_stage_level
if vers_stage == 6 : out_str += "fc%d" % vers_stage_level
-
+
return out_str
@lldb_command('showallknownkmods')
return
+def FindKmodNameForAddr(addr):
+ """ Given an address, return the name of the kext containing that address
+ """
+ addr = unsigned(addr)
+ all_kexts_info = GetKextLoadInformation()
+ for kinfo in all_kexts_info:
+ segment = macho.get_segment_with_addr(kinfo[4], addr)
+ if segment:
+ return kinfo[7].name
+ return None
+
+
+@lldb_command('addkextaddr')
+def AddKextAddr(cmd_args=[]):
+ """ Given an address, load the kext which contains that address
+ Syntax: (lldb) addkextaddr <addr>
+ """
+ if len(cmd_args) < 1:
+ raise ArgumentError("Insufficient arguments")
+
+ addr = ArgumentStringToInt(cmd_args[0])
+ all_kexts_info = GetKextLoadInformation()
+ kernel_uuid = str(kern.globals.kernel_uuid_string).lower()
+ found_kinfo = None
+ found_segment = None
+ for kinfo in all_kexts_info:
+ segment = macho.get_segment_with_addr(kinfo[4], addr)
+ if segment:
+ print GetKextSummary.header
+ print GetKextSummary(kinfo[7]) + " segment: {} offset = {:#0x}".format(segment.name, (addr - segment.vmaddr))
+ cur_uuid = kinfo[0].lower()
+ if (kernel_uuid == cur_uuid):
+ print "(builtin)"
+ else:
+ print "Fetching dSYM for %s" % cur_uuid
+ info = dsymForUUID(cur_uuid)
+ if info and 'DBGSymbolRichExecutable' in info:
+ print "Adding dSYM (%s) for %s" % (cur_uuid, info['DBGSymbolRichExecutable'])
+ addDSYM(cur_uuid, info)
+ loadDSYM(cur_uuid, int(kinfo[1],16), kinfo[4])
+ else:
+ print "Failed to get symbol info for %s" % cur_uuid
+ return
+
+
@lldb_command('showkmodaddr')
def ShowKmodAddr(cmd_args=[]):
""" Given an address, print the offset and name for the kmod containing it
\nNote: LLDB does not support adding kext based on directory paths like gdb used to.".format(exec_path))
slide_value = None
+ sections = None
if cmd_args:
slide_value = cmd_args[0]
debuglog("loading slide value from user input %s" % cmd_args[0])
debuglog(k[0])
if k[0].lower() == uuid_str.lower():
slide_value = k[1]
+ sections = k[4]
debuglog("found the slide %s for uuid %s" % (k[1], k[0]))
if slide_value is None:
raise ArgumentError("Unable to find load address for module described at %s " % exec_full_path)
- load_cmd = "target modules load --file %s --slide %s" % (exec_full_path, str(slide_value))
- print load_cmd
- print lldb_run_command(load_cmd)
+
+ if not sections:
+ cmd_str = "target modules load --file %s --slide %s" % ( exec_full_path, str(slide_value))
+ debuglog(cmd_str)
+ else:
+ cmd_str = "target modules load --file {} ".format(exec_full_path)
+ sections_str = ""
+ for s in sections:
+ sections_str += " {} {:#0x} ".format(s.name, s.vmaddr)
+ cmd_str += sections_str
+ debuglog(cmd_str)
+
+ lldb.debugger.HandleCommand(cmd_str)
+
kern.symbolicator = None
return True
all_kexts_info = GetKextLoadInformation()
+ kernel_uuid = str(kern.globals.kernel_uuid_string).lower()
if "-N" in cmd_options:
kext_name = cmd_options["-N"]
for x in all_kexts_info:
if cur_knm == x[2]:
cur_uuid = x[0].lower()
- print "Fetching dSYM for {:s}".format(cur_uuid)
- info = dsymForUUID(cur_uuid)
- if info and 'DBGSymbolRichExecutable' in info:
- print "Adding dSYM ({0:s}) for {1:s}".format(cur_uuid, info['DBGSymbolRichExecutable'])
- addDSYM(cur_uuid, info)
- loadDSYM(cur_uuid, int(x[1],16), x[4])
+ if (kernel_uuid == cur_uuid):
+ print "(builtin)"
else:
- print "Failed to get symbol info for {:s}".format(cur_uuid)
+ print "Fetching dSYM for {:s}".format(cur_uuid)
+ info = dsymForUUID(cur_uuid)
+ if info and 'DBGSymbolRichExecutable' in info:
+ print "Adding dSYM ({0:s}) for {1:s}".format(cur_uuid, info['DBGSymbolRichExecutable'])
+ addDSYM(cur_uuid, info)
+ loadDSYM(cur_uuid, int(x[1],16), x[4])
+ else:
+ print "Failed to get symbol info for {:s}".format(cur_uuid)
break
kern.symbolicator = None
return
for k_info in all_kexts_info:
cur_uuid = k_info[0].lower()
if load_all_kexts or (uuid == cur_uuid):
- print "Fetching dSYM for %s" % cur_uuid
- info = dsymForUUID(cur_uuid)
- if info and 'DBGSymbolRichExecutable' in info:
- print "Adding dSYM (%s) for %s" % (cur_uuid, info['DBGSymbolRichExecutable'])
- addDSYM(cur_uuid, info)
- loadDSYM(cur_uuid, int(k_info[1],16), k_info[4])
- else:
- print "Failed to get symbol info for %s" % cur_uuid
+ if (kernel_uuid != cur_uuid):
+ print "Fetching dSYM for %s" % cur_uuid
+ info = dsymForUUID(cur_uuid)
+ if info and 'DBGSymbolRichExecutable' in info:
+ print "Adding dSYM (%s) for %s" % (cur_uuid, info['DBGSymbolRichExecutable'])
+ addDSYM(cur_uuid, info)
+ loadDSYM(cur_uuid, int(k_info[1],16), k_info[4])
+ else:
+ print "Failed to get symbol info for %s" % cur_uuid
#end of for loop
kern.symbolicator = None
return True
@lldb_type_summary(['mount *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: <12s} {4: <12s} {5: <12s} {6: >6s} {7: <30s} {8: <35s} {9: <30s}".format('volume(mp)', 'mnt_data', 'mnt_devvp', 'flag', 'kern_flag', 'lflag', 'type', 'mnton', 'mntfrom', 'iosched supported'))
def GetMountSummary(mount):
- """ Display a summary of mount on the system
+ """ Display a summary of mount on the system
"""
out_string = ("{mnt: <#020x} {mnt.mnt_data: <#020x} {mnt.mnt_devvp: <#020x} {mnt.mnt_flag: <#012x} " +
"{mnt.mnt_kern_flag: <#012x} {mnt.mnt_lflag: <#012x} {vfs.f_fstypename: >6s} " +
_GetVnodePathName(vnode.v_mount.mnt_vnodecovered, str(vnode.v_mount.mnt_vnodecovered.v_name) )
else:
_GetVnodePathName(vnode.v_parent, str(vnode.v_parent.v_name))
- _GetVnodePathName.output += "/%s" % vnodename
+ _GetVnodePathName.output += "/%s" % vnodename
def GetVnodePath(vnode):
""" Get string representation of the vnode
devnode_major = (devnode_dev >> 24) & 0xff
devnode_minor = devnode_dev & 0x00ffffff
- # boilerplate device information for a vnode
+ # boilerplate device information for a vnode
vnodedev_output += "Device Info:\n\t vnode:\t\t{:#x}".format(vnode)
vnodedev_output += "\n\t type:\t\t"
if (vnode.v_type == vblk_type):
vnode_lock_output += ("PID {: <18d}").format(lockf_proc.p_pid)
else:
vnode_lock_output += ("ID {: <#019x}").format(int(lockf.lf_id))
-
+
# lock type
if lockf_type == 1:
vnode_lock_output += ("{: <12s}").format('shared')
vnode_lock_output += ("{: <12s}").format('unlock')
else:
vnode_lock_output += ("{: <12s}").format('unknown')
-
+
# start and stop values
vnode_lock_output += ("{: #018x} ..").format(lockf.lf_start)
vnode_lock_output += ("{: #018x}\n").format(lockf.lf_end)
while lockf_blocker:
out_str += ("{: <4s}").format('>')
out_str += GetVnodeLock(lockf_blocker)
- lockf_blocker = lockf_blocker.lf_block.tqe_next
+ lockf_blocker = lockf_blocker.lf_block.tqe_next
return out_str
@lldb_command('showvnodelocks')
# EndMacro: showvnodelocks
# Macro: showproclocks
-
+
@lldb_command('showproclocks')
def ShowProcLocks(cmd_args=None):
""" Routine to display list of advisory record locks for the given process
csblob_version = '-'
if (vtype == 1) and (vnode.v_un.vu_ubcinfo != 0):
csblob_version = '{: <6d}'.format(vnode.v_un.vu_ubcinfo.cs_add_gen)
- # Check to see if vnode is mapped/unmapped
+ # Check to see if vnode is mapped/unmapped
if (vnode.v_un.vu_ubcinfo.ui_flags & 0x8) != 0:
mapped = '1'
else:
vnodeval = kern.GetValueFromAddress(cmd_args[0],'vnode *')
print GetVnodeSummary.header
print GetVnodeSummary(vnodeval)
-
+
@lldb_command('showvolvnodes')
def ShowVolVnodes(cmd_args=None):
""" Display info about all vnodes of a given mount_t
if int(fdptr.fd_rdir) != 0:
print '{0: <25s}\n{1: <s}\n{2: <s}'.format('Current Root Directory:', GetVnodeSummary.header, GetVnodeSummary(fdptr.fd_rdir))
count = 0
- print '\n' + '{0: <5s} {1: <7s}'.format('fd', 'flags') + GetVnodeSummary.header
+ print '\n' + '{0: <5s} {1: <7s}'.format('fd', 'flags') + GetVnodeSummary.header
# Hack to get around <rdar://problem/12879494> llb fails to cast addresses to double pointers
fpptr = Cast(fdptr.fd_ofiles, 'fileproc *')
while count < fdptr.fd_nfiles:
@xnudebug_test('test_vnode')
def TestShowAllVnodes(kernel_target, config, lldb_obj, isConnected ):
""" Test the functionality of vnode related commands
- returns
+ returns
- False on failure
- - True on success
+ - True on success
"""
if not isConnected:
print "Target is not connected. Cannot test memstats"
result = res.GetOutput()
if len(result.split("\n")) > 2 and result.find('VREG') != -1 and len(result.splitlines()[2].split()) > 5:
return True
- else:
+ else:
return False
# Macro: showallmtx
hdr_format = '{:<18s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
else:
hdr_format = '{:<10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
-
- print hdr_format.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')
+
+ print hdr_format.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')
mtxgrp_queue_head = kern.globals.lck_grp_queue
- mtxgrp_ptr_type = GetType('_lck_grp_ *')
-
- for mtxgrp_ptr in IterateQueue(mtxgrp_queue_head, mtxgrp_ptr_type, "lck_grp_link"):
+ mtxgrp_ptr_type = GetType('_lck_grp_ *')
+
+ for mtxgrp_ptr in IterateQueue(mtxgrp_queue_head, mtxgrp_ptr_type, "lck_grp_link"):
print GetMutexEntry(mtxgrp_ptr)
return
# EndMacro: showallmtx
out_str += "Pri : {mtx.lck_mtx_pri:#x}\n".format(mtx=mtx)
out_str += "Spin : {mtx.lck_mtx_spin:#x}\n".format(mtx=mtx)
out_str += "Ext : {mtx.lck_mtx_is_ext:#x}\n".format(mtx=mtx)
- if mtx.lck_mtxd_pad32 == 0xFFFFFFFF :
- out_str += "Canary (valid) : {mtx.lck_mtxd_pad32:#x}\n".format(mtx=mtx)
+ if mtx.lck_mtx_pad32 == 0xFFFFFFFF :
+ out_str += "Canary (valid) : {mtx.lck_mtx_pad32:#x}\n".format(mtx=mtx)
else:
- out_str += "Canary (INVALID) : {mtx.lck_mtxd_pad32:#x}\n".format(mtx=mtx)
+ out_str += "Canary (INVALID) : {mtx.lck_mtx_pad32:#x}\n".format(mtx=mtx)
return out_str
out_str = "Lock Type\t\t: MUTEX\n"
return
summary_str = ""
- lock = kern.GetValueFromAddress(cmd_args[0], 'uintptr_t*')
-
- if kern.arch == "x86_64" and lock:
+ addr = cmd_args[0]
+ # from osfmk/arm/locks.h
+ LCK_SPIN_TYPE = 0x11
+ LCK_MTX_TYPE = 0x22
+ if kern.arch == "x86_64":
if "-M" in cmd_options:
- lock_mtx = Cast(lock, 'lck_mtx_t *')
+ lock_mtx = kern.GetValueFromAddress(addr, 'lck_mtx_t *')
summary_str = GetMutexLockSummary(lock_mtx)
elif "-S" in cmd_options:
- lock_spin = Cast(lock, 'lck_spin_t *')
+ lock_spin = kern.GetValueFromAddress(addr, 'lck_spin_t *')
summary_str = GetSpinLockSummary(lock_spin)
else:
summary_str = "Please specify supported lock option(-M/-S)"
print summary_str
- return
-
- if lock:
- lock_mtx = Cast(lock, 'lck_mtx_t*')
- if lock_mtx.lck_mtx_type == 0x22:
- summary_str = GetMutexLockSummary(lock_mtx)
-
- lock_spin = Cast(lock, 'lck_spin_t*')
- if lock_spin.type == 0x11:
- summary_str = GetSpinLockSummary(lock_spin)
-
- if summary_str == "":
- summary_str = "Lock Type\t\t: INVALID LOCK"
- print summary_str
+ else:
+ lock = kern.GetValueFromAddress(addr, 'uintptr_t *')
+ if lock:
+ lock_mtx = Cast(lock, 'lck_mtx_t*')
+ if lock_mtx.lck_mtx_type == LCK_MTX_TYPE:
+ summary_str = GetMutexLockSummary(lock_mtx)
+
+ lock_spin = Cast(lock, 'lck_spin_t*')
+ if lock_spin.type == LCK_SPIN_TYPE:
+ summary_str = GetSpinLockSummary(lock_spin)
+ if summary_str == "":
+ summary_str = "Lock Type\t\t: INVALID LOCK"
+ print summary_str
#EndMacro: showlock
""" Prints out the phys memory map from kernelBootArgs
Supported only on x86_64
"""
- if kern.arch == 'x86_64':
- voffset = unsigned(0xFFFFFF8000000000)
- else:
+ if kern.arch != 'x86_64':
print "showbootermemorymap not supported on this architecture"
return
-
+
out_string = ""
# Memory type map
i = 0
while i < mcount:
- mptr = kern.GetValueFromAddress(unsigned(boot_args.MemoryMap) + voffset + unsigned(i*msize), 'EfiMemoryRange *')
+ mptr = kern.GetValueFromAddress(unsigned(boot_args.MemoryMap) + kern.VM_MIN_KERNEL_ADDRESS + unsigned(i*msize), 'EfiMemoryRange *')
mtype = unsigned(mptr.Type)
if mtype in memtype_dict:
out_string += "{0: <12s}".format(memtype_dict[mtype])
queue_len = kern.globals.purgeable_nonvolatile_count
queue_head = kern.globals.purgeable_nonvolatile_queue
- print 'purgeable_nonvolatile_queue:{:#018x} purgeable_volatile_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('purgeable_nonvolatile_queue'),queue_len)
+ print 'purgeable_nonvolatile_queue:{: <#018x} purgeable_volatile_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('purgeable_nonvolatile_queue'),queue_len)
print 'N:non-volatile V:volatile E:empty D:deny\n'
- print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>3s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process")
idx = 0
for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
idx += 1
compressor_pager = Cast(object.pager, 'compressor_pager *')
compressed_count = compressor_pager.cpgr_num_slots_occupied
- print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+ print "{:>6d}/{:<6d} {: <#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:>3d} {: <#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_ledger_tag, object.vo_un2.vou_owner,GetProcPIDForObjectOwner(object.vo_un2.vou_owner),GetProcNameForObjectOwner(object.vo_un2.vou_owner))
nonvolatile_total.objects += 1
nonvolatile_total.vsize += object.vo_un1.vou_size/page_size
nonvolatile_total.rsize += object.resident_page_count
nonvolatile_total.wsize += object.wired_page_count
nonvolatile_total.csize += compressed_count
- if object.vo_un2.vou_purgeable_owner == 0:
+ if object.vo_un2.vou_owner == 0:
nonvolatile_total.disowned_objects += 1
nonvolatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
nonvolatile_total.disowned_rsize += object.resident_page_count
for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
if idx == 0:
# print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s} {:18s} {:>6s} {:<20s} {:s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process","volatilizer","pid","process","")
- print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
+ print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>3s} {:18s} {:>6s} {:<20s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process")
idx += 1
ShowPurgeableVolatileVmObject(object, idx, volatile_total)
returns:
None
"""
-# if int(object.vo_un2.vou_purgeable_owner) != int(object.vo_purgeable_volatilizer):
+## if int(object.vo_un2.vou_owner) != int(object.vo_purgeable_volatilizer):
# diff=" !="
-# else:
+## else:
# diff=" "
page_size = kern.globals.page_size
if object.purgable == 0:
else:
compressor_pager = Cast(object.pager, 'compressor_pager *')
compressed_count = compressor_pager.cpgr_num_slots_occupied
-# print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s} {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
- print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+# print "{:>6d} {: <#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {: <#018x} {:>6d} {:<20s} {: <#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_owner,GetProcPIDForObjectOwner(object.vo_un2.vou_owner),GetProcNameForObjectOwner(object.vo_un2.vou_owner),object.vo_purgeable_volatilizer,GetProcPIDForObjectOwner(object.vo_purgeable_volatilizer),GetProcNameForObjectOwner(object.vo_purgeable_volatilizer),diff)
+ print "{:>6d} {: <#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:>3d} {: <#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_ledger_tag, object.vo_un2.vou_owner,GetProcPIDForObjectOwner(object.vo_un2.vou_owner),GetProcNameForObjectOwner(object.vo_un2.vou_owner))
volatile_total.objects += 1
volatile_total.vsize += object.vo_un1.vou_size/page_size
volatile_total.rsize += object.resident_page_count
volatile_total.wsize += object.wired_page_count
volatile_total.csize += compressed_count
- if object.vo_un2.vou_purgeable_owner == 0:
+ if object.vo_un2.vou_owner == 0:
volatile_total.disowned_objects += 1
volatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
volatile_total.disowned_rsize += object.resident_page_count
"""
pager = Cast(obj.pager, 'compressor_pager_t')
return pager.cpgr_num_slots_occupied
-# if pager.cpgr_num_slots > 128:
-# slots_arr = pager.cpgr_slots.cpgr_islots
-# num_indirect_slot_ptr = (pager.cpgr_num_slots + 127) / 128
-# index = 0
-# compressor_slot = 0
-# compressed_pages = 0
-# while index < num_indirect_slot_ptr:
-# compressor_slot = 0
-# if slots_arr[index]:
-# while compressor_slot < 128:
-# if slots_arr[index][compressor_slot]:
-# compressed_pages += 1
-# compressor_slot += 1
-# index += 1
-# else:
-# slots_arr = pager.cpgr_slots.cpgr_dslots
-# compressor_slot = 0
-# compressed_pages = 0
-# while compressor_slot < pager.cpgr_num_slots:
-# if slots_arr[compressor_slot]:
-# compressed_pages += 1
-# compressor_slot += 1
-# return compressed_pages
+ """ # commented code below
+ if pager.cpgr_num_slots > 128:
+ slots_arr = pager.cpgr_slots.cpgr_islots
+ num_indirect_slot_ptr = (pager.cpgr_num_slots + 127) / 128
+ index = 0
+ compressor_slot = 0
+ compressed_pages = 0
+ while index < num_indirect_slot_ptr:
+ compressor_slot = 0
+ if slots_arr[index]:
+ while compressor_slot < 128:
+ if slots_arr[index][compressor_slot]:
+ compressed_pages += 1
+ compressor_slot += 1
+ index += 1
+ else:
+ slots_arr = pager.cpgr_slots.cpgr_dslots
+ compressor_slot = 0
+ compressed_pages = 0
+ while compressor_slot < pager.cpgr_num_slots:
+ if slots_arr[compressor_slot]:
+ compressed_pages += 1
+ compressor_slot += 1
+ return compressed_pages
+ """
def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
""" Routine to print out a summary listing of all the entries in a vm_map
if not task.map:
print "Task {0: <#020x} has map = 0x0"
return None
- showmapvme(task.map, show_pager_info, show_all_shadows)
+ showmapvme(task.map, 0, 0, show_pager_info, show_all_shadows, False)
-@lldb_command("showmapvme", "PS")
+@lldb_command("showmapvme", "A:B:PRST")
def ShowMapVME(cmd_args=None, cmd_options={}):
"""Routine to print out info about the specified vm_map and its vm entries
- usage: showmapvme <vm_map>
+ usage: showmapvme <vm_map> [-A start] [-B end] [-S] [-P]
+ Use -A <start> flag to start at virtual address <start>
+ Use -B <end> flag to end at virtual address <end>
Use -S flag to show VM object shadow chains
Use -P flag to show pager info (mapped file, compressed pages, ...)
+ Use -R flag to reverse order
+ Use -T to show red-black tree pointers
"""
if cmd_args == None or len(cmd_args) < 1:
- print "Invalid argument.", ShowMap.__doc__
+ print "Invalid argument.", ShowMapVME.__doc__
return
show_pager_info = False
show_all_shadows = False
+ show_rb_tree = False
+ start_vaddr = 0
+ end_vaddr = 0
+ reverse_order = False
+ if "-A" in cmd_options:
+ start_vaddr = unsigned(int(cmd_options['-A'], 16))
+ if "-B" in cmd_options:
+ end_vaddr = unsigned(int(cmd_options['-B'], 16))
if "-P" in cmd_options:
show_pager_info = True
if "-S" in cmd_options:
show_all_shadows = True
+ if "-R" in cmd_options:
+ reverse_order = True
+ if "-T" in cmd_options:
+ show_rb_tree = True
map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
- showmapvme(map, show_pager_info, show_all_shadows)
+ showmapvme(map, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree)
+
+@lldb_command("showvmobject", "A:B:PRST")
+def ShowVMObject(cmd_args=None, cmd_options={}):
+ """Routine to print out a VM object and its shadow chain
+ usage: showvmobject <vm_object> [-S] [-P]
+ -S: show VM object shadow chain
+ -P: show pager info (mapped file, compressed pages, ...)
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+        print "Invalid argument.", ShowVMObject.__doc__
+ return
+ show_pager_info = False
+ show_all_shadows = False
+ if "-P" in cmd_options:
+ show_pager_info = True
+ if "-S" in cmd_options:
+ show_all_shadows = True
+ object = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')
+ showvmobject(object, 0, 0, show_pager_info, show_all_shadows)
-def showmapvme(map, show_pager_info, show_all_shadows):
+def showvmobject(object, offset=0, size=0, show_pager_info=False, show_all_shadows=False):
page_size = kern.globals.page_size
vnode_pager_ops = kern.globals.vnode_pager_ops
vnode_pager_ops_addr = unsigned(addressof(vnode_pager_ops))
+ depth = 0
+ if size == 0 and object != 0 and object.internal:
+ size = object.vo_un1.vou_size
+ while object != 0:
+ depth += 1
+ if show_all_shadows == False and depth != 1 and object.shadow != 0:
+ offset += unsigned(object.vo_un2.vou_shadow_offset)
+ object = object.shadow
+ continue
+ if object.copy_strategy == 0:
+ copy_strategy="N"
+ elif object.copy_strategy == 2:
+ copy_strategy="D"
+ elif object.copy_strategy == 4:
+ copy_strategy="S"
+
+ else:
+ copy_strategy=str(object.copy_strategy)
+ if object.internal:
+ internal = "internal"
+ else:
+ internal = "external"
+ purgeable = "NVED"[int(object.purgable)]
+ pager_string = ""
+ if object.phys_contiguous:
+ pager_string = pager_string + "phys_contig {:#018x}:{:#018x} ".format(unsigned(object.vo_un2.vou_shadow_offset), unsigned(object.vo_un1.vou_size))
+ pager = object.pager
+ if show_pager_info and pager != 0:
+ if object.internal:
+ pager_string = pager_string + "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
+ elif unsigned(pager.mo_pager_ops) == vnode_pager_ops_addr:
+ vnode_pager = Cast(pager,'vnode_pager *')
+ pager_string = pager_string + "-> " + GetVnodePath(vnode_pager.vnode_handle)
+ else:
+ pager_string = pager_string + "-> {:s}:{: <#018x}".format(pager.mo_pager_ops.memory_object_pager_name, pager)
+ print "{:>18d} {:#018x}:{:#018x} {: <#018x} ref:{:<6d} ts:{:1d} strat:{:1s} purg:{:1s} {:s} wtag:{:d} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,purgeable,internal,object.wire_tag,unsigned(object.vo_un1.vou_size)/page_size,object.resident_page_count,object.wired_page_count,pager_string)
+# print " #{:<5d} obj {: <#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/page_size,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
+ offset += unsigned(object.vo_un2.vou_shadow_offset)
+ object = object.shadow
+
+def showmapvme(map, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order=False, show_rb_tree=False):
rsize = 0
if map.pmap != 0:
rsize = int(map.pmap.stats.resident_count)
print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
- print "{:#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
- vme_list_head = map.hdr.links
+ print "{: <#018x} {: <#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
+ showmaphdrvme(map.hdr, map.pmap, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree)
+
+def showmapcopyvme(mapcopy, start_vaddr=0, end_vaddr=0, show_pager_info=True, show_all_shadows=True, reverse_order=False, show_rb_tree=False):
+ print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map_copy","pmap","size","#ents","rsize","start","end")
+ print "{: <#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(mapcopy,0,0,mapcopy.c_u.hdr.nentries,0,mapcopy.c_u.hdr.links.start,mapcopy.c_u.hdr.links.end)
+ showmaphdrvme(mapcopy.c_u.hdr, 0, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree)
+
+def showmaphdrvme(maphdr, pmap, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree):
+ page_size = kern.globals.page_size
+ vnode_pager_ops = kern.globals.vnode_pager_ops
+ vnode_pager_ops_addr = unsigned(addressof(vnode_pager_ops))
+ if hasattr(kern.globals, 'compressor_object'):
+ compressor_object = kern.globals.compressor_object
+ else:
+ compressor_object = -1;
+ vme_list_head = maphdr.links
vme_ptr_type = GetType('vm_map_entry *')
- print "{:<18s} {:>18s}:{:<18s} {:>10s} {:<8s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag.kmod","prot&flags","object","offset")
- last_end = unsigned(map.hdr.links.start)
- for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
+ print "{:<18s} {:>18s}:{:<18s} {:>10s} {:<8s} {:<16s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag.kmod","prot&flags","object","offset")
+ last_end = unsigned(maphdr.links.start)
+ skipped_entries = 0
+ for vme in IterateQueue(vme_list_head, vme_ptr_type, "links", reverse_order):
+ if start_vaddr != 0 and end_vaddr != 0:
+ if unsigned(vme.links.start) > end_vaddr:
+ break
+ if unsigned(vme.links.end) <= start_vaddr:
+ last_end = unsigned(vme.links.end)
+ skipped_entries = skipped_entries + 1
+ continue
+ if skipped_entries != 0:
+ print "... skipped {:d} entries ...".format(skipped_entries)
+ skipped_entries = 0
if unsigned(vme.links.start) != last_end:
print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,vme.links.start,(unsigned(vme.links.start) - last_end)/page_size)
last_end = unsigned(vme.links.end)
size = unsigned(vme.links.end) - unsigned(vme.links.start)
object = vme.vme_object.vmo_object
if object == 0:
- object_str = "{:<#018x}".format(object)
+ object_str = "{: <#018x}".format(object)
elif vme.is_sub_map:
if object == kern.globals.bufferhdr_map:
object_str = "BUFFERHDR_MAP"
elif hasattr(kern.globals, 'vector_upl_submap') and object == kern.globals.vector_upl_submap:
object_str = "VECTOR_UPL_SUBMAP"
else:
- object_str = "submap:{:<#018x}".format(object)
+ object_str = "submap:{: <#018x}".format(object)
else:
if object == kern.globals.kernel_object:
object_str = "KERNEL_OBJECT"
elif object == kern.globals.vm_submap_object:
object_str = "VM_SUBMAP_OBJECT"
- elif object == kern.globals.compressor_object:
+ elif object == compressor_object:
object_str = "COMPRESSOR_OBJECT"
else:
- object_str = "{:<#018x}".format(object)
+ object_str = "{: <#018x}".format(object)
offset = unsigned(vme.vme_offset) & ~0xFFF
tag = unsigned(vme.vme_offset & 0xFFF)
+ protection = ""
+ if vme.protection & 0x1:
+ protection +="r"
+ else:
+ protection += "-"
+ if vme.protection & 0x2:
+ protection += "w"
+ else:
+ protection += "-"
+ if vme.protection & 0x4:
+ protection += "x"
+ else:
+ protection += "-"
+ max_protection = ""
+ if vme.max_protection & 0x1:
+ max_protection +="r"
+ else:
+ max_protection += "-"
+ if vme.max_protection & 0x2:
+ max_protection += "w"
+ else:
+ max_protection += "-"
+ if vme.max_protection & 0x4:
+ max_protection += "x"
+ else:
+ max_protection += "-"
vme_flags = ""
if vme.is_sub_map:
vme_flags += "s"
if vme.needs_copy:
vme_flags += "n"
- if vme.is_sub_map and vme.use_pmap:
+ if vme.use_pmap:
vme_flags += "p"
+ if vme.wired_count:
+ vme_flags += "w"
+ if vme.used_for_jit:
+ vme_flags += "j"
tagstr = ""
- if map.pmap == kern.globals.kernel_pmap:
+ if pmap == kern.globals.kernel_pmap:
xsite = Cast(kern.globals.vm_allocation_sites[tag],'OSKextAccount *')
- if xsite and xsite.site.flags & 2:
+ if xsite and xsite.site.flags & 0x0200:
tagstr = ".{:<3d}".format(xsite.loadTag)
- print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d}{:<4s} {:1d}{:1d}{:<8s} {:<18s} {:<#18x}".format(vme,vme.links.start,vme.links.end,(unsigned(vme.links.end)-unsigned(vme.links.start))/page_size,tag,tagstr,vme.protection,vme.max_protection,vme_flags,object_str,offset)
+ rb_info = ""
+ if show_rb_tree:
+ rb_info = "l={: <#018x} r={: <#018x} p={: <#018x}".format(vme.store.entry.rbe_left, vme.store.entry.rbe_right, vme.store.entry.rbe_parent)
+ print "{: <#018x} {:#018x}:{:#018x} {:>10d} {:>3d}{:<4s} {:3s}/{:3s}/{:<8s} {:<18s} {:<#18x} {:s}".format(vme,vme.links.start,vme.links.end,(unsigned(vme.links.end)-unsigned(vme.links.start))/page_size,tag,tagstr,protection,max_protection,vme_flags,object_str,offset, rb_info)
if (show_pager_info or show_all_shadows) and vme.is_sub_map == 0 and vme.vme_object.vmo_object != 0:
object = vme.vme_object.vmo_object
else:
object = 0
- depth = 0
- while object != 0:
- depth += 1
- if show_all_shadows == False and depth != 1 and object.shadow != 0:
- offset += unsigned(object.vo_un2.vou_shadow_offset)
- object = object.shadow
- continue
- if object.copy_strategy == 0:
- copy_strategy="N"
- elif object.copy_strategy == 2:
- copy_strategy="D"
- elif object.copy_strategy == 4:
- copy_strategy="S"
- else:
- copy_strategy=str(object.copy_strategy)
- if object.internal:
- internal = "internal"
- else:
- internal = "external"
- pager_string = ""
- pager = object.pager
- if show_pager_info and pager != 0:
- if object.internal:
- pager_string = "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
- elif unsigned(pager.mo_pager_ops) == vnode_pager_ops_addr:
- vnode_pager = Cast(pager,'vnode_pager *')
- pager_string = "-> " + GetVnodePath(vnode_pager.vnode_handle)
- else:
- pager_string = "-> {:s}:{:#018x}".format(pager.mo_pager_ops.memory_object_pager_name, pager.mo_pager_ops)
- print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,internal,unsigned(object.vo_un1.vou_size)/page_size,object.resident_page_count,object.wired_page_count,pager_string)
-# print " #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/page_size,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
- offset += unsigned(object.vo_un2.vou_shadow_offset)
- object = object.shadow
- if unsigned(map.hdr.links.end) > last_end:
- print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,map.hdr.links.end,(unsigned(map.hdr.links.end) - last_end)/page_size)
+ showvmobject(object, offset, size, show_pager_info, show_all_shadows)
+ if start_vaddr != 0 or end_vaddr != 0:
+ print "..."
+ elif unsigned(maphdr.links.end) > last_end:
+ print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,maphdr.links.end,(unsigned(maphdr.links.end) - last_end)/page_size)
return None
def CountMapTags(map, tagcounts, slow):
page = _vm_page_unpack_ptr(page_list)
while (page != 0):
vmpage = kern.GetValueFromAddress(page, 'vm_page_t')
- if (addr == unsigned(vmpage.offset)) and (object == vm_object_t(_vm_page_unpack_ptr(vmpage.vm_page_object))):
- if (not vmpage.local) and (vmpage.wire_count > 0):
+ if (addr == unsigned(vmpage.vmp_offset)) and (object == vm_object_t(_vm_page_unpack_ptr(vmpage.vmp_object))):
+ if (not vmpage.vmp_local) and (vmpage.vmp_wire_count > 0):
count += 1
break
- page = _vm_page_unpack_ptr(vmpage.next_m)
+ page = _vm_page_unpack_ptr(vmpage.vmp_next_m)
addr += page_size
tagcounts[tag] += count
elif vme.is_sub_map:
tagcounts[unsigned(object.wire_tag)] += object.wired_page_count
return None
-def CountWiredPurgeableGroup(qhead, tagcounts):
- for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
- CountWiredObject(object, tagcounts)
- return None
-
-def CountWiredPurgeableQueue(qhead, tagcounts):
- CountWiredPurgeableGroup(qhead.objq[0], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[1], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[2], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[3], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[4], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[5], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[6], tagcounts)
- CountWiredPurgeableGroup(qhead.objq[7], tagcounts)
-
def GetKmodIDName(kmod_id):
kmod_val = kern.globals.kmod
for kmod in IterateLinkedList(kmod_val, 'next'):
return "{:<50s}".format(kmod.name)
return "??"
-def GetVMKernName(tag):
- if 1 == tag:
- return "VM_KERN_MEMORY_OSFMK"
- elif 2 == tag:
- return "VM_KERN_MEMORY_BSD"
- elif 3 == tag:
- return "VM_KERN_MEMORY_IOKIT"
- elif 4 == tag:
- return "VM_KERN_MEMORY_LIBKERN"
- elif 5 == tag:
- return "VM_KERN_MEMORY_OSKEXT"
- elif 6 == tag:
- return "VM_KERN_MEMORY_KEXT"
- elif 7 == tag:
- return "VM_KERN_MEMORY_IPC"
- elif 8 == tag:
- return "VM_KERN_MEMORY_STACK"
- elif 9 == tag:
- return "VM_KERN_MEMORY_CPU"
- elif 10 == tag:
- return "VM_KERN_MEMORY_PMAP"
- elif 11 == tag:
- return "VM_KERN_MEMORY_PTE"
- elif 12 == tag:
- return "VM_KERN_MEMORY_ZONE"
- elif 13 == tag:
- return "VM_KERN_MEMORY_KALLOC"
- elif 14 == tag:
- return "VM_KERN_MEMORY_COMPRESSOR"
- elif 15 == tag:
- return "VM_KERN_MEMORY_COMPRESSED_DATA"
- elif 16 == tag:
- return "VM_KERN_MEMORY_PHANTOM_CACHE"
- elif 17 == tag:
- return "VM_KERN_MEMORY_WAITQ"
- elif 18 == tag:
- return "VM_KERN_MEMORY_DIAG"
- elif 19 == tag:
- return "VM_KERN_MEMORY_LOG"
- elif 20 == tag:
- return "VM_KERN_MEMORY_FILE"
- elif 21 == tag:
- return "VM_KERN_MEMORY_MBUF"
- elif 22 == tag:
- return "VM_KERN_MEMORY_UBC"
- elif 23 == tag:
- return "VM_KERN_MEMORY_SECURITY"
- elif 24 == tag:
- return "VM_KERN_MEMORY_MLOCK"
- return "??"
+FixedTags = {
+ 0: "VM_KERN_MEMORY_NONE",
+ 1: "VM_KERN_MEMORY_OSFMK",
+ 2: "VM_KERN_MEMORY_BSD",
+ 3: "VM_KERN_MEMORY_IOKIT",
+ 4: "VM_KERN_MEMORY_LIBKERN",
+ 5: "VM_KERN_MEMORY_OSKEXT",
+ 6: "VM_KERN_MEMORY_KEXT",
+ 7: "VM_KERN_MEMORY_IPC",
+ 8: "VM_KERN_MEMORY_STACK",
+ 9: "VM_KERN_MEMORY_CPU",
+ 10: "VM_KERN_MEMORY_PMAP",
+ 11: "VM_KERN_MEMORY_PTE",
+ 12: "VM_KERN_MEMORY_ZONE",
+ 13: "VM_KERN_MEMORY_KALLOC",
+ 14: "VM_KERN_MEMORY_COMPRESSOR",
+ 15: "VM_KERN_MEMORY_COMPRESSED_DATA",
+ 16: "VM_KERN_MEMORY_PHANTOM_CACHE",
+ 17: "VM_KERN_MEMORY_WAITQ",
+ 18: "VM_KERN_MEMORY_DIAG",
+ 19: "VM_KERN_MEMORY_LOG",
+ 20: "VM_KERN_MEMORY_FILE",
+ 21: "VM_KERN_MEMORY_MBUF",
+ 22: "VM_KERN_MEMORY_UBC",
+ 23: "VM_KERN_MEMORY_SECURITY",
+ 24: "VM_KERN_MEMORY_MLOCK",
+ 25: "VM_KERN_MEMORY_REASON",
+ 26: "VM_KERN_MEMORY_SKYWALK",
+ 27: "VM_KERN_MEMORY_LTABLE",
+ 255:"VM_KERN_MEMORY_ANY",
+}
+def GetVMKernName(tag):
+ return FixedTags[tag]
-@lldb_command("showvmtags", "S")
+@lldb_command("showvmtags", "AS")
def showvmtags(cmd_args=None, cmd_options={}):
"""Routine to print out info about kernel wired page allocations
usage: showvmtags
iterates kernel map and vm objects totaling allocations by tag.
usage: showvmtags -S
also iterates kernel object pages individually - slow.
+ usage: showvmtags -A
+ show all tags, even tags that have no wired count
"""
slow = False
if "-S" in cmd_options:
slow = True
+ all_tags = False
+ if "-A" in cmd_options:
+ all_tags = True
page_size = unsigned(kern.globals.page_size)
tagcounts = []
+ tagpeaks = []
for tag in range(256):
tagcounts.append(0)
+ for tag in range(256):
+ tagpeaks.append(0)
+
+ if kern.globals.vm_tag_active_update:
+ for tag in range(256):
+ site = kern.globals.vm_allocation_sites[tag]
+ if site:
+ tagcounts[unsigned(tag)] = unsigned(site.total)
+ tagpeaks[unsigned(tag)] = unsigned(site.peak)
+ else:
+ queue_head = kern.globals.vm_objects_wired
+ for object in IterateQueue(queue_head, 'struct vm_object *', 'wired_objq'):
+ if object != kern.globals.kernel_object:
+ CountWiredObject(object, tagcounts)
- queue_head = kern.globals.vm_objects_wired
- for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
- if object != kern.globals.kernel_object:
- CountWiredObject(object, tagcounts)
-
- queue_head = kern.globals.purgeable_nonvolatile_queue
- for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
- CountWiredObject(object, tagcounts)
-
- purgeable_queues = kern.globals.purgeable_queues
- CountWiredPurgeableQueue(purgeable_queues[0], tagcounts)
- CountWiredPurgeableQueue(purgeable_queues[1], tagcounts)
- CountWiredPurgeableQueue(purgeable_queues[2], tagcounts)
-
- CountMapTags(kern.globals.kernel_map, tagcounts, slow)
+ CountMapTags(kern.globals.kernel_map, tagcounts, slow)
total = 0
- print " {:<8s} {:>7s} {:<50s}".format("tag.kmod","size","name")
+ print " {:<7s} {:>7s} {:>7s} {:<50s}".format("tag.kmod","peak","size","name")
for tag in range(256):
- if tagcounts[tag]:
+ if all_tags or tagcounts[tag]:
total += tagcounts[tag]
tagstr = ""
sitestr = ""
- if (tag <= 24):
+ if ((tag <= 27) or (tag == 255)):
sitestr = GetVMKernName(tag)
else:
site = kern.globals.vm_allocation_sites[tag]
if site:
- if site.flags & 2:
- xsite = Cast(site,'OSKextAccount *')
- tagstr = ".{:<3d}".format(xsite.loadTag)
- sitestr = GetKmodIDName(xsite.loadTag)
+ if site.flags & 0x007F:
+ cstr = addressof(site.subtotals[site.subtotalscount])
+ sitestr = "{:<50s}".format(str(Cast(cstr, 'char *')))
else:
- sitestr = kern.Symbolicate(site)
- print " {:>3d}{:<4s} {:>7d}K {:<50s}".format(tag,tagstr,tagcounts[tag]*page_size / 1024,sitestr)
- print "Total: {:>7d}K".format(total*page_size / 1024)
+ if site.flags & 0x0200:
+ xsite = Cast(site,'OSKextAccount *')
+ tagstr = ".{:<3d}".format(xsite.loadTag)
+ sitestr = GetKmodIDName(xsite.loadTag)
+ else:
+ sitestr = kern.Symbolicate(site)
+ print " {:>3d}{:<4s} {:>7d}K {:>7d}K {:<50s}".format(tag,tagstr,tagpeaks[tag] / 1024, tagcounts[tag] / 1024,sitestr)
+ print "Total: {:>7d}K".format(total / 1024)
return None
def FindVMEntriesForVnode(task, vn):
- """ returns an array of vme that have the vnode set to defined vnode
+ """ returns an array of vme that have the vnode set to defined vnode
each entry in array is of format (vme, start_addr, end_address, protection)
"""
retval = []
pass
else:
vn_pager = Cast(obj.pager, 'vnode_pager *')
- if unsigned(vn_pager.pager_ops) == pager_ops_addr and unsigned(vn_pager.vnode_handle) == unsigned(vn):
+ if unsigned(vn_pager.vn_pgr_hdr.mo_pager_ops) == pager_ops_addr and unsigned(vn_pager.vnode_handle) == unsigned(vn):
retval.append((vme, unsigned(vme.links.start), unsigned(vme.links.end), unsigned(vme.protection)))
obj = obj.shadow
return retval
page = _vm_page_unpack_ptr(page_list)
while (page != 0) :
pg_t = kern.GetValueFromAddress(page, 'vm_page_t')
- print format_string.format(page, pg_t.offset, _vm_page_unpack_ptr(pg_t.vm_page_object))
- page = _vm_page_unpack_ptr(pg_t.next_m)
+ print format_string.format(page, pg_t.vmp_offset, _vm_page_unpack_ptr(pg_t.vmp_object))
+ page = _vm_page_unpack_ptr(pg_t.vmp_next_m)
def _vm_page_get_phys_page(page):
if kern.arch == 'x86_64':
- return page.phys_page
+ return page.vmp_phys_page
if page == 0 :
return 0
#ARM64 - min_addr = 0xffffff8000000000
if unsigned(page) & unsigned(ptr_mask) :
masked_page = (unsigned(page) & ~ptr_mask)
- return (unsigned(addressof(kern.globals.vm_pages[masked_page])))
+ # can't use addressof(kern.globals.vm_pages[masked_page]) due to 32 bit limitation in SB bridge
+ vm_pages_addr = unsigned(addressof(kern.globals.vm_pages[0]))
+ element_size = unsigned(addressof(kern.globals.vm_pages[1])) - vm_pages_addr
+ return (vm_pages_addr + masked_page * element_size)
return ((unsigned(page) << unsigned(ptr_shift)) + unsigned(min_addr))
@lldb_command('calcvmpagehash')
page_found = False
pages_seen = set()
- for vmp in IterateQueue(obj.memq, "vm_page_t", "listq", walk_backwards, unpack_ptr_fn=_vm_page_unpack_ptr):
+ for vmp in IterateQueue(obj.memq, "vm_page_t", "vmp_listq", walk_backwards, unpack_ptr_fn=_vm_page_unpack_ptr):
page_count += 1
out_string = ""
if (page != 0 and not(page_found) and vmp == page):
if (page_count % 1000) == 0:
print "traversed %d pages ...\n" % (page_count)
else:
- out_string += format_string.format(page_count, res_page_count, vmp, vmp.offset, _vm_page_unpack_ptr(vmp.listq.next), _vm_page_get_phys_page(vmp), vmp.wire_count)
- out_string += first_bitfield_format_string.format(vmp.vm_page_q_state, vmp.vm_page_in_background, vmp.vm_page_on_backgroundq, vmp.gobbled, vmp.laundry, vmp.no_cache,
- vmp.private, vmp.reference)
+ out_string += format_string.format(page_count, res_page_count, vmp, vmp.vmp_offset, _vm_page_unpack_ptr(vmp.vmp_listq.next), _vm_page_get_phys_page(vmp), vmp.vmp_wire_count)
+ out_string += first_bitfield_format_string.format(vmp.vmp_q_state, vmp.vmp_in_background, vmp.vmp_on_backgroundq, vmp.vmp_gobbled, vmp.vmp_laundry, vmp.vmp_no_cache,
+ vmp.vmp_private, vmp.vmp_reference)
- out_string += second_bitfield_format_string.format(vmp.busy, vmp.wanted, vmp.tabled, vmp.hashed, vmp.fictitious, vmp.clustered,
- vmp.pmapped, vmp.xpmapped, vmp.wpmapped, vmp.free_when_done, vmp.absent,
- vmp.error, vmp.dirty, vmp.cleaning, vmp.precious, vmp.overwriting,
- vmp.restart, vmp.unusual, vmp.encrypted, vmp.encrypted_cleaning,
- vmp.cs_validated, vmp.cs_tainted, vmp.cs_nx, vmp.reusable, vmp.lopage, vmp.slid,
- vmp.written_by_kernel)
+ if hasattr(vmp,'slid'):
+ vmp_slid = vmp.slid
+ else:
+ vmp_slid = 0
+ out_string += second_bitfield_format_string.format(vmp.vmp_busy, vmp.vmp_wanted, vmp.vmp_tabled, vmp.vmp_hashed, vmp.vmp_fictitious, vmp.vmp_clustered,
+ vmp.vmp_pmapped, vmp.vmp_xpmapped, vmp.vmp_wpmapped, vmp.vmp_free_when_done, vmp.vmp_absent,
+ vmp.vmp_error, vmp.vmp_dirty, vmp.vmp_cleaning, vmp.vmp_precious, vmp.vmp_overwriting,
+ vmp.vmp_restart, vmp.vmp_unusual, 0, 0,
+ vmp.vmp_cs_validated, vmp.vmp_cs_tainted, vmp.vmp_cs_nx, vmp.vmp_reusable, vmp.vmp_lopage, vmp_slid,
+ vmp.vmp_written_by_kernel)
if (vmp in pages_seen):
print out_string + "cycle detected! we've seen vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " twice. stopping...\n"
return
- if (_vm_page_unpack_ptr(vmp.vm_page_object) != unsigned(obj)):
- print out_string + " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(_vm_page_unpack_ptr(vmp.vm_page_object)))
+ if (_vm_page_unpack_ptr(vmp.vmp_object) != unsigned(obj)):
+ print out_string + " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(_vm_page_unpack_ptr(vmp.vmp_object)))
return
- if (vmp.vm_page_q_state == VM_PAGE_IS_WIRED) and (vmp.wire_count == 0):
+ if (vmp.vmp_q_state == VM_PAGE_IS_WIRED) and (vmp.vmp_wire_count == 0):
print out_string + " page in wired state with wire_count of 0\n"
print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + "\n"
print "stopping...\n"
return
- if ((vmp.__unused_pageq_bits != 0) or (vmp.__unused_object_bits != 0)):
- print out_string + " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp.__unused_pageq_bits,
- vmp.__unused_object_bits)
+ if ((vmp.vmp_unused_page_bits != 0) or (vmp.vmp_unused_object_bits != 0)):
+ print out_string + " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp.vmp_unused_page_bits,
+ vmp.vmp_unused_object_bits)
print "stopping...\n"
return
pages_seen.add(vmp)
if False:
- hash_id = _calc_vm_page_hash(obj, vmp.offset)
+ hash_id = _calc_vm_page_hash(obj, vmp.vmp_offset)
hash_page_list = kern.globals.vm_page_buckets[hash_id].page_list
hash_page = _vm_page_unpack_ptr(hash_page_list)
hash_page_t = 0
hash_page_t = kern.GetValueFromAddress(hash_page, 'vm_page_t')
if hash_page_t == vmp:
break
- hash_page = _vm_page_unpack_ptr(hash_page_t.next_m)
+ hash_page = _vm_page_unpack_ptr(hash_page_t.vmp_next_m)
if (unsigned(vmp) != unsigned(hash_page_t)):
print out_string + "unable to find page: " + "{0: <#020x}".format(unsigned(vmp)) + " from object in kernel page bucket list\n"
- print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args[0], unsigned(vmp.offset)))
+ print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args[0], unsigned(vmp.vmp_offset)))
return
if (page_count >= limit and not(ignore_limit)):
usage: show_apple_protect_pager <pager>
"""
if cmd_args == None or len(cmd_args) < 1:
- print "Invalid argument.", ShowMap.__doc__
+ print "Invalid argument.", ShowAppleProtectPager.__doc__
return
- pager = kern.GetValueFromAddress(cmd_ars[0], 'apple_protect_pager_t')
+ pager = kern.GetValueFromAddress(cmd_args[0], 'apple_protect_pager_t')
show_apple_protect_pager(pager, 1, 1)
def show_apple_protect_pager(pager, qcnt, idx):
shadow = object.shadow
vnode_pager = Cast(object.pager,'vnode_pager *')
filename = GetVnodePath(vnode_pager.vnode_handle)
- print "{:>3}/{:<3d} {:#018x} {:>5d} {:>5d} {:>6d} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x}\n\tcrypt_info:{:#018x} <decrypt:{:#018x} end:{:#018x} ops:{:#018x} refs:{:<d}>\n\tvnode:{:#018x} {:s}\n".format(idx, qcnt, pager, pager.ref_count, pager.is_ready, pager.is_mapped, pager.pager_control, pager.backing_object, pager.backing_offset, pager.crypto_backing_offset, pager.crypto_start, pager.crypto_end, pager.crypt_info, pager.crypt_info.page_decrypt, pager.crypt_info.crypt_end, pager.crypt_info.crypt_ops, pager.crypt_info.crypt_refcnt, vnode_pager.vnode_handle, filename)
+ print "{:>3}/{:<3d} {: <#018x} {:>5d} {:>5d} {:>6d} {: <#018x} {: <#018x} {:#018x} {:#018x} {:#018x} {:#018x}\n\tcrypt_info:{: <#018x} <decrypt:{: <#018x} end:{:#018x} ops:{: <#018x} refs:{:<d}>\n\tvnode:{: <#018x} {:s}\n".format(idx, qcnt, pager, pager.ref_count, pager.is_ready, pager.is_mapped, pager.pager_control, pager.backing_object, pager.backing_offset, pager.crypto_backing_offset, pager.crypto_start, pager.crypto_end, pager.crypt_info, pager.crypt_info.page_decrypt, pager.crypt_info.crypt_end, pager.crypt_info.crypt_ops, pager.crypt_info.crypt_refcnt, vnode_pager.vnode_handle, filename)
@lldb_command("show_console_ring")
def ShowConsoleRingData(cmd_args=None):
# Not shown are uuid, user_data, cpu_time
global kern
- if kern.arch == 'x86_64':
- print "Snapshots are not supported.\n"
- return
show_footprint_details = False
show_all_entries = False
_ShowVnodeBlocks(True, cmd_args)
# EndMacro: showvnodecleanblk/showvnodedirtyblk
+
+
+@lldb_command("vm_page_lookup_in_map")
+def VmPageLookupInMap(cmd_args=None):
+ """Lookup up a page at a virtual address in a VM map
+ usage: vm_page_lookup_in_map <map> <vaddr>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInMap.__doc__
+ return
+ map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
+ vaddr = kern.GetValueFromAddress(cmd_args[1], 'vm_map_offset_t')
+ print "vaddr {:#018x} in map {: <#018x}".format(vaddr, map)
+ vm_page_lookup_in_map(map, vaddr)
+
+def vm_page_lookup_in_map(map, vaddr):
+ vaddr = unsigned(vaddr)
+ vme_list_head = map.hdr.links
+ vme_ptr_type = GetType('vm_map_entry *')
+ for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
+ if unsigned(vme.links.start) > vaddr:
+ break
+ if unsigned(vme.links.end) <= vaddr:
+ continue
+ offset_in_vme = vaddr - unsigned(vme.links.start)
+ print " offset {:#018x} in map entry {: <#018x} [{:#018x}:{:#018x}] object {: <#018x} offset {:#018x}".format(offset_in_vme, vme, unsigned(vme.links.start), unsigned(vme.links.end), vme.vme_object.vmo_object, unsigned(vme.vme_offset) & ~0xFFF)
+ offset_in_object = offset_in_vme + (unsigned(vme.vme_offset) & ~0xFFF)
+ if vme.is_sub_map:
+ print "vaddr {:#018x} in map {: <#018x}".format(offset_in_object, vme.vme_object.vmo_submap)
+ vm_page_lookup_in_map(vme.vme_object.vmo_submap, offset_in_object)
+ else:
+ vm_page_lookup_in_object(vme.vme_object.vmo_object, offset_in_object)
+
+@lldb_command("vm_page_lookup_in_object")
+def VmPageLookupInObject(cmd_args=None):
+ """Lookup up a page at a given offset in a VM object
+ usage: vm_page_lookup_in_object <object> <offset>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInObject.__doc__
+ return
+ object = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')
+ offset = kern.GetValueFromAddress(cmd_args[1], 'vm_object_offset_t')
+ print "offset {:#018x} in object {: <#018x}".format(offset, object)
+ vm_page_lookup_in_object(object, offset)
+
def vm_page_lookup_in_object(object, offset):
    """ Find the page at 'offset' in 'object': first among resident pages,
        then in the pager (compressor for internal objects), then down the
        shadow chain.
        params:
            object - value : a 'vm_object_t'
            offset - value/int : byte offset into the object
        returns:
            None; findings are printed.
    """
    offset = unsigned(offset)
    page_size = kern.globals.page_size
    trunc_offset = offset & ~(page_size - 1)    # page-align the offset
    print " offset {:#018x} in VM object {: <#018x}".format(offset, object)
    # Resident pages are reached through the global vm_page hash buckets.
    hash_id = _calc_vm_page_hash(object, trunc_offset)
    page_list = kern.globals.vm_page_buckets[hash_id].page_list
    page = _vm_page_unpack_ptr(page_list)
    while page != 0:
        m = kern.GetValueFromAddress(page, 'vm_page_t')
        m_object_val = _vm_page_unpack_ptr(m.vmp_object)
        m_object = kern.GetValueFromAddress(m_object_val, 'vm_object_t')
        if unsigned(m_object) != unsigned(object) or unsigned(m.vmp_offset) != unsigned(trunc_offset):
            # hash-bucket collision: some other object/offset; keep walking
            page = _vm_page_unpack_ptr(m.vmp_next_m)
            continue
        print " resident page {: <#018x} phys {:#010x}".format(m, _vm_page_get_phys_page(m))
        return
    if object.pager and object.pager_ready:
        offset_in_pager = trunc_offset + unsigned(object.paging_offset)
        if not object.internal:
            # external pager: just report which pager backs this offset
            print " offset {:#018x} in external '{:s}' {: <#018x}".format(offset_in_pager, object.pager.mo_pager_ops.memory_object_pager_name, object.pager)
            return
        # internal objects page out to the compressor
        pager = Cast(object.pager, 'compressor_pager *')
        ret = vm_page_lookup_in_compressor_pager(pager, offset_in_pager)
        if ret:
            return
    if object.shadow and not object.phys_contiguous:
        # not found here: recurse into the shadow object (note: the
        # un-truncated offset is used for the shadow-offset arithmetic)
        offset_in_shadow = offset + unsigned(object.vo_un2.vou_shadow_offset)
        vm_page_lookup_in_object(object.shadow, offset_in_shadow)
        return
    print " page is absent and will be zero-filled on demand"
    return
+
+@lldb_command("vm_page_lookup_in_compressor_pager")
+def VmPageLookupInCompressorPager(cmd_args=None):
+ """Lookup up a page at a given offset in a compressor pager
+ usage: vm_page_lookup_in_compressor_pager <pager> <offset>
+ """
+ if cmd_args == None or len(cmd_args) < 2:
+ print "Invalid argument.", VmPageLookupInCompressorPager.__doc__
+ return
+ pager = kern.GetValueFromAddress(cmd_args[0], 'compressor_pager_t')
+ offset = kern.GetValueFromAddress(cmd_args[1], 'memory_object_offset_t')
+ print "offset {:#018x} in compressor pager {: <#018x}".format(offset, pager)
+ vm_page_lookup_in_compressor_pager(pager, offset)
+
+def vm_page_lookup_in_compressor_pager(pager, offset):
+ offset = unsigned(offset)
+ page_size = unsigned(kern.globals.page_size)
+ page_num = unsigned(offset / page_size)
+ if page_num > pager.cpgr_num_slots:
+ print " *** ERROR: vm_page_lookup_in_compressor_pager({: <#018x},{:#018x}): page_num {:#x} > num_slots {:#x}".format(pager, offset, page_num, pager.cpgr_num_slots)
+ return 0
+ slots_per_chunk = 512 / sizeof ('compressor_slot_t')
+ num_chunks = unsigned((pager.cpgr_num_slots+slots_per_chunk-1) / slots_per_chunk)
+ if num_chunks > 1:
+ chunk_idx = unsigned(page_num / slots_per_chunk)
+ chunk = pager.cpgr_slots.cpgr_islots[chunk_idx]
+ slot_idx = unsigned(page_num % slots_per_chunk)
+ slot = GetObjectAtIndexFromArray(chunk, slot_idx)
+ slot_str = "islots[{:d}][{:d}]".format(chunk_idx, slot_idx)
+ elif pager.cpgr_num_slots > 2:
+ slot_idx = page_num
+ slot = GetObjectAtIndexFromArray(pager.cpgr_slots.cpgr_dslots, slot_idx)
+ slot_str = "dslots[{:d}]".format(slot_idx)
+ else:
+ slot_idx = page_num
+ slot = GetObjectAtIndexFromArray(pager.cpgr_slots.cpgr_eslots, slot_idx)
+ slot_str = "eslots[{:d}]".format(slot_idx)
+ print " offset {:#018x} in compressor pager {: <#018x} {:s} slot {: <#018x}".format(offset, pager, slot_str, slot)
+ if slot == 0:
+ return 0
+ slot_value = dereference(slot)
+ print " value {:#010x}".format(slot_value)
+ vm_page_lookup_in_compressor(Cast(slot, 'c_slot_mapping_t'))
+ return 1
+
+@lldb_command("vm_page_lookup_in_compressor")
+def VmPageLookupInCompressor(cmd_args=None):
+ """Lookup up a page in a given compressor slot
+ usage: vm_page_lookup_in_compressor <slot>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", VmPageLookupInCompressor.__doc__
+ return
+ slot = kern.GetValueFromAddress(cmd_args[0], 'compressor_slot_t *')
+ print "compressor slot {: <#018x}".format(slot)
+ vm_page_lookup_in_compressor(slot)
+
+C_SV_CSEG_ID = ((1 << 22) - 1)
+
+def vm_page_lookup_in_compressor(slot_ptr):
+ slot_ptr = Cast(slot_ptr, 'compressor_slot_t *')
+ slot_value = dereference(slot_ptr)
+ slot = Cast(slot_value, 'c_slot_mapping')
+ print slot
+ print "compressor slot {: <#018x} -> {:#010x} cseg {:d} cindx {:d}".format(unsigned(slot_ptr), unsigned(slot_value), slot.s_cseg, slot.s_cindx)
+ if slot_ptr == 0:
+ return
+ if slot.s_cseg == C_SV_CSEG_ID:
+ sv = kern.globals.c_segment_sv_hash_table
+ print "single value[{:#d}]: ref {:d} value {:#010x}".format(slot.s_cindx, sv[slot.s_cindx].c_sv_he_un.c_sv_he.c_sv_he_ref, sv[slot.s_cindx].c_sv_he_un.c_sv_he.c_sv_he_data)
+ return
+ if slot.s_cseg == 0 or unsigned(slot.s_cseg) > unsigned(kern.globals.c_segments_available):
+ print "*** ERROR: s_cseg {:d} is out of bounds (1 - {:d})".format(slot.s_cseg, unsigned(kern.globals.c_segments_available))
+ return
+ c_segments = kern.globals.c_segments
+ c_segments_elt = GetObjectAtIndexFromArray(c_segments, slot.s_cseg-1)
+ c_seg = c_segments_elt.c_seg
+ c_no_data = 0
+ if hasattr(c_seg, 'c_state'):
+ c_state = c_seg.c_state
+ if c_state == 0:
+ c_state_str = "C_IS_EMPTY"
+ c_no_data = 1
+ elif c_state == 1:
+ c_state_str = "C_IS_FREE"
+ c_no_data = 1
+ elif c_state == 2:
+ c_state_str = "C_IS_FILLING"
+ elif c_state == 3:
+ c_state_str = "C_ON_AGE_Q"
+ elif c_state == 4:
+ c_state_str = "C_ON_SWAPOUT_Q"
+ elif c_state == 5:
+ c_state_str = "C_ON_SWAPPEDOUT_Q"
+ c_no_data = 1
+ elif c_state == 6:
+ c_state_str = "C_ON_SWAPPEDOUTSPARSE_Q"
+ c_no_data = 1
+ elif c_state == 7:
+ c_state_str = "C_ON_SWAPPEDIN_Q"
+ elif c_state == 8:
+ c_state_str = "C_ON_MAJORCOMPACT_Q"
+ elif c_state == 9:
+ c_state_str = "C_ON_BAD_Q"
+ c_no_data = 1
+ else:
+ c_state_str = "<unknown>"
+ else:
+ c_state = -1
+ c_state_str = "<no c_state field>"
+ print "c_segments[{:d}] {: <#018x} c_seg {: <#018x} c_state {:#x}={:s}".format(slot.s_cseg-1, c_segments_elt, c_seg, c_state, c_state_str)
+ c_indx = unsigned(slot.s_cindx)
+ if hasattr(c_seg, 'c_slot_var_array'):
+ c_seg_fixed_array_len = kern.globals.c_seg_fixed_array_len
+ if c_indx < c_seg_fixed_array_len:
+ cs = c_seg.c_slot_fixed_array[c_indx]
+ else:
+ cs = GetObjectAtIndexFromArray(c_seg.c_slot_var_array, c_indx - c_seg_fixed_array_len)
+ else:
+ C_SEG_SLOT_ARRAY_SIZE = 64
+ C_SEG_SLOT_ARRAY_MASK = C_SEG_SLOT_ARRAY_SIZE - 1
+ cs = GetObjectAtIndexFromArray(c_seg.c_slots[c_indx / C_SEG_SLOT_ARRAY_SIZE], c_indx & C_SEG_SLOT_ARRAY_MASK)
+ print cs
+ c_slot_unpacked_ptr = (unsigned(cs.c_packed_ptr) << 2) + vm_min_kernel_and_kext_address()
+ print "c_slot {: <#018x} c_offset {:#x} c_size {:#x} c_packed_ptr {:#x} (unpacked: {: <#018x})".format(cs, cs.c_offset, cs.c_size, cs.c_packed_ptr, unsigned(c_slot_unpacked_ptr))
+ if unsigned(slot_ptr) != unsigned(c_slot_unpacked_ptr):
+ print "*** ERROR: compressor slot {: <#018x} points back to {: <#018x} instead of itself".format(slot_ptr, c_slot_unpacked_ptr)
+ if c_no_data == 0:
+ c_data = c_seg.c_store.c_buffer + (4 * cs.c_offset)
+ c_size = cs.c_size
+ cmd = "memory read {: <#018x} {: <#018x} --force".format(c_data, c_data + c_size)
+ print cmd
+ print lldb_run_command(cmd)
+ else:
+ print "<no compressed data>"
+
+def vm_min_kernel_and_kext_address(cmd_args=None):
+ if hasattr(kern.globals, 'vm_min_kernel_and_kext_address'):
+ return unsigned(kern.globals.vm_min_kernel_and_kext_address)
+ elif kern.arch == 'x86_64':
+ return unsigned(0xffffff7f80000000)
+ elif kern.arch == 'arm64':
+ return unsigned(0xffffff8000000000)
+ elif kern.arch == 'arm':
+ return unsigned(0x80000000)
+ else:
+ print "vm_min_kernel_and_kext_address(): unknown arch '{:s}'".format(kern.arch)
+ return unsigned(0)
+
def print_hex_data(data, begin_offset=0, desc=""):
    """ print on stdout "hexdump -C < data" like output
        params:
            data - bytearray or array of int where each int < 255
            begin_offset - int offset that should be printed in left column
            desc - str optional description to print on the first line to describe data
    """
    if desc:
        print("{}:".format(desc))
    index = 0
    total_len = len(data)
    hex_buf = ""
    char_buf = ""
    while index < total_len:
        hex_buf += " {:02x}".format(data[index])
        # printable ASCII in the right-hand column, '.' for everything else
        if data[index] < 0x20 or data[index] > 0x7e:
            char_buf += "."
        else:
            char_buf += "{:c}".format(data[index])
        index += 1
        if index % 8 == 0:
            hex_buf += " "     # extra gap between the two 8-byte halves
        if index % 16 == 0:
            print("{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf))
            hex_buf = ""
            char_buf = ""
    # Fix: only emit the trailing partial line when leftover bytes exist;
    # the original unconditionally printed one more line, producing a bogus
    # duplicate/garbled row when len(data) was a multiple of 16 (or zero).
    if char_buf:
        print("{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - len(char_buf), hex_buf, char_buf))
    return
+
+@lldb_command('vm_scan_all_pages')
+def VMScanAllPages(cmd_args=None):
+ """Scans the vm_pages[] array
+ """
+ vm_pages_count = kern.globals.vm_pages_count
+ vm_pages = kern.globals.vm_pages
+
+ free_count = 0
+ local_free_count = 0
+ active_count = 0
+ local_active_count = 0
+ inactive_count = 0
+ speculative_count = 0
+ throttled_count = 0
+ wired_count = 0
+ compressor_count = 0
+ pageable_internal_count = 0
+ pageable_external_count = 0
+ secluded_count = 0
+ secluded_free_count = 0
+ secluded_inuse_count = 0
+
+ i = 0
+ while i < vm_pages_count:
+
+ if i % 10000 == 0:
+ print "{:d}/{:d}...\n".format(i,vm_pages_count)
+
+ m = vm_pages[i]
+
+ internal = 0
+ external = 0
+ m_object_val = _vm_page_unpack_ptr(m.vmp_object)
+
+ if m_object:
+ if m_object.internal:
+ internal = 1
+ else:
+ external = 1
+
+ if m.vmp_wire_count != 0 and m.vmp_local == 0:
+ wired_count = wired_count + 1
+ pageable = 0
+ elif m.vmp_throttled:
+ throttled_count = throttled_count + 1
+ pageable = 0
+ elif m.vmp_active:
+ active_count = active_count + 1
+ pageable = 1
+ elif m.vmp_local:
+ local_active_count = local_active_count + 1
+ pageable = 0
+ elif m.vmp_inactive:
+ inactive_count = inactive_count + 1
+ pageable = 1
+ elif m.vmp_speculative:
+ speculative_count = speculative_count + 1
+ pageable = 0
+ elif m.vmp_free:
+ free_count = free_count + 1
+ pageable = 0
+ elif m.vmp_secluded:
+ secluded_count = secluded_count + 1
+ if m_object == 0:
+ secluded_free_count = secluded_free_count + 1
+ else:
+ secluded_inuse_count = secluded_inuse_count + 1
+ pageable = 0
+ elif m_object == 0 and m.vmp_busy:
+ local_free_count = local_free_count + 1
+ pageable = 0
+ elif m.vmp_compressor:
+ compressor_count = compressor_count + 1
+ pageable = 0
+ else:
+ print "weird page vm_pages[{:d}]?\n".format(i)
+ pageable = 0
+
+ if pageable:
+ if internal:
+ pageable_internal_count = pageable_internal_count + 1
+ else:
+ pageable_external_count = pageable_external_count + 1
+ i = i + 1
+
+ print "vm_pages_count = {:d}\n".format(vm_pages_count)
+
+ print "wired_count = {:d}\n".format(wired_count)
+ print "throttled_count = {:d}\n".format(throttled_count)
+ print "active_count = {:d}\n".format(active_count)
+ print "local_active_count = {:d}\n".format(local_active_count)
+ print "inactive_count = {:d}\n".format(inactive_count)
+ print "speculative_count = {:d}\n".format(speculative_count)
+ print "free_count = {:d}\n".format(free_count)
+ print "local_free_count = {:d}\n".format(local_free_count)
+ print "compressor_count = {:d}\n".format(compressor_count)
+
+ print "pageable_internal_count = {:d}\n".format(pageable_internal_count)
+ print "pageable_external_count = {:d}\n".format(pageable_external_count)
+ print "secluded_count = {:d}\n".format(secluded_count)
+ print "secluded_free_count = {:d}\n".format(secluded_free_count)
+ print "secluded_inuse_count = {:d}\n".format(secluded_inuse_count)
+
+
+@lldb_command('show_all_vm_named_entries')
+def ShowAllVMNamedEntries(cmd_args=None):
+ """ Routine to print a summary listing of all the VM named entries
+ """
+ queue_len = kern.globals.vm_named_entry_count
+ queue_head = kern.globals.vm_named_entry_list
+
+ print 'vm_named_entry_list:{: <#018x} vm_named_entry_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('vm_named_entry_list'),queue_len)
+
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>3s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process")
+ idx = 0
+ for entry in IterateQueue(queue_head, 'struct vm_named_entry *', 'named_entry_list'):
+ idx += 1
+ showmemoryentry(entry, idx, queue_len)
+
+@lldb_command('show_vm_named_entry')
+def ShowVMNamedEntry(cmd_args=None):
+ """ Routine to print a VM named entry
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", ShowMapVMNamedEntry.__doc__
+ return
+ named_entry = kern.GetValueFromAddress(cmd_args[0], 'vm_named_entry_t')
+ showmemoryentry(named_entry, 0, 0)
+
+def showmemoryentry(entry, idx=0, queue_len=0):
+ """ Routine to print out a summary a VM memory entry
+ params:
+ entry - core.value : a object of type 'struct vm_named_entry *'
+ returns:
+ None
+ """
+ show_pager_info = True
+ show_all_shadows = True
+
+ backing = ""
+ if entry.is_sub_map == 1:
+ backing += "SUBMAP"
+ if entry.is_copy == 1:
+ backing += "COPY"
+ if entry.is_sub_map == 0 and entry.is_copy == 0:
+ backing += "OBJECT"
+ prot=""
+ if entry.protection & 0x1:
+ prot += "r"
+ else:
+ prot += "-"
+ if entry.protection & 0x2:
+ prot += "w"
+ else:
+ prot += "-"
+ if entry.protection & 0x4:
+ prot += "x"
+ else:
+ prot += "-"
+ extra_str = ""
+ if hasattr(entry, 'named_entry_alias'):
+ extra_str += " alias={:d}".format(entry.named_entry_alias)
+ if hasattr(entry, 'named_entry_port'):
+ extra_str += " port={:#016x}".format(entry.named_entry_port)
+ print "{:>6d}/{:<6d} {: <#018x} ref={:d} prot={:d}/{:s} type={:s} backing={: <#018x} offset={:#016x} dataoffset={:#016x} size={:#016x}{:s}\n".format(idx,queue_len,entry,entry.ref_count,entry.protection,prot,backing,entry.backing.object,entry.offset,entry.data_offset,entry.size,extra_str)
+ if entry.is_sub_map == 1:
+ showmapvme(entry.backing.map, 0, 0, show_pager_info, show_all_shadows)
+ if entry.is_copy == 1:
+ showmapcopyvme(entry.backing.copy, 0, 0, 0, show_pager_info, show_all_shadows, 0)
+ if entry.is_sub_map == 0 and entry.is_copy == 0:
+ showvmobject(entry.backing.object, entry.offset, entry.size, show_pager_info, show_all_shadows)
+
+
def IterateRBTreeEntry2(element, element_type, field_name1, field_name2):
    """ iterate over a rbtree as defined with RB_HEAD in libkern/tree.h
        params:
            element      - value : Value object for rbh_root
            element_type - str   : Type of the link element
            field_name1  - str   : name of the outer field in the element's structure
            field_name2  - str   : name of the rb_entry field inside field_name1
        returns:
            A generator does not return. It is used for iterating
            value : an object thats of type (element_type). Always a pointer object
    """
    elt = element.__getattr__('rbh_root')
    if type(element_type) == str:
        element_type = gettype(element_type)

    # Walk to find min
    parent = elt
    while unsigned(elt) != 0:
        parent = elt
        elt = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
    elt = parent

    # Now elt is min
    while unsigned(elt) != 0:
        yield elt
        # implementation cribbed from RB_NEXT in libkern/tree.h
        # Fix: the original referenced the undefined name 'fieldname2' here.
        right = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)
        if unsigned(right) != 0:
            # successor is the leftmost node of the right subtree
            elt = right
            left = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
            while unsigned(left) != 0:
                elt = left
                # Fix: the original had the malformed call '__getattr(__field_name2)'.
                left = cast(elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
        else:
            # no right subtree: climb until we come up from a left child.
            # rbe_parent's low bit carries the node color, hence the &~1.
            # avoid using GetValueFromAddress
            addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
            parent = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
            parent = cast(parent, element_type)

            if unsigned(parent) != 0:
                left = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_left'), element_type)
            if (unsigned(parent) != 0) and (unsigned(elt) == unsigned(left)):
                elt = parent
            else:
                if unsigned(parent) != 0:
                    right = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)
                while unsigned(parent) != 0 and (unsigned(elt) == unsigned(right)):
                    elt = parent

                    # avoid using GetValueFromAddress
                    addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
                    parent = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
                    parent = cast(parent, element_type)

                    right = cast(parent.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_right'), element_type)

                # avoid using GetValueFromAddress
                addr = elt.__getattr__(field_name1).__getattr__(field_name2).__getattr__('rbe_parent')&~1
                elt = value(elt.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr)))
                elt = cast(elt, element_type)
+
+
+@lldb_command("showmaprb")
+def ShowMapRB(cmd_args=None):
+ """Routine to print out a VM map's RB tree
+ usage: showmaprb <vm_map>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", ShowMapRB.__doc__
+ return
+ map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
+ print GetVMMapSummary.header
+ print GetVMMapSummary(map_val)
+ vme_rb_root = map_val.hdr.rb_head_store
+ vme_ptr_type = GetType('struct vm_map_entry *')
+ print GetVMEntrySummary.header
+ for vme in IterateRBTreeEntry2(vme_rb_root, 'struct vm_map_entry *', 'store', 'entry'):
+ print GetVMEntrySummary(vme)
+ return None
+
@lldb_command('show_all_owned_objects', 'T')
def ShowAllOwnedObjects(cmd_args=None, cmd_options={}):
    """ Routine to print the list of VM objects owned by each task
        -T: show only ledger-tagged objects
    """
    # Reuse the per-task worker for every task in the system.
    tagged_only = ("-T" in cmd_options)
    for t in kern.tasks:
        ShowTaskOwnedVmObjects(t, tagged_only)
+
+@lldb_command('show_task_owned_objects', 'T')
+def ShowTaskOwnedObjects(cmd_args=None, cmd_options={}):
+ """ Routine to print the list of VM objects owned by the specified task
+ -T: show only ledger-tagged objects
+ """
+ showonlytagged = False
+ if "-T" in cmd_options:
+ showonlytagged = True
+ task = kern.GetValueFromAddress(cmd_args[0], 'task *')
+ ShowTaskOwnedVmObjects(task, showonlytagged)
+
+def ShowTaskOwnedVmObjects(task, showonlytagged=False):
+ """ Routine to print out a summary listing of all the entries in a vm_map
+ params:
+ task - core.value : a object of type 'task *'
+ returns:
+ None
+ """
+ taskobjq_total = lambda:None
+ taskobjq_total.objects = 0
+ taskobjq_total.vsize = 0
+ taskobjq_total.rsize = 0
+ taskobjq_total.wsize = 0
+ taskobjq_total.csize = 0
+ vmo_list_head = task.task_objq
+ vmo_ptr_type = GetType('vm_object *')
+ idx = 0
+ for vmo in IterateQueue(vmo_list_head, vmo_ptr_type, "task_objq"):
+ idx += 1
+ if not showonlytagged or vmo.vo_ledger_tag != 0:
+ if taskobjq_total.objects == 0:
+ print ' \n'
+ print GetTaskSummary.header + ' ' + GetProcSummary.header
+ print GetTaskSummary(task) + ' ' + GetProcSummary(Cast(task.bsd_info, 'proc *'))
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>2s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tg","owner","pid","process")
+ ShowOwnedVmObject(vmo, idx, 0, taskobjq_total)
+ if taskobjq_total.objects != 0:
+ print " total:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(taskobjq_total.objects, taskobjq_total.vsize, taskobjq_total.rsize, taskobjq_total.wsize, taskobjq_total.csize)
+ return None
+
def ShowOwnedVmObject(object, idx, queue_len, taskobjq_total):
    """ Routine to print out a VM object owned by a task
        params:
            object - core.value : a object of type 'struct vm_object *'
            idx - int : position of this object in the owner's list
            queue_len - int : length of that list (0 if unknown)
            taskobjq_total - accumulator with .objects/.vsize/.rsize/.wsize/
                .csize counters, updated in place with this object's usage
        returns:
            None
    """
    page_size = kern.globals.page_size
    # One-letter purgeable-state code for the listing; values 0-3 map to
    # N/V/E/D (presumably nonpurgable/volatile/empty/deny — confirm against
    # the kernel's VM_PURGABLE_* definitions).
    if object.purgable == 0:
        purgable = "N"
    elif object.purgable == 1:
        purgable = "V"
    elif object.purgable == 2:
        purgable = "E"
    elif object.purgable == 3:
        purgable = "D"
    else:
        purgable = "?"
    # No pager means no compressed pages; otherwise ask the compressor pager
    # how many slots are occupied.
    if object.pager == 0:
        compressed_count = 0
    else:
        compressor_pager = Cast(object.pager, 'compressor_pager *')
        compressed_count = compressor_pager.cpgr_num_slots_occupied

    print "{:>6d}/{:<6d} {: <#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:>2d} {: <#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_ledger_tag, object.vo_un2.vou_owner,GetProcPIDForObjectOwner(object.vo_un2.vou_owner),GetProcNameForObjectOwner(object.vo_un2.vou_owner))

    # Fold this object's usage into the caller's running totals (pages).
    taskobjq_total.objects += 1
    taskobjq_total.vsize += object.vo_un1.vou_size/page_size
    taskobjq_total.rsize += object.resident_page_count
    taskobjq_total.wsize += object.wired_page_count
    taskobjq_total.csize += compressed_count
+
def GetProcPIDForObjectOwner(owner):
    """ same as GetProcPIDForTask() but deals with -1 for a disowned object
    """
    # an owner value of -1 marks a disowned object
    is_disowned = (unsigned(Cast(owner, 'int')) == unsigned(int(0xffffffff)))
    if is_disowned:
        return -1
    return GetProcPIDForTask(owner)
+
def GetProcNameForObjectOwner(owner):
    """ same as GetProcNameForTask() but deals with -1 for a disowned object
    """
    # an owner value of -1 marks a disowned object
    DISOWNED_OWNER = unsigned(int(0xffffffff))
    if unsigned(Cast(owner, 'int')) == DISOWNED_OWNER:
        return "<disowned>"
    return GetProcNameForTask(owner)
+
def GetDescForNamedEntry(mem_entry):
    """ Build a short indented description string for a VM named entry.
        params:
            mem_entry - value : a 'vm_named_entry' style object
        returns:
            str - description starting with a newline
    """
    desc = "\n"
    desc += "\t\tmem_entry {:#08x} ref:{:d} offset:{:#08x} size:{:#08x} prot{:d} backing {:#08x}".format(mem_entry, mem_entry.ref_count, mem_entry.offset, mem_entry.size, mem_entry.protection, mem_entry.backing.object)
    # tag the entry with what kind of backing store it has
    if mem_entry.is_sub_map:
        kind = " is_sub_map"
    elif mem_entry.is_copy:
        kind = " is_copy"
    else:
        kind = " is_object"
    return desc + kind