+@lldb_command('show_all_purgeable_objects')
+def ShowAllPurgeableVmObjects(cmd_args=None):
+ """ Routine to print a summary listing of all the purgeable vm objects
+ """
+ print "\n-------------------- VOLATILE OBJECTS --------------------\n"
+ ShowAllPurgeableVolatileVmObjects()
+ print "\n-------------------- NON-VOLATILE OBJECTS --------------------\n"
+ ShowAllPurgeableNonVolatileVmObjects()
+
+@lldb_command('show_all_purgeable_nonvolatile_objects')
+def ShowAllPurgeableNonVolatileVmObjects(cmd_args=None):
+ """ Routine to print a summary listing of all the vm objects in
+ the purgeable_nonvolatile_queue
+ """
+
+ nonvolatile_total = lambda:None
+ nonvolatile_total.objects = 0
+ nonvolatile_total.vsize = 0
+ nonvolatile_total.rsize = 0
+ nonvolatile_total.wsize = 0
+ nonvolatile_total.csize = 0
+ nonvolatile_total.disowned_objects = 0
+ nonvolatile_total.disowned_vsize = 0
+ nonvolatile_total.disowned_rsize = 0
+ nonvolatile_total.disowned_wsize = 0
+ nonvolatile_total.disowned_csize = 0
+
+ queue_len = kern.globals.purgeable_nonvolatile_count
+ queue_head = kern.globals.purgeable_nonvolatile_queue
+
+ print 'purgeable_nonvolatile_queue:{:#018x} purgeable_volatile_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('purgeable_nonvolatile_queue'),queue_len)
+ print 'N:non-volatile V:volatile E:empty D:deny\n'
+
+ print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
+ idx = 0
+ for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
+ idx += 1
+ ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total)
+ print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total.disowned_objects, nonvolatile_total.disowned_vsize, nonvolatile_total.disowned_rsize, nonvolatile_total.disowned_wsize, nonvolatile_total.disowned_csize)
+ print " all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total.objects, nonvolatile_total.vsize, nonvolatile_total.rsize, nonvolatile_total.wsize, nonvolatile_total.csize)
+
+
+def ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total):
+ """ Routine to print out a summary a VM object in purgeable_nonvolatile_queue
+ params:
+ object - core.value : a object of type 'struct vm_object *'
+ returns:
+ None
+ """
+ if object.purgable == 0:
+ purgable = "N"
+ elif object.purgable == 1:
+ purgable = "V"
+ elif object.purgable == 2:
+ purgable = "E"
+ elif object.purgable == 3:
+ purgable = "D"
+ else:
+ purgable = "?"
+ if object.pager == 0:
+ compressed_count = 0
+ else:
+ compressor_pager = Cast(object.pager, 'compressor_pager *')
+ compressed_count = compressor_pager.cpgr_num_slots_occupied
+
+ print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+
+ nonvolatile_total.objects += 1
+ nonvolatile_total.vsize += object.vo_un1.vou_size/kern.globals.page_size
+ nonvolatile_total.rsize += object.resident_page_count
+ nonvolatile_total.wsize += object.wired_page_count
+ nonvolatile_total.csize += compressed_count
+ if object.vo_un2.vou_purgeable_owner == 0:
+ nonvolatile_total.disowned_objects += 1
+ nonvolatile_total.disowned_vsize += object.vo_un1.vou_size/kern.globals.page_size
+ nonvolatile_total.disowned_rsize += object.resident_page_count
+ nonvolatile_total.disowned_wsize += object.wired_page_count
+ nonvolatile_total.disowned_csize += compressed_count
+
+
+@lldb_command('show_all_purgeable_volatile_objects')
+def ShowAllPurgeableVolatileVmObjects(cmd_args=None):
+ """ Routine to print a summary listing of all the vm objects in
+ the purgeable queues
+ """
+ volatile_total = lambda:None
+ volatile_total.objects = 0
+ volatile_total.vsize = 0
+ volatile_total.rsize = 0
+ volatile_total.wsize = 0
+ volatile_total.csize = 0
+ volatile_total.disowned_objects = 0
+ volatile_total.disowned_vsize = 0
+ volatile_total.disowned_rsize = 0
+ volatile_total.disowned_wsize = 0
+ volatile_total.disowned_csize = 0
+
+ purgeable_queues = kern.globals.purgeable_queues
+ print "---------- OBSOLETE\n"
+ ShowPurgeableQueue(purgeable_queues[0], volatile_total)
+ print "\n\n---------- FIFO\n"
+ ShowPurgeableQueue(purgeable_queues[1], volatile_total)
+ print "\n\n---------- LIFO\n"
+ ShowPurgeableQueue(purgeable_queues[2], volatile_total)
+
+ print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total.disowned_objects, volatile_total.disowned_vsize, volatile_total.disowned_rsize, volatile_total.disowned_wsize, volatile_total.disowned_csize)
+ print " all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total.objects, volatile_total.vsize, volatile_total.rsize, volatile_total.wsize, volatile_total.csize)
+ purgeable_count = kern.globals.vm_page_purgeable_count
+ purgeable_wired_count = kern.globals.vm_page_purgeable_wired_count
+ if purgeable_count != volatile_total.rsize or purgeable_wired_count != volatile_total.wsize:
+ mismatch = "<--------- MISMATCH\n"
+ else:
+ mismatch = ""
+ print "vm_page_purgeable_count: resident:{:<10d} wired:{:<10d} {:s}\n".format(purgeable_count, purgeable_wired_count, mismatch)
+
+
+def ShowPurgeableQueue(qhead, volatile_total):
+ print "----- GROUP 0\n"
+ ShowPurgeableGroup(qhead.objq[0], volatile_total)
+ print "----- GROUP 1\n"
+ ShowPurgeableGroup(qhead.objq[1], volatile_total)
+ print "----- GROUP 2\n"
+ ShowPurgeableGroup(qhead.objq[2], volatile_total)
+ print "----- GROUP 3\n"
+ ShowPurgeableGroup(qhead.objq[3], volatile_total)
+ print "----- GROUP 4\n"
+ ShowPurgeableGroup(qhead.objq[4], volatile_total)
+ print "----- GROUP 5\n"
+ ShowPurgeableGroup(qhead.objq[5], volatile_total)
+ print "----- GROUP 6\n"
+ ShowPurgeableGroup(qhead.objq[6], volatile_total)
+ print "----- GROUP 7\n"
+ ShowPurgeableGroup(qhead.objq[7], volatile_total)
+
+def ShowPurgeableGroup(qhead, volatile_total):
+ idx = 0
+ for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
+ if idx == 0:
+# print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s} {:18s} {:>6s} {:<20s} {:s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process","volatilizer","pid","process","")
+ print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
+ idx += 1
+ ShowPurgeableVolatileVmObject(object, idx, volatile_total)
+
+def ShowPurgeableVolatileVmObject(object, idx, volatile_total):
+ """ Routine to print out a summary a VM object in a purgeable queue
+ params:
+ object - core.value : a object of type 'struct vm_object *'
+ returns:
+ None
+ """
+# if int(object.vo_un2.vou_purgeable_owner) != int(object.vo_purgeable_volatilizer):
+# diff=" !="
+# else:
+# diff=" "
+ if object.purgable == 0:
+ purgable = "N"
+ elif object.purgable == 1:
+ purgable = "V"
+ elif object.purgable == 2:
+ purgable = "E"
+ elif object.purgable == 3:
+ purgable = "D"
+ else:
+ purgable = "?"
+ if object.pager == 0:
+ compressed_count = 0
+ else:
+ compressor_pager = Cast(object.pager, 'compressor_pager *')
+ compressed_count = compressor_pager.cpgr_num_slots_occupied
+# print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s} {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
+ print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+ volatile_total.objects += 1
+ volatile_total.vsize += object.vo_un1.vou_size/kern.globals.page_size
+ volatile_total.rsize += object.resident_page_count
+ volatile_total.wsize += object.wired_page_count
+ volatile_total.csize += compressed_count
+ if object.vo_un2.vou_purgeable_owner == 0:
+ volatile_total.disowned_objects += 1
+ volatile_total.disowned_vsize += object.vo_un1.vou_size/kern.globals.page_size
+ volatile_total.disowned_rsize += object.resident_page_count
+ volatile_total.disowned_wsize += object.wired_page_count
+ volatile_total.disowned_csize += compressed_count
+
+
def GetCompressedPagesForObject(obj):
    """ Return the number of compressor slots currently occupied on behalf
        of a VM object, as reported by its compressor pager.
        params:
            obj - core.value : a object of type 'struct vm_object *';
                assumed to have a non-NULL pager — TODO confirm at callers
        returns:
            count of occupied compressor slots
    """
    pager = Cast(obj.pager, 'compressor_pager_t')
    # The pager maintains this count directly; no need to walk the slot
    # arrays by hand.
    return pager.cpgr_num_slots_occupied
+
@lldb_command('showallvme', "-PS")
def ShowAllVME(cmd_args=None, cmd_options={}):
    """ Routine to print a summary listing of all the vm map entries
        Go Through each task in system and show the vm info
    """
    # -P: also resolve pager information; -S: show every shadow object.
    show_pager_info = "-P" in cmd_options
    show_all_shadows = "-S" in cmd_options
    for task in kern.tasks:
        ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
+
+def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
+ """ Routine to print out a summary listing of all the entries in a vm_map
+ params:
+ task - core.value : a object of type 'task *'
+ returns:
+ None
+ """
+ print "vm_map entries for task " + hex(task)
+ print GetTaskSummary.header
+ print GetTaskSummary(task)
+ if not task.map:
+ print "Task {0: <#020x} has map = 0x0"
+ return None
+ showmapvme(task.map, show_pager_info, show_all_shadows)
+
+@lldb_command("showmapvme", "PS")
+def ShowMapVME(cmd_args=None, cmd_options={}):
+ """Routine to print out info about the specified vm_map and its vm entries
+ usage: showmapvme <vm_map>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ print "Invalid argument.", ShowMap.__doc__
+ return
+ show_pager_info = False
+ show_all_shadows = False
+ if "-P" in cmd_options:
+ show_pager_info = True
+ if "-S" in cmd_options:
+ show_all_shadows = True
+ map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
+ showmapvme(map, show_pager_info, show_all_shadows)
+
def showmapvme(map, show_pager_info, show_all_shadows):
    """ Print a vm_map header followed by one line per vm_map_entry,
        optionally descending each entry's VM object shadow chain.
        params:
            map - core.value : a object of type 'vm_map_t'
            show_pager_info - bool : if True, resolve pager info per object
                (compressed page count or backing vnode path)
            show_all_shadows - bool : if True, print every object in a
                shadow chain instead of just its first and last objects
        returns:
            None
    """
    vnode_pager_ops = kern.globals.vnode_pager_ops
    vnode_pager_ops_addr = unsigned(addressof(vnode_pager_ops))
    rsize = 0
    if map.pmap != 0:
        rsize = int(map.pmap.stats.resident_count)
    # NOTE(review): sizes are converted to pages with a hard-coded 4096 —
    # presumably assumes a 4K page size; confirm for other configurations.
    print "{:<18s} {:<18s} {:<18s} {:>10s} {:>10s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
    print "{:#018x} {:#018x} {:#018x} {:>10d} {:>10d} {:#018x}:{:#018x}".format(map,map.pmap,(map.size/4096),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
    vme_list_head = map.hdr.links
    vme_ptr_type = GetType('vm_map_entry *')
    print "{:<18s} {:>18s}:{:<18s} {:>10s} {:>3s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag","prot&flags","object","offset")
    last_end = map.hdr.links.start
    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
        # Address gaps between consecutive entries are shown as a dashed
        # pseudo-entry spanning the hole.
        if vme.links.start != last_end:
            print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,vme.links.start,(vme.links.start-last_end)/4096)
        last_end = vme.links.end
        vme_flags = ""
        if vme.is_sub_map:
            vme_flags += "s"
        print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d} {:1d}{:1d}{:<8s} {:#018x} {:<#18x}".format(vme,vme.links.start,vme.links.end,(vme.links.end-vme.links.start)/4096,vme.alias,vme.protection,vme.max_protection,vme_flags,vme.object.vm_object,vme.offset)
        # Only walk the object chain when pager info was requested and the
        # entry maps a real VM object (not a submap).
        if show_pager_info and vme.is_sub_map == 0 and vme.object.vm_object != 0:
            object = vme.object.vm_object
        else:
            object = 0
        depth = 0
        offset = unsigned(vme.offset)
        size = vme.links.end - vme.links.start
        while object != 0:
            depth += 1
            # Without -S, skip intermediate shadow objects: only the first
            # object and the end of the chain are printed, while the offset
            # keeps accumulating through the skipped levels.
            if show_all_shadows == False and depth != 1 and object.shadow != 0:
                offset += unsigned(object.vo_un2.vou_shadow_offset)
                object = object.shadow
                continue
            # Single-letter tag for the object's copy strategy; values other
            # than 0/2/4 are printed numerically.
            if object.copy_strategy == 0:
                copy_strategy="N"
            elif object.copy_strategy == 2:
                copy_strategy="D"
            elif object.copy_strategy == 4:
                copy_strategy="S"
            else:
                copy_strategy=str(object.copy_strategy)
            if object.internal:
                internal = "internal"
            else:
                internal = "external"
            pager_string = ""
            if show_pager_info and object.pager != 0:
                if object.internal:
                    # Internal objects: report how many pages the compressor
                    # holds for this object.
                    pager_string = "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
                else:
                    # External objects may be vnode-backed; verify the pager
                    # ops vtable before trusting the cast.
                    vnode_pager = Cast(object.pager,'vnode_pager *')
                    if unsigned(vnode_pager.pager_ops) == vnode_pager_ops_addr:
                        pager_string = "-> " + GetVnodePath(vnode_pager.vnode_handle)
            print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,internal,unsigned(object.vo_un1.vou_size)/4096,object.resident_page_count,object.wired_page_count,pager_string)
#            print " #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/4096,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
            offset += unsigned(object.vo_un2.vou_shadow_offset)
            object = object.shadow
    return None
+
def FindVMEntriesForVnode(task, vn):
    """ returns an array of vme that have the vnode set to defined vnode
        each entry in array is of format (vme, start_addr, end_address, protection)
    """
    retval = []
    vmmap = task.map
    pmap = vmmap.pmap
    pager_ops_addr = unsigned(addressof(kern.globals.vnode_pager_ops))
    debuglog("pager_ops_addr %s" % hex(pager_ops_addr))

    # A map without a pmap has nothing mapped; return the empty list.
    if unsigned(pmap) == 0:
        return retval
    vme_list_head = vmmap.hdr.links
    # NOTE(review): lowercase 'gettype' here vs 'GetType' used in showmapvme
    # above — confirm both helpers exist in this macro environment.
    vme_ptr_type = gettype('vm_map_entry *')
    for vme in IterateQueue(vme_list_head, vme_ptr_type, 'links'):
        #print vme
        # Skip submaps and entries with no backing VM object.
        if unsigned(vme.is_sub_map) == 0 and unsigned(vme.object.vm_object) != 0:
            obj = vme.object.vm_object
        else:
            continue

        # Walk the shadow chain: the vnode pager may sit behind one or more
        # shadow objects rather than on the top-level object.
        while obj != 0:
            if obj.pager != 0:
                if obj.internal:
                    # Internal (compressor-backed) objects can't match a vnode.
                    pass
                else:
                    vn_pager = Cast(obj.pager, 'vnode_pager *')
                    # Match by pager ops vtable and the vnode handle itself.
                    if unsigned(vn_pager.pager_ops) == pager_ops_addr and unsigned(vn_pager.vnode_handle) == unsigned(vn):
                        retval.append((vme, unsigned(vme.links.start), unsigned(vme.links.end), unsigned(vme.protection)))
            obj = obj.shadow
    return retval
+
+@lldb_command('showtaskloadinfo')
+def ShowTaskLoadInfo(cmd_args=None, cmd_options={}):
+ """ Print the load address and uuid for the process
+ Usage: (lldb)showtaskloadinfo <task_t>
+ """
+ if not cmd_args:
+ raise ArgumentError("Insufficient arguments")
+ t = kern.GetValueFromAddress(cmd_args[0], 'struct task *')
+ print_format = "0x{0:x} - 0x{1:x} {2: <50s} (??? - ???) <{3: <36s}> {4: <50s}"
+ p = Cast(t.bsd_info, 'struct proc *')
+ uuid = p.p_uuid
+ uuid_out_string = "{a[0]:02X}{a[1]:02X}{a[2]:02X}{a[3]:02X}-{a[4]:02X}{a[5]:02X}-{a[6]:02X}{a[7]:02X}-{a[8]:02X}{a[9]:02X}-{a[10]:02X}{a[11]:02X}{a[12]:02X}{a[13]:02X}{a[14]:02X}{a[15]:02X}".format(a=uuid)
+ filepath = GetVnodePath(p.p_textvp)
+ libname = filepath.split('/')[-1]
+ #print "uuid: %s file: %s" % (uuid_out_string, filepath)
+ mappings = FindVMEntriesForVnode(t, p.p_textvp)
+ load_addr = 0
+ end_addr = 0
+ for m in mappings:
+ if m[3] == 5:
+ load_addr = m[1]
+ end_addr = m[2]
+ #print "Load address: %s" % hex(m[1])
+ print print_format.format(load_addr, end_addr, libname, uuid_out_string, filepath)
+ return None
+