apple/xnu.git (xnu-3248.50.21) - tools/lldbmacros/memory.py
index ff5ce0ae22f06cb739d067636b932c5253c358bc..16604d864e2d8a1069ecbd1e43cc9a7f3ff25ee0 100644
@@ -128,8 +128,8 @@ def ShowMemoryStatus(cmd_args=None):
 # Macro: zprint
 
 @lldb_type_summary(['zone','zone_t'])
-@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}({:>6s} {:>6s} {:>6s}) {:^14s} {:<20s}".format(
-'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ELT_SZ', 'ALLOC', 'ELTS', 'PGS', 'SLK', 'FLAGS', 'NAME'))
+@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}({:>6s} {:>6s} {:>6s}) {:^15s} {:<20s}".format(
+'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ELT_SZ', 'ALLOC', 'ELTS', 'PGS', 'WASTE', 'FLAGS', 'NAME'))
 def GetZoneSummary(zone):
     """ Summarize a zone with important information. See help zprint for description of each field
         params: 
@@ -138,28 +138,39 @@ def GetZoneSummary(zone):
           str - summary of the zone
     """
     out_string = ""
-    format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d}  {markings} {name:s} ' 
-    pagesize = 4096
+    format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d}  {markings} {name:s} '
+    pagesize = kern.globals.page_size
     
-    free_elements = (zone.cur_size / zone.elem_size) - zone.count
+    free_elements = zone.countfree
     free_size = free_elements * zone.elem_size
     
-    alloc_count = zone.alloc_size / zone.elem_size
     alloc_pages = zone.alloc_size / pagesize
-    alloc_slack = zone.alloc_size % zone.elem_size
+    if zone.use_page_list :
+        metadata_size = sizeof('struct zone_page_metadata')
+        metadata_offset = metadata_size
+        if ((metadata_size % zone.elem_size) != 0) :
+            metadata_offset += zone.elem_size - (metadata_size % zone.elem_size)
+        alloc_count = ((pagesize - metadata_offset) / zone.elem_size) * alloc_pages
+        alloc_waste = metadata_offset * alloc_pages
+    else :
+        alloc_count = zone.alloc_size / zone.elem_size
+        alloc_waste = zone.alloc_size  % zone.elem_size
+
     marks = [
-            ["collectable",        "C"],
-            ["expandable",         "X"],
-            ["noencrypt",          "$"],
-            ["caller_acct",        "@"],
-            ["exhaustible",        "H"],
-            ["allows_foreign",     "F"],
-            ["async_prio_refill",  "R"],
-            ["no_callout",         "O"],
-            ["zleak_on",           "L"],
-            ["doing_alloc",        "A"],
-            ["waiting",            "W"],
-            ["doing_gc",           "G"]
+            ["collectable",                 "C"],
+            ["expandable",                  "X"],
+            ["noencrypt",                   "$"],
+            ["caller_acct",                 "@"],
+            ["exhaustible",                 "H"],
+            ["allows_foreign",              "F"],
+            ["async_prio_refill",           "R"],
+            ["no_callout",                  "O"],
+            ["zleak_on",                    "L"],
+            ["doing_alloc_without_vm_priv", "A"],
+            ["doing_alloc_with_vm_priv",    "S"],
+            ["waiting",                     "W"],
+            ["doing_gc",                    "G"],
+            ["use_page_list",               "P"]
             ]
     if kern.arch == 'x86_64':
         marks.append(["gzalloc_exempt",     "M"])
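For reference, a minimal sketch of the arithmetic the new use_page_list branch performs, using made-up numbers (a 4 KB page, an 80-byte element, a two-page allocation chunk); none of these values come from a live kernel:

    # Illustrative only: recompute alloc_count/alloc_waste the way the
    # use_page_list branch above does, with hypothetical sizes.
    pagesize = 4096             # assumed page size
    elem_size = 80              # hypothetical zone element size
    alloc_size = 2 * pagesize   # hypothetical allocation chunk
    alloc_pages = alloc_size / pagesize
    metadata_size = 24          # stand-in for sizeof(struct zone_page_metadata)
    metadata_offset = metadata_size
    if (metadata_size % elem_size) != 0:
        # round the per-page metadata region up to the next element boundary
        metadata_offset += elem_size - (metadata_size % elem_size)
    alloc_count = ((pagesize - metadata_offset) / elem_size) * alloc_pages
    alloc_waste = metadata_offset * alloc_pages
    print alloc_count, alloc_waste   # 100 elements per chunk, 160 bytes of waste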
@@ -174,7 +185,7 @@ def GetZoneSummary(zone):
     out_string += format_string.format(zone, zone.cur_size, zone.page_count,
                     zone.count, free_elements, free_size,
                     zone.elem_size, zone.alloc_size, alloc_count,
-                    alloc_pages, alloc_slack, name = zone.zone_name, markings=markings)
+                    alloc_pages, alloc_waste, name = zone.zone_name, markings=markings)
     
     if zone.exhaustible :
             out_string += "(max: {:d})".format(zone.max_size)
@@ -196,10 +207,12 @@ def Zprint(cmd_args=None):
         R - will be refilled when below low water mark
         O - does not allow refill callout to fill zone on noblock allocation
         N - zone requires alignment (avoids padding this zone for debugging)
-        A - currently trying to allocate more backing memory from kernel_memory_allocate
+        A - currently trying to allocate more backing memory from kernel_memory_allocate without VM priv
+        S - currently trying to allocate more backing memory from kernel_memory_allocate with VM priv
         W - another thread is waiting for more memory
         L - zone is being monitored by zleaks
         G - currently running GC
+        P - uses zone_page_metadata
     """
     global kern
     print GetZoneSummary.header
@@ -839,6 +852,49 @@ def ShowIOAllocations(cmd_args=None):
 # EndMacro: showioalloc    
 
 
+# Macro: showselectmem
+@lldb_command('showselectmem', "S:")
+def ShowSelectMem(cmd_args=None, cmd_options={}):
+    """ Show memory cached by threads on calls to select.
+
+        usage: showselectmem [-v]
+            -v        : print each thread's memory
+                        (one line per thread with non-zero select memory)
+            -S {addr} : Find the thread whose thread-local select set
+                        matches the given address
+    """
+    verbose = False
+    opt_wqs = 0
+    if config['verbosity'] > vHUMAN:
+        verbose = True
+    if "-S" in cmd_options:
+        opt_wqs = unsigned(kern.GetValueFromAddress(cmd_options["-S"], 'uint64_t *'))
+        if opt_wqs == 0:
+            raise ArgumentError("Invalid waitq set address: {:s}".format(cmd_options["-S"]))
+    selmem = 0
+    if verbose:
+        print "{:18s} {:10s} {:s}".format('Task', 'Thread ID', 'Select Mem (bytes)')
+    for t in kern.tasks:
+        for th in IterateQueue(t.threads, 'thread *', 'task_threads'):
+            uth = Cast(th.uthread, 'uthread *');
+            wqs = 0
+            if hasattr(uth, 'uu_allocsize'): # old style
+                thmem = uth.uu_allocsize
+                wqs = uth.uu_wqset
+            elif hasattr(uth, 'uu_wqstate_sz'): # new style
+                thmem = uth.uu_wqstate_sz
+                wqs = uth.uu_wqset
+            else:
+                print "What kind of uthread is this?!"
+                return
+            if opt_wqs and opt_wqs == unsigned(wqs):
+                print "FOUND: {:#x} in thread: {:#x} ({:#x})".format(opt_wqs, unsigned(th), unsigned(th.thread_id))
+            if verbose and thmem > 0:
+                print "{:<#18x} {:<#10x} {:d}".format(unsigned(t), unsigned(th.thread_id), thmem)
+            selmem += thmem
+    print '-'*40
+    print "Total: {:d} bytes ({:d} kbytes)".format(selmem, selmem/1024)
+# Endmacro: showselectmem
  
  
 # Macro: showtaskvme
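The new showselectmem macro probes which uthread layout the target kernel has (hasattr on uu_allocsize vs. uu_wqstate_sz) before reading the per-thread select memory. A minimal sketch of that probing pattern, with GetThreadSelectMem as a hypothetical helper that is not part of memory.py:

    def GetThreadSelectMem(uth):
        # Check which field this kernel's debug info actually carries.
        if hasattr(uth, 'uu_allocsize'):       # older uthread layout
            return unsigned(uth.uu_allocsize)
        elif hasattr(uth, 'uu_wqstate_sz'):    # newer uthread layout
            return unsigned(uth.uu_wqstate_sz)
        return 0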
@@ -846,6 +902,8 @@ def ShowIOAllocations(cmd_args=None):
 def ShowTaskVmeHelper(cmd_args=None, cmd_options={}):
     """ Display a summary list of the specified vm_map's entries
         Usage: showtaskvme <task address>  (ex. showtaskvme 0x00ataskptr00 )
+        Use -S flag to show VM object shadow chains
+        Use -P flag to show pager info (mapped file, compressed pages, ...)
     """
     show_pager_info = False
     show_all_shadows = False
@@ -904,6 +962,7 @@ def ShowTaskVM(cmd_args=None):
 def ShowAllVMStats(cmd_args=None):
     """ Print a summary of vm statistics in a table format
     """
+    page_size = kern.globals.page_size
     vmstats = lambda:None
     vmstats.wired_count = 0
     vmstats.resident_count = 0
@@ -951,7 +1010,7 @@ def ShowAllVMStats(cmd_args=None):
         if vmstats.new_resident_count +vmstats.reusable != vmstats.resident_count:
             vmstats.error += '*'
 
-        print entry_format.format(p=proc, m=vmmap, vsize=(unsigned(vmmap.size) >> 12), t=task, s=vmstats)
+        print entry_format.format(p=proc, m=vmmap, vsize=(unsigned(vmmap.size) / page_size), t=task, s=vmstats)
         
 
 def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
@@ -1015,13 +1074,16 @@ def GetVMMapSummary(vmmap):
     vm_size = uint64_t(vmmap.size).value
     resident_pages = 0
     if vmmap.pmap != 0: resident_pages = int(vmmap.pmap.stats.resident_count)
-    out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, vmmap.first_free)
+    first_free = 0
+    if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s.first_free
+    out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free)
     return out_string
 
 @lldb_type_summary(['vm_map_entry'])
 @header("{0: <20s} {1: <20s} {2: <5s} {3: >7s} {4: <20s} {5: <20s}".format("entry", "start", "prot", "#page", "object", "offset"))
 def GetVMEntrySummary(vme):
     """ Display vm entry specific information. """
+    page_size = kern.globals.page_size
     out_string = ""
     format_string = "{0: <#020x} {1: <#20x} {2: <1x}{3: <1x}{4: <3s} {5: >7d} {6: <#020x} {7: <#020x}"
     vme_protection = int(vme.protection)
@@ -1031,8 +1093,8 @@ def GetVMEntrySummary(vme):
         vme_extra_info_str +="s"
     elif int(vme.needs_copy) != 0 :
         vme_extra_info_str +="n"
-    num_pages = (unsigned(vme.links.end) - unsigned(vme.links.start)) >> 12
-    out_string += format_string.format(vme, vme.links.start, vme_protection, vme_max_protection, vme_extra_info_str, num_pages, vme.object.vm_object, vme.offset)
+    num_pages = (unsigned(vme.links.end) - unsigned(vme.links.start)) / page_size
+    out_string += format_string.format(vme, vme.links.start, vme_protection, vme_max_protection, vme_extra_info_str, num_pages, vme.vme_object.vmo_object, vme.vme_offset)
     return out_string
 
 # EndMacro: showtaskvme
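The switch from a hard-coded ">> 12" to a division by kern.globals.page_size matters on targets whose kernel page size is not 4 KB. A worked example with hypothetical numbers:

    size = 65536          # hypothetical mapping size in bytes
    print size >> 12      # 16 "pages", correct only if the page size is 4 KB
    print size / 16384    # 4 pages on a 16 KB-page kernel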
@@ -1509,6 +1571,8 @@ def GetVnodeLock(lockf):
         vnode_lock_output += ("{: <8s}").format('prov')
     if lockf_flags & 0x10:
         vnode_lock_output += ("{: <4s}").format('W')
+    if lockf_flags & 0x400:
+        vnode_lock_output += ("{: <8s}").format('ofd')
     else:
         vnode_lock_output += ("{: <4s}").format('.')
 
@@ -2114,6 +2178,7 @@ def ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total):
         returns:
             None
     """
+    page_size = kern.globals.page_size
     if object.purgable == 0:
         purgable = "N"
     elif object.purgable == 1:
@@ -2130,16 +2195,16 @@ def ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total):
         compressor_pager = Cast(object.pager, 'compressor_pager *')
         compressed_count = compressor_pager.cpgr_num_slots_occupied
 
-    print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d}   {:#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+    print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d}   {:#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
 
     nonvolatile_total.objects += 1
-    nonvolatile_total.vsize += object.vo_un1.vou_size/kern.globals.page_size
+    nonvolatile_total.vsize += object.vo_un1.vou_size/page_size
     nonvolatile_total.rsize += object.resident_page_count
     nonvolatile_total.wsize += object.wired_page_count
     nonvolatile_total.csize += compressed_count
     if object.vo_un2.vou_purgeable_owner == 0:
         nonvolatile_total.disowned_objects += 1
-        nonvolatile_total.disowned_vsize += object.vo_un1.vou_size/kern.globals.page_size
+        nonvolatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
         nonvolatile_total.disowned_rsize += object.resident_page_count
         nonvolatile_total.disowned_wsize += object.wired_page_count
         nonvolatile_total.disowned_csize += compressed_count
@@ -2219,6 +2284,7 @@ def ShowPurgeableVolatileVmObject(object, idx, volatile_total):
 #        diff=" !="
 #    else:
 #        diff="  "
+    page_size = kern.globals.page_size
     if object.purgable == 0:
         purgable = "N"
     elif object.purgable == 1:
@@ -2234,16 +2300,16 @@ def ShowPurgeableVolatileVmObject(object, idx, volatile_total):
     else:
         compressor_pager = Cast(object.pager, 'compressor_pager *')
         compressed_count = compressor_pager.cpgr_num_slots_occupied
-#    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}   {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
-    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d}   {:#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/kern.globals.page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
+#    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}   {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
+    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d}   {:#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
     volatile_total.objects += 1
-    volatile_total.vsize += object.vo_un1.vou_size/kern.globals.page_size
+    volatile_total.vsize += object.vo_un1.vou_size/page_size
     volatile_total.rsize += object.resident_page_count
     volatile_total.wsize += object.wired_page_count
     volatile_total.csize += compressed_count
     if object.vo_un2.vou_purgeable_owner == 0:
         volatile_total.disowned_objects += 1
-        volatile_total.disowned_vsize += object.vo_un1.vou_size/kern.globals.page_size
+        volatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
         volatile_total.disowned_rsize += object.resident_page_count
         volatile_total.disowned_wsize += object.wired_page_count
         volatile_total.disowned_csize += compressed_count
@@ -2278,20 +2344,6 @@ def GetCompressedPagesForObject(obj):
 #           compressor_slot += 1
 #   return compressed_pages
 
-@lldb_command('showallvme', "-PS")
-def ShowAllVME(cmd_args=None, cmd_options={}):
-    """ Routine to print a summary listing of all the vm map entries
-        Go Through each task in system and show the vm info
-    """
-    show_pager_info = False
-    show_all_shadows = False
-    if "-P" in cmd_options:
-        show_pager_info = True
-    if "-S" in cmd_options:
-        show_all_shadows = True
-    for task in kern.tasks:
-        ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
-
 def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
     """  Routine to print out a summary listing of all the entries in a vm_map
         params: 
@@ -2311,6 +2363,8 @@ def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
 def ShowMapVME(cmd_args=None, cmd_options={}):
     """Routine to print out info about the specified vm_map and its vm entries
         usage: showmapvme <vm_map>
+        Use -S flag to show VM object shadow chains
+        Use -P flag to show pager info (mapped file, compressed pages, ...)
     """
     if cmd_args == None or len(cmd_args) < 1:
         print "Invalid argument.", ShowMap.__doc__
@@ -2325,32 +2379,78 @@ def ShowMapVME(cmd_args=None, cmd_options={}):
     showmapvme(map, show_pager_info, show_all_shadows)
 
 def showmapvme(map, show_pager_info, show_all_shadows):
+    page_size = kern.globals.page_size
     vnode_pager_ops = kern.globals.vnode_pager_ops
     vnode_pager_ops_addr = unsigned(addressof(vnode_pager_ops))
     rsize = 0
     if map.pmap != 0:
         rsize = int(map.pmap.stats.resident_count)
-    print "{:<18s} {:<18s} {:<18s} {:>10s} {:>10s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
-    print "{:#018x} {:#018x} {:#018x} {:>10d} {:>10d} {:#018x}:{:#018x}".format(map,map.pmap,(map.size/4096),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
+    print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
+    print "{:#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
     vme_list_head = map.hdr.links
     vme_ptr_type = GetType('vm_map_entry *')
-    print "{:<18s} {:>18s}:{:<18s} {:>10s} {:>3s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag","prot&flags","object","offset")
-    last_end = map.hdr.links.start
+    print "{:<18s} {:>18s}:{:<18s} {:>10s} {:<8s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag.kmod","prot&flags","object","offset")
+    last_end = unsigned(map.hdr.links.start)
     for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
-        if vme.links.start != last_end:
-            print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,vme.links.start,(vme.links.start-last_end)/4096)
-        last_end = vme.links.end
+        if unsigned(vme.links.start) != last_end:
+            print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,vme.links.start,(unsigned(vme.links.start) - last_end)/page_size)
+        last_end = unsigned(vme.links.end)
+        size = unsigned(vme.links.end) - unsigned(vme.links.start)
+        object = vme.vme_object.vmo_object
+        if object == 0:
+            object_str = "{:<#018x}".format(object)
+        elif vme.is_sub_map:
+            if object == kern.globals.bufferhdr_map:
+                object_str = "BUFFERHDR_MAP"
+            elif object == kern.globals.mb_map:
+                object_str = "MB_MAP"
+            elif object == kern.globals.bsd_pageable_map:
+                object_str = "BSD_PAGEABLE_MAP"
+            elif object == kern.globals.ipc_kernel_map:
+                object_str = "IPC_KERNEL_MAP"
+            elif object == kern.globals.ipc_kernel_copy_map:
+                object_str = "IPC_KERNEL_COPY_MAP"
+            elif object == kern.globals.kalloc_map:
+                object_str = "KALLOC_MAP"
+            elif object == kern.globals.zone_map:
+                object_str = "ZONE_MAP"
+            elif hasattr(kern.globals, 'gzalloc_map') and object == kern.globals.gzalloc_map:
+                object_str = "GZALLOC_MAP"
+            elif hasattr(kern.globals, 'g_kext_map') and object == kern.globals.g_kext_map:
+                object_str = "G_KEXT_MAP"
+            elif hasattr(kern.globals, 'vector_upl_submap') and object == kern.globals.vector_upl_submap:
+                object_str = "VECTOR_UPL_SUBMAP"
+            else:
+                object_str = "submap:{:<#018x}".format(object)
+        else:
+            if object == kern.globals.kernel_object:
+                object_str = "KERNEL_OBJECT"
+            elif object == kern.globals.vm_submap_object:
+                object_str = "VM_SUBMAP_OBJECT"
+            elif object == kern.globals.compressor_object:
+                object_str = "COMPRESSOR_OBJECT"
+            else:
+                object_str = "{:<#018x}".format(object)
+        offset = unsigned(vme.vme_offset) & ~0xFFF
+        tag = unsigned(vme.vme_offset & 0xFFF)
         vme_flags = ""
         if vme.is_sub_map:
             vme_flags += "s"
-        print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d} {:1d}{:1d}{:<8s} {:#018x} {:<#18x}".format(vme,vme.links.start,vme.links.end,(vme.links.end-vme.links.start)/4096,vme.alias,vme.protection,vme.max_protection,vme_flags,vme.object.vm_object,vme.offset)
-        if show_pager_info and vme.is_sub_map == 0 and vme.object.vm_object != 0:
-            object = vme.object.vm_object
+        if vme.needs_copy:
+            vme_flags += "n"
+        if vme.is_sub_map and vme.use_pmap:
+            vme_flags += "p"
+        tagstr = ""
+        if map.pmap == kern.globals.kernel_pmap:
+            xsite = Cast(kern.globals.vm_allocation_sites[tag],'OSKextAccount *')
+            if xsite and xsite.site.flags & 2:
+                tagstr = ".{:<3d}".format(xsite.loadTag)
+        print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d}{:<4s}  {:1d}{:1d}{:<8s} {:<18s} {:<#18x}".format(vme,vme.links.start,vme.links.end,(unsigned(vme.links.end)-unsigned(vme.links.start))/page_size,tag,tagstr,vme.protection,vme.max_protection,vme_flags,object_str,offset)
+        if (show_pager_info or show_all_shadows) and vme.is_sub_map == 0 and vme.vme_object.vmo_object != 0:
+            object = vme.vme_object.vmo_object
         else:
             object = 0
         depth = 0
-        offset = unsigned(vme.offset)
-        size = vme.links.end - vme.links.start
         while object != 0:
             depth += 1
             if show_all_shadows == False and depth != 1 and object.shadow != 0:
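The submap-name chain added above could also be written as a table plus getattr, which treats optional globals such as gzalloc_map uniformly; this is only a sketch for comparison, not the committed code:

    KNOWN_SUBMAPS = [
        ('bufferhdr_map', 'BUFFERHDR_MAP'), ('mb_map', 'MB_MAP'),
        ('bsd_pageable_map', 'BSD_PAGEABLE_MAP'), ('ipc_kernel_map', 'IPC_KERNEL_MAP'),
        ('ipc_kernel_copy_map', 'IPC_KERNEL_COPY_MAP'), ('kalloc_map', 'KALLOC_MAP'),
        ('zone_map', 'ZONE_MAP'), ('gzalloc_map', 'GZALLOC_MAP'),
        ('g_kext_map', 'G_KEXT_MAP'), ('vector_upl_submap', 'VECTOR_UPL_SUBMAP'),
    ]

    def GetSubmapName(submap):
        # Return a friendly label for well-known kernel submaps, else the address.
        for global_name, label in KNOWN_SUBMAPS:
            if hasattr(kern.globals, global_name) and submap == getattr(kern.globals, global_name):
                return label
        return "submap:{:<#018x}".format(submap)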
@@ -2370,19 +2470,185 @@ def showmapvme(map, show_pager_info, show_all_shadows):
             else:
                 internal = "external"
             pager_string = ""
-            if show_pager_info and object.pager != 0:
+            pager = object.pager
+            if show_pager_info and pager != 0:
                 if object.internal:
                     pager_string = "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
+                elif unsigned(pager.mo_pager_ops) == vnode_pager_ops_addr:
+                    vnode_pager = Cast(pager,'vnode_pager *')
+                    pager_string = "-> " + GetVnodePath(vnode_pager.vnode_handle)
                 else:
-                    vnode_pager = Cast(object.pager,'vnode_pager *')
-                    if unsigned(vnode_pager.pager_ops) == vnode_pager_ops_addr:
-                        pager_string = "-> " + GetVnodePath(vnode_pager.vnode_handle)
-            print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,internal,unsigned(object.vo_un1.vou_size)/4096,object.resident_page_count,object.wired_page_count,pager_string)
-#            print "        #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/4096,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
+                    pager_string = "-> {:s}:{:#018x}".format(pager.mo_pager_ops.memory_object_pager_name, pager.mo_pager_ops)
+            print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,internal,unsigned(object.vo_un1.vou_size)/page_size,object.resident_page_count,object.wired_page_count,pager_string)
+#            print "        #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/page_size,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
             offset += unsigned(object.vo_un2.vou_shadow_offset)
             object = object.shadow
+    if unsigned(map.hdr.links.end) > last_end:
+        print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,map.hdr.links.end,(unsigned(map.hdr.links.end) - last_end)/page_size)
     return None
 
+def CountMapTags(map, tagcounts, slow):
+    page_size = unsigned(kern.globals.page_size)
+    vme_list_head = map.hdr.links
+    vme_ptr_type = GetType('vm_map_entry *')
+    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
+        object = vme.vme_object.vmo_object
+        tag = vme.vme_offset & 0xFFF
+        if object == kern.globals.kernel_object:
+            count = 0
+            if not slow:
+                count = unsigned(vme.links.end - vme.links.start) / page_size
+            else:
+                addr = unsigned(vme.links.start)
+                while addr < unsigned(vme.links.end):
+                    hash_id = _calc_vm_page_hash(object, addr)
+                    page_list = kern.globals.vm_page_buckets[hash_id].page_list
+                    page = _vm_page_unpack_ptr(page_list)
+                    while (page != 0):
+                        vmpage = kern.GetValueFromAddress(page, 'vm_page_t')
+                        if (addr == unsigned(vmpage.offset)) and (object == vmpage.object):
+                            if (not vmpage.local) and (vmpage.wire_count > 0):
+                                count += 1
+                            break
+                        page = _vm_page_unpack_ptr(vmpage.next_m)
+                    addr += page_size
+            tagcounts[tag] += count
+        elif vme.is_sub_map:
+            CountMapTags(Cast(object,'vm_map_t'), tagcounts, slow)
+    return None
+
+def CountWiredObject(object, tagcounts):
+    tagcounts[unsigned(object.wire_tag)] += object.wired_page_count
+    return None
+
+def CountWiredPurgeableGroup(qhead, tagcounts):
+    for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
+        CountWiredObject(object, tagcounts)
+    return None
+
+def CountWiredPurgeableQueue(qhead, tagcounts):
+    CountWiredPurgeableGroup(qhead.objq[0], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[1], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[2], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[3], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[4], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[5], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[6], tagcounts)
+    CountWiredPurgeableGroup(qhead.objq[7], tagcounts)
+
+def GetKmodIDName(kmod_id):
+    kmod_val = kern.globals.kmod
+    for kmod in IterateLinkedList(kmod_val, 'next'):
+        if (kmod.id == kmod_id):
+            return "{:<50s}".format(kmod.name)
+    return "??"
+
+def GetVMKernName(tag):
+    if 1 == tag:
+        return "VM_KERN_MEMORY_OSFMK"
+    elif 2 == tag:
+        return "VM_KERN_MEMORY_BSD"
+    elif 3 == tag:
+        return "VM_KERN_MEMORY_IOKIT"
+    elif 4 == tag:
+        return "VM_KERN_MEMORY_LIBKERN"
+    elif 5 == tag:
+        return "VM_KERN_MEMORY_OSKEXT"
+    elif 6 == tag:
+        return "VM_KERN_MEMORY_KEXT"
+    elif 7 == tag:
+        return "VM_KERN_MEMORY_IPC"
+    elif 8 == tag:
+        return "VM_KERN_MEMORY_STACK"
+    elif 9 == tag:
+        return "VM_KERN_MEMORY_CPU"
+    elif 10 == tag:
+        return "VM_KERN_MEMORY_PMAP"
+    elif 11 == tag:
+        return "VM_KERN_MEMORY_PTE"
+    elif 12 == tag:
+        return "VM_KERN_MEMORY_ZONE"
+    elif 13 == tag:
+        return "VM_KERN_MEMORY_KALLOC"
+    elif 14 == tag:
+        return "VM_KERN_MEMORY_COMPRESSOR"
+    elif 15 == tag:
+        return "VM_KERN_MEMORY_COMPRESSED_DATA"
+    elif 16 == tag:
+        return "VM_KERN_MEMORY_PHANTOM_CACHE"
+    elif 17 == tag:
+        return "VM_KERN_MEMORY_WAITQ"
+    elif 18 == tag:
+        return "VM_KERN_MEMORY_DIAG"
+    elif 19 == tag:
+        return "VM_KERN_MEMORY_LOG"
+    elif 20 == tag:
+        return "VM_KERN_MEMORY_FILE"
+    elif 21 == tag:
+        return "VM_KERN_MEMORY_MBUF"
+    elif 22 == tag:
+        return "VM_KERN_MEMORY_UBC"
+    elif 23 == tag:
+        return "VM_KERN_MEMORY_SECURITY"
+    elif 24 == tag:
+        return "VM_KERN_MEMORY_MLOCK"
+    return "??"
+
+
+@lldb_command("showvmtags", "S")
+def showvmtags(cmd_args=None, cmd_options={}):
+    """Routine to print out info about kernel wired page allocations
+        usage: showvmtags
+               iterates kernel map and vm objects totaling allocations by tag.
+        usage: showvmtags -S
+               also iterates kernel object pages individually - slow.
+    """
+    slow = False
+    if "-S" in cmd_options:
+        slow = True
+    page_size = unsigned(kern.globals.page_size)
+    tagcounts = []
+    for tag in range(256):
+        tagcounts.append(0)
+
+    queue_head = kern.globals.vm_objects_wired
+    for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
+        CountWiredObject(object, tagcounts)
+
+    queue_head = kern.globals.purgeable_nonvolatile_queue
+    for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
+        CountWiredObject(object, tagcounts)
+
+    purgeable_queues = kern.globals.purgeable_queues
+    CountWiredPurgeableQueue(purgeable_queues[0], tagcounts)
+    CountWiredPurgeableQueue(purgeable_queues[1], tagcounts)
+    CountWiredPurgeableQueue(purgeable_queues[2], tagcounts)
+
+    CountMapTags(kern.globals.kernel_map, tagcounts, slow)
+
+    total = 0
+    print " {:<8s}  {:>7s}  {:<50s}".format("tag.kmod","size","name")
+    for tag in range(256):
+        if tagcounts[tag]:
+            total += tagcounts[tag]
+            tagstr = ""
+            sitestr = ""
+            if (tag <= 24):
+                sitestr = GetVMKernName(tag)
+            else:
+                site = kern.globals.vm_allocation_sites[tag]
+                if site:
+                    if site.flags & 2:
+                        xsite = Cast(site,'OSKextAccount *')
+                        tagstr = ".{:<3d}".format(xsite.loadTag)
+                        sitestr = GetKmodIDName(xsite.loadTag)
+                    else:
+                        sitestr = kern.Symbolicate(site)
+            print " {:>3d}{:<4s}  {:>7d}K  {:<50s}".format(tag,tagstr,tagcounts[tag]*page_size / 1024,sitestr)
+    print "Total:    {:>7d}K".format(total*page_size / 1024)
+    return None
+
+
 def FindVMEntriesForVnode(task, vn):
     """ returns an array of vme that have the vnode set to defined vnode 
         each entry in array is of format (vme, start_addr, end_address, protection)
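Both showmapvme and CountMapTags above rely on the same encoding: the low 12 bits of vme_offset carry the VM allocation tag and the remaining bits are the page-aligned offset. A worked example with a hypothetical raw field value:

    vme_offset = 0xffffff80deadb00d          # hypothetical raw vme_offset
    tag = vme_offset & 0xFFF                 # 0x00d == 13, i.e. VM_KERN_MEMORY_KALLOC
    offset = vme_offset & ~0xFFF             # 0xffffff80deadb000
    print "{:d} {:#x}".format(tag, offset)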
@@ -2399,8 +2665,8 @@ def FindVMEntriesForVnode(task, vn):
     vme_ptr_type = gettype('vm_map_entry *')
     for vme in IterateQueue(vme_list_head, vme_ptr_type, 'links'):
         #print vme
-        if unsigned(vme.is_sub_map) == 0 and unsigned(vme.object.vm_object) != 0:
-            obj = vme.object.vm_object
+        if unsigned(vme.is_sub_map) == 0 and unsigned(vme.vme_object.vmo_object) != 0:
+            obj = vme.vme_object.vmo_object
         else:
             continue
 
@@ -2439,4 +2705,254 @@ def ShowTaskLoadInfo(cmd_args=None, cmd_options={}):
             end_addr = m[2]
             #print "Load address: %s" % hex(m[1])
     print print_format.format(load_addr, end_addr, libname, uuid_out_string, filepath)
-    return None    
+    return None
+
+@header("{0: <20s} {1: <20s} {2: <20s}".format("vm_page_t", "offset", "object"))
+@lldb_command('vmpagelookup')
+def VMPageLookup(cmd_args=None):
+    """ Print the pages in the page bucket corresponding to the provided object and offset.
+        Usage: (lldb)vmpagelookup <vm_object_t> <vm_offset_t>
+    """
+    if cmd_args == None or len(cmd_args) < 2:
+        raise ArgumentError("Please specify an object and offset.")
+    format_string = "{0: <#020x} {1: <#020x} {2: <#020x}\n"
+
+    obj = kern.GetValueFromAddress(cmd_args[0],'unsigned long long')
+    off = kern.GetValueFromAddress(cmd_args[1],'unsigned long long')
+
+    hash_id = _calc_vm_page_hash(obj, off)
+
+    page_list = kern.globals.vm_page_buckets[hash_id].page_list
+    print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(hash_id), unsigned(page_list)))
+
+    print VMPageLookup.header
+    page = _vm_page_unpack_ptr(page_list)
+    while (page != 0) :
+        pg_t = kern.GetValueFromAddress(page, 'vm_page_t')
+        print format_string.format(page, pg_t.offset, pg_t.object)
+        page = _vm_page_unpack_ptr(pg_t.next_m)
+
+def _vm_page_unpack_ptr(page):
+    if kern.ptrsize == 4 :
+        return page
+
+    if page == 0 :
+        return page
+
+    min_addr = kern.globals.vm_min_kernel_and_kext_address
+    #INTEL - min_addr = 0xffffff7f80000000
+    #ARM - min_addr = 0x80000000
+    #ARM64 - min_addr = 0xffffff8000000000
+    return ((page << 6) + min_addr)
+
+@lldb_command('calcvmpagehash')
+def CalcVMPageHash(cmd_args=None):
+    """ Get the page bucket corresponding to the provided object and offset.
+        Usage: (lldb)calcvmpagehash <vm_object_t> <vm_offset_t>
+    """
+    if cmd_args == None or len(cmd_args) < 2:
+        raise ArgumentError("Please specify an object and offset.")
+
+    obj = kern.GetValueFromAddress(cmd_args[0],'unsigned long long')
+    off = kern.GetValueFromAddress(cmd_args[1],'unsigned long long')
+
+    hash_id = _calc_vm_page_hash(obj, off)
+
+    print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(hash_id), unsigned(kern.globals.vm_page_buckets[hash_id].page_list)))
+    return None
+
+def _calc_vm_page_hash(obj, off):
+    bucket_hash = (int) (kern.globals.vm_page_bucket_hash)
+    hash_mask = (int) (kern.globals.vm_page_hash_mask)
+
+    one = (obj * bucket_hash) & 0xFFFFFFFF
+    two = off >> unsigned(kern.globals.page_shift)
+    three = two ^ bucket_hash
+    four = one + three
+    hash_id = four & hash_mask
+
+    return hash_id
+
+@header("{0: <10s} of {1: <10s} {2: <20s} {3: <20s} {4: <20s} {5: <10s} {6: <5s}\t {7: <28s}\t{8: <50s}".format("index", "total", "vm_page_t", "offset", "next", "phys_page", "wire#", "first bitfield", "second bitfield"))
+@lldb_command('vmobjectwalkpages', 'SBNQP:')
+def VMObjectWalkPages(cmd_args=None, cmd_options={}):
+    """ Print the resident pages contained in the provided object. If a vm_page_t is provided as well, we
+        specifically look for this page, highlighting it in the output or noting if it was not found. For
+        each page, we confirm that it points to the object. We also keep track of the number of pages we
+        see and compare this to the object's resident page count field.
+        Usage:
+            vmobjectwalkpages <vm_object_t> : Walk and print all the pages for a given object (up to 4K pages by default)
+            vmobjectwalkpages <vm_object_t> -B : Walk and print all the pages for a given object (up to 4K pages by default), traversing the memq backwards
+            vmobjectwalkpages <vm_object_t> -N : Walk and print all the pages for a given object, ignore the page limit
+            vmobjectwalkpages <vm_object_t> -Q : Walk all pages for a given object, looking for known signs of corruption (i.e. inactive and active both being set for a page)
+            vmobjectwalkpages <vm_object_t> -P <vm_page_t> : Walk all the pages for a given object, annotate the specified page in the output with ***
+            vmobjectwalkpages <vm_object_t> -P <vm_page_t> -S : Walk all the pages for a given object, stopping when we find the specified page
+
+    """
+
+    if (cmd_args == None or len(cmd_args) < 1):
+        raise ArgumentError("Please specify at minimum a vm_object_t and optionally a vm_page_t")
+
+    out_string = ""
+
+    obj = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')
+
+    page = 0
+    if "-P" in cmd_options:
+        page = kern.GetValueFromAddress(cmd_options['-P'], 'vm_page_t')
+
+    stop = 0
+    if "-S" in cmd_options:
+        if page == 0:
+            raise ArgumentError("-S can only be passed when a page is specified with -P")
+        stop = 1
+
+    walk_backwards = False
+    if "-B" in cmd_options:
+        walk_backwards = True
+
+    quiet_mode = False
+    if "-Q" in cmd_options:
+        quiet_mode = True
+
+    if not quiet_mode:
+        print VMObjectWalkPages.header
+        format_string = "{0: <#10d} of {1: <#10d} {2: <#020x} {3: <#020x} {4: <#020x} {5: <#010x} {6: <#05d}\t"
+        first_bitfield_format_string = "{0: <#1d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:"
+        first_bitfield_format_string += "{7: <#1d}:{8: <#1d}:{9: <#1d}:{10: <#1d}:{11: <#1d}:{12: <#1d}"
+        second_bitfield_format_string = first_bitfield_format_string
+        second_bitfield_format_string += ":{13: <#1d}:{14: <#1d}:{15: <#1d}:{16: <#1d}:{17: <#1d}:{18: <#1d}:{19: <#1d}:"
+        second_bitfield_format_string +=  "{20: <#1d}:{21: <#1d}:{22: <#1d}:{23: <#1d}:{24: <#1d}:{25: <#1d}:{26: <#1d}\n"
+        first_bitfield_format_string += "\t"
+
+    limit = 4096 #arbitrary limit of number of pages to walk
+    ignore_limit = 0
+    if "-N" in cmd_options:
+        ignore_limit = 1
+
+    page_count = 0
+    res_page_count = unsigned(obj.resident_page_count)
+    page_found = False
+    pages_seen = set()
+
+    for vmp in IterateQueue(obj.memq, "vm_page_t", "listq", walk_backwards):
+        page_count += 1
+        out_string = ""
+        if (page != 0 and not(page_found) and vmp == page):
+            out_string += "******"
+            page_found = True
+
+        if page != 0 or quiet_mode:
+             if (page_count % 1000) == 0:
+                print "traversed %d pages ...\n" % (page_count)
+        else:
+                out_string += format_string.format(page_count, res_page_count, vmp, vmp.offset, vmp.listq.next, vmp.phys_page, vmp.wire_count)
+                out_string += first_bitfield_format_string.format(vmp.active, vmp.inactive, vmp.clean_queue, vmp.local, vmp.speculative,
+                                                                    vmp.throttled, vmp.free, vmp.pageout_queue, vmp.laundry, vmp.reference,
+                                                                    vmp.gobbled, vmp.private, vmp.no_cache)
+
+                out_string += second_bitfield_format_string.format(vmp.busy, vmp.wanted, vmp.tabled, vmp.hashed, vmp.fictitious, vmp.clustered,
+                                                                    vmp.clustered, vmp.pmapped, vmp.xpmapped, vmp.wpmapped, vmp.pageout, vmp.absent,
+                                                                    vmp.error, vmp.dirty, vmp.cleaning, vmp.precious, vmp.precious, vmp.overwriting,
+                                                                    vmp.restart, vmp.unusual, vmp.encrypted, vmp.encrypted, vmp.encrypted_cleaning,
+                                                                    vmp.cs_validated, vmp.cs_tainted, vmp.cs_nx, vmp.reusable, vmp.lopage, vmp.slid, vmp.compressor,
+                                                                    vmp.written_by_kernel)
+
+        if (vmp in pages_seen):
+            print out_string + "cycle detected! we've seen vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " twice. stopping...\n"
+            return
+
+        if (vmp.object != obj):
+            print out_string + " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) +  " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(vmp.object))
+            return
+
+        if (not vmp.local) and (vmp.wire_count > 0):
+            if (vmp.active or vmp.inactive or vmp.speculative or vmp.throttled or vmp.pageout_queue):
+                print out_string + " wired page with wrong page queue attributes\n"
+                print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " active: %d inactive: %d speculative: %d throttled %d pageout_queue: %d\n" % (vmp.active,
+                                    vmp.inactive, vmp.speculative, vmp.throttled, vmp.pageout_queue)
+                print "stopping...\n"
+                return
+
+        if ((vmp.free + vmp.active + vmp.inactive + vmp.speculative + vmp.throttled + vmp.pageout_queue) > 1):
+            print out_string + " more than one pageout queue bit set active\n"
+            print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " free: %d active: %d inactive: %d speculative: %d throttled: %d pageout_queue: %d\n" % (vmp.free,
+                                            vmp.active, vmp.inactive, vmp.speculative, vmp.throttled, vmp.pageout_queue)
+            print "stopping...\n"
+            return
+
+        if ((vmp.__unused_pageq_bits != 0) or (vmp.__unused_object_bits != 0)):
+            print out_string + " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp.__unused_pageq_bits,
+                                            vmp.__unused_object_bits)
+            print "stopping...\n"
+            return
+
+        pages_seen.add(vmp)
+
+        if False:
+            hash_id = _calc_vm_page_hash(obj, vmp.offset)
+            hash_page_list = kern.globals.vm_page_buckets[hash_id].page_list
+            hash_page = _vm_page_unpack_ptr(hash_page_list)
+            hash_page_t = 0
+
+            while (hash_page != 0):
+                hash_page_t = kern.GetValueFromAddress(hash_page, 'vm_page_t')
+                if hash_page_t == vmp:
+                    break
+                hash_page = _vm_page_unpack_ptr(hash_page_t.next_m)
+
+            if (unsigned(vmp) != unsigned(hash_page_t)):
+                print out_string + "unable to find page: " + "{0: <#020x}".format(unsigned(vmp)) + " from object in kernel page bucket list\n"
+                print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args[0], unsigned(vmp.offset)))
+                return
+
+        if (page_count >= limit and not(ignore_limit)):
+            print out_string + "Limit reached (%d pages), stopping..." % (limit)
+            return
+
+        print out_string
+
+        if page_found and stop:
+            print("Object reports resident page count of: %d we stopped after traversing %d and finding the requested page.\n" % (unsigned(obj.res_page_count), unsigned(page_count)))
+            return
+
+    if (page != 0):
+        print("page found? : %s\n" % page_found)
+
+    print("Object reports resident page count of %d, we saw %d pages when we walked the resident list.\n" % (unsigned(obj.resident_page_count), unsigned(page_count)))
+
+
+@lldb_command("show_all_apple_protect_pagers")
+def ShowAllAppleProtectPagers(cmd_args=None):
+    """Routine to print all apple_protect pagers
+        usage: show_all_apple_protect_pagers
+    """
+    print "{:>3s} {:<3s} {:<18s} {:>5s} {:>5s} {:>6s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s}\n".format("#", "#", "pager", "refs", "ready", "mapped", "mo_control", "object", "offset", "crypto_offset", "crypto_start", "crypto_end")
+    qhead = kern.globals.apple_protect_pager_queue
+    qtype = GetType('apple_protect_pager *')
+    qcnt = kern.globals.apple_protect_pager_count
+    idx = 0
+    for pager in IterateQueue(qhead, qtype, "pager_queue"):
+        idx = idx + 1
+        show_apple_protect_pager(pager, qcnt, idx)
+
+@lldb_command("show_apple_protect_pager")
+def ShowAppleProtectPager(cmd_args=None):
+    """Routine to print out info about an apple_protect pager
+        usage: show_apple_protect_pager <pager>
+    """
+    if cmd_args == None or len(cmd_args) < 1:
+        print "Invalid argument.", ShowMap.__doc__
+        return
+    pager = kern.GetValueFromAddress(cmd_args[0], 'apple_protect_pager_t')
+    show_apple_protect_pager(pager, 1, 1)
+
+def show_apple_protect_pager(pager, qcnt, idx):
+    object = pager.backing_object
+    shadow = object.shadow
+    while shadow != 0:
+        object = shadow
+        shadow = object.shadow
+    vnode_pager = Cast(object.pager,'vnode_pager *')
+    filename = GetVnodePath(vnode_pager.vnode_handle)
+    print "{:>3}/{:<3d} {:#018x} {:>5d} {:>5d} {:>6d} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x}\n\tcrypt_info:{:#018x} <decrypt:{:#018x} end:{:#018x} ops:{:#018x} refs:{:<d}>\n\tvnode:{:#018x} {:s}\n".format(idx, qcnt, pager, pager.ref_count, pager.is_ready, pager.is_mapped, pager.pager_control, pager.backing_object, pager.backing_offset, pager.crypto_backing_offset, pager.crypto_start, pager.crypto_end, pager.crypt_info, pager.crypt_info.page_decrypt, pager.crypt_info.crypt_end, pager.crypt_info.crypt_ops, pager.crypt_info.crypt_refcnt, vnode_pager.vnode_handle, filename)
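Finally, a worked example of the packed page pointers that _vm_page_unpack_ptr (added earlier in this hunk) decodes on 64-bit kernels: the bucket entry is shifted left by 6 and rebased on vm_min_kernel_and_kext_address. The packed value below is hypothetical; the base is the ARM64 value quoted in that function's comment:

    min_addr = 0xffffff8000000000      # vm_min_kernel_and_kext_address (ARM64 comment above)
    packed = 0x12345                   # hypothetical page_list entry
    print "{:#x}".format((packed << 6) + min_addr)   # 0xffffff800048d140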