2 """ Please make sure you read the README file COMPLETELY BEFORE reading anything below.
3 It is very critical that you read coding guidelines in Section E in README file.
@lldb_command('memstats')
def Memstats(cmd_args=None):
    """ Prints out a summary of various memory statistics. In particular vm_page_wire_count should be greater than 2K or you are under memory pressure.
    """
    print "memorystatus_level: {: >10d}".format(kern.globals.memorystatus_level)
    print "memorystatus_available_pages: {: >10d}".format(kern.globals.memorystatus_available_pages)
    print "inuse_ptepages_count: {: >10d}".format(kern.globals.inuse_ptepages_count)
    print "vm_page_throttled_count: {: >10d}".format(kern.globals.vm_page_throttled_count)
    print "vm_page_active_count: {: >10d}".format(kern.globals.vm_page_active_count)
    print "vm_page_inactive_count: {: >10d}".format(kern.globals.vm_page_inactive_count)
    print "vm_page_wire_count: {: >10d}".format(kern.globals.vm_page_wire_count)
    print "vm_page_free_count: {: >10d}".format(kern.globals.vm_page_free_count)
    print "vm_page_purgeable_count: {: >10d}".format(kern.globals.vm_page_purgeable_count)
    print "vm_page_inactive_target: {: >10d}".format(kern.globals.vm_page_inactive_target)
    print "vm_page_free_target: {: >10d}".format(kern.globals.vm_page_free_target)
    print "vm_page_free_reserved: {: >10d}".format(kern.globals.vm_page_free_reserved)
@xnudebug_test('test_memstats')
def TestMemstats(kernel_target, config, lldb_obj, isConnected):
    """ Test the functionality of memstats command
        returns
         - False on failure
         - True on success
    """
    if not isConnected:
        print "Target is not connected. Cannot test memstats"
        return False
    res = lldb.SBCommandReturnObject()
    lldb_obj.debugger.GetCommandInterpreter().HandleCommand("memstats", res)
    result = res.GetOutput()
    if result.split(":")[1].strip().find('None') == -1:
        return True
    else:
        return False
# Macro: showmemorystatus
def CalculateLedgerPeak(phys_footprint_entry):
    """ Internal function to calculate ledger peak value for the given phys footprint entry
        params: phys_footprint_entry - value representing struct ledger_entry *
        return: value - representing the ledger peak for the given phys footprint entry
    """
    now = kern.globals.sched_tick / 20
    ledger_peak = phys_footprint_entry.le_credit - phys_footprint_entry.le_debit
    if (now - phys_footprint_entry._le.le_peaks[0].le_time <= 1) and (phys_footprint_entry._le.le_peaks[0].le_max > ledger_peak):
        ledger_peak = phys_footprint_entry._le.le_peaks[0].le_max
    if (now - phys_footprint_entry._le.le_peaks[1].le_time <= 1) and (phys_footprint_entry._le.le_peaks[1].le_max > ledger_peak):
        ledger_peak = phys_footprint_entry._le.le_peaks[1].le_max
    return ledger_peak
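# A minimal sketch (not part of the original macro set) of the peak-selection
# rule above, using plain Python objects instead of the kernel's ledger
# structures: an interval peak only wins if it was recorded within the last
# tick and is larger than the running credit-minus-debit total.
#
#   from collections import namedtuple
#   Peak = namedtuple('Peak', 'le_max le_time')
#   def pick_peak(now, credit, debit, peaks):
#       best = credit - debit
#       for p in peaks:
#           if (now - p.le_time <= 1) and (p.le_max > best):
#               best = p.le_max
#       return best
#   # pick_peak(100, 50, 10, [Peak(120, 100), Peak(90, 50)]) == 120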
@header("{: >8s} {: >22s} {: >22s} {: >11s} {: >11s} {: >12s} {: >10s} {: >13s} {: ^10s} {: >8s} {: <20s}\n".format(
            'pid', 'effective priority', 'requested priority', 'state', 'user_data', 'physical', 'iokit', 'footprint',
            'spike', 'limit', 'command'))
def GetMemoryStatusNode(proc_val):
    """ Internal function to get memorystatus information from the given proc
        params: proc - value representing struct proc *
        return: str - formatted output information for proc object
    """
    out_str = ''
    task_val = Cast(proc_val.task, 'task *')
    task_ledgerp = task_val.ledger

    task_physmem_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.phys_mem]
    task_iokit_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.iokit_mapped]
    task_phys_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.phys_footprint]
    page_size = kern.globals.page_size

    phys_mem_footprint = (task_physmem_footprint_ledger_entry.le_credit - task_physmem_footprint_ledger_entry.le_debit) / page_size
    iokit_footprint = (task_iokit_footprint_ledger_entry.le_credit - task_iokit_footprint_ledger_entry.le_debit) / page_size
    phys_footprint = (task_phys_footprint_ledger_entry.le_credit - task_phys_footprint_ledger_entry.le_debit) / page_size
    phys_footprint_limit = task_phys_footprint_ledger_entry.le_limit / page_size
    ledger_peak = CalculateLedgerPeak(task_phys_footprint_ledger_entry)
    phys_footprint_spike = ledger_peak / page_size

    format_string = '{0: >8d} {1: >22d} {2: >22d} {3: #011x} {4: #011x} {5: >12d} {6: >10d} {7: >13d}'
    out_str += format_string.format(proc_val.p_pid, proc_val.p_memstat_effectivepriority,
            proc_val.p_memstat_requestedpriority, proc_val.p_memstat_state, proc_val.p_memstat_userdata,
            phys_mem_footprint, iokit_footprint, phys_footprint)
    if phys_footprint != phys_footprint_spike:
        out_str += "{: ^12d}".format(phys_footprint_spike)
    else:
        out_str += "{: ^12s}".format('-')
    out_str += "{: 8d} {: <20s}\n".format(phys_footprint_limit, proc_val.p_comm)
    return out_str
@lldb_command('showmemorystatus')
def ShowMemoryStatus(cmd_args=None):
    """ Routine to display each entry in jetsam list with a summary of pressure statistics
        Usage: showmemorystatus
    """
    bucket_index = 0
    bucket_count = 20   # number of jetsam priority buckets walked (assumed default)
    print GetMemoryStatusNode.header
    print "{: >91s} {: >10s} {: >13s} {: ^10s} {: >8s}\n".format("(pages)", "(pages)", "(pages)",
            "(pages)", "(pages)")
    while bucket_index < bucket_count:
        current_bucket = kern.globals.memstat_bucket[bucket_index]
        current_list = current_bucket.list
        current_proc = Cast(current_list.tqh_first, 'proc *')
        while unsigned(current_proc) != 0:
            print GetMemoryStatusNode(current_proc)
            current_proc = current_proc.p_memstat_list.tqe_next
        bucket_index += 1
    return
# EndMacro: showmemorystatus
def GetRealMetadata(meta):
    """ Get real metadata for a given metadata pointer
    """
    if unsigned(meta.zindex) != 255:
        return meta
    else:
        return kern.GetValueFromAddress(unsigned(meta) - unsigned(meta.real_metadata_offset), "struct zone_page_metadata *")
def GetFreeList(meta):
    """ Get the free list pointer for a given metadata pointer
    """
    zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address')
    zone_map_max_address = kern.GetGlobalVariable('zone_map_max_address')
    if unsigned(meta.freelist_offset) == unsigned(0xffffffff):
        return 0
    else:
        if (unsigned(meta) >= unsigned(zone_map_min_address)) and (unsigned(meta) < unsigned(zone_map_max_address)):
            page_index = ((unsigned(meta) - unsigned(kern.GetGlobalVariable('zone_metadata_region_min'))) / sizeof('struct zone_page_metadata'))
            return (unsigned(zone_map_min_address) + (kern.globals.page_size * (page_index))) + meta.freelist_offset
        else:
            return (unsigned(meta) + meta.freelist_offset)
@lldb_type_summary(['zone_page_metadata'])
@header("{:<18s} {:<18s} {:>8s} {:>8s} {:<18s} {:<20s}".format('ZONE_METADATA', 'FREELIST', 'PG_CNT', 'FREE_CNT', 'ZONE', 'NAME'))
def GetZoneMetadataSummary(meta):
    """ Summarize a zone metadata object
        params: meta - obj representing zone metadata in the kernel
        returns: str - summary of the zone metadata
    """
    out_str = ""
    out_str += 'Metadata Description:\n' + GetZoneMetadataSummary.header + '\n'
    meta = kern.GetValueFromAddress(meta, "struct zone_page_metadata *")
    if unsigned(meta.zindex) == 255:
        out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}\n".format(meta, 0, 0, 0, 0, '(fake multipage meta)')
        meta = GetRealMetadata(meta)
    zinfo = kern.globals.zone_array[unsigned(meta.zindex)]
    out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}".format(meta, GetFreeList(meta), meta.page_count, meta.free_count, addressof(zinfo), zinfo.zone_name)
    return out_str
@header("{:<18s} {:>18s} {:>18s} {:<18s}".format('ADDRESS', 'TYPE', 'OFFSET_IN_PG', 'METADATA'))
def WhatIs(addr):
    """ Information about kernel pointer
    """
    out_str = ""
    pagesize = kern.globals.page_size
    zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address')
    zone_map_max_address = kern.GetGlobalVariable('zone_map_max_address')
    if (unsigned(addr) >= unsigned(zone_map_min_address)) and (unsigned(addr) < unsigned(zone_map_max_address)):
        zone_metadata_region_min = kern.GetGlobalVariable('zone_metadata_region_min')
        zone_metadata_region_max = kern.GetGlobalVariable('zone_metadata_region_max')
        if (unsigned(addr) >= unsigned(zone_metadata_region_min)) and (unsigned(addr) < unsigned(zone_metadata_region_max)):
            metadata_offset = (unsigned(addr) - unsigned(zone_metadata_region_min)) % sizeof('struct zone_page_metadata')
            page_offset_str = "{:d}/{:d}".format((unsigned(addr) - (unsigned(addr) & ~(pagesize - 1))), pagesize)
            out_str += WhatIs.header + '\n'
            out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Metadata", page_offset_str, unsigned(addr) - metadata_offset)
            out_str += GetZoneMetadataSummary((unsigned(addr) - metadata_offset)) + '\n\n'
        else:
            page_index = ((unsigned(addr) & ~(pagesize - 1)) - unsigned(zone_map_min_address)) / pagesize
            meta = unsigned(zone_metadata_region_min) + (page_index * sizeof('struct zone_page_metadata'))
            meta = kern.GetValueFromAddress(meta, "struct zone_page_metadata *")
            page_meta = GetRealMetadata(meta)
            if unsigned(page_meta.zindex) != 0:
                zinfo = kern.globals.zone_array[unsigned(page_meta.zindex)]
                page_offset_str = "{:d}/{:d}".format((unsigned(addr) - (unsigned(addr) & ~(pagesize - 1))), pagesize)
                out_str += WhatIs.header + '\n'
                out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Element", page_offset_str, page_meta)
                out_str += GetZoneMetadataSummary(unsigned(page_meta)) + '\n\n'
            else:
                out_str += "Unmapped address within the zone_map ({:#018x}-{:#018x})".format(zone_map_min_address, zone_map_max_address)
    else:
        out_str += "Address {:#018x} is outside the zone_map ({:#018x}-{:#018x})\n".format(addr, zone_map_min_address, zone_map_max_address)
    print out_str
    return
@lldb_command('whatis')
def WhatIsHelper(cmd_args=None):
    """ Routine to show information about a kernel pointer
        Usage: whatis <address>
    """
    if not cmd_args:
        raise ArgumentError("No arguments passed")
    addr = kern.GetValueFromAddress(cmd_args[0], 'void *')
    WhatIs(addr)
    data_array = kern.GetValueFromAddress(unsigned(addr) - 16, "uint8_t *")
    print_hex_data(data_array[0:48], unsigned(addr) - 16, "")
    return
@lldb_type_summary(['zone','zone_t'])
@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}({:>6s} {:>6s} {:>6s}) {:^15s} {:<20s}".format(
'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ALL_FREE_PGS', 'ELT_SZ', 'ALLOC', 'ELTS', 'PGS', 'WASTE', 'FLAGS', 'NAME'))
def GetZoneSummary(zone):
    """ Summarize a zone with important information. See help zprint for description of each field
        params:
          zone: value - obj representing a zone in kernel
        returns:
          str - summary of the zone
    """
    out_string = ""
    format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d} {markings} {name:s} '
    pagesize = kern.globals.page_size

    free_elements = zone.countfree
    free_size = free_elements * zone.elem_size

    alloc_pages = zone.alloc_size / pagesize
    alloc_count = zone.alloc_size / zone.elem_size
    alloc_waste = zone.alloc_size % zone.elem_size

    marks = [
            ["collectable",                 "C"],
            ["expandable",                  "X"],
            ["noencrypt",                   "$"],
            ["caller_acct",                 "@"],
            ["exhaustible",                 "H"],
            ["allows_foreign",              "F"],
            ["async_prio_refill",           "R"],
            ["no_callout",                  "O"],
            ["zleak_on",                    "L"],
            ["doing_alloc_without_vm_priv", "A"],
            ["doing_alloc_with_vm_priv",    "S"],
            ["waiting",                     "W"],
            ["doing_gc",                    "G"]
            ]
    if kern.arch == 'x86_64':
        marks.append(["gzalloc_exempt",     "M"])
        marks.append(["alignment_required", "N"])

    markings = ""
    for mark in marks:
        if zone.__getattr__(mark[0]):
            markings += mark[1]
        else:
            markings += " "

    out_string += format_string.format(zone, zone.cur_size, zone.page_count,
                    zone.count, free_elements, free_size, zone.count_all_free_pages,
                    zone.elem_size, zone.alloc_size, alloc_count,
                    alloc_pages, alloc_waste, name=zone.zone_name, markings=markings)

    if zone.exhaustible:
        out_string += "(max: {:d})".format(zone.max_size)

    return out_string
@lldb_command('zprint')
def Zprint(cmd_args=None):
    """ Routine to print a summary listing of all the kernel zones
    All columns are printed in decimal
    Legend:
        C - collectable
        X - expandable
        $ - not encrypted during hibernation
        @ - allocs and frees are accounted to caller process for KPRVT
        H - exhaustible
        F - allows foreign memory (memory not allocated from zone_map)
        M - gzalloc will avoid monitoring this zone
        R - will be refilled when below low water mark
        O - does not allow refill callout to fill zone on noblock allocation
        N - zone requires alignment (avoids padding this zone for debugging)
        A - currently trying to allocate more backing memory from kernel_memory_allocate without VM priv
        S - currently trying to allocate more backing memory from kernel_memory_allocate with VM priv
        W - another thread is waiting for more memory
        L - zone is being monitored by zleaks
        G - currently running GC
    """
    print GetZoneSummary.header
    for zval in kern.zones:
        print GetZoneSummary(zval)
@xnudebug_test('test_zprint')
def TestZprint(kernel_target, config, lldb_obj, isConnected):
    """ Test the functionality of zprint command
        returns
         - False on failure
         - True on success
    """
    if not isConnected:
        print "Target is not connected. Cannot test zprint"
        return False
    res = lldb.SBCommandReturnObject()
    lldb_obj.debugger.GetCommandInterpreter().HandleCommand("zprint", res)
    result = res.GetOutput()
    if len(result.split("\n")) > 2:
        return True
    else:
        return False
# Macro: showzfreelist

def ShowZfreeListHeader(zone):
    """ Helper routine to print a header for zone freelist.
        (Since the freelist does not have a custom type, this is not defined as a Type Summary).
        params:
            zone:zone_t - Zone object to print header info
        returns:
            None
    """
    scaled_factor = (unsigned(kern.globals.zp_factor) +
            (unsigned(zone.elem_size) >> unsigned(kern.globals.zp_scale)))

    out_str = ""
    out_str += "{0: <9s} {1: <12s} {2: <18s} {3: <18s} {4: <6s}\n".format('ELEM_SIZE', 'COUNT', 'NCOOKIE', 'PCOOKIE', 'FACTOR')
    out_str += "{0: <9d} {1: <12d} 0x{2:0>16x} 0x{3:0>16x} {4: <2d}/{5: <2d}\n\n".format(
                zone.elem_size, zone.count, kern.globals.zp_nopoison_cookie, kern.globals.zp_poisoned_cookie, zone.zp_count, scaled_factor)
    out_str += "{0: <7s} {1: <18s} {2: <18s} {3: <18s} {4: <18s} {5: <18s} {6: <14s}\n".format(
                'NUM', 'ELEM', 'NEXT', 'BACKUP', '^ NCOOKIE', '^ PCOOKIE', 'POISON (PREV)')
    print out_str
def ShowZfreeListChain(zone, zfirst, zlimit):
    """ Helper routine to print a zone free list chain
        params:
            zone: zone_t - Zone object
            zfirst: void * - A pointer to the first element of the free list chain
            zlimit: int - Limit for the number of elements to be printed by showzfreelist
        returns:
            None
    """
    current = Cast(zfirst, 'void *')
    while ShowZfreeList.elts_found < zlimit:
        ShowZfreeList.elts_found += 1
        znext = dereference(Cast(current, 'vm_offset_t *'))
        znext = (unsigned(znext) ^ unsigned(kern.globals.zp_nopoison_cookie))
        znext = kern.GetValueFromAddress(znext, 'vm_offset_t *')
        backup_ptr = kern.GetValueFromAddress((unsigned(Cast(current, 'vm_offset_t')) + unsigned(zone.elem_size) - sizeof('vm_offset_t')), 'vm_offset_t *')
        backup_val = dereference(backup_ptr)
        n_unobfuscated = (unsigned(backup_val) ^ unsigned(kern.globals.zp_nopoison_cookie))
        p_unobfuscated = (unsigned(backup_val) ^ unsigned(kern.globals.zp_poisoned_cookie))
        poison_str = ''
        if p_unobfuscated == unsigned(znext):
            poison_str = "P ({0: <d})".format(ShowZfreeList.elts_found - ShowZfreeList.last_poisoned)
            ShowZfreeList.last_poisoned = ShowZfreeList.elts_found
        else:
            if n_unobfuscated != unsigned(znext):
                poison_str = "INVALID"
        print "{0: <7d} 0x{1:0>16x} 0x{2:0>16x} 0x{3:0>16x} 0x{4:0>16x} 0x{5:0>16x} {6: <14s}\n".format(
                ShowZfreeList.elts_found, unsigned(current), unsigned(znext), unsigned(backup_val), n_unobfuscated, p_unobfuscated, poison_str)
        if unsigned(znext) == 0:
            break
        current = Cast(znext, 'void *')
@static_var('elts_found',0)
@static_var('last_poisoned',0)
@lldb_command('showzfreelist')
def ShowZfreeList(cmd_args=None):
    """ Walk the freelist for a zone, printing out the primary and backup next pointers, the poisoning cookies, and the poisoning status of each element.
    Usage: showzfreelist <zone> [iterations]

        Will walk up to 50 elements by default, pass a limit in 'iterations' to override.
    """
    if not cmd_args:
        print ShowZfreeList.__doc__
        return
    ShowZfreeList.elts_found = 0
    ShowZfreeList.last_poisoned = 0

    zone = kern.GetValueFromAddress(cmd_args[0], 'struct zone *')
    zlimit = 50
    if len(cmd_args) >= 2:
        zlimit = ArgumentStringToInt(cmd_args[1])
    ShowZfreeListHeader(zone)

    if unsigned(zone.allows_foreign) == 1:
        for free_page_meta in IterateQueue(zone.pages.any_free_foreign, 'struct zone_page_metadata *', 'pages'):
            if ShowZfreeList.elts_found == zlimit:
                break
            zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *')
            if unsigned(zfirst) != 0:
                ShowZfreeListChain(zone, zfirst, zlimit)
    for free_page_meta in IterateQueue(zone.pages.intermediate, 'struct zone_page_metadata *', 'pages'):
        if ShowZfreeList.elts_found == zlimit:
            break
        zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *')
        if unsigned(zfirst) != 0:
            ShowZfreeListChain(zone, zfirst, zlimit)
    for free_page_meta in IterateQueue(zone.pages.all_free, 'struct zone_page_metadata *', 'pages'):
        if ShowZfreeList.elts_found == zlimit:
            break
        zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *')
        if unsigned(zfirst) != 0:
            ShowZfreeListChain(zone, zfirst, zlimit)

    if ShowZfreeList.elts_found == zlimit:
        print "Stopped at {0: <d} elements!".format(zlimit)
    else:
        print "Found {0: <d} elements!".format(ShowZfreeList.elts_found)

# EndMacro: showzfreelist
# Macro: zstack_showzonesbeinglogged

@lldb_command('zstack_showzonesbeinglogged')
def ZstackShowZonesBeingLogged(cmd_args=None):
    """ Show all zones that have a BTLog attached (i.e. zones being logged).
    """
    for zval in kern.zones:
        if zval.zlog_btlog:
            print "Zone: %s with its BTLog at: 0x%lx" % (zval.zone_name, zval.zlog_btlog)

# EndMacro: zstack_showzonesbeinglogged
@lldb_command('zstack')
def Zstack(cmd_args=None):
    """ Zone leak debugging: Print the stack trace logged at <index> in the stacks list. If a <count> is supplied, it prints <count> stacks starting at <index>.
        Usage: zstack <btlog addr> <index> [<count>]

        The suggested usage is to look at stacks with high percentage of refs (maybe > 25%).
        The stack trace that occurs the most is probably the cause of the leak. Use zstack_findleak for that.
    """
    if not cmd_args:
        print Zstack.__doc__
        return
    if int(kern.globals.log_records) == 0:
        print "Zone logging not enabled. Add 'zlog=<zone name>' to boot-args."
        return

    btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
    btrecords_total_size = unsigned(btlog_ptr.btlog_buffersize)
    btrecord_size = unsigned(btlog_ptr.btrecord_size)
    btrecords = unsigned(btlog_ptr.btrecords)
    btlog_size = unsigned(sizeof('struct btlog'))
    depth = unsigned(btlog_ptr.btrecord_btdepth)
    zstack_index = ArgumentStringToInt(cmd_args[1])
    count = 1
    if len(cmd_args) >= 3:
        count = ArgumentStringToInt(cmd_args[2])

    max_count = ((btrecords_total_size - btlog_size)/btrecord_size)

    if (zstack_index + count) > max_count:
        count = max_count - zstack_index

    while count and (zstack_index != 0xffffff):
        zstack_record_offset = zstack_index * btrecord_size
        zstack_record = kern.GetValueFromAddress(btrecords + zstack_record_offset, 'btlog_record_t *')
        if int(zstack_record.ref_count) != 0:
            ShowZStackRecord(zstack_record, zstack_index, depth, unsigned(btlog_ptr.active_element_count))
        zstack_index += 1
        count -= 1
# Macro: zstack_inorder

@lldb_command('zstack_inorder')
def ZstackInOrder(cmd_args=None):
    """ Zone leak debugging: Print the stack traces starting from head to the tail.
        Usage: zstack_inorder <btlog addr>
    """
    if not cmd_args:
        print "Zone leak debugging: Print the stack traces starting from head to the tail. \nUsage: zstack_inorder <btlog addr>"
        return
    if int(kern.globals.log_records) == 0:
        print "Zone logging not enabled. Add 'zlog=<zone name>' to boot-args."
        return

    btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
    btrecords_total_size = unsigned(btlog_ptr.btlog_buffersize)
    btrecord_size = unsigned(btlog_ptr.btrecord_size)
    btrecords = unsigned(btlog_ptr.btrecords)
    btlog_size = unsigned(sizeof('struct btlog'))
    depth = unsigned(btlog_ptr.btrecord_btdepth)
    zstack_head = unsigned(btlog_ptr.head)
    zstack_index = zstack_head
    zstack_tail = unsigned(btlog_ptr.tail)
    count = ((btrecords_total_size - btlog_size)/btrecord_size)

    while count and (zstack_index != 0xffffff):
        zstack_record_offset = zstack_index * btrecord_size
        zstack_record = kern.GetValueFromAddress(btrecords + zstack_record_offset, 'btlog_record_t *')
        ShowZStackRecord(zstack_record, zstack_index, depth, unsigned(btlog_ptr.active_element_count))
        zstack_index = zstack_record.next
        count -= 1

# EndMacro : zstack_inorder
@lldb_command('findoldest')
def FindOldest(cmd_args=None):
    """ (Deprecated) Use the 'zstack_findleak' macro instead.
    """
    print "***** DEPRECATED ***** use 'zstack_findleak' macro instead."
    return
# EndMacro : findoldest
# Macro : zstack_findleak

@lldb_command('zstack_findleak')
def zstack_findleak(cmd_args=None):
    """ Zone leak debugging: search the log and print the stack with the most active references

        Usage: zstack_findleak <btlog address>

        This is useful for verifying a suspected stack as being the source of
        the leak.
    """
    btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
    btrecord_size = unsigned(btlog_ptr.btrecord_size)
    btrecords = unsigned(btlog_ptr.btrecords)

    cpcs_index = unsigned(btlog_ptr.head)
    depth = unsigned(btlog_ptr.btrecord_btdepth)
    highref = 0
    highref_index = 0
    highref_record = 0

    while cpcs_index != 0xffffff:
        cpcs_record_offset = cpcs_index * btrecord_size
        cpcs_record = kern.GetValueFromAddress(btrecords + cpcs_record_offset, 'btlog_record_t *')
        if cpcs_record.ref_count > highref:
            highref_record = cpcs_record
            highref = cpcs_record.ref_count
            highref_index = cpcs_index
        cpcs_index = cpcs_record.next
    ShowZStackRecord(highref_record, highref_index, depth, unsigned(btlog_ptr.active_element_count))

# EndMacro: zstack_findleak
@lldb_command('findelem')
def FindElem(cmd_args=None):
    """ (Deprecated) Use the 'zstack_findelem' macro instead.
    """
    print "***** DEPRECATED ***** use 'zstack_findelem' macro instead."
    return
# EndMacro: findelem
@lldb_command('zstack_findelem')
def ZStackFindElem(cmd_args=None):
    """ Zone corruption debugging: search the zone log and print out the stack traces for all log entries that
        refer to the given zone element.
        Usage: zstack_findelem <btlog addr> <elem addr>

        When the kernel panics due to a corrupted zone element, get the
        element address and use this command. This will show you the stack traces of all logged zalloc and
        zfree operations which tells you who touched the element in the recent past. This also makes
        double-frees readily apparent.
    """
    if not cmd_args:
        print ZStackFindElem.__doc__
        return
    if int(kern.globals.log_records) == 0 or unsigned(kern.globals.corruption_debug_flag) == 0:
        print "Zone logging with corruption detection not enabled. Add '-zc zlog=<zone name>' to boot-args."
        return

    btlog_ptr = kern.GetValueFromAddress(cmd_args[0], 'btlog_t *')
    target_element = unsigned(kern.GetValueFromAddress(cmd_args[1], 'void *'))

    btrecord_size = unsigned(btlog_ptr.btrecord_size)
    btrecords = unsigned(btlog_ptr.btrecords)
    depth = unsigned(btlog_ptr.btrecord_btdepth)

    prev_op = -1
    scan_items = 0
    hashelem = cast(btlog_ptr.elem_linkage_un.element_hash_queue.tqh_first, 'btlog_element_t *')
    if (target_element >> 32) != 0:
        target_element = target_element ^ 0xFFFFFFFFFFFFFFFF
    else:
        target_element = target_element ^ 0xFFFFFFFF
    while unsigned(hashelem) != 0:
        if unsigned(hashelem.elem) == target_element:
            recindex = hashelem.recindex
            recoffset = recindex * btrecord_size
            record = kern.GetValueFromAddress(btrecords + recoffset, 'btlog_record_t *')
            out_str = ('-' * 8)
            if record.operation == 1:
                out_str += "OP: ALLOC. "
            else:
                out_str += "OP: FREE.  "
            out_str += "Stack Index {0: <d} {1: <s}\n".format(recindex, ('-' * 8))
            print out_str
            print GetBtlogBacktrace(depth, record)
            if int(record.operation) == prev_op:
                print "{0: <s} DOUBLE OP! {1: <s}".format(('*' * 8), ('*' * 8))
                return
            prev_op = int(record.operation)
        hashelem = cast(hashelem.element_hash_link.tqe_next, 'btlog_element_t *')
        scan_items += 1
        if scan_items % 100 == 0:
            print "Scanning is ongoing. {0: <d} items scanned since last check." .format(scan_items)

# EndMacro: zstack_findelem
@lldb_command('btlog_find', "AS")
def BtlogFind(cmd_args=None, cmd_options={}):
    """ (Deprecated) Use the 'zstack_findelem' macro instead.
    """
    print "***** DEPRECATED ***** use 'zstack_findelem' macro instead."
    return

#EndMacro: btlog_find
@lldb_command('showzalloc')
def ShowZalloc(cmd_args=None):
    """ Prints a zallocation from the zallocations array based off its index and prints the associated symbolicated backtrace.
        Usage: showzalloc <index>
    """
    if not cmd_args:
        print ShowZalloc.__doc__
        return
    if unsigned(kern.globals.zallocations) == 0:
        print "zallocations array not initialized!"
        return
    zallocation = kern.globals.zallocations[ArgumentStringToInt(cmd_args[0])]
    ShowZTrace([str(int(zallocation.za_trace_index))])

#EndMacro: showzalloc
@lldb_command('showztrace')
def ShowZTrace(cmd_args=None):
    """ Prints the backtrace from the ztraces array at index
        Usage: showztrace <trace index>
    """
    if not cmd_args:
        print ShowZTrace.__doc__
        return
    if unsigned(kern.globals.ztraces) == 0:
        print "ztraces array not initialized!"
        return
    ztrace_addr = kern.globals.ztraces[ArgumentStringToInt(cmd_args[0])]
    ShowZstackTraceHelper(ztrace_addr.zt_stack, ztrace_addr.zt_depth)

#EndMacro: showztrace
#Macro: showztraceaddr

@lldb_command('showztraceaddr')
def ShowZTraceAddr(cmd_args=None):
    """ Prints the struct ztrace passed in.
        Usage: showztraceaddr <trace address>
    """
    if not cmd_args:
        print ShowZTraceAddr.__doc__
        return
    ztrace_ptr = kern.GetValueFromAddress(cmd_args[0], 'struct ztrace *')
    print dereference(ztrace_ptr)
    ShowZstackTraceHelper(ztrace_ptr.zt_stack, ztrace_ptr.zt_depth)

#EndMacro: showztraceaddr
#Macro: showzstacktrace

@lldb_command('showzstacktrace')
def ShowZstackTrace(cmd_args=None):
    """ Routine to print a stacktrace stored by OSBacktrace.
        Usage: showzstacktrace <saved stacktrace> [size]

        size is optional, defaults to 15.
    """
    if not cmd_args:
        print ShowZstackTrace.__doc__
        return
    void_ptr_type = gettype('void *')
    void_double_ptr_type = void_ptr_type.GetPointerType()
    trace = kern.GetValueFromAddress(cmd_args[0], void_double_ptr_type)
    trace_size = 15
    if len(cmd_args) >= 2:
        trace_size = ArgumentStringToInt(cmd_args[1])
    ShowZstackTraceHelper(trace, trace_size)

#EndMacro: showzstacktrace
def ShowZstackTraceHelper(stack, depth):
    """ Helper routine for printing a zstack.
        params:
            stack: void *[] - An array of pointers representing the Zstack
            depth: int - The depth of the ztrace stack
        returns:
            None
    """
    trace_current = 0
    while trace_current < depth:
        trace_addr = stack[trace_current]
        symbol_arr = kern.SymbolicateFromAddress(unsigned(trace_addr))
        if symbol_arr:
            symbol_str = str(symbol_arr[0].addr)
        else:
            symbol_str = ''
        print '{0: <#x} {1: <s}'.format(trace_addr, symbol_str)
        trace_current += 1
#Macro: showtopztrace

@lldb_command('showtopztrace')
def ShowTopZtrace(cmd_args=None):
    """ Shows the ztrace with the biggest size.
        (According to top_ztrace, not by iterating through the hash table)
    """
    top_trace = kern.globals.top_ztrace
    print 'Index: {0: <d}'.format((unsigned(top_trace) - unsigned(kern.globals.ztraces)) / sizeof('struct ztrace'))
    print dereference(top_trace)
    ShowZstackTraceHelper(top_trace.zt_stack, top_trace.zt_depth)

#EndMacro: showtopztrace
@lldb_command('showzallocs')
def ShowZallocs(cmd_args=None):
    """ Prints all allocations in the zallocations table
    """
    if unsigned(kern.globals.zallocations) == 0:
        print "zallocations array not initialized!"
        return
    print '{0: <5s} {1: <18s} {2: <5s} {3: <15s}'.format('INDEX','ADDRESS','TRACE','SIZE')
    current_index = 0
    max_zallocation = unsigned(kern.globals.zleak_alloc_buckets)
    allocation_count = 0
    while current_index < max_zallocation:
        current_zalloc = kern.globals.zallocations[current_index]
        if int(current_zalloc.za_element) != 0:
            print '{0: <5d} {1: <#018x} {2: <5d} {3: <15d}'.format(current_index, current_zalloc.za_element, current_zalloc.za_trace_index, unsigned(current_zalloc.za_size))
            allocation_count += 1
        current_index += 1
    print 'Total Allocations: {0: <d}'.format(allocation_count)

#EndMacro: showzallocs
#Macro: showzallocsfortrace

@lldb_command('showzallocsfortrace')
def ShowZallocsForTrace(cmd_args=None):
    """ Prints all allocations pointing to the passed in trace's index into ztraces by looking through zallocations table
        Usage: showzallocsfortrace <trace index>
    """
    if not cmd_args:
        print ShowZallocsForTrace.__doc__
        return
    print '{0: <5s} {1: <18s} {2: <15s}'.format('INDEX','ADDRESS','SIZE')
    target_index = ArgumentStringToInt(cmd_args[0])
    current_index = 0
    max_zallocation = unsigned(kern.globals.zleak_alloc_buckets)
    allocation_count = 0
    while current_index < max_zallocation:
        current_zalloc = kern.globals.zallocations[current_index]
        if unsigned(current_zalloc.za_element) != 0 and (unsigned(current_zalloc.za_trace_index) == unsigned(target_index)):
            print '{0: <5d} {1: <#018x} {2: <6d}'.format(current_index, current_zalloc.za_element, current_zalloc.za_size)
            allocation_count += 1
        current_index += 1
    print 'Total Allocations: {0: <d}'.format(allocation_count)

#EndMacro: showzallocsfortrace
@lldb_command('showztraces')
def ShowZTraces(cmd_args=None):
    """ Prints all traces with size > 0
    """
    ShowZTracesAbove([0])

#EndMacro: showztraces
#Macro: showztracesabove

@lldb_command('showztracesabove')
def ShowZTracesAbove(cmd_args=None):
    """ Prints all traces with size greater than X
        Usage: showztracesabove <size>
    """
    if not cmd_args:
        print ShowZTracesAbove.__doc__
        return
    print '{0: <5s} {1: <6s}'.format('INDEX','SIZE')
    current_index = 0
    ztrace_count = 0
    max_ztrace = unsigned(kern.globals.zleak_trace_buckets)
    while current_index < max_ztrace:
        ztrace_current = kern.globals.ztraces[current_index]
        if ztrace_current.zt_size > unsigned(cmd_args[0]):
            print '{0: <5d} {1: <6d}'.format(current_index, int(ztrace_current.zt_size))
            ztrace_count += 1
        current_index += 1
    print 'Total traces: {0: <d}'.format(ztrace_count)

#EndMacro: showztracesabove
#Macro: showztracehistogram

@lldb_command('showztracehistogram')
def ShowZtraceHistogram(cmd_args=None):
    """ Prints the histogram of the ztrace table
    """
    print '{0: <5s} {1: <9s} {2: <10s}'.format('INDEX','HIT_COUNT','COLLISIONS')
    current_index = 0
    ztrace_count = 0
    max_ztrace = unsigned(kern.globals.zleak_trace_buckets)
    while current_index < max_ztrace:
        ztrace_current = kern.globals.ztraces[current_index]
        if ztrace_current.zt_hit_count != 0:
            print '{0: <5d} {1: <9d} {2: <10d}'.format(current_index, ztrace_current.zt_hit_count, ztrace_current.zt_collisions)
            ztrace_count += 1
        current_index += 1
    print 'Total traces: {0: <d}'.format(ztrace_count)

#EndMacro: showztracehistogram
#Macro: showzallochistogram

@lldb_command('showzallochistogram')
def ShowZallocHistogram(cmd_args=None):
    """ Prints the histogram for the zalloc table
    """
    print '{0: <5s} {1: <9s}'.format('INDEX','HIT_COUNT')
    current_index = 0
    zallocation_count = 0
    max_ztrace = unsigned(kern.globals.zleak_alloc_buckets)
    while current_index < max_ztrace:
        zallocation_current = kern.globals.zallocations[current_index]
        if zallocation_current.za_hit_count != 0:
            print '{0: <5d} {1: <9d}'.format(current_index, zallocation_current.za_hit_count)
            zallocation_count += 1
        current_index += 1
    print 'Total Allocations: {0: <d}'.format(zallocation_count)

#EndMacro: showzallochistogram
@lldb_command('showzstats')
def ShowZstats(cmd_args=None):
    """ Prints the zone leak detection stats
    """
    print 'z_alloc_collisions: {0: <d}, z_trace_collisions: {1: <d}'.format(unsigned(kern.globals.z_alloc_collisions), unsigned(kern.globals.z_trace_collisions))
    print 'z_alloc_overwrites: {0: <d}, z_trace_overwrites: {1: <d}'.format(unsigned(kern.globals.z_alloc_overwrites), unsigned(kern.globals.z_trace_overwrites))
    print 'z_alloc_recorded: {0: <d}, z_trace_recorded: {1: <d}'.format(unsigned(kern.globals.z_alloc_recorded), unsigned(kern.globals.z_trace_recorded))

#EndMacro: showzstats
def GetBtlogBacktrace(depth, zstack_record):
    """ Helper routine for getting a BT Log record backtrace stack.
        params:
            depth:int - The depth of the zstack record
            zstack_record:btlog_record_t * - A BTLog record
        returns:
            str - string with backtrace in it.
    """
    out_str = ''
    frame = 0
    if not zstack_record:
        return "Zstack record none!"

    depth_val = unsigned(depth)
    while frame < depth_val:
        frame_pc = zstack_record.bt[frame]
        if not frame_pc or int(frame_pc) == 0:
            break
        symbol_arr = kern.SymbolicateFromAddress(frame_pc)
        if symbol_arr:
            symbol_str = str(symbol_arr[0].addr)
        else:
            symbol_str = ''
        out_str += "{0: <#0x} <{1: <s}>\n".format(frame_pc, symbol_str)
        frame += 1
    return out_str
def ShowZStackRecord(zstack_record, zstack_index, btrecord_btdepth, elements_count):
    """ Helper routine for printing a single zstack record
        params:
            zstack_record:btlog_record_t * - A BTLog record
            zstack_index:int - Index for the record in the BTLog table
        returns:
            None
    """
    out_str = ('-' * 8)
    if zstack_record.operation == 1:
        out_str += "ALLOC.  "
    else:
        out_str += "FREE.   "
    out_str += "Stack Index {0: <d} with active refs {1: <d} of {2: <d} {3: <s}\n".format(zstack_index, zstack_record.ref_count, elements_count, ('-' * 8))
    print out_str
    print GetBtlogBacktrace(btrecord_btdepth, zstack_record)
# Macro: showioalloc

@lldb_command('showioalloc')
def ShowIOAllocations(cmd_args=None):
    """ Show some accounting of memory allocated by IOKit allocators. See ioalloccount man page for details.
        Routine to display a summary of memory accounting allocated by IOKit allocators.
    """
    print "Instance allocation  = {0: <#0x} = {1: d}K".format(kern.globals.debug_ivars_size, (kern.globals.debug_ivars_size / 1024))
    print "Container allocation = {0: <#0x} = {1: d}K".format(kern.globals.debug_container_malloc_size, (kern.globals.debug_container_malloc_size / 1024))
    print "IOMalloc allocation  = {0: <#0x} = {1: d}K".format(kern.globals.debug_iomalloc_size, (kern.globals.debug_iomalloc_size / 1024))
    print "Pageable allocation  = {0: <#0x} = {1: d}K".format(kern.globals.debug_iomallocpageable_size, (kern.globals.debug_iomallocpageable_size / 1024))
    return
# EndMacro: showioalloc
# Macro: showselectmem
@lldb_command('showselectmem', "S:")
def ShowSelectMem(cmd_args=None, cmd_options={}):
    """ Show memory cached by threads on calls to select.

        usage: showselectmem [-v]
            -v        : print each thread's memory
                        (one line per thread with non-zero select memory)
            -S {addr} : Find the thread whose thread-local select set
                        matches the given address
    """
    verbose = False
    opt_wqs = 0
    if config['verbosity'] > vHUMAN:
        verbose = True
    if "-S" in cmd_options:
        opt_wqs = unsigned(kern.GetValueFromAddress(cmd_options["-S"], 'uint64_t *'))
        if opt_wqs == 0:
            raise ArgumentError("Invalid waitq set address: {:s}".format(cmd_options["-S"]))
    selmem = 0
    if verbose:
        print "{:18s} {:10s} {:s}".format('Task', 'Thread ID', 'Select Mem (bytes)')
    for t in kern.tasks:
        for th in IterateQueue(t.threads, 'thread *', 'task_threads'):
            uth = Cast(th.uthread, 'uthread *')
            wqs = 0
            if hasattr(uth, 'uu_allocsize'): # old style
                thmem = uth.uu_allocsize
                wqs = uth.uu_wqset
            elif hasattr(uth, 'uu_wqstate_sz'): # new style
                thmem = uth.uu_wqstate_sz
                wqs = uth.uu_wqset
            else:
                print "What kind of uthread is this?!"
                continue
            if opt_wqs and opt_wqs == unsigned(wqs):
                print "FOUND: {:#x} in thread: {:#x} ({:#x})".format(opt_wqs, unsigned(th), unsigned(th.thread_id))
            if verbose and thmem > 0:
                print "{:<#18x} {:<#10x} {:d}".format(unsigned(t), unsigned(th.thread_id), thmem)
            selmem += thmem
    print "Total: {:d} bytes ({:d} kbytes)".format(selmem, selmem/1024)
# Endmacro: showselectmem
# Macro: showtaskvme
@lldb_command('showtaskvme', "PS")
def ShowTaskVmeHelper(cmd_args=None, cmd_options={}):
    """ Display a summary list of the specified vm_map's entries
        Usage: showtaskvme <task address> (ex. showtaskvme 0x00ataskptr00 )
        Use -S flag to show VM object shadow chains
        Use -P flag to show pager info (mapped file, compressed pages, ...)
    """
    show_pager_info = False
    show_all_shadows = False
    if "-P" in cmd_options:
        show_pager_info = True
    if "-S" in cmd_options:
        show_all_shadows = True
    task = kern.GetValueFromAddress(cmd_args[0], 'task *')
    ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
@lldb_command('showallvme', "PS")
def ShowAllVME(cmd_args=None, cmd_options={}):
    """ Routine to print a summary listing of all the vm map entries
        Go Through each task in system and show the vm memory regions
        Use -S flag to show VM object shadow chains
        Use -P flag to show pager info (mapped file, compressed pages, ...)
    """
    show_pager_info = False
    show_all_shadows = False
    if "-P" in cmd_options:
        show_pager_info = True
    if "-S" in cmd_options:
        show_all_shadows = True
    for task in kern.tasks:
        ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
@lldb_command('showallvm')
def ShowAllVM(cmd_args=None):
    """ Routine to print a summary listing of all the vm maps
    """
    for task in kern.tasks:
        print GetTaskSummary.header + ' ' + GetProcSummary.header
        print GetTaskSummary(task) + ' ' + GetProcSummary(Cast(task.bsd_info, 'proc *'))
        print GetVMMapSummary.header
        print GetVMMapSummary(task.map)
@lldb_command("showtaskvm")
def ShowTaskVM(cmd_args=None):
    """ Display info about the specified task's vm_map
        syntax: (lldb) showtaskvm <task_ptr>
    """
    if not cmd_args:
        print ShowTaskVM.__doc__
        return False
    task = kern.GetValueFromAddress(cmd_args[0], 'task *')
    if not task:
        print "Unknown arguments."
        return False
    print GetTaskSummary.header + ' ' + GetProcSummary.header
    print GetTaskSummary(task) + ' ' + GetProcSummary(Cast(task.bsd_info, 'proc *'))
    print GetVMMapSummary.header
    print GetVMMapSummary(task.map)
    return True
@lldb_command('showallvmstats')
def ShowAllVMStats(cmd_args=None):
    """ Print a summary of vm statistics in a table format
    """
    page_size = kern.globals.page_size
    vmstats = lambda:None
    vmstats.wired_count = 0
    vmstats.resident_count = 0
    vmstats.resident_max = 0
    vmstats.internal = 0
    vmstats.external = 0
    vmstats.reusable = 0
    vmstats.compressed = 0
    vmstats.compressed_peak = 0
    vmstats.compressed_lifetime = 0
    vmstats.error = ''

    hdr_format = "{0: >10s} {1: <20s} {2: >6s} {3: >10s} {4: >10s} {5: >10s} {6: >10s} {7: >10s} {8: >10s} {9: >10s} {10: >10s} {11: >10s} {12: >10s} {13: >10s} {14:}"
    print hdr_format.format('pid', 'command', '#ents', 'wired', 'vsize', 'rsize', 'NEW RSIZE', 'max rsize', 'internal', 'external', 'reusable', 'compressed', 'compressed', 'compressed', '')
    print hdr_format.format('', '', '', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(current)', '(peak)', '(lifetime)', '')
    entry_format = "{p.p_pid: >10d} {p.p_comm: <20s} {m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {s.error}"

    for task in kern.tasks:
        proc = Cast(task.bsd_info, 'proc *')
        vmmap = Cast(task.map, '_vm_map *')
        vmstats.error = ''
        vmstats.wired_count = vmmap.pmap.stats.wired_count
        vmstats.resident_count = unsigned(vmmap.pmap.stats.resident_count)
        vmstats.resident_max = vmmap.pmap.stats.resident_max
        vmstats.internal = unsigned(vmmap.pmap.stats.internal)
        vmstats.external = unsigned(vmmap.pmap.stats.external)
        vmstats.reusable = unsigned(vmmap.pmap.stats.reusable)
        vmstats.compressed = unsigned(vmmap.pmap.stats.compressed)
        vmstats.compressed_peak = unsigned(vmmap.pmap.stats.compressed_peak)
        vmstats.compressed_lifetime = unsigned(vmmap.pmap.stats.compressed_lifetime)
        vmstats.new_resident_count = vmstats.internal + vmstats.external

        if vmstats.internal < 0:
            vmstats.error += '*'
        if vmstats.external < 0:
            vmstats.error += '*'
        if vmstats.reusable < 0:
            vmstats.error += '*'
        if vmstats.compressed < 0:
            vmstats.error += '*'
        if vmstats.compressed_peak < 0:
            vmstats.error += '*'
        if vmstats.compressed_lifetime < 0:
            vmstats.error += '*'
        if vmstats.new_resident_count + vmstats.reusable != vmstats.resident_count:
            vmstats.error += '*'

        print entry_format.format(p=proc, m=vmmap, vsize=(unsigned(vmmap.size) / page_size), t=task, s=vmstats)
def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
    """ Routine to print out a summary listing of all the entries in a vm_map
        params:
            task - core.value : a object of type 'task *'
        returns:
            None
    """
    print "vm_map entries for task " + hex(task)
    print GetTaskSummary.header
    print GetTaskSummary(task)
    if not task.map:
        print "Task {0: <#020x} has map = 0x0"
        return None
    print GetVMMapSummary.header
    print GetVMMapSummary(task.map)
    vme_list_head = task.map.hdr.links
    vme_ptr_type = GetType('vm_map_entry *')
    print GetVMEntrySummary.header
    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
        print GetVMEntrySummary(vme, show_pager_info, show_all_shadows)
    return None
@lldb_command("showmap")
def ShowMap(cmd_args=None):
    """ Routine to print out info about the specified vm_map
        usage: showmap <vm_map>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Invalid argument.", ShowMap.__doc__
        return
    map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
    print GetVMMapSummary.header
    print GetVMMapSummary(map_val)
@lldb_command("showmapvme")
def ShowMapVME(cmd_args=None):
    """Routine to print out info about the specified vm_map and its vm entries
        usage: showmapvme <vm_map>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Invalid argument.", ShowMapVME.__doc__
        return
    map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
    print GetVMMapSummary.header
    print GetVMMapSummary(map_val)
    vme_list_head = map_val.hdr.links
    vme_ptr_type = GetType('vm_map_entry *')
    print GetVMEntrySummary.header
    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
        print GetVMEntrySummary(vme)
    return None
@lldb_type_summary(['_vm_map *', 'vm_map_t'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: >5s} {4: >5s} {5: <20s} {6: <20s}".format("vm_map", "pmap", "vm_size", "#ents", "rpage", "hint", "first_free"))
def GetVMMapSummary(vmmap):
    """ Display interesting bits from vm_map struct """
    out_string = ""
    format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >5d} {4: >5d} {5: <#020x} {6: <#020x}"
    vm_size = uint64_t(vmmap.size).value
    resident_pages = 0
    if vmmap.pmap != 0: resident_pages = int(vmmap.pmap.stats.resident_count)
    first_free = 0
    if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s.first_free
    out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free)
    return out_string
@lldb_type_summary(['vm_map_entry'])
@header("{0: <20s} {1: <20s} {2: <5s} {3: >7s} {4: <20s} {5: <20s}".format("entry", "start", "prot", "#page", "object", "offset"))
def GetVMEntrySummary(vme):
    """ Display vm entry specific information. """
    page_size = kern.globals.page_size
    out_string = ""
    format_string = "{0: <#020x} {1: <#20x} {2: <1x}{3: <1x}{4: <3s} {5: >7d} {6: <#020x} {7: <#020x}"
    vme_protection = int(vme.protection)
    vme_max_protection = int(vme.max_protection)
    vme_extra_info_str = "SC-Ds"[int(vme.inheritance)]
    if int(vme.is_sub_map) != 0:
        vme_extra_info_str += "s"
    elif int(vme.needs_copy) != 0:
        vme_extra_info_str += "n"
    num_pages = (unsigned(vme.links.end) - unsigned(vme.links.start)) / page_size
    out_string += format_string.format(vme, vme.links.start, vme_protection, vme_max_protection, vme_extra_info_str, num_pages, vme.vme_object.vmo_object, vme.vme_offset)
    return out_string
# EndMacro: showtaskvme
@lldb_command('showmapwired')
def ShowMapWired(cmd_args=None):
    """ Routine to print out a summary listing of all the entries with wired pages in a vm_map
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Invalid argument", ShowMapWired.__doc__
        return
    map_val = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
@lldb_type_summary(['kmod_info_t *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: >3s} {4: >5s} {5: <20s} {6: <20s} {7: >20s} {8: <30s}".format('kmod_info', 'address', 'size', 'id', 'refs', 'TEXT exec', 'size', 'version', 'name'))
def GetKextSummary(kmod):
    """ returns a string representation of kext information
    """
    out_string = ""
    format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >3d} {4: >5d} {5: <#020x} {6: <#020x} {7: >20s} {8: <30s}"
    segments, sections = GetAllSegmentsAndSectionsFromDataInMemory(unsigned(kmod.address), unsigned(kmod.size))
    text_segment = macho.get_text_segment(segments)
    if not text_segment:
        text_segment = segments[0]
    out_string += format_string.format(kmod, kmod.address, kmod.size, kmod.id, kmod.reference_count, text_segment.vmaddr, text_segment.vmsize, kmod.version, kmod.name)
    return out_string
@lldb_type_summary(['uuid_t'])
def GetUUIDSummary(uuid):
    """ returns a string representation like CA50DA4C-CA10-3246-B8DC-93542489AA26
    """
    arr = Cast(addressof(uuid), 'uint8_t *')
    data = []
    for i in range(16):
        data.append(int(arr[i]))
    return "{a[0]:02X}{a[1]:02X}{a[2]:02X}{a[3]:02X}-{a[4]:02X}{a[5]:02X}-{a[6]:02X}{a[7]:02X}-{a[8]:02X}{a[9]:02X}-{a[10]:02X}{a[11]:02X}{a[12]:02X}{a[13]:02X}{a[14]:02X}{a[15]:02X}".format(a=data)
@lldb_command('showallkmods')
def ShowAllKexts(cmd_args=None):
    """Display a summary listing of all loaded kexts (alias: showallkmods)
    """
    kmod_val = kern.globals.kmod
    kextuuidinfo = GetKextLoadInformation(show_progress=(config['verbosity'] > vHUMAN))
    print "{: <36s} ".format("UUID") + GetKextSummary.header
    for kval in IterateLinkedList(kmod_val, 'next'):
        uuid = "........-....-....-....-............"
        kaddr = unsigned(kval.address)
        found_kext_summary = None
        for l in kextuuidinfo:
            if kaddr == int(l[3],16):
                uuid = l[0]
                found_kext_summary = l
                break
        if found_kext_summary:
            _ksummary = GetKextSummary(found_kext_summary[7])
        else:
            _ksummary = GetKextSummary(kval)
        print uuid + " " + _ksummary
def GetKmodWithAddr(addr):
    """ Go through kmod list and find one with begin_addr as addr
        returns: None if not found. else a cvalue of type kmod
    """
    kmod_val = kern.globals.kmod
    for kval in IterateLinkedList(kmod_val, 'next'):
        if addr == unsigned(kval.address):
            return kval
    return None
def GetAllSegmentsAndSectionsFromDataInMemory(address, size):
    """ reads memory at address and parses mach_header to get segment and section information
        returns: Tuple of (segments_list, sections_list) like ([MachOSegment,...], [MachOSegment, ...])
                 where MachOSegment has fields like 'name vmaddr vmsize fileoff filesize'
                 if TEXT segment is not found a dummy segment & section with address, size is returned.
    """
    cache_hash = "kern.kexts.segments.{}.{}".format(address, size)
    cached_result = caching.GetDynamicCacheData(cache_hash, ())
    if cached_result:
        return cached_result

    defval = macho.MachOSegment('__TEXT', address, size, 0, size)
    if address == 0 or size == 0:
        return ([defval], [defval])

    # if int(kern.globals.gLoadedKextSummaries.version) <= 2:
    # until we have separate version. we will pay penalty only on arm64 devices
    if kern.arch not in ('arm64',):
        return ([defval], [defval])

    restrict_size_to_read = 1536
    machoObject = None
    while machoObject is None:
        err = lldb.SBError()
        size_to_read = min(size, restrict_size_to_read)
        data = LazyTarget.GetProcess().ReadMemory(address, size_to_read, err)
        if not err.Success():
            print "Failed to read memory at {} and size {}".format(address, size_to_read)
            return ([defval], [defval])
        try:
            m = macho.MemMacho(data, len(data))
            machoObject = m
        except Exception as e:
            if str(e.message).find('unpack requires a string argument') >= 0:
                # this may be due to short read of memory. Lets do double read size.
                restrict_size_to_read *= 2
                debuglog("Bumping mach header read size to {}".format(restrict_size_to_read))
                continue
            else:
                print "Failed to read MachO for address {} errormessage: {}".format(address, e.message)
                return ([defval], [defval])
    # end of while loop. We have machoObject defined
    segments = machoObject.get_segments_with_name('')
    sections = machoObject.get_sections_with_name('')
    rval = (segments, sections)
    caching.SaveDynamicCacheData(cache_hash, rval)
    return rval
def GetKextLoadInformation(addr=0, show_progress=False):
    """ Extract the kext uuid and load address information from the kernel data structure.
        params:
            addr - int - optional integer that is the address to search for.
        returns:
            [] - array with each entry of format
                 ( 'UUID', 'Hex Load Address of __TEXT or __TEXT_EXEC section', 'name',
                   'addr of macho header', [macho.MachOSegment,..], [MachoSection,...], kext, kmod_obj)
    """
    cached_result = caching.GetDynamicCacheData("kern.kexts.loadinformation", [])
    # if specific addr is provided then ignore caching
    if cached_result and not addr:
        return cached_result

    # because of <rdar://problem/12683084>, we can't find summaries directly
    #addr = hex(addressof(kern.globals.gLoadedKextSummaries.summaries))
    baseaddr = unsigned(kern.globals.gLoadedKextSummaries) + 0x10
    summaries_begin = kern.GetValueFromAddress(baseaddr, 'OSKextLoadedKextSummary *')
    total_summaries = int(kern.globals.gLoadedKextSummaries.numSummaries)
    kext_version = int(kern.globals.gLoadedKextSummaries.version)
    entry_size = 64 + 16 + 8 + 8 + 8 + 4 + 4
    if kext_version >= 2:
        entry_size = int(kern.globals.gLoadedKextSummaries.entry_size)
    retval = []
    for i in range(total_summaries):
        if show_progress:
            print "progress: {}/{}".format(i, total_summaries)
        tmpaddress = unsigned(summaries_begin) + (i * entry_size)
        current_kext = kern.GetValueFromAddress(tmpaddress, 'OSKextLoadedKextSummary *')
        # code to extract macho information
        segments, sections = GetAllSegmentsAndSectionsFromDataInMemory(unsigned(current_kext.address), unsigned(current_kext.size))
        seginfo = macho.get_text_segment(segments)
        if not seginfo:
            seginfo = segments[0]
        kmod_obj = GetKmodWithAddr(unsigned(current_kext.address))
        if addr == unsigned(current_kext.address) or addr == seginfo.vmaddr:
            return [(GetUUIDSummary(current_kext.uuid), hex(seginfo.vmaddr).rstrip('L'), str(current_kext.name), hex(current_kext.address), segments, seginfo, current_kext, kmod_obj)]
        retval.append((GetUUIDSummary(current_kext.uuid), hex(seginfo.vmaddr).rstrip('L'), str(current_kext.name), hex(current_kext.address), segments, seginfo, current_kext, kmod_obj))

    caching.SaveDynamicCacheData("kern.kexts.loadinformation", retval)
    return retval
lldb_alias('showallkexts', 'showallkmods')
def GetOSKextVersion(version_num):
    """ returns a string of format 1.2.3x from the version_num
        params: version_num - int
        return: str
    """
    if version_num == -1:
        return "invalid"
    (MAJ_MULT, MIN_MULT, REV_MULT, STAGE_MULT) = (100000000, 1000000, 10000, 1000)
    version = version_num

    vers_major = version / MAJ_MULT
    version = version - (vers_major * MAJ_MULT)

    vers_minor = version / MIN_MULT
    version = version - (vers_minor * MIN_MULT)

    vers_revision = version / REV_MULT
    version = version - (vers_revision * REV_MULT)

    vers_stage = version / STAGE_MULT
    version = version - (vers_stage * STAGE_MULT)

    vers_stage_level = version

    out_str = "%d.%d" % (vers_major, vers_minor)
    if vers_revision > 0: out_str += ".%d" % vers_revision
    if vers_stage == 1: out_str += "d%d" % vers_stage_level
    if vers_stage == 3: out_str += "a%d" % vers_stage_level
    if vers_stage == 5: out_str += "b%d" % vers_stage_level
    if vers_stage == 6: out_str += "fc%d" % vers_stage_level

    return out_str
@lldb_command('showallknownkmods')
def ShowAllKnownKexts(cmd_args=None):
    """ Display a summary listing of all kexts known in the system.
        This is particularly useful to find if some kext was unloaded before this crash'ed state.
    """
    kext_count = int(kern.globals.sKextsByID.count)
    index = 0
    kext_dictionary = kern.globals.sKextsByID.dictionary
    print "%d kexts in sKextsByID:" % kext_count
    print "{0: <20s} {1: <20s} {2: >5s} {3: >20s} {4: <30s}".format('OSKEXT *', 'load_addr', 'id', 'version', 'name')
    format_string = "{0: <#020x} {1: <20s} {2: >5s} {3: >20s} {4: <30s}"

    while index < kext_count:
        kext_dict = GetObjectAtIndexFromArray(kext_dictionary, index)
        kext_name = str(kext_dict.key.string)
        osk = Cast(kext_dict.value, 'OSKext *')
        if int(osk.flags.loaded):
            load_addr = "{0: <#020x}".format(osk.kmod_info)
            id = "{0: >5d}".format(osk.loadTag)
        else:
            load_addr = "------"
            id = "--"
        version_num = unsigned(osk.version)
        version = GetOSKextVersion(version_num)
        print format_string.format(osk, load_addr, id, version, kext_name)
        index += 1

    return
@lldb_command('showkmodaddr')
def ShowKmodAddr(cmd_args=[]):
    """ Given an address, print the offset and name for the kmod containing it
        Syntax: (lldb) showkmodaddr <addr>
    """
    if len(cmd_args) < 1:
        raise ArgumentError("Insufficient arguments")

    addr = ArgumentStringToInt(cmd_args[0])
    all_kexts_info = GetKextLoadInformation()
    found_kinfo = None
    found_segment = None
    for kinfo in all_kexts_info:
        s = macho.get_segment_with_addr(kinfo[4], addr)
        if s:
            found_segment = s
            found_kinfo = kinfo
            break
    if found_kinfo:
        print GetKextSummary.header
        print GetKextSummary(found_kinfo[7]) + " segment: {} offset = {:#0x}".format(found_segment.name, (addr - found_segment.vmaddr))
        return True
    return False
@lldb_command('addkext','AF:N:')
def AddKextSyms(cmd_args=[], cmd_options={}):
    """ Add kext symbols into lldb.
        This command finds symbols for a uuid and load the required executable
        Usage:
            addkext <uuid> : Load one kext based on uuid. eg. (lldb)addkext 4DD2344C0-4A81-3EAB-BDCF-FEAFED9EB73E
            addkext -F <abs/path/to/executable> <load_address> : Load kext executable at specified load address
            addkext -N <name> : Load one kext that matches the name provided. eg. (lldb) addkext -N corecrypto
            addkext -N <name> -A: Load all kext that matches the name provided. eg. to load all kext with Apple in name do (lldb) addkext -N Apple -A
            addkext all : Will load all the kext symbols - SLOW
    """

    if "-F" in cmd_options:
        exec_path = cmd_options["-F"]
        exec_full_path = ResolveFSPath(exec_path)
        if not os.path.exists(exec_full_path):
            raise ArgumentError("Unable to resolve {:s}".format(exec_path))

        if not os.path.isfile(exec_full_path):
            raise ArgumentError("Path is {:s} not a filepath. \nPlease check that path points to executable.\
\nFor ex. path/to/Symbols/IOUSBFamily.kext/Contents/PlugIns/AppleUSBHub.kext/Contents/MacOS/AppleUSBHub.\
\nNote: LLDB does not support adding kext based on directory paths like gdb used to.".format(exec_path))

        slide_value = None
        if cmd_args:
            slide_value = cmd_args[0]
            debuglog("loading slide value from user input %s" % cmd_args[0])

        filespec = lldb.SBFileSpec(exec_full_path, False)
        print "target modules add %s" % exec_full_path
        print lldb_run_command("target modules add %s" % exec_full_path)
        loaded_module = LazyTarget.GetTarget().FindModule(filespec)
        if loaded_module.IsValid():
            uuid_str = loaded_module.GetUUIDString()
            debuglog("added module %s with uuid %s" % (exec_full_path, uuid_str))
            if slide_value is None:
                all_kexts_info = GetKextLoadInformation()
                for k in all_kexts_info:
                    if k[0].lower() == uuid_str.lower():
                        slide_value = k[1]
                        debuglog("found the slide %s for uuid %s" % (k[1], k[0]))
        if slide_value is None:
            raise ArgumentError("Unable to find load address for module described at %s " % exec_full_path)
        load_cmd = "target modules load --file %s --slide %s" % (exec_full_path, str(slide_value))
        print load_cmd
        print lldb_run_command(load_cmd)
        kern.symbolicator = None
        return True

    all_kexts_info = GetKextLoadInformation()

    if "-N" in cmd_options:
        kext_name = cmd_options["-N"]
        kext_name_matches = GetLongestMatchOption(kext_name, [str(x[2]) for x in all_kexts_info], True)
        if len(kext_name_matches) != 1 and "-A" not in cmd_options:
            print "Ambiguous match for name: {:s}".format(kext_name)
            if len(kext_name_matches) > 0:
                print "Options are:\n\t" + "\n\t".join(kext_name_matches)
            return
        debuglog("matched the kext to name %s and uuid %s" % (kext_name_matches[0], kext_name))
        for cur_knm in kext_name_matches:
            for x in all_kexts_info:
                if x[2] == cur_knm:
                    cur_uuid = x[0].lower()
                    print "Fetching dSYM for {:s}".format(cur_uuid)
                    info = dsymForUUID(cur_uuid)
                    if info and 'DBGSymbolRichExecutable' in info:
                        print "Adding dSYM ({0:s}) for {1:s}".format(cur_uuid, info['DBGSymbolRichExecutable'])
                        addDSYM(cur_uuid, info)
                        loadDSYM(cur_uuid, int(x[1],16), x[4])
                    else:
                        print "Failed to get symbol info for {:s}".format(cur_uuid)
                    break
        kern.symbolicator = None
        return

    if len(cmd_args) < 1:
        raise ArgumentError("No arguments specified.")

    uuid = cmd_args[0].lower()

    load_all_kexts = False
    if uuid == "all":
        load_all_kexts = True

    if not load_all_kexts and len(uuid_regex.findall(uuid)) == 0:
        raise ArgumentError("Unknown argument {:s}".format(uuid))

    for k_info in all_kexts_info:
        cur_uuid = k_info[0].lower()
        if load_all_kexts or (uuid == cur_uuid):
            print "Fetching dSYM for %s" % cur_uuid
            info = dsymForUUID(cur_uuid)
            if info and 'DBGSymbolRichExecutable' in info:
                print "Adding dSYM (%s) for %s" % (cur_uuid, info['DBGSymbolRichExecutable'])
                addDSYM(cur_uuid, info)
                loadDSYM(cur_uuid, int(k_info[1],16), k_info[4])
            else:
                print "Failed to get symbol info for %s" % cur_uuid

    kern.symbolicator = None
    return True
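# Typical interactive flow (the UUID below is a placeholder): list the loaded
# kexts first, then feed one of the reported UUIDs back to addkext, e.g.
#   (lldb) showallkmods
#   (lldb) addkext D14B4E47-0000-0000-0000-000000000000
# The symbol lookup goes through the dsymForUUID/addDSYM/loadDSYM helpers used above.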
lldb_alias('showkmod', 'showkmodaddr')
lldb_alias('showkext', 'showkmodaddr')
lldb_alias('showkextaddr', 'showkmodaddr')
@lldb_type_summary(['mount *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: <12s} {4: <12s} {5: <12s} {6: >6s} {7: <30s} {8: <35s} {9: <30s}".format('volume(mp)', 'mnt_data', 'mnt_devvp', 'flag', 'kern_flag', 'lflag', 'type', 'mnton', 'mntfrom', 'iosched supported'))
def GetMountSummary(mount):
    """ Display a summary of mount on the system
    """
    out_string = ("{mnt: <#020x} {mnt.mnt_data: <#020x} {mnt.mnt_devvp: <#020x} {mnt.mnt_flag: <#012x} " +
                  "{mnt.mnt_kern_flag: <#012x} {mnt.mnt_lflag: <#012x} {vfs.f_fstypename: >6s} " +
                  "{vfs.f_mntonname: <30s} {vfs.f_mntfromname: <35s} {iomode: <30s}").format(mnt=mount, vfs=mount.mnt_vfsstat, iomode=('Yes' if (mount.mnt_ioflags & 0x4) else 'No'))
    return out_string
@lldb_command('showallmounts')
def ShowAllMounts(cmd_args=None):
    """ Print all mount points
    """
    mntlist = kern.globals.mountlist
    print GetMountSummary.header
    for mnt in IterateTAILQ_HEAD(mntlist, 'mnt_list'):
        print GetMountSummary(mnt)

lldb_alias('ShowAllVols', 'showallmounts')
@lldb_command('systemlog')
def ShowSystemLog(cmd_args=None):
    """ Display the kernel's printf ring buffer """
    msgbufp = kern.globals.msgbufp
    msg_size = int(msgbufp.msg_size)
    msg_bufx = int(msgbufp.msg_bufx)
    msg_bufr = int(msgbufp.msg_bufr)
    msg_bufc = msgbufp.msg_bufc
    msg_bufc_data = msg_bufc.GetSBValue().GetPointeeData(0, msg_size)

    # the buffer is circular; start at the write pointer to end,
    # then from beginning to write pointer
    line = ''
    err = lldb.SBError()
    for i in range(msg_bufx, msg_size) + range(0, msg_bufx) :
        cbyte = msg_bufc_data.GetUnsignedInt8(err, i)
        if not err.Success() :
            raise ValueError("Failed to read character at offset " + str(i) + ": " + err.GetCString())
        c = chr(cbyte)
        if c == '\0' :
            continue
        elif c == '\n' :
            print line
            line = ''
        else :
            line += c

    if len(line) > 0 :
        print line

    return
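# Usage note: "systemlog" takes no arguments; it walks the circular msgbuf
# starting at the current write index (msg_bufx), so the oldest surviving
# lines print first and the newest line prints last.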
@static_var('output','')
def _GetVnodePathName(vnode, vnodename):
    """ Internal function to get vnode path string from vnode structure.
        params: vnode - core.value
                vnodename - str
        returns Nothing. The output will be stored in the static variable.
    """
    if int(vnode.v_flag) & 0x1 and int(hex(vnode.v_mount), 16) != 0:
        if int(vnode.v_mount.mnt_vnodecovered):
            _GetVnodePathName(vnode.v_mount.mnt_vnodecovered, str(vnode.v_mount.mnt_vnodecovered.v_name))
    else:
        _GetVnodePathName(vnode.v_parent, str(vnode.v_parent.v_name))
        _GetVnodePathName.output += "/%s" % vnodename
def GetVnodePath(vnode):
    """ Get string representation of the vnode
        params: vnodeval - value representing vnode * in the kernel
        return: str - of format /path/to/something
    """
    out_str = ''
    if vnode:
        if (int(vnode.v_flag) & 0x000001) and int(hex(vnode.v_mount), 16) != 0 and (int(vnode.v_mount.mnt_flag) & 0x00004000) :
            out_str += "/"
        else:
            _GetVnodePathName.output = ''
            if abs(vnode.v_name) != 0:
                _GetVnodePathName(vnode, str(vnode.v_name))
                out_str += _GetVnodePathName.output
            else:
                out_str += 'v_name = NULL'
            _GetVnodePathName.output = ''
    return out_str
@lldb_command('showvnodepath')
def ShowVnodePath(cmd_args=None):
    """ Prints the path for a vnode
        usage: showvnodepath <vnode>
    """
    if cmd_args != None and len(cmd_args) > 0 :
        vnode_val = kern.GetValueFromAddress(cmd_args[0], 'vnode *')
        if vnode_val:
            print GetVnodePath(vnode_val)
    return
# Macro: showvnodedev
def GetVnodeDevInfo(vnode):
    """ Internal function to get information from the device type vnodes
        params: vnode - value representing struct vnode *
        return: str - formatted output information for block and char vnode types passed as param
    """
    vnodedev_output = ""
    vblk_type = GetEnumValue('vtype::VBLK')
    vchr_type = GetEnumValue('vtype::VCHR')
    if (vnode.v_type == vblk_type) or (vnode.v_type == vchr_type):
        devnode = Cast(vnode.v_data, 'devnode_t *')
        devnode_dev = devnode.dn_typeinfo.dev
        devnode_major = (devnode_dev >> 24) & 0xff
        devnode_minor = devnode_dev & 0x00ffffff

        # boilerplate device information for a vnode
        vnodedev_output += "Device Info:\n\t vnode:\t\t{:#x}".format(vnode)
        vnodedev_output += "\n\t type:\t\t"
        if (vnode.v_type == vblk_type):
            vnodedev_output += "VBLK"
        if (vnode.v_type == vchr_type):
            vnodedev_output += "VCHR"
        vnodedev_output += "\n\t name:\t\t{:<s}".format(vnode.v_name)
        vnodedev_output += "\n\t major, minor:\t{:d},{:d}".format(devnode_major, devnode_minor)
        vnodedev_output += "\n\t mode\t\t0{:o}".format(unsigned(devnode.dn_mode))
        vnodedev_output += "\n\t owner (u,g):\t{:d} {:d}".format(devnode.dn_uid, devnode.dn_gid)

        # decode device specific data
        vnodedev_output += "\nDevice Specific Information:\t"
        if (vnode.v_type == vblk_type):
            vnodedev_output += "Sorry, I do not know how to decode block devices yet!"
            vnodedev_output += "\nMaybe you can write me!"

        if (vnode.v_type == vchr_type):
            # Device information; this is scanty
            if (devnode_major > 42) or (devnode_major < 0):
                vnodedev_output += "Invalid major #\n"
            # static assignments in conf
            elif (devnode_major == 0):
                vnodedev_output += "Console mux device\n"
            elif (devnode_major == 2):
                vnodedev_output += "Current tty alias\n"
            elif (devnode_major == 3):
                vnodedev_output += "NULL device\n"
            elif (devnode_major == 4):
                vnodedev_output += "Old pty slave\n"
            elif (devnode_major == 5):
                vnodedev_output += "Old pty master\n"
            elif (devnode_major == 6):
                vnodedev_output += "Kernel log\n"
            elif (devnode_major == 12):
                vnodedev_output += "Memory devices\n"
            # Statically linked dynamic assignments
            elif unsigned(kern.globals.cdevsw[devnode_major].d_open) == unsigned(kern.GetLoadAddressForSymbol('ptmx_open')):
                vnodedev_output += "Cloning pty master not done\n"
                #GetVnodeDevCpty(devnode_major, devnode_minor)
            elif unsigned(kern.globals.cdevsw[devnode_major].d_open) == unsigned(kern.GetLoadAddressForSymbol('ptsd_open')):
                vnodedev_output += "Cloning pty slave not done\n"
                #GetVnodeDevCpty(devnode_major, devnode_minor)
            else:
                vnodedev_output += "RESERVED SLOT\n"
    else:
        vnodedev_output += "{:#x} is not a device".format(vnode)
    return vnodedev_output
@lldb_command('showvnodedev')
def ShowVnodeDev(cmd_args=None):
    """ Routine to display details of all vnodes of block and character device types
        Usage: showvnodedev <address of vnode>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowVnodeDev.__doc__
        return False
    vnode_val = kern.GetValueFromAddress(cmd_args[0], 'vnode *')
    if not vnode_val:
        print "unknown arguments:", str(cmd_args)
        return False
    print GetVnodeDevInfo(vnode_val)

# EndMacro: showvnodedev
# Macro: showvnodelocks
def GetVnodeLock(lockf):
    """ Internal function to get information from the given advisory lock
        params: lockf - value representing v_lockf member in struct vnode *
        return: str - formatted output information for the advisory lock
    """
    vnode_lock_output = ''
    lockf_flags = lockf.lf_flags
    lockf_type = lockf.lf_type
    if lockf_flags & 0x20:
        vnode_lock_output += ("{: <8s}").format('flock')
    if lockf_flags & 0x40:
        vnode_lock_output += ("{: <8s}").format('posix')
    if lockf_flags & 0x80:
        vnode_lock_output += ("{: <8s}").format('prov')
    if lockf_flags & 0x10:
        vnode_lock_output += ("{: <4s}").format('W')
    if lockf_flags & 0x400:
        vnode_lock_output += ("{: <8s}").format('ofd')
    else:
        vnode_lock_output += ("{: <4s}").format('.')

    # POSIX file vs. advisory range locks
    if lockf_flags & 0x40:
        lockf_proc = Cast(lockf.lf_id, 'proc *')
        vnode_lock_output += ("PID {: <18d}").format(lockf_proc.p_pid)
    else:
        vnode_lock_output += ("ID {: <#019x}").format(int(lockf.lf_id))

    # lock type (fcntl lock types: F_RDLCK == 1, F_UNLCK == 2, F_WRLCK == 3)
    if lockf_type == 1:
        vnode_lock_output += ("{: <12s}").format('shared')
    elif lockf_type == 3:
        vnode_lock_output += ("{: <12s}").format('exclusive')
    elif lockf_type == 2:
        vnode_lock_output += ("{: <12s}").format('unlock')
    else:
        vnode_lock_output += ("{: <12s}").format('unknown')

    # start and stop values
    vnode_lock_output += ("{: #018x} ..").format(lockf.lf_start)
    vnode_lock_output += ("{: #018x}\n").format(lockf.lf_end)
    return vnode_lock_output
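# The magic numbers above are assumed to mirror the advisory-lock flag bits in
# bsd/sys/lockf.h (0x10 ~ F_WAIT, 0x20 ~ F_FLOCK, 0x40 ~ F_POSIX, 0x80 ~ F_PROV,
# 0x400 ~ F_OFD_LOCK) and the fcntl lock types noted in the comment above;
# confirm them against the headers that match your kernel build.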
@header("{0: <3s} {1: <7s} {2: <3s} {3: <21s} {4: <11s} {5: ^19s} {6: ^17s}".format('*', 'type', 'W', 'held by', 'lock type', 'start', 'end'))
def GetVnodeLocksSummary(vnode):
    """ Internal function to get summary of advisory locks for the given vnode
        params: vnode - value representing the vnode object
        return: str - formatted output information for the summary of advisory locks
    """
    out_str = ''
    if vnode:
        lockf_list = vnode.v_lockf
        for lockf_itr in IterateLinkedList(lockf_list, 'lf_next'):
            out_str += ("{: <4s}").format('H')
            out_str += GetVnodeLock(lockf_itr)
            lockf_blocker = lockf_itr.lf_blkhd.tqh_first
            while lockf_blocker:
                out_str += ("{: <4s}").format('>')
                out_str += GetVnodeLock(lockf_blocker)
                lockf_blocker = lockf_blocker.lf_block.tqe_next
    return out_str
@lldb_command('showvnodelocks')
def ShowVnodeLocks(cmd_args=None):
    """ Routine to display list of advisory record locks for the given vnode address
        Usage: showvnodelocks <address of vnode>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowVnodeLocks.__doc__
        return False
    vnode_val = kern.GetValueFromAddress(cmd_args[0], 'vnode *')
    if not vnode_val:
        print "unknown arguments:", str(cmd_args)
        return False
    print GetVnodeLocksSummary.header
    print GetVnodeLocksSummary(vnode_val)

# EndMacro: showvnodelocks
# Macro: showproclocks

@lldb_command('showproclocks')
def ShowProcLocks(cmd_args=None):
    """ Routine to display list of advisory record locks for the given process
        Usage: showproclocks <address of proc>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowProcLocks.__doc__
        return False
    proc = kern.GetValueFromAddress(cmd_args[0], 'proc *')
    if not proc:
        print "unknown arguments:", str(cmd_args)
        return False
    out_str = ''
    proc_filedesc = proc.p_fd
    fd_lastfile = proc_filedesc.fd_lastfile
    fd_ofiles = proc_filedesc.fd_ofiles
    count = 0
    seen = 0
    while count <= fd_lastfile:
        if fd_ofiles[count]:
            fglob = fd_ofiles[count].f_fglob
            fo_type = fglob.fg_ops.fo_type
            if fo_type == 1:
                fg_data = fglob.fg_data
                fg_vnode = Cast(fg_data, 'vnode *')
                name = fg_vnode.v_name
                lockf_itr = fg_vnode.v_lockf
                if lockf_itr:
                    if not seen:
                        print GetVnodeLocksSummary.header
                    seen = seen + 1
                    out_str += ("\n( fd {:d}, name ").format(count)
                    if not name:
                        out_str += "(null) )\n"
                    else:
                        out_str += "{:s} )\n".format(name)
                    print out_str
                    print GetVnodeLocksSummary(fg_vnode)
        count = count + 1
    print "\n{0: d} total locks for {1: #018x}".format(seen, proc)

# EndMacro: showproclocks
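# Example (addresses are placeholders): inspect a process's advisory locks with
#   (lldb) showproclocks 0xffffff80223a5b40
# or a single file's locks with
#   (lldb) showvnodelocks 0xffffff8030aa1200
# Rows prefixed 'H' are held locks; '>' rows are waiters blocked on the lock above.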
@lldb_type_summary(['vnode_t', 'vnode *'])
@header("{0: <20s} {1: >8s} {2: >8s} {3: <20s} {4: <6s} {5: <20s} {6: <6s} {7: <6s} {8: <35s}".format('vnode', 'usecount', 'iocount', 'v_data', 'vtype', 'parent', 'mapped', 'cs_version', 'name'))
def GetVnodeSummary(vnode):
    """ Get a summary of important information out of vnode
    """
    out_str = ''
    format_string = "{0: <#020x} {1: >8d} {2: >8d} {3: <#020x} {4: <6s} {5: <#020x} {6: <6s} {7: <6s} {8: <35s}"
    usecount = int(vnode.v_usecount)
    iocount = int(vnode.v_iocount)
    v_data_ptr = int(hex(vnode.v_data), 16)
    vtype = int(vnode.v_type)
    vtype_str = "%d" % vtype
    vnode_types = ['VNON', 'VREG', 'VDIR', 'VBLK', 'VCHR', 'VLNK', 'VSOCK', 'VFIFO', 'VBAD', 'VSTR', 'VCPLX'] # see vnode.h for enum type definition
    if vtype >= 0 and vtype < len(vnode_types):
        vtype_str = vnode_types[vtype]
    parent_ptr = int(hex(vnode.v_parent), 16)
    name_ptr = int(hex(vnode.v_name), 16)
    name = ""
    if name_ptr != 0:
        name = str(vnode.v_name)
    elif int(vnode.v_tag) == 16 :
        cnode = Cast(vnode.v_data, 'cnode *')
        name = "hfs: %s" % str( Cast(cnode.c_desc.cd_nameptr, 'char *'))
    mapped = '-'
    csblob_version = '-'
    if (vtype == 1) and (vnode.v_un.vu_ubcinfo != 0):
        csblob_version = '{: <6d}'.format(vnode.v_un.vu_ubcinfo.cs_add_gen)
        # Check to see if vnode is mapped/unmapped
        if (vnode.v_un.vu_ubcinfo.ui_flags & 0x8) != 0:
            mapped = '1'
        else:
            mapped = '0'
    out_str += format_string.format(vnode, usecount, iocount, v_data_ptr, vtype_str, parent_ptr, mapped, csblob_version, name)
    return out_str
@lldb_command('showallvnodes')
def ShowAllVnodes(cmd_args=None):
    """ Display info about all vnodes
    """
    mntlist = kern.globals.mountlist
    print GetVnodeSummary.header
    for mntval in IterateTAILQ_HEAD(mntlist, 'mnt_list'):
        for vnodeval in IterateTAILQ_HEAD(mntval.mnt_vnodelist, 'v_mntvnodes'):
            print GetVnodeSummary(vnodeval)
@lldb_command('showvnode')
def ShowVnode(cmd_args=None):
    """ Display info about one vnode
        usage: showvnode <vnode>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Please provide valid vnode argument. Type help showvnode for help."
        return
    vnodeval = kern.GetValueFromAddress(cmd_args[0],'vnode *')
    print GetVnodeSummary.header
    print GetVnodeSummary(vnodeval)
@lldb_command('showvolvnodes')
def ShowVolVnodes(cmd_args=None):
    """ Display info about all vnodes of a given mount_t
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Please provide a valid mount_t argument. Try 'help showvolvnodes' for help"
        return
    mntval = kern.GetValueFromAddress(cmd_args[0], 'mount_t')
    print GetVnodeSummary.header
    for vnodeval in IterateTAILQ_HEAD(mntval.mnt_vnodelist, 'v_mntvnodes'):
        print GetVnodeSummary(vnodeval)
@lldb_command('showvolbusyvnodes')
def ShowVolBusyVnodes(cmd_args=None):
    """ Display info about busy (iocount!=0) vnodes of a given mount_t
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Please provide a valid mount_t argument. Try 'help showvolbusyvnodes' for help"
        return
    mntval = kern.GetValueFromAddress(cmd_args[0], 'mount_t')
    print GetVnodeSummary.header
    for vnodeval in IterateTAILQ_HEAD(mntval.mnt_vnodelist, 'v_mntvnodes'):
        if int(vnodeval.v_iocount) != 0:
            print GetVnodeSummary(vnodeval)
@lldb_command('showallbusyvnodes')
def ShowAllBusyVnodes(cmd_args=None):
    """ Display info about all busy (iocount!=0) vnodes
    """
    mntlistval = kern.globals.mountlist
    for mntval in IterateTAILQ_HEAD(mntlistval, 'mnt_list'):
        ShowVolBusyVnodes([hex(mntval)])
@lldb_command('print_vnode')
def PrintVnode(cmd_args=None):
    """ Prints out the fields of a vnode struct
        Usage: print_vnode <vnode>
    """
    if not cmd_args:
        print "Please provide valid vnode argument. Type help print_vnode for help."
        return
    ShowVnode(cmd_args)
@lldb_command('showworkqvnodes')
def ShowWorkqVnodes(cmd_args=None):
    """ Print the vnode worker list
        Usage: showworkqvnodes <struct mount *>
    """
    if not cmd_args:
        print "Please provide valid mount argument. Type help showworkqvnodes for help."
        return

    mp = kern.GetValueFromAddress(cmd_args[0], 'mount *')
    vp = Cast(mp.mnt_workerqueue.tqh_first, 'vnode *')
    print GetVnodeSummary.header
    while int(vp) != 0:
        print GetVnodeSummary(vp)
        vp = vp.v_mntvnodes.tqe_next
@lldb_command('shownewvnodes')
def ShowNewVnodes(cmd_args=None):
    """ Print the new vnode list
        Usage: shownewvnodes <struct mount *>
    """
    if not cmd_args:
        print "Please provide valid mount argument. Type help shownewvnodes for help."
        return
    mp = kern.GetValueFromAddress(cmd_args[0], 'mount *')
    vp = Cast(mp.mnt_newvnodes.tqh_first, 'vnode *')
    print GetVnodeSummary.header
    while int(vp) != 0:
        print GetVnodeSummary(vp)
        vp = vp.v_mntvnodes.tqe_next
@lldb_command('showprocvnodes')
def ShowProcVnodes(cmd_args=None):
    """ Routine to print out all the open fds which are vnodes in a process
        Usage: showprocvnodes <proc *>
    """
    if not cmd_args:
        print "Please provide valid proc argument. Type help showprocvnodes for help."
        return
    procptr = kern.GetValueFromAddress(cmd_args[0], 'proc *')
    fdptr = Cast(procptr.p_fd, 'filedesc *')
    if int(fdptr.fd_cdir) != 0:
        print '{0: <25s}\n{1: <s}\n{2: <s}'.format('Current Working Directory:', GetVnodeSummary.header, GetVnodeSummary(fdptr.fd_cdir))
    if int(fdptr.fd_rdir) != 0:
        print '{0: <25s}\n{1: <s}\n{2: <s}'.format('Current Root Directory:', GetVnodeSummary.header, GetVnodeSummary(fdptr.fd_rdir))
    count = 0
    print '\n' + '{0: <5s} {1: <7s}'.format('fd', 'flags') + GetVnodeSummary.header
    # Hack to get around <rdar://problem/12879494> llb fails to cast addresses to double pointers
    fpptr = Cast(fdptr.fd_ofiles, 'fileproc *')
    while count < fdptr.fd_nfiles:
        fpp = dereference(fpptr)
        fproc = Cast(fpp, 'fileproc *')
        if int(fproc) != 0:
            fglob = dereference(fproc).f_fglob
            flags = ""
            if (int(fglob) != 0) and (int(fglob.fg_ops.fo_type) == 1):
                if (fdptr.fd_ofileflags[count] & 1):    flags += 'E'
                if (fdptr.fd_ofileflags[count] & 2):    flags += 'F'
                if (fdptr.fd_ofileflags[count] & 4):    flags += 'R'
                if (fdptr.fd_ofileflags[count] & 8):    flags += 'C'
                print '{0: <5d} {1: <7s}'.format(count, flags) + GetVnodeSummary(Cast(fglob.fg_data, 'vnode *'))
        count += 1
        fpptr = kern.GetValueFromAddress(int(fpptr) + kern.ptrsize,'fileproc *')
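# Example (address is a placeholder):
#   (lldb) showprocvnodes 0xffffff80223a5b40
# prints the cwd/root vnodes and then one row per open vnode-backed fd; the
# flags column letters come from the fd_ofileflags bits tested above.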
@lldb_command('showallprocvnodes')
def ShowAllProcVnodes(cmd_args=None):
    """ Routine to print out all the open fds which are vnodes
    """
    procptr = Cast(kern.globals.allproc.lh_first, 'proc *')
    while procptr and int(procptr) != 0:
        print '{:<s}'.format("=" * 106)
        print GetProcInfo(procptr)
        ShowProcVnodes([int(procptr)])
        procptr = procptr.p_list.le_next
@xnudebug_test('test_vnode')
def TestShowAllVnodes(kernel_target, config, lldb_obj, isConnected ):
    """ Test the functionality of vnode related commands
        returns
         - False on failure
         - True on success
    """
    if not isConnected:
        print "Target is not connected. Cannot test memstats"
        return False
    res = lldb.SBCommandReturnObject()
    lldb_obj.debugger.GetCommandInterpreter().HandleCommand("showallvnodes", res)
    result = res.GetOutput()
    if len(result.split("\n")) > 2 and result.find('VREG') != -1 and len(result.splitlines()[2].split()) > 5:
        return True
    else:
        return False
# Macro: showallmtx
@lldb_type_summary(['_lck_grp_ *'])
def GetMutexEntry(mtxg):
    """ Summarize a mutex group entry with important information.
        params:
        mtxg: value - obj representing a mutex group in kernel
        returns:
        out_string - summary of the mutex group
    """
    out_string = ""

    if kern.ptrsize == 8:
        format_string = '{0:#018x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    else:
        format_string = '{0:#010x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '

    if mtxg.lck_grp_mtxcnt:
        out_string += format_string.format(mtxg, mtxg.lck_grp_mtxcnt, mtxg.lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt,
                                           mtxg.lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt,
                                           mtxg.lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt, mtxg.lck_grp_name)
    return out_string

@lldb_command('showallmtx')
def ShowAllMtx(cmd_args=None):
    """ Routine to print a summary listing of all mutexes
    """
    if kern.ptrsize == 8:
        hdr_format = '{:<18s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
    else:
        hdr_format = '{:<10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '

    print hdr_format.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')

    mtxgrp_queue_head = kern.globals.lck_grp_queue
    mtxgrp_ptr_type = GetType('_lck_grp_ *')

    for mtxgrp_ptr in IterateQueue(mtxgrp_queue_head, mtxgrp_ptr_type, "lck_grp_link"):
        print GetMutexEntry(mtxgrp_ptr)
    return
# EndMacro: showallmtx
# Macro: showallrwlck
@lldb_type_summary(['_lck_grp_ *'])
def GetRWLEntry(rwlg):
    """ Summarize a reader writer lock group with important information.
        params:
        rwlg: value - obj representing a reader writer lock group in kernel
        returns:
        out_string - summary of the reader writer lock group
    """
    out_string = ""

    if kern.ptrsize == 8:
        format_string = '{0:#018x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    else:
        format_string = '{0:#010x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '

    if rwlg.lck_grp_rwcnt:
        out_string += format_string.format(rwlg, rwlg.lck_grp_rwcnt, rwlg.lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt,
                                           rwlg.lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt,
                                           rwlg.lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt, rwlg.lck_grp_name)
    return out_string
@lldb_type_summary(['lck_mtx_t *'])
@header("===== Mutex Lock Summary =====")
def GetMutexLockSummary(mtx):
    """ Summarize mutex lock with important information.
        params:
        mtx: value - obj representing a mutex lock in kernel
        returns:
        out_str - summary of the mutex lock
    """
    if not mtx:
        return "Invalid lock value: 0x0"

    if kern.arch == "x86_64":
        out_str = "Lock Type : MUTEX\n"
        if mtx.lck_mtx_tag == 0x07ff1007 :
            out_str += "Tagged as indirect, printing ext lock at: {:#x}\n".format(mtx.lck_mtx_ptr)
            mtx = Cast(mtx.lck_mtx_ptr, 'lck_mtx_t *')

        if mtx.lck_mtx_tag == 0x07fe2007 :
            out_str += "*** Tagged as DESTROYED ({:#x}) ***\n".format(mtx.lck_mtx_tag)

        out_str += "Owner Thread : {mtx.lck_mtx_owner:#x}\n".format(mtx=mtx)
        out_str += "Number of Waiters : {mtx.lck_mtx_waiters:#x}\n".format(mtx=mtx)
        out_str += "ILocked : {mtx.lck_mtx_ilocked:#x}\n".format(mtx=mtx)
        out_str += "MLocked : {mtx.lck_mtx_mlocked:#x}\n".format(mtx=mtx)
        out_str += "Promoted : {mtx.lck_mtx_promoted:#x}\n".format(mtx=mtx)
        out_str += "Pri : {mtx.lck_mtx_pri:#x}\n".format(mtx=mtx)
        out_str += "Spin : {mtx.lck_mtx_spin:#x}\n".format(mtx=mtx)
        out_str += "Ext : {mtx.lck_mtx_is_ext:#x}\n".format(mtx=mtx)
        if mtx.lck_mtxd_pad32 == 0xFFFFFFFF :
            out_str += "Canary (valid) : {mtx.lck_mtxd_pad32:#x}\n".format(mtx=mtx)
        else:
            out_str += "Canary (INVALID) : {mtx.lck_mtxd_pad32:#x}\n".format(mtx=mtx)
        return out_str

    out_str = "Lock Type\t\t: MUTEX\n"
    out_str += "Owner Thread\t\t: {:#x}".format(mtx.lck_mtx_data & ~0x3)
    if (mtx.lck_mtx_data & ~0x3) == 0xfffffff0:
        out_str += " Held as spinlock"
    out_str += "\nNumber of Waiters\t: {:d}\n".format(mtx.lck_mtx_waiters)
    out_str += "Flags\t\t\t: "
    if mtx.lck_mtx_data & 0x1:
        out_str += "[Interlock Locked] "
    if mtx.lck_mtx_data & 0x2:
        out_str += "[Wait Flag]"
    return out_str
@lldb_type_summary(['lck_spin_t *'])
@header("===== SpinLock Summary =====")
def GetSpinLockSummary(spinlock):
    """ Summarize spinlock with important information.
        params:
        spinlock: value - obj representing a spinlock in kernel
        returns:
        out_str - summary of the spinlock
    """
    if not spinlock:
        return "Invalid lock value: 0x0"

    out_str = "Lock Type\t\t: SPINLOCK\n"
    if kern.arch == "x86_64":
        out_str += "Interlock\t\t: {:#x}\n".format(spinlock.interlock)
        return out_str

    lock_data = spinlock.hwlock.lock_data
    if lock_data == 1:
        out_str += "Invalid state: interlock is locked but no owner\n"
        return out_str
    out_str += "Owner Thread\t\t: "
    if lock_data == 0:
        out_str += "None\n"
    else:
        out_str += "{:#x}\n".format(lock_data & ~0x1)
        if (lock_data & 1) == 0:
            out_str += "Invalid state: owned but interlock bit is not set\n"
    return out_str
@lldb_command('showlock', 'MS')
def ShowLock(cmd_args=None, cmd_options={}):
    """ Show info about a lock - its state and owner thread details
        Usage: showlock <address of a lock>
        -M : to consider <addr> as lck_mtx_t
        -S : to consider <addr> as lck_spin_t
    """
    if not cmd_args:
        raise ArgumentError("Please specify the address of the lock whose info you want to view.")

    summary_str = ""
    lock = kern.GetValueFromAddress(cmd_args[0], 'uintptr_t*')

    if kern.arch == "x86_64" and lock:
        if "-M" in cmd_options:
            lock_mtx = Cast(lock, 'lck_mtx_t *')
            summary_str = GetMutexLockSummary(lock_mtx)
        elif "-S" in cmd_options:
            lock_spin = Cast(lock, 'lck_spin_t *')
            summary_str = GetSpinLockSummary(lock_spin)
        else:
            summary_str = "Please specify supported lock option(-M/-S)"

        print summary_str
    else:
        if lock:
            lock_mtx = Cast(lock, 'lck_mtx_t*')
            if lock_mtx.lck_mtx_type == 0x22:
                summary_str = GetMutexLockSummary(lock_mtx)

            lock_spin = Cast(lock, 'lck_spin_t*')
            if lock_spin.type == 0x11:
                summary_str = GetSpinLockSummary(lock_spin)

        if summary_str == "":
            summary_str = "Lock Type\t\t: INVALID LOCK"
        print summary_str
@lldb_command('showallrwlck')
def ShowAllRWLck(cmd_args=None):
    """ Routine to print a summary listing of all read/writer locks
    """
    if kern.ptrsize == 8:
        hdr_format = '{:<18s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
    else:
        hdr_format = '{:<10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '

    print hdr_format.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')

    rwlgrp_queue_head = kern.globals.lck_grp_queue
    rwlgrp_ptr_type = GetType('_lck_grp_ *')
    for rwlgrp_ptr in IterateQueue(rwlgrp_queue_head, rwlgrp_ptr_type, "lck_grp_link"):
        print GetRWLEntry(rwlgrp_ptr)
    return

# EndMacro: showallrwlck
#Macro: showbootermemorymap
@lldb_command('showbootermemorymap')
def ShowBooterMemoryMap(cmd_args=None):
    """ Prints out the phys memory map from kernelBootArgs
        Supported only on x86_64
    """
    if kern.arch == 'x86_64':
        voffset = unsigned(0xFFFFFF8000000000)
    else:
        print "showbootermemorymap not supported on this architecture"
        return

    out_string = ""

    # EFI memory range types; the short labels below follow the standard EFI
    # memory type enumeration (EfiReservedMemoryType == 0, and so on).
    memtype_dict = {
            0:  'Reserved',
            1:  'LoaderCode',
            2:  'LoaderData',
            3:  'BS_code',
            4:  'BS_data',
            5:  'RT_code',
            6:  'RT_data',
            7:  'Convention',
            8:  'Unusable',
            9:  'ACPI_recl',
            10: 'ACPI_NVS',
            11: 'MemMapIO',
            12: 'MemPortIO',
            13: 'PAL_code',
        }

    boot_args = kern.globals.kernelBootArgs
    msize = boot_args.MemoryMapDescriptorSize
    mcount = (boot_args.MemoryMapSize) / unsigned(msize)

    out_string += "{0: <12s} {1: <19s} {2: <19s} {3: <19s} {4: <10s}\n".format("Type", "Physical Start", "Number of Pages", "Virtual Start", "Attributes")

    i = 0
    while i < mcount:
        mptr = kern.GetValueFromAddress(unsigned(boot_args.MemoryMap) + voffset + unsigned(i*msize), 'EfiMemoryRange *')
        mtype = unsigned(mptr.Type)
        if mtype in memtype_dict:
            out_string += "{0: <12s}".format(memtype_dict[mtype])
        else:
            out_string += "{0: <12s}".format("UNKNOWN")

        if mptr.VirtualStart == 0:
            out_string += "{0: #019x} {1: #019x} {2: <19s} {3: #019x}\n".format(mptr.PhysicalStart, mptr.NumberOfPages, ' '*19, mptr.Attribute)
        else:
            out_string += "{0: #019x} {1: #019x} {2: #019x} {3: #019x}\n".format(mptr.PhysicalStart, mptr.NumberOfPages, mptr.VirtualStart, mptr.Attribute)
        i = i + 1

    print out_string
#EndMacro: showbootermemorymap
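# Usage note: "showbootermemorymap" takes no arguments and only works on
# x86_64 targets, since it relies on kernelBootArgs and the fixed kernel
# virtual offset applied above to reach the EFI memory map descriptors.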
@lldb_command('show_all_purgeable_objects')
def ShowAllPurgeableVmObjects(cmd_args=None):
    """ Routine to print a summary listing of all the purgeable vm objects
    """
    print "\n-------------------- VOLATILE OBJECTS --------------------\n"
    ShowAllPurgeableVolatileVmObjects()
    print "\n-------------------- NON-VOLATILE OBJECTS --------------------\n"
    ShowAllPurgeableNonVolatileVmObjects()
@lldb_command('show_all_purgeable_nonvolatile_objects')
def ShowAllPurgeableNonVolatileVmObjects(cmd_args=None):
    """ Routine to print a summary listing of all the vm objects in
        the purgeable_nonvolatile_queue
    """
    nonvolatile_total = lambda:None
    nonvolatile_total.objects = 0
    nonvolatile_total.vsize = 0
    nonvolatile_total.rsize = 0
    nonvolatile_total.wsize = 0
    nonvolatile_total.csize = 0
    nonvolatile_total.disowned_objects = 0
    nonvolatile_total.disowned_vsize = 0
    nonvolatile_total.disowned_rsize = 0
    nonvolatile_total.disowned_wsize = 0
    nonvolatile_total.disowned_csize = 0

    queue_len = kern.globals.purgeable_nonvolatile_count
    queue_head = kern.globals.purgeable_nonvolatile_queue

    print 'purgeable_nonvolatile_queue:{:#018x} purgeable_nonvolatile_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('purgeable_nonvolatile_queue'),queue_len)
    print 'N:non-volatile V:volatile E:empty D:deny\n'

    print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
    idx = 0
    for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
        idx += 1
        ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total)
    print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total.disowned_objects, nonvolatile_total.disowned_vsize, nonvolatile_total.disowned_rsize, nonvolatile_total.disowned_wsize, nonvolatile_total.disowned_csize)
    print "     all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total.objects, nonvolatile_total.vsize, nonvolatile_total.rsize, nonvolatile_total.wsize, nonvolatile_total.csize)
def ShowPurgeableNonVolatileVmObject(object, idx, queue_len, nonvolatile_total):
    """ Routine to print out a summary a VM object in purgeable_nonvolatile_queue
        params:
            object - core.value : a object of type 'struct vm_object *'
        returns:
            None
    """
    page_size = kern.globals.page_size
    # single-letter purgeability state, matching the legend printed by the caller
    if object.purgable == 0:
        purgable = "N"
    elif object.purgable == 1:
        purgable = "V"
    elif object.purgable == 2:
        purgable = "E"
    elif object.purgable == 3:
        purgable = "D"
    else:
        purgable = "?"
    if object.pager == 0:
        compressed_count = 0
    else:
        compressor_pager = Cast(object.pager, 'compressor_pager *')
        compressed_count = compressor_pager.cpgr_num_slots_occupied

    print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,queue_len,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))

    nonvolatile_total.objects += 1
    nonvolatile_total.vsize += object.vo_un1.vou_size/page_size
    nonvolatile_total.rsize += object.resident_page_count
    nonvolatile_total.wsize += object.wired_page_count
    nonvolatile_total.csize += compressed_count
    if object.vo_un2.vou_purgeable_owner == 0:
        nonvolatile_total.disowned_objects += 1
        nonvolatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
        nonvolatile_total.disowned_rsize += object.resident_page_count
        nonvolatile_total.disowned_wsize += object.wired_page_count
        nonvolatile_total.disowned_csize += compressed_count
@lldb_command('show_all_purgeable_volatile_objects')
def ShowAllPurgeableVolatileVmObjects(cmd_args=None):
    """ Routine to print a summary listing of all the vm objects in
        the purgeable queues
    """
    volatile_total = lambda:None
    volatile_total.objects = 0
    volatile_total.vsize = 0
    volatile_total.rsize = 0
    volatile_total.wsize = 0
    volatile_total.csize = 0
    volatile_total.disowned_objects = 0
    volatile_total.disowned_vsize = 0
    volatile_total.disowned_rsize = 0
    volatile_total.disowned_wsize = 0
    volatile_total.disowned_csize = 0

    purgeable_queues = kern.globals.purgeable_queues
    print "---------- OBSOLETE\n"
    ShowPurgeableQueue(purgeable_queues[0], volatile_total)
    print "\n\n---------- FIFO\n"
    ShowPurgeableQueue(purgeable_queues[1], volatile_total)
    print "\n\n---------- LIFO\n"
    ShowPurgeableQueue(purgeable_queues[2], volatile_total)

    print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total.disowned_objects, volatile_total.disowned_vsize, volatile_total.disowned_rsize, volatile_total.disowned_wsize, volatile_total.disowned_csize)
    print "     all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total.objects, volatile_total.vsize, volatile_total.rsize, volatile_total.wsize, volatile_total.csize)
    purgeable_count = kern.globals.vm_page_purgeable_count
    purgeable_wired_count = kern.globals.vm_page_purgeable_wired_count
    if purgeable_count != volatile_total.rsize or purgeable_wired_count != volatile_total.wsize:
        mismatch = "<--------- MISMATCH\n"
    else:
        mismatch = ""
    print "vm_page_purgeable_count: resident:{:<10d} wired:{:<10d} {:s}\n".format(purgeable_count, purgeable_wired_count, mismatch)
def ShowPurgeableQueue(qhead, volatile_total):
    print "----- GROUP 0\n"
    ShowPurgeableGroup(qhead.objq[0], volatile_total)
    print "----- GROUP 1\n"
    ShowPurgeableGroup(qhead.objq[1], volatile_total)
    print "----- GROUP 2\n"
    ShowPurgeableGroup(qhead.objq[2], volatile_total)
    print "----- GROUP 3\n"
    ShowPurgeableGroup(qhead.objq[3], volatile_total)
    print "----- GROUP 4\n"
    ShowPurgeableGroup(qhead.objq[4], volatile_total)
    print "----- GROUP 5\n"
    ShowPurgeableGroup(qhead.objq[5], volatile_total)
    print "----- GROUP 6\n"
    ShowPurgeableGroup(qhead.objq[6], volatile_total)
    print "----- GROUP 7\n"
    ShowPurgeableGroup(qhead.objq[7], volatile_total)
def ShowPurgeableGroup(qhead, volatile_total):
    idx = 0
    for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
        if idx == 0:
#            print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s} {:18s} {:>6s} {:<20s} {:s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process","volatilizer","pid","process","")
            print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
        idx += 1
        ShowPurgeableVolatileVmObject(object, idx, volatile_total)
def ShowPurgeableVolatileVmObject(object, idx, volatile_total):
    """ Routine to print out a summary a VM object in a purgeable queue
        params:
            object - core.value : a object of type 'struct vm_object *'
        returns:
            None
    """
#    if int(object.vo_un2.vou_purgeable_owner) != int(object.vo_purgeable_volatilizer):
    page_size = kern.globals.page_size
    # single-letter purgeability state, matching the legend printed by the caller
    if object.purgable == 0:
        purgable = "N"
    elif object.purgable == 1:
        purgable = "V"
    elif object.purgable == 2:
        purgable = "E"
    elif object.purgable == 3:
        purgable = "D"
    else:
        purgable = "?"
    if object.pager == 0:
        compressed_count = 0
    else:
        compressor_pager = Cast(object.pager, 'compressor_pager *')
        compressed_count = compressor_pager.cpgr_num_slots_occupied
#    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s} {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
    print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count, object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner))
    volatile_total.objects += 1
    volatile_total.vsize += object.vo_un1.vou_size/page_size
    volatile_total.rsize += object.resident_page_count
    volatile_total.wsize += object.wired_page_count
    volatile_total.csize += compressed_count
    if object.vo_un2.vou_purgeable_owner == 0:
        volatile_total.disowned_objects += 1
        volatile_total.disowned_vsize += object.vo_un1.vou_size/page_size
        volatile_total.disowned_rsize += object.resident_page_count
        volatile_total.disowned_wsize += object.wired_page_count
        volatile_total.disowned_csize += compressed_count
def GetCompressedPagesForObject(obj):
    """ Return the number of compressed pages for the given VM object
    """
    pager = Cast(obj.pager, 'compressor_pager_t')
    return pager.cpgr_num_slots_occupied
    # The code below walks the compressor slots directly and is kept for reference:
    # if pager.cpgr_num_slots > 128:
    #     slots_arr = pager.cpgr_slots.cpgr_islots
    #     num_indirect_slot_ptr = (pager.cpgr_num_slots + 127) / 128
    #     compressor_slot = 0
    #     compressed_pages = 0
    #     while index < num_indirect_slot_ptr:
    #         compressor_slot = 0
    #         if slots_arr[index]:
    #             while compressor_slot < 128:
    #                 if slots_arr[index][compressor_slot]:
    #                     compressed_pages += 1
    #                 compressor_slot += 1
    # else:
    #     slots_arr = pager.cpgr_slots.cpgr_dslots
    #     compressor_slot = 0
    #     compressed_pages = 0
    #     while compressor_slot < pager.cpgr_num_slots:
    #         if slots_arr[compressor_slot]:
    #             compressed_pages += 1
    #         compressor_slot += 1
    # return compressed_pages
def ShowTaskVMEntries(task, show_pager_info, show_all_shadows):
    """ Routine to print out a summary listing of all the entries in a vm_map
        params:
            task - core.value : a object of type 'task *'
        returns:
            None
    """
    print "vm_map entries for task " + hex(task)
    print GetTaskSummary.header
    print GetTaskSummary(task)
    if not task.map:
        print "Task {0: <#020x} has map = 0x0"
        return None
    showmapvme(task.map, show_pager_info, show_all_shadows)
@lldb_command("showmapvme", "PS")
def ShowMapVME(cmd_args=None, cmd_options={}):
    """Routine to print out info about the specified vm_map and its vm entries
        usage: showmapvme <vm_map>
        Use -S flag to show VM object shadow chains
        Use -P flag to show pager info (mapped file, compressed pages, ...)
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Invalid argument.", ShowMapVME.__doc__
        return
    show_pager_info = False
    show_all_shadows = False
    if "-P" in cmd_options:
        show_pager_info = True
    if "-S" in cmd_options:
        show_all_shadows = True
    map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t')
    showmapvme(map, show_pager_info, show_all_shadows)
def showmapvme(map, show_pager_info, show_all_shadows):
    page_size = kern.globals.page_size
    vnode_pager_ops = kern.globals.vnode_pager_ops
    vnode_pager_ops_addr = unsigned(addressof(vnode_pager_ops))
    rsize = 0
    if map.pmap != 0:
        rsize = int(map.pmap.stats.resident_count)
    print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
    print "{:#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end)
    vme_list_head = map.hdr.links
    vme_ptr_type = GetType('vm_map_entry *')
    print "{:<18s} {:>18s}:{:<18s} {:>10s} {:<8s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag.kmod","prot&flags","object","offset")
    last_end = unsigned(map.hdr.links.start)
    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
        if unsigned(vme.links.start) != last_end:
            print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,vme.links.start,(unsigned(vme.links.start) - last_end)/page_size)
        last_end = unsigned(vme.links.end)
        size = unsigned(vme.links.end) - unsigned(vme.links.start)
        object = vme.vme_object.vmo_object
        if object == 0:
            object_str = "{:<#018x}".format(object)
        elif vme.is_sub_map:
            if object == kern.globals.bufferhdr_map:
                object_str = "BUFFERHDR_MAP"
            elif object == kern.globals.mb_map:
                object_str = "MB_MAP"
            elif object == kern.globals.bsd_pageable_map:
                object_str = "BSD_PAGEABLE_MAP"
            elif object == kern.globals.ipc_kernel_map:
                object_str = "IPC_KERNEL_MAP"
            elif object == kern.globals.ipc_kernel_copy_map:
                object_str = "IPC_KERNEL_COPY_MAP"
            elif object == kern.globals.kalloc_map:
                object_str = "KALLOC_MAP"
            elif object == kern.globals.zone_map:
                object_str = "ZONE_MAP"
            elif hasattr(kern.globals, 'compressor_map') and object == kern.globals.compressor_map:
                object_str = "COMPRESSOR_MAP"
            elif hasattr(kern.globals, 'gzalloc_map') and object == kern.globals.gzalloc_map:
                object_str = "GZALLOC_MAP"
            elif hasattr(kern.globals, 'g_kext_map') and object == kern.globals.g_kext_map:
                object_str = "G_KEXT_MAP"
            elif hasattr(kern.globals, 'vector_upl_submap') and object == kern.globals.vector_upl_submap:
                object_str = "VECTOR_UPL_SUBMAP"
            else:
                object_str = "submap:{:<#018x}".format(object)
        else:
            if object == kern.globals.kernel_object:
                object_str = "KERNEL_OBJECT"
            elif object == kern.globals.vm_submap_object:
                object_str = "VM_SUBMAP_OBJECT"
            elif object == kern.globals.compressor_object:
                object_str = "COMPRESSOR_OBJECT"
            else:
                object_str = "{:<#018x}".format(object)
        offset = unsigned(vme.vme_offset) & ~0xFFF
        tag = unsigned(vme.vme_offset & 0xFFF)
        vme_flags = ""
        tagstr = ""
        if vme.is_sub_map and vme.use_pmap:
            vme_flags += "p"
        if map.pmap == kern.globals.kernel_pmap:
            xsite = Cast(kern.globals.vm_allocation_sites[tag],'OSKextAccount *')
            if xsite and xsite.site.flags & 2:
                tagstr = ".{:<3d}".format(xsite.loadTag)
        print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d}{:<4s} {:1d}{:1d}{:<8s} {:<18s} {:<#18x}".format(vme,vme.links.start,vme.links.end,(unsigned(vme.links.end)-unsigned(vme.links.start))/page_size,tag,tagstr,vme.protection,vme.max_protection,vme_flags,object_str,offset)
        if (show_pager_info or show_all_shadows) and vme.is_sub_map == 0 and vme.vme_object.vmo_object != 0:
            object = vme.vme_object.vmo_object
        else:
            object = 0
        depth = 0
        while object != 0:
            depth += 1
            if show_all_shadows == False and depth != 1 and object.shadow != 0:
                offset += unsigned(object.vo_un2.vou_shadow_offset)
                object = object.shadow
                continue
            # single-letter copy strategy: N(one), D(elay), S(ymmetric)
            if object.copy_strategy == 0:
                copy_strategy = "N"
            elif object.copy_strategy == 2:
                copy_strategy = "D"
            elif object.copy_strategy == 4:
                copy_strategy = "S"
            else:
                copy_strategy = str(object.copy_strategy)
            if object.internal:
                internal = "internal"
            else:
                internal = "external"
            pager_string = ""
            pager = object.pager
            if show_pager_info and pager != 0:
                if object.internal:
                    pager_string = "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
                elif unsigned(pager.mo_pager_ops) == vnode_pager_ops_addr:
                    vnode_pager = Cast(pager,'vnode_pager *')
                    pager_string = "-> " + GetVnodePath(vnode_pager.vnode_handle)
                else:
                    pager_string = "-> {:s}:{:#018x}".format(pager.mo_pager_ops.memory_object_pager_name, pager.mo_pager_ops)
            print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth,offset,offset+size,object,object.ref_count,object.true_share,copy_strategy,internal,unsigned(object.vo_un1.vou_size)/page_size,object.resident_page_count,object.wired_page_count,pager_string)
#            print "  #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/page_size,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
            offset += unsigned(object.vo_un2.vou_shadow_offset)
            object = object.shadow
    if unsigned(map.hdr.links.end) > last_end:
        print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end,map.hdr.links.end,(unsigned(map.hdr.links.end) - last_end)/page_size)
    return None
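# Reading the output: each row is one vm_map_entry ("------" rows mark holes in
# the address space). With -S every entry is followed by its VM object shadow
# chain, one row per level; with -P each object row gains a pager annotation
# (vnode path for file-backed memory, compressed page count for internal memory).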
def CountMapTags(map, tagcounts, slow):
    page_size = unsigned(kern.globals.page_size)
    vme_list_head = map.hdr.links
    vme_ptr_type = GetType('vm_map_entry *')
    for vme in IterateQueue(vme_list_head, vme_ptr_type, "links"):
        object = vme.vme_object.vmo_object
        tag = vme.vme_offset & 0xFFF
        if object == kern.globals.kernel_object:
            count = 0
            if not slow:
                count = unsigned(vme.links.end - vme.links.start) / page_size
            else:
                addr = unsigned(vme.links.start)
                while addr < unsigned(vme.links.end):
                    hash_id = _calc_vm_page_hash(object, addr)
                    page_list = kern.globals.vm_page_buckets[hash_id].page_list
                    page = _vm_page_unpack_ptr(page_list)
                    while (page != 0):
                        vmpage = kern.GetValueFromAddress(page, 'vm_page_t')
                        if (addr == unsigned(vmpage.offset)) and (object == vm_object_t(_vm_page_unpack_ptr(vmpage.vm_page_object))):
                            if (not vmpage.local) and (vmpage.wire_count > 0):
                                count += 1
                            break
                        page = _vm_page_unpack_ptr(vmpage.next_m)
                    addr += page_size
            tagcounts[tag] += count
        elif vme.is_sub_map:
            CountMapTags(Cast(object,'vm_map_t'), tagcounts, slow)
    return None
def CountWiredObject(object, tagcounts):
    tagcounts[unsigned(object.wire_tag)] += object.wired_page_count

def CountWiredPurgeableGroup(qhead, tagcounts):
    for object in IterateQueue(qhead, 'struct vm_object *', 'objq'):
        CountWiredObject(object, tagcounts)

def CountWiredPurgeableQueue(qhead, tagcounts):
    CountWiredPurgeableGroup(qhead.objq[0], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[1], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[2], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[3], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[4], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[5], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[6], tagcounts)
    CountWiredPurgeableGroup(qhead.objq[7], tagcounts)
def GetKmodIDName(kmod_id):
    kmod_val = kern.globals.kmod
    for kmod in IterateLinkedList(kmod_val, 'next'):
        if (kmod.id == kmod_id):
            return "{:<50s}".format(kmod.name)
    return "??"
def GetVMKernName(tag):
    """ Map a static VM_KERN_MEMORY_* wire tag to its name.
        The numeric values below are assumed to follow the VM_KERN_MEMORY_*
        enumeration in osfmk/mach/vm_statistics.h (OSFMK == 1 ... MLOCK == 24).
    """
    if 1 == tag:
        return "VM_KERN_MEMORY_OSFMK"
    elif 2 == tag:
        return "VM_KERN_MEMORY_BSD"
    elif 3 == tag:
        return "VM_KERN_MEMORY_IOKIT"
    elif 4 == tag:
        return "VM_KERN_MEMORY_LIBKERN"
    elif 5 == tag:
        return "VM_KERN_MEMORY_OSKEXT"
    elif 6 == tag:
        return "VM_KERN_MEMORY_KEXT"
    elif 7 == tag:
        return "VM_KERN_MEMORY_IPC"
    elif 8 == tag:
        return "VM_KERN_MEMORY_STACK"
    elif 9 == tag:
        return "VM_KERN_MEMORY_CPU"
    elif 10 == tag:
        return "VM_KERN_MEMORY_PMAP"
    elif 11 == tag:
        return "VM_KERN_MEMORY_PTE"
    elif 12 == tag:
        return "VM_KERN_MEMORY_ZONE"
    elif 13 == tag:
        return "VM_KERN_MEMORY_KALLOC"
    elif 14 == tag:
        return "VM_KERN_MEMORY_COMPRESSOR"
    elif 15 == tag:
        return "VM_KERN_MEMORY_COMPRESSED_DATA"
    elif 16 == tag:
        return "VM_KERN_MEMORY_PHANTOM_CACHE"
    elif 17 == tag:
        return "VM_KERN_MEMORY_WAITQ"
    elif 18 == tag:
        return "VM_KERN_MEMORY_DIAG"
    elif 19 == tag:
        return "VM_KERN_MEMORY_LOG"
    elif 20 == tag:
        return "VM_KERN_MEMORY_FILE"
    elif 21 == tag:
        return "VM_KERN_MEMORY_MBUF"
    elif 22 == tag:
        return "VM_KERN_MEMORY_UBC"
    elif 23 == tag:
        return "VM_KERN_MEMORY_SECURITY"
    elif 24 == tag:
        return "VM_KERN_MEMORY_MLOCK"
    return ""
@lldb_command("showvmtags", "S")
def showvmtags(cmd_args=None, cmd_options={}):
    """Routine to print out info about kernel wired page allocations
        usage: showvmtags
               iterates kernel map and vm objects totaling allocations by tag.
        usage: showvmtags -S
               also iterates kernel object pages individually - slow.
    """
    slow = False
    if "-S" in cmd_options:
        slow = True
    page_size = unsigned(kern.globals.page_size)
    tagcounts = []
    for tag in range(256):
        tagcounts.append(0)

    queue_head = kern.globals.vm_objects_wired
    for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
        if object != kern.globals.kernel_object:
            CountWiredObject(object, tagcounts)

    queue_head = kern.globals.purgeable_nonvolatile_queue
    for object in IterateQueue(queue_head, 'struct vm_object *', 'objq'):
        CountWiredObject(object, tagcounts)

    purgeable_queues = kern.globals.purgeable_queues
    CountWiredPurgeableQueue(purgeable_queues[0], tagcounts)
    CountWiredPurgeableQueue(purgeable_queues[1], tagcounts)
    CountWiredPurgeableQueue(purgeable_queues[2], tagcounts)

    CountMapTags(kern.globals.kernel_map, tagcounts, slow)

    total = 0
    print " {:<8s} {:>7s} {:<50s}".format("tag.kmod","size","name")
    for tag in range(256):
        if tagcounts[tag]:
            total += tagcounts[tag]
            tagstr = ""
            sitestr = ""
            if tag < 25:    # static VM_KERN_MEMORY_* tags (see GetVMKernName)
                sitestr = GetVMKernName(tag)
            else:
                site = kern.globals.vm_allocation_sites[tag]
                if site:
                    if site.flags & 2:    # allocation site backed by a kext account
                        xsite = Cast(site,'OSKextAccount *')
                        tagstr = ".{:<3d}".format(xsite.loadTag)
                        sitestr = GetKmodIDName(xsite.loadTag)
                    else:
                        sitestr = kern.Symbolicate(site)
            print " {:>3d}{:<4s} {:>7d}K {:<50s}".format(tag,tagstr,tagcounts[tag]*page_size / 1024,sitestr)
    print "Total: {:>7d}K".format(total*page_size / 1024)
    return None
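# Example output line (values are illustrative):
#    12       1024K VM_KERN_MEMORY_ZONE
# i.e. tag, optional ".kmod" load-tag suffix, wired size in KiB, and the tag or
# allocation-site name. Pass -S to also walk kernel_object pages one by one,
# which is much slower but counts only pages that are actually wired.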
def FindVMEntriesForVnode(task, vn):
    """ returns an array of vme that have the vnode set to defined vnode
        each entry in array is of format (vme, start_addr, end_address, protection)
    """
    retval = []
    vmmap = task.map
    pmap = vmmap.pmap
    pager_ops_addr = unsigned(addressof(kern.globals.vnode_pager_ops))
    debuglog("pager_ops_addr %s" % hex(pager_ops_addr))

    if unsigned(pmap) == 0:
        return retval
    vme_list_head = vmmap.hdr.links
    vme_ptr_type = gettype('vm_map_entry *')
    for vme in IterateQueue(vme_list_head, vme_ptr_type, 'links'):
        # skip submaps and entries without a backing VM object
        if unsigned(vme.is_sub_map) == 0 and unsigned(vme.vme_object.vmo_object) != 0:
            obj = vme.vme_object.vmo_object
            # follow the shadow chain looking for a vnode-backed pager
            while obj != 0:
                if obj.pager != 0 and not obj.internal:
                    vn_pager = Cast(obj.pager, 'vnode_pager *')
                    if unsigned(vn_pager.pager_ops) == pager_ops_addr and unsigned(vn_pager.vnode_handle) == unsigned(vn):
                        retval.append((vme, unsigned(vme.links.start), unsigned(vme.links.end), unsigned(vme.protection)))
                obj = obj.shadow
    return retval
@lldb_command('showtaskloadinfo')
def ShowTaskLoadInfo(cmd_args=None, cmd_options={}):
    """ Print the load address and uuid for the process
        Usage: (lldb)showtaskloadinfo <task_t>
    """
    if not cmd_args:
        raise ArgumentError("Insufficient arguments")
    t = kern.GetValueFromAddress(cmd_args[0], 'struct task *')
    print_format = "0x{0:x} - 0x{1:x} {2: <50s} (??? - ???) <{3: <36s}> {4: <50s}"
    p = Cast(t.bsd_info, 'struct proc *')
    uuid = p.p_uuid
    uuid_out_string = "{a[0]:02X}{a[1]:02X}{a[2]:02X}{a[3]:02X}-{a[4]:02X}{a[5]:02X}-{a[6]:02X}{a[7]:02X}-{a[8]:02X}{a[9]:02X}-{a[10]:02X}{a[11]:02X}{a[12]:02X}{a[13]:02X}{a[14]:02X}{a[15]:02X}".format(a=uuid)
    filepath = GetVnodePath(p.p_textvp)
    libname = filepath.split('/')[-1]
    #print "uuid: %s file: %s" % (uuid_out_string, filepath)
    mappings = FindVMEntriesForVnode(t, p.p_textvp)
    load_addr = 0
    end_addr = 0
    for m in mappings:
        if m[3] == 5:    # r-x mapping, i.e. the text segment
            load_addr = m[1]
            end_addr = m[2]
            #print "Load address: %s" % hex(m[1])
    print print_format.format(load_addr, end_addr, libname, uuid_out_string, filepath)
    return None
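# Example (address is a placeholder):
#   (lldb) showtaskloadinfo 0xffffff80223c0000
# prints "<load addr> - <end addr> <binary name> (??? - ???) <UUID> <path>",
# where the address range comes from the r-x mapping of the process text vnode
# located by FindVMEntriesForVnode() above.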
@header("{0: <20s} {1: <20s} {2: <20s}".format("vm_page_t", "offset", "object"))
@lldb_command('vmpagelookup')
def VMPageLookup(cmd_args=None):
    """ Print the pages in the page bucket corresponding to the provided object and offset.
        Usage: (lldb)vmpagelookup <vm_object_t> <vm_offset_t>
    """
    if cmd_args == None or len(cmd_args) < 2:
        raise ArgumentError("Please specify an object and offset.")
    format_string = "{0: <#020x} {1: <#020x} {2: <#020x}\n"

    obj = kern.GetValueFromAddress(cmd_args[0],'unsigned long long')
    off = kern.GetValueFromAddress(cmd_args[1],'unsigned long long')

    hash_id = _calc_vm_page_hash(obj, off)

    page_list = kern.globals.vm_page_buckets[hash_id].page_list
    print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(hash_id), unsigned(page_list)))

    print VMPageLookup.header
    page = _vm_page_unpack_ptr(page_list)
    while (page != 0):
        pg_t = kern.GetValueFromAddress(page, 'vm_page_t')
        print format_string.format(page, pg_t.offset, _vm_page_unpack_ptr(pg_t.vm_page_object))
        page = _vm_page_unpack_ptr(pg_t.next_m)
@lldb_command('vmpage_get_phys_page')
def VmPageGetPhysPage(cmd_args=None):
    """ return the physical page for a vm_page_t
        usage: vm_page_get_phys_page <vm_page_t>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Please provide valid vm_page_t. Type help vm_page_get_phys_page for help."
        return

    page = kern.GetValueFromAddress(cmd_args[0], 'vm_page_t')
    phys_page = _vm_page_get_phys_page(page)
    print("phys_page = 0x%x\n" % phys_page)

def _vm_page_get_phys_page(page):
    if kern.arch == 'x86_64':
        return page.phys_page

    if page == 0:
        return 0

    m = unsigned(page)
    if m >= unsigned(kern.globals.vm_page_array_beginning_addr) and m < unsigned(kern.globals.vm_page_array_ending_addr) :
        return (m - unsigned(kern.globals.vm_page_array_beginning_addr)) / sizeof('struct vm_page') + unsigned(kern.globals.vm_first_phys_ppnum)

    page_with_ppnum = Cast(page, 'uint32_t *')
    ppnum_offset = sizeof('struct vm_page') / sizeof('uint32_t')
    return page_with_ppnum[ppnum_offset]
@lldb_command('vmpage_unpack_ptr')
def VmPageUnpackPtr(cmd_args=None):
    """ unpack a pointer
        usage: vm_page_unpack_ptr <packed_ptr>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Please provide valid packed pointer argument. Type help vm_page_unpack_ptr for help."
        return

    packed = kern.GetValueFromAddress(cmd_args[0],'unsigned long')
    unpacked = _vm_page_unpack_ptr(packed)
    print("unpacked pointer = 0x%x\n" % unpacked)

def _vm_page_unpack_ptr(page):
    if kern.ptrsize == 4:
        return page

    if page == 0:
        return page

    min_addr = kern.globals.vm_min_kernel_and_kext_address
    ptr_shift = kern.globals.vm_packed_pointer_shift
    ptr_mask = kern.globals.vm_packed_from_vm_pages_array_mask
    #INTEL - min_addr = 0xffffff7f80000000
    #ARM - min_addr = 0x80000000
    #ARM64 - min_addr = 0xffffff8000000000
    if unsigned(page) & unsigned(ptr_mask):
        masked_page = (unsigned(page) & ~ptr_mask)
        return (unsigned(addressof(kern.globals.vm_pages[masked_page])))
    return ((unsigned(page) << unsigned(ptr_shift)) + unsigned(min_addr))
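# Packing scheme assumed above: if the packed value has the
# vm_packed_from_vm_pages_array_mask bit set, the remaining bits are an index
# into the vm_pages[] array; otherwise the value is a kernel address that was
# right-shifted by vm_packed_pointer_shift relative to
# vm_min_kernel_and_kext_address, so unpacking shifts left and adds that base
# (see the per-architecture min_addr comments above).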
@lldb_command('calcvmpagehash')
def CalcVMPageHash(cmd_args=None):
    """ Get the page bucket corresponding to the provided object and offset.
        Usage: (lldb)calcvmpagehash <vm_object_t> <vm_offset_t>
    """
    if cmd_args == None or len(cmd_args) < 2:
        raise ArgumentError("Please specify an object and offset.")

    obj = kern.GetValueFromAddress(cmd_args[0],'unsigned long long')
    off = kern.GetValueFromAddress(cmd_args[1],'unsigned long long')

    hash_id = _calc_vm_page_hash(obj, off)

    print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(hash_id), unsigned(kern.globals.vm_page_buckets[hash_id].page_list)))
    return None
def _calc_vm_page_hash(obj, off):
    bucket_hash = (int) (kern.globals.vm_page_bucket_hash)
    hash_mask = (int) (kern.globals.vm_page_hash_mask)

    one = (obj * bucket_hash) & 0xFFFFFFFF
    two = off >> unsigned(kern.globals.page_shift)
    three = two ^ bucket_hash
    four = one + three
    hash_id = four & hash_mask

    return hash_id
= 1
3031 @header("{0: <10s} of {1: <10s} {2: <20s} {3: <20s} {4: <20s} {5: <10s} {6: <5s}\t {7: <28s}\t{8: <50s}".format("index", "total", "vm_page_t", "offset", "next", "phys_page", "wire#", "first bitfield", "second bitfield"))
3032 @lldb_command('vmobjectwalkpages', 'SBNQP:')
3033 def VMObjectWalkPages(cmd_args
=None, cmd_options
={}):
3034 """ Print the resident pages contained in the provided object. If a vm_page_t is provided as well, we
3035 specifically look for this page, highlighting it in the output or noting if it was not found. For
3036 each page, we confirm that it points to the object. We also keep track of the number of pages we
3037 see and compare this to the object's resident page count field.
3039 vmobjectwalkpages <vm_object_t> : Walk and print all the pages for a given object (up to 4K pages by default)
3040 vmobjectwalkpages <vm_object_t> -B : Walk and print all the pages for a given object (up to 4K pages by default), traversing the memq backwards
3041 vmobjectwalkpages <vm_object_t> -N : Walk and print all the pages for a given object, ignore the page limit
3042 vmobjectwalkpages <vm_object_t> -Q : Walk all pages for a given object, looking for known signs of corruption (i.e. q_state == VM_PAGE_IS_WIRED && wire_count == 0)
3043 vmobjectwalkpages <vm_object_t> -P <vm_page_t> : Walk all the pages for a given object, annotate the specified page in the output with ***
3044 vmobjectwalkpages <vm_object_t> -P <vm_page_t> -S : Walk all the pages for a given object, stopping when we find the specified page
3048 if (cmd_args
== None or len(cmd_args
) < 1):
3049 raise ArgumentError("Please specify at minimum a vm_object_t and optionally a vm_page_t")

    obj = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')

    page = 0
    if "-P" in cmd_options:
        page = kern.GetValueFromAddress(cmd_options['-P'], 'vm_page_t')

    stop = 0
    if "-S" in cmd_options:
        if page == 0:
            raise ArgumentError("-S can only be passed when a page is specified with -P")
        stop = 1

    walk_backwards = False
    if "-B" in cmd_options:
        walk_backwards = True

    quiet_mode = False
    if "-Q" in cmd_options:
        quiet_mode = True

    print VMObjectWalkPages.header
    format_string = "{0: <#10d} of {1: <#10d} {2: <#020x} {3: <#020x} {4: <#020x} {5: <#010x} {6: <#05d}\t"
    first_bitfield_format_string = "{0: <#2d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:{7: <#1d}\t"
    second_bitfield_format_string = "{0: <#1d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:"
    second_bitfield_format_string += "{7: <#1d}:{8: <#1d}:{9: <#1d}:{10: <#1d}:{11: <#1d}:{12: <#1d}:"
    second_bitfield_format_string += "{13: <#1d}:{14: <#1d}:{15: <#1d}:{16: <#1d}:{17: <#1d}:{18: <#1d}:{19: <#1d}:"
    second_bitfield_format_string += "{20: <#1d}:{21: <#1d}:{22: <#1d}:{23: <#1d}:{24: <#1d}:{25: <#1d}:{26: <#1d}\n"

    limit = 4096 # arbitrary limit on the number of pages to walk
    ignore_limit = 0
    if "-N" in cmd_options:
        ignore_limit = 1

    page_count = 0
    page_found = False
    pages_seen = set()
    res_page_count = unsigned(obj.resident_page_count)

    for vmp in IterateQueue(obj.memq, "vm_page_t", "listq", walk_backwards, unpack_ptr_fn=_vm_page_unpack_ptr):
        page_count += 1
        out_string = ""
        if (page != 0 and not(page_found) and vmp == page):
            out_string += "******"
            page_found = True

        if page != 0 or quiet_mode:
            if (page_count % 1000) == 0:
                print "traversed %d pages ...\n" % (page_count)
        else:
            out_string += format_string.format(page_count, res_page_count, vmp, vmp.offset, _vm_page_unpack_ptr(vmp.listq.next), _vm_page_get_phys_page(vmp), vmp.wire_count)
            out_string += first_bitfield_format_string.format(vmp.vm_page_q_state, vmp.vm_page_in_background, vmp.vm_page_on_backgroundq, vmp.gobbled, vmp.laundry, vmp.no_cache,
                                                              vmp.private, vmp.reference)
            out_string += second_bitfield_format_string.format(vmp.busy, vmp.wanted, vmp.tabled, vmp.hashed, vmp.fictitious, vmp.clustered,
                                                               vmp.pmapped, vmp.xpmapped, vmp.wpmapped, vmp.free_when_done, vmp.absent,
                                                               vmp.error, vmp.dirty, vmp.cleaning, vmp.precious, vmp.overwriting,
                                                               vmp.restart, vmp.unusual, vmp.encrypted, vmp.encrypted_cleaning,
                                                               vmp.cs_validated, vmp.cs_tainted, vmp.cs_nx, vmp.reusable, vmp.lopage, vmp.slid,
                                                               vmp.written_by_kernel)

        if (vmp in pages_seen):
            print out_string + "cycle detected! we've seen vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " twice. stopping...\n"
            return

        if (_vm_page_unpack_ptr(vmp.vm_page_object) != unsigned(obj)):
            print out_string + " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(_vm_page_unpack_ptr(vmp.vm_page_object)))
            return

        if (vmp.vm_page_q_state == VM_PAGE_IS_WIRED) and (vmp.wire_count == 0):
            print out_string + " page in wired state with wire_count of 0\n"
            print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + "\n"
            print "stopping...\n"
            return

        if ((vmp.__unused_pageq_bits != 0) or (vmp.__unused_object_bits != 0)):
            print out_string + " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp.__unused_pageq_bits,
                                vmp.__unused_object_bits)
            print "stopping...\n"
            return

        pages_seen.add(vmp)

        # verify the page can be found through the vm_page_buckets hash chain
        hash_id = _calc_vm_page_hash(obj, vmp.offset)
        hash_page_list = kern.globals.vm_page_buckets[hash_id].page_list
        hash_page = _vm_page_unpack_ptr(hash_page_list)
        hash_page_t = 0

        while (hash_page != 0):
            hash_page_t = kern.GetValueFromAddress(hash_page, 'vm_page_t')
            if hash_page_t == vmp:
                break
            hash_page = _vm_page_unpack_ptr(hash_page_t.next_m)

        if (unsigned(vmp) != unsigned(hash_page_t)):
            print out_string + "unable to find page: " + "{0: <#020x}".format(unsigned(vmp)) + " from object in kernel page bucket list\n"
            print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args[0], unsigned(vmp.offset)))
            return

        if (page_count >= limit and not(ignore_limit)):
            print out_string + "Limit reached (%d pages), stopping..." % (limit)
            break

        print out_string

    if page_found and stop:
        print("Object reports resident page count of %d; we stopped after traversing %d pages and finding the requested page.\n" % (unsigned(obj.resident_page_count), unsigned(page_count)))
        return

    if page != 0:
        print("page found? : %s\n" % page_found)

    print("Object reports resident page count of %d, we saw %d pages when we walked the resident list.\n" % (unsigned(obj.resident_page_count), unsigned(page_count)))
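
# Illustrative sketch (not part of the debugger macros): VMObjectWalkPages combines
# a seen-set for cycle detection with a hard page limit while it walks the memq.
# The node class and limit below are assumed examples showing the same pattern on a
# plain Python linked list.
class _ExampleNode(object):
    """ Minimal stand-in for an element on a singly linked queue. """
    def __init__(self, name):
        self.name = name
        self.next = None

def _example_walk(head, limit=4096):
    """ Walk the list, stopping on a detected cycle or when the limit is reached. """
    seen = set()
    count = 0
    node = head
    while node is not None:
        if id(node) in seen:
            print "cycle detected at %s, stopping" % node.name
            return count
        seen.add(id(node))
        count += 1
        if count >= limit:
            print "limit reached (%d), stopping" % limit
            return count
        node = node.next
    return count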

@lldb_command("show_all_apple_protect_pagers")
def ShowAllAppleProtectPagers(cmd_args=None):
    """ Routine to print all apple_protect pagers
        usage: show_all_apple_protect_pagers
    """
    print "{:>3s} {:<3s} {:<18s} {:>5s} {:>5s} {:>6s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s}\n".format("#", "#", "pager", "refs", "ready", "mapped", "mo_control", "object", "offset", "crypto_offset", "crypto_start", "crypto_end")
    qhead = kern.globals.apple_protect_pager_queue
    qtype = GetType('apple_protect_pager *')
    qcnt = kern.globals.apple_protect_pager_count
    idx = 0
    for pager in IterateQueue(qhead, qtype, "pager_queue"):
        idx = idx + 1
        show_apple_protect_pager(pager, qcnt, idx)

@lldb_command("show_apple_protect_pager")
def ShowAppleProtectPager(cmd_args=None):
    """ Routine to print out info about an apple_protect pager
        usage: show_apple_protect_pager <pager>
    """
    if cmd_args == None or len(cmd_args) < 1:
        print "Invalid argument.", ShowAppleProtectPager.__doc__
        return
    pager = kern.GetValueFromAddress(cmd_args[0], 'apple_protect_pager_t')
    show_apple_protect_pager(pager, 1, 1)

def show_apple_protect_pager(pager, qcnt, idx):
    object = pager.backing_object
    shadow = object.shadow
    while shadow != 0:
        # walk down to the bottom of the shadow chain
        object = shadow
        shadow = object.shadow
    vnode_pager = Cast(object.pager, 'vnode_pager *')
    filename = GetVnodePath(vnode_pager.vnode_handle)
    print "{:>3}/{:<3d} {:#018x} {:>5d} {:>5d} {:>6d} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x}\n\tcrypt_info:{:#018x} <decrypt:{:#018x} end:{:#018x} ops:{:#018x} refs:{:<d}>\n\tvnode:{:#018x} {:s}\n".format(idx, qcnt, pager, pager.ref_count, pager.is_ready, pager.is_mapped, pager.pager_control, pager.backing_object, pager.backing_offset, pager.crypto_backing_offset, pager.crypto_start, pager.crypto_end, pager.crypt_info, pager.crypt_info.page_decrypt, pager.crypt_info.crypt_end, pager.crypt_info.crypt_ops, pager.crypt_info.crypt_refcnt, vnode_pager.vnode_handle, filename)

@lldb_command("show_console_ring")
def ShowConsoleRingData(cmd_args=None):
    """ Print console ring buffer stats and data
    """
    cr = kern.globals.console_ring
    print "console_ring = {:#018x} buffer = {:#018x} length = {:<5d} used = {:<5d} read_ptr = {:#018x} write_ptr = {:#018x}".format(addressof(cr), cr.buffer, cr.len, cr.used, cr.read_ptr, cr.write_ptr)
    pending_data = []
    for i in range(unsigned(cr.used)):
        # wrap around the end of the ring buffer
        idx = ((unsigned(cr.read_ptr) - unsigned(cr.buffer)) + i) % unsigned(cr.len)
        pending_data.append("{:c}".format(cr.buffer[idx]))

    print "".join(pending_data)
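
# Illustrative sketch (not part of the debugger macros): the console ring is read
# as `used` entries starting at the read pointer, wrapping modulo the buffer length.
# The list, start index and count below are assumed examples of that arithmetic.
def _example_ring_read(buf, read_idx, used):
    """ Return the pending characters of a ring buffer modelled as a Python list. """
    out = []
    for i in range(used):
        out.append(buf[(read_idx + i) % len(buf)])
    return "".join(out)
# e.g. _example_ring_read(list("ring"), 2, 4) == "ngri"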

# Macro: showjetsamsnapshot

@lldb_command("showjetsamsnapshot", "DA")
def ShowJetsamSnapshot(cmd_args=None, cmd_options={}):
    """ Dump entries in the jetsam snapshot table
        usage: showjetsamsnapshot [-D] [-A]
        Use -D flag to print extra physfootprint details
        Use -A flag to print all entries (regardless of valid count)
    """

    # Not shown are uuid, user_data, cpu_time

    if kern.arch == 'x86_64':
        print "Snapshots are not supported.\n"
        return

    show_footprint_details = False
    show_all_entries = False

    if "-D" in cmd_options:
        show_footprint_details = True

    if "-A" in cmd_options:
        show_all_entries = True

    valid_count = kern.globals.memorystatus_jetsam_snapshot_count
    max_count = kern.globals.memorystatus_jetsam_snapshot_max

    if (show_all_entries == True):
        count = max_count
    else:
        count = valid_count

    print "{:s}".format(valid_count)
    print "{:s}".format(max_count)

    if unsigned(count) == 0:
        print "The jetsam snapshot is empty."
        print "Use -A to force dump all entries (regardless of valid count)"
        return

    # Dumps the snapshot header info
    print lldb_run_command('p *memorystatus_jetsam_snapshot')

    hdr_format = "{0: >32s} {1: >5s} {2: >4s} {3: >6s} {4: >6s} {5: >20s} {6: >20s} {7: >20s} {8: >5s} {9: >10s} {10: >6s} {11: >6s} {12: >10s} {13: >15s} {14: >15s} {15: >15s} {16: >15s}"
    if (show_footprint_details == True):
        hdr_format += "{17: >15s} {18: >15s} {19: >12s} {20: >12s} {21: >17s} {22: >10s} {23: >13s} {24: >10s}"

    if (show_footprint_details == False):
        print hdr_format.format('command', 'index', 'pri', 'cid', 'pid', 'starttime', 'killtime', 'idletime', 'kill', '#ents', 'fds', 'gen', 'state', 'footprint', 'max', 'purgeable', 'lifetimeMax')
        print hdr_format.format('', '', '', '', '', '(abs)', '(abs)', '(abs)', 'cause', '', '', 'Count', '', '(pages)', '(pages)', '(pages)', '(pages)')
    else:
        print hdr_format.format('command', 'index', 'pri', 'cid', 'pid', 'starttime', 'killtime', 'idletime', 'kill', '#ents', 'fds', 'gen', 'state', 'footprint', 'max', 'purgeable', 'lifetimeMax', '|| internal', 'internal_comp', 'iokit_mapped', 'purge_nonvol', 'purge_nonvol_comp', 'alt_acct', 'alt_acct_comp', 'page_table')
        print hdr_format.format('', '', '', '', '', '(abs)', '(abs)', '(abs)', 'cause', '', '', 'Count', '', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)')

    entry_format = "{e.name: >32s} {index: >5d} {e.priority: >4d} {e.jse_coalition_jetsam_id: >6d} {e.pid: >6d} "\
                   "{e.jse_starttime: >20d} {e.jse_killtime: >20d} "\
                   "{e.jse_idle_delta: >20d} {e.killed: >5d} {e.jse_memory_region_count: >10d} "\
                   "{e.fds: >6d} {e.jse_gencount: >6d} {e.state: >10x} {e.pages: >15d} {e.max_pages: >15d} "\
                   "{e.purgeable_pages: >15d} {e.max_pages_lifetime: >15d}"

    if (show_footprint_details == True):
        entry_format += "{e.jse_internal_pages: >15d} "\
                        "{e.jse_internal_compressed_pages: >15d} "\
                        "{e.jse_iokit_mapped_pages: >12d} "\
                        "{e.jse_purgeable_nonvolatile_pages: >12d} "\
                        "{e.jse_purgeable_nonvolatile_compressed_pages: >17d} "\
                        "{e.jse_alternate_accounting_pages: >10d} "\
                        "{e.jse_alternate_accounting_compressed_pages: >13d} "\
                        "{e.jse_page_table_pages: >10d}"

    snapshot_list = kern.globals.memorystatus_jetsam_snapshot.entries
    idx = 0
    while idx < unsigned(count):
        current_entry = Cast(snapshot_list[idx], 'jetsam_snapshot_entry')
        print entry_format.format(index=idx, e=current_entry)
        idx += 1
    return

# EndMacro: showjetsamsnapshot
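
# Illustrative sketch (not part of the debugger macros): entry_format above relies on
# str.format dereferencing attributes of a keyword argument ({e.name}, {e.pid}, ...).
# The class and values below are hypothetical, only to show that rendering technique.
class _ExampleSnapshotEntry(object):
    def __init__(self, name, pid, pages):
        self.name = name
        self.pid = pid
        self.pages = pages
# e.g. "{e.name: >32s} {e.pid: >6d} {e.pages: >15d}".format(e=_ExampleSnapshotEntry("launchd", 1, 512))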