""" Please make sure you read the README file COMPLETELY BEFORE reading anything below.
It is very critical that you read coding guidelines in Section E in README file.
13 @lldb_command('memstats')
14 def Memstats(cmd_args
=None):
15 """ Prints out a summary of various memory statistics. In particular vm_page_wire_count should be greater than 2K or you are under memory pressure.
18 print "memorystatus_level: {: >10d}".format(kern
.globals.memorystatus_level
)
22 print "memorystatus_available_pages: {: >10d}".format(kern
.globals.memorystatus_available_pages
)
25 print "vm_page_throttled_count: {: >10d}".format(kern
.globals.vm_page_throttled_count
)
26 print "vm_page_active_count: {: >10d}".format(kern
.globals.vm_page_active_count
)
27 print "vm_page_inactive_count: {: >10d}".format(kern
.globals.vm_page_inactive_count
)
28 print "vm_page_wire_count: {: >10d}".format(kern
.globals.vm_page_wire_count
)
29 print "vm_page_free_count: {: >10d}".format(kern
.globals.vm_page_free_count
)
30 print "vm_page_purgeable_count: {: >10d}".format(kern
.globals.vm_page_purgeable_count
)
31 print "vm_page_inactive_target: {: >10d}".format(kern
.globals.vm_page_inactive_target
)
32 print "vm_page_free_target: {: >10d}".format(kern
.globals.vm_page_free_target
)
33 print "inuse_ptepages_count: {: >10d}".format(kern
.globals.inuse_ptepages_count
)
34 print "vm_page_free_reserved: {: >10d}".format(kern
.globals.vm_page_free_reserved
)
36 @xnudebug_test('test_memstats')
37 def TestMemstats(kernel_target
, config
, lldb_obj
, isConnected
):
38 """ Test the functionality of memstats command
44 print "Target is not connected. Cannot test memstats"
46 res
= lldb
.SBCommandReturnObject()
47 lldb_obj
.debugger
.GetCommandInterpreter().HandleCommand("memstats", res
)
48 result
= res
.GetOutput()
49 if result
.split(":")[1].strip().find('None') == -1 :
56 # Macro: showmemorystatus
def CalculateLedgerPeak(phys_footprint_entry):
    """ Internal function to calculate ledger peak value for the given phys footprint entry
        params: phys_footprint_entry - value representing struct ledger_entry *
        return: value - representing the ledger peak for the given phys footprint entry
    """
    # sched_tick advances at a fixed rate; /20 converts to the coarse time
    # unit stored in le_peaks[].le_time (matches upstream xnu).
    now = kern.globals.sched_tick / 20
    ledger_peak = phys_footprint_entry.le_credit - phys_footprint_entry.le_debit
    # Consider each recorded peak only if it is recent (within one unit).
    if (now - phys_footprint_entry._le.le_peaks[0].le_time <= 1) and (phys_footprint_entry._le.le_peaks[0].le_max > ledger_peak):
        ledger_peak = phys_footprint_entry._le.le_peaks[0].le_max
    if (now - phys_footprint_entry._le.le_peaks[1].le_time <= 1) and (phys_footprint_entry._le.le_peaks[1].le_max > ledger_peak):
        ledger_peak = phys_footprint_entry._le.le_peaks[1].le_max
    # NOTE(review): the return was elided in the provided chunk; restored.
    return ledger_peak
@header("{: >8s} {: >22s} {: >22s} {: >11s} {: >11s} {: >12s} {: >10s} {: >13s} {: ^10s} {: >8s} {: <20s}\n".format(
                'pid', 'effective priority', 'requested priority', 'state', 'user_data', 'physical', 'iokit', 'footprint',
                'spike', 'limit', 'command'))
def GetMemoryStatusNode(proc_val):
    """ Internal function to get memorystatus information from the given proc
        params: proc - value representing struct proc *
        return: str - formatted output information for proc object
    """
    out_str = ''
    task_val = Cast(proc_val.task, 'task *')
    task_ledgerp = task_val.ledger

    task_physmem_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.phys_mem]
    task_iokit_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.iokit_mapped]
    task_phys_footprint_ledger_entry = task_ledgerp.l_entries[kern.globals.task_ledgers.phys_footprint]
    page_size = kern.globals.page_size

    # Ledger entries are byte counts; convert everything to pages.
    phys_mem_footprint = (task_physmem_footprint_ledger_entry.le_credit - task_physmem_footprint_ledger_entry.le_debit) / page_size
    iokit_footprint = (task_iokit_footprint_ledger_entry.le_credit - task_iokit_footprint_ledger_entry.le_debit) / page_size
    phys_footprint = (task_phys_footprint_ledger_entry.le_credit - task_phys_footprint_ledger_entry.le_debit) / page_size
    phys_footprint_limit = task_phys_footprint_ledger_entry.le_limit / page_size
    ledger_peak = CalculateLedgerPeak(task_phys_footprint_ledger_entry)
    phys_footprint_spike = ledger_peak / page_size

    format_string = '{0: >8d} {1: >22d} {2: >22d} {3: #011x} {4: #011x} {5: >12d} {6: >10d} {7: >13d}'
    out_str += format_string.format(proc_val.p_pid, proc_val.p_memstat_effectivepriority,
        proc_val.p_memstat_requestedpriority, proc_val.p_memstat_state, proc_val.p_memstat_userdata,
        phys_mem_footprint, iokit_footprint, phys_footprint)
    # Only show a spike column when the recent peak differs from the
    # current footprint; otherwise print a placeholder.
    if phys_footprint != phys_footprint_spike:
        out_str += "{: ^12d}".format(phys_footprint_spike)
    else:
        out_str += "{: ^12s}".format('-')
    out_str += "{: 8d} {: <20s}\n".format(phys_footprint_limit, proc_val.p_comm)
    # NOTE(review): out_str init / else branch / return were elided in the
    # provided chunk; restored from upstream xnu lldbmacros.
    return out_str
105 @lldb_command('showmemorystatus')
106 def ShowMemoryStatus(cmd_args
=None):
107 """ Routine to display each entry in jetsam list with a summary of pressure statistics
108 Usage: showmemorystatus
112 print GetMemoryStatusNode
.header
113 print "{: >91s} {: >10s} {: >13s} {: ^10s} {: >8s}\n".format("(pages)", "(pages)", "(pages)",
114 "(pages)", "(pages)")
115 while bucket_index
< bucket_count
:
116 current_bucket
= kern
.globals.memstat_bucket
[bucket_index
]
117 current_list
= current_bucket
.list
118 current_proc
= Cast(current_list
.tqh_first
, 'proc *')
119 while unsigned(current_proc
) != 0:
120 print GetMemoryStatusNode(current_proc
)
121 current_proc
= current_proc
.p_memstat_list
.tqe_next
126 # EndMacro: showmemorystatus
@lldb_type_summary(['zone','zone_t'])
@header("{:^18s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}({:>6s} {:>6s} {:>6s}) {:^15s} {:<20s}".format(
'ZONE', 'TOT_SZ', 'PAGE_COUNT', 'ALLOC_ELTS', 'FREE_ELTS', 'FREE_SZ', 'ELT_SZ', 'ALLOC', 'ELTS', 'PGS', 'WASTE', 'FLAGS', 'NAME'))
def GetZoneSummary(zone):
    """ Summarize a zone with important information. See help zprint for description of each field
        params:
          zone: value - obj representing a zone in kernel
        returns:
          str - summary of the zone
    """
    out_string = ""
    format_string = '{:#018x} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:10d} {:6d} {:6d} {:6d} {markings} {name:s} '
    pagesize = kern.globals.page_size

    free_elements = zone.countfree
    free_size = free_elements * zone.elem_size

    alloc_pages = zone.alloc_size / pagesize
    if zone.use_page_list:
        # Page-list zones embed per-page metadata; account for the space it
        # consumes (rounded up to an element boundary) when computing how
        # many elements fit per allocation.
        metadata_size = sizeof('struct zone_page_metadata')
        metadata_offset = metadata_size
        if ((metadata_size % zone.elem_size) != 0):
            metadata_offset += zone.elem_size - (metadata_size % zone.elem_size)
        alloc_count = ((pagesize - metadata_offset) / zone.elem_size) * alloc_pages
        alloc_waste = metadata_offset * alloc_pages
    else:
        alloc_count = zone.alloc_size / zone.elem_size
        alloc_waste = zone.alloc_size % zone.elem_size

    # NOTE(review): entries elided in the provided chunk were restored from
    # upstream xnu lldbmacros; flag letters match the zprint legend.
    marks = [
            ["collectable",                 "C"],
            ["expandable",                  "X"],
            ["noencrypt",                   "$"],
            ["caller_acct",                 "@"],
            ["exhaustible",                 "H"],
            ["allows_foreign",              "F"],
            ["async_prio_refill",           "R"],
            ["no_callout",                  "O"],
            ["zleak_on",                    "L"],
            ["doing_alloc_without_vm_priv", "A"],
            ["doing_alloc_with_vm_priv",    "S"],
            ["waiting",                     "W"],
            ["doing_gc",                    "G"],
            ["use_page_list",               "P"]
            ]
    if kern.arch == 'x86_64':
        marks.append(["gzalloc_exempt",     "M"])
        marks.append(["alignment_required", "N"])
    markings = ""
    for mark in marks:
        if zone.__getattr__(mark[0]):
            markings += mark[1]
        else:
            markings += " "
    out_string += format_string.format(zone, zone.cur_size, zone.page_count,
                    zone.count, free_elements, free_size,
                    zone.elem_size, zone.alloc_size, alloc_count,
                    alloc_pages, alloc_waste, name = zone.zone_name, markings=markings)

    if zone.exhaustible:
        out_string += "(max: {:d})".format(zone.max_size)
    return out_string
195 @lldb_command('zprint')
196 def Zprint(cmd_args
=None):
197 """ Routine to print a summary listing of all the kernel zones
198 All columns are printed in decimal
202 $ - not encrypted during hibernation
203 @ - allocs and frees are accounted to caller process for KPRVT
205 F - allows foreign memory (memory not allocated from zone_map)
206 M - gzalloc will avoid monitoring this zone
207 R - will be refilled when below low water mark
208 O - does not allow refill callout to fill zone on noblock allocation
209 N - zone requires alignment (avoids padding this zone for debugging)
210 A - currently trying to allocate more backing memory from kernel_memory_allocate without VM priv
211 S - currently trying to allocate more backing memory from kernel_memory_allocate with VM priv
212 W - another thread is waiting for more memory
213 L - zone is being monitored by zleaks
214 G - currently running GC
215 P - uses zone_page_metadata
218 print GetZoneSummary
.header
219 for zval
in kern
.zones
:
220 print GetZoneSummary(zval
)
222 @xnudebug_test('test_zprint')
223 def TestZprint(kernel_target
, config
, lldb_obj
, isConnected
):
224 """ Test the functionality of zprint command
230 print "Target is not connected. Cannot test memstats"
232 res
= lldb
.SBCommandReturnObject()
233 lldb_obj
.debugger
.GetCommandInterpreter().HandleCommand("zprint", res
)
234 result
= res
.GetOutput()
235 if len(result
.split("\n")) > 2:
243 # Macro: showzfreelist
245 def ShowZfreeListHeader(zone
):
246 """ Helper routine to print a header for zone freelist.
247 (Since the freelist does not have a custom type, this is not defined as a Type Summary).
249 zone:zone_t - Zone object to print header info
254 scaled_factor
= (unsigned(kern
.globals.zp_factor
) +
255 (unsigned(zone
.elem_size
) >> unsigned(kern
.globals.zp_scale
)))
258 out_str
+= "{0: <9s} {1: <12s} {2: <18s} {3: <18s} {4: <6s}\n".format('ELEM_SIZE', 'COUNT', 'NCOOKIE', 'PCOOKIE', 'FACTOR')
259 out_str
+= "{0: <9d} {1: <12d} 0x{2:0>16x} 0x{3:0>16x} {4: <2d}/{5: <2d}\n\n".format(
260 zone
.elem_size
, zone
.count
, kern
.globals.zp_nopoison_cookie
, kern
.globals.zp_poisoned_cookie
, zone
.zp_count
, scaled_factor
)
261 out_str
+= "{0: <7s} {1: <18s} {2: <18s} {3: <18s} {4: <18s} {5: <18s} {6: <14s}\n".format(
262 'NUM', 'ELEM', 'NEXT', 'BACKUP', '^ NCOOKIE', '^ PCOOKIE', 'POISON (PREV)')
265 def ShowZfreeListChain(zone
, zfirst
, zlimit
):
266 """ Helper routine to print a zone free list chain
268 zone: zone_t - Zone object
269 zfirst: void * - A pointer to the first element of the free list chain
270 zlimit: int - Limit for the number of elements to be printed by showzfreelist
274 current
= Cast(zfirst
, 'void *')
275 while ShowZfreeList
.elts_found
< zlimit
:
276 ShowZfreeList
.elts_found
+= 1
277 znext
= dereference(Cast(current
, 'vm_offset_t *'))
278 backup_ptr
= kern
.GetValueFromAddress((unsigned(Cast(current
, 'vm_offset_t')) + unsigned(zone
.elem_size
) - sizeof('vm_offset_t')), 'vm_offset_t *')
279 backup_val
= dereference(backup_ptr
)
280 n_unobfuscated
= (unsigned(backup_val
) ^
unsigned(kern
.globals.zp_nopoison_cookie
))
281 p_unobfuscated
= (unsigned(backup_val
) ^
unsigned(kern
.globals.zp_poisoned_cookie
))
283 if p_unobfuscated
== unsigned(znext
):
284 poison_str
= "P ({0: <d})".format(ShowZfreeList
.elts_found
- ShowZfreeList
.last_poisoned
)
285 ShowZfreeList
.last_poisoned
= ShowZfreeList
.elts_found
287 if n_unobfuscated
!= unsigned(znext
):
288 poison_str
= "INVALID"
289 print "{0: <7d} 0x{1:0>16x} 0x{2:0>16x} 0x{3:0>16x} 0x{4:0>16x} 0x{5:0>16x} {6: <14s}\n".format(
290 ShowZfreeList
.elts_found
, unsigned(current
), unsigned(znext
), unsigned(backup_val
), n_unobfuscated
, p_unobfuscated
, poison_str
)
291 if unsigned(znext
) == 0:
293 current
= Cast(znext
, 'void *')
295 @static_var('elts_found',0)
296 @static_var('last_poisoned',0)
297 @lldb_command('showzfreelist')
298 def ShowZfreeList(cmd_args
=None):
299 """ Walk the freelist for a zone, printing out the primary and backup next pointers, the poisoning cookies, and the poisoning status of each element.
300 Usage: showzfreelist <zone> [iterations]
302 Will walk up to 50 elements by default, pass a limit in 'iterations' to override.
305 print ShowZfreeList
.__doc
__
307 ShowZfreeList
.elts_found
= 0
308 ShowZfreeList
.last_poisoned
= 0
310 zone
= kern
.GetValueFromAddress(cmd_args
[0], 'struct zone *')
312 if len(cmd_args
) >= 2:
313 zlimit
= ArgumentStringToInt(cmd_args
[1])
314 ShowZfreeListHeader(zone
)
316 if unsigned(zone
.use_page_list
) == 1:
317 if unsigned(zone
.allows_foreign
) == 1:
318 for free_page_meta
in IterateQueue(zone
.pages
.any_free_foreign
, 'struct zone_page_metadata *', 'pages'):
319 if ShowZfreeList
.elts_found
== zlimit
:
321 zfirst
= Cast(free_page_meta
.elements
, 'void *')
322 if unsigned(zfirst
) != 0:
323 ShowZfreeListChain(zone
, zfirst
, zlimit
)
324 for free_page_meta
in IterateQueue(zone
.pages
.intermediate
, 'struct zone_page_metadata *', 'pages'):
325 if ShowZfreeList
.elts_found
== zlimit
:
327 zfirst
= Cast(free_page_meta
.elements
, 'void *')
328 if unsigned(zfirst
) != 0:
329 ShowZfreeListChain(zone
, zfirst
, zlimit
)
330 for free_page_meta
in IterateQueue(zone
.pages
.all_free
, 'struct zone_page_metadata *', 'pages'):
331 if ShowZfreeList
.elts_found
== zlimit
:
333 zfirst
= Cast(free_page_meta
.elements
, 'void *')
334 if unsigned(zfirst
) != 0:
335 ShowZfreeListChain(zone
, zfirst
, zlimit
)
337 zfirst
= Cast(zone
.free_elements
, 'void *')
338 if unsigned(zfirst
) != 0:
339 ShowZfreeListChain(zone
, zfirst
, zlimit
)
341 if ShowZfreeList
.elts_found
== zlimit
:
342 print "Stopped at {0: <d} elements!".format(zlimit
)
344 print "Found {0: <d} elements!".format(ShowZfreeList
.elts_found
)
346 # EndMacro: showzfreelist
350 @lldb_command('zstack')
351 def Zstack(cmd_args
=None):
352 """ Zone leak debugging: Print the stack trace of log element at <index>. If a <count> is supplied, it prints <count> log elements starting at <index>.
353 Usage: zstack <index> [<count>]
355 The suggested usage is to look at indexes below zcurrent and look for common stack traces.
356 The stack trace that occurs the most is probably the cause of the leak. Find the pc of the
357 function calling into zalloc and use the countpcs command to find out how often that pc occurs in the log.
358 The pc occuring in a high percentage of records is most likely the source of the leak.
360 The findoldest command is also useful for leak debugging since it identifies the oldest record
361 in the log, which may indicate the leaker.
366 if int(kern
.globals.log_records
) == 0:
367 print "Zone logging not enabled. Add 'zlog=<zone name>' to boot-args."
369 if int(kern
.globals.zlog_btlog
) == 0:
370 print "Zone logging enabled, but zone has not been initialized yet."
374 if len(cmd_args
) >= 2:
375 count
= ArgumentStringToInt(cmd_args
[1])
376 zstack_index
= unsigned(cmd_args
[0])
377 while count
and (zstack_index
!= 0xffffff):
378 zstack_record_offset
= zstack_index
* unsigned(kern
.globals.zlog_btlog
.btrecord_size
)
379 zstack_record
= kern
.GetValueFromAddress(unsigned(kern
.globals.zlog_btlog
.btrecords
) + zstack_record_offset
, 'btlog_record_t *')
380 ShowZStackRecord(zstack_record
, zstack_index
)
381 zstack_index
= zstack_record
.next
388 @lldb_command('findoldest')
389 def FindOldest(cmd_args
=None):
390 """ Zone leak debugging: find and print the oldest record in the log.
392 Once it prints a stack trace, find the pc of the caller above all the zalloc, kalloc and
393 IOKit layers. Then use the countpcs command to see how often this caller has allocated
394 memory. A caller with a high percentage of records in the log is probably the leaker.
396 if int(kern
.globals.log_records
) == 0:
397 print FindOldest
.__doc
__
399 if int(kern
.globals.zlog_btlog
) == 0:
400 print "Zone logging enabled, but zone has not been initialized yet."
402 index
= kern
.globals.zlog_btlog
.head
403 if unsigned(index
) != 0xffffff:
404 print "Oldest record is at log index: {0: <d}".format(index
)
407 print "No Records Present"
409 # EndMacro : findoldest
413 @lldb_command('countpcs')
414 def Countpcs(cmd_args
=None):
415 """ Zone leak debugging: search the log and print a count of all log entries that contain the given <pc>
419 This is useful for verifying a suspected <pc> as being the source of
420 the leak. If a high percentage of the log entries contain the given <pc>, then it's most
421 likely the source of the leak. Note that this command can take several minutes to run.
424 print Countpcs
.__doc
__
426 if int(kern
.globals.log_records
) == 0:
427 print "Zone logging not enabled. Add 'zlog=<zone name>' to boot-args."
429 if int(kern
.globals.zlog_btlog
) == 0:
430 print "Zone logging enabled, but zone has not been initialized yet."
433 cpcs_index
= unsigned(kern
.globals.zlog_btlog
.head
)
434 target_pc
= unsigned(kern
.GetValueFromAddress(cmd_args
[0], 'void *'))
436 depth
= unsigned(kern
.globals.zlog_btlog
.btrecord_btdepth
)
438 while cpcs_index
!= 0xffffff:
439 cpcs_record_offset
= cpcs_index
* unsigned(kern
.globals.zlog_btlog
.btrecord_size
)
440 cpcs_record
= kern
.GetValueFromAddress(unsigned(kern
.globals.zlog_btlog
.btrecords
) + cpcs_record_offset
, 'btlog_record_t *')
443 frame_pc
= unsigned(cpcs_record
.bt
[frame
])
444 if frame_pc
== target_pc
:
448 cpcs_index
= cpcs_record
.next
449 print "Occured {0: <d} times in log ({1: <d}{2: <s} of records)".format(found
, (found
* 100)/unsigned(kern
.globals.zlog_btlog
.activecount
), '%')
455 @lldb_command('findelem')
456 def FindElem(cmd_args
=None):
457 """ Zone corruption debugging: search the log and print out the stack traces for all log entries that
458 refer to the given zone element.
459 Usage: findelem <elem addr>
461 When the kernel panics due to a corrupted zone element, get the
462 element address and use this command. This will show you the stack traces of all logged zalloc and
463 zfree operations which tells you who touched the element in the recent past. This also makes
464 double-frees readily apparent.
467 print FindElem
.__doc
__
469 if int(kern
.globals.log_records
) == 0:
470 print "Zone logging not enabled. Add 'zlog=<zone name>' to boot-args."
472 if int(kern
.globals.zlog_btlog
) == 0:
473 print "Zone logging enabled, but zone has not been initialized yet."
476 target_element
= unsigned(kern
.GetValueFromAddress(cmd_args
[0], 'void *'))
477 index
= unsigned(kern
.globals.zlog_btlog
.head
)
480 while index
!= 0xffffff:
481 findelem_record_offset
= index
* unsigned(kern
.globals.zlog_btlog
.btrecord_size
)
482 findelem_record
= kern
.GetValueFromAddress(unsigned(kern
.globals.zlog_btlog
.btrecords
) + findelem_record_offset
, 'btlog_record_t *')
483 if unsigned(findelem_record
.element
) == target_element
:
485 if int(findelem_record
.operation
) == prev_op
:
486 print "{0: <s} DOUBLE OP! {1: <s}".format(('*' * 8), ('*' * 8))
487 prev_op
= int(findelem_record
.operation
)
488 index
= findelem_record
.next
494 @lldb_command('btlog_find', "AS")
495 def BtlogFind(cmd_args
=None, cmd_options
={}):
496 """ Search the btlog_t for entries corresponding to the given element.
497 Use -A flag to print all entries.
498 Use -S flag to summarize the count of records
499 Usage: btlog_find <btlog_t> <element>
500 Usage: btlog_find <btlog_t> -A
501 Note: Backtraces will be in chronological order, with oldest entries aged out in FIFO order as needed.
504 raise ArgumentError("Need a btlog_t parameter")
505 btlog
= kern
.GetValueFromAddress(cmd_args
[0], 'btlog_t *')
509 target_elem
= 0xffffffff
511 if "-A" in cmd_options
:
514 if not printall
and len(cmd_args
) < 2:
515 raise ArgumentError("<element> is missing in args. Need a search pointer.")
516 target_elem
= unsigned(kern
.GetValueFromAddress(cmd_args
[1], 'void *'))
518 if "-S" in cmd_options
:
521 index
= unsigned(btlog
.head
)
523 record_size
= unsigned(btlog
.btrecord_size
)
525 while index
!= 0xffffff:
526 record_offset
= index
* record_size
527 record
= kern
.GetValueFromAddress(unsigned(btlog
.btrecords
) + record_offset
, 'btlog_record_t *')
528 if printall
or unsigned(record
.element
) == target_elem
:
529 _s
= '{0: <s} {2: <#0x} OP {1: <d} {3: <s}'.format(('-' * 8), record
.operation
, unsigned(record
.element
), ('-' * 8))
530 _s
+= GetBtlogBacktrace(btlog
.btrecord_btdepth
, record
)
532 if _s
not in summary_cache
:
533 summary_cache
[_s
] = 1
535 summary_cache
[_s
] += 1
540 if (progress
% 1000) == 0: print '{0: <d} entries searched!\n'.format(progress
)
541 except ValueError, e
:
545 print "=================== SUMMARY =================="
546 for (k
,v
) in summary_cache
.iteritems():
547 print "Count: %d %s \n " % (v
, k
)
550 #EndMacro: btlog_find
554 @lldb_command('showzalloc')
555 def ShowZalloc(cmd_args
=None):
556 """ Prints a zallocation from the zallocations array based off its index and prints the associated symbolicated backtrace.
557 Usage: showzalloc <index>
560 print ShowZalloc
.__doc
__
562 if unsigned(kern
.globals.zallocations
) == 0:
563 print "zallocations array not initialized!"
565 zallocation
= kern
.globals.zallocations
[ArgumentStringToInt(cmd_args
[0])]
567 ShowZTrace([str(int(zallocation
.za_trace_index
))])
569 #EndMacro: showzalloc
573 @lldb_command('showztrace')
574 def ShowZTrace(cmd_args
=None):
575 """ Prints the backtrace from the ztraces array at index
576 Usage: showztrace <trace index>
579 print ShowZTrace
.__doc
__
581 if unsigned(kern
.globals.ztraces
) == 0:
582 print "ztraces array not initialized!"
584 ztrace_addr
= kern
.globals.ztraces
[ArgumentStringToInt(cmd_args
[0])]
586 ShowZstackTraceHelper(ztrace_addr
.zt_stack
, ztrace_addr
.zt_depth
)
588 #EndMacro: showztrace
590 #Macro: showztraceaddr
592 @lldb_command('showztraceaddr')
593 def ShowZTraceAddr(cmd_args
=None):
594 """ Prints the struct ztrace passed in.
595 Usage: showztraceaddr <trace address>
598 print ShowZTraceAddr
.__doc
__
600 ztrace_ptr
= kern
.GetValueFromAddress(cmd_args
[0], 'struct ztrace *')
601 print dereference(ztrace_ptr
)
602 ShowZstackTraceHelper(ztrace_ptr
.zt_stack
, ztrace_ptr
.zt_depth
)
604 #EndMacro: showztraceaddr
606 #Macro: showzstacktrace
608 @lldb_command('showzstacktrace')
609 def ShowZstackTrace(cmd_args
=None):
610 """ Routine to print a stacktrace stored by OSBacktrace.
611 Usage: showzstacktrace <saved stacktrace> [size]
613 size is optional, defaults to 15.
616 print ShowZstackTrace
.__doc
__
618 void_ptr_type
= gettype('void *')
619 void_double_ptr_type
= void_ptr_type
.GetPointerType()
620 trace
= kern
.GetValueFromAddress(cmd_args
[0], void_double_ptr_type
)
622 if len(cmd_args
) >= 2:
623 trace_size
= ArgumentStringToInt(cmd_args
[1])
624 ShowZstackTraceHelper(trace
, trace_size
)
626 #EndMacro: showzstacktrace
628 def ShowZstackTraceHelper(stack
, depth
):
629 """ Helper routine for printing a zstack.
631 stack: void *[] - An array of pointers representing the Zstack
632 depth: int - The depth of the ztrace stack
637 while trace_current
< depth
:
638 trace_addr
= stack
[trace_current
]
639 symbol_arr
= kern
.SymbolicateFromAddress(unsigned(trace_addr
))
641 symbol_str
= str(symbol_arr
[0].addr
)
644 print '{0: <#x} {1: <s}'.format(trace_addr
, symbol_str
)
647 #Macro: showtopztrace
649 @lldb_command('showtopztrace')
650 def ShowTopZtrace(cmd_args
=None):
651 """ Shows the ztrace with the biggest size.
652 (According to top_ztrace, not by iterating through the hash table)
654 top_trace
= kern
.globals.top_ztrace
655 print 'Index: {0: <d}'.format((unsigned(top_trace
) - unsigned(kern
.globals.ztraces
)) / sizeof('struct ztrace'))
656 print dereference(top_trace
)
657 ShowZstackTraceHelper(top_trace
.zt_stack
, top_trace
.zt_depth
)
659 #EndMacro: showtopztrace
663 @lldb_command('showzallocs')
664 def ShowZallocs(cmd_args
=None):
665 """ Prints all allocations in the zallocations table
667 if unsigned(kern
.globals.zallocations
) == 0:
668 print "zallocations array not initialized!"
670 print '{0: <5s} {1: <18s} {2: <5s} {3: <15s}'.format('INDEX','ADDRESS','TRACE','SIZE')
672 max_zallocation
= unsigned(kern
.globals.zleak_alloc_buckets
)
674 while current_index
< max_zallocation
:
675 current_zalloc
= kern
.globals.zallocations
[current_index
]
676 if int(current_zalloc
.za_element
) != 0:
677 print '{0: <5d} {1: <#018x} {2: <5d} {3: <15d}'.format(current_index
, current_zalloc
.za_element
, current_zalloc
.za_trace_index
, unsigned(current_zalloc
.za_size
))
678 allocation_count
+= 1
680 print 'Total Allocations: {0: <d}'.format(allocation_count
)
682 #EndMacro: showzallocs
684 #Macro: showzallocsfortrace
686 @lldb_command('showzallocsfortrace')
687 def ShowZallocsForTrace(cmd_args
=None):
688 """ Prints all allocations pointing to the passed in trace's index into ztraces by looking through zallocations table
689 Usage: showzallocsfortrace <trace index>
692 print ShowZallocsForTrace
.__doc
__
694 print '{0: <5s} {1: <18s} {2: <15s}'.format('INDEX','ADDRESS','SIZE')
695 target_index
= ArgumentStringToInt(cmd_args
[0])
697 max_zallocation
= unsigned(kern
.globals.zleak_alloc_buckets
)
699 while current_index
< max_zallocation
:
700 current_zalloc
= kern
.globals.zallocations
[current_index
]
701 if unsigned(current_zalloc
.za_element
) != 0 and (unsigned(current_zalloc
.za_trace_index
) == unsigned(target_index
)):
702 print '{0: <5d} {1: <#018x} {2: <6d}'.format(current_index
, current_zalloc
.za_element
, current_zalloc
.za_size
)
703 allocation_count
+= 1
705 print 'Total Allocations: {0: <d}'.format(allocation_count
)
707 #EndMacro: showzallocsfortrace
@lldb_command('showztraces')
def ShowZTraces(cmd_args=None):
    """ Prints all traces with size > 0
    """
    # Delegate to the thresholded variant with a zero lower bound.
    ShowZTracesAbove([0])
717 #EndMacro: showztraces
719 #Macro: showztracesabove
721 @lldb_command('showztracesabove')
722 def ShowZTracesAbove(cmd_args
=None):
723 """ Prints all traces with size greater than X
724 Usage: showztracesabove <size>
727 print ShowZTracesAbove
.__doc
__
729 print '{0: <5s} {1: <6s}'.format('INDEX','SIZE')
732 max_ztrace
= unsigned(kern
.globals.zleak_trace_buckets
)
733 while current_index
< max_ztrace
:
734 ztrace_current
= kern
.globals.ztraces
[current_index
]
735 if ztrace_current
.zt_size
> unsigned(cmd_args
[0]):
736 print '{0: <5d} {1: <6d}'.format(current_index
, int(ztrace_current
.zt_size
))
739 print 'Total traces: {0: <d}'.format(ztrace_count
)
741 #EndMacro: showztracesabove
743 #Macro: showztracehistogram
745 @lldb_command('showztracehistogram')
746 def ShowZtraceHistogram(cmd_args
=None):
747 """ Prints the histogram of the ztrace table
749 print '{0: <5s} {1: <9s} {2: <10s}'.format('INDEX','HIT_COUNT','COLLISIONS')
752 max_ztrace
= unsigned(kern
.globals.zleak_trace_buckets
)
753 while current_index
< max_ztrace
:
754 ztrace_current
= kern
.globals.ztraces
[current_index
]
755 if ztrace_current
.zt_hit_count
!= 0:
756 print '{0: <5d} {1: <9d} {2: <10d}'.format(current_index
, ztrace_current
.zt_hit_count
, ztrace_current
.zt_collisions
)
759 print 'Total traces: {0: <d}'.format(ztrace_count
)
761 #EndMacro: showztracehistogram
763 #Macro: showzallochistogram
765 @lldb_command('showzallochistogram')
766 def ShowZallocHistogram(cmd_args
=None):
767 """ Prints the histogram for the zalloc table
769 print '{0: <5s} {1: <9s}'.format('INDEX','HIT_COUNT')
771 zallocation_count
= 0
772 max_ztrace
= unsigned(kern
.globals.zleak_alloc_buckets
)
773 while current_index
< max_ztrace
:
774 zallocation_current
= kern
.globals.zallocations
[current_index
]
775 if zallocation_current
.za_hit_count
!= 0:
776 print '{0: <5d} {1: <9d}'.format(current_index
, zallocation_current
.za_hit_count
)
777 zallocation_count
+= 1
779 print 'Total Allocations: {0: <d}'.format(zallocation_count
)
781 #EndMacro: showzallochistogram
785 @lldb_command('showzstats')
786 def ShowZstats(cmd_args
=None):
787 """ Prints the zone leak detection stats
789 print 'z_alloc_collisions: {0: <d}, z_trace_collisions: {1: <d}'.format(unsigned(kern
.globals.z_alloc_collisions
), unsigned(kern
.globals.z_trace_collisions
))
790 print 'z_alloc_overwrites: {0: <d}, z_trace_overwrites: {1: <d}'.format(unsigned(kern
.globals.z_alloc_overwrites
), unsigned(kern
.globals.z_trace_overwrites
))
791 print 'z_alloc_recorded: {0: <d}, z_trace_recorded: {1: <d}'.format(unsigned(kern
.globals.z_alloc_recorded
), unsigned(kern
.globals.z_trace_recorded
))
793 #EndMacro: showzstats
def GetBtlogBacktrace(depth, zstack_record):
    """ Helper routine for getting a BT Log record backtrace stack.
        params:
            depth:int - The depth of the zstack record
            zstack_record:btlog_record_t * - A BTLog record
        returns:
            str - string with backtrace in it.
    """
    out_str = ''
    frame = 0
    if not zstack_record:
        return "Zstack record none!"

    depth_val = unsigned(depth)
    while frame < depth_val:
        frame_pc = zstack_record.bt[frame]
        # A zero pc terminates the recorded backtrace early.
        if not frame_pc or int(frame_pc) == 0:
            break
        symbol_arr = kern.SymbolicateFromAddress(frame_pc)
        if symbol_arr:
            symbol_str = str(symbol_arr[0].addr)
        else:
            symbol_str = ''
        out_str += "{0: <#0x} <{1: <s}>\n".format(frame_pc, symbol_str)
        frame += 1
    # NOTE(review): initializers, break, symbol fallback and return were
    # elided in the provided chunk; restored from upstream.
    return out_str
822 def ShowZStackRecord(zstack_record
, zstack_index
):
823 """ Helper routine for printing a single zstack record
825 zstack_record:btlog_record_t * - A BTLog record
826 zstack_index:int - Index for the record in the BTLog table
831 if zstack_record
.operation
== 1:
835 out_str
+= "{0: <#0x} : Index {1: <d} {2: <s}\n".format(zstack_record
.element
, zstack_index
, ('-' * 8))
837 print GetBtlogBacktrace(kern
.globals.zlog_btlog
.btrecord_btdepth
, zstack_record
)
841 @lldb_command('showioalloc')
842 def ShowIOAllocations(cmd_args
=None):
843 """ Show some accounting of memory allocated by IOKit allocators. See ioalloccount man page for details.
844 Routine to display a summary of memory accounting allocated by IOKit allocators.
846 print "Instance allocation = {0: <#0x} = {1: d}K".format(kern
.globals.debug_ivars_size
, (kern
.globals.debug_ivars_size
/ 1024))
847 print "Container allocation = {0: <#0x} = {1: d}K".format(kern
.globals.debug_container_malloc_size
, (kern
.globals.debug_container_malloc_size
/ 1024))
848 print "IOMalloc allocation = {0: <#0x} = {1: d}K".format(kern
.globals.debug_iomalloc_size
, (kern
.globals.debug_iomalloc_size
/ 1024))
849 print "Container allocation = {0: <#0x} = {1: d}K".format(kern
.globals.debug_iomallocpageable_size
, (kern
.globals.debug_iomallocpageable_size
/ 1024))
852 # EndMacro: showioalloc
855 # Macro: showselectmem
856 @lldb_command('showselectmem', "S:")
857 def ShowSelectMem(cmd_args
=None, cmd_options
={}):
858 """ Show memory cached by threads on calls to select.
860 usage: showselectmem [-v]
861 -v : print each thread's memory
862 (one line per thread with non-zero select memory)
863 -S {addr} : Find the thread whose thread-local select set
864 matches the given address
868 if config
['verbosity'] > vHUMAN
:
870 if "-S" in cmd_options
:
871 opt_wqs
= unsigned(kern
.GetValueFromAddress(cmd_options
["-S"], 'uint64_t *'))
873 raise ArgumentError("Invalid waitq set address: {:s}".format(cmd_options
["-S"]))
876 print "{:18s} {:10s} {:s}".format('Task', 'Thread ID', 'Select Mem (bytes)')
878 for th
in IterateQueue(t
.threads
, 'thread *', 'task_threads'):
879 uth
= Cast(th
.uthread
, 'uthread *');
881 if hasattr(uth
, 'uu_allocsize'): # old style
882 thmem
= uth
.uu_allocsize
884 elif hasattr(uth
, 'uu_wqstate_sz'): # new style
885 thmem
= uth
.uu_wqstate_sz
888 print "What kind of uthread is this?!"
890 if opt_wqs
and opt_wqs
== unsigned(wqs
):
891 print "FOUND: {:#x} in thread: {:#x} ({:#x})".format(opt_wqs
, unsigned(th
), unsigned(th
.thread_id
))
892 if verbose
and thmem
> 0:
893 print "{:<#18x} {:<#10x} {:d}".format(unsigned(t
), unsigned(th
.thread_id
), thmem
)
896 print "Total: {:d} bytes ({:d} kbytes)".format(selmem
, selmem
/1024)
897 # Endmacro: showselectmem
@lldb_command('showtaskvme', "PS")
def ShowTaskVmeHelper(cmd_args=None, cmd_options={}):
    """ Display a summary list of the specified vm_map's entries
        Usage: showtaskvme <task address> (ex. showtaskvme 0x00ataskptr00 )
        Use -S flag to show VM object shadow chains
        Use -P flag to show pager info (mapped file, compressed pages, ...)
    """
    show_pager_info = False
    show_all_shadows = False
    if "-P" in cmd_options:
        show_pager_info = True
    if "-S" in cmd_options:
        show_all_shadows = True
    task = kern.GetValueFromAddress(cmd_args[0], 'task *')
    ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
@lldb_command('showallvme', "PS")
def ShowAllVME(cmd_args=None, cmd_options={}):
    """ Routine to print a summary listing of all the vm map entries
        Go Through each task in system and show the vm memory regions
        Use -S flag to show VM object shadow chains
        Use -P flag to show pager info (mapped file, compressed pages, ...)
    """
    # Same option handling as showtaskvme, applied to every task.
    show_pager_info = "-P" in cmd_options
    show_all_shadows = "-S" in cmd_options
    for task in kern.tasks:
        ShowTaskVMEntries(task, show_pager_info, show_all_shadows)
933 @lldb_command('showallvm')
934 def ShowAllVM(cmd_args
=None):
935 """ Routine to print a summary listing of all the vm maps
937 for task
in kern
.tasks
:
938 print GetTaskSummary
.header
+ ' ' + GetProcSummary
.header
939 print GetTaskSummary(task
) + ' ' + GetProcSummary(Cast(task
.bsd_info
, 'proc *'))
940 print GetVMMapSummary
.header
941 print GetVMMapSummary(task
.map)
943 @lldb_command("showtaskvm")
944 def ShowTaskVM(cmd_args
=None):
945 """ Display info about the specified task's vm_map
946 syntax: (lldb) showtaskvm <task_ptr>
949 print ShowTaskVM
.__doc
__
951 task
= kern
.GetValueFromAddress(cmd_args
[0], 'task *')
953 print "Unknown arguments."
955 print GetTaskSummary
.header
+ ' ' + GetProcSummary
.header
956 print GetTaskSummary(task
) + ' ' + GetProcSummary(Cast(task
.bsd_info
, 'proc *'))
957 print GetVMMapSummary
.header
958 print GetVMMapSummary(task
.map)
961 @lldb_command('showallvmstats')
962 def ShowAllVMStats(cmd_args
=None):
963 """ Print a summary of vm statistics in a table format
965 page_size
= kern
.globals.page_size
966 vmstats
= lambda:None
967 vmstats
.wired_count
= 0
968 vmstats
.resident_count
= 0
969 vmstats
.resident_max
= 0
973 vmstats
.compressed
= 0
974 vmstats
.compressed_peak
= 0
975 vmstats
.compressed_lifetime
= 0
978 hdr_format
= "{0: >10s} {1: <20s} {2: >6s} {3: >10s} {4: >10s} {5: >10s} {6: >10s} {7: >10s} {8: >10s} {9: >10s} {10: >10s} {11: >10s} {12: >10s} {13: >10s} {14:}"
979 print hdr_format
.format('pid', 'command', '#ents', 'wired', 'vsize', 'rsize', 'NEW RSIZE', 'max rsize', 'internal', 'external', 'reusable', 'compressed', 'compressed', 'compressed', '')
980 print hdr_format
.format('', '', '', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(current)', '(peak)', '(lifetime)', '')
981 entry_format
= "{p.p_pid: >10d} {p.p_comm: <20s} {m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {s.error}"
983 for task
in kern
.tasks
:
984 proc
= Cast(task
.bsd_info
, 'proc *')
985 vmmap
= Cast(task
.map, '_vm_map *')
987 vmstats
.wired_count
= vmmap
.pmap
.stats
.wired_count
;
988 vmstats
.resident_count
= unsigned(vmmap
.pmap
.stats
.resident_count
);
989 vmstats
.resident_max
= vmmap
.pmap
.stats
.resident_max
;
990 vmstats
.internal
= unsigned(vmmap
.pmap
.stats
.internal
);
991 vmstats
.external
= unsigned(vmmap
.pmap
.stats
.external
);
992 vmstats
.reusable
= unsigned(vmmap
.pmap
.stats
.reusable
);
993 vmstats
.compressed
= unsigned(vmmap
.pmap
.stats
.compressed
);
994 vmstats
.compressed_peak
= unsigned(vmmap
.pmap
.stats
.compressed_peak
);
995 vmstats
.compressed_lifetime
= unsigned(vmmap
.pmap
.stats
.compressed_lifetime
);
996 vmstats
.new_resident_count
= vmstats
.internal
+ vmstats
.external
998 if vmstats
.internal
< 0:
1000 if vmstats
.external
< 0:
1001 vmstats
.error
+= '*'
1002 if vmstats
.reusable
< 0:
1003 vmstats
.error
+= '*'
1004 if vmstats
.compressed
< 0:
1005 vmstats
.error
+= '*'
1006 if vmstats
.compressed_peak
< 0:
1007 vmstats
.error
+= '*'
1008 if vmstats
.compressed_lifetime
< 0:
1009 vmstats
.error
+= '*'
1010 if vmstats
.new_resident_count
+vmstats
.reusable
!= vmstats
.resident_count
:
1011 vmstats
.error
+= '*'
1013 print entry_format
.format(p
=proc
, m
=vmmap
, vsize
=(unsigned(vmmap
.size
) / page_size
), t
=task
, s
=vmstats
)
1016 def ShowTaskVMEntries(task
, show_pager_info
, show_all_shadows
):
1017 """ Routine to print out a summary listing of all the entries in a vm_map
1019 task - core.value : a object of type 'task *'
1023 print "vm_map entries for task " + hex(task
)
1024 print GetTaskSummary
.header
1025 print GetTaskSummary(task
)
1027 print "Task {0: <#020x} has map = 0x0"
1029 print GetVMMapSummary
.header
1030 print GetVMMapSummary(task
.map)
1031 vme_list_head
= task
.map.hdr
.links
1032 vme_ptr_type
= GetType('vm_map_entry *')
1033 print GetVMEntrySummary
.header
1034 for vme
in IterateQueue(vme_list_head
, vme_ptr_type
, "links"):
1035 print GetVMEntrySummary(vme
, show_pager_info
, show_all_shadows
)
1038 @lldb_command("showmap")
1039 def ShowMap(cmd_args
=None):
1040 """ Routine to print out info about the specified vm_map
1041 usage: showmap <vm_map>
1043 if cmd_args
== None or len(cmd_args
) < 1:
1044 print "Invalid argument.", ShowMap
.__doc
__
1046 map_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vm_map_t')
1047 print GetVMMapSummary
.header
1048 print GetVMMapSummary(map_val
)
1050 @lldb_command("showmapvme")
1051 def ShowMapVME(cmd_args
=None):
1052 """Routine to print out info about the specified vm_map and its vm entries
1053 usage: showmapvme <vm_map>
1055 if cmd_args
== None or len(cmd_args
) < 1:
1056 print "Invalid argument.", ShowMap
.__doc
__
1058 map_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vm_map_t')
1059 print GetVMMapSummary
.header
1060 print GetVMMapSummary(map_val
)
1061 vme_list_head
= map_val
.hdr
.links
1062 vme_ptr_type
= GetType('vm_map_entry *')
1063 print GetVMEntrySummary
.header
1064 for vme
in IterateQueue(vme_list_head
, vme_ptr_type
, "links"):
1065 print GetVMEntrySummary(vme
)
@lldb_type_summary(['_vm_map *', 'vm_map_t'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: >5s} {4: >5s} {5: <20s} {6: <20s}".format("vm_map", "pmap", "vm_size", "#ents", "rpage", "hint", "first_free"))
def GetVMMapSummary(vmmap):
    """ Display interesting bits from vm_map struct """
    out_string = ""
    format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >5d} {4: >5d} {5: <#020x} {6: <#020x}"
    vm_size = uint64_t(vmmap.size).value
    # Resident pages come from the pmap stats when a pmap is attached.
    resident_pages = 0
    if vmmap.pmap != 0: resident_pages = int(vmmap.pmap.stats.resident_count)
    # first_free is only meaningful when the hole list is not in use.
    first_free = 0
    if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s.first_free
    out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free)
    return out_string
@lldb_type_summary(['vm_map_entry'])
@header("{0: <20s} {1: <20s} {2: <5s} {3: >7s} {4: <20s} {5: <20s}".format("entry", "start", "prot", "#page", "object", "offset"))
def GetVMEntrySummary(vme, show_pager_info=False, show_all_shadows=False):
    """ Display vm entry specific information.
        params:
            vme - core.value : a vm_map_entry
            show_pager_info / show_all_shadows - bool : accepted for
                compatibility with ShowTaskVMEntries, which passes them;
                currently unused by this summary.
        return: str - one formatted summary line
    """
    # BUG FIX: ShowTaskVMEntries calls this with three arguments but the
    # function only accepted one, raising TypeError at runtime. The extra
    # parameters are now accepted (defaulted) so both call sites work.
    page_size = kern.globals.page_size
    out_string = ""
    format_string = "{0: <#020x} {1: <#20x} {2: <1x}{3: <1x}{4: <3s} {5: >7d} {6: <#020x} {7: <#020x}"
    vme_protection = int(vme.protection)
    vme_max_protection = int(vme.max_protection)
    # One-character inheritance code, plus submap/needs-copy suffix.
    vme_extra_info_str = "SC-Ds"[int(vme.inheritance)]
    if int(vme.is_sub_map) != 0 :
        vme_extra_info_str += "s"
    elif int(vme.needs_copy) != 0 :
        vme_extra_info_str += "n"
    num_pages = (unsigned(vme.links.end) - unsigned(vme.links.start)) / page_size
    out_string += format_string.format(vme, vme.links.start, vme_protection, vme_max_protection, vme_extra_info_str, num_pages, vme.vme_object.vmo_object, vme.vme_offset)
    return out_string
1100 # EndMacro: showtaskvme
1101 @lldb_command('showmapwired')
1102 def ShowMapWired(cmd_args
=None):
1103 """ Routine to print out a summary listing of all the entries with wired pages in a vm_map
1105 if cmd_args
== None or len(cmd_args
) < 1:
1106 print "Invalid argument", ShowMapWired
.__doc
__
1108 map_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vm_map_t')
@lldb_type_summary(['kmod_info_t *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: >3s} {4: >5s} {5: >20s} {6: <30s}".format('kmod_info', 'address', 'size', 'id', 'refs', 'version', 'name'))
def GetKextSummary(kmod):
    """ returns a string representation of kext information
        params: kmod - core.value : a kmod_info_t *
        return: str - one formatted summary line
    """
    fmt = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >3d} {4: >5d} {5: >20s} {6: <30s}"
    return fmt.format(kmod, kmod.address, kmod.size, kmod.id, kmod.reference_count, kmod.version, kmod.name)
@lldb_type_summary(['uuid_t'])
@header("")
def GetUUIDSummary(uuid):
    """ returns a string representation like CA50DA4C-CA10-3246-B8DC-93542489AA26
        params: uuid - core.value : a uuid_t (16 raw bytes)
    """
    # View the uuid as 16 raw bytes and format per RFC 4122 grouping.
    arr = Cast(addressof(uuid), 'uint8_t *')
    data = [int(arr[i]) for i in range(16)]
    return "{a[0]:02X}{a[1]:02X}{a[2]:02X}{a[3]:02X}-{a[4]:02X}{a[5]:02X}-{a[6]:02X}{a[7]:02X}-{a[8]:02X}{a[9]:02X}-{a[10]:02X}{a[11]:02X}{a[12]:02X}{a[13]:02X}{a[14]:02X}{a[15]:02X}".format(a=data)
1132 @lldb_command('showallkmods')
1133 def ShowAllKexts(cmd_args
=None):
1134 """Display a summary listing of all loaded kexts (alias: showallkmods)
1136 kmod_val
= kern
.globals.kmod
1137 print "{: <36s} ".format("UUID") + GetKextSummary
.header
1138 kextuuidinfo
= GetKextLoadInformation()
1139 for kval
in IterateLinkedList(kmod_val
, 'next'):
1140 uuid
= "........-....-....-....-............"
1141 kaddr
= unsigned(kval
.address
)
1142 for l
in kextuuidinfo
:
1143 if kaddr
== int(l
[1],16):
1146 print uuid
+ " " + GetKextSummary(kval
)
def GetKextLoadInformation(addr=0):
    """ Extract the kext uuid and load address information from the kernel data structure.
        params:
            addr - int - optional integer that is the address to search for.
        returns:
            [] - array with each entry of format ( 'UUID', 'Hex Load Address')
    """
    # because of <rdar://problem/12683084>, we can't find summaries directly
    #addr = hex(addressof(kern.globals.gLoadedKextSummaries.summaries))
    baseaddr = unsigned(kern.globals.gLoadedKextSummaries) + 0x10
    summaries_begin = kern.GetValueFromAddress(baseaddr, 'OSKextLoadedKextSummary *')
    total_summaries = int(kern.globals.gLoadedKextSummaries.numSummaries)
    kext_version = int(kern.globals.gLoadedKextSummaries.version)
    # v1 summaries have a fixed layout; v2+ publish their own entry size.
    entry_size = 64 + 16 + 8 + 8 + 8 + 4 + 4
    if kext_version >= 2:
        entry_size = int(kern.globals.gLoadedKextSummaries.entry_size)
    retval = []
    for i in range(total_summaries):
        tmpaddress = unsigned(summaries_begin) + (i * entry_size)
        current_kext = kern.GetValueFromAddress(tmpaddress, 'OSKextLoadedKextSummary *')
        if addr != 0:
            # Caller asked for one specific load address only.
            if addr == unsigned(current_kext.address):
                retval.append((GetUUIDSummary(current_kext.uuid), hex(current_kext.address), str(current_kext.name)))
        else:
            retval.append((GetUUIDSummary(current_kext.uuid), hex(current_kext.address), str(current_kext.name)))
    return retval
# 'showallkexts' is an alias for the 'showallkmods' command above.
lldb_alias('showallkexts', 'showallkmods')
def GetOSKextVersion(version_num):
    """ returns a string of format 1.2.3x from the version_num
        params: version_num - int
        return: str
    """
    if version_num == -1 :
        return "invalid"
    # Decimal place-value encoding of major.minor.revision + stage/level.
    (MAJ_MULT, MIN_MULT, REV_MULT, STAGE_MULT) = (100000000, 1000000, 10000, 1000)
    version = version_num

    # FIX: use floor division (//) instead of /. In Python 2 this is
    # identical for ints; under Python 3 the old '/' produced floats that
    # corrupted the "%d" formatting below.
    vers_major = version // MAJ_MULT
    version = version - (vers_major * MAJ_MULT)

    vers_minor = version // MIN_MULT
    version = version - (vers_minor * MIN_MULT)

    vers_revision = version // REV_MULT
    version = version - (vers_revision * REV_MULT)

    vers_stage = version // STAGE_MULT
    version = version - (vers_stage * STAGE_MULT)

    vers_stage_level = version

    out_str = "%d.%d" % (vers_major, vers_minor)
    if vers_revision > 0: out_str += ".%d" % vers_revision
    # Stage codes: 1=development, 3=alpha, 5=beta, 6=final candidate.
    if vers_stage == 1 : out_str += "d%d" % vers_stage_level
    if vers_stage == 3 : out_str += "a%d" % vers_stage_level
    if vers_stage == 5 : out_str += "b%d" % vers_stage_level
    if vers_stage == 6 : out_str += "fc%d" % vers_stage_level

    return out_str
1211 @lldb_command('showallknownkmods')
1212 def ShowAllKnownKexts(cmd_args
=None):
1213 """ Display a summary listing of all kexts known in the system.
1214 This is particularly useful to find if some kext was unloaded before this crash'ed state.
1216 kext_count
= int(kern
.globals.sKextsByID
.count
)
1218 kext_dictionary
= kern
.globals.sKextsByID
.dictionary
1219 print "%d kexts in sKextsByID:" % kext_count
1220 print "{0: <20s} {1: <20s} {2: >5s} {3: >20s} {4: <30s}".format('OSKEXT *', 'load_addr', 'id', 'version', 'name')
1221 format_string
= "{0: <#020x} {1: <20s} {2: >5s} {3: >20s} {4: <30s}"
1223 while index
< kext_count
:
1224 kext_dict
= GetObjectAtIndexFromArray(kext_dictionary
, index
)
1225 kext_name
= str(kext_dict
.key
.string
)
1226 osk
= Cast(kext_dict
.value
, 'OSKext *')
1227 if int(osk
.flags
.loaded
) :
1228 load_addr
= "{0: <#020x}".format(osk
.kmod_info
)
1229 id = "{0: >5d}".format(osk
.loadTag
)
1231 load_addr
= "------"
1233 version_num
= unsigned(osk
.version
)
1234 version
= GetOSKextVersion(version_num
)
1235 print format_string
.format(osk
, load_addr
, id, version
, kext_name
)
1240 @lldb_command('showkmodaddr')
1241 def ShowKmodAddr(cmd_args
=[]):
1242 """ Given an address, print the offset and name for the kmod containing it
1243 Syntax: (lldb) showkmodaddr <addr>
1245 if len(cmd_args
) < 1:
1246 raise ArgumentError("Insufficient arguments")
1248 addr
= ArgumentStringToInt(cmd_args
[0])
1249 kmod_val
= kern
.globals.kmod
1250 for kval
in IterateLinkedList(kmod_val
, 'next'):
1251 if addr
>= unsigned(kval
.address
) and addr
<= (unsigned(kval
.address
) + unsigned(kval
.size
)):
1252 print GetKextSummary
.header
1253 print GetKextSummary(kval
) + " offset = {0: #0x}".format((addr
- unsigned(kval
.address
)))
1257 @lldb_command('addkext','AF:N:')
1258 def AddKextSyms(cmd_args
=[], cmd_options
={}):
1259 """ Add kext symbols into lldb.
1260 This command finds symbols for a uuid and load the required executable
1262 addkext <uuid> : Load one kext based on uuid. eg. (lldb)addkext 4DD2344C0-4A81-3EAB-BDCF-FEAFED9EB73E
1263 addkext -F <abs/path/to/executable> <load_address> : Load kext executable at specified load address
1264 addkext -N <name> : Load one kext that matches the name provided. eg. (lldb) addkext -N corecrypto
1265 addkext -N <name> -A: Load all kext that matches the name provided. eg. to load all kext with Apple in name do (lldb) addkext -N Apple -A
1266 addkext all : Will load all the kext symbols - SLOW
1270 if "-F" in cmd_options
:
1271 exec_path
= cmd_options
["-F"]
1272 exec_full_path
= ResolveFSPath(exec_path
)
1273 if not os
.path
.exists(exec_full_path
):
1274 raise ArgumentError("Unable to resolve {:s}".format(exec_path
))
1276 if not os
.path
.isfile(exec_full_path
):
1277 raise ArgumentError("Path is {:s} not a filepath. \nPlease check that path points to executable.\
1278 \nFor ex. path/to/Symbols/IOUSBFamily.kext/Contents/PlugIns/AppleUSBHub.kext/Contents/MacOS/AppleUSBHub.\
1279 \nNote: LLDB does not support adding kext based on directory paths like gdb used to.".format(exec_path
))
1280 if not os
.access(exec_full_path
, os
.X_OK
):
1281 raise ArgumentError("Path is {:s} not an executable file".format(exec_path
))
1285 slide_value
= cmd_args
[0]
1286 debuglog("loading slide value from user input %s" % cmd_args
[0])
1288 filespec
= lldb
.SBFileSpec(exec_full_path
, False)
1289 print "target modules add %s" % exec_full_path
1290 print lldb_run_command("target modules add %s" % exec_full_path
)
1291 loaded_module
= LazyTarget
.GetTarget().FindModule(filespec
)
1292 if loaded_module
.IsValid():
1293 uuid_str
= loaded_module
.GetUUIDString()
1294 debuglog("added module %s with uuid %s" % (exec_full_path
, uuid_str
))
1295 if slide_value
is None:
1296 all_kexts_info
= GetKextLoadInformation()
1297 for k
in all_kexts_info
:
1299 if k
[0].lower() == uuid_str
.lower():
1301 debuglog("found the slide %s for uuid %s" % (k
[1], k
[0]))
1303 if slide_value
is None:
1304 raise ArgumentError("Unable to find load address for module described at %s " % exec_full_path
)
1305 load_cmd
= "target modules load --file %s --slide %s" % (exec_full_path
, str(slide_value
))
1307 print lldb_run_command(load_cmd
)
1308 kern
.symbolicator
= None
1311 all_kexts_info
= GetKextLoadInformation()
1313 if "-N" in cmd_options
:
1314 kext_name
= cmd_options
["-N"]
1315 kext_name_matches
= GetLongestMatchOption(kext_name
, [str(x
[2]) for x
in all_kexts_info
], True)
1316 if len(kext_name_matches
) != 1 and "-A" not in cmd_options
:
1317 print "Ambiguous match for name: {:s}".format(kext_name
)
1318 if len(kext_name_matches
) > 0:
1319 print "Options are:\n\t" + "\n\t".join(kext_name_matches
)
1321 debuglog("matched the kext to name %s and uuid %s" % (kext_name_matches
[0], kext_name
))
1322 for cur_knm
in kext_name_matches
:
1323 for x
in all_kexts_info
:
1325 cur_uuid
= x
[0].lower()
1326 print "Fetching dSYM for {:s}".format(cur_uuid
)
1327 info
= dsymForUUID(cur_uuid
)
1328 if info
and 'DBGSymbolRichExecutable' in info
:
1329 print "Adding dSYM ({0:s}) for {1:s}".format(cur_uuid
, info
['DBGSymbolRichExecutable'])
1330 addDSYM(cur_uuid
, info
)
1331 loadDSYM(cur_uuid
, int(x
[1],16))
1333 print "Failed to get symbol info for {:s}".format(cur_uuid
)
1335 kern
.symbolicator
= None
1338 if len(cmd_args
) < 1:
1339 raise ArgumentError("No arguments specified.")
1341 uuid
= cmd_args
[0].lower()
1343 load_all_kexts
= False
1345 load_all_kexts
= True
1347 if not load_all_kexts
and len(uuid_regex
.findall(uuid
)) == 0:
1348 raise ArgumentError("Unknown argument {:s}".format(uuid
))
1350 for k_info
in all_kexts_info
:
1351 cur_uuid
= k_info
[0].lower()
1352 if load_all_kexts
or (uuid
== cur_uuid
):
1353 print "Fetching dSYM for %s" % cur_uuid
1354 info
= dsymForUUID(cur_uuid
)
1355 if info
and 'DBGSymbolRichExecutable' in info
:
1356 print "Adding dSYM (%s) for %s" % (cur_uuid
, info
['DBGSymbolRichExecutable'])
1357 addDSYM(cur_uuid
, info
)
1358 loadDSYM(cur_uuid
, int(k_info
[1],16))
1360 print "Failed to get symbol info for %s" % cur_uuid
1362 kern
.symbolicator
= None
# Convenience aliases for 'showkmodaddr'.
lldb_alias('showkmod', 'showkmodaddr')
lldb_alias('showkext', 'showkmodaddr')
lldb_alias('showkextaddr', 'showkmodaddr')
@lldb_type_summary(['mount *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: <12s} {4: <12s} {5: <12s} {6: >6s} {7: <30s} {8: <35s} {9: <30s}".format('volume(mp)', 'mnt_data', 'mnt_devvp', 'flag', 'kern_flag', 'lflag', 'type', 'mnton', 'mntfrom', 'iosched supported'))
def GetMountSummary(mount):
    """ Display a summary of mount on the system
        params: mount - core.value : a struct mount *
        return: str - one formatted summary line
    """
    # 0x4 in mnt_ioflags marks I/O-scheduling support.
    out_string = ("{mnt: <#020x} {mnt.mnt_data: <#020x} {mnt.mnt_devvp: <#020x} {mnt.mnt_flag: <#012x} " +
                  "{mnt.mnt_kern_flag: <#012x} {mnt.mnt_lflag: <#012x} {vfs.f_fstypename: >6s} " +
                  "{vfs.f_mntonname: <30s} {vfs.f_mntfromname: <35s} {iomode: <30s}").format(mnt=mount, vfs=mount.mnt_vfsstat, iomode=('Yes' if (mount.mnt_ioflags & 0x4) else 'No'))
    return out_string
1381 @lldb_command('showallmounts')
1382 def ShowAllMounts(cmd_args
=None):
1383 """ Print all mount points
1385 mntlist
= kern
.globals.mountlist
1386 print GetMountSummary
.header
1387 for mnt
in IterateTAILQ_HEAD(mntlist
, 'mnt_list'):
1388 print GetMountSummary(mnt
)
# 'ShowAllVols' is an alias for the 'showallmounts' command above.
lldb_alias('ShowAllVols', 'showallmounts')
1393 @lldb_command('systemlog')
1394 def ShowSystemLog(cmd_args
=None):
1395 """ Display the kernel's printf ring buffer """
1396 msgbufp
= kern
.globals.msgbufp
1397 msg_size
= int(msgbufp
.msg_size
)
1398 msg_bufx
= int(msgbufp
.msg_bufx
)
1399 msg_bufr
= int(msgbufp
.msg_bufr
)
1400 msg_bufc
= msgbufp
.msg_bufc
1401 msg_bufc_data
= msg_bufc
.GetSBValue().GetPointeeData(0, msg_size
)
1403 # the buffer is circular; start at the write pointer to end,
1404 # then from beginning to write pointer
1406 err
= lldb
.SBError()
1407 for i
in range(msg_bufx
, msg_size
) + range(0, msg_bufx
) :
1409 cbyte
= msg_bufc_data
.GetUnsignedInt8(err
, i
)
1410 if not err
.Success() :
1411 raise ValueError("Failed to read character at offset " + str(i
) + ": " + err
.GetCString())
@static_var('output','')
def _GetVnodePathName(vnode, vnodename):
    """ Internal function to get vnode path string from vnode structure.
        params:
            vnode - core.value
            vnodename - str
        returns Nothing. The output will be stored in the static variable.
    """
    if not vnode:
        return
    # v_flag & 0x1 == VROOT: cross the mount point via mnt_vnodecovered
    # instead of v_parent; otherwise recurse to the parent and append our
    # own component.
    if int(vnode.v_flag) & 0x1 and int(hex(vnode.v_mount), 16) != 0:
        if int(vnode.v_mount.mnt_vnodecovered):
            _GetVnodePathName(vnode.v_mount.mnt_vnodecovered, str(vnode.v_mount.mnt_vnodecovered.v_name))
    else:
        _GetVnodePathName(vnode.v_parent, str(vnode.v_parent.v_name))
        _GetVnodePathName.output += "/%s" % vnodename
def GetVnodePath(vnode):
    """ Get string representation of the vnode
        params: vnodeval - value representing vnode * in the kernel
        return: str - of format /path/to/something
    """
    out_str = ''
    if vnode:
        # Root vnode of a root-mounted filesystem prints as "/".
        # NOTE(review): the then-branch was elided in extraction and was
        # reconstructed -- confirm against upstream.
        if (int(vnode.v_flag) & 0x000001) and int(hex(vnode.v_mount), 16) != 0 and (int(vnode.v_mount.mnt_flag) & 0x00004000) :
            out_str += "/"
        else:
            _GetVnodePathName.output = ''
            if abs(vnode.v_name) != 0:
                _GetVnodePathName(vnode, str(vnode.v_name))
                out_str += _GetVnodePathName.output
            else:
                out_str += 'v_name = NULL'
            # Reset the accumulator for the next caller.
            _GetVnodePathName.output = ''
    return out_str
1463 @lldb_command('showvnodepath')
1464 def ShowVnodePath(cmd_args
=None):
1465 """ Prints the path for a vnode
1466 usage: showvnodepath <vnode>
1468 if cmd_args
!= None and len(cmd_args
) > 0 :
1469 vnode_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vnode *')
1471 print GetVnodePath(vnode_val
)
1474 # Macro: showvnodedev
def GetVnodeDevInfo(vnode):
    """ Internal function to get information from the device type vnodes
        params: vnode - value representing struct vnode *
        return: str - formatted output information for block and char vnode types passed as param
    """
    vnodedev_output = ""
    vblk_type = GetEnumValue('vtype::VBLK')
    vchr_type = GetEnumValue('vtype::VCHR')
    if (vnode.v_type == vblk_type) or (vnode.v_type == vchr_type):
        devnode = Cast(vnode.v_data, 'devnode_t *')
        devnode_dev = devnode.dn_typeinfo.dev
        # dev_t encoding: major in the top byte, minor in the low 24 bits.
        devnode_major = (devnode_dev >> 24) & 0xff
        devnode_minor = devnode_dev & 0x00ffffff

        # boilerplate device information for a vnode
        vnodedev_output += "Device Info:\n\t vnode:\t\t{:#x}".format(vnode)
        vnodedev_output += "\n\t type:\t\t"
        if (vnode.v_type == vblk_type):
            vnodedev_output += "VBLK"
        if (vnode.v_type == vchr_type):
            vnodedev_output += "VCHR"
        vnodedev_output += "\n\t name:\t\t{:<s}".format(vnode.v_name)
        vnodedev_output += "\n\t major, minor:\t{:d},{:d}".format(devnode_major, devnode_minor)
        vnodedev_output += "\n\t mode\t\t0{:o}".format(unsigned(devnode.dn_mode))
        vnodedev_output += "\n\t owner (u,g):\t{:d} {:d}".format(devnode.dn_uid, devnode.dn_gid)

        # decode device specific data
        vnodedev_output += "\nDevice Specific Information:\t"
        if (vnode.v_type == vblk_type):
            vnodedev_output += "Sorry, I do not know how to decode block devices yet!"
            vnodedev_output += "\nMaybe you can write me!"

        if (vnode.v_type == vchr_type):
            # Device information; this is scanty
            if (devnode_major > 42) or (devnode_major < 0):
                vnodedev_output += "Invalid major #\n"
            # static assignments in conf
            elif (devnode_major == 0):
                vnodedev_output += "Console mux device\n"
            elif (devnode_major == 2):
                vnodedev_output += "Current tty alias\n"
            elif (devnode_major == 3):
                vnodedev_output += "NULL device\n"
            elif (devnode_major == 4):
                vnodedev_output += "Old pty slave\n"
            elif (devnode_major == 5):
                vnodedev_output += "Old pty master\n"
            elif (devnode_major == 6):
                vnodedev_output += "Kernel log\n"
            elif (devnode_major == 12):
                vnodedev_output += "Memory devices\n"
            # Statically linked dynamic assignments
            elif unsigned(kern.globals.cdevsw[devnode_major].d_open) == unsigned(kern.GetLoadAddressForSymbol('ptmx_open')):
                vnodedev_output += "Cloning pty master not done\n"
                #GetVnodeDevCpty(devnode_major, devnode_minor)
            elif unsigned(kern.globals.cdevsw[devnode_major].d_open) == unsigned(kern.GetLoadAddressForSymbol('ptsd_open')):
                vnodedev_output += "Cloning pty slave not done\n"
                #GetVnodeDevCpty(devnode_major, devnode_minor)
            else:
                vnodedev_output += "RESERVED SLOT\n"
    else:
        vnodedev_output += "{:#x} is not a device".format(vnode)
    return vnodedev_output
1540 @lldb_command('showvnodedev')
1541 def ShowVnodeDev(cmd_args
=None):
1542 """ Routine to display details of all vnodes of block and character device types
1543 Usage: showvnodedev <address of vnode>
1546 print "No arguments passed"
1547 print ShowVnodeDev
.__doc
__
1549 vnode_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vnode *')
1551 print "unknown arguments:", str(cmd_args
)
1553 print GetVnodeDevInfo(vnode_val
)
1555 # EndMacro: showvnodedev
1557 # Macro: showvnodelocks
def GetVnodeLock(lockf):
    """ Internal function to get information from the given advisory lock
        params: lockf - value representing v_lockf member in struct vnode *
        return: str - formatted output information for the advisory lock
    """
    vnode_lock_output = ''
    lockf_flags = lockf.lf_flags
    lockf_type = lockf.lf_type
    # Flag bits: 0x20 F_FLOCK, 0x40 F_POSIX, 0x80 F_PROV, 0x10 F_WAIT, 0x400 F_OFD_LOCK.
    if lockf_flags & 0x20:
        vnode_lock_output += ("{: <8s}").format('flock')
    if lockf_flags & 0x40:
        vnode_lock_output += ("{: <8s}").format('posix')
    if lockf_flags & 0x80:
        vnode_lock_output += ("{: <8s}").format('prov')
    if lockf_flags & 0x10:
        vnode_lock_output += ("{: <4s}").format('W')
    if lockf_flags & 0x400:
        vnode_lock_output += ("{: <8s}").format('ofd')
    else:
        vnode_lock_output += ("{: <4s}").format('.')

    # POSIX file vs advisory range locks
    if lockf_flags & 0x40:
        # POSIX locks identify the owner by proc.
        lockf_proc = Cast(lockf.lf_id, 'proc *')
        vnode_lock_output += ("PID {: <18d}").format(lockf_proc.p_pid)
    else:
        vnode_lock_output += ("ID {: <#019x}").format(int(lockf.lf_id))

    # lock type decode (fcntl values: F_RDLCK=1, F_UNLCK=2, F_WRLCK=3)
    # NOTE(review): the comparison constants were elided in extraction and
    # were reconstructed -- confirm against upstream.
    if lockf_type == 1:
        vnode_lock_output += ("{: <12s}").format('shared')
    elif lockf_type == 3:
        vnode_lock_output += ("{: <12s}").format('exclusive')
    elif lockf_type == 2:
        vnode_lock_output += ("{: <12s}").format('unlock')
    else:
        vnode_lock_output += ("{: <12s}").format('unknown')

    # start and stop values
    vnode_lock_output += ("{: #018x} ..").format(lockf.lf_start)
    vnode_lock_output += ("{: #018x}\n").format(lockf.lf_end)
    return vnode_lock_output
@header("{0: <3s} {1: <7s} {2: <3s} {3: <21s} {4: <11s} {5: ^19s} {6: ^17s}".format('*', 'type', 'W', 'held by', 'lock type', 'start', 'end'))
def GetVnodeLocksSummary(vnode):
    """ Internal function to get summary of advisory locks for the given vnode
        params: vnode - value representing the vnode object
        return: str - formatted output information for the summary of advisory locks
    """
    out_str = ''
    if vnode:
        lockf_list = vnode.v_lockf
        for lockf_itr in IterateLinkedList(lockf_list, 'lf_next'):
            # 'H' marks a held lock; each '>' line below it is a waiter
            # blocked on that lock.
            out_str += ("{: <4s}").format('H')
            out_str += GetVnodeLock(lockf_itr)
            lockf_blocker = lockf_itr.lf_blkhd.tqh_first
            while lockf_blocker:
                out_str += ("{: <4s}").format('>')
                out_str += GetVnodeLock(lockf_blocker)
                lockf_blocker = lockf_blocker.lf_block.tqe_next
    return out_str
1622 @lldb_command('showvnodelocks')
1623 def ShowVnodeLocks(cmd_args
=None):
1624 """ Routine to display list of advisory record locks for the given vnode address
1625 Usage: showvnodelocks <address of vnode>
1628 print "No arguments passed"
1629 print ShowVnodeLocks
.__doc
__
1631 vnode_val
= kern
.GetValueFromAddress(cmd_args
[0], 'vnode *')
1633 print "unknown arguments:", str(cmd_args
)
1635 print GetVnodeLocksSummary
.header
1636 print GetVnodeLocksSummary(vnode_val
)
1638 # EndMacro: showvnodelocks
1640 # Macro: showproclocks
1642 @lldb_command('showproclocks')
1643 def ShowProcLocks(cmd_args
=None):
1644 """ Routine to display list of advisory record locks for the given process
1645 Usage: showproclocks <address of proc>
1648 print "No arguments passed"
1649 print ShowProcLocks
.__doc
__
1651 proc
= kern
.GetValueFromAddress(cmd_args
[0], 'proc *')
1653 print "unknown arguments:", str(cmd_args
)
1656 proc_filedesc
= proc
.p_fd
1657 fd_lastfile
= proc_filedesc
.fd_lastfile
1658 fd_ofiles
= proc_filedesc
.fd_ofiles
1661 while count
<= fd_lastfile
:
1662 if fd_ofiles
[count
]:
1663 fglob
= fd_ofiles
[count
].f_fglob
1664 fo_type
= fglob
.fg_ops
.fo_type
1666 fg_data
= fglob
.fg_data
1667 fg_vnode
= Cast(fg_data
, 'vnode *')
1668 name
= fg_vnode
.v_name
1669 lockf_itr
= fg_vnode
.v_lockf
1672 print GetVnodeLocksSummary
.header
1674 out_str
+= ("\n( fd {:d}, name ").format(count
)
1676 out_str
+= "(null) )\n"
1678 out_str
+= "{:s} )\n".format(name
)
1680 print GetVnodeLocksSummary(fg_vnode
)
1682 print "\n{0: d} total locks for {1: #018x}".format(seen
, proc
)
1684 # EndMacro: showproclocks
@lldb_type_summary(['vnode_t', 'vnode *'])
@header("{0: <20s} {1: >8s} {2: >8s} {3: <20s} {4: <6s} {5: <20s} {6: <6s} {7: <6s} {8: <35s}".format('vnode', 'usecount', 'iocount', 'v_data', 'vtype', 'parent', 'mapped', 'cs_version', 'name'))
def GetVnodeSummary(vnode):
    """ Get a summary of important information out of vnode
    """
    out_str = ''
    format_string = "{0: <#020x} {1: >8d} {2: >8d} {3: <#020x} {4: <6s} {5: <#020x} {6: <6s} {7: <6s} {8: <35s}"
    usecount = int(vnode.v_usecount)
    iocount = int(vnode.v_iocount)
    v_data_ptr = int(hex(vnode.v_data), 16)
    vtype = int(vnode.v_type)
    vtype_str = "%d" % vtype
    vnode_types = ['VNON', 'VREG', 'VDIR', 'VBLK', 'VCHR', 'VLNK', 'VSOCK', 'VFIFO', 'VBAD', 'VSTR', 'VCPLX'] # see vnode.h for enum type definition
    if vtype >= 0 and vtype < len(vnode_types):
        vtype_str = vnode_types[vtype]
    parent_ptr = int(hex(vnode.v_parent), 16)
    name_ptr = int(hex(vnode.v_name), 16)
    name = ""
    if name_ptr != 0:
        name = str(vnode.v_name)
    elif int(vnode.v_tag) == 16 :
        # v_tag 16 == VT_HFS: pull the name out of the cnode instead.
        cnode = Cast(vnode.v_data, 'cnode *')
        name = "hfs: %s" % str( Cast(cnode.c_desc.cd_nameptr, 'char *'))
    mapped = '-'
    csblob_version = '-'
    # vtype 1 == VREG; only regular files with ubc info carry cs/mapping state.
    if (vtype == 1) and (vnode.v_un.vu_ubcinfo != 0):
        csblob_version = '{: <6d}'.format(vnode.v_un.vu_ubcinfo.cs_add_gen)
        # Check to see if vnode is mapped/unmapped
        if (vnode.v_un.vu_ubcinfo.ui_flags & 0x8) != 0:
            mapped = '1'
        else:
            mapped = '0'
    out_str += format_string.format(vnode, usecount, iocount, v_data_ptr, vtype_str, parent_ptr, mapped, csblob_version, name)
    return out_str
1721 @lldb_command('showallvnodes')
1722 def ShowAllVnodes(cmd_args
=None):
1723 """ Display info about all vnodes
1725 mntlist
= kern
.globals.mountlist
1726 print GetVnodeSummary
.header
1727 for mntval
in IterateTAILQ_HEAD(mntlist
, 'mnt_list'):
1728 for vnodeval
in IterateTAILQ_HEAD(mntval
.mnt_vnodelist
, 'v_mntvnodes'):
1729 print GetVnodeSummary(vnodeval
)
1732 @lldb_command('showvnode')
1733 def ShowVnode(cmd_args
=None):
1734 """ Display info about one vnode
1735 usage: showvnode <vnode>
1737 if cmd_args
== None or len(cmd_args
) < 1:
1738 print "Please provide valid vnode argument. Type help showvnode for help."
1740 vnodeval
= kern
.GetValueFromAddress(cmd_args
[0],'vnode *')
1741 print GetVnodeSummary
.header
1742 print GetVnodeSummary(vnodeval
)
1744 @lldb_command('showvolvnodes')
1745 def ShowVolVnodes(cmd_args
=None):
1746 """ Display info about all vnodes of a given mount_t
1748 if cmd_args
== None or len(cmd_args
) < 1:
1749 print "Please provide a valide mount_t argument. Try 'help showvolvnodes' for help"
1751 mntval
= kern
.GetValueFromAddress(cmd_args
[0], 'mount_t')
1752 print GetVnodeSummary
.header
1753 for vnodeval
in IterateTAILQ_HEAD(mntval
.mnt_vnodelist
, 'v_mntvnodes'):
1754 print GetVnodeSummary(vnodeval
)
1757 @lldb_command('showvolbusyvnodes')
1758 def ShowVolBusyVnodes(cmd_args
=None):
1759 """ Display info about busy (iocount!=0) vnodes of a given mount_t
1761 if cmd_args
== None or len(cmd_args
) < 1:
1762 print "Please provide a valide mount_t argument. Try 'help showvolbusyvnodes' for help"
1764 mntval
= kern
.GetValueFromAddress(cmd_args
[0], 'mount_t')
1765 print GetVnodeSummary
.header
1766 for vnodeval
in IterateTAILQ_HEAD(mntval
.mnt_vnodelist
, 'v_mntvnodes'):
1767 if int(vnodeval
.v_iocount
) != 0:
1768 print GetVnodeSummary(vnodeval
)
@lldb_command('showallbusyvnodes')
def ShowAllBusyVnodes(cmd_args=None):
    """ Display info about all busy (iocount!=0) vnodes
        Delegates to ShowVolBusyVnodes for each mount on the mountlist.
    """
    for mnt in IterateTAILQ_HEAD(kern.globals.mountlist, 'mnt_list'):
        ShowVolBusyVnodes([hex(mnt)])
1778 @lldb_command('print_vnode')
1779 def PrintVnode(cmd_args
=None):
1780 """ Prints out the fields of a vnode struct
1781 Usage: print_vnode <vnode>
1784 print "Please provide valid vnode argument. Type help print_vnode for help."
1788 @lldb_command('showworkqvnodes')
1789 def ShowWorkqVnodes(cmd_args
=None):
1790 """ Print the vnode worker list
1791 Usage: showworkqvnodes <struct mount *>
1794 print "Please provide valid mount argument. Type help showworkqvnodes for help."
1797 mp
= kern
.GetValueFromAddress(cmd_args
[0], 'mount *')
1798 vp
= Cast(mp
.mnt_workerqueue
.tqh_first
, 'vnode *')
1799 print GetVnodeSummary
.header
1801 print GetVnodeSummary(vp
)
1802 vp
= vp
.v_mntvnodes
.tqe_next
1804 @lldb_command('shownewvnodes')
1805 def ShowNewVnodes(cmd_args
=None):
1806 """ Print the new vnode list
1807 Usage: shownewvnodes <struct mount *>
1810 print "Please provide valid mount argument. Type help shownewvnodes for help."
1812 mp
= kern
.GetValueFromAddress(cmd_args
[0], 'mount *')
1813 vp
= Cast(mp
.mnt_newvnodes
.tqh_first
, 'vnode *')
1814 print GetVnodeSummary
.header
1816 print GetVnodeSummary(vp
)
1817 vp
= vp
.v_mntvnodes
.tqe_next
1820 @lldb_command('showprocvnodes')
1821 def ShowProcVnodes(cmd_args
=None):
1822 """ Routine to print out all the open fds which are vnodes in a process
1823 Usage: showprocvnodes <proc *>
1826 print "Please provide valid proc argument. Type help showprocvnodes for help."
1828 procptr
= kern
.GetValueFromAddress(cmd_args
[0], 'proc *')
1829 fdptr
= Cast(procptr
.p_fd
, 'filedesc *')
1830 if int(fdptr
.fd_cdir
) != 0:
1831 print '{0: <25s}\n{1: <s}\n{2: <s}'.format('Current Working Directory:', GetVnodeSummary
.header
, GetVnodeSummary(fdptr
.fd_cdir
))
1832 if int(fdptr
.fd_rdir
) != 0:
1833 print '{0: <25s}\n{1: <s}\n{2: <s}'.format('Current Root Directory:', GetVnodeSummary
.header
, GetVnodeSummary(fdptr
.fd_rdir
))
1835 print '\n' + '{0: <5s} {1: <7s}'.format('fd', 'flags') + GetVnodeSummary
.header
1836 # Hack to get around <rdar://problem/12879494> llb fails to cast addresses to double pointers
1837 fpptr
= Cast(fdptr
.fd_ofiles
, 'fileproc *')
1838 while count
< fdptr
.fd_nfiles
:
1839 fpp
= dereference(fpptr
)
1840 fproc
= Cast(fpp
, 'fileproc *')
1842 fglob
= dereference(fproc
).f_fglob
1844 if (int(fglob
) != 0) and (int(fglob
.fg_ops
.fo_type
) == 1):
1845 if (fdptr
.fd_ofileflags
[count
] & 1): flags
+= 'E'
1846 if (fdptr
.fd_ofileflags
[count
] & 2): flags
+= 'F'
1847 if (fdptr
.fd_ofileflags
[count
] & 4): flags
+= 'R'
1848 if (fdptr
.fd_ofileflags
[count
] & 8): flags
+= 'C'
1849 print '{0: <5d} {1: <7s}'.format(count
, flags
) + GetVnodeSummary(Cast(fglob
.fg_data
, 'vnode *'))
1851 fpptr
= kern
.GetValueFromAddress(int(fpptr
) + kern
.ptrsize
,'fileproc *')
1853 @lldb_command('showallprocvnodes')
1854 def ShowAllProcVnodes(cmd_args
=None):
1855 """ Routine to print out all the open fds which are vnodes
1858 procptr
= Cast(kern
.globals.allproc
.lh_first
, 'proc *')
1859 while procptr
and int(procptr
) != 0:
1860 print '{:<s}'.format("=" * 106)
1861 print GetProcInfo(procptr
)
1862 ShowProcVnodes([int(procptr
)])
1863 procptr
= procptr
.p_list
.le_next
1865 @xnudebug_test('test_vnode')
1866 def TestShowAllVnodes(kernel_target
, config
, lldb_obj
, isConnected
):
1867 """ Test the functionality of vnode related commands
1873 print "Target is not connected. Cannot test memstats"
1875 res
= lldb
.SBCommandReturnObject()
1876 lldb_obj
.debugger
.GetCommandInterpreter().HandleCommand("showallvnodes", res
)
1877 result
= res
.GetOutput()
1878 if len(result
.split("\n")) > 2 and result
.find('VREG') != -1 and len(result
.splitlines()[2].split()) > 5:
@lldb_type_summary(['_lck_grp_ *'])
def GetMutexEntry(mtxg):
    """ Summarize a mutex group entry with important information.
        params:
            mtxg: value - obj representing a mutex group in kernel
        returns:
            out_string - summary of the mutex group
    """
    out_string = ""
    # Width of the address column depends on the target's pointer size.
    if kern.ptrsize == 8:
        format_string = '{0:#018x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    else:
        format_string = '{0:#010x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    if mtxg.lck_grp_mtxcnt:
        stats = mtxg.lck_grp_stat.lck_grp_mtx_stat
        out_string += format_string.format(mtxg, mtxg.lck_grp_mtxcnt,
                                           stats.lck_grp_mtx_util_cnt,
                                           stats.lck_grp_mtx_miss_cnt,
                                           stats.lck_grp_mtx_wait_cnt,
                                           mtxg.lck_grp_name)
    return out_string
1905 @lldb_command('showallmtx')
1906 def ShowAllMtx(cmd_args
=None):
1907 """ Routine to print a summary listing of all mutexes
1910 if kern
.ptrsize
== 8:
1911 hdr_format
= '{:<18s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
1913 hdr_format
= '{:<10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
1915 print hdr_format
.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')
1917 mtxgrp_queue_head
= kern
.globals.lck_grp_queue
1918 mtxgrp_ptr_type
= GetType('_lck_grp_ *')
1920 for mtxgrp_ptr
in IterateQueue(mtxgrp_queue_head
, mtxgrp_ptr_type
, "lck_grp_link"):
1921 print GetMutexEntry(mtxgrp_ptr
)
1923 # EndMacro: showallmtx
1925 # Macro: showallrwlck
@lldb_type_summary(['_lck_grp_ *'])
def GetRWLEntry(rwlg):
    """ Summarize a reader writer lock group with important information.
        params:
            rwlg: value - obj representing a reader writer lock group in kernel
        returns:
            out_string - summary of the reader writer lock group
    """
    out_string = ""
    # Width of the address column depends on the target's pointer size.
    if kern.ptrsize == 8:
        format_string = '{0:#018x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    else:
        format_string = '{0:#010x} {1:10d} {2:10d} {3:10d} {4:10d} {5: <30s} '
    if rwlg.lck_grp_rwcnt:
        stats = rwlg.lck_grp_stat.lck_grp_rw_stat
        out_string += format_string.format(rwlg, rwlg.lck_grp_rwcnt,
                                           stats.lck_grp_rw_util_cnt,
                                           stats.lck_grp_rw_miss_cnt,
                                           stats.lck_grp_rw_wait_cnt,
                                           rwlg.lck_grp_name)
    return out_string
1948 @lldb_type_summary(['lck_mtx_t *'])
1949 @header("===== Mutex Lock Summary =====")
1950 def GetMutexLockSummary(mtx
):
1951 """ Summarize mutex lock with important information.
1953 mtx: value - obj representing a mutex lock in kernel
1955 out_str - summary of the mutex lock
1958 return "Invalid lock value: 0x0"
1960 if kern
.arch
== "x86_64":
1961 out_str
= "Lock Type\t\t: MUTEX\n"
1962 mtxd
= mtx
.lck_mtx_sw
.lck_mtxd
1963 out_str
+= "Owner Thread\t\t: {:#x}\n".format(mtxd
.lck_mtxd_owner
)
1964 cmd_str
= "p/d ((lck_mtx_t*){:#x})->lck_mtx_sw.lck_mtxd.".format(mtx
)
1965 cmd_out
= lldb_run_command(cmd_str
+ "lck_mtxd_waiters")
1966 out_str
+= "Number of Waiters\t: {:s}\n".format(cmd_out
.split()[-1])
1967 cmd_out
= lldb_run_command(cmd_str
+ "lck_mtxd_ilocked")
1968 out_str
+= "ILocked\t\t\t: {:s}\n".format(cmd_out
.split()[-1])
1969 cmd_out
= lldb_run_command(cmd_str
+ "lck_mtxd_mlocked")
1970 out_str
+= "MLocked\t\t\t: {:s}\n".format(cmd_out
.split()[-1])
1971 cmd_out
= lldb_run_command(cmd_str
+ "lck_mtxd_promoted")
1972 out_str
+= "Promoted\t\t: {:s}\n".format(cmd_out
.split()[-1])
1973 cmd_out
= lldb_run_command(cmd_str
+ "lck_mtxd_spin")
1974 out_str
+= "Spin\t\t\t: {:s}\n".format(cmd_out
.split()[-1])
1977 out_str
= "Lock Type\t\t: MUTEX\n"
1978 out_str
+= "Owner Thread\t\t: {:#x}\n".format(mtx
.lck_mtx_hdr
.lck_mtxd_data
& ~
0x3)
1979 out_str
+= "Number of Waiters\t: {:d}\n".format(mtx
.lck_mtx_sw
.lck_mtxd
.lck_mtxd_waiters
)
1980 out_str
+= "Flags\t\t\t: "
1981 if mtx
.lck_mtx_hdr
.lck_mtxd_data
& 0x1:
1982 out_str
+= "[Interlock Locked] "
1983 if mtx
.lck_mtx_hdr
.lck_mtxd_data
& 0x2:
1984 out_str
+= "[Wait Flag]"
1985 if (mtx
.lck_mtx_hdr
.lck_mtxd_data
& 0x3) == 0:
1989 @lldb_type_summary(['lck_spin_t *'])
1990 @header("===== SpinLock Summary =====")
1991 def GetSpinLockSummary(spinlock
):
1992 """ Summarize spinlock with important information.
1994 spinlock: value - obj representing a spinlock in kernel
1996 out_str - summary of the spinlock
1999 return "Invalid lock value: 0x0"
2001 out_str
= "Lock Type\t\t: SPINLOCK\n"
2002 if kern
.arch
== "x86_64":
2003 out_str
+= "Interlock\t\t: {:#x}\n".format(spinlock
.interlock
)
2006 out_str
+= "Owner Thread\t\t: {:#x}\n".format(spinlock
.lck_spin_data
& ~
0x3)
2007 out_str
+= "Flags\t\t\t: "
2008 if spinlock
.lck_spin_data
& 0x1:
2009 out_str
+= "[Interlock Locked] "
2010 if spinlock
.lck_spin_data
& 0x2:
2011 out_str
+= "[Wait Flag]"
2012 if (spinlock
.lck_spin_data
& 0x3) == 0:
2016 @lldb_command('showlock', 'MS')
2017 def ShowLock(cmd_args
=None, cmd_options
={}):
2018 """ Show info about a lock - its state and owner thread details
2019 Usage: showlock <address of a lock>
2020 -M : to consider <addr> as lck_mtx_t
2021 -S : to consider <addr> as lck_spin_t
2024 raise ArgumentError("Please specify the address of the lock whose info you want to view.")
2028 lock
= kern
.GetValueFromAddress(cmd_args
[0], 'uintptr_t*')
2030 if kern
.arch
== "x86_64" and lock
:
2031 if "-M" in cmd_options
:
2032 lock_mtx
= Cast(lock
, 'lck_mtx_t *')
2033 summary_str
= GetMutexLockSummary(lock_mtx
)
2034 elif "-S" in cmd_options
:
2035 lock_spin
= Cast(lock
, 'lck_spin_t *')
2036 summary_str
= GetSpinLockSummary(lock_spin
)
2038 summary_str
= "Please specify supported lock option(-M/-S)"
2044 lock_mtx
= Cast(lock
, 'lck_mtx_t*')
2045 if lock_mtx
.lck_mtx_type
== 0x22:
2046 summary_str
= GetMutexLockSummary(lock_mtx
)
2048 lock_spin
= Cast(lock
, 'lck_spin_t*')
2049 if lock_spin
.lck_spin_type
== 0x11:
2050 summary_str
= GetSpinLockSummary(lock_spin
)
2052 if summary_str
== "":
2053 summary_str
= "Lock Type\t\t: INVALID LOCK"
2058 @lldb_command('showallrwlck')
2059 def ShowAllRWLck(cmd_args
=None):
2060 """ Routine to print a summary listing of all read/writer locks
2062 if kern
.ptrsize
== 8:
2063 hdr_format
= '{:<18s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
2065 hdr_format
= '{:<10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<30s} '
2067 print hdr_format
.format('LCK GROUP', 'CNT', 'UTIL', 'MISS', 'WAIT', 'NAME')
2069 rwlgrp_queue_head
= kern
.globals.lck_grp_queue
2070 rwlgrp_ptr_type
= GetType('_lck_grp_ *')
2071 for rwlgrp_ptr
in IterateQueue(rwlgrp_queue_head
, rwlgrp_ptr_type
, "lck_grp_link"):
2072 print GetRWLEntry(rwlgrp_ptr
)
2074 # EndMacro: showallrwlck
2076 #Macro: showbootermemorymap
2077 @lldb_command('showbootermemorymap')
2078 def ShowBooterMemoryMap(cmd_args
=None):
2079 """ Prints out the phys memory map from kernelBootArgs
2080 Supported only on x86_64
2082 if kern
.arch
== 'x86_64':
2083 voffset
= unsigned(0xFFFFFF8000000000)
2085 print "showbootermemorymap not supported on this architecture"
2108 boot_args
= kern
.globals.kernelBootArgs
2109 msize
= boot_args
.MemoryMapDescriptorSize
2110 mcount
= (boot_args
.MemoryMapSize
) / unsigned(msize
)
2112 out_string
+= "{0: <12s} {1: <19s} {2: <19s} {3: <19s} {4: <10s}\n".format("Type", "Physical Start", "Number of Pages", "Virtual Start", "Attributes")
2116 mptr
= kern
.GetValueFromAddress(unsigned(boot_args
.MemoryMap
) + voffset
+ unsigned(i
*msize
), 'EfiMemoryRange *')
2117 mtype
= unsigned(mptr
.Type
)
2118 if mtype
in memtype_dict
:
2119 out_string
+= "{0: <12s}".format(memtype_dict
[mtype
])
2121 out_string
+= "{0: <12s}".format("UNKNOWN")
2123 if mptr
.VirtualStart
== 0:
2124 out_string
+= "{0: #019x} {1: #019x} {2: <19s} {3: #019x}\n".format(mptr
.PhysicalStart
, mptr
.NumberOfPages
, ' '*19, mptr
.Attribute
)
2126 out_string
+= "{0: #019x} {1: #019x} {2: #019x} {3: #019x}\n".format(mptr
.PhysicalStart
, mptr
.NumberOfPages
, mptr
.VirtualStart
, mptr
.Attribute
)
2130 #EndMacro: showbootermemorymap
2132 @lldb_command('show_all_purgeable_objects')
2133 def ShowAllPurgeableVmObjects(cmd_args
=None):
2134 """ Routine to print a summary listing of all the purgeable vm objects
2136 print "\n-------------------- VOLATILE OBJECTS --------------------\n"
2137 ShowAllPurgeableVolatileVmObjects()
2138 print "\n-------------------- NON-VOLATILE OBJECTS --------------------\n"
2139 ShowAllPurgeableNonVolatileVmObjects()
2141 @lldb_command('show_all_purgeable_nonvolatile_objects')
2142 def ShowAllPurgeableNonVolatileVmObjects(cmd_args
=None):
2143 """ Routine to print a summary listing of all the vm objects in
2144 the purgeable_nonvolatile_queue
2147 nonvolatile_total
= lambda:None
2148 nonvolatile_total
.objects
= 0
2149 nonvolatile_total
.vsize
= 0
2150 nonvolatile_total
.rsize
= 0
2151 nonvolatile_total
.wsize
= 0
2152 nonvolatile_total
.csize
= 0
2153 nonvolatile_total
.disowned_objects
= 0
2154 nonvolatile_total
.disowned_vsize
= 0
2155 nonvolatile_total
.disowned_rsize
= 0
2156 nonvolatile_total
.disowned_wsize
= 0
2157 nonvolatile_total
.disowned_csize
= 0
2159 queue_len
= kern
.globals.purgeable_nonvolatile_count
2160 queue_head
= kern
.globals.purgeable_nonvolatile_queue
2162 print 'purgeable_nonvolatile_queue:{:#018x} purgeable_volatile_count:{:d}\n'.format(kern
.GetLoadAddressForSymbol('purgeable_nonvolatile_queue'),queue_len
)
2163 print 'N:non-volatile V:volatile E:empty D:deny\n'
2165 print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
2167 for object in IterateQueue(queue_head
, 'struct vm_object *', 'objq'):
2169 ShowPurgeableNonVolatileVmObject(object, idx
, queue_len
, nonvolatile_total
)
2170 print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total
.disowned_objects
, nonvolatile_total
.disowned_vsize
, nonvolatile_total
.disowned_rsize
, nonvolatile_total
.disowned_wsize
, nonvolatile_total
.disowned_csize
)
2171 print " all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(nonvolatile_total
.objects
, nonvolatile_total
.vsize
, nonvolatile_total
.rsize
, nonvolatile_total
.wsize
, nonvolatile_total
.csize
)
2174 def ShowPurgeableNonVolatileVmObject(object, idx
, queue_len
, nonvolatile_total
):
2175 """ Routine to print out a summary a VM object in purgeable_nonvolatile_queue
2177 object - core.value : a object of type 'struct vm_object *'
2181 page_size
= kern
.globals.page_size
2182 if object.purgable
== 0:
2184 elif object.purgable
== 1:
2186 elif object.purgable
== 2:
2188 elif object.purgable
== 3:
2192 if object.pager
== 0:
2193 compressed_count
= 0
2195 compressor_pager
= Cast(object.pager
, 'compressor_pager *')
2196 compressed_count
= compressor_pager
.cpgr_num_slots_occupied
2198 print "{:>6d}/{:<6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx
,queue_len
,object,purgable
,object.ref_count
,object.vo_un1
.vou_size
/page_size
,object.resident_page_count
,object.wired_page_count
,compressed_count
, object.vo_un2
.vou_purgeable_owner
,GetProcPIDForTask(object.vo_un2
.vou_purgeable_owner
),GetProcNameForTask(object.vo_un2
.vou_purgeable_owner
))
2200 nonvolatile_total
.objects
+= 1
2201 nonvolatile_total
.vsize
+= object.vo_un1
.vou_size
/page_size
2202 nonvolatile_total
.rsize
+= object.resident_page_count
2203 nonvolatile_total
.wsize
+= object.wired_page_count
2204 nonvolatile_total
.csize
+= compressed_count
2205 if object.vo_un2
.vou_purgeable_owner
== 0:
2206 nonvolatile_total
.disowned_objects
+= 1
2207 nonvolatile_total
.disowned_vsize
+= object.vo_un1
.vou_size
/page_size
2208 nonvolatile_total
.disowned_rsize
+= object.resident_page_count
2209 nonvolatile_total
.disowned_wsize
+= object.wired_page_count
2210 nonvolatile_total
.disowned_csize
+= compressed_count
2213 @lldb_command('show_all_purgeable_volatile_objects')
2214 def ShowAllPurgeableVolatileVmObjects(cmd_args
=None):
2215 """ Routine to print a summary listing of all the vm objects in
2216 the purgeable queues
2218 volatile_total
= lambda:None
2219 volatile_total
.objects
= 0
2220 volatile_total
.vsize
= 0
2221 volatile_total
.rsize
= 0
2222 volatile_total
.wsize
= 0
2223 volatile_total
.csize
= 0
2224 volatile_total
.disowned_objects
= 0
2225 volatile_total
.disowned_vsize
= 0
2226 volatile_total
.disowned_rsize
= 0
2227 volatile_total
.disowned_wsize
= 0
2228 volatile_total
.disowned_csize
= 0
2230 purgeable_queues
= kern
.globals.purgeable_queues
2231 print "---------- OBSOLETE\n"
2232 ShowPurgeableQueue(purgeable_queues
[0], volatile_total
)
2233 print "\n\n---------- FIFO\n"
2234 ShowPurgeableQueue(purgeable_queues
[1], volatile_total
)
2235 print "\n\n---------- LIFO\n"
2236 ShowPurgeableQueue(purgeable_queues
[2], volatile_total
)
2238 print "disowned objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total
.disowned_objects
, volatile_total
.disowned_vsize
, volatile_total
.disowned_rsize
, volatile_total
.disowned_wsize
, volatile_total
.disowned_csize
)
2239 print " all objects:{:<10d} [ virtual:{:<10d} resident:{:<10d} wired:{:<10d} compressed:{:<10d} ]\n".format(volatile_total
.objects
, volatile_total
.vsize
, volatile_total
.rsize
, volatile_total
.wsize
, volatile_total
.csize
)
2240 purgeable_count
= kern
.globals.vm_page_purgeable_count
2241 purgeable_wired_count
= kern
.globals.vm_page_purgeable_wired_count
2242 if purgeable_count
!= volatile_total
.rsize
or purgeable_wired_count
!= volatile_total
.wsize
:
2243 mismatch
= "<--------- MISMATCH\n"
2246 print "vm_page_purgeable_count: resident:{:<10d} wired:{:<10d} {:s}\n".format(purgeable_count
, purgeable_wired_count
, mismatch
)
2249 def ShowPurgeableQueue(qhead
, volatile_total
):
2250 print "----- GROUP 0\n"
2251 ShowPurgeableGroup(qhead
.objq
[0], volatile_total
)
2252 print "----- GROUP 1\n"
2253 ShowPurgeableGroup(qhead
.objq
[1], volatile_total
)
2254 print "----- GROUP 2\n"
2255 ShowPurgeableGroup(qhead
.objq
[2], volatile_total
)
2256 print "----- GROUP 3\n"
2257 ShowPurgeableGroup(qhead
.objq
[3], volatile_total
)
2258 print "----- GROUP 4\n"
2259 ShowPurgeableGroup(qhead
.objq
[4], volatile_total
)
2260 print "----- GROUP 5\n"
2261 ShowPurgeableGroup(qhead
.objq
[5], volatile_total
)
2262 print "----- GROUP 6\n"
2263 ShowPurgeableGroup(qhead
.objq
[6], volatile_total
)
2264 print "----- GROUP 7\n"
2265 ShowPurgeableGroup(qhead
.objq
[7], volatile_total
)
2267 def ShowPurgeableGroup(qhead
, volatile_total
):
2269 for object in IterateQueue(qhead
, 'struct vm_object *', 'objq'):
2271 # print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s} {:18s} {:>6s} {:<20s} {:s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process","volatilizer","pid","process","")
2272 print "{:>6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:18s} {:>6s} {:<20s}\n".format("#","object","P","refcnt","size (pages)","resid","wired","compressed","owner","pid","process")
2274 ShowPurgeableVolatileVmObject(object, idx
, volatile_total
)
2276 def ShowPurgeableVolatileVmObject(object, idx
, volatile_total
):
2277 """ Routine to print out a summary a VM object in a purgeable queue
2279 object - core.value : a object of type 'struct vm_object *'
2283 # if int(object.vo_un2.vou_purgeable_owner) != int(object.vo_purgeable_volatilizer):
2287 page_size
= kern
.globals.page_size
2288 if object.purgable
== 0:
2290 elif object.purgable
== 1:
2292 elif object.purgable
== 2:
2294 elif object.purgable
== 3:
2298 if object.pager
== 0:
2299 compressed_count
= 0
2301 compressor_pager
= Cast(object.pager
, 'compressor_pager *')
2302 compressed_count
= compressor_pager
.cpgr_num_slots_occupied
2303 # print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s} {:#018x} {:>6d} {:<20s} {:s}\n".format(idx,object,purgable,object.ref_count,object.vo_un1.vou_size/page_size,object.resident_page_count,object.wired_page_count,compressed_count,object.vo_un2.vou_purgeable_owner,GetProcPIDForTask(object.vo_un2.vou_purgeable_owner),GetProcNameForTask(object.vo_un2.vou_purgeable_owner),object.vo_purgeable_volatilizer,GetProcPIDForTask(object.vo_purgeable_volatilizer),GetProcNameForTask(object.vo_purgeable_volatilizer),diff)
2304 print "{:>6d} {:#018x} {:1s} {:>6d} {:>16d} {:>10d} {:>10d} {:>10d} {:#018x} {:>6d} {:<20s}\n".format(idx
,object,purgable
,object.ref_count
,object.vo_un1
.vou_size
/page_size
,object.resident_page_count
,object.wired_page_count
,compressed_count
, object.vo_un2
.vou_purgeable_owner
,GetProcPIDForTask(object.vo_un2
.vou_purgeable_owner
),GetProcNameForTask(object.vo_un2
.vou_purgeable_owner
))
2305 volatile_total
.objects
+= 1
2306 volatile_total
.vsize
+= object.vo_un1
.vou_size
/page_size
2307 volatile_total
.rsize
+= object.resident_page_count
2308 volatile_total
.wsize
+= object.wired_page_count
2309 volatile_total
.csize
+= compressed_count
2310 if object.vo_un2
.vou_purgeable_owner
== 0:
2311 volatile_total
.disowned_objects
+= 1
2312 volatile_total
.disowned_vsize
+= object.vo_un1
.vou_size
/page_size
2313 volatile_total
.disowned_rsize
+= object.resident_page_count
2314 volatile_total
.disowned_wsize
+= object.wired_page_count
2315 volatile_total
.disowned_csize
+= compressed_count
def GetCompressedPagesForObject(obj):
    """ Return the number of occupied compressor-pager slots for a VM object.
        params:
            obj - core.value : object of type 'struct vm_object *'
        returns:
            count of compressed pages held by the object's pager
    """
    # The pager's slot-occupancy counter is maintained by the compressor;
    # no need to walk the slot arrays manually.
    pager = Cast(obj.pager, 'compressor_pager_t')
    return pager.cpgr_num_slots_occupied
2347 def ShowTaskVMEntries(task
, show_pager_info
, show_all_shadows
):
2348 """ Routine to print out a summary listing of all the entries in a vm_map
2350 task - core.value : a object of type 'task *'
2354 print "vm_map entries for task " + hex(task
)
2355 print GetTaskSummary
.header
2356 print GetTaskSummary(task
)
2358 print "Task {0: <#020x} has map = 0x0"
2360 showmapvme(task
.map, show_pager_info
, show_all_shadows
)
2362 @lldb_command("showmapvme", "PS")
2363 def ShowMapVME(cmd_args
=None, cmd_options
={}):
2364 """Routine to print out info about the specified vm_map and its vm entries
2365 usage: showmapvme <vm_map>
2366 Use -S flag to show VM object shadow chains
2367 Use -P flag to show pager info (mapped file, compressed pages, ...)
2369 if cmd_args
== None or len(cmd_args
) < 1:
2370 print "Invalid argument.", ShowMap
.__doc
__
2372 show_pager_info
= False
2373 show_all_shadows
= False
2374 if "-P" in cmd_options
:
2375 show_pager_info
= True
2376 if "-S" in cmd_options
:
2377 show_all_shadows
= True
2378 map = kern
.GetValueFromAddress(cmd_args
[0], 'vm_map_t')
2379 showmapvme(map, show_pager_info
, show_all_shadows
)
2381 def showmapvme(map, show_pager_info
, show_all_shadows
):
2382 page_size
= kern
.globals.page_size
2383 vnode_pager_ops
= kern
.globals.vnode_pager_ops
2384 vnode_pager_ops_addr
= unsigned(addressof(vnode_pager_ops
))
2387 rsize
= int(map.pmap
.stats
.resident_count
)
2388 print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end")
2389 print "{:#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap
,unsigned(map.size
),map.hdr
.nentries
,rsize
,map.hdr
.links
.start
,map.hdr
.links
.end
)
2390 vme_list_head
= map.hdr
.links
2391 vme_ptr_type
= GetType('vm_map_entry *')
2392 print "{:<18s} {:>18s}:{:<18s} {:>10s} {:<8s} {:<10s} {:<18s} {:<18s}".format("entry","start","end","#pgs","tag.kmod","prot&flags","object","offset")
2393 last_end
= unsigned(map.hdr
.links
.start
)
2394 for vme
in IterateQueue(vme_list_head
, vme_ptr_type
, "links"):
2395 if unsigned(vme
.links
.start
) != last_end
:
2396 print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end
,vme
.links
.start
,(unsigned(vme
.links
.start
) - last_end
)/page_size
)
2397 last_end
= unsigned(vme
.links
.end
)
2398 size
= unsigned(vme
.links
.end
) - unsigned(vme
.links
.start
)
2399 object = vme
.vme_object
.vmo_object
2401 object_str
= "{:<#018x}".format(object)
2402 elif vme
.is_sub_map
:
2403 if object == kern
.globals.bufferhdr_map
:
2404 object_str
= "BUFFERHDR_MAP"
2405 elif object == kern
.globals.mb_map
:
2406 object_str
= "MB_MAP"
2407 elif object == kern
.globals.bsd_pageable_map
:
2408 object_str
= "BSD_PAGEABLE_MAP"
2409 elif object == kern
.globals.ipc_kernel_map
:
2410 object_str
= "IPC_KERNEL_MAP"
2411 elif object == kern
.globals.ipc_kernel_copy_map
:
2412 object_str
= "IPC_KERNEL_COPY_MAP"
2413 elif object == kern
.globals.kalloc_map
:
2414 object_str
= "KALLOC_MAP"
2415 elif object == kern
.globals.zone_map
:
2416 object_str
= "ZONE_MAP"
2417 elif hasattr(kern
.globals, 'gzalloc_map') and object == kern
.globals.gzalloc_map
:
2418 object_str
= "GZALLOC_MAP"
2419 elif hasattr(kern
.globals, 'g_kext_map') and object == kern
.globals.g_kext_map
:
2420 object_str
= "G_KEXT_MAP"
2421 elif hasattr(kern
.globals, 'vector_upl_submap') and object == kern
.globals.vector_upl_submap
:
2422 object_str
= "VECTOR_UPL_SUBMAP"
2424 object_str
= "submap:{:<#018x}".format(object)
2426 if object == kern
.globals.kernel_object
:
2427 object_str
= "KERNEL_OBJECT"
2428 elif object == kern
.globals.vm_submap_object
:
2429 object_str
= "VM_SUBMAP_OBJECT"
2430 elif object == kern
.globals.compressor_object
:
2431 object_str
= "COMPRESSOR_OBJECT"
2433 object_str
= "{:<#018x}".format(object)
2434 offset
= unsigned(vme
.vme_offset
) & ~
0xFFF
2435 tag
= unsigned(vme
.vme_offset
& 0xFFF)
2441 if vme
.is_sub_map
and vme
.use_pmap
:
2444 if map.pmap
== kern
.globals.kernel_pmap
:
2445 xsite
= Cast(kern
.globals.vm_allocation_sites
[tag
],'OSKextAccount *')
2446 if xsite
and xsite
.site
.flags
& 2:
2447 tagstr
= ".{:<3d}".format(xsite
.loadTag
)
2448 print "{:#018x} {:#018x}:{:#018x} {:>10d} {:>3d}{:<4s} {:1d}{:1d}{:<8s} {:<18s} {:<#18x}".format(vme
,vme
.links
.start
,vme
.links
.end
,(unsigned(vme
.links
.end
)-unsigned(vme
.links
.start
))/page_size
,tag
,tagstr
,vme
.protection
,vme
.max_protection
,vme_flags
,object_str
,offset
)
2449 if (show_pager_info
or show_all_shadows
) and vme
.is_sub_map
== 0 and vme
.vme_object
.vmo_object
!= 0:
2450 object = vme
.vme_object
.vmo_object
2456 if show_all_shadows
== False and depth
!= 1 and object.shadow
!= 0:
2457 offset
+= unsigned(object.vo_un2
.vou_shadow_offset
)
2458 object = object.shadow
2460 if object.copy_strategy
== 0:
2462 elif object.copy_strategy
== 2:
2464 elif object.copy_strategy
== 4:
2467 copy_strategy
=str(object.copy_strategy
)
2469 internal
= "internal"
2471 internal
= "external"
2473 pager
= object.pager
2474 if show_pager_info
and pager
!= 0:
2476 pager_string
= "-> compressed:{:d}".format(GetCompressedPagesForObject(object))
2477 elif unsigned(pager
.mo_pager_ops
) == vnode_pager_ops_addr
:
2478 vnode_pager
= Cast(pager
,'vnode_pager *')
2479 pager_string
= "-> " + GetVnodePath(vnode_pager
.vnode_handle
)
2481 pager_string
= "-> {:s}:{:#018x}".format(pager
.mo_pager_ops
.memory_object_pager_name
, pager
.mo_pager_ops
)
2482 print "{:>18d} {:#018x}:{:#018x} {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} ({:d} {:d} {:d}) {:s}".format(depth
,offset
,offset
+size
,object,object.ref_count
,object.true_share
,copy_strategy
,internal
,unsigned(object.vo_un1
.vou_size
)/page_size
,object.resident_page_count
,object.wired_page_count
,pager_string
)
2483 # print " #{:<5d} obj {:#018x} ref:{:<6d} ts:{:1d} strat:{:1s} {:s} size:{:<10d} wired:{:<10d} resident:{:<10d} reusable:{:<10d}".format(depth,object,object.ref_count,object.true_share,copy_strategy,internal,object.vo_un1.vou_size/page_size,object.wired_page_count,object.resident_page_count,object.reusable_page_count)
2484 offset
+= unsigned(object.vo_un2
.vou_shadow_offset
)
2485 object = object.shadow
2486 if unsigned(map.hdr
.links
.end
) > last_end
:
2487 print "{:18s} {:#018x}:{:#018x} {:>10d}".format("------------------",last_end
,map.hdr
.links
.end
,(unsigned(map.hdr
.links
.end
) - last_end
)/page_size
)
2490 def CountMapTags(map, tagcounts
, slow
):
2491 page_size
= unsigned(kern
.globals.page_size
)
2492 vme_list_head
= map.hdr
.links
2493 vme_ptr_type
= GetType('vm_map_entry *')
2494 for vme
in IterateQueue(vme_list_head
, vme_ptr_type
, "links"):
2495 object = vme
.vme_object
.vmo_object
2496 tag
= vme
.vme_offset
& 0xFFF
2497 if object == kern
.globals.kernel_object
:
2500 count
= unsigned(vme
.links
.end
- vme
.links
.start
) / page_size
2502 addr
= unsigned(vme
.links
.start
)
2503 while addr
< unsigned(vme
.links
.end
):
2504 hash_id
= _calc_vm_page_hash(object, addr
)
2505 page_list
= kern
.globals.vm_page_buckets
[hash_id
].page_list
2506 page
= _vm_page_unpack_ptr(page_list
)
2508 vmpage
= kern
.GetValueFromAddress(page
, 'vm_page_t')
2509 if (addr
== unsigned(vmpage
.offset
)) and (object == vmpage
.object):
2510 if (not vmpage
.local
) and (vmpage
.wire_count
> 0):
2513 page
= _vm_page_unpack_ptr(vmpage
.next_m
)
2515 tagcounts
[tag
] += count
2516 elif vme
.is_sub_map
:
2517 CountMapTags(Cast(object,'vm_map_t'), tagcounts
, slow
)
def CountWiredObject(object, tagcounts):
    """ Add one VM object's wired page count into the per-tag totals.
        params:
            object - core.value : 'struct vm_object *'
            tagcounts - indexable accumulator keyed by wire tag; updated in place
    """
    tag = unsigned(object.wire_tag)
    tagcounts[tag] += object.wired_page_count
def CountWiredPurgeableGroup(qhead, tagcounts):
    """ Accumulate wired page counts for every object on one purgeable
        group queue.
    """
    for wired_obj in IterateQueue(qhead, 'struct vm_object *', 'objq'):
        CountWiredObject(wired_obj, tagcounts)
def CountWiredPurgeableQueue(qhead, tagcounts):
    """ Accumulate wired page counts across all 8 groups of a purgeable queue.
        params:
            qhead - purgeable queue head; has 8 group sub-queues in objq[]
            tagcounts - indexable accumulator keyed by wire tag; updated in place
    """
    # The original repeated the same call for objq[0]..objq[7]; a loop is
    # equivalent and removes the duplication.
    for group in range(8):
        CountWiredPurgeableGroup(qhead.objq[group], tagcounts)
def GetKmodIDName(kmod_id):
    """ Resolve a kext load tag to the loaded kmod's name.
        params:
            kmod_id - kmod load tag to look up
        returns:
            str - the kmod name left-padded to 50 chars, or a fallback
                  string when no kmod matches
    """
    for kmod in IterateLinkedList(kern.globals.kmod, 'next'):
        if kmod.id == kmod_id:
            return "{:<50s}".format(kmod.name)
    # NOTE(review): the original's final return is not visible in this
    # extraction; "??" reconstructed -- confirm against upstream memory.py.
    return "??"
def GetVMKernName(tag):
    """ Map a wired-memory allocation tag to its VM_KERN_MEMORY_* name.
        params:
            tag - int : a vm_tag_t wired allocation tag
        returns:
            str - the VM_KERN_MEMORY_* constant name, or "" for tags with
                  no static name (handled by the caller via the allocation
                  site table)
    """
    # Table form of the long if/elif chain; values follow the
    # VM_KERN_MEMORY_* enum in osfmk/mach/vm_statistics.h.
    names = {
        1:  "VM_KERN_MEMORY_OSFMK",
        2:  "VM_KERN_MEMORY_BSD",
        3:  "VM_KERN_MEMORY_IOKIT",
        4:  "VM_KERN_MEMORY_LIBKERN",
        5:  "VM_KERN_MEMORY_OSKEXT",
        6:  "VM_KERN_MEMORY_KEXT",
        7:  "VM_KERN_MEMORY_IPC",
        8:  "VM_KERN_MEMORY_STACK",
        9:  "VM_KERN_MEMORY_CPU",
        10: "VM_KERN_MEMORY_PMAP",
        11: "VM_KERN_MEMORY_PTE",
        12: "VM_KERN_MEMORY_ZONE",
        13: "VM_KERN_MEMORY_KALLOC",
        14: "VM_KERN_MEMORY_COMPRESSOR",
        15: "VM_KERN_MEMORY_COMPRESSED_DATA",
        16: "VM_KERN_MEMORY_PHANTOM_CACHE",
        17: "VM_KERN_MEMORY_WAITQ",
        18: "VM_KERN_MEMORY_DIAG",
        19: "VM_KERN_MEMORY_LOG",
        20: "VM_KERN_MEMORY_FILE",
        21: "VM_KERN_MEMORY_MBUF",
        22: "VM_KERN_MEMORY_UBC",
        23: "VM_KERN_MEMORY_SECURITY",
        24: "VM_KERN_MEMORY_MLOCK",
    }
    # NOTE(review): fallback for unmapped tags is not visible in this
    # extraction; "" assumed -- confirm against upstream memory.py.
    return names.get(tag, "")
# NOTE(review): this block was damaged in extraction — statements are split
# across lines, original file line numbers are fused into the text, and several
# source lines are missing (the initializations of 'slow', 'tagcounts' and
# 'total', and the conditionals selecting kext vs. symbolicated sites).
# Code left byte-identical; verify against the upstream kernel macro source.
# Purpose: totals kernel wired-page allocations per tag by walking the wired
# object queue, the purgeable queues and the kernel map, then prints one line
# per non-zero tag plus a grand total.
2598 @lldb_command("showvmtags", "S")
2599 def showvmtags(cmd_args
=None, cmd_options
={}):
2600 """Routine to print out info about kernel wired page allocations
2602 iterates kernel map and vm objects totaling allocations by tag.
2603 usage: showvmtags -S
2604 also iterates kernel object pages individually - slow.
# "-S" enables the slow per-page walk (passed to CountMapTags below).
2607 if "-S" in cmd_options
:
2609 page_size
= unsigned(kern
.globals.page_size
)
2611 for tag
in range(256):
# Count wired pages on the global wired-object queue.
2614 queue_head
= kern
.globals.vm_objects_wired
2615 for object in IterateQueue(queue_head
, 'struct vm_object *', 'objq'):
2616 CountWiredObject(object, tagcounts
)
# Nonvolatile purgeable objects are wired too.
2618 queue_head
= kern
.globals.purgeable_nonvolatile_queue
2619 for object in IterateQueue(queue_head
, 'struct vm_object *', 'objq'):
2620 CountWiredObject(object, tagcounts
)
# Three purgeable queues; each fans out into 8 object groups.
2622 purgeable_queues
= kern
.globals.purgeable_queues
2623 CountWiredPurgeableQueue(purgeable_queues
[0], tagcounts
)
2624 CountWiredPurgeableQueue(purgeable_queues
[1], tagcounts
)
2625 CountWiredPurgeableQueue(purgeable_queues
[2], tagcounts
)
2627 CountMapTags(kern
.globals.kernel_map
, tagcounts
, slow
)
2630 print " {:<8s} {:>7s} {:<50s}".format("tag.kmod","size","name")
2631 for tag
in range(256):
2633 total
+= tagcounts
[tag
]
# Fixed kernel tags resolve via GetVMKernName; kext tags via the
# allocation site's OSKextAccount; anything else is symbolicated.
2637 sitestr
= GetVMKernName(tag
)
2639 site
= kern
.globals.vm_allocation_sites
[tag
]
2642 xsite
= Cast(site
,'OSKextAccount *')
2643 tagstr
= ".{:<3d}".format(xsite
.loadTag
)
2644 sitestr
= GetKmodIDName(xsite
.loadTag
)
2646 sitestr
= kern
.Symbolicate(site
)
2647 print " {:>3d}{:<4s} {:>7d}K {:<50s}".format(tag
,tagstr
,tagcounts
[tag
]*page_size
/ 1024,sitestr
)
2648 print "Total: {:>7d}K".format(total
*page_size
/ 1024)
# NOTE(review): damaged in extraction — the initializations of 'vmmap', 'pmap'
# and 'retval', the early-return for a null pmap body, and the final
# 'return retval' are missing. Code left byte-identical; verify upstream.
# Purpose: scan a task's vm_map entries for objects whose pager is a
# vnode_pager backed by the given vnode, collecting (vme, start, end, prot).
2652 def FindVMEntriesForVnode(task
, vn
):
2653 """ returns an array of vme that have the vnode set to defined vnode
2654 each entry in array is of format (vme, start_addr, end_address, protection)
# Address of the vnode pager ops table; used below to recognize vnode pagers.
2659 pager_ops_addr
= unsigned(addressof(kern
.globals.vnode_pager_ops
))
2660 debuglog("pager_ops_addr %s" % hex(pager_ops_addr
))
2662 if unsigned(pmap
) == 0:
2664 vme_list_head
= vmmap
.hdr
.links
2665 vme_ptr_type
= gettype('vm_map_entry *')
2666 for vme
in IterateQueue(vme_list_head
, vme_ptr_type
, 'links'):
# Only direct (non-submap) entries with a backing object are candidates.
2668 if unsigned(vme
.is_sub_map
) == 0 and unsigned(vme
.vme_object
.vmo_object
) != 0:
2669 obj
= vme
.vme_object
.vmo_object
2678 vn_pager
= Cast(obj
.pager
, 'vnode_pager *')
# Match both the pager ops table and the specific vnode handle.
2679 if unsigned(vn_pager
.pager_ops
) == pager_ops_addr
and unsigned(vn_pager
.vnode_handle
) == unsigned(vn
):
2680 retval
.append((vme
, unsigned(vme
.links
.start
), unsigned(vme
.links
.end
), unsigned(vme
.protection
)))
# NOTE(review): damaged in extraction — the argument-count check guarding the
# raise, the 'uuid' extraction from the proc, and the loop that derives
# 'load_addr'/'end_addr' from the mappings are missing. Code left
# byte-identical; verify upstream before relying on this.
# Purpose: print the text load range, UUID and path for a task's main binary.
2684 @lldb_command('showtaskloadinfo')
2685 def ShowTaskLoadInfo(cmd_args
=None, cmd_options
={}):
2686 """ Print the load address and uuid for the process
2687 Usage: (lldb)showtaskloadinfo <task_t>
2690 raise ArgumentError("Insufficient arguments")
2691 t
= kern
.GetValueFromAddress(cmd_args
[0], 'struct task *')
2692 print_format
= "0x{0:x} - 0x{1:x} {2: <50s} (??? - ???) <{3: <36s}> {4: <50s}"
2693 p
= Cast(t
.bsd_info
, 'struct proc *')
# Render the 16-byte uuid as the canonical 8-4-4-4-12 hex string.
2695 uuid_out_string
= "{a[0]:02X}{a[1]:02X}{a[2]:02X}{a[3]:02X}-{a[4]:02X}{a[5]:02X}-{a[6]:02X}{a[7]:02X}-{a[8]:02X}{a[9]:02X}-{a[10]:02X}{a[11]:02X}{a[12]:02X}{a[13]:02X}{a[14]:02X}{a[15]:02X}".format(a
=uuid
)
2696 filepath
= GetVnodePath(p
.p_textvp
)
2697 libname
= filepath
.split('/')[-1]
2698 #print "uuid: %s file: %s" % (uuid_out_string, filepath)
# Map entries backed by the text vnode give the load/end addresses.
2699 mappings
= FindVMEntriesForVnode(t
, p
.p_textvp
)
2706 #print "Load address: %s" % hex(m[1])
2707 print print_format
.format(load_addr
, end_addr
, libname
, uuid_out_string
, filepath
)
# NOTE(review): damaged in extraction — the 'while page != 0:' loop header
# that drives the unpack/print loop at the bottom is missing. Code left
# byte-identical; verify upstream.
# Purpose: hash (object, offset) into vm_page_buckets and print every page
# chained in that bucket.
2710 @header("{0: <20s} {1: <20s} {2: <20s}".format("vm_page_t", "offset", "object"))
2711 @lldb_command('vmpagelookup')
2712 def VMPageLookup(cmd_args
=None):
2713 """ Print the pages in the page bucket corresponding to the provided object and offset.
2714 Usage: (lldb)vmpagelookup <vm_object_t> <vm_offset_t>
2716 if cmd_args
== None or len(cmd_args
) < 2:
2717 raise ArgumentError("Please specify an object and offset.")
2718 format_string
= "{0: <#020x} {1: <#020x} {2: <#020x}\n"
2720 obj
= kern
.GetValueFromAddress(cmd_args
[0],'unsigned long long')
2721 off
= kern
.GetValueFromAddress(cmd_args
[1],'unsigned long long')
2723 hash_id
= _calc_vm_page_hash(obj
, off
)
2725 page_list
= kern
.globals.vm_page_buckets
[hash_id
].page_list
2726 print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(hash_id
), unsigned(page_list
)))
2728 print VMPageLookup
.header
# Bucket entries are packed pointers; unpack, print, follow next_m.
2729 page
= _vm_page_unpack_ptr(page_list
)
2731 pg_t
= kern
.GetValueFromAddress(page
, 'vm_page_t')
2732 print format_string
.format(page
, pg_t
.offset
, pg_t
.object)
2733 page
= _vm_page_unpack_ptr(pg_t
.next_m
)
# NOTE(review): damaged in extraction — the 32-bit branch body and the
# zero/packed-pointer handling between the 'if' and the final return are
# missing. Code left byte-identical; verify upstream.
# Purpose: convert a packed vm_page pointer (stored shifted and biased) back
# into a full kernel address.
2735 def _vm_page_unpack_ptr(page
):
2736 if kern
.ptrsize
== 4 :
2742 min_addr
= kern
.globals.vm_min_kernel_and_kext_address
2743 #INTEL - min_addr = 0xffffff7f80000000
2744 #ARM - min_addr = 0x80000000
2745 #ARM64 - min_addr = 0xffffff8000000000
# Packed value is the address >> 6 relative to min_addr; invert that here.
2746 return ((page
<< 6) + min_addr
)
@lldb_command('calcvmpagehash')
def CalcVMPageHash(cmd_args=None):
    """ Get the page bucket corresponding to the provided object and offset.
        Usage: (lldb)calcvmpagehash <vm_object_t> <vm_offset_t>
    """
    # Require both the object address and the offset.
    if cmd_args is None or len(cmd_args) < 2:
        raise ArgumentError("Please specify an object and offset.")

    target_obj = kern.GetValueFromAddress(cmd_args[0],'unsigned long long')
    target_off = kern.GetValueFromAddress(cmd_args[1],'unsigned long long')

    # Hash into vm_page_buckets and show the bucket's packed list head.
    bucket = _calc_vm_page_hash(target_obj, target_off)
    bucket_head = kern.globals.vm_page_buckets[bucket].page_list
    print("hash_id: 0x%x page_list: 0x%x\n" % (unsigned(bucket), unsigned(bucket_head)))
def _calc_vm_page_hash(obj, off):
    """ Compute the vm_page_buckets index for an (object, offset) pair,
        mirroring the kernel's page-bucket hash.
        params:
            obj - vm_object address as an integer
            off - byte offset within the object
        returns: bucket index, masked with vm_page_hash_mask
    """
    bucket_hash = (int) (kern.globals.vm_page_bucket_hash)
    hash_mask = (int) (kern.globals.vm_page_hash_mask)

    # Mix the object address (truncated to 32 bits) with the page number.
    one = (obj * bucket_hash) & 0xFFFFFFFF
    two = off >> unsigned(kern.globals.page_shift)
    three = two ^ bucket_hash
    # Defect fix: 'four' was referenced below but its assignment (and the
    # final return) were missing, so the function raised NameError and
    # returned nothing. Reconstructed as the sum of the two mixed terms —
    # verify against the kernel's vm_page_hash definition.
    four = one + three
    hash_id = four & hash_mask

    return hash_id
# NOTE(review): damaged in extraction — missing pieces include the
# initializations of 'page', 'page_found', 'quiet_mode', 'out_string',
# 'page_count', 'pages_seen', 'stop' and 'ignore_limit', the per-iteration
# resets and increments, and the 'return'/'break' statements on the error
# paths. Code left byte-identical; verify upstream before relying on it.
# Purpose: walk a vm_object's resident page queue, printing per-page state
# and cross-checking each page for corruption and bucket membership.
2776 @header("{0: <10s} of {1: <10s} {2: <20s} {3: <20s} {4: <20s} {5: <10s} {6: <5s}\t {7: <28s}\t{8: <50s}".format("index", "total", "vm_page_t", "offset", "next", "phys_page", "wire#", "first bitfield", "second bitfield"))
2777 @lldb_command('vmobjectwalkpages', 'SBNQP:')
2778 def VMObjectWalkPages(cmd_args
=None, cmd_options
={}):
2779 """ Print the resident pages contained in the provided object. If a vm_page_t is provided as well, we
2780 specifically look for this page, highlighting it in the output or noting if it was not found. For
2781 each page, we confirm that it points to the object. We also keep track of the number of pages we
2782 see and compare this to the object's resident page count field.
2784 vmobjectwalkpages <vm_object_t> : Walk and print all the pages for a given object (up to 4K pages by default)
2785 vmobjectwalkpages <vm_object_t> -B : Walk and print all the pages for a given object (up to 4K pages by default), traversing the memq backwards
2786 vmobjectwalkpages <vm_object_t> -N : Walk and print all the pages for a given object, ignore the page limit
2787 vmobjectwalkpages <vm_object_t> -Q : Walk all pages for a given object, looking for known signs of corruption (i.e. inactive and active both being set for a page)
2788 vmobjectwalkpages <vm_object_t> -P <vm_page_t> : Walk all the pages for a given object, annotate the specified page in the output with ***
2789 vmobjectwalkpages <vm_object_t> -P <vm_page_t> -S : Walk all the pages for a given object, stopping when we find the specified page
2793 if (cmd_args
== None or len(cmd_args
) < 1):
2794 raise ArgumentError("Please specify at minimum a vm_object_t and optionally a vm_page_t")
2798 obj
= kern
.GetValueFromAddress(cmd_args
[0], 'vm_object_t')
# Optional target page (-P) and stop-on-find (-S, only valid with -P).
2801 if "-P" in cmd_options
:
2802 page
= kern
.GetValueFromAddress(cmd_options
['-P'], 'vm_page_t')
2805 if "-S" in cmd_options
:
2807 raise ArgumentError("-S can only be passed when a page is specified with -P")
2810 walk_backwards
= False
2811 if "-B" in cmd_options
:
2812 walk_backwards
= True
2815 if "-Q" in cmd_options
:
2819 print VMObjectWalkPages
.header
# Format strings for the fixed columns and the two bitfield columns.
2820 format_string
= "{0: <#10d} of {1: <#10d} {2: <#020x} {3: <#020x} {4: <#020x} {5: <#010x} {6: <#05d}\t"
2821 first_bitfield_format_string
= "{0: <#1d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:"
2822 first_bitfield_format_string
+= "{7: <#1d}:{8: <#1d}:{9: <#1d}:{10: <#1d}:{11: <#1d}:{12: <#1d}"
2823 second_bitfield_format_string
= first_bitfield_format_string
2824 second_bitfield_format_string
+= ":{13: <#1d}:{14: <#1d}:{15: <#1d}:{16: <#1d}:{17: <#1d}:{18: <#1d}:{19: <#1d}:"
2825 second_bitfield_format_string
+= "{20: <#1d}:{21: <#1d}:{22: <#1d}:{23: <#1d}:{24: <#1d}:{25: <#1d}:{26: <#1d}\n"
2826 first_bitfield_format_string
+= "\t"
2828 limit
= 4096 #arbitrary limit of number of pages to walk
2830 if "-N" in cmd_options
:
2834 res_page_count
= unsigned(obj
.resident_page_count
)
# Main walk over the object's resident page queue (direction per -B).
2838 for vmp
in IterateQueue(obj
.memq
, "vm_page_t", "listq", walk_backwards
):
2841 if (page
!= 0 and not(page_found
) and vmp
== page
):
2842 out_string
+= "******"
2845 if page
!= 0 or quiet_mode
:
2846 if (page_count
% 1000) == 0:
2847 print "traversed %d pages ...\n" % (page_count
)
2849 out_string
+= format_string
.format(page_count
, res_page_count
, vmp
, vmp
.offset
, vmp
.listq
.next
, vmp
.phys_page
, vmp
.wire_count
)
2850 out_string
+= first_bitfield_format_string
.format(vmp
.active
, vmp
.inactive
, vmp
.clean_queue
, vmp
.local
, vmp
.speculative
,
2851 vmp
.throttled
, vmp
.free
, vmp
.pageout_queue
, vmp
.laundry
, vmp
.reference
,
2852 vmp
.gobbled
, vmp
.private
, vmp
.no_cache
)
2854 out_string
+= second_bitfield_format_string
.format(vmp
.busy
, vmp
.wanted
, vmp
.tabled
, vmp
.hashed
, vmp
.fictitious
, vmp
.clustered
,
2855 vmp
.clustered
, vmp
.pmapped
, vmp
.xpmapped
, vmp
.wpmapped
, vmp
.pageout
, vmp
.absent
,
2856 vmp
.error
, vmp
.dirty
, vmp
.cleaning
, vmp
.precious
, vmp
.precious
, vmp
.overwriting
,
2857 vmp
.restart
, vmp
.unusual
, vmp
.encrypted
, vmp
.encrypted
, vmp
.encrypted_cleaning
,
2858 vmp
.cs_validated
, vmp
.cs_tainted
, vmp
.cs_nx
, vmp
.reusable
, vmp
.lopage
, vmp
.slid
, vmp
.compressor
,
2859 vmp
.written_by_kernel
)
# Corruption check: a page seen twice means the queue has a cycle.
2861 if (vmp
in pages_seen
):
2862 print out_string
+ "cycle detected! we've seen vm_page_t: " + "{0: <#020x}".format(unsigned(vmp
)) + " twice. stopping...\n"
# Corruption check: every resident page must point back at this object.
2865 if (vmp
.object != obj
):
2866 print out_string
+ " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp
)) + " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(vmp
.object))
# Corruption check: a wired page must not sit on any paging queue.
2869 if (not vmp
.local
) and (vmp
.wire_count
> 0):
2870 if (vmp
.active
or vmp
.inactive
or vmp
.speculative
or vmp
.throttled
or vmp
.pageout_queue
):
2871 print out_string
+ " wired page with wrong page queue attributes\n"
2872 print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp
)) + " active: %d inactive: %d speculative: %d throttled %d pageout_queue: %d\n" % (vmp
.active
,
2873 vmp
.inactive
, vmp
.speculative
, vmp
.throttled
, vmp
.pageout_queue
)
2874 print "stopping...\n"
# Corruption check: at most one queue-membership bit may be set.
2877 if ((vmp
.free
+ vmp
.active
+ vmp
.inactive
+ vmp
.speculative
+ vmp
.throttled
+ vmp
.pageout_queue
) > 1):
2878 print out_string
+ " more than one pageout queue bit set active\n"
2879 print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp
)) + " free: %d active: %d inactive: %d speculative: %d throttled: %d pageout_queue: %d\n" % (vmp
.free
,
2880 vmp
.active
, vmp
.inactive
, vmp
.speculative
, vmp
.throttled
, vmp
.pageout_queue
)
2881 print "stopping...\n"
# Corruption check: reserved bitfield padding must stay zero.
2884 if ((vmp
.__unused
_pageq
_bits
!= 0) or (vmp
.__unused
_object
_bits
!= 0)):
2885 print out_string
+ " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp
)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp
.__unused
_pageq
_bits
,
2886 vmp
.__unused
_object
_bits
)
2887 print "stopping...\n"
# Verify the page is reachable from its hash bucket chain.
2893 hash_id
= _calc_vm_page_hash(obj
, vmp
.offset
)
2894 hash_page_list
= kern
.globals.vm_page_buckets
[hash_id
].page_list
2895 hash_page
= _vm_page_unpack_ptr(hash_page_list
)
2898 while (hash_page
!= 0):
2899 hash_page_t
= kern
.GetValueFromAddress(hash_page
, 'vm_page_t')
2900 if hash_page_t
== vmp
:
2902 hash_page
= _vm_page_unpack_ptr(hash_page_t
.next_m
)
2904 if (unsigned(vmp
) != unsigned(hash_page_t
)):
2905 print out_string
+ "unable to find page: " + "{0: <#020x}".format(unsigned(vmp
)) + " from object in kernel page bucket list\n"
2906 print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args
[0], unsigned(vmp
.offset
)))
# Bail out at the page limit unless -N was given.
2909 if (page_count
>= limit
and not(ignore_limit
)):
2910 print out_string
+ "Limit reached (%d pages), stopping..." % (limit
)
# Post-walk summary.
2915 if page_found
and stop
:
2916 print("Object reports resident page count of: %d we stopped after traversing %d and finding the requested page.\n" % (unsigned(obj
.res_page_count
), unsigned(page_count
)))
2920 print("page found? : %s\n" % page_found
)
2922 print("Object reports resident page count of %d, we saw %d pages when we walked the resident list.\n" % (unsigned(obj
.resident_page_count
), unsigned(page_count
)))
2925 @lldb_command("show_all_apple_protect_pagers")
2926 def ShowAllAppleProtectPagers(cmd_args
=None):
2927 """Routine to print all apple_protect pagers
2928 usage: show_all_apple_protect_pagers
2930 print "{:>3s} {:<3s} {:<18s} {:>5s} {:>5s} {:>6s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s} {:<18s}\n".format("#", "#", "pager", "refs", "ready", "mapped", "mo_control", "object", "offset", "crypto_offset", "crypto_start", "crypto_end")
2931 qhead
= kern
.globals.apple_protect_pager_queue
2932 qtype
= GetType('apple_protect_pager *')
2933 qcnt
= kern
.globals.apple_protect_pager_count
2935 for pager
in IterateQueue(qhead
, qtype
, "pager_queue"):
2937 show_apple_protect_pager(pager
, qcnt
, idx
)
2939 @lldb_command("show_apple_protect_pager")
2940 def ShowAppleProtectPager(cmd_args
=None):
2941 """Routine to print out info about an apple_protect pager
2942 usage: show_apple_protect_pager <pager>
2944 if cmd_args
== None or len(cmd_args
) < 1:
2945 print "Invalid argument.", ShowMap
.__doc
__
2947 pager
= kern
.GetValueFromAddress(cmd_ars
[0], 'apple_protect_pager_t')
2948 show_apple_protect_pager(pager
, 1, 1)
# NOTE(review): damaged in extraction — the loop that walks the shadow chain
# between the two 'shadow = object.shadow' assignments is missing. Code left
# byte-identical; verify upstream.
# Purpose: print one formatted line describing an apple_protect pager, its
# backing object / offsets, crypt_info, and the backing vnode's path.
2950 def show_apple_protect_pager(pager
, qcnt
, idx
):
2951 object = pager
.backing_object
2952 shadow
= object.shadow
2955 shadow
= object.shadow
# The bottom-most object's pager is a vnode pager; resolve its file path.
2956 vnode_pager
= Cast(object.pager
,'vnode_pager *')
2957 filename
= GetVnodePath(vnode_pager
.vnode_handle
)
2958 print "{:>3}/{:<3d} {:#018x} {:>5d} {:>5d} {:>6d} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x} {:#018x}\n\tcrypt_info:{:#018x} <decrypt:{:#018x} end:{:#018x} ops:{:#018x} refs:{:<d}>\n\tvnode:{:#018x} {:s}\n".format(idx
, qcnt
, pager
, pager
.ref_count
, pager
.is_ready
, pager
.is_mapped
, pager
.pager_control
, pager
.backing_object
, pager
.backing_offset
, pager
.crypto_backing_offset
, pager
.crypto_start
, pager
.crypto_end
, pager
.crypt_info
, pager
.crypt_info
.page_decrypt
, pager
.crypt_info
.crypt_end
, pager
.crypt_info
.crypt_ops
, pager
.crypt_info
.crypt_refcnt
, vnode_pager
.vnode_handle
, filename
)