# TODO: write scheduler related macros here

# Macro: showallprocrunqcount

@lldb_command('showallprocrunqcount')
def ShowAllProcRunQCount(cmd_args=None):
    """ Prints out the runq count for all processors
    """
    out_str = "Processor\t# Runnable\n"
    processor_itr = kern.globals.processor_list
    while unsigned(processor_itr) != 0:
        out_str += "{:d}\t\t{:d}\n".format(processor_itr.cpu_id, processor_itr.runq.count)
        processor_itr = processor_itr.processor_list
    out_str += "RT:\t\t{:d}\n".format(kern.globals.rt_runq.count)
    print out_str

# EndMacro: showallprocrunqcount
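# A minimal usage sketch (hypothetical helper, not one of the original macros): the same
# processor_list walk as ShowAllProcRunQCount, but returning the counts so they can be
# inspected programmatically from an lldb 'script' session or reused by other macros.
def _ExampleProcRunQCounts():
    """ Hypothetical example: return a {cpu_id: runq count} dict for every processor. """
    counts = {}
    processor_itr = kern.globals.processor_list
    while unsigned(processor_itr) != 0:
        counts[unsigned(processor_itr.cpu_id)] = unsigned(processor_itr.runq.count)
        processor_itr = processor_itr.processor_list
    return counts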
# Macro: showinterrupts

@lldb_command('showinterrupts')
def ShowInterrupts(cmd_args=None):
    """ Prints IRQ, IPI and TMR counts for each CPU
    """

    if kern.arch not in ('arm', 'arm64'):
        print "showinterrupts is only supported on arm/arm64"
        return

    base_address = kern.GetLoadAddressForSymbol('CpuDataEntries')
    struct_size = 16    # stride of each CpuDataEntries element; the [1] index below picks the cpu_data vaddr

    for x in xrange(0, unsigned(kern.globals.machine_info.physical_cpu)):
        element = kern.GetValueFromAddress(base_address + (x * struct_size), 'uintptr_t *')[1]
        cpu_data_entry = Cast(element, 'cpu_data_t *')
        print "CPU {} IRQ: {:d}\n".format(x, cpu_data_entry.cpu_stat.irq_ex_cnt)
        print "CPU {} IPI: {:d}\n".format(x, cpu_data_entry.cpu_stat.ipi_cnt)
        print "CPU {} TMR: {:d}\n".format(x, cpu_data_entry.cpu_stat.timer_cnt)

# EndMacro: showinterrupts
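# Example invocation (output values are illustrative only; the macro bails out unless the
# target is arm/arm64):
#   (lldb) showinterrupts
#   CPU 0 IRQ: 1234567
#   CPU 0 IPI: 89012
#   CPU 0 TMR: 345678
#   ... one IRQ/IPI/TMR triple per physical CPU ...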
# Macro: showactiveinterrupts

@lldb_command('showactiveinterrupts')
def ShowActiveInterrupts(cmd_args=None):
    """ Prints the interrupts that are unmasked & active with the Interrupt Controller
        Usage: showactiveinterrupts <address of Interrupt Controller object>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowActiveInterrupts.__doc__
        return False

    aic = kern.GetValueFromAddress(cmd_args[0], 'AppleInterruptController *')
    if not aic:
        print "unknown arguments:", str(cmd_args)
        return False

    aic_base = unsigned(aic._aicBaseAddress)
    current_interrupt = 0
    aic_imc_base = aic_base + 0x4180
    aic_him_offset = 0x80
    current_pointer = aic_imc_base
    unmasked = dereference(kern.GetValueFromAddress(current_pointer, 'uintptr_t *'))
    active = dereference(kern.GetValueFromAddress(current_pointer + aic_him_offset, 'uintptr_t *'))
    group_count = 0
    mask = 1
    while current_interrupt < 192:
        if (((unmasked & mask) == 0) and (active & mask)):
            print "Interrupt {:d} unmasked and active\n".format(current_interrupt)
        current_interrupt = current_interrupt + 1
        # Each mask word covers 32 interrupts; move to the next word when we cross a group boundary
        if (current_interrupt % 32 == 0):
            mask = 1
            group_count = group_count + 1
            unmasked = dereference(kern.GetValueFromAddress(current_pointer + (4 * group_count), 'uintptr_t *'))
            active = dereference(kern.GetValueFromAddress((current_pointer + aic_him_offset) + (4 * group_count), 'uintptr_t *'))
        else:
            mask = mask << 1

# EndMacro: showactiveinterrupts
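# ShowActiveInterrupts walks 192 interrupt lines in groups of 32, reading one 4-byte mask
# word per group for the unmasked bits and another, at aic_him_offset above it, for the
# active bits. A minimal sketch of that word/bit arithmetic with plain integers
# (hypothetical helper, no kernel access required):
def _ExampleInterruptWordAndBit(interrupt_number):
    """ Hypothetical example: (byte offset of the 32-bit group word, bit mask within it). """
    group = interrupt_number // 32
    return (4 * group, 1 << (interrupt_number % 32))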
# Macro: showirqbyipitimerratio
@lldb_command('showirqbyipitimerratio')
def ShowIrqByIpiTimerRatio(cmd_args=None):
    """ Prints the ratio of IRQ by sum of IPI & TMR counts for each CPU
    """
    if kern.arch == "x86_64":
        print "This macro is not supported on x86_64 architecture"
        return

    out_str = "IRQ-IT Ratio: "
    base_address = kern.GetLoadAddressForSymbol('CpuDataEntries')
    struct_size = 16    # stride of each CpuDataEntries element

    for x in range(0, unsigned(kern.globals.machine_info.physical_cpu)):
        element = kern.GetValueFromAddress(base_address + (x * struct_size), 'uintptr_t *')[1]
        cpu_data_entry = Cast(element, 'cpu_data_t *')
        out_str += " CPU {} [{:.2f}]".format(x, float(cpu_data_entry.cpu_stat.irq_ex_cnt)/(cpu_data_entry.cpu_stat.ipi_cnt + cpu_data_entry.cpu_stat.timer_cnt))
    print out_str

# EndMacro: showirqbyipitimerratio
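# The ratio above divides by (ipi_cnt + timer_cnt). A hypothetical guarded variant of the
# same computation, for the unlikely case where a CPU has taken no IPIs or timer
# interrupts yet:
def _ExampleIrqItRatio(irq_cnt, ipi_cnt, timer_cnt):
    """ Hypothetical example: IRQ / (IPI + TMR), returning 0.0 when the denominator is 0. """
    denom = ipi_cnt + timer_cnt
    return float(irq_cnt) / denom if denom else 0.0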
# Macro: showinterruptsourceinfo
@lldb_command('showinterruptsourceinfo')
def showinterruptsourceinfo(cmd_args=None):
    """ Extract information of interrupt source causing interrupt storms.
    """
    if not cmd_args:
        print "No arguments passed"
        return False

    # Dump IOInterruptVector object
    print "--- Dumping IOInterruptVector object ---\n"
    object_info = lldb_run_command("dumpobject {:s} IOInterruptVector".format(cmd_args[0]))
    print object_info

    print "--- Dumping IOFilterInterruptEventSource object ---\n"
    # Dump the IOFilterInterruptEventSource object.
    target_info = re.search('target =\s+(.*)', object_info)
    target = target_info.group()
    target = target.split()
    # Dump the Object pointer of the source who is triggering the Interrupts.
    vector_info = lldb_run_command("dumpobject {:s} ".format(target[2]))
    print vector_info

    owner_info = re.search('owner =\s+(.*)', vector_info)
    owner = owner_info.group()
    owner = owner.split()
    out = lldb_run_command(" dumpobject {:s}".format(owner[2]))
    print out

# EndMacro: showinterruptsourceinfo
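# A minimal sketch of the field extraction used above, applied to a made-up line of
# dumpobject output (the real format is whatever the dumpobject macro prints):
#   >>> line = 'owner = 0xffffff8012345678 <vtable for IOHIDEventService>'
#   >>> re.search('owner =\s+(.*)', line).group().split()[2]
#   '0xffffff8012345678'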
@lldb_command('showcurrentabstime')
def ShowCurrentAbsTime(cmd_args=None):
    """ Routine to print latest absolute time known to system before being stopped.
        Usage: showcurrentabstime
    """
    pset = addressof(kern.globals.pset0)
    cur_abstime = 0

    while unsigned(pset) != 0:
        for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"):
            if unsigned(processor.last_dispatch) > cur_abstime:
                cur_abstime = unsigned(processor.last_dispatch)

        for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"):
            if unsigned(processor.last_dispatch) > cur_abstime:
                cur_abstime = unsigned(processor.last_dispatch)

        for processor in ParanoidIterateLinkageChain(pset.idle_secondary_queue, "processor_t", "processor_queue"):
            if unsigned(processor.last_dispatch) > cur_abstime:
                cur_abstime = unsigned(processor.last_dispatch)

        pset = pset.pset_list

    print "Last dispatch time known: %d MATUs" % cur_abstime
@lldb_command('abs2nano')
def ShowAbstimeToNanoTime(cmd_args=[]):
    """ convert mach_absolute_time units to nano seconds
        Usage: (lldb) abs2nano <timestamp in MATUs>
    """
    if not cmd_args:
        raise ArgumentError("Invalid argument")
    timedata = ArgumentStringToInt(cmd_args[0])
    ns = kern.GetNanotimeFromAbstime(timedata)
    us = float(ns) / 1000
    ms = us / 1000
    s = ms / 1000

    if s > 60:
        m = s / 60
        h = m / 60
        d = h / 24

        print "{:d} ns, {:f} us, {:f} ms, {:f} s, {:f} m, {:f} h, {:f} d".format(ns, us, ms, s, m, h, d)
    else:
        print "{:d} ns, {:f} us, {:f} ms, {:f} s".format(ns, us, ms, s)
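# Example invocation (numbers are illustrative): the conversion goes through the target's
# timebase, so on Intel (where 1 MATU == 1 ns) the nanosecond value equals the input,
# while on a 24 MHz ARM timebase 24000000 MATUs comes out to roughly one second:
#   (lldb) abs2nano 24000000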
# Macro: showschedhistory

def GetSchedMostRecentDispatch(show_processor_details=False):
    """ Return the most recent dispatch on the system, printing processor
        details if argument is true.
    """
    processor_list = kern.globals.processor_list

    most_recent_dispatch = 0
    current_processor = processor_list

    while unsigned(current_processor) > 0:
        active_thread = current_processor.active_thread
        if unsigned(active_thread) != 0 :
            task_val = active_thread.task
            proc_val = Cast(task_val.bsd_info, 'proc *')
            proc_name = "<unknown>" if unsigned(proc_val) == 0 else str(proc_val.p_name)

        last_dispatch = unsigned(current_processor.last_dispatch)

        if kern.arch == 'x86_64':
            cpu_data = kern.globals.cpu_data_ptr[current_processor.cpu_id]
            if (cpu_data != 0) :
                cpu_debugger_time = max(cpu_data.debugger_entry_time, cpu_data.debugger_ipi_time)
                time_since_dispatch = unsigned(cpu_debugger_time - last_dispatch)
                time_since_dispatch_us = kern.GetNanotimeFromAbstime(time_since_dispatch) / 1000.0
                time_since_debugger = unsigned(cpu_debugger_time - kern.globals.debugger_entry_time)
                time_since_debugger_us = kern.GetNanotimeFromAbstime(time_since_debugger) / 1000.0

                if show_processor_details:
                    print "Processor last dispatch: {:16d} Entered debugger: {:16d} ({:8.3f} us after dispatch, {:8.3f} us after debugger) Active thread: 0x{t:<16x} 0x{t.thread_id:<8x} {proc_name:s}".format(last_dispatch, cpu_debugger_time,
                            time_since_dispatch_us, time_since_debugger_us, t=active_thread, proc_name=proc_name)
        else:
            if show_processor_details:
                print "Processor last dispatch: {:16d} Active thread: 0x{t:<16x} 0x{t.thread_id:<8x} {proc_name:s}".format(last_dispatch, t=active_thread, proc_name=proc_name)

        if last_dispatch > most_recent_dispatch:
            most_recent_dispatch = last_dispatch

        current_processor = current_processor.processor_list

    return most_recent_dispatch
@header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name"))
def ShowThreadSchedHistory(thread, most_recent_dispatch):
    """ Given a thread and the most recent dispatch time of a thread on the
        system, print out details about scheduler history for the thread.
    """
    thread_name = ""

    if unsigned(thread.uthread) != 0:
        uthread = Cast(thread.uthread, 'uthread *')
        # Doing the straightforward thing blows up weirdly, so use some indirections to get back on track
        if unsigned(uthread.pth_name) != 0 :
            thread_name = str(kern.GetValueFromAddress(unsigned(uthread.pth_name), 'char*'))

    task = thread.task
    task_name = "unknown"
    if task and unsigned(task.bsd_info):
        p = Cast(task.bsd_info, 'proc *')
        task_name = str(p.p_name)

    sched_mode = ""

    mode = str(thread.sched_mode)
    if "TIMESHARE" in mode:
        sched_mode += "timeshare"
    elif "FIXED" in mode:
        sched_mode += "fixed"
    elif "REALTIME" in mode:
        sched_mode += "realtime"

    if (unsigned(thread.bound_processor) != 0):
        sched_mode += " bound"

    # 0x0004 == TH_SFLAG_THROTTLED
    if (unsigned(thread.sched_flags) & 0x0004):
        sched_mode += " BG"

    state = thread.state
    thread_state_chars = {0x0:'', 0x1:'W', 0x2:'S', 0x4:'R', 0x8:'U', 0x10:'H', 0x20:'A', 0x40:'P', 0x80:'I'}
    state_str = ''
    mask = 0x1
    while mask <= 0x80:
        state_str += thread_state_chars[int(state & mask)]
        mask = mask << 1

    last_on = thread.computation_epoch
    last_off = thread.last_run_time
    last_runnable = thread.last_made_runnable_time

    if int(last_runnable) == 18446744073709551615 :
        last_runnable = 0

    time_on_abs = unsigned(last_off - last_on)
    time_on_us = kern.GetNanotimeFromAbstime(time_on_abs) / 1000.0

    time_pending_abs = unsigned(most_recent_dispatch - last_runnable)
    time_pending_us = kern.GetNanotimeFromAbstime(time_pending_abs) / 1000.0

    if int(last_runnable) == 0 :
        time_pending_us = 0

    time_since_off_abs = unsigned(most_recent_dispatch - last_off)
    time_since_off_us = kern.GetNanotimeFromAbstime(time_since_off_abs) / 1000.0
    time_since_on_abs = unsigned(most_recent_dispatch - last_on)
    time_since_on_us = kern.GetNanotimeFromAbstime(time_since_on_abs) / 1000.0

    fmt  = "0x{t:<16x} 0x{t.thread_id:<8x} {t.computation_epoch:16d} {t.last_run_time:16d} {last_runnable:16d} {time_on_us:18.3f} {time_since_off_us:16.3f} {time_since_on_us:16.3f} {time_pending_us:16.3f}"
    fmt2 = " {t.base_pri:2d} {t.sched_pri:2d} {t.task_priority:2d} {t.max_priority:2d} {sched_mode:19s}"
    fmt3 = " {state:9s} {t.cpu_usage:10d} {t.cpu_delta:10d} {t.sched_usage:10d} {t.sched_stamp:10d} {t.pri_shift:10d} {name:s} {thread_name:s}"

    out_str = fmt.format(t=thread, time_on_us=time_on_us, time_since_off_us=time_since_off_us, time_since_on_us=time_since_on_us, last_runnable=last_runnable, time_pending_us=time_pending_us)
    out_str += fmt2.format(t=thread, sched_mode=sched_mode)
    out_str += fmt3.format(t=thread, state=state_str, name=task_name, thread_name=thread_name)

    print out_str
@lldb_command('showschedhistory')
def ShowSchedHistory(cmd_args=None):
    """ Routine to print out thread scheduling history
        Usage: showschedhistory [<thread-ptr> ...]
    """
    if cmd_args:
        most_recent_dispatch = GetSchedMostRecentDispatch(False)

        print ShowThreadSchedHistory.header
        for thread_ptr in cmd_args:
            thread = kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *')
            ShowThreadSchedHistory(thread, most_recent_dispatch)
        return

    run_buckets = kern.globals.sched_run_buckets

    run_count      = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
    fixpri_count   = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
    share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
    share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
    share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]

    sched_pri_shifts = kern.globals.sched_run_buckets

    share_fg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
    share_ut_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
    share_bg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]

    print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
    print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count)
    print "Mach factor: {g.sched_mach_factor:d} Load factor: {g.sched_load_average:d} Sched tick: {g.sched_tick:d} timestamp: {g.sched_tick_last_abstime:d} interval:{g.sched_tick_interval:d}\n".format(g=kern.globals)
    print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_ut_shift, share_bg_shift, g=kern.globals)
    print "sched_pri_decay_band_limit: {g.sched_pri_decay_band_limit:d} sched_decay_usage_age_factor: {g.sched_decay_usage_age_factor:d}\n".format(g=kern.globals)

    if kern.arch == 'x86_64':
        print "debugger_entry_time: {g.debugger_entry_time:d}\n".format(g=kern.globals)

    most_recent_dispatch = GetSchedMostRecentDispatch(True)
    print "Most recent dispatch: " + str(most_recent_dispatch)

    print ShowThreadSchedHistory.header
    for thread in IterateQueue(kern.globals.threads, 'thread *', 'threads'):
        ShowThreadSchedHistory(thread, most_recent_dispatch)

# EndMacro: showschedhistory
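# Example invocations (thread addresses are hypothetical): with thread pointers only those
# threads are printed against the most recent dispatch; with no arguments the run-bucket
# counts, scheduler globals, and every thread on the system are printed:
#   (lldb) showschedhistory 0xffffff80145f0000 0xffffff80145f1a00
#   (lldb) showschedhistory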
# Macro: showallprocessors

def ShowGroupSetSummary(runq, task_map):
    """ Internal function to print summary of group run queue
        params: runq - value representing struct run_queue *
    """
    print " runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, runq.highq, runq.urgency)

    runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0])

    for runq_queue_i in xrange(runq_queue_count):
        runq_queue_head = addressof(runq.queues[runq_queue_i])
        runq_queue_p = runq_queue_head.next

        if unsigned(runq_queue_p) != unsigned(runq_queue_head):
            runq_queue_this_count = 0

            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links"):
                runq_queue_this_count += 1

            print " Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count)
            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links"):
                group_addr = unsigned(entry) - (sizeof(dereference(entry)) * unsigned(entry.sched_pri))
                group = kern.GetValueFromAddress(unsigned(group_addr), 'sched_group_t')
                task = task_map.get(unsigned(group), 0x0)
                if task == 0x0:
                    print "Cannot find task for group: {: <#012x}".format(group)
                print "\tEntry [{: <#012x}] Priority {: <3d} Group {: <#012x} Task {: <#012x}\n".format(unsigned(entry), entry.sched_pri, unsigned(group), unsigned(task))
@lldb_command('showrunq')
def ShowRunq(cmd_args=None):
    """ Routine to print information of a runq
        Usage: showrunq <runq>
    """
    if not cmd_args:
        print "No arguments passed"
        print ShowRunq.__doc__
        return False

    runq = kern.GetValueFromAddress(cmd_args[0], 'struct run_queue *')
    ShowRunQSummary(runq)
def ShowRunQSummary(runq):
    """ Internal function to print summary of run_queue
        params: runq - value representing struct run_queue *
    """
    print " runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, runq.highq, runq.urgency)

    runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0])

    for runq_queue_i in xrange(runq_queue_count):
        runq_queue_head = addressof(runq.queues[runq_queue_i])
        runq_queue_p = runq_queue_head.next

        if unsigned(runq_queue_p) != unsigned(runq_queue_head):
            runq_queue_this_count = 0

            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links"):
                runq_queue_this_count += 1

            print " Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count)
            print "\t" + GetThreadSummary.header + "\n"
            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links"):
                print "\t" + GetThreadSummary(thread) + "\n"
                if config['verbosity'] > vHUMAN:
                    print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n"
def ShowGrrrSummary(grrr_runq):
    """ Internal function to print summary of grrr_run_queue
        params: grrr_runq - value representing struct grrr_run_queue *
    """
    print " GRRR Info: Count {: <10d} Weight {: <10d} Current Group {: <#012x}\n".format(grrr_runq.count,
        grrr_runq.weight, grrr_runq.current_group)

    grrr_group_count = sizeof(grrr_runq.groups)/sizeof(grrr_runq.groups[0])
    for grrr_group_i in xrange(grrr_group_count):
        grrr_group = addressof(grrr_runq.groups[grrr_group_i])
        if grrr_group.count > 0:
            print " Group {: <3d} [{: <#012x}] ".format(grrr_group.index, grrr_group)
            print "Count {:d} Weight {:d}\n".format(grrr_group.count, grrr_group.weight)
            grrr_group_client_head = addressof(grrr_group.clients)
            print GetThreadSummary.header
            for thread in ParanoidIterateLinkageChain(grrr_group_client_head, "thread_t", "runq_links"):
                print "\t" + GetThreadSummary(thread) + "\n"
                if config['verbosity'] > vHUMAN:
                    print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n"
def ShowNextThread(processor):
    if (processor.next_thread != 0):
        print " " + "Next thread:\n"
        print "\t" + GetThreadSummary.header + "\n"
        print "\t" + GetThreadSummary(processor.next_thread) + "\n"

def ShowActiveThread(processor):
    if (processor.active_thread != 0):
        print "\t" + GetThreadSummary.header + "\n"
        print "\t" + GetThreadSummary(processor.active_thread) + "\n"
@lldb_command('showallprocessors')
@lldb_command('showscheduler')
def ShowScheduler(cmd_args=None):
    """ Routine to print information of all psets and processors
        Usage: showscheduler
    """
    pset = addressof(kern.globals.pset0)
    show_grrr = 0
    show_priority_runq = 0
    show_priority_pset_runq = 0
    show_group_pset_runq = 0
    sched_string = str(kern.globals.sched_current_dispatch.sched_name)

    if sched_string == "traditional":
        show_priority_runq = 1
    elif sched_string == "traditional_with_pset_runqueue":
        show_priority_pset_runq = 1
    elif sched_string == "grrr":
        show_grrr = 1
    elif sched_string == "multiq":
        show_priority_runq = 1
        show_group_pset_runq = 1
    elif sched_string == "dualq":
        show_priority_pset_runq = 1
        show_priority_runq = 1
    else:
        print "Unknown sched_string {:s}".format(sched_string)

    print "Scheduler: {:s} ({:s})\n".format(sched_string,
            kern.Symbolicate(unsigned(kern.globals.sched_current_dispatch)))

    run_buckets = kern.globals.sched_run_buckets

    run_count      = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
    fixpri_count   = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
    share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
    share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
    share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]

    print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
    print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count)

    if show_group_pset_runq:
        print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}, sanity check {g.multiq_sanity_check:d}\n".format(g=kern.globals)

        # Create a group->task mapping
        task_map = {}
        for task in kern.tasks:
            task_map[unsigned(task.sched_group)] = task
        for task in kern.terminated_tasks:
            task_map[unsigned(task.sched_group)] = task

    while unsigned(pset) != 0:
        print "Processor Set {: <#012x} Count {:d} (cpu_id {:<#x}-{:<#x})\n".format(pset,
            pset.cpu_set_count, pset.cpu_set_low, pset.cpu_set_hi)

        if show_priority_pset_runq:
            runq = pset.pset_runq
            ShowRunQSummary(runq)

        if show_group_pset_runq:
            runq = pset.pset_runq
            ShowGroupSetSummary(runq, task_map)
            print "All Groups:\n"
            # TODO: Possibly output task header for each group
            for group in IterateQueue(kern.globals.sched_groups, "sched_group_t", "sched_groups"):
                if (group.runq.count != 0):
                    task = task_map.get(unsigned(group), "Unknown task!")
                    print "Group {: <#012x} Task {: <#012x}\n".format(unsigned(group), unsigned(task))
                    ShowRunQSummary(group.runq)

        print "Active Processors:\n"
        for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"):
            print " " + GetProcessorSummary(processor)
            ShowActiveThread(processor)
            ShowNextThread(processor)

            if show_priority_runq:
                runq = processor.runq
                ShowRunQSummary(runq)
            if show_grrr:
                grrr_runq = processor.grrr_runq
                ShowGrrrSummary(grrr_runq)

        print "Idle Processors:\n"
        for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"):
            print " " + GetProcessorSummary(processor)
            ShowActiveThread(processor)
            ShowNextThread(processor)

            if show_priority_runq:
                ShowRunQSummary(processor.runq)

        print "Idle Secondary Processors:\n"
        for processor in ParanoidIterateLinkageChain(pset.idle_secondary_queue, "processor_t", "processor_queue"):
            print " " + GetProcessorSummary(processor)
            ShowActiveThread(processor)
            ShowNextThread(processor)

            if show_priority_runq:
                ShowRunQSummary(processor.runq)

        pset = pset.pset_list

    print "\nRealtime Queue ({:<#012x}) Count {:d}\n".format(addressof(kern.globals.rt_runq.queue), kern.globals.rt_runq.count)
    if kern.globals.rt_runq.count != 0:
        print "\t" + GetThreadSummary.header + "\n"
        for rt_runq_thread in ParanoidIterateLinkageChain(kern.globals.rt_runq.queue, "thread_t", "runq_links"):
            print "\t" + GetThreadSummary(rt_runq_thread) + "\n"

    print "\nTerminate Queue: ({:<#012x})\n".format(addressof(kern.globals.thread_terminate_queue))
    first = True
    for thread in ParanoidIterateLinkageChain(kern.globals.thread_terminate_queue, "thread_t", "runq_links"):
        if first:
            print "\t" + GetThreadSummary.header + "\n"
            first = False
        print "\t" + GetThreadSummary(thread) + "\n"

    print "\nCrashed Threads Queue: ({:<#012x})\n".format(addressof(kern.globals.crashed_threads_queue))
    first = True
    for thread in ParanoidIterateLinkageChain(kern.globals.crashed_threads_queue, "thread_t", "runq_links"):
        if first:
            print "\t" + GetThreadSummary.header + "\n"
            first = False
        print "\t" + GetThreadSummary(thread) + "\n"

    print "\nWaiting For Kernel Stacks Queue: ({:<#012x})\n".format(addressof(kern.globals.thread_stack_queue))
    first = True
    for thread in ParanoidIterateLinkageChain(kern.globals.thread_stack_queue, "thread_t", "runq_links"):
        if first:
            print "\t" + GetThreadSummary.header + "\n"
            first = False
        print "\t" + GetThreadSummary(thread) + "\n"

# EndMacro: showallprocessors
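# Example invocation (address is hypothetical): showrunq takes the address of a
# struct run_queue, such as a pset or processor run queue printed by showscheduler:
#   (lldb) showrunq 0xffffff8012345678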
def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst=0):
    """ Iterate over a Linkage Chain queue in kernel of type queue_head_t. (osfmk/kern/queue.h method 1)
        This is equivalent to the qe_foreach_element() macro
        Blows up aggressively and descriptively when something goes wrong iterating a queue.
        Prints correctness errors, and throws exceptions on 'cannot proceed' errors
        If this is annoying, set the global 'enable_paranoia' to false.

        params:
            queue_head   - value : Value object for queue_head.
            element_type - lldb.SBType : pointer type of the element which contains the queue_chain_t. Typically its structs like thread, task etc..
                         - str         : OR a string describing the type. ex. 'task *'
            field_name   - str : Name of the field (in element) which holds a queue_chain_t
            field_ofst   - int : offset from the 'field_name' (in element) which holds a queue_chain_t
                                 This is mostly useful if a particular element contains an array of queue_chain_t
        returns:
            A generator does not return. It is used for iterating.
            value : An object thats of type (element_type). Always a pointer object
        example usage:
            for thread in ParanoidIterateLinkageChain(kern.globals.thread_stack_queue, 'thread_t', 'runq_links'):
                print thread.thread_id
    """

    if type(element_type) is str:
        element_type = gettype(element_type)

    # Some ways of constructing a queue head seem to end up with the
    # struct object as the value and not a pointer to the struct head
    # In that case, addressof will give us a pointer to the struct, which is what we need
    if not queue_head.GetSBValue().GetType().IsPointerType():
        queue_head = addressof(queue_head)

    # Mosh the value into a brand new value, to really get rid of its old cvalue history
    queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct queue_entry *')

    if unsigned(queue_head) == 0:
        if ParanoidIterateLinkageChain.enable_paranoia:
            print "bad queue_head_t: {:s}".format(queue_head)
        return

    if element_type.IsPointerType():
        elem_ofst = getfieldoffset(element_type.GetPointeeType(), field_name) + field_ofst
    else:
        elem_ofst = getfieldoffset(element_type, field_name) + field_ofst

    try:
        link = queue_head.next
        last_link = queue_head
        try_read_next = unsigned(queue_head.next)
    except:
        print "Exception while looking at queue_head: {:>#18x}".format(unsigned(queue_head))
        raise

    if ParanoidIterateLinkageChain.enable_paranoia:
        if unsigned(queue_head.next) == 0:
            raise ValueError("NULL next pointer on head: queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev))
        if unsigned(queue_head.prev) == 0:
            print "NULL prev pointer on head: queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev)
        if unsigned(queue_head.next) == unsigned(queue_head) and unsigned(queue_head.prev) != unsigned(queue_head):
            print "corrupt queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev)

    if ParanoidIterateLinkageChain.enable_debug:
        print "starting at queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev)

    addr = 0
    obj = 0

    try:
        while (unsigned(queue_head) != unsigned(link)):
            if ParanoidIterateLinkageChain.enable_paranoia:
                if unsigned(link.next) == 0:
                    raise ValueError("NULL next pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, link, link.next, link.prev))
                if unsigned(link.prev) == 0:
                    print "NULL prev pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, link, link.next, link.prev)
                if unsigned(last_link) != unsigned(link.prev):
                    print "Corrupt prev pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x} prev link: {:>#18x} ".format(
                            queue_head, link, link.next, link.prev, last_link)

            addr = unsigned(link) - unsigned(elem_ofst)
            obj = kern.GetValueFromAddress(addr, element_type)
            if ParanoidIterateLinkageChain.enable_debug:
                print "yielding link: {:>#18x} next: {:>#18x} prev: {:>#18x} addr: {:>#18x} obj: {:>#18x}".format(link, link.next, link.prev, addr, obj)
            yield obj
            last_link = link
            link = link.next
    except:
        exc_info = sys.exc_info()
        try:
            print "Exception while iterating queue: {:>#18x} link: {:>#18x} addr: {:>#18x} obj: {:>#18x} last link: {:>#18x}".format(queue_head, link, addr, obj, last_link)
        except:
            import traceback
            traceback.print_exc()
        raise exc_info[0], exc_info[1], exc_info[2]

ParanoidIterateLinkageChain.enable_paranoia = True
ParanoidIterateLinkageChain.enable_debug = False
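# Example (interactive lldb; the module name 'scheduler' is an assumption about how this
# file is loaded): the paranoia checks and per-element debug output can be toggled at
# runtime instead of editing this file:
#   (lldb) script
#   >>> import scheduler
#   >>> scheduler.ParanoidIterateLinkageChain.enable_debug = True
#   >>> quit()
#   (lldb) showscheduler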
# Macro: showallcallouts
@lldb_command('showallcallouts')
def ShowAllCallouts(cmd_args=None):
    """ Prints out the pending and delayed thread calls for high priority thread call group
    """
    # Get the high priority thread's call group
    g = addressof(kern.globals.thread_call_groups[0])
    pq = addressof(g.pending_queue)
    dq = addressof(g.delayed_queue)

    print "Active threads: {:d}\n".format(g.active_count)
    print "Idle threads: {:d}\n".format(g.idle_count)
    print "Pending threads: {:d}\n".format(g.pending_count)

    call = Cast(pq.next, 'thread_call_t')
    while unsigned(call) != unsigned(pq):
        print "Callout: " + kern.Symbolicate([unsigned(call.tc_call.func)]) + "\n"
        call = Cast(call.tc_call.q_link.next, 'thread_call_t')

    call = Cast(dq.next, 'thread_call_t')
    while unsigned(call) != unsigned(dq):
        out_str = "Deadline: {:>22d}. Callout: {:#x} <".format(call.tc_call.deadline, unsigned(call.tc_call.func))
        print out_str + kern.Symbolicate(unsigned(call.tc_call.func)) + ">\n"
        call = Cast(call.tc_call.q_link.next, 'thread_call_t')

# EndMacro: showallcallouts