X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..94ff46dc2849db4d43eaaf144872decc522aafb4:/tools/lldbmacros/scheduler.py

diff --git a/tools/lldbmacros/scheduler.py b/tools/lldbmacros/scheduler.py
old mode 100644
new mode 100755
index cf828c2db..0708c7658
--- a/tools/lldbmacros/scheduler.py
+++ b/tools/lldbmacros/scheduler.py
@@ -1,6 +1,8 @@
 from xnu import *
 from utils import *
 from process import *
+from misc import *
+from memory import *

 # TODO: write scheduler related macros here

@@ -15,7 +17,7 @@ def ShowAllProcRunQCount(cmd_args=None):
     while processor_itr:
         out_str += "{:d}\t\t{:d}\n".format(processor_itr.cpu_id, processor_itr.runq.count)
         processor_itr = processor_itr.processor_list
-    out_str += "RT:\t\t{:d}\n".format(kern.globals.rt_runq.count)
+    # out_str += "RT:\t\t{:d}\n".format(kern.globals.rt_runq.count)
     print out_str

 # EndMacro: showallprocrunqcount

@@ -27,18 +29,25 @@ def ShowInterrupts(cmd_args=None):
     """ Prints IRQ, IPI and TMR counts for each CPU
     """

-    if kern.arch not in ('arm', 'arm64'):
+    if not kern.arch.startswith('arm'):
         print "showinterrupts is only supported on arm/arm64"
         return

     base_address = kern.GetLoadAddressForSymbol('CpuDataEntries')
-    struct_size = 16
-    for x in xrange (0, unsigned(kern.globals.machine_info.physical_cpu)):
-        element = kern.GetValueFromAddress(base_address + (x * struct_size), 'uintptr_t *')[1]
-        cpu_data_entry = Cast(element, 'cpu_data_t *')
-        print "CPU {} IRQ: {:d}\n".format(x, cpu_data_entry.cpu_stat.irq_ex_cnt)
-        print "CPU {} IPI: {:d}\n".format(x, cpu_data_entry.cpu_stat.ipi_cnt)
-        print "CPU {} TMR: {:d}\n".format(x, cpu_data_entry.cpu_stat.timer_cnt)
+    struct_size = 16
+    x = 0
+    y = 0
+    while x < unsigned(kern.globals.machine_info.physical_cpu):
+        element = kern.GetValueFromAddress(base_address + (y * struct_size), 'uintptr_t *')[1]
+        if element:
+            cpu_data_entry = Cast(element, 'cpu_data_t *')
+            print "CPU {} IRQ: {:d}\n".format(y, cpu_data_entry.cpu_stat.irq_ex_cnt)
+            print "CPU {} IPI: {:d}\n".format(y, cpu_data_entry.cpu_stat.ipi_cnt)
+            print "CPU {} PMI: {:d}\n".format(y, cpu_data_entry.cpu_monotonic.mtc_npmis)
+            print "CPU {} TMR: {:d}\n".format(y, cpu_data_entry.cpu_stat.timer_cnt)
+            x = x + 1
+        y = y + 1
+
 # EndMacro: showinterrupts

 # Macro: showactiveinterrupts

@@ -91,10 +100,15 @@ def ShowIrqByIpiTimerRatio(cmd_args=None):
     out_str = "IRQ-IT Ratio: "
     base_address = kern.GetLoadAddressForSymbol('CpuDataEntries')
     struct_size = 16
-    for x in range (0, unsigned(kern.globals.machine_info.physical_cpu)):
-        element = kern.GetValueFromAddress(base_address + (x * struct_size), 'uintptr_t *')[1]
-        cpu_data_entry = Cast(element, 'cpu_data_t *')
-        out_str += " CPU {} [{:.2f}]".format(x, float(cpu_data_entry.cpu_stat.irq_ex_cnt)/(cpu_data_entry.cpu_stat.ipi_cnt + cpu_data_entry.cpu_stat.timer_cnt))
+    x = 0
+    y = 0
+    while x < unsigned(kern.globals.machine_info.physical_cpu):
+        element = kern.GetValueFromAddress(base_address + (y * struct_size), 'uintptr_t *')[1]
+        if element:
+            cpu_data_entry = Cast(element, 'cpu_data_t *')
+            out_str += " CPU {} [{:.2f}]".format(y, float(cpu_data_entry.cpu_stat.irq_ex_cnt)/(cpu_data_entry.cpu_stat.ipi_cnt + cpu_data_entry.cpu_stat.timer_cnt))
+            x = x + 1
+        y = y + 1
     print out_str

 # EndMacro: showirqbyipitimerratio
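
A note on the CpuDataEntries walk both rewritten loops share: the macros treat CpuDataEntries as an array of 16-byte slots, one per CPU position, and read the second pointer-sized word of each slot as the cpu_data_t pointer (hence the [1] subscript); a zero entry means the slot is unpopulated, which is why the new loops keep a separate slot cursor (y) and found-CPU counter (x). A minimal standalone sketch of the same walk, with the stride and word index taken on trust from the code above:

    # sketch, not part of the patch: generator form of the CpuDataEntries walk
    def IterateCpuDataEntries():
        base = kern.GetLoadAddressForSymbol('CpuDataEntries')
        struct_size = 16      # per-slot stride, assumed from the macros above
        remaining = unsigned(kern.globals.machine_info.physical_cpu)
        slot = 0
        while remaining > 0:
            # [1] selects the second uintptr_t in the slot: the cpu_data_t *
            element = kern.GetValueFromAddress(base + slot * struct_size, 'uintptr_t *')[1]
            if element:
                yield slot, Cast(element, 'cpu_data_t *')
                remaining -= 1
            slot += 1
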
@@ -134,18 +148,13 @@ def ShowCurremtAbsTime(cmd_args=None):
         Usage: showcurrentabstime
     """
     pset = addressof(kern.globals.pset0)
+    processor_array = kern.globals.processor_array
     cur_abstime = 0

     while unsigned(pset) != 0:
-        for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"):
-            if unsigned(processor.last_dispatch) > cur_abstime:
-                cur_abstime = unsigned(processor.last_dispatch)
-
-        for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"):
-            if unsigned(processor.last_dispatch) > cur_abstime:
-                cur_abstime = unsigned(processor.last_dispatch)
-
-        for processor in ParanoidIterateLinkageChain(pset.idle_secondary_queue, "processor_t", "processor_queue"):
+        cpu_bitmap = int(pset.cpu_bitmask)
+        for cpuid in IterateBitmap(cpu_bitmap):
+            processor = processor_array[cpuid]
             if unsigned(processor.last_dispatch) > cur_abstime:
                 cur_abstime = unsigned(processor.last_dispatch)

@@ -153,6 +162,127 @@ def ShowCurremtAbsTime(cmd_args=None):

     print "Last dispatch time known: %d MATUs" % cur_abstime

+bucketStr = ["", "FIXPRI (>UI)", "TIMESHARE_FG", "TIMESHARE_IN", "TIMESHARE_DF", "TIMESHARE_UT", "TIMESHARE_BG"]
+
+@header(" {:>18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Interactivity Score', 'Last Timeshare Tick', 'pri_shift', 'highq'))
+def GetSchedClutchBucketSummary(clutch_bucket):
+    return " 0x{:>16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_clutch.sc_tg, clutch_bucket.scb_interactivity_score, clutch_bucket.scb_timeshare_tick, clutch_bucket.scb_pri_shift, clutch_bucket.scb_runq.highq)
+
+def ShowSchedClutchForPset(pset):
+    root_clutch = pset.pset_clutch_root
+    print "\n{:s} : {:d}\n\n".format("Current Timestamp", GetRecentTimestamp())
+    print "{:>10s} | {:>20s} | {:>30s} | {:>18s} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Address", "Priority", "Count", "CPU Usage (MATUs)", "CPU Blocked (MATUs)", "Deadline (abs)") + GetSchedClutchBucketSummary.header
+    print "=" * 300
+    print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "*", "*", addressof(root_clutch), root_clutch.scr_priority, root_clutch.scr_thr_count, "*", "*", "*")
+    print "-" * 300
+
+    for i in range(1, 7):
+        root_bucket = root_clutch.scr_buckets[i]
+        print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15d} | ".format("*", bucketStr[i], "*", addressof(root_bucket), "*", "*", "*", "*", root_bucket.scrb_deadline)
+        prioq = root_bucket.scrb_clutch_buckets
+        clutch_bucket_list = []
+        for clutch_bucket in IteratePriorityQueue(prioq, 'struct sched_clutch_bucket', 'scb_pqlink'):
+            clutch_bucket_list.append(clutch_bucket)
+        if len(clutch_bucket_list) > 0:
+            clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True)
+            for clutch_bucket in clutch_bucket_list:
+                cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used
+                cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked
+                print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30d} | {:>30d} | {:>15s} | ".format("*", "*", clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, cpu_used, cpu_blocked, "*") + GetSchedClutchBucketSummary(clutch_bucket)
+        print "-" * 300
+
+@lldb_command('showschedclutch')
+def ShowSchedClutch(cmd_args=[]):
+    """ Routine to print the clutch scheduler hierarchy.
+        Usage: showschedclutch <pset>
+    """
+    if not cmd_args:
+        raise ArgumentError("Invalid argument")
+    pset = kern.GetValueFromAddress(cmd_args[0], "processor_set_t")
+    ShowSchedClutchForPset(pset)
+
+@lldb_command('showschedclutchroot')
+def ShowSchedClutchRoot(cmd_args=[]):
+    """ show information about the root of the sched clutch hierarchy
+        Usage: showschedclutchroot <root>
+    """
+    if not cmd_args:
+        raise ArgumentError("Invalid argument")
+    root = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_root *")
+    if not root:
+        print "unknown arguments:", str(cmd_args)
+        return False
+    print "{:>30s} : 0x{:16x}".format("Root", root)
+    print "{:>30s} : 0x{:16x}".format("Pset", root.scr_pset)
+    print "{:>30s} : {:d}".format("Priority", root.scr_priority)
+    print "{:>30s} : {:d}".format("Urgency", root.scr_urgency)
+    print "{:>30s} : {:d}".format("Threads", root.scr_thr_count)
+    print "{:>30s} : {:d}".format("Current Timestamp", GetRecentTimestamp())
+    print "{:>30s} : {:b} (BG/UT/DF/IN/FG/FIX/NULL)".format("Runnable Root Buckets Bitmap", int(root.scr_runnable_bitmap[0]))
+
+@lldb_command('showschedclutchrootbucket')
+def ShowSchedClutchRootBucket(cmd_args=[]):
+    """ show information about a root bucket in the sched clutch hierarchy
+        Usage: showschedclutchrootbucket <root_bucket>
+    """
+    if not cmd_args:
+        raise ArgumentError("Invalid argument")
+    root_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_root_bucket *")
+    if not root_bucket:
+        print "unknown arguments:", str(cmd_args)
+        return False
+    print "{:<30s} : 0x{:16x}".format("Root Bucket", root_bucket)
+    print "{:<30s} : {:s}".format("Bucket Name", bucketStr[int(root_bucket.scrb_bucket)])
+    print "{:<30s} : {:d}".format("Deadline", root_bucket.scrb_deadline)
+    print "{:<30s} : {:d}".format("Current Timestamp", GetRecentTimestamp())
+    print "\n"
+    prioq = root_bucket.scrb_clutch_buckets
+    clutch_bucket_list = []
+    for clutch_bucket in IteratePriorityQueue(prioq, 'struct sched_clutch_bucket', 'scb_pqlink'):
+        clutch_bucket_list.append(clutch_bucket)
+    if len(clutch_bucket_list) > 0:
+        print "=" * 240
+        print "{:>30s} | {:>18s} | {:>20s} | {:>20s} | ".format("Name", "Clutch Bucket", "Priority", "Count") + GetSchedClutchBucketSummary.header
+        print "=" * 240
+        clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True)
+        for clutch_bucket in clutch_bucket_list:
+            print "{:>30s} | 0x{:16x} | {:>20d} | {:>20d} | ".format(clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket)
+
+@lldb_command('showschedclutchbucket')
+def ShowSchedClutchBucket(cmd_args=[]):
+    """ show information about a clutch bucket in the sched clutch hierarchy
+        Usage: showschedclutchbucket <clutch_bucket>
+    """
+    if not cmd_args:
+        raise ArgumentError("Invalid argument")
+    clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *")
+    if not clutch_bucket:
+        print "unknown arguments:", str(cmd_args)
+        return False
+    print "{:<30s} : 0x{:16x}".format("Clutch Bucket", clutch_bucket)
+    print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_clutch.sc_tg.tg_name)
+    print "{:<30s} : {:d}".format("Priority", clutch_bucket.scb_priority)
+    print "{:<30s} : {:d}".format("Thread Count", clutch_bucket.scb_thr_count)
+    print "{:<30s} : 0x{:16x}".format("Thread Group", clutch_bucket.scb_clutch.sc_tg)
+    cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used
+    cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked
{:d}".format("CPU Used (MATUs)", cpu_used) + print "{:<30s} : {:d}".format("CPU Blocked (MATUs)", cpu_blocked) + print "{:<30s} : {:d}".format("Interactivity Score", clutch_bucket.scb_interactivity_score) + print "{:<30s} : {:d}".format("Last Timeshare Update Tick", clutch_bucket.scb_timeshare_tick) + print "{:<30s} : {:d}".format("Priority Shift", clutch_bucket.scb_pri_shift) + print "\n" + runq = clutch_bucket.scb_clutchpri_prioq + thread_list = [] + for thread in IteratePriorityQueue(runq, 'struct thread', 'sched_clutchpri_link'): + thread_list.append(thread) + if len(thread_list) > 0: + print "=" * 240 + print GetThreadSummary.header + "{:s}".format("Process Name") + print "=" * 240 + for thread in thread_list: + proc = Cast(thread.task.bsd_info, 'proc *') + print GetThreadSummary(thread) + "{:s}".format(str(proc.p_comm)) @lldb_command('abs2nano') def ShowAbstimeToNanoTime(cmd_args=[]): @@ -178,6 +308,16 @@ def ShowAbstimeToNanoTime(cmd_args=[]): # Macro: showschedhistory +def GetRecentTimestamp(): + """ + Return a recent timestamp. + TODO: on x86, if not in the debugger, then look at the scheduler + """ + if kern.arch == 'x86_64': + return kern.globals.debugger_entry_time + else : + return GetSchedMostRecentDispatch(False) + def GetSchedMostRecentDispatch(show_processor_details=False): """ Return the most recent dispatch on the system, printing processor details if argument is true. @@ -219,7 +359,7 @@ def GetSchedMostRecentDispatch(show_processor_details=False): return most_recent_dispatch -@header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name")) +@header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "prichange", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "pri-change (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name")) def ShowThreadSchedHistory(thread, most_recent_dispatch): """ Given a thread and the most recent dispatch time of a thread on the system, print out details about scheduler history for the thread. 

 @lldb_command('abs2nano')
 def ShowAbstimeToNanoTime(cmd_args=[]):

@@ -178,6 +308,16 @@ def ShowAbstimeToNanoTime(cmd_args=[]):

 # Macro: showschedhistory

+def GetRecentTimestamp():
+    """
+    Return a recent timestamp.
+    TODO: on x86, if not in the debugger, then look at the scheduler
+    """
+    if kern.arch == 'x86_64':
+        return kern.globals.debugger_entry_time
+    else :
+        return GetSchedMostRecentDispatch(False)
+
 def GetSchedMostRecentDispatch(show_processor_details=False):
     """ Return the most recent dispatch on the system, printing processor details
         if argument is true.

@@ -219,7 +359,7 @@ def GetSchedMostRecentDispatch(show_processor_details=False):

     return most_recent_dispatch

-@header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name"))
+@header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "prichange", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "pri-change (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name"))
 def ShowThreadSchedHistory(thread, most_recent_dispatch):
     """ Given a thread and the most recent dispatch time of a thread on the system,
         print out details about scheduler history for the thread.

@@ -268,69 +408,108 @@ def ShowThreadSchedHistory(thread, most_recent_dispatch):
     last_on = thread.computation_epoch
     last_off = thread.last_run_time
     last_runnable = thread.last_made_runnable_time
-
+    last_prichange = thread.last_basepri_change_time
+
     if int(last_runnable) == 18446744073709551615 :
         last_runnable = 0

+    if int(last_prichange) == 18446744073709551615 :
+        last_prichange = 0
+
     time_on_abs = unsigned(last_off - last_on)
     time_on_us = kern.GetNanotimeFromAbstime(time_on_abs) / 1000.0

     time_pending_abs = unsigned(most_recent_dispatch - last_runnable)
     time_pending_us = kern.GetNanotimeFromAbstime(time_pending_abs) / 1000.0
-
+
     if int(last_runnable) == 0 :
         time_pending_us = 0

+    last_prichange_abs = unsigned(most_recent_dispatch - last_prichange)
+    last_prichange_us = kern.GetNanotimeFromAbstime(last_prichange_abs) / 1000.0
+
+    if int(last_prichange) == 0 :
+        last_prichange_us = 0
+
     time_since_off_abs = unsigned(most_recent_dispatch - last_off)
     time_since_off_us = kern.GetNanotimeFromAbstime(time_since_off_abs) / 1000.0
     time_since_on_abs = unsigned(most_recent_dispatch - last_on)
     time_since_on_us = kern.GetNanotimeFromAbstime(time_since_on_abs) / 1000.0

-    fmt = "0x{t:<16x} 0x{t.thread_id:<8x} {t.computation_epoch:16d} {t.last_run_time:16d} {last_runnable:16d} {time_on_us:18.3f} {time_since_off_us:16.3f} {time_since_on_us:16.3f} {time_pending_us:16.3f}"
+    fmt = "0x{t:<16x} 0x{t.thread_id:<8x} {t.computation_epoch:16d} {t.last_run_time:16d} {last_runnable:16d} {last_prichange:16d} {time_on_us:18.3f} {time_since_off_us:16.3f} {time_since_on_us:16.3f} {time_pending_us:16.3f} {last_prichange_us:16.3f}"
     fmt2 = " {t.base_pri:2d} {t.sched_pri:2d} {t.task_priority:2d} {t.max_priority:2d} {sched_mode:19s}"
     fmt3 = " {state:9s} {t.cpu_usage:10d} {t.cpu_delta:10d} {t.sched_usage:10d} {t.sched_stamp:10d} {t.pri_shift:10d} {name:s} {thread_name:s}"

-    out_str = fmt.format(t=thread, time_on_us=time_on_us, time_since_off_us=time_since_off_us, time_since_on_us=time_since_on_us, last_runnable=last_runnable, time_pending_us=time_pending_us)
+    out_str = fmt.format(t=thread, time_on_us=time_on_us, time_since_off_us=time_since_off_us, time_since_on_us=time_since_on_us, last_runnable=last_runnable, time_pending_us=time_pending_us, last_prichange=last_prichange, last_prichange_us=last_prichange_us)
     out_str += fmt2.format(t=thread, sched_mode=sched_mode)
     out_str += fmt3.format(t=thread, state=state_str, name=task_name, thread_name=thread_name)
-
+
     print out_str

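The repeated constant 18446744073709551615 above is 2**64 - 1, i.e. ~0ULL, which the kernel appears to use as a "timestamp not set" marker for last_made_runnable_time and last_basepri_change_time; it has to be zeroed before computing unsigned(most_recent_dispatch - last_x), or the subtraction wraps into a huge bogus delta. If this ever gets refactored, a named constant would read better; a sketch only:

    UINT64_MAX = 0xffffffffffffffff   # 18446744073709551615, "timestamp not set"
    if int(last_runnable) == UINT64_MAX:
        last_runnable = 0
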
""" + sort_column = None + if '-S' in cmd_options: + sort_column = cmd_options['-S'] + if cmd_args: most_recent_dispatch = GetSchedMostRecentDispatch(False) print ShowThreadSchedHistory.header - for thread_ptr in cmd_args: - thread = kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *') - ShowThreadSchedHistory(thread, most_recent_dispatch) + + if sort_column: + threads = [] + for thread_ptr in cmd_args: + threads.append(kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *')) + + SortThreads(threads, sort_column) + + for thread in threads: + ShowThreadSchedHistory(thread, most_recent_dispatch) + else: + for thread_ptr in cmd_args: + thread = kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *') + ShowThreadSchedHistory(thread, most_recent_dispatch) return - + run_buckets = kern.globals.sched_run_buckets run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')] fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')] share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')] + share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')] share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')] sched_pri_shifts = kern.globals.sched_run_buckets share_fg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')] + share_df_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] share_ut_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')] share_bg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')] print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals) - print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count) + print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count) print "Mach factor: {g.sched_mach_factor:d} Load factor: {g.sched_load_average:d} Sched tick: {g.sched_tick:d} timestamp: {g.sched_tick_last_abstime:d} interval:{g.sched_tick_interval:d}\n".format(g=kern.globals) - print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_ut_shift, share_bg_shift, g=kern.globals) + print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} DF shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_df_shift, share_ut_shift, share_bg_shift, g=kern.globals) print "sched_pri_decay_band_limit: {g.sched_pri_decay_band_limit:d} sched_decay_usage_age_factor: {g.sched_decay_usage_age_factor:d}\n".format(g=kern.globals) if kern.arch == 'x86_64': @@ -340,12 +519,24 @@ def ShowSchedHistory(cmd_args=None): print "Most recent dispatch: " + str(most_recent_dispatch) print ShowThreadSchedHistory.header - for thread in IterateQueue(kern.globals.threads, 'thread *', 'threads'): - ShowThreadSchedHistory(thread, most_recent_dispatch) + + if sort_column: + threads = [t for t in IterateQueue(kern.globals.threads, 'thread *', 'threads')] + + SortThreads(threads, sort_column) + + for thread in threads: + ShowThreadSchedHistory(thread, most_recent_dispatch) + else: + for thread in IterateQueue(kern.globals.threads, 'thread *', 

+def int32(n):
+    n = n & 0xffffffff
+    return (n ^ 0x80000000) - 0x80000000
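
int32 reinterprets the low 32 bits of a value as signed: the mask clamps to 32 bits, and XOR-ing the sign bit then subtracting it back sign-extends. It is introduced so run-queue fields wrapped with it below (runq.highq) print as signed values rather than, say, 4294967295. Worked examples:

    >>> int32(0xffffffff)
    -1
    >>> int32(0x7fffffff)
    2147483647
    >>> int32(42)
    42
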

 # Macro: showallprocessors

@@ -353,7 +544,8 @@ def ShowGroupSetSummary(runq, task_map):
     """ Internal function to print summary of group run queue
         params: runq - value representing struct run_queue *
     """
-    print "    runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, runq.highq, runq.urgency)
+
+    print "    runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, int32(runq.highq), runq.urgency)

     runq_queue_i = 0
     runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0])

@@ -365,11 +557,11 @@ def ShowGroupSetSummary(runq, task_map):
         if unsigned(runq_queue_p) != unsigned(runq_queue_head):
             runq_queue_this_count = 0

-            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links"):
+            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links", circleQueue=True):
                 runq_queue_this_count += 1

             print "      Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count)
-            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links"):
+            for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links", circleQueue=True):
                 group_addr = unsigned(entry) - (sizeof(dereference(entry)) * unsigned(entry.sched_pri))
                 group = kern.GetValueFromAddress(unsigned(group_addr), 'sched_group_t')
                 task = task_map.get(unsigned(group), 0x0)

@@ -395,28 +587,38 @@ def ShowRunQSummary(runq):
     """ Internal function to print summary of run_queue
         params: runq - value representing struct run_queue *
     """
-    print "    runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, runq.highq, runq.urgency)
+
+    print "    runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, int32(runq.highq), runq.urgency)

     runq_queue_i = 0
     runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0])

     for runq_queue_i in xrange(runq_queue_count) :
         runq_queue_head = addressof(runq.queues[runq_queue_i])
-        runq_queue_p = runq_queue_head.next
+        runq_queue_p = runq_queue_head.head

-        if unsigned(runq_queue_p) != unsigned(runq_queue_head):
+        if unsigned(runq_queue_p):
             runq_queue_this_count = 0

-            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links"):
+            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links", circleQueue=True):
                 runq_queue_this_count += 1

             print "      Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count)
             print "\t" + GetThreadSummary.header + "\n"
-            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links"):
+            for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links", circleQueue=True):
                 print "\t" + GetThreadSummary(thread) + "\n"
                 if config['verbosity'] > vHUMAN :
                     print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n"

+def ShowRTRunQSummary(rt_runq):
+    if (hex(rt_runq.count) == hex(0xfdfdfdfd)) :
+        print "    Realtime Queue ({:<#012x}) uninitialized\n".format(addressof(rt_runq.queue))
+        return
+    print "    Realtime Queue ({:<#012x}) Count {:d}\n".format(addressof(rt_runq.queue), rt_runq.count)
+    if rt_runq.count != 0:
+        print "\t" + GetThreadSummary.header + "\n"
+        for rt_runq_thread in ParanoidIterateLinkageChain(rt_runq.queue, "thread_t", "runq_links", circleQueue=True):
+            print "\t" + GetThreadSummary(rt_runq_thread) + "\n"
+
 def ShowGrrrSummary(grrr_runq):
     """ Internal function to print summary of grrr_run_queue

@@ -433,17 +635,11 @@ def ShowGrrrSummary(grrr_runq):
             print "Count {:d} Weight {:d}\n".format(grrr_group.count, grrr_group.weight)
             grrr_group_client_head = addressof(grrr_group.clients)
             print GetThreadSummary.header
-            for thread in ParanoidIterateLinkageChain(grrr_group_client_head, "thread_t", "runq_links"):
+            for thread in ParanoidIterateLinkageChain(grrr_group_client_head, "thread_t", "runq_links", circleQueue=True):
                 print "\t" + GetThreadSummary(thread) + "\n"
                 if config['verbosity'] > vHUMAN :
                     print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n"

-def ShowNextThread(processor):
-    if (processor.next_thread != 0) :
-        print "      " + "Next thread:\n"
-        print "\t" + GetThreadSummary.header + "\n"
-        print "\t" + GetThreadSummary(processor.next_thread) + "\n"
-
 def ShowActiveThread(processor):
     if (processor.active_thread != 0) :
         print "\t" + GetThreadSummary.header + "\n"

@@ -455,12 +651,13 @@ def ShowScheduler(cmd_args=None):
     """ Routine to print information of all psets and processors
         Usage: showscheduler
     """
-    pset = addressof(kern.globals.pset0)
+    node = addressof(kern.globals.pset_node0)
     show_grrr = 0
     show_priority_runq = 0
     show_priority_pset_runq = 0
     show_group_pset_runq = 0
-    sched_string = str(kern.globals.sched_current_dispatch.sched_name)
+    show_clutch = 0
+    sched_string = str(kern.globals.sched_string)

     if sched_string == "traditional":
         show_priority_runq = 1

@@ -474,25 +671,37 @@ def ShowScheduler(cmd_args=None):
     elif sched_string == "dualq":
         show_priority_pset_runq = 1
         show_priority_runq = 1
+    elif sched_string == "amp":
+        show_priority_pset_runq = 1
+        show_priority_runq = 1
+    elif sched_string == "clutch":
+        show_clutch = 1
     else :
         print "Unknown sched_string {:s}".format(sched_string)

-    print "Scheduler: {:s} ({:s})\n".format(sched_string,
-        kern.Symbolicate(unsigned(kern.globals.sched_current_dispatch)))
-
-    run_buckets = kern.globals.sched_run_buckets
-
-    run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
-    fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
-    share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
-    share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
-    share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]
-
-    print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
-    print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count)
+    print "Scheduler: {:s}\n".format(sched_string)
+
+    if show_clutch == 0:
+        run_buckets = kern.globals.sched_run_buckets
+        run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
+        fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
+        share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')]
+        share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')]
+        share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')]
+        share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')]
+        print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals)
+        print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count)
+
+    processor_offline = GetEnumValue('processor_state_t::PROCESSOR_OFF_LINE')
+    processor_idle = GetEnumValue('processor_state_t::PROCESSOR_IDLE')
+    processor_dispatching = GetEnumValue('processor_state_t::PROCESSOR_DISPATCHING')
+    processor_running = GetEnumValue('processor_state_t::PROCESSOR_RUNNING')

     if show_group_pset_runq:
-        print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}, sanity check {g.multiq_sanity_check:d}\n".format(g=kern.globals)
+        if hasattr(kern.globals, "multiq_sanity_check"):
+            print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}, sanity check {g.multiq_sanity_check:d}\n".format(g=kern.globals)
+        else:
+            print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}\n".format(g=kern.globals)

         # Create a group->task mapping
         task_map = {}

@@ -503,95 +712,124 @@ def ShowScheduler(cmd_args=None):

     print " \n"

-    while unsigned(pset) != 0:
-        print "Processor Set  {: <#012x} Count {:d} (cpu_id {:<#x}-{:<#x})\n".format(pset,
-            pset.cpu_set_count, pset.cpu_set_low, pset.cpu_set_hi)
-
-        if show_priority_pset_runq:
-            runq = pset.pset_runq
-            ShowRunQSummary(runq)
-
-        if show_group_pset_runq:
-            print "Main Runq:\n"
-            runq = pset.pset_runq
-            ShowGroupSetSummary(runq, task_map)
-            print "All Groups:\n"
-            # TODO: Possibly output task header for each group
-            for group in IterateQueue(kern.globals.sched_groups, "sched_group_t", "sched_groups"):
-                if (group.runq.count != 0) :
-                    task = task_map.get(unsigned(group), "Unknown task!")
-                    print "Group {: <#012x} Task {: <#012x}\n".format(unsigned(group), unsigned(task))
-                    ShowRunQSummary(group.runq)
-        print " \n"
-
-        print "Active Processors:\n"
-        for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"):
-            print "    " + GetProcessorSummary(processor)
-            ShowActiveThread(processor)
-            ShowNextThread(processor)
-
-            if show_priority_runq:
-                runq = processor.runq
-                ShowRunQSummary(runq)
-            if show_grrr:
-                grrr_runq = processor.grrr_runq
-                ShowGrrrSummary(grrr_runq)
-        print " \n"
-
-
-        print "Idle Processors:\n"
-        for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"):
-            print "    " + GetProcessorSummary(processor)
-            ShowActiveThread(processor)
-            ShowNextThread(processor)
-
-            if show_priority_runq:
-                ShowRunQSummary(processor.runq)
-        print " \n"
{:d}\n".format(addressof(kern.globals.rt_runq.queue), kern.globals.rt_runq.count) - if kern.globals.rt_runq.count != 0: - print "\t" + GetThreadSummary.header + "\n" - for rt_runq_thread in ParanoidIterateLinkageChain(kern.globals.rt_runq.queue, "thread_t", "runq_links"): - print "\t" + GetThreadSummary(rt_runq_thread) + "\n" + if show_priority_pset_runq: + runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *') + ShowRunQSummary(runq) - print "\nTerminate Queue: ({:<#012x})\n".format(addressof(kern.globals.thread_terminate_queue)) - first = False - for thread in ParanoidIterateLinkageChain(kern.globals.thread_terminate_queue, "thread_t", "runq_links"): - if first: - print "\t" + GetThreadSummary.header + "\n" - first = True - print "\t" + GetThreadSummary(thread) + "\n" + if show_group_pset_runq: + print "Main Runq:\n" + runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *') + ShowGroupSetSummary(runq, task_map) + print "All Groups:\n" + # TODO: Possibly output task header for each group + for group in IterateQueue(kern.globals.sched_groups, "sched_group_t", "sched_groups"): + if (group.runq.count != 0) : + task = task_map.get(unsigned(group), "Unknown task!") + print "Group {: <#012x} Task {: <#012x}\n".format(unsigned(group), unsigned(task)) + ShowRunQSummary(group.runq) + print " \n" + + processor_array = kern.globals.processor_array + + print "Active Processors:\n" + active_bitmap = int(pset.cpu_state_map[processor_dispatching]) | int(pset.cpu_state_map[processor_running]) + for cpuid in IterateBitmap(active_bitmap): + processor = processor_array[cpuid] + if processor != 0: + print " " + GetProcessorSummary(processor) + ShowActiveThread(processor) + + if show_priority_runq: + runq = processor.runq + ShowRunQSummary(runq) + if show_grrr: + grrr_runq = processor.grrr_runq + ShowGrrrSummary(grrr_runq) + print " \n" + + + print "Idle Processors:\n" + idle_bitmap = int(pset.cpu_state_map[processor_idle]) & int(pset.primary_map) + for cpuid in IterateBitmap(idle_bitmap): + processor = processor_array[cpuid] + if processor != 0: + print " " + GetProcessorSummary(processor) + ShowActiveThread(processor) + + if show_priority_runq: + ShowRunQSummary(processor.runq) + print " \n" + + + print "Idle Secondary Processors:\n" + idle_bitmap = int(pset.cpu_state_map[processor_idle]) & ~(int(pset.primary_map)) + for cpuid in IterateBitmap(idle_bitmap): + processor = processor_array[cpuid] + if processor != 0: + print " " + GetProcessorSummary(processor) + ShowActiveThread(processor) + + if show_priority_runq: + print ShowRunQSummary(processor.runq) + print " \n" + + + print "Other Processors:\n" + other_bitmap = 0 + for i in range(processor_offline, processor_idle): + other_bitmap |= int(pset.cpu_state_map[i]) + other_bitmap &= int(pset.cpu_bitmask) + for cpuid in IterateBitmap(other_bitmap): + processor = processor_array[cpuid] + if processor != 0: + print " " + GetProcessorSummary(processor) + ShowActiveThread(processor) + + if show_priority_runq: + ShowRunQSummary(processor.runq) + print " \n" + + if show_clutch: + print "=== Clutch Scheduler Hierarchy ===\n\n" + ShowSchedClutchForPset(pset) + + pset = pset.pset_list + + node = node.node_list print "\nCrashed Threads Queue: ({:<#012x})\n".format(addressof(kern.globals.crashed_threads_queue)) - first = False + first = True for thread in ParanoidIterateLinkageChain(kern.globals.crashed_threads_queue, "thread_t", "runq_links"): if first: - print "\t" + GetThreadSummary.header + "\n" 

     print "\nCrashed Threads Queue: ({:<#012x})\n".format(addressof(kern.globals.crashed_threads_queue))
-    first = False
+    first = True
     for thread in ParanoidIterateLinkageChain(kern.globals.crashed_threads_queue, "thread_t", "runq_links"):
         if first:
-            print "\t" + GetThreadSummary.header + "\n"
-            first = True
-        print "\t" + GetThreadSummary(thread) + "\n"
-
-    print "\nWaiting For Kernel Stacks Queue: ({:<#012x})\n".format(addressof(kern.globals.thread_stack_queue))
-    first = False
-    for thread in ParanoidIterateLinkageChain(kern.globals.thread_stack_queue, "thread_t", "runq_links"):
-        if first:
-            print "\t" + GetThreadSummary.header + "\n"
-            first = True
-        print "\t" + GetThreadSummary(thread) + "\n"
+            print "\t" + GetThreadSummary.header
+            first = False
+        print "\t" + GetThreadSummary(thread)
+
+    def dump_mpsc_thread_queue(name, head):
+        head = addressof(head)
+        print "\n{:s}: ({:<#012x})\n".format(name, head)
+        first = True
+        for thread in IterateMPSCQueue(head.mpd_queue, 'struct thread', 'mpsc_links'):
+            if first:
+                print "\t" + GetThreadSummary.header
+                first = False
+            print "\t" + GetThreadSummary(thread)
+
+    dump_mpsc_thread_queue("Terminate Queue", kern.globals.thread_terminate_queue)
+    dump_mpsc_thread_queue("Waiting For Kernel Stacks Queue", kern.globals.thread_stack_queue)
+    dump_mpsc_thread_queue("Thread Exception Queue", kern.globals.thread_exception_queue)
+    dump_mpsc_thread_queue("Thread Deallocate Queue", kern.globals.thread_deallocate_queue)

     print "\n"

@@ -600,8 +838,8 @@ def ShowScheduler(cmd_args=None):

 # EndMacro: showallprocessors

-def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst=0):
-    """ Iterate over a Linkage Chain queue in kernel of type queue_head_t. (osfmk/kern/queue.h method 1)
+def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst=0, circleQueue=False):
+    """ Iterate over a Linkage Chain queue in kernel of type queue_head_t or circle_queue_head_t. (osfmk/kern/queue.h method 1 or circle_queue.h)
         This is equivalent to the qe_foreach_element() macro
         Blows up aggressively and descriptively when something goes wrong iterating a queue.
         Prints correctness errors, and throws exceptions on 'cannot proceed' errors

@@ -631,18 +869,24 @@ def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst
     if not queue_head.GetSBValue().GetType().IsPointerType() :
         queue_head = addressof(queue_head)

-    # Mosh the value into a brand new value, to really get rid of its old cvalue history
-    queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct queue_entry *')
+    if circleQueue:
+        # Mosh the value into a brand new value, to really get rid of its old cvalue history
+        queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct circle_queue_head *').head
+    else:
+        # Mosh the value into a brand new value, to really get rid of its old cvalue history
+        queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct queue_entry *')

     if unsigned(queue_head) == 0:
-        if ParanoidIterateLinkageChain.enable_paranoia:
+        if not circleQueue and ParanoidIterateLinkageChain.enable_paranoia:
             print "bad queue_head_t: {:s}".format(queue_head)
         return

     if element_type.IsPointerType():
-        elem_ofst = getfieldoffset(element_type.GetPointeeType(), field_name) + field_ofst
+        struct_type = element_type.GetPointeeType()
     else:
-        elem_ofst = getfieldoffset(element_type, field_name) + field_ofst
+        struct_type = element_type
+
+    elem_ofst = getfieldoffset(struct_type, field_name) + field_ofst

     try:
         link = queue_head.next

@@ -667,7 +911,9 @@ def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst
     obj = 0

     try:
-        while (unsigned(queue_head) != unsigned(link)):
+        while True:
+            if not circleQueue and unsigned(queue_head) == unsigned(link):
+                break;
             if ParanoidIterateLinkageChain.enable_paranoia:
                 if unsigned(link.next) == 0:
                     raise ValueError("NULL next pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, link, link.next, link.prev))

@@ -684,6 +930,8 @@ def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst
             yield obj
             last_link = link
             link = link.next
+            if circleQueue and unsigned(queue_head) == unsigned(link):
+                break;
     except:
         exc_info = sys.exc_info()
         try:

@@ -696,31 +944,104 @@ def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst

 ParanoidIterateLinkageChain.enable_paranoia = True
 ParanoidIterateLinkageChain.enable_debug = False

+def bit_first(bitmap):
+    return bitmap.bit_length() - 1
+
+def lsb_first(bitmap):
+    bitmap = bitmap & -bitmap
+    return bit_first(bitmap)
+
+def IterateBitmap(bitmap):
+    """ Iterate over a bitmap, returning the index of set bits starting from 0
+
+        params:
+            bitmap - value : bitmap
+        returns:
+            A generator does not return. It is used for iterating.
+            value : index of a set bit
+        example usage:
+            for cpuid in IterateBitmap(running_bitmap):
+                print processor_array[cpuid]
+    """
+    i = lsb_first(bitmap)
+    while (i >= 0):
+        yield i
+        bitmap = bitmap & ~((1 << (i + 1)) - 1)
+        i = lsb_first(bitmap)
+

 # Macro: showallcallouts

+def ShowThreadCall(prefix, call):
+    """
+    Print a description of a thread_call_t and its relationship to its expected fire time
+    """
+    func = call.tc_call.func
+    param0 = call.tc_call.param0
+    param1 = call.tc_call.param1
+
+    iotes_desc = ""
+    iotes_callout = kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource17timeoutAndReleaseEPvS0_")
+    iotes_callout2 = kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource15timeoutSignaledEPvS0_")
+
+    if (unsigned(func) == unsigned(iotes_callout) or
+        unsigned(func) == unsigned(iotes_callout2)) :
+        iotes = Cast(call.tc_call.param0, 'IOTimerEventSource*')
+        func = iotes.action
+        param0 = iotes.owner
+        param1 = unsigned(iotes)
+
+    func_name = kern.Symbolicate(func)
+    if (func_name == "") :
+        func_name = FindKmodNameForAddr(func)
+
+    call_entry = call.tc_call
+
+    recent_timestamp = GetRecentTimestamp()
+
+    # THREAD_CALL_CONTINUOUS  0x100
+    if (call.tc_flags & 0x100) :
+        timer_fire = call_entry.deadline - (recent_timestamp +
+                         kern.globals.mach_absolutetime_asleep)
+    else :
+        timer_fire = call_entry.deadline - recent_timestamp
+
+    timer_fire_s = kern.GetNanotimeFromAbstime(timer_fire) / 1000000000.0
+
+    ttd_s = kern.GetNanotimeFromAbstime(call.tc_ttd) / 1000000000.0
+
+    print "{:s}{:#018x}: {:18d} {:18d} {:03.06f} {:03.06f} {:#018x}({:#018x},{:#018x}) ({:s})".format(prefix,
+        unsigned(call), call_entry.deadline, call.tc_soft_deadline, ttd_s, timer_fire_s,
+        func, param0, param1, func_name)
+
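
Note the two clocks in the deadline math above: a thread call with THREAD_CALL_CONTINUOUS set (0x100, per the inline comment) carries its deadline in continuous time, which keeps ticking across sleep, so "now" must be shifted by mach_absolutetime_asleep before subtracting; plain calls compare directly against absolute time. A compact restatement, assuming the flag value the comment gives:

    # sketch: seconds until a thread call fires, relative to an absolute "now"
    def TimeUntilFire(call, now_abs):
        deadline = call.tc_call.deadline
        if call.tc_flags & 0x100:                                  # THREAD_CALL_CONTINUOUS
            now = now_abs + kern.globals.mach_absolutetime_asleep  # continuous-time "now"
        else:
            now = now_abs
        return kern.GetNanotimeFromAbstime(deadline - now) / 1e9
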
Callout: {:#x} <".format(call.tc_call.deadline, unsigned(call.tc_call.func)) - print out_str + kern.Symbolicate(unsigned(call.tc_call.func)) + ">\n" - call = Cast(call.tc_call.q_link.next, 'thread_call_t') + + index_max = GetEnumValue('thread_call_index_t::THREAD_CALL_INDEX_MAX') + + for i in range (0, index_max) : + group = kern.globals.thread_call_groups[i] + + print "Group {i:d}: {g.tcg_name:s} ({:>#18x})".format(addressof(group), i=i, g=group) + print "\t" +"Active: {g.active_count:d} Idle: {g.idle_count:d}\n".format(g=group) + print "\t" +"Blocked: {g.blocked_count:d} Pending: {g.pending_count:d}\n".format(g=group) + print "\t" +"Target: {g.target_thread_count:d}\n".format(g=group) + + print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue)) + for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_call.q_link"): + ShowThreadCall("\t\t", call) + + print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format( + addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0])) + for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_call.q_link"): + ShowThreadCall("\t\t", call) + + print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format( + addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1])) + for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_call.q_link"): + ShowThreadCall("\t\t", call) # EndMacro: showallcallouts