from process import *
from misc import *
from memory import *
+from ipc import *
# TODO: write scheduler related macros here
print "Last dispatch time known: %d MATUs" % cur_abstime
-bucketStr = ["", "FIXPRI (>UI)", "TIMESHARE_FG", "TIMESHARE_IN", "TIMESHARE_DF", "TIMESHARE_UT", "TIMESHARE_BG"]
+bucketStr = ["FIXPRI (>UI)", "TIMESHARE_FG", "TIMESHARE_IN", "TIMESHARE_DF", "TIMESHARE_UT", "TIMESHARE_BG"]
-@header(" {:>18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Interactivity Score', 'Last Timeshare Tick', 'pri_shift', 'highq'))
+@header("{:<18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Pending (us)', 'Interactivity Score', 'TG Boost', 'Highest Thread Pri'))
def GetSchedClutchBucketSummary(clutch_bucket):
- return " 0x{:>16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_clutch.sc_tg, clutch_bucket.scb_interactivity_score, clutch_bucket.scb_timeshare_tick, clutch_bucket.scb_pri_shift, clutch_bucket.scb_runq.highq)
+ tg_boost = kern.globals.sched_clutch_bucket_group_pri_boost[clutch_bucket.scb_group.scbg_clutch.sc_tg_priority]
+ pending_delta = kern.GetNanotimeFromAbstime(GetRecentTimestamp() - clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) / 1000
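+    # scct_timestamp of 18446744073709551615 (UINT64_MAX) means no pending timestamp was recorded; report zero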
+    if int(clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) == 18446744073709551615:
+ pending_delta = 0
+ return "0x{:<16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_group.scbg_clutch.sc_tg, pending_delta, clutch_bucket.scb_group.scbg_interactivity_data.scct_count, tg_boost, SchedPriorityStableQueueRootPri(clutch_bucket.scb_thread_runq, 'struct thread', 'th_clutch_runq_link'))
def ShowSchedClutchForPset(pset):
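+    """ Print the clutch scheduler hierarchy for one processor set: the root,
+        each root bucket (unbound and bound), their clutch buckets and threads.
+    """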
root_clutch = pset.pset_clutch_root
print "\n{:s} : {:d}\n\n".format("Current Timestamp", GetRecentTimestamp())
- print "{:>10s} | {:>20s} | {:>30s} | {:>18s} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Address", "Priority", "Count", "CPU Usage (MATUs)", "CPU Blocked (MATUs)", "Deadline (abs)") + GetSchedClutchBucketSummary.header
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Threads", "Address", "Pri (Base)", "Count", "Deadline (us)") + GetSchedClutchBucketSummary.header
print "=" * 300
- print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "*", "*", addressof(root_clutch), root_clutch.scr_priority, root_clutch.scr_thr_count, "*", "*", "*")
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} | ".format("Root", "*", "*", "*", addressof(root_clutch), (root_clutch.scr_priority if root_clutch.scr_thr_count > 0 else -1), root_clutch.scr_thr_count, "*")
print "-" * 300
- for i in range(1, 7):
- root_bucket = root_clutch.scr_buckets[i]
- print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15d} | ".format("*", bucketStr[i], "*", addressof(root_bucket), "*", "*", "*", "*", root_bucket.scrb_deadline)
- prioq = root_bucket.scrb_clutch_buckets
+ for i in range(0, 6):
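+        # The deadline is only meaningful for non-empty, non-FIXPRI root buckets
+        # (hence the i != 0 check below); it is shown as microseconds from now.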
+ root_bucket = root_clutch.scr_unbound_buckets[i]
+ root_bucket_deadline = 0
+ if root_bucket.scrb_clutch_buckets.scbrq_count != 0 and i != 0:
+ root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10s} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)], "*", "*", addressof(root_bucket), "*", "*", root_bucket_deadline)
+ clutch_bucket_runq = root_bucket.scrb_clutch_buckets
clutch_bucket_list = []
- for clutch_bucket in IteratePriorityQueue(prioq, 'struct sched_clutch_bucket', 'scb_pqlink'):
- clutch_bucket_list.append(clutch_bucket)
+ for pri in range(0,128):
+ clutch_bucket_circleq = clutch_bucket_runq.scbrq_queues[pri]
+ for clutch_bucket in IterateCircleQueue(clutch_bucket_circleq, 'struct sched_clutch_bucket', 'scb_runqlink'):
+ clutch_bucket_list.append(clutch_bucket)
if len(clutch_bucket_list) > 0:
clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True)
for clutch_bucket in clutch_bucket_list:
- cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used
- cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked
- print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30d} | {:>30d} | {:>15s} | ".format("*", "*", clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, cpu_used, cpu_blocked, "*") + GetSchedClutchBucketSummary(clutch_bucket)
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("", "", "", "", "", "", "", "")
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} | ".format("*", "*", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, "*", clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, "*") + GetSchedClutchBucketSummary(clutch_bucket)
+ runq = clutch_bucket.scb_clutchpri_prioq
+ for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'):
+ thread_name = GetThreadName(thread)[-24:]
+ if len(thread_name) == 0:
+ thread_name = "<unnamed thread>"
+ print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*")
+ print "-" * 300
+ root_bucket = root_clutch.scr_bound_buckets[i]
+ root_bucket_deadline = 0
+ if root_bucket.scrb_bound_thread_runq.count != 0:
+ root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000
+ print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10d} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)] + " [Bound]", "*", "*", addressof(root_bucket), "*", root_bucket.scrb_bound_thread_runq.count, root_bucket_deadline)
+ if root_bucket.scrb_bound_thread_runq.count == 0:
+ print "-" * 300
+ continue
+ thread_runq = root_bucket.scrb_bound_thread_runq
+ for pri in range(0, 128):
+ thread_circleq = thread_runq.queues[pri]
+ for thread in IterateCircleQueue(thread_circleq, 'struct thread', 'runq_links'):
+ thread_name = GetThreadName(thread)[-24:]
+ if len(thread_name) == 0:
+ thread_name = "<unnamed thread>"
+ print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*")
print "-" * 300
@lldb_command('showschedclutch')
if not root:
print "unknown arguments:", str(cmd_args)
return False
- print "{:>30s} : 0x{:16x}".format("Root", root)
- print "{:>30s} : 0x{:16x}".format("Pset", root.scr_pset)
- print "{:>30s} : {:d}".format("Priority", root.scr_priority)
+ print "{:>30s} : 0x{:<16x}".format("Root", root)
+ print "{:>30s} : 0x{:<16x}".format("Pset", root.scr_pset)
+ print "{:>30s} : {:d}".format("Priority", (root.scr_priority if root.scr_thr_count > 0 else -1))
print "{:>30s} : {:d}".format("Urgency", root.scr_urgency)
print "{:>30s} : {:d}".format("Threads", root.scr_thr_count)
print "{:>30s} : {:d}".format("Current Timestamp", GetRecentTimestamp())
if not root_bucket:
print "unknown arguments:", str(cmd_args)
return False
- print "{:<30s} : 0x{:16x}".format("Root Bucket", root_bucket)
+ print "{:<30s} : 0x{:<16x}".format("Root Bucket", root_bucket)
print "{:<30s} : {:s}".format("Bucket Name", bucketStr[int(root_bucket.scrb_bucket)])
- print "{:<30s} : {:d}".format("Deadline", root_bucket.scrb_deadline)
+ print "{:<30s} : {:d}".format("Deadline", (root_bucket.scrb_pqlink.deadline if root_bucket.scrb_clutch_buckets.scbrq_count != 0 else 0))
print "{:<30s} : {:d}".format("Current Timestamp", GetRecentTimestamp())
print "\n"
- prioq = root_bucket.scrb_clutch_buckets
+ clutch_bucket_runq = root_bucket.scrb_clutch_buckets
clutch_bucket_list = []
- for clutch_bucket in IteratePriorityQueue(prioq, 'struct sched_clutch_bucket', 'scb_pqlink'):
- clutch_bucket_list.append(clutch_bucket)
+ for pri in range(0,128):
+ clutch_bucket_circleq = clutch_bucket_runq.scbrq_queues[pri]
+ for clutch_bucket in IterateCircleQueue(clutch_bucket_circleq, 'struct sched_clutch_bucket', 'scb_runqlink'):
+ clutch_bucket_list.append(clutch_bucket)
if len(clutch_bucket_list) > 0:
print "=" * 240
print "{:>30s} | {:>18s} | {:>20s} | {:>20s} | ".format("Name", "Clutch Bucket", "Priority", "Count") + GetSchedClutchBucketSummary.header
print "=" * 240
clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True)
for clutch_bucket in clutch_bucket_list:
- print "{:>30s} | 0x{:16x} | {:>20d} | {:>20d} | ".format(clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket)
+ print "{:>30s} | 0x{:<16x} | {:>20d} | {:>20d} | ".format(clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket)
-@lldb_command('showschedclutchbucket')
-def ShowSchedClutchBucket(cmd_args=[]):
- """ show information about a clutch bucket in the sched clutch hierarchy
- Usage: showschedclutchbucket <clutch_bucket>
- """
- if not cmd_args:
- raise ArgumentError("Invalid argument")
- clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *")
- if not clutch_bucket:
- print "unknown arguments:", str(cmd_args)
- return False
- print "{:<30s} : 0x{:16x}".format("Clutch Bucket", clutch_bucket)
- print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_clutch.sc_tg.tg_name)
+def SchedClutchBucketDetails(clutch_bucket):
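+    """ Print detailed state for a single clutch bucket, followed by a summary
+        line for each thread on its clutch-priority runq.
+    """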
+ print "{:<30s} : 0x{:<16x}".format("Clutch Bucket", clutch_bucket)
+ print "{:<30s} : {:s}".format("Scheduling Bucket", bucketStr[(int)(clutch_bucket.scb_bucket)])
+ print "{:<30s} : 0x{:<16x}".format("Clutch Bucket Group", clutch_bucket.scb_group)
+ print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name)
print "{:<30s} : {:d}".format("Priority", clutch_bucket.scb_priority)
print "{:<30s} : {:d}".format("Thread Count", clutch_bucket.scb_thr_count)
- print "{:<30s} : 0x{:16x}".format("Thread Group", clutch_bucket.scb_clutch.sc_tg)
- cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used
- cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked
- print "{:<30s} : {:d}".format("CPU Used (MATUs)", cpu_used)
- print "{:<30s} : {:d}".format("CPU Blocked (MATUs)", cpu_blocked)
- print "{:<30s} : {:d}".format("Interactivity Score", clutch_bucket.scb_interactivity_score)
- print "{:<30s} : {:d}".format("Last Timeshare Update Tick", clutch_bucket.scb_timeshare_tick)
- print "{:<30s} : {:d}".format("Priority Shift", clutch_bucket.scb_pri_shift)
+ print "{:<30s} : 0x{:<16x}".format("Thread Group", clutch_bucket.scb_group.scbg_clutch.sc_tg)
+ print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Interactivity Score", clutch_bucket.scb_group.scbg_interactivity_data.scct_count)
+ print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Last Timeshare Update Tick", clutch_bucket.scb_group.scbg_timeshare_tick)
+ print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Priority Shift", clutch_bucket.scb_group.scbg_pri_shift)
print "\n"
runq = clutch_bucket.scb_clutchpri_prioq
thread_list = []
- for thread in IteratePriorityQueue(runq, 'struct thread', 'sched_clutchpri_link'):
+ for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'):
thread_list.append(thread)
if len(thread_list) > 0:
print "=" * 240
print "=" * 240
for thread in thread_list:
proc = Cast(thread.task.bsd_info, 'proc *')
- print GetThreadSummary(thread) + "{:s}".format(str(proc.p_comm))
+ print GetThreadSummary(thread) + "{:s}".format(GetProcName(proc))
+
+@lldb_command('showschedclutchbucket')
+def ShowSchedClutchBucket(cmd_args=[]):
+ """ show information about a clutch bucket in the sched clutch hierarchy
+ Usage: showschedclutchbucket <clutch_bucket>
+ """
+ if not cmd_args:
+ raise ArgumentError("Invalid argument")
+ clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *")
+ if not clutch_bucket:
+ print "unknown arguments:", str(cmd_args)
+ return False
+ SchedClutchBucketDetails(clutch_bucket)
@lldb_command('abs2nano')
def ShowAbstimeToNanoTime(cmd_args=[]):
TODO: on x86, if not in the debugger, then look at the scheduler
"""
if kern.arch == 'x86_64':
- return kern.globals.debugger_entry_time
+ most_recent_dispatch = GetSchedMostRecentDispatch(False)
+ if most_recent_dispatch > kern.globals.debugger_entry_time :
+ return most_recent_dispatch
+ else :
+ return kern.globals.debugger_entry_time
else :
return GetSchedMostRecentDispatch(False)
if unsigned(active_thread) != 0 :
task_val = active_thread.task
proc_val = Cast(task_val.bsd_info, 'proc *')
- proc_name = "<unknown>" if unsigned(proc_val) == 0 else str(proc_val.p_name)
+ proc_name = "<unknown>" if unsigned(proc_val) == 0 else GetProcName(proc_val)
last_dispatch = unsigned(current_processor.last_dispatch)
task_name = "unknown"
if task and unsigned(task.bsd_info):
p = Cast(task.bsd_info, 'proc *')
- task_name = str(p.p_name)
+ task_name = GetProcName(p)
sched_mode = ""
show_priority_pset_runq = 0
show_group_pset_runq = 0
show_clutch = 0
+ show_edge = 0
sched_string = str(kern.globals.sched_string)
if sched_string == "traditional":
show_priority_runq = 1
elif sched_string == "clutch":
show_clutch = 1
+ elif sched_string == "edge":
+ show_edge = 1
else :
print "Unknown sched_string {:s}".format(sched_string)
print "Scheduler: {:s}\n".format(sched_string)
- if show_clutch == 0:
+ if show_clutch == 0 and show_edge == 0:
run_buckets = kern.globals.sched_run_buckets
run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')]
fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')]
ShowRunQSummary(processor.runq)
print " \n"
- if show_clutch:
- print "=== Clutch Scheduler Hierarchy ===\n\n"
+ if show_clutch or show_edge:
+ cluster_type = "SMP"
+ if pset.pset_type == 1:
+ cluster_type = "E"
+ elif pset.pset_type == 2:
+ cluster_type = "P"
+ print "=== Clutch Scheduler Hierarchy Pset{:d} (Type: {:s}) ] ===\n\n".format(pset.pset_cluster_id, cluster_type)
ShowSchedClutchForPset(pset)
pset = pset.pset_list
ParanoidIterateLinkageChain.enable_paranoia = True
ParanoidIterateLinkageChain.enable_debug = False
+def LinkageChainEmpty(queue_head):
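+    """ Return True if a queue_head's linkage chain is empty, i.e. the head's
+        next pointer refers back to the head itself. Accepts either a queue_head
+        value or a pointer to one.
+    """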
+ if not queue_head.GetSBValue().GetType().IsPointerType() :
+ queue_head = addressof(queue_head)
+
+ # Mosh the value into a brand new value, to really get rid of its old cvalue history
+ # avoid using GetValueFromAddress
+ queue_head = value(queue_head.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(unsigned(queue_head))))
+ queue_head = cast(queue_head, 'struct queue_entry *')
+
+ link = queue_head.next
+
+ return unsigned(queue_head) == unsigned(link)
+
def bit_first(bitmap):
return bitmap.bit_length() - 1
# Macro: showallcallouts
-def ShowThreadCall(prefix, call):
+from kevent import GetKnoteKqueue
+
+def ShowThreadCall(prefix, call, recent_timestamp, is_pending=False):
"""
Print a description of a thread_call_t and its relationship to its expected fire time
"""
- func = call.tc_call.func
- param0 = call.tc_call.param0
- param1 = call.tc_call.param1
-
- iotes_desc = ""
- iotes_callout = kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource17timeoutAndReleaseEPvS0_")
- iotes_callout2 = kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource15timeoutSignaledEPvS0_")
+ func = call.tc_func
+ param0 = call.tc_param0
+ param1 = call.tc_param1
- if (unsigned(func) == unsigned(iotes_callout) or
- unsigned(func) == unsigned(iotes_callout2)) :
- iotes = Cast(call.tc_call.param0, 'IOTimerEventSource*')
- func = iotes.action
- param0 = iotes.owner
- param1 = unsigned(iotes)
+ is_iotes = False
func_name = kern.Symbolicate(func)
- if (func_name == "") :
- func_name = FindKmodNameForAddr(func)
- call_entry = call.tc_call
+ extra_string = ""
- recent_timestamp = GetRecentTimestamp()
+ strip_func = kern.StripKernelPAC(unsigned(func))
+
+ func_syms = kern.SymbolicateFromAddress(strip_func)
+ # returns an array of SBSymbol
+
+ if func_syms and func_syms[0] :
+ func_name = func_syms[0].GetName()
+
+ try :
+ if ("IOTimerEventSource::timeoutAndRelease" in func_name or
+ "IOTimerEventSource::timeoutSignaled" in func_name) :
+ iotes = Cast(call.tc_param0, 'IOTimerEventSource*')
+ try:
+ func = iotes.action
+ param0 = iotes.owner
+ param1 = unsigned(iotes)
+ except AttributeError:
+ # This is horrible, horrible, horrible. But it works. Needed because IOEventSource hides the action member in an
+ # anonymous union when XNU_PRIVATE_SOURCE is set. To grab it, we work backwards from the enabled member.
+ func = dereference(kern.GetValueFromAddress(addressof(iotes.enabled) - sizeof('IOEventSource::Action'), 'uint64_t *'))
+ param0 = iotes.owner
+ param1 = unsigned(iotes)
+
+ workloop = iotes.workLoop
+ thread = workloop.workThread
- # THREAD_CALL_CONTINUOUS 0x100
- kern.globals.mach_absolutetime_asleep
- if (call.tc_flags & 0x100) :
- timer_fire = call_entry.deadline - (recent_timestamp + kern.globals.mach_absolutetime_asleep)
+ is_iotes = True
+
+ # re-symbolicate the func we found inside the IOTES
+ strip_func = kern.StripKernelPAC(unsigned(func))
+ func_syms = kern.SymbolicateFromAddress(strip_func)
+ if func_syms and func_syms[0] :
+ func_name = func_syms[0].GetName()
+ else :
+ func_name = str(FindKmodNameForAddr(func))
+
+ # cast from IOThread to thread_t, because IOThread is sometimes opaque
+ thread = Cast(thread, 'thread_t')
+ thread_id = thread.thread_id
+ thread_name = GetThreadName(thread)
+
+ extra_string += "workloop thread: {:#x} ({:#x}) {:s}".format(thread, thread_id, thread_name)
+
+ if "filt_timerexpire" in func_name :
+ knote = Cast(call.tc_param0, 'struct knote *')
+ kqueue = GetKnoteKqueue(knote)
+ proc = kqueue.kq_p
+ proc_name = GetProcName(proc)
+ proc_pid = proc.p_pid
+
+ extra_string += "kq: {:#018x} {:s}[{:d}]".format(kqueue, proc_name, proc_pid)
+
+ if "mk_timer_expire" in func_name :
+ timer = Cast(call.tc_param0, 'struct mk_timer *')
+ port = timer.port
+
+ extra_string += "port: {:#018x} {:s}".format(port, GetPortDestinationSummary(port))
+
+ if "workq_kill_old_threads_call" in func_name :
+ workq = Cast(call.tc_param0, 'struct workqueue *')
+ proc = workq.wq_proc
+ proc_name = GetProcName(proc)
+ proc_pid = proc.p_pid
+
+ extra_string += "{:s}[{:d}]".format(proc_name, proc_pid)
+
+ if ("workq_add_new_threads_call" in func_name or
+ "realitexpire" in func_name):
+ proc = Cast(call.tc_param0, 'struct proc *')
+ proc_name = GetProcName(proc)
+ proc_pid = proc.p_pid
+
+ extra_string += "{:s}[{:d}]".format(proc_name, proc_pid)
+
+ except:
+ print "exception generating extra_string for call: {:#018x}".format(call)
+ if ShowThreadCall.enable_debug :
+ raise
+
+ if (func_name == "") :
+ func_name = FindKmodNameForAddr(func)
+
+ if (call.tc_flags & GetEnumValue('thread_call_flags_t::THREAD_CALL_FLAG_CONTINUOUS')) :
+ timer_fire = call.tc_pqlink.deadline - (recent_timestamp + kern.globals.mach_absolutetime_asleep)
else :
- timer_fire = call_entry.deadline - recent_timestamp
+ timer_fire = call.tc_pqlink.deadline - recent_timestamp
timer_fire_s = kern.GetNanotimeFromAbstime(timer_fire) / 1000000000.0
ttd_s = kern.GetNanotimeFromAbstime(call.tc_ttd) / 1000000000.0
- print "{:s}{:#018x}: {:18d} {:18d} {:03.06f} {:03.06f} {:#018x}({:#018x},{:#018x}) ({:s})".format(prefix,
- unsigned(call), call_entry.deadline, call.tc_soft_deadline, ttd_s, timer_fire_s,
- func, param0, param1, func_name)
+ if (is_pending) :
+ pending_time = call.tc_pending_timestamp - recent_timestamp
+ pending_time = kern.GetNanotimeFromAbstime(pending_time) / 1000000000.0
+
+ flags = int(call.tc_flags)
+ # TODO: extract this out of the thread_call_flags_t enum
+ thread_call_flags = {0x0:'', 0x1:'A', 0x2:'W', 0x4:'D', 0x8:'R', 0x10:'S', 0x20:'O',
+ 0x40:'P', 0x80:'L', 0x100:'C'}
+
+ flags_str = ''
+ mask = 0x1
+ while mask <= 0x100 :
+ flags_str += thread_call_flags[int(flags & mask)]
+ mask = mask << 1
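+    # Example: tc_flags == 0x5 (allocated | delayed) renders as "AD"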
+
+ if is_iotes :
+ flags_str += 'I'
+
+ if (is_pending) :
+ print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:16.06f} {:9s} " +
+ "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix,
+ unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s,
+ timer_fire_s, pending_time, flags_str,
+ func, param0, param1, func_name, extra_string)
+ else :
+ print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:9s} " +
+ "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix,
+ unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s,
+ timer_fire_s, flags_str,
+ func, param0, param1, func_name, extra_string)
+
+ShowThreadCall.enable_debug = False
+
+@header("{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:9s} {:>18s}".format(
+ "entry", "deadline", "soft_deadline",
+ "duration (s)", "to go (s)", "flags", "(*func) (param0, param1)"))
+def PrintThreadGroup(group):
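+    """ Print a thread call group's thread counts plus every call on its pending
+        and delayed (absolute- and continuous-time) queues.
+    """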
+ header = PrintThreadGroup.header
+ pending_header = "{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:>16s} {:9s} {:>18s}".format(
+ "entry", "deadline", "soft_deadline",
+ "duration (s)", "to go (s)", "pending", "flags", "(*func) (param0, param1)")
+
+ recent_timestamp = GetRecentTimestamp()
+
+ idle_timestamp_distance = group.idle_timestamp - recent_timestamp
+ idle_timestamp_distance_s = kern.GetNanotimeFromAbstime(idle_timestamp_distance) / 1000000000.0
+
+ is_parallel = ""
+
+ if (group.tcg_flags & GetEnumValue('thread_call_group_flags_t::TCG_PARALLEL')) :
+ is_parallel = " (parallel)"
+
+ print "Group: {g.tcg_name:s} ({:#18x}){:s}".format(unsigned(group), is_parallel, g=group)
+ print "\t" +"Thread Priority: {g.tcg_thread_pri:d}\n".format(g=group)
+ print ("\t" +"Active: {g.active_count:<3d} Idle: {g.idle_count:<3d}" +
+ "Blocked: {g.blocked_count:<3d} Pending: {g.pending_count:<3d}" +
+ "Target: {g.target_thread_count:<3d}\n").format(g=group)
+
+    if unsigned(group.idle_timestamp) != 0 :
+ print "\t" +"Idle Timestamp: {g.idle_timestamp:d} ({:03.06f})\n".format(idle_timestamp_distance_s,
+ g=group)
+
+ print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue))
+ if not LinkageChainEmpty(group.pending_queue) :
+ print "\t\t" + pending_header
+ for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_qlink"):
+ ShowThreadCall("\t\t", call, recent_timestamp, is_pending=True)
+
+ print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format(
+ addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0]))
+ if not LinkageChainEmpty(group.delayed_queues[0]) :
+ print "\t\t" + header
+ for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_qlink"):
+ ShowThreadCall("\t\t", call, recent_timestamp)
+
+ print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format(
+ addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1]))
+ if not LinkageChainEmpty(group.delayed_queues[1]) :
+ print "\t\t" + header
+ for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_qlink"):
+ ShowThreadCall("\t\t", call, recent_timestamp)
+
+def PrintThreadCallThreads() :
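+    """ Print each thread call thread in the kernel task and, if it is mid-call,
+        the timing details of the call it is currently executing.
+    """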
+ callout_flag = GetEnumValue('thread_tag_t::THREAD_TAG_CALLOUT')
+ recent_timestamp = GetRecentTimestamp()
+
+ for thread in IterateQueue(kern.globals.kernel_task.threads, 'thread *', 'task_threads'):
+ if (thread.thread_tag & callout_flag) :
+ print " {:#20x} {:#12x} {:s}".format(thread, thread.thread_id, GetThreadName(thread))
+ state = thread.thc_state
+ if state and state.thc_call :
+ print "\t" + PrintThreadGroup.header
+ ShowThreadCall("\t", state.thc_call, recent_timestamp)
+ soft_deadline = state.thc_call_soft_deadline
+ slop_time = state.thc_call_hard_deadline - soft_deadline
+ slop_time = kern.GetNanotimeFromAbstime(slop_time) / 1000000000.0
+ print "\t original soft deadline {:d}, hard deadline {:d} (leeway {:.06f}s)".format(
+ soft_deadline, state.thc_call_hard_deadline, slop_time)
+ enqueue_time = state.thc_call_pending_timestamp - soft_deadline
+ enqueue_time = kern.GetNanotimeFromAbstime(enqueue_time) / 1000000000.0
+ print "\t time to enqueue after deadline: {:.06f}s (enqueued at: {:d})".format(
+ enqueue_time, state.thc_call_pending_timestamp)
+ wait_time = state.thc_call_start - state.thc_call_pending_timestamp
+ wait_time = kern.GetNanotimeFromAbstime(wait_time) / 1000000000.0
+ print "\t time to start executing after enqueue: {:.06f}s (executing at: {:d})".format(
+ wait_time, state.thc_call_start)
+
+ if (state.thc_IOTES_invocation_timestamp) :
+ iotes_acquire_time = state.thc_IOTES_invocation_timestamp - state.thc_call_start
+ iotes_acquire_time = kern.GetNanotimeFromAbstime(iotes_acquire_time) / 1000000000.0
+ print "\t IOTES acquire time: {:.06f}s (acquired at: {:d})".format(
+ iotes_acquire_time, state.thc_IOTES_invocation_timestamp)
+
+
+@lldb_command('showcalloutgroup')
+def ShowCalloutGroup(cmd_args=None):
+ """ Prints out the pending and delayed thread calls for a specific group
+
+ Pass 'threads' to show the thread call threads themselves.
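+
+    Usage: showcalloutgroup <thread_call_group address> | threads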
+
+ Callout flags:
+
+ A - Allocated memory owned by thread_call.c
+ W - Wait - thread waiting for call to finish running
+ D - Delayed - deadline based
+ R - Running - currently executing on a thread
+ S - Signal - call from timer interrupt instead of thread
+ O - Once - pend the enqueue if re-armed while running
+ P - Reschedule pending - enqueue is pending due to re-arm while running
+ L - Rate-limited - (App Nap)
+ C - Continuous time - Timeout is in mach_continuous_time
+ I - Callout is an IOTimerEventSource
+ """
+ if not cmd_args:
+ print "No arguments passed"
+ print ShowCalloutGroup.__doc__
+ return False
+
+ if "threads" in cmd_args[0] :
+ PrintThreadCallThreads()
+ return
+
+ group = kern.GetValueFromAddress(cmd_args[0], 'struct thread_call_group *')
+ if not group:
+ print "unknown arguments:", str(cmd_args)
+ return False
+
+ PrintThreadGroup(group)
@lldb_command('showallcallouts')
def ShowAllCallouts(cmd_args=None):
""" Prints out the pending and delayed thread calls for the thread call groups
- """
+ Callout flags:
+
+ A - Allocated memory owned by thread_call.c
+ W - Wait - thread waiting for call to finish running
+ D - Delayed - deadline based
+ R - Running - currently executing on a thread
+ S - Signal - call from timer interrupt instead of thread
+ O - Once - pend the enqueue if re-armed while running
+ P - Reschedule pending - enqueue is pending due to re-arm while running
+ L - Rate-limited - (App Nap)
+ C - Continuous time - Timeout is in mach_continuous_time
+ I - Callout is an IOTimerEventSource
+ """
index_max = GetEnumValue('thread_call_index_t::THREAD_CALL_INDEX_MAX')
for i in range (0, index_max) :
- group = kern.globals.thread_call_groups[i]
-
- print "Group {i:d}: {g.tcg_name:s} ({:>#18x})".format(addressof(group), i=i, g=group)
- print "\t" +"Active: {g.active_count:d} Idle: {g.idle_count:d}\n".format(g=group)
- print "\t" +"Blocked: {g.blocked_count:d} Pending: {g.pending_count:d}\n".format(g=group)
- print "\t" +"Target: {g.target_thread_count:d}\n".format(g=group)
-
- print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue))
- for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_call.q_link"):
- ShowThreadCall("\t\t", call)
-
- print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format(
- addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0]))
- for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_call.q_link"):
- ShowThreadCall("\t\t", call)
-
- print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format(
- addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1]))
- for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_call.q_link"):
- ShowThreadCall("\t\t", call)
+ group = addressof(kern.globals.thread_call_groups[i])
+ PrintThreadGroup(group)
+
+ print "Thread Call Threads:"
+ PrintThreadCallThreads()
# EndMacro: showallcallouts