]>
Commit | Line | Data |
---|---|---|
39236c6e A |
1 | from xnu import * |
2 | from utils import * | |
3 | from process import * | |
5ba3f43e A |
4 | from misc import * |
5 | from memory import * | |
f427ee49 | 6 | from ipc import * |
39236c6e A |
7 | |
8 | # TODO: write scheduler related macros here | |
9 | ||
39037602 A |
10 | # Macro: showallprocrunqcount |
11 | ||
12 | @lldb_command('showallprocrunqcount') | |
13 | def ShowAllProcRunQCount(cmd_args=None): | |
14 | """ Prints out the runq count for all processors | |
15 | """ | |
16 | out_str = "Processor\t# Runnable\n" | |
17 | processor_itr = kern.globals.processor_list | |
18 | while processor_itr: | |
19 | out_str += "{:d}\t\t{:d}\n".format(processor_itr.cpu_id, processor_itr.runq.count) | |
20 | processor_itr = processor_itr.processor_list | |
5ba3f43e | 21 | # out_str += "RT:\t\t{:d}\n".format(kern.globals.rt_runq.count) |
39037602 A |
22 | print out_str |
23 | ||
24 | # EndMacro: showallprocrunqcount | |
25 | ||
39236c6e | 26 | # Macro: showinterrupts |
3e170ce0 | 27 | |
39236c6e A |
28 | @lldb_command('showinterrupts') |
29 | def ShowInterrupts(cmd_args=None): | |
30 | """ Prints IRQ, IPI and TMR counts for each CPU | |
39037602 A |
31 | """ |
32 | ||
5ba3f43e | 33 | if not kern.arch.startswith('arm'): |
39037602 A |
34 | print "showinterrupts is only supported on arm/arm64" |
35 | return | |
36 | ||
3e170ce0 | 37 | base_address = kern.GetLoadAddressForSymbol('CpuDataEntries') |
5ba3f43e A |
38 | struct_size = 16 |
39 | x = 0 | |
40 | y = 0 | |
41 | while x < unsigned(kern.globals.machine_info.physical_cpu): | |
42 | element = kern.GetValueFromAddress(base_address + (y * struct_size), 'uintptr_t *')[1] | |
43 | if element: | |
44 | cpu_data_entry = Cast(element, 'cpu_data_t *') | |
45 | print "CPU {} IRQ: {:d}\n".format(y, cpu_data_entry.cpu_stat.irq_ex_cnt) | |
46 | print "CPU {} IPI: {:d}\n".format(y, cpu_data_entry.cpu_stat.ipi_cnt) | |
cb323159 | 47 | print "CPU {} PMI: {:d}\n".format(y, cpu_data_entry.cpu_monotonic.mtc_npmis) |
5ba3f43e A |
48 | print "CPU {} TMR: {:d}\n".format(y, cpu_data_entry.cpu_stat.timer_cnt) |
49 | x = x + 1 | |
50 | y = y + 1 | |
51 | ||
39236c6e A |
52 | # EndMacro: showinterrupts |
53 | ||
54 | # Macro: showactiveinterrupts | |
3e170ce0 | 55 | |
39236c6e A |
56 | @lldb_command('showactiveinterrupts') |
57 | def ShowActiveInterrupts(cmd_args=None): | |
58 | """ Prints the interrupts that are unmasked & active with the Interrupt Controller | |
59 | Usage: showactiveinterrupts <address of Interrupt Controller object> | |
60 | """ | |
61 | if not cmd_args: | |
62 | print "No arguments passed" | |
63 | print ShowActiveInterrupts.__doc__ | |
64 | return False | |
65 | aic = kern.GetValueFromAddress(cmd_args[0], 'AppleInterruptController *') | |
66 | if not aic: | |
67 | print "unknown arguments:", str(cmd_args) | |
68 | return False | |
69 | ||
70 | aic_base = unsigned(aic._aicBaseAddress) | |
71 | current_interrupt = 0 | |
72 | aic_imc_base = aic_base + 0x4180 | |
73 | aic_him_offset = 0x80 | |
74 | current_pointer = aic_imc_base | |
75 | unmasked = dereference(kern.GetValueFromAddress(current_pointer, 'uintptr_t *')) | |
76 | active = dereference(kern.GetValueFromAddress(current_pointer + aic_him_offset, 'uintptr_t *')) | |
77 | group_count = 0 | |
78 | mask = 1 | |
79 | while current_interrupt < 192: | |
80 | if (((unmasked & mask) == 0) and (active & mask)): | |
81 | print "Interrupt {:d} unmasked and active\n".format(current_interrupt) | |
82 | current_interrupt = current_interrupt + 1 | |
83 | if (current_interrupt % 32 == 0): | |
84 | mask = 1 | |
85 | group_count = group_count + 1 | |
86 | unmasked = dereference(kern.GetValueFromAddress(current_pointer + (4 * group_count), 'uintptr_t *')) | |
87 | active = dereference(kern.GetValueFromAddress((current_pointer + aic_him_offset) + (4 * group_count), 'uintptr_t *')) | |
88 | else: | |
89 | mask = mask << 1 | |
39236c6e A |
90 | # EndMacro: showactiveinterrupts |
91 | ||
39037602 A |
92 | # Macro: showirqbyipitimerratio |
93 | @lldb_command('showirqbyipitimerratio') | |
94 | def ShowIrqByIpiTimerRatio(cmd_args=None): | |
95 | """ Prints the ratio of IRQ by sum of IPI & TMR counts for each CPU | |
96 | """ | |
97 | if kern.arch == "x86_64": | |
98 | print "This macro is not supported on x86_64 architecture" | |
99 | return | |
100 | ||
101 | out_str = "IRQ-IT Ratio: " | |
102 | base_address = kern.GetLoadAddressForSymbol('CpuDataEntries') | |
103 | struct_size = 16 | |
5ba3f43e A |
104 | x = 0 |
105 | y = 0 | |
106 | while x < unsigned(kern.globals.machine_info.physical_cpu): | |
107 | element = kern.GetValueFromAddress(base_address + (y * struct_size), 'uintptr_t *')[1] | |
108 | if element: | |
109 | cpu_data_entry = Cast(element, 'cpu_data_t *') | |
110 | out_str += " CPU {} [{:.2f}]".format(y, float(cpu_data_entry.cpu_stat.irq_ex_cnt)/(cpu_data_entry.cpu_stat.ipi_cnt + cpu_data_entry.cpu_stat.timer_cnt)) | |
111 | x = x + 1 | |
112 | y = y + 1 | |
39037602 A |
113 | print out_str |
114 | ||
115 | # EndMacro: showirqbyipitimerratio | |
116 | ||
117 | #Macro: showinterruptsourceinfo | |
118 | @lldb_command('showinterruptsourceinfo') | |
119 | def showinterruptsourceinfo(cmd_args = None): | |
120 | """ Extract information of interrupt source causing interrupt storms. | |
121 | """ | |
122 | if not cmd_args: | |
123 | print "No arguments passed" | |
124 | return False | |
125 | #Dump IOInterruptVector object | |
126 | print "--- Dumping IOInterruptVector object ---\n" | |
127 | object_info = lldb_run_command("dumpobject {:s} IOInterruptVector".format(cmd_args[0])) | |
128 | print object_info | |
129 | print "--- Dumping IOFilterInterruptEventSource object ---\n" | |
130 | #Dump the IOFilterInterruptEventSource object. | |
131 | target_info=re.search('target =\s+(.*)',object_info) | |
132 | target= target_info.group() | |
133 | target= target.split() | |
134 | #Dump the Object pointer of the source who is triggering the Interrupts. | |
135 | vector_info=lldb_run_command("dumpobject {:s} ".format(target[2])) | |
136 | print vector_info | |
137 | owner_info= re.search('owner =\s+(.*)',vector_info) | |
138 | owner= owner_info.group() | |
139 | owner= owner.split() | |
140 | print "\n\n" | |
141 | out=lldb_run_command(" dumpobject {:s}".format(owner[2])) | |
142 | print out | |
143 | ||
144 | # EndMacro: showinterruptsourceinfo | |
3e170ce0 A |
145 | |
146 | @lldb_command('showcurrentabstime') | |
147 | def ShowCurremtAbsTime(cmd_args=None): | |
148 | """ Routine to print latest absolute time known to system before being stopped. | |
149 | Usage: showcurrentabstime | |
150 | """ | |
151 | pset = addressof(kern.globals.pset0) | |
d9a64523 | 152 | processor_array = kern.globals.processor_array |
3e170ce0 A |
153 | cur_abstime = 0 |
154 | ||
155 | while unsigned(pset) != 0: | |
d9a64523 A |
156 | cpu_bitmap = int(pset.cpu_bitmask) |
157 | for cpuid in IterateBitmap(cpu_bitmap): | |
158 | processor = processor_array[cpuid] | |
3e170ce0 A |
159 | if unsigned(processor.last_dispatch) > cur_abstime: |
160 | cur_abstime = unsigned(processor.last_dispatch) | |
161 | ||
162 | pset = pset.pset_list | |
163 | ||
164 | print "Last dispatch time known: %d MATUs" % cur_abstime | |
165 | ||
ea3f0419 | 166 | bucketStr = ["FIXPRI (>UI)", "TIMESHARE_FG", "TIMESHARE_IN", "TIMESHARE_DF", "TIMESHARE_UT", "TIMESHARE_BG"] |
cb323159 | 167 | |
f427ee49 | 168 | @header("{:<18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Pending (us)', 'Interactivity Score', 'TG Boost', 'Highest Thread Pri')) |
cb323159 | 169 | def GetSchedClutchBucketSummary(clutch_bucket): |
f427ee49 A |
170 | tg_boost = kern.globals.sched_clutch_bucket_group_pri_boost[clutch_bucket.scb_group.scbg_clutch.sc_tg_priority] |
171 | pending_delta = kern.GetNanotimeFromAbstime(GetRecentTimestamp() - clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) / 1000 | |
172 | if (int)(clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) == 18446744073709551615: | |
173 | pending_delta = 0 | |
174 | return "0x{:<16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_group.scbg_clutch.sc_tg, pending_delta, clutch_bucket.scb_group.scbg_interactivity_data.scct_count, tg_boost, SchedPriorityStableQueueRootPri(clutch_bucket.scb_thread_runq, 'struct thread', 'th_clutch_runq_link')) | |
cb323159 A |
175 | |
176 | def ShowSchedClutchForPset(pset): | |
177 | root_clutch = pset.pset_clutch_root | |
178 | print "\n{:s} : {:d}\n\n".format("Current Timestamp", GetRecentTimestamp()) | |
f427ee49 | 179 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Threads", "Address", "Pri (Base)", "Count", "Deadline (us)") + GetSchedClutchBucketSummary.header |
cb323159 | 180 | print "=" * 300 |
f427ee49 | 181 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} | ".format("Root", "*", "*", "*", addressof(root_clutch), (root_clutch.scr_priority if root_clutch.scr_thr_count > 0 else -1), root_clutch.scr_thr_count, "*") |
cb323159 A |
182 | print "-" * 300 |
183 | ||
ea3f0419 | 184 | for i in range(0, 6): |
f427ee49 A |
185 | root_bucket = root_clutch.scr_unbound_buckets[i] |
186 | root_bucket_deadline = 0 | |
187 | if root_bucket.scrb_clutch_buckets.scbrq_count != 0 and i != 0: | |
188 | root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000 | |
189 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10s} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)], "*", "*", addressof(root_bucket), "*", "*", root_bucket_deadline) | |
ea3f0419 | 190 | clutch_bucket_runq = root_bucket.scrb_clutch_buckets |
cb323159 | 191 | clutch_bucket_list = [] |
ea3f0419 A |
192 | for pri in range(0,128): |
193 | clutch_bucket_circleq = clutch_bucket_runq.scbrq_queues[pri] | |
194 | for clutch_bucket in IterateCircleQueue(clutch_bucket_circleq, 'struct sched_clutch_bucket', 'scb_runqlink'): | |
195 | clutch_bucket_list.append(clutch_bucket) | |
cb323159 A |
196 | if len(clutch_bucket_list) > 0: |
197 | clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True) | |
198 | for clutch_bucket in clutch_bucket_list: | |
f427ee49 A |
199 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("", "", "", "", "", "", "", "") |
200 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} | ".format("*", "*", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, "*", clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, "*") + GetSchedClutchBucketSummary(clutch_bucket) | |
201 | runq = clutch_bucket.scb_clutchpri_prioq | |
202 | for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'): | |
203 | thread_name = GetThreadName(thread)[-24:] | |
204 | if len(thread_name) == 0: | |
205 | thread_name = "<unnamed thread>" | |
206 | print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*") | |
207 | print "-" * 300 | |
208 | root_bucket = root_clutch.scr_bound_buckets[i] | |
209 | root_bucket_deadline = 0 | |
210 | if root_bucket.scrb_bound_thread_runq.count != 0: | |
211 | root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000 | |
212 | print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10d} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)] + " [Bound]", "*", "*", addressof(root_bucket), "*", root_bucket.scrb_bound_thread_runq.count, root_bucket_deadline) | |
213 | if root_bucket.scrb_bound_thread_runq.count == 0: | |
214 | print "-" * 300 | |
215 | continue | |
216 | thread_runq = root_bucket.scrb_bound_thread_runq | |
217 | for pri in range(0, 128): | |
218 | thread_circleq = thread_runq.queues[pri] | |
219 | for thread in IterateCircleQueue(thread_circleq, 'struct thread', 'runq_links'): | |
220 | thread_name = GetThreadName(thread)[-24:] | |
221 | if len(thread_name) == 0: | |
222 | thread_name = "<unnamed thread>" | |
223 | print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*") | |
cb323159 A |
224 | print "-" * 300 |
225 | ||
226 | @lldb_command('showschedclutch') | |
227 | def ShowSchedClutch(cmd_args=[]): | |
228 | """ Routine to print the clutch scheduler hierarchy. | |
229 | Usage: showschedclutch <pset> | |
230 | """ | |
231 | if not cmd_args: | |
232 | raise ArgumentError("Invalid argument") | |
233 | pset = kern.GetValueFromAddress(cmd_args[0], "processor_set_t") | |
234 | ShowSchedClutchForPset(pset) | |
235 | ||
236 | @lldb_command('showschedclutchroot') | |
237 | def ShowSchedClutchRoot(cmd_args=[]): | |
238 | """ show information about the root of the sched clutch hierarchy | |
239 | Usage: showschedclutchroot <root> | |
240 | """ | |
241 | if not cmd_args: | |
242 | raise ArgumentError("Invalid argument") | |
243 | root = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_root *") | |
244 | if not root: | |
245 | print "unknown arguments:", str(cmd_args) | |
246 | return False | |
f427ee49 A |
247 | print "{:>30s} : 0x{:<16x}".format("Root", root) |
248 | print "{:>30s} : 0x{:<16x}".format("Pset", root.scr_pset) | |
249 | print "{:>30s} : {:d}".format("Priority", (root.scr_priority if root.scr_thr_count > 0 else -1)) | |
cb323159 A |
250 | print "{:>30s} : {:d}".format("Urgency", root.scr_urgency) |
251 | print "{:>30s} : {:d}".format("Threads", root.scr_thr_count) | |
252 | print "{:>30s} : {:d}".format("Current Timestamp", GetRecentTimestamp()) | |
253 | print "{:>30s} : {:b} (BG/UT/DF/IN/FG/FIX/NULL)".format("Runnable Root Buckets Bitmap", int(root.scr_runnable_bitmap[0])) | |
254 | ||
255 | @lldb_command('showschedclutchrootbucket') | |
256 | def ShowSchedClutchRootBucket(cmd_args=[]): | |
257 | """ show information about a root bucket in the sched clutch hierarchy | |
258 | Usage: showschedclutchrootbucket <root_bucket> | |
259 | """ | |
260 | if not cmd_args: | |
261 | raise ArgumentError("Invalid argument") | |
262 | root_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_root_bucket *") | |
263 | if not root_bucket: | |
264 | print "unknown arguments:", str(cmd_args) | |
265 | return False | |
f427ee49 | 266 | print "{:<30s} : 0x{:<16x}".format("Root Bucket", root_bucket) |
cb323159 | 267 | print "{:<30s} : {:s}".format("Bucket Name", bucketStr[int(root_bucket.scrb_bucket)]) |
f427ee49 | 268 | print "{:<30s} : {:d}".format("Deadline", (root_bucket.scrb_pqlink.deadline if root_bucket.scrb_clutch_buckets.scbrq_count != 0 else 0)) |
cb323159 A |
269 | print "{:<30s} : {:d}".format("Current Timestamp", GetRecentTimestamp()) |
270 | print "\n" | |
ea3f0419 | 271 | clutch_bucket_runq = root_bucket.scrb_clutch_buckets |
cb323159 | 272 | clutch_bucket_list = [] |
ea3f0419 A |
273 | for pri in range(0,128): |
274 | clutch_bucket_circleq = clutch_bucket_runq.scbrq_queues[pri] | |
275 | for clutch_bucket in IterateCircleQueue(clutch_bucket_circleq, 'struct sched_clutch_bucket', 'scb_runqlink'): | |
276 | clutch_bucket_list.append(clutch_bucket) | |
cb323159 A |
277 | if len(clutch_bucket_list) > 0: |
278 | print "=" * 240 | |
279 | print "{:>30s} | {:>18s} | {:>20s} | {:>20s} | ".format("Name", "Clutch Bucket", "Priority", "Count") + GetSchedClutchBucketSummary.header | |
280 | print "=" * 240 | |
281 | clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True) | |
282 | for clutch_bucket in clutch_bucket_list: | |
f427ee49 | 283 | print "{:>30s} | 0x{:<16x} | {:>20d} | {:>20d} | ".format(clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket) |
cb323159 | 284 | |
f427ee49 A |
285 | def SchedClutchBucketDetails(clutch_bucket): |
286 | print "{:<30s} : 0x{:<16x}".format("Clutch Bucket", clutch_bucket) | |
287 | print "{:<30s} : {:s}".format("Scheduling Bucket", bucketStr[(int)(clutch_bucket.scb_bucket)]) | |
288 | print "{:<30s} : 0x{:<16x}".format("Clutch Bucket Group", clutch_bucket.scb_group) | |
289 | print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name) | |
cb323159 A |
290 | print "{:<30s} : {:d}".format("Priority", clutch_bucket.scb_priority) |
291 | print "{:<30s} : {:d}".format("Thread Count", clutch_bucket.scb_thr_count) | |
f427ee49 A |
292 | print "{:<30s} : 0x{:<16x}".format("Thread Group", clutch_bucket.scb_group.scbg_clutch.sc_tg) |
293 | print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Interactivity Score", clutch_bucket.scb_group.scbg_interactivity_data.scct_count) | |
294 | print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Last Timeshare Update Tick", clutch_bucket.scb_group.scbg_timeshare_tick) | |
295 | print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Priority Shift", clutch_bucket.scb_group.scbg_pri_shift) | |
cb323159 A |
296 | print "\n" |
297 | runq = clutch_bucket.scb_clutchpri_prioq | |
298 | thread_list = [] | |
f427ee49 | 299 | for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'): |
cb323159 A |
300 | thread_list.append(thread) |
301 | if len(thread_list) > 0: | |
302 | print "=" * 240 | |
303 | print GetThreadSummary.header + "{:s}".format("Process Name") | |
304 | print "=" * 240 | |
305 | for thread in thread_list: | |
306 | proc = Cast(thread.task.bsd_info, 'proc *') | |
f427ee49 A |
307 | print GetThreadSummary(thread) + "{:s}".format(GetProcName(proc)) |
308 | ||
309 | @lldb_command('showschedclutchbucket') | |
310 | def ShowSchedClutchBucket(cmd_args=[]): | |
311 | """ show information about a clutch bucket in the sched clutch hierarchy | |
312 | Usage: showschedclutchbucket <clutch_bucket> | |
313 | """ | |
314 | if not cmd_args: | |
315 | raise ArgumentError("Invalid argument") | |
316 | clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *") | |
317 | if not clutch_bucket: | |
318 | print "unknown arguments:", str(cmd_args) | |
319 | return False | |
320 | SchedClutchBucketDetails(clutch_bucket) | |
3e170ce0 A |
321 | |
322 | @lldb_command('abs2nano') | |
323 | def ShowAbstimeToNanoTime(cmd_args=[]): | |
324 | """ convert mach_absolute_time units to nano seconds | |
325 | Usage: (lldb) abs2nano <timestamp in MATUs> | |
326 | """ | |
327 | if not cmd_args: | |
328 | raise ArgumentError("Invalid argument") | |
329 | timedata = ArgumentStringToInt(cmd_args[0]) | |
39037602 A |
330 | ns = kern.GetNanotimeFromAbstime(timedata) |
331 | us = float(ns) / 1000 | |
332 | ms = us / 1000 | |
333 | s = ms / 1000 | |
334 | ||
335 | if s > 60 : | |
336 | m = s / 60 | |
337 | h = m / 60 | |
338 | d = h / 24 | |
339 | ||
340 | print "{:d} ns, {:f} us, {:f} ms, {:f} s, {:f} m, {:f} h, {:f} d".format(ns, us, ms, s, m, h, d) | |
341 | else: | |
342 | print "{:d} ns, {:f} us, {:f} ms, {:f} s".format(ns, us, ms, s) | |
3e170ce0 A |
343 | |
344 | # Macro: showschedhistory | |
345 | ||
5ba3f43e A |
346 | def GetRecentTimestamp(): |
347 | """ | |
348 | Return a recent timestamp. | |
349 | TODO: on x86, if not in the debugger, then look at the scheduler | |
350 | """ | |
351 | if kern.arch == 'x86_64': | |
f427ee49 A |
352 | most_recent_dispatch = GetSchedMostRecentDispatch(False) |
353 | if most_recent_dispatch > kern.globals.debugger_entry_time : | |
354 | return most_recent_dispatch | |
355 | else : | |
356 | return kern.globals.debugger_entry_time | |
5ba3f43e A |
357 | else : |
358 | return GetSchedMostRecentDispatch(False) | |
359 | ||
39037602 A |
360 | def GetSchedMostRecentDispatch(show_processor_details=False): |
361 | """ Return the most recent dispatch on the system, printing processor | |
362 | details if argument is true. | |
363 | """ | |
364 | processor_list = kern.globals.processor_list | |
365 | ||
366 | most_recent_dispatch = 0 | |
367 | current_processor = processor_list | |
368 | ||
369 | while unsigned(current_processor) > 0: | |
370 | active_thread = current_processor.active_thread | |
371 | if unsigned(active_thread) != 0 : | |
372 | task_val = active_thread.task | |
373 | proc_val = Cast(task_val.bsd_info, 'proc *') | |
f427ee49 | 374 | proc_name = "<unknown>" if unsigned(proc_val) == 0 else GetProcName(proc_val) |
39037602 A |
375 | |
376 | last_dispatch = unsigned(current_processor.last_dispatch) | |
377 | ||
378 | if kern.arch == 'x86_64': | |
379 | cpu_data = kern.globals.cpu_data_ptr[current_processor.cpu_id] | |
380 | if (cpu_data != 0) : | |
381 | cpu_debugger_time = max(cpu_data.debugger_entry_time, cpu_data.debugger_ipi_time) | |
382 | time_since_dispatch = unsigned(cpu_debugger_time - last_dispatch) | |
383 | time_since_dispatch_us = kern.GetNanotimeFromAbstime(time_since_dispatch) / 1000.0 | |
384 | time_since_debugger = unsigned(cpu_debugger_time - kern.globals.debugger_entry_time) | |
385 | time_since_debugger_us = kern.GetNanotimeFromAbstime(time_since_debugger) / 1000.0 | |
386 | ||
387 | if show_processor_details: | |
388 | print "Processor last dispatch: {:16d} Entered debugger: {:16d} ({:8.3f} us after dispatch, {:8.3f} us after debugger) Active thread: 0x{t:<16x} 0x{t.thread_id:<8x} {proc_name:s}".format(last_dispatch, cpu_debugger_time, | |
389 | time_since_dispatch_us, time_since_debugger_us, t=active_thread, proc_name=proc_name) | |
390 | else: | |
391 | if show_processor_details: | |
392 | print "Processor last dispatch: {:16d} Active thread: 0x{t:<16x} 0x{t.thread_id:<8x} {proc_name:s}".format(last_dispatch, t=active_thread, proc_name=proc_name) | |
393 | ||
394 | if last_dispatch > most_recent_dispatch: | |
395 | most_recent_dispatch = last_dispatch | |
396 | ||
397 | current_processor = current_processor.processor_list | |
398 | ||
399 | return most_recent_dispatch | |
400 | ||
5ba3f43e | 401 | @header("{:<18s} {:<10s} {:>16s} {:>16s} {:>16s} {:>16s} {:>18s} {:>16s} {:>16s} {:>16s} {:>16s} {:2s} {:2s} {:2s} {:>2s} {:<19s} {:<9s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>11s} {:>8s}".format("thread", "id", "on-core", "off-core", "runnable", "prichange", "last-duration (us)", "since-off (us)", "since-on (us)", "pending (us)", "pri-change (us)", "BP", "SP", "TP", "MP", "sched-mode", "state", "cpu-usage", "delta", "sch-usage", "stamp", "shift", "task", "thread-name")) |
3e170ce0 | 402 | def ShowThreadSchedHistory(thread, most_recent_dispatch): |
39037602 A |
403 | """ Given a thread and the most recent dispatch time of a thread on the |
404 | system, print out details about scheduler history for the thread. | |
405 | """ | |
406 | ||
3e170ce0 A |
407 | thread_name = "" |
408 | ||
39037602 | 409 | if unsigned(thread.uthread) != 0: |
3e170ce0 | 410 | uthread = Cast(thread.uthread, 'uthread *') |
39037602 A |
411 | # Doing the straightforward thing blows up weirdly, so use some indirections to get back on track |
412 | if unsigned(uthread.pth_name) != 0 : | |
413 | thread_name = str(kern.GetValueFromAddress(unsigned(uthread.pth_name), 'char*')) | |
3e170ce0 A |
414 | |
415 | task = thread.task | |
416 | task_name = "unknown" | |
417 | if task and unsigned(task.bsd_info): | |
418 | p = Cast(task.bsd_info, 'proc *') | |
f427ee49 | 419 | task_name = GetProcName(p) |
3e170ce0 A |
420 | |
421 | sched_mode = "" | |
422 | ||
423 | mode = str(thread.sched_mode) | |
424 | if "TIMESHARE" in mode: | |
425 | sched_mode+="timeshare" | |
426 | elif "FIXED" in mode: | |
427 | sched_mode+="fixed" | |
428 | elif "REALTIME" in mode: | |
429 | sched_mode+="realtime" | |
430 | ||
431 | if (unsigned(thread.bound_processor) != 0): | |
432 | sched_mode+="-bound" | |
433 | ||
434 | # TH_SFLAG_THROTTLED | |
435 | if (unsigned(thread.sched_flags) & 0x0004): | |
436 | sched_mode+="-BG" | |
437 | ||
438 | state = thread.state | |
439 | ||
440 | thread_state_chars = {0x0:'', 0x1:'W', 0x2:'S', 0x4:'R', 0x8:'U', 0x10:'H', 0x20:'A', 0x40:'P', 0x80:'I'} | |
441 | state_str = '' | |
442 | mask = 0x1 | |
443 | while mask <= 0x80 : | |
444 | state_str += thread_state_chars[int(state & mask)] | |
445 | mask = mask << 1 | |
446 | ||
447 | last_on = thread.computation_epoch | |
448 | last_off = thread.last_run_time | |
39037602 | 449 | last_runnable = thread.last_made_runnable_time |
5ba3f43e A |
450 | last_prichange = thread.last_basepri_change_time |
451 | ||
39037602 A |
452 | if int(last_runnable) == 18446744073709551615 : |
453 | last_runnable = 0 | |
3e170ce0 | 454 | |
5ba3f43e A |
455 | if int(last_prichange) == 18446744073709551615 : |
456 | last_prichange = 0 | |
457 | ||
3e170ce0 A |
458 | time_on_abs = unsigned(last_off - last_on) |
459 | time_on_us = kern.GetNanotimeFromAbstime(time_on_abs) / 1000.0 | |
460 | ||
39037602 A |
461 | time_pending_abs = unsigned(most_recent_dispatch - last_runnable) |
462 | time_pending_us = kern.GetNanotimeFromAbstime(time_pending_abs) / 1000.0 | |
5ba3f43e | 463 | |
39037602 A |
464 | if int(last_runnable) == 0 : |
465 | time_pending_us = 0 | |
466 | ||
5ba3f43e A |
467 | last_prichange_abs = unsigned(most_recent_dispatch - last_prichange) |
468 | last_prichange_us = kern.GetNanotimeFromAbstime(last_prichange_abs) / 1000.0 | |
469 | ||
470 | if int(last_prichange) == 0 : | |
471 | last_prichange_us = 0 | |
472 | ||
3e170ce0 A |
473 | time_since_off_abs = unsigned(most_recent_dispatch - last_off) |
474 | time_since_off_us = kern.GetNanotimeFromAbstime(time_since_off_abs) / 1000.0 | |
475 | time_since_on_abs = unsigned(most_recent_dispatch - last_on) | |
476 | time_since_on_us = kern.GetNanotimeFromAbstime(time_since_on_abs) / 1000.0 | |
477 | ||
5ba3f43e | 478 | fmt = "0x{t:<16x} 0x{t.thread_id:<8x} {t.computation_epoch:16d} {t.last_run_time:16d} {last_runnable:16d} {last_prichange:16d} {time_on_us:18.3f} {time_since_off_us:16.3f} {time_since_on_us:16.3f} {time_pending_us:16.3f} {last_prichange_us:16.3f}" |
3e170ce0 A |
479 | fmt2 = " {t.base_pri:2d} {t.sched_pri:2d} {t.task_priority:2d} {t.max_priority:2d} {sched_mode:19s}" |
480 | fmt3 = " {state:9s} {t.cpu_usage:10d} {t.cpu_delta:10d} {t.sched_usage:10d} {t.sched_stamp:10d} {t.pri_shift:10d} {name:s} {thread_name:s}" | |
481 | ||
5ba3f43e | 482 | out_str = fmt.format(t=thread, time_on_us=time_on_us, time_since_off_us=time_since_off_us, time_since_on_us=time_since_on_us, last_runnable=last_runnable, time_pending_us=time_pending_us, last_prichange=last_prichange, last_prichange_us=last_prichange_us) |
3e170ce0 A |
483 | out_str += fmt2.format(t=thread, sched_mode=sched_mode) |
484 | out_str += fmt3.format(t=thread, state=state_str, name=task_name, thread_name=thread_name) | |
5ba3f43e | 485 | |
39037602 | 486 | print out_str |
3e170ce0 | 487 | |
5ba3f43e A |
488 | def SortThreads(threads, column): |
489 | if column != 'on-core' and column != 'off-core' and column != 'last-duration': | |
490 | raise ArgumentError("unsupported sort column") | |
491 | if column == 'on-core': | |
492 | threads.sort(key=lambda t: t.computation_epoch) | |
493 | elif column == 'off-core': | |
494 | threads.sort(key=lambda t: t.last_run_time) | |
495 | else: | |
496 | threads.sort(key=lambda t: t.last_run_time - t.computation_epoch) | |
497 | ||
498 | @lldb_command('showschedhistory', 'S:') | |
499 | def ShowSchedHistory(cmd_args=None, cmd_options=None): | |
500 | """ Routine to print out thread scheduling history, optionally sorted by a | |
501 | column. | |
502 | ||
503 | Usage: showschedhistory [-S on-core|off-core|last-duration] [<thread-ptr> ...] | |
3e170ce0 A |
504 | """ |
505 | ||
5ba3f43e A |
506 | sort_column = None |
507 | if '-S' in cmd_options: | |
508 | sort_column = cmd_options['-S'] | |
509 | ||
39037602 A |
510 | if cmd_args: |
511 | most_recent_dispatch = GetSchedMostRecentDispatch(False) | |
3e170ce0 | 512 | |
39037602 | 513 | print ShowThreadSchedHistory.header |
5ba3f43e A |
514 | |
515 | if sort_column: | |
516 | threads = [] | |
517 | for thread_ptr in cmd_args: | |
518 | threads.append(kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *')) | |
519 | ||
520 | SortThreads(threads, sort_column) | |
521 | ||
522 | for thread in threads: | |
523 | ShowThreadSchedHistory(thread, most_recent_dispatch) | |
524 | else: | |
525 | for thread_ptr in cmd_args: | |
526 | thread = kern.GetValueFromAddress(ArgumentStringToInt(thread_ptr), 'thread *') | |
527 | ShowThreadSchedHistory(thread, most_recent_dispatch) | |
3e170ce0 | 528 | |
39037602 | 529 | return |
5ba3f43e | 530 | |
39037602 | 531 | run_buckets = kern.globals.sched_run_buckets |
3e170ce0 | 532 | |
39037602 A |
533 | run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')] |
534 | fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')] | |
535 | share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')] | |
d9a64523 | 536 | share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] |
39037602 A |
537 | share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')] |
538 | share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')] | |
3e170ce0 | 539 | |
39037602 | 540 | sched_pri_shifts = kern.globals.sched_run_buckets |
3e170ce0 | 541 | |
39037602 | 542 | share_fg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')] |
d9a64523 | 543 | share_df_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] |
39037602 A |
544 | share_ut_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')] |
545 | share_bg_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')] | |
3e170ce0 | 546 | |
3e170ce0 | 547 | |
39037602 | 548 | print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals) |
d9a64523 | 549 | print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count) |
39037602 | 550 | print "Mach factor: {g.sched_mach_factor:d} Load factor: {g.sched_load_average:d} Sched tick: {g.sched_tick:d} timestamp: {g.sched_tick_last_abstime:d} interval:{g.sched_tick_interval:d}\n".format(g=kern.globals) |
d9a64523 | 551 | print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} DF shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_df_shift, share_ut_shift, share_bg_shift, g=kern.globals) |
39037602 | 552 | print "sched_pri_decay_band_limit: {g.sched_pri_decay_band_limit:d} sched_decay_usage_age_factor: {g.sched_decay_usage_age_factor:d}\n".format(g=kern.globals) |
3e170ce0 | 553 | |
39037602 A |
554 | if kern.arch == 'x86_64': |
555 | print "debugger_entry_time: {g.debugger_entry_time:d}\n".format(g=kern.globals) | |
3e170ce0 | 556 | |
39037602 | 557 | most_recent_dispatch = GetSchedMostRecentDispatch(True) |
3e170ce0 A |
558 | print "Most recent dispatch: " + str(most_recent_dispatch) |
559 | ||
39037602 | 560 | print ShowThreadSchedHistory.header |
5ba3f43e A |
561 | |
562 | if sort_column: | |
563 | threads = [t for t in IterateQueue(kern.globals.threads, 'thread *', 'threads')] | |
564 | ||
565 | SortThreads(threads, sort_column) | |
566 | ||
567 | for thread in threads: | |
568 | ShowThreadSchedHistory(thread, most_recent_dispatch) | |
569 | else: | |
570 | for thread in IterateQueue(kern.globals.threads, 'thread *', 'threads'): | |
571 | ShowThreadSchedHistory(thread, most_recent_dispatch) | |
3e170ce0 | 572 | |
3e170ce0 A |
573 | |
574 | # EndMacro: showschedhistory | |
575 | ||
5ba3f43e A |
576 | def int32(n): |
577 | n = n & 0xffffffff | |
578 | return (n ^ 0x80000000) - 0x80000000 | |
39037602 A |
579 | |
580 | # Macro: showallprocessors | |
581 | ||
582 | def ShowGroupSetSummary(runq, task_map): | |
583 | """ Internal function to print summary of group run queue | |
584 | params: runq - value representing struct run_queue * | |
585 | """ | |
5ba3f43e A |
586 | |
587 | print " runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, int32(runq.highq), runq.urgency) | |
39037602 A |
588 | |
589 | runq_queue_i = 0 | |
590 | runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0]) | |
591 | ||
592 | for runq_queue_i in xrange(runq_queue_count) : | |
593 | runq_queue_head = addressof(runq.queues[runq_queue_i]) | |
594 | runq_queue_p = runq_queue_head.next | |
595 | ||
596 | if unsigned(runq_queue_p) != unsigned(runq_queue_head): | |
597 | runq_queue_this_count = 0 | |
598 | ||
cb323159 | 599 | for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links", circleQueue=True): |
39037602 A |
600 | runq_queue_this_count += 1 |
601 | ||
602 | print " Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count) | |
cb323159 | 603 | for entry in ParanoidIterateLinkageChain(runq_queue_head, "sched_entry_t", "entry_links", circleQueue=True): |
39037602 A |
604 | group_addr = unsigned(entry) - (sizeof(dereference(entry)) * unsigned(entry.sched_pri)) |
605 | group = kern.GetValueFromAddress(unsigned(group_addr), 'sched_group_t') | |
606 | task = task_map.get(unsigned(group), 0x0) | |
607 | if task == 0x0 : | |
608 | print "Cannot find task for group: {: <#012x}".format(group) | |
609 | print "\tEntry [{: <#012x}] Priority {: <3d} Group {: <#012x} Task {: <#012x}\n".format(unsigned(entry), entry.sched_pri, unsigned(group), unsigned(task)) | |
610 | ||
611 | @lldb_command('showrunq') | |
612 | def ShowRunq(cmd_args=None): | |
613 | """ Routine to print information of a runq | |
614 | Usage: showrunq <runq> | |
615 | """ | |
616 | ||
617 | if not cmd_args: | |
618 | print "No arguments passed" | |
619 | print ShowRunq.__doc__ | |
620 | return False | |
621 | ||
622 | runq = kern.GetValueFromAddress(cmd_args[0], 'struct run_queue *') | |
623 | ShowRunQSummary(runq) | |
624 | ||
625 | def ShowRunQSummary(runq): | |
626 | """ Internal function to print summary of run_queue | |
627 | params: runq - value representing struct run_queue * | |
628 | """ | |
5ba3f43e A |
629 | |
630 | print " runq: count {: <10d} highq: {: <10d} urgency {: <10d}\n".format(runq.count, int32(runq.highq), runq.urgency) | |
39037602 A |
631 | |
632 | runq_queue_i = 0 | |
633 | runq_queue_count = sizeof(runq.queues)/sizeof(runq.queues[0]) | |
634 | ||
635 | for runq_queue_i in xrange(runq_queue_count) : | |
636 | runq_queue_head = addressof(runq.queues[runq_queue_i]) | |
cb323159 | 637 | runq_queue_p = runq_queue_head.head |
39037602 | 638 | |
cb323159 | 639 | if unsigned(runq_queue_p): |
39037602 A |
640 | runq_queue_this_count = 0 |
641 | ||
cb323159 | 642 | for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links", circleQueue=True): |
39037602 A |
643 | runq_queue_this_count += 1 |
644 | ||
645 | print " Queue [{: <#012x}] Priority {: <3d} count {:d}\n".format(runq_queue_head, runq_queue_i, runq_queue_this_count) | |
646 | print "\t" + GetThreadSummary.header + "\n" | |
cb323159 | 647 | for thread in ParanoidIterateLinkageChain(runq_queue_head, "thread_t", "runq_links", circleQueue=True): |
39037602 A |
648 | print "\t" + GetThreadSummary(thread) + "\n" |
649 | if config['verbosity'] > vHUMAN : | |
650 | print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n" | |
651 | ||
5ba3f43e | 652 | def ShowRTRunQSummary(rt_runq): |
0a7de745 A |
653 | if (hex(rt_runq.count) == hex(0xfdfdfdfd)) : |
654 | print " Realtime Queue ({:<#012x}) uninitialized\n".format(addressof(rt_runq.queue)) | |
655 | return | |
5ba3f43e A |
656 | print " Realtime Queue ({:<#012x}) Count {:d}\n".format(addressof(rt_runq.queue), rt_runq.count) |
657 | if rt_runq.count != 0: | |
658 | print "\t" + GetThreadSummary.header + "\n" | |
cb323159 | 659 | for rt_runq_thread in ParanoidIterateLinkageChain(rt_runq.queue, "thread_t", "runq_links", circleQueue=True): |
5ba3f43e | 660 | print "\t" + GetThreadSummary(rt_runq_thread) + "\n" |
39037602 A |
661 | |
662 | def ShowGrrrSummary(grrr_runq): | |
663 | """ Internal function to print summary of grrr_run_queue | |
664 | params: grrr_runq - value representing struct grrr_run_queue * | |
665 | """ | |
666 | print " GRRR Info: Count {: <10d} Weight {: <10d} Current Group {: <#012x}\n".format(grrr_runq.count, | |
667 | grrr_runq.weight, grrr_runq.current_group) | |
668 | grrr_group_i = 0 | |
669 | grrr_group_count = sizeof(grrr_runq.groups)/sizeof(grrr_runq.groups[0]) | |
670 | for grrr_group_i in xrange(grrr_group_count) : | |
671 | grrr_group = addressof(grrr_runq.groups[grrr_group_i]) | |
672 | if grrr_group.count > 0: | |
673 | print " Group {: <3d} [{: <#012x}] ".format(grrr_group.index, grrr_group) | |
674 | print "Count {:d} Weight {:d}\n".format(grrr_group.count, grrr_group.weight) | |
675 | grrr_group_client_head = addressof(grrr_group.clients) | |
676 | print GetThreadSummary.header | |
cb323159 | 677 | for thread in ParanoidIterateLinkageChain(grrr_group_client_head, "thread_t", "runq_links", circleQueue=True): |
39037602 A |
678 | print "\t" + GetThreadSummary(thread) + "\n" |
679 | if config['verbosity'] > vHUMAN : | |
680 | print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n" | |
681 | ||
39037602 A |
682 | def ShowActiveThread(processor): |
683 | if (processor.active_thread != 0) : | |
684 | print "\t" + GetThreadSummary.header + "\n" | |
685 | print "\t" + GetThreadSummary(processor.active_thread) + "\n" | |
686 | ||
687 | @lldb_command('showallprocessors') | |
688 | @lldb_command('showscheduler') | |
689 | def ShowScheduler(cmd_args=None): | |
690 | """ Routine to print information of all psets and processors | |
691 | Usage: showscheduler | |
692 | """ | |
5ba3f43e | 693 | node = addressof(kern.globals.pset_node0) |
39037602 A |
694 | show_grrr = 0 |
695 | show_priority_runq = 0 | |
696 | show_priority_pset_runq = 0 | |
697 | show_group_pset_runq = 0 | |
cb323159 | 698 | show_clutch = 0 |
f427ee49 | 699 | show_edge = 0 |
cb323159 | 700 | sched_string = str(kern.globals.sched_string) |
39037602 A |
701 | |
702 | if sched_string == "traditional": | |
703 | show_priority_runq = 1 | |
704 | elif sched_string == "traditional_with_pset_runqueue": | |
705 | show_priority_pset_runq = 1 | |
706 | elif sched_string == "grrr": | |
707 | show_grrr = 1 | |
708 | elif sched_string == "multiq": | |
709 | show_priority_runq = 1 | |
710 | show_group_pset_runq = 1 | |
711 | elif sched_string == "dualq": | |
712 | show_priority_pset_runq = 1 | |
713 | show_priority_runq = 1 | |
5ba3f43e A |
714 | elif sched_string == "amp": |
715 | show_priority_pset_runq = 1 | |
716 | show_priority_runq = 1 | |
cb323159 A |
717 | elif sched_string == "clutch": |
718 | show_clutch = 1 | |
f427ee49 A |
719 | elif sched_string == "edge": |
720 | show_edge = 1 | |
39037602 A |
721 | else : |
722 | print "Unknown sched_string {:s}".format(sched_string) | |
723 | ||
cb323159 A |
724 | print "Scheduler: {:s}\n".format(sched_string) |
725 | ||
f427ee49 | 726 | if show_clutch == 0 and show_edge == 0: |
cb323159 A |
727 | run_buckets = kern.globals.sched_run_buckets |
728 | run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')] | |
729 | fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')] | |
730 | share_fg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_FG')] | |
731 | share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] | |
732 | share_ut_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_UT')] | |
733 | share_bg_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_BG')] | |
734 | print "Processors: {g.processor_avail_count:d} Runnable threads: {:d} Fixpri threads: {:d}\n".format(run_count, fixpri_count, g=kern.globals) | |
735 | print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count) | |
736 | ||
737 | processor_offline = GetEnumValue('processor_state_t::PROCESSOR_OFF_LINE') | |
738 | processor_idle = GetEnumValue('processor_state_t::PROCESSOR_IDLE') | |
739 | processor_dispatching = GetEnumValue('processor_state_t::PROCESSOR_DISPATCHING') | |
740 | processor_running = GetEnumValue('processor_state_t::PROCESSOR_RUNNING') | |
39037602 A |
741 | |
742 | if show_group_pset_runq: | |
5ba3f43e A |
743 | if hasattr(kern.globals, "multiq_sanity_check"): |
744 | print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}, sanity check {g.multiq_sanity_check:d}\n".format(g=kern.globals) | |
745 | else: | |
746 | print "multiq scheduler config: deep-drain {g.deep_drain:d}, ceiling {g.drain_ceiling:d}, depth limit {g.drain_depth_limit:d}, band limit {g.drain_band_limit:d}\n".format(g=kern.globals) | |
39037602 A |
747 | |
748 | # Create a group->task mapping | |
749 | task_map = {} | |
750 | for task in kern.tasks: | |
751 | task_map[unsigned(task.sched_group)] = task | |
752 | for task in kern.terminated_tasks: | |
753 | task_map[unsigned(task.sched_group)] = task | |
754 | ||
755 | print " \n" | |
756 | ||
5ba3f43e A |
757 | while node != 0: |
758 | pset = node.psets | |
759 | pset = kern.GetValueFromAddress(unsigned(pset), 'struct processor_set *') | |
760 | ||
761 | while pset != 0: | |
762 | print "Processor Set {: <#012x} Count {:d} (cpu_id {:<#x}-{:<#x})\n".format(pset, | |
763 | unsigned(pset.cpu_set_count), pset.cpu_set_low, pset.cpu_set_hi) | |
764 | ||
765 | rt_runq = kern.GetValueFromAddress(unsigned(addressof(pset.rt_runq)), 'struct rt_queue *') | |
766 | ShowRTRunQSummary(rt_runq) | |
39037602 | 767 | |
5ba3f43e A |
768 | if show_priority_pset_runq: |
769 | runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *') | |
39037602 | 770 | ShowRunQSummary(runq) |
39037602 | 771 | |
5ba3f43e A |
772 | if show_group_pset_runq: |
773 | print "Main Runq:\n" | |
774 | runq = kern.GetValueFromAddress(unsigned(addressof(pset.pset_runq)), 'struct run_queue *') | |
775 | ShowGroupSetSummary(runq, task_map) | |
776 | print "All Groups:\n" | |
777 | # TODO: Possibly output task header for each group | |
778 | for group in IterateQueue(kern.globals.sched_groups, "sched_group_t", "sched_groups"): | |
779 | if (group.runq.count != 0) : | |
780 | task = task_map.get(unsigned(group), "Unknown task!") | |
781 | print "Group {: <#012x} Task {: <#012x}\n".format(unsigned(group), unsigned(task)) | |
782 | ShowRunQSummary(group.runq) | |
783 | print " \n" | |
d9a64523 A |
784 | |
785 | processor_array = kern.globals.processor_array | |
39037602 | 786 | |
5ba3f43e | 787 | print "Active Processors:\n" |
cb323159 | 788 | active_bitmap = int(pset.cpu_state_map[processor_dispatching]) | int(pset.cpu_state_map[processor_running]) |
d9a64523 A |
789 | for cpuid in IterateBitmap(active_bitmap): |
790 | processor = processor_array[cpuid] | |
791 | if processor != 0: | |
792 | print " " + GetProcessorSummary(processor) | |
793 | ShowActiveThread(processor) | |
d9a64523 A |
794 | |
795 | if show_priority_runq: | |
796 | runq = processor.runq | |
797 | ShowRunQSummary(runq) | |
798 | if show_grrr: | |
799 | grrr_runq = processor.grrr_runq | |
800 | ShowGrrrSummary(grrr_runq) | |
5ba3f43e | 801 | print " \n" |
39037602 A |
802 | |
803 | ||
5ba3f43e | 804 | print "Idle Processors:\n" |
cb323159 | 805 | idle_bitmap = int(pset.cpu_state_map[processor_idle]) & int(pset.primary_map) |
d9a64523 A |
806 | for cpuid in IterateBitmap(idle_bitmap): |
807 | processor = processor_array[cpuid] | |
808 | if processor != 0: | |
809 | print " " + GetProcessorSummary(processor) | |
810 | ShowActiveThread(processor) | |
d9a64523 A |
811 | |
812 | if show_priority_runq: | |
813 | ShowRunQSummary(processor.runq) | |
5ba3f43e | 814 | print " \n" |
39037602 A |
815 | |
816 | ||
5ba3f43e | 817 | print "Idle Secondary Processors:\n" |
cb323159 | 818 | idle_bitmap = int(pset.cpu_state_map[processor_idle]) & ~(int(pset.primary_map)) |
d9a64523 A |
819 | for cpuid in IterateBitmap(idle_bitmap): |
820 | processor = processor_array[cpuid] | |
821 | if processor != 0: | |
822 | print " " + GetProcessorSummary(processor) | |
823 | ShowActiveThread(processor) | |
d9a64523 A |
824 | |
825 | if show_priority_runq: | |
826 | print ShowRunQSummary(processor.runq) | |
827 | print " \n" | |
39037602 | 828 | |
d9a64523 A |
829 | |
830 | print "Other Processors:\n" | |
831 | other_bitmap = 0 | |
cb323159 | 832 | for i in range(processor_offline, processor_idle): |
d9a64523 A |
833 | other_bitmap |= int(pset.cpu_state_map[i]) |
834 | other_bitmap &= int(pset.cpu_bitmask) | |
835 | for cpuid in IterateBitmap(other_bitmap): | |
836 | processor = processor_array[cpuid] | |
837 | if processor != 0: | |
838 | print " " + GetProcessorSummary(processor) | |
839 | ShowActiveThread(processor) | |
d9a64523 A |
840 | |
841 | if show_priority_runq: | |
842 | ShowRunQSummary(processor.runq) | |
5ba3f43e A |
843 | print " \n" |
844 | ||
f427ee49 A |
845 | if show_clutch or show_edge: |
846 | cluster_type = "SMP" | |
847 | if pset.pset_type == 1: | |
848 | cluster_type = "E" | |
849 | elif pset.pset_type == 2: | |
850 | cluster_type = "P" | |
851 | print "=== Clutch Scheduler Hierarchy Pset{:d} (Type: {:s}) ] ===\n\n".format(pset.pset_cluster_id, cluster_type) | |
cb323159 | 852 | ShowSchedClutchForPset(pset) |
5ba3f43e A |
853 | |
854 | pset = pset.pset_list | |
855 | ||
856 | node = node.node_list | |
39037602 | 857 | |
39037602 | 858 | print "\nCrashed Threads Queue: ({:<#012x})\n".format(addressof(kern.globals.crashed_threads_queue)) |
cb323159 | 859 | first = True |
39037602 A |
860 | for thread in ParanoidIterateLinkageChain(kern.globals.crashed_threads_queue, "thread_t", "runq_links"): |
861 | if first: | |
cb323159 A |
862 | print "\t" + GetThreadSummary.header |
863 | first = False | |
864 | print "\t" + GetThreadSummary(thread) | |
865 | ||
866 | def dump_mpsc_thread_queue(name, head): | |
867 | head = addressof(head) | |
868 | print "\n{:s}: ({:<#012x})\n".format(name, head) | |
869 | first = True | |
870 | for thread in IterateMPSCQueue(head.mpd_queue, 'struct thread', 'mpsc_links'): | |
871 | if first: | |
872 | print "\t" + GetThreadSummary.header | |
873 | first = False | |
874 | print "\t" + GetThreadSummary(thread) | |
875 | ||
876 | dump_mpsc_thread_queue("Terminate Queue", kern.globals.thread_terminate_queue) | |
877 | dump_mpsc_thread_queue("Waiting For Kernel Stacks Queue", kern.globals.thread_stack_queue) | |
878 | dump_mpsc_thread_queue("Thread Exception Queue", kern.globals.thread_exception_queue) | |
879 | dump_mpsc_thread_queue("Thread Deallocate Queue", kern.globals.thread_deallocate_queue) | |
39037602 A |
880 | |
881 | print "\n" | |
882 | ||
883 | print "\n" | |
884 | ||
885 | # EndMacro: showallprocessors | |
886 | ||
887 | ||
cb323159 A |
888 | def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst=0, circleQueue=False): |
889 | """ Iterate over a Linkage Chain queue in kernel of type queue_head_t or circle_queue_head_t. (osfmk/kern/queue.h method 1 or circle_queue.h) | |
39037602 A |
890 | This is equivalent to the qe_foreach_element() macro |
891 | Blows up aggressively and descriptively when something goes wrong iterating a queue. | |
892 | Prints correctness errors, and throws exceptions on 'cannot proceed' errors | |
893 | If this is annoying, set the global 'enable_paranoia' to false. | |
894 | ||
895 | params: | |
896 | queue_head - value : Value object for queue_head. | |
897 | element_type - lldb.SBType : pointer type of the element which contains the queue_chain_t. Typically its structs like thread, task etc.. | |
898 | - str : OR a string describing the type. ex. 'task *' | |
899 | field_name - str : Name of the field (in element) which holds a queue_chain_t | |
900 | field_ofst - int : offset from the 'field_name' (in element) which holds a queue_chain_t | |
901 | This is mostly useful if a particular element contains an array of queue_chain_t | |
902 | returns: | |
903 | A generator does not return. It is used for iterating. | |
904 | value : An object thats of type (element_type). Always a pointer object | |
905 | example usage: | |
906 | for thread in IterateQueue(kern.globals.threads, 'thread *', 'threads'): | |
907 | print thread.thread_id | |
908 | """ | |
909 | ||
910 | if type(element_type) is str: | |
911 | element_type = gettype(element_type) | |
912 | ||
913 | # Some ways of constructing a queue head seem to end up with the | |
914 | # struct object as the value and not a pointer to the struct head | |
915 | # In that case, addressof will give us a pointer to the struct, which is what we need | |
916 | if not queue_head.GetSBValue().GetType().IsPointerType() : | |
917 | queue_head = addressof(queue_head) | |
918 | ||
cb323159 A |
919 | if circleQueue: |
920 | # Mosh the value into a brand new value, to really get rid of its old cvalue history | |
921 | queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct circle_queue_head *').head | |
922 | else: | |
923 | # Mosh the value into a brand new value, to really get rid of its old cvalue history | |
924 | queue_head = kern.GetValueFromAddress(unsigned(queue_head), 'struct queue_entry *') | |
39037602 A |
925 | |
926 | if unsigned(queue_head) == 0: | |
cb323159 | 927 | if not circleQueue and ParanoidIterateLinkageChain.enable_paranoia: |
39037602 A |
928 | print "bad queue_head_t: {:s}".format(queue_head) |
929 | return | |
930 | ||
931 | if element_type.IsPointerType(): | |
5ba3f43e | 932 | struct_type = element_type.GetPointeeType() |
39037602 | 933 | else: |
5ba3f43e A |
934 | struct_type = element_type |
935 | ||
936 | elem_ofst = getfieldoffset(struct_type, field_name) + field_ofst | |
39037602 A |
937 | |
938 | try: | |
939 | link = queue_head.next | |
940 | last_link = queue_head | |
941 | try_read_next = unsigned(queue_head.next) | |
942 | except: | |
943 | print "Exception while looking at queue_head: {:>#18x}".format(unsigned(queue_head)) | |
944 | raise | |
945 | ||
946 | if ParanoidIterateLinkageChain.enable_paranoia: | |
947 | if unsigned(queue_head.next) == 0: | |
948 | raise ValueError("NULL next pointer on head: queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev)) | |
949 | if unsigned(queue_head.prev) == 0: | |
950 | print "NULL prev pointer on head: queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev) | |
951 | if unsigned(queue_head.next) == unsigned(queue_head) and unsigned(queue_head.prev) != unsigned(queue_head): | |
952 | print "corrupt queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev) | |
953 | ||
954 | if ParanoidIterateLinkageChain.enable_debug : | |
955 | print "starting at queue_head {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, queue_head.next, queue_head.prev) | |
956 | ||
957 | addr = 0 | |
958 | obj = 0 | |
959 | ||
960 | try: | |
cb323159 A |
961 | while True: |
962 | if not circleQueue and unsigned(queue_head) == unsigned(link): | |
963 | break; | |
39037602 A |
964 | if ParanoidIterateLinkageChain.enable_paranoia: |
965 | if unsigned(link.next) == 0: | |
966 | raise ValueError("NULL next pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, link, link.next, link.prev)) | |
967 | if unsigned(link.prev) == 0: | |
968 | print "NULL prev pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x}".format(queue_head, link, link.next, link.prev) | |
969 | if unsigned(last_link) != unsigned(link.prev): | |
970 | print "Corrupt prev pointer: queue_head {:>#18x} link: {:>#18x} next: {:>#18x} prev: {:>#18x} prev link: {:>#18x} ".format( | |
971 | queue_head, link, link.next, link.prev, last_link) | |
972 | ||
973 | addr = unsigned(link) - unsigned(elem_ofst); | |
974 | obj = kern.GetValueFromAddress(addr, element_type) | |
975 | if ParanoidIterateLinkageChain.enable_debug : | |
976 | print "yielding link: {:>#18x} next: {:>#18x} prev: {:>#18x} addr: {:>#18x} obj: {:>#18x}".format(link, link.next, link.prev, addr, obj) | |
977 | yield obj | |
978 | last_link = link | |
979 | link = link.next | |
cb323159 A |
980 | if circleQueue and unsigned(queue_head) == unsigned(link): |
981 | break; | |
39037602 A |
982 | except: |
983 | exc_info = sys.exc_info() | |
984 | try: | |
985 | print "Exception while iterating queue: {:>#18x} link: {:>#18x} addr: {:>#18x} obj: {:>#18x} last link: {:>#18x}".format(queue_head, link, addr, obj, last_link) | |
986 | except: | |
987 | import traceback | |
988 | traceback.print_exc() | |
989 | raise exc_info[0], exc_info[1], exc_info[2] | |
990 | ||
991 | ParanoidIterateLinkageChain.enable_paranoia = True | |
992 | ParanoidIterateLinkageChain.enable_debug = False | |
993 | ||
f427ee49 A |
994 | def LinkageChainEmpty(queue_head): |
995 | if not queue_head.GetSBValue().GetType().IsPointerType() : | |
996 | queue_head = addressof(queue_head) | |
997 | ||
998 | # Mosh the value into a brand new value, to really get rid of its old cvalue history | |
999 | # avoid using GetValueFromAddress | |
1000 | queue_head = value(queue_head.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(unsigned(queue_head)))) | |
1001 | queue_head = cast(queue_head, 'struct queue_entry *') | |
1002 | ||
1003 | link = queue_head.next | |
1004 | ||
1005 | return unsigned(queue_head) == unsigned(link) | |
1006 | ||
d9a64523 A |
1007 | def bit_first(bitmap): |
1008 | return bitmap.bit_length() - 1 | |
1009 | ||
1010 | def lsb_first(bitmap): | |
1011 | bitmap = bitmap & -bitmap | |
1012 | return bit_first(bitmap) | |
1013 | ||
1014 | def IterateBitmap(bitmap): | |
1015 | """ Iterate over a bitmap, returning the index of set bits starting from 0 | |
1016 | ||
1017 | params: | |
1018 | bitmap - value : bitmap | |
1019 | returns: | |
1020 | A generator does not return. It is used for iterating. | |
1021 | value : index of a set bit | |
1022 | example usage: | |
1023 | for cpuid in IterateBitmap(running_bitmap): | |
1024 | print processor_array[cpuid] | |
1025 | """ | |
1026 | i = lsb_first(bitmap) | |
1027 | while (i >= 0): | |
1028 | yield i | |
1029 | bitmap = bitmap & ~((1 << (i + 1)) - 1) | |
1030 | i = lsb_first(bitmap) | |
1031 | ||
1032 | ||
39037602 | 1033 | # Macro: showallcallouts |
5ba3f43e | 1034 | |
f427ee49 A |
1035 | from kevent import GetKnoteKqueue |
1036 | ||
1037 | def ShowThreadCall(prefix, call, recent_timestamp, is_pending=False): | |
5ba3f43e A |
1038 | """ |
1039 | Print a description of a thread_call_t and its relationship to its expected fire time | |
1040 | """ | |
f427ee49 A |
1041 | func = call.tc_func |
1042 | param0 = call.tc_param0 | |
1043 | param1 = call.tc_param1 | |
5ba3f43e | 1044 | |
f427ee49 | 1045 | is_iotes = False |
5ba3f43e A |
1046 | |
1047 | func_name = kern.Symbolicate(func) | |
5ba3f43e | 1048 | |
f427ee49 | 1049 | extra_string = "" |
5ba3f43e | 1050 | |
f427ee49 A |
1051 | strip_func = kern.StripKernelPAC(unsigned(func)) |
1052 | ||
1053 | func_syms = kern.SymbolicateFromAddress(strip_func) | |
1054 | # returns an array of SBSymbol | |
1055 | ||
1056 | if func_syms and func_syms[0] : | |
1057 | func_name = func_syms[0].GetName() | |
1058 | ||
1059 | try : | |
1060 | if ("IOTimerEventSource::timeoutAndRelease" in func_name or | |
1061 | "IOTimerEventSource::timeoutSignaled" in func_name) : | |
1062 | iotes = Cast(call.tc_param0, 'IOTimerEventSource*') | |
1063 | try: | |
1064 | func = iotes.action | |
1065 | param0 = iotes.owner | |
1066 | param1 = unsigned(iotes) | |
1067 | except AttributeError: | |
1068 | # This is horrible, horrible, horrible. But it works. Needed because IOEventSource hides the action member in an | |
1069 | # anonymous union when XNU_PRIVATE_SOURCE is set. To grab it, we work backwards from the enabled member. | |
1070 | func = dereference(kern.GetValueFromAddress(addressof(iotes.enabled) - sizeof('IOEventSource::Action'), 'uint64_t *')) | |
1071 | param0 = iotes.owner | |
1072 | param1 = unsigned(iotes) | |
1073 | ||
1074 | workloop = iotes.workLoop | |
1075 | thread = workloop.workThread | |
1076 | ||
1077 | is_iotes = True | |
5ba3f43e | 1078 | |
f427ee49 A |
1079 | # re-symbolicate the func we found inside the IOTES |
1080 | strip_func = kern.StripKernelPAC(unsigned(func)) | |
1081 | func_syms = kern.SymbolicateFromAddress(strip_func) | |
1082 | if func_syms and func_syms[0] : | |
1083 | func_name = func_syms[0].GetName() | |
1084 | else : | |
1085 | func_name = str(FindKmodNameForAddr(func)) | |
1086 | ||
1087 | # cast from IOThread to thread_t, because IOThread is sometimes opaque | |
1088 | thread = Cast(thread, 'thread_t') | |
1089 | thread_id = thread.thread_id | |
1090 | thread_name = GetThreadName(thread) | |
1091 | ||
1092 | extra_string += "workloop thread: {:#x} ({:#x}) {:s}".format(thread, thread_id, thread_name) | |
1093 | ||
1094 | if "filt_timerexpire" in func_name : | |
1095 | knote = Cast(call.tc_param0, 'struct knote *') | |
1096 | kqueue = GetKnoteKqueue(knote) | |
1097 | proc = kqueue.kq_p | |
1098 | proc_name = GetProcName(proc) | |
1099 | proc_pid = proc.p_pid | |
1100 | ||
1101 | extra_string += "kq: {:#018x} {:s}[{:d}]".format(kqueue, proc_name, proc_pid) | |
1102 | ||
1103 | if "mk_timer_expire" in func_name : | |
1104 | timer = Cast(call.tc_param0, 'struct mk_timer *') | |
1105 | port = timer.port | |
1106 | ||
1107 | extra_string += "port: {:#018x} {:s}".format(port, GetPortDestinationSummary(port)) | |
1108 | ||
1109 | if "workq_kill_old_threads_call" in func_name : | |
1110 | workq = Cast(call.tc_param0, 'struct workqueue *') | |
1111 | proc = workq.wq_proc | |
1112 | proc_name = GetProcName(proc) | |
1113 | proc_pid = proc.p_pid | |
1114 | ||
1115 | extra_string += "{:s}[{:d}]".format(proc_name, proc_pid) | |
1116 | ||
1117 | if ("workq_add_new_threads_call" in func_name or | |
1118 | "realitexpire" in func_name): | |
1119 | proc = Cast(call.tc_param0, 'struct proc *') | |
1120 | proc_name = GetProcName(proc) | |
1121 | proc_pid = proc.p_pid | |
1122 | ||
1123 | extra_string += "{:s}[{:d}]".format(proc_name, proc_pid) | |
1124 | ||
1125 | except: | |
1126 | print "exception generating extra_string for call: {:#018x}".format(call) | |
1127 | if ShowThreadCall.enable_debug : | |
1128 | raise | |
1129 | ||
1130 | if (func_name == "") : | |
1131 | func_name = FindKmodNameForAddr(func) | |
1132 | ||
1133 | if (call.tc_flags & GetEnumValue('thread_call_flags_t::THREAD_CALL_FLAG_CONTINUOUS')) : | |
1134 | timer_fire = call.tc_pqlink.deadline - (recent_timestamp + kern.globals.mach_absolutetime_asleep) | |
5ba3f43e | 1135 | else : |
f427ee49 | 1136 | timer_fire = call.tc_pqlink.deadline - recent_timestamp |
5ba3f43e A |
1137 | |
1138 | timer_fire_s = kern.GetNanotimeFromAbstime(timer_fire) / 1000000000.0 | |
1139 | ||
1140 | ttd_s = kern.GetNanotimeFromAbstime(call.tc_ttd) / 1000000000.0 | |
1141 | ||
f427ee49 A |
1142 | if (is_pending) : |
1143 | pending_time = call.tc_pending_timestamp - recent_timestamp | |
1144 | pending_time = kern.GetNanotimeFromAbstime(pending_time) / 1000000000.0 | |
1145 | ||
1146 | flags = int(call.tc_flags) | |
1147 | # TODO: extract this out of the thread_call_flags_t enum | |
1148 | thread_call_flags = {0x0:'', 0x1:'A', 0x2:'W', 0x4:'D', 0x8:'R', 0x10:'S', 0x20:'O', | |
1149 | 0x40:'P', 0x80:'L', 0x100:'C'} | |
1150 | ||
1151 | flags_str = '' | |
1152 | mask = 0x1 | |
1153 | while mask <= 0x100 : | |
1154 | flags_str += thread_call_flags[int(flags & mask)] | |
1155 | mask = mask << 1 | |
1156 | ||
1157 | if is_iotes : | |
1158 | flags_str += 'I' | |
1159 | ||
1160 | if (is_pending) : | |
1161 | print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:16.06f} {:9s} " + | |
1162 | "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix, | |
1163 | unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s, | |
1164 | timer_fire_s, pending_time, flags_str, | |
1165 | func, param0, param1, func_name, extra_string) | |
1166 | else : | |
1167 | print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:9s} " + | |
1168 | "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix, | |
1169 | unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s, | |
1170 | timer_fire_s, flags_str, | |
1171 | func, param0, param1, func_name, extra_string) | |
1172 | ||
1173 | ShowThreadCall.enable_debug = False | |
1174 | ||
1175 | @header("{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:9s} {:>18s}".format( | |
1176 | "entry", "deadline", "soft_deadline", | |
1177 | "duration (s)", "to go (s)", "flags", "(*func) (param0, param1)")) | |
1178 | def PrintThreadGroup(group): | |
1179 | header = PrintThreadGroup.header | |
1180 | pending_header = "{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:>16s} {:9s} {:>18s}".format( | |
1181 | "entry", "deadline", "soft_deadline", | |
1182 | "duration (s)", "to go (s)", "pending", "flags", "(*func) (param0, param1)") | |
1183 | ||
1184 | recent_timestamp = GetRecentTimestamp() | |
1185 | ||
1186 | idle_timestamp_distance = group.idle_timestamp - recent_timestamp | |
1187 | idle_timestamp_distance_s = kern.GetNanotimeFromAbstime(idle_timestamp_distance) / 1000000000.0 | |
1188 | ||
1189 | is_parallel = "" | |
1190 | ||
1191 | if (group.tcg_flags & GetEnumValue('thread_call_group_flags_t::TCG_PARALLEL')) : | |
1192 | is_parallel = " (parallel)" | |
1193 | ||
1194 | print "Group: {g.tcg_name:s} ({:#18x}){:s}".format(unsigned(group), is_parallel, g=group) | |
1195 | print "\t" +"Thread Priority: {g.tcg_thread_pri:d}\n".format(g=group) | |
1196 | print ("\t" +"Active: {g.active_count:<3d} Idle: {g.idle_count:<3d}" + | |
1197 | "Blocked: {g.blocked_count:<3d} Pending: {g.pending_count:<3d}" + | |
1198 | "Target: {g.target_thread_count:<3d}\n").format(g=group) | |
1199 | ||
1200 | if unsigned(group.idle_timestamp) is not 0 : | |
1201 | print "\t" +"Idle Timestamp: {g.idle_timestamp:d} ({:03.06f})\n".format(idle_timestamp_distance_s, | |
1202 | g=group) | |
1203 | ||
1204 | print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue)) | |
1205 | if not LinkageChainEmpty(group.pending_queue) : | |
1206 | print "\t\t" + pending_header | |
1207 | for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_qlink"): | |
1208 | ShowThreadCall("\t\t", call, recent_timestamp, is_pending=True) | |
1209 | ||
1210 | print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format( | |
1211 | addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0])) | |
1212 | if not LinkageChainEmpty(group.delayed_queues[0]) : | |
1213 | print "\t\t" + header | |
1214 | for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_qlink"): | |
1215 | ShowThreadCall("\t\t", call, recent_timestamp) | |
1216 | ||
1217 | print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format( | |
1218 | addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1])) | |
1219 | if not LinkageChainEmpty(group.delayed_queues[1]) : | |
1220 | print "\t\t" + header | |
1221 | for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_qlink"): | |
1222 | ShowThreadCall("\t\t", call, recent_timestamp) | |
1223 | ||
1224 | def PrintThreadCallThreads() : | |
1225 | callout_flag = GetEnumValue('thread_tag_t::THREAD_TAG_CALLOUT') | |
1226 | recent_timestamp = GetRecentTimestamp() | |
1227 | ||
1228 | for thread in IterateQueue(kern.globals.kernel_task.threads, 'thread *', 'task_threads'): | |
1229 | if (thread.thread_tag & callout_flag) : | |
1230 | print " {:#20x} {:#12x} {:s}".format(thread, thread.thread_id, GetThreadName(thread)) | |
1231 | state = thread.thc_state | |
1232 | if state and state.thc_call : | |
1233 | print "\t" + PrintThreadGroup.header | |
1234 | ShowThreadCall("\t", state.thc_call, recent_timestamp) | |
1235 | soft_deadline = state.thc_call_soft_deadline | |
1236 | slop_time = state.thc_call_hard_deadline - soft_deadline | |
1237 | slop_time = kern.GetNanotimeFromAbstime(slop_time) / 1000000000.0 | |
1238 | print "\t original soft deadline {:d}, hard deadline {:d} (leeway {:.06f}s)".format( | |
1239 | soft_deadline, state.thc_call_hard_deadline, slop_time) | |
1240 | enqueue_time = state.thc_call_pending_timestamp - soft_deadline | |
1241 | enqueue_time = kern.GetNanotimeFromAbstime(enqueue_time) / 1000000000.0 | |
1242 | print "\t time to enqueue after deadline: {:.06f}s (enqueued at: {:d})".format( | |
1243 | enqueue_time, state.thc_call_pending_timestamp) | |
1244 | wait_time = state.thc_call_start - state.thc_call_pending_timestamp | |
1245 | wait_time = kern.GetNanotimeFromAbstime(wait_time) / 1000000000.0 | |
1246 | print "\t time to start executing after enqueue: {:.06f}s (executing at: {:d})".format( | |
1247 | wait_time, state.thc_call_start) | |
1248 | ||
1249 | if (state.thc_IOTES_invocation_timestamp) : | |
1250 | iotes_acquire_time = state.thc_IOTES_invocation_timestamp - state.thc_call_start | |
1251 | iotes_acquire_time = kern.GetNanotimeFromAbstime(iotes_acquire_time) / 1000000000.0 | |
1252 | print "\t IOTES acquire time: {:.06f}s (acquired at: {:d})".format( | |
1253 | iotes_acquire_time, state.thc_IOTES_invocation_timestamp) | |
1254 | ||
1255 | ||
1256 | @lldb_command('showcalloutgroup') | |
1257 | def ShowCalloutGroup(cmd_args=None): | |
1258 | """ Prints out the pending and delayed thread calls for a specific group | |
1259 | ||
1260 | Pass 'threads' to show the thread call threads themselves. | |
1261 | ||
1262 | Callout flags: | |
1263 | ||
1264 | A - Allocated memory owned by thread_call.c | |
1265 | W - Wait - thread waiting for call to finish running | |
1266 | D - Delayed - deadline based | |
1267 | R - Running - currently executing on a thread | |
1268 | S - Signal - call from timer interrupt instead of thread | |
1269 | O - Once - pend the enqueue if re-armed while running | |
1270 | P - Reschedule pending - enqueue is pending due to re-arm while running | |
1271 | L - Rate-limited - (App Nap) | |
1272 | C - Continuous time - Timeout is in mach_continuous_time | |
1273 | I - Callout is an IOTimerEventSource | |
1274 | """ | |
1275 | if not cmd_args: | |
1276 | print "No arguments passed" | |
1277 | print ShowCalloutGroup.__doc__ | |
1278 | return False | |
1279 | ||
1280 | if "threads" in cmd_args[0] : | |
1281 | PrintThreadCallThreads() | |
1282 | return | |
1283 | ||
1284 | group = kern.GetValueFromAddress(cmd_args[0], 'struct thread_call_group *') | |
1285 | if not group: | |
1286 | print "unknown arguments:", str(cmd_args) | |
1287 | return False | |
1288 | ||
1289 | PrintThreadGroup(group) | |
5ba3f43e | 1290 | |
39037602 A |
1291 | @lldb_command('showallcallouts') |
1292 | def ShowAllCallouts(cmd_args=None): | |
5ba3f43e | 1293 | """ Prints out the pending and delayed thread calls for the thread call groups |
5ba3f43e | 1294 | |
f427ee49 A |
1295 | Callout flags: |
1296 | ||
1297 | A - Allocated memory owned by thread_call.c | |
1298 | W - Wait - thread waiting for call to finish running | |
1299 | D - Delayed - deadline based | |
1300 | R - Running - currently executing on a thread | |
1301 | S - Signal - call from timer interrupt instead of thread | |
1302 | O - Once - pend the enqueue if re-armed while running | |
1303 | P - Reschedule pending - enqueue is pending due to re-arm while running | |
1304 | L - Rate-limited - (App Nap) | |
1305 | C - Continuous time - Timeout is in mach_continuous_time | |
1306 | I - Callout is an IOTimerEventSource | |
1307 | """ | |
5ba3f43e A |
1308 | index_max = GetEnumValue('thread_call_index_t::THREAD_CALL_INDEX_MAX') |
1309 | ||
1310 | for i in range (0, index_max) : | |
f427ee49 A |
1311 | group = addressof(kern.globals.thread_call_groups[i]) |
1312 | PrintThreadGroup(group) | |
1313 | ||
1314 | print "Thread Call Threads:" | |
1315 | PrintThreadCallThreads() | |
39037602 A |
1316 | |
1317 | # EndMacro: showallcallouts | |
1318 |