1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 /*
27 cc -I. -DPRIVATE -D__APPLE_PRIVATE -O -o latency latency.c -lncurses
28 */
29
30 #include <mach/mach.h>
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <signal.h>
34 #include <strings.h>
35 #include <nlist.h>
36 #include <fcntl.h>
37 #include <string.h>
38
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/time.h>
42
43 #include <libc.h>
44 #include <termios.h>
45 #include <curses.h>
46 #include <sys/ioctl.h>
47
48 #ifndef KERNEL_PRIVATE
49 #define KERNEL_PRIVATE
50 #include <sys/kdebug.h>
51 #undef KERNEL_PRIVATE
52 #else
53 #include <sys/kdebug.h>
54 #endif /*KERNEL_PRIVATE*/
55
56 #include <sys/sysctl.h>
57 #include <errno.h>
58 #include <err.h>
59
60 #include <mach/host_info.h>
61 #include <mach/mach_error.h>
62 #include <mach/mach_types.h>
63 #include <mach/message.h>
64 #include <mach/mach_syscalls.h>
65 #include <mach/clock_types.h>
66 #include <mach/mach_time.h>
67
68 #include <libkern/OSTypes.h>
69
70 extern mach_port_t clock_port;
71
72 #define KERN_KDPIDEX 14
73
74
75 int s_usec_10_bins[10];
76 int s_usec_100_bins[10];
77 int s_msec_1_bins[10];
78 int s_msec_10_bins[5];
79 int s_too_slow;
80 int s_max_latency;
81 int s_min_latency = 0;
82 long long s_total_latency = 0;
83 int s_total_samples;
84 long s_thresh_hold;
85 int s_exceeded_threshold = 0;
86
87 int i_usec_10_bins[10];
88 int i_usec_100_bins[10];
89 int i_msec_1_bins[10];
90 int i_msec_10_bins[5];
91 int i_too_slow;
92 int i_max_latency;
93 int i_min_latency = 0;
94 long long i_total_latency = 0;
95 int i_total_samples;
96 long i_thresh_hold;
97 int i_exceeded_threshold = 0;
98
99 long start_time;
100 long curr_time;
101 long refresh_time;
102
103 char *policy_name;
104 int my_policy;
105 int my_pri = -1;
106 int num_of_usecs_to_sleep = 1000;
107
108 char *kernelpath = (char *)0;
109 char *code_file = (char *)0;
110
111 typedef struct {
112 u_long k_sym_addr; /* kernel symbol address from nm */
113 u_int k_sym_len; /* length of kernel symbol string */
114 char *k_sym_name; /* kernel symbol string from nm */
115 } kern_sym_t;
116
117 kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
118 int kern_sym_count; /* number of entries in nm table */
119 char pcstring[128];
120
121 #define UNKNOWN "Can't find symbol name"
122
123
124 double divisor;
125 int gotSIGWINCH = 0;
126 int trace_enabled = 0;
127 struct host_basic_info hi;
128
129
130 #define SAMPLE_SIZE 300000
131
132 int mib[6];
133 size_t needed;
134 char *my_buffer;
135
136 kbufinfo_t bufinfo = {0, 0, 0};
137
138 FILE *log_fp = (FILE *)0;
139 int num_of_codes = 0;
140 int need_new_map = 0;
141 int total_threads = 0;
142 kd_threadmap *mapptr = 0;
143
144 #define MAX_ENTRIES 1024
145 struct ct {
146 int type;
147 char name[32];
148 } codes_tab[MAX_ENTRIES];
149
150
151 #define NUMPARMS 23
152
153 struct th_info {
154 int thread;
155 int type;
156 int child_thread;
157 int arg1;
158 double stime;
159 long *pathptr;
160 long pathname[NUMPARMS + 1];
161 };
162
163 #define MAX_THREADS 512
164 struct th_info th_state[MAX_THREADS];
165
166 int cur_max = 0;
167
168 #define TRACE_DATA_NEWTHREAD 0x07000004
169 #define TRACE_STRING_NEWTHREAD 0x07010004
170 #define TRACE_STRING_EXEC 0x07010008
171
172 #define INTERRUPT 0x01050000
173 #define DECR_TRAP 0x01090000
174 #define DECR_SET 0x01090004
175 #define MACH_vmfault 0x01300000
176 #define MACH_sched 0x01400000
177 #define MACH_stkhandoff 0x01400008
178 #define VFS_LOOKUP 0x03010090
179 #define BSC_exit 0x040C0004
180 #define IES_action 0x050b0018
181 #define IES_filter 0x050b001c
182 #define TES_action 0x050c0010
183 #define CQ_action 0x050d0018
184
185
186 #define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
187 #define DBG_FUNC_MASK 0xfffffffc
188
189 #define CPU_NUMBER(ts) ((ts & KDBG_CPU_MASK) >> KDBG_CPU_SHIFT)
190
191 #define DBG_ZERO_FILL_FAULT 1
192 #define DBG_PAGEIN_FAULT 2
193 #define DBG_COW_FAULT 3
194 #define DBG_CACHE_HIT_FAULT 4
195
196 char *fault_name[5] = {
197 "",
198 "ZeroFill",
199 "PageIn",
200 "COW",
201 "CacheHit",
202 };
203
204 char *pc_to_string();
205 static kern_return_t set_time_constraint_policy(void);
206 static kern_return_t set_standard_policy(void);
207
208 int decrementer_val = 0; /* Value used to reset decrementer */
209 int set_remove_flag = 1; /* By default, remove trace buffer */
210
211 kd_buf **last_decrementer_kd; /* last DECR_TRAP per cpu */
212 #define MAX_LOG_COUNT 30 /* limits the number of entries dumped in log_decrementer */
213
214 int
215 quit(s)
216 char *s;
217 {
218 void set_enable();
219 void set_rtcdec();
220 void set_remove();
221
222 if (trace_enabled)
223 set_enable(0);
224
225 /*
226 This flag is turned off when calling
227 quit() due to a set_remove() failure.
228 */
229 if (set_remove_flag)
230 set_remove();
231
232 if (decrementer_val)
233 set_rtcdec(0);
234
235 printf("latency: ");
236 if (s)
237 printf("%s", s);
238
239 exit(1);
240 }
241
242 void
243 set_enable(int val)
244 {
245 mib[0] = CTL_KERN;
246 mib[1] = KERN_KDEBUG;
247 mib[2] = KERN_KDENABLE; /* protocol */
248 mib[3] = val;
249 mib[4] = 0;
250 mib[5] = 0; /* no flags */
251
252 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
253 quit("trace facility failure, KERN_KDENABLE\n");
254 }
255
256 void
257 set_numbufs(int nbufs)
258 {
259 mib[0] = CTL_KERN;
260 mib[1] = KERN_KDEBUG;
261 mib[2] = KERN_KDSETBUF;
262 mib[3] = nbufs;
263 mib[4] = 0;
264 mib[5] = 0; /* no flags */
265 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
266 quit("trace facility failure, KERN_KDSETBUF\n");
267
268 mib[0] = CTL_KERN;
269 mib[1] = KERN_KDEBUG;
270 mib[2] = KERN_KDSETUP;
271 mib[3] = 0;
272 mib[4] = 0;
273 mib[5] = 0; /* no flags */
274 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
275 quit("trace facility failure, KERN_KDSETUP\n");
276
277 }
278
279 void
280 set_pidexclude(int pid, int on_off)
281 {
282 kd_regtype kr;
283
284 kr.type = KDBG_TYPENONE;
285 kr.value1 = pid;
286 kr.value2 = on_off;
287 needed = sizeof(kd_regtype);
288 mib[0] = CTL_KERN;
289 mib[1] = KERN_KDEBUG;
290 mib[2] = KERN_KDPIDEX;
291 mib[3] = 0;
292 mib[4] = 0;
293 mib[5] = 0;
294
295 sysctl(mib, 3, &kr, &needed, NULL, 0);
296 }
297
298 void set_rtcdec(decval)
299 int decval;
300 {kd_regtype kr;
301 int ret;
302 extern int errno;
303
304 kr.type = KDBG_TYPENONE;
305 kr.value1 = decval;
306 needed = sizeof(kd_regtype);
307 mib[0] = CTL_KERN;
308 mib[1] = KERN_KDEBUG;
309 mib[2] = KERN_KDSETRTCDEC; /* protocol */
310 mib[3] = 0; /* wildcard address family */
311 mib[4] = 0;
312 mib[5] = 0; /* no flags */
313
314 errno = 0;
315 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
316 {
317 decrementer_val = 0;
318 /* ignore this sysctl error if it's not supported */
319 if (errno == ENOENT)
320 return;
321 else
322 quit("trace facility failure, KERN_KDSETRTCDEC\n");
323 }
324 }
325
326
327 void
328 get_bufinfo(kbufinfo_t *val)
329 {
330 needed = sizeof (*val);
331 mib[0] = CTL_KERN;
332 mib[1] = KERN_KDEBUG;
333 mib[2] = KERN_KDGETBUF;
334 mib[3] = 0;
335 mib[4] = 0;
336 mib[5] = 0; /* no flags */
337
338 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
339 quit("trace facility failure, KERN_KDGETBUF\n");
340
341 }
342
343 void
344 set_remove()
345 {
346 extern int errno;
347
348 mib[0] = CTL_KERN;
349 mib[1] = KERN_KDEBUG;
350 mib[2] = KERN_KDREMOVE; /* protocol */
351 mib[3] = 0;
352 mib[4] = 0;
353 mib[5] = 0; /* no flags */
354
355 errno = 0;
356
357 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
358 {
359 set_remove_flag = 0;
360 if(errno == EBUSY)
361 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
362 else
363 quit("trace facility failure, KERN_KDREMOVE\n");
364 }
365 }
366
367 void
368 set_init_nologging()
369 {
370 /* When we aren't logging, only collect the DECR_TRAP trace points */
371 kd_regtype kr;
372 kr.type = KDBG_VALCHECK;
373 kr.value1 = DECR_TRAP;
374 kr.value2 = 0;
375 kr.value3 = 0;
376 kr.value4 = 0;
377 needed = sizeof(kd_regtype);
378 mib[0] = CTL_KERN;
379 mib[1] = KERN_KDEBUG;
380 mib[2] = KERN_KDSETREG;
381 mib[3] = 0;
382 mib[4] = 0;
383 mib[5] = 0; /* no flags */
384 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
385 quit("trace facility failure, KERN_KDSETREG\n");
386
387 mib[0] = CTL_KERN;
388 mib[1] = KERN_KDEBUG;
389 mib[2] = KERN_KDSETUP;
390 mib[3] = 0;
391 mib[4] = 0;
392 mib[5] = 0; /* no flags */
393
394 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
395 quit("trace facility failure, KERN_KDSETUP\n");
396 }
397
398 void
399 set_init_logging()
400 { kd_regtype kr;
401
402 kr.type = KDBG_RANGETYPE;
403 kr.value1 = 0;
404 kr.value2 = -1;
405 needed = sizeof(kd_regtype);
406 mib[0] = CTL_KERN;
407 mib[1] = KERN_KDEBUG;
408 mib[2] = KERN_KDSETREG;
409 mib[3] = 0;
410 mib[4] = 0;
411 mib[5] = 0; /* no flags */
412
413 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
414 quit("trace facility failure, KERN_KDSETREG\n");
415
416 mib[0] = CTL_KERN;
417 mib[1] = KERN_KDEBUG;
418 mib[2] = KERN_KDSETUP;
419 mib[3] = 0;
420 mib[4] = 0;
421 mib[5] = 0; /* no flags */
422
423 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
424 quit("trace facility failure, KERN_KDSETUP\n");
425 }
426
427
428 void sigwinch()
429 {
430 gotSIGWINCH = 1;
431 }
432
433 void sigintr()
434 {
435 void screen_update();
436
437 set_enable(0);
438 set_pidexclude(getpid(), 0);
439 screen_update(log_fp);
440 endwin();
441 set_rtcdec(0);
442 set_remove();
443
444 exit(1);
445 }
446
447 void leave() /* exit under normal conditions -- signal handler */
448 {
449 set_enable(0);
450 set_pidexclude(getpid(), 0);
451 endwin();
452 set_rtcdec(0);
453 set_remove();
454
455 exit(1);
456 }
457
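/*
 * Dump the latency histograms.  With fp == NULL the curses screen is
 * redrawn in place; otherwise a plain-text snapshot is appended to fp.
 */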
458 void
459 screen_update(FILE *fp)
460 {
461 int i;
462 int itotal, stotal;
463 int elapsed_secs;
464 int elapsed_mins;
465 int elapsed_hours;
466 unsigned int average_s_latency;
467 unsigned int average_i_latency;
468 char tbuf[256];
469
470 if (fp == (FILE *)0) {
471 erase();
472 move(0, 0);
473 } else
474 fprintf(fp,"\n\n===================================================================================================\n");
475 /*
476 * Display the current time.
477 * "ctime" always returns a string that looks like this:
478 *
479 * Sun Sep 16 01:03:52 1973
480 * 012345678901234567890123
481 * 1 2
482 *
483      * The sprintf below keeps the first 19 characters (weekday, date and time).
484 */
485 elapsed_secs = curr_time - start_time;
486 elapsed_hours = elapsed_secs / 3600;
487 elapsed_secs -= elapsed_hours * 3600;
488 elapsed_mins = elapsed_secs / 60;
489 elapsed_secs -= elapsed_mins * 60;
490
491 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
492 (long)elapsed_hours, (long)elapsed_mins, (long)elapsed_secs);
493 if (fp)
494 fprintf(fp, "%s", tbuf);
495 else
496 printw(tbuf);
497
498
499
500 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
501
502 if (fp)
503 fprintf(fp, "%s", tbuf);
504 else
505 printw(tbuf);
506
507 sprintf(tbuf, "---------------------------------------------\n");
508
509 if (fp)
510 fprintf(fp, "%s", tbuf);
511 else
512 printw(tbuf);
513
514
515
516 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
517
518 if (fp)
519 fprintf(fp, "%s", tbuf);
520 else
521 printw(tbuf);
522
523
524
525 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
526 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
527
528 if (fp)
529 fprintf(fp, "%s", tbuf);
530 else
531 printw(tbuf);
532
533 stotal += s_usec_10_bins[i];
534 itotal += i_usec_10_bins[i];
535 }
536 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
537
538 if (fp)
539 fprintf(fp, "%s", tbuf);
540 else
541 printw(tbuf);
542
543
544
545 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
546 if (i < 9)
547 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
548 else
549 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
550
551 if (fp)
552 fprintf(fp, "%s", tbuf);
553 else
554 printw(tbuf);
555
556 stotal += s_usec_100_bins[i];
557 itotal += i_usec_100_bins[i];
558 }
559 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
560
561 if (fp)
562 fprintf(fp, "%s", tbuf);
563 else
564 printw(tbuf);
565
566
567
568 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
569 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
570
571 if (fp)
572 fprintf(fp, "%s", tbuf);
573 else
574 printw(tbuf);
575
576 stotal += s_msec_1_bins[i];
577 itotal += i_msec_1_bins[i];
578 }
579 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
580
581 if (fp)
582 fprintf(fp, "%s", tbuf);
583 else
584 printw(tbuf);
585
586
587
588
589 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
590 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
591
592 if (fp)
593 fprintf(fp, "%s", tbuf);
594 else
595 printw(tbuf);
596
597 stotal += s_msec_10_bins[i];
598 itotal += i_msec_10_bins[i];
599 }
600 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
601
602 if (fp)
603 fprintf(fp, "%s", tbuf);
604 else
605 printw(tbuf);
606
607 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
608
609 if (fp)
610 fprintf(fp, "%s", tbuf);
611 else
612 printw(tbuf);
613
614
615
616 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
617
618 if (fp)
619 fprintf(fp, "%s", tbuf);
620 else
621 printw(tbuf);
622
623 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
624
625 if (fp)
626 fprintf(fp, "%s", tbuf);
627 else
628 printw(tbuf);
629
630 if (s_total_samples)
631 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
632 else
633 average_s_latency = 0;
634
635 if (i_total_samples)
636 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
637 else
638 average_i_latency = 0;
639
640 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
641
642 if (fp)
643 fprintf(fp, "%s", tbuf);
644 else
645 printw(tbuf);
646
647 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
648
649 if (fp)
650 fprintf(fp, "%s", tbuf);
651 else
652 printw(tbuf);
653
654 if (fp == (FILE *)0)
655 refresh();
656 else
657 fflush(fp);
658 }
659
660 int
661 exit_usage()
662 {
663
664 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
665
666 #if defined (__i386__)
667 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs] [-n kernel]\n\n");
668 #else
669 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
670 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
671 #endif
672
673
674 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
675 fprintf(stderr, " -c specify name of codes file\n");
676 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
677 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
678 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
679 fprintf(stderr, " -s set sleep time in microseconds\n");
680 #if !defined (__i386__)
681 fprintf(stderr, " -d set decrementer in microseconds.\n");
682 #endif
683 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
684
685 fprintf(stderr, "\nlatency must be run as root\n\n");
686
687 exit(1);
688 }
689
690
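/*
 * main: parse the options, build the kernel symbol table, set up the
 * kdebug trace facility, switch to the requested scheduling policy and
 * then loop forever, sleeping and measuring how late each wakeup is.
 */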
691 int
692 main(argc, argv)
693 int argc;
694 char *argv[];
695 {
696 uint64_t start, stop;
697 uint64_t timestamp1;
698 uint64_t timestamp2;
699 uint64_t adeadline, adelay;
700 double fdelay;
701 int elapsed_usecs;
702 double nanosecs_to_sleep;
703 int loop_cnt, sample_sc_now;
704 int decrementer_usec = 0;
705 kern_return_t ret;
706 unsigned int size;
707 host_name_port_t host;
708 void getdivisor();
709 void sample_sc();
710 void init_code_file();
711 void do_kernel_nm();
712 void open_logfile();
713
714 my_policy = THREAD_STANDARD_POLICY;
715 policy_name = "TIMESHARE";
716
717 while (argc > 1) {
718 if (strcmp(argv[1], "-rt") == 0) {
719 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
720 policy_name = "REALTIME";
721
722 } else if (strcmp(argv[1], "-st") == 0) {
723 argc--;
724 argv++;
725
726 if (argc > 1)
727 s_thresh_hold = atoi(argv[1]);
728 else
729 exit_usage();
730
731 } else if (strcmp(argv[1], "-it") == 0) {
732 argc--;
733 argv++;
734
735 if (argc > 1)
736 i_thresh_hold = atoi(argv[1]);
737 else
738 exit_usage();
739 } else if (strcmp(argv[1], "-c") == 0) {
740 argc--;
741 argv++;
742
743 if (argc > 1)
744 code_file = argv[1];
745 else
746 exit_usage();
747 } else if (strcmp(argv[1], "-l") == 0) {
748 argc--;
749 argv++;
750
751 if (argc > 1)
752 open_logfile(argv[1]);
753 else
754 exit_usage();
755
756 } else if (strcmp(argv[1], "-s") == 0) {
757 argc--;
758 argv++;
759
760 if (argc > 1)
761 num_of_usecs_to_sleep = atoi(argv[1]);
762 else
763 exit_usage();
764 }
765 else if (strcmp(argv[1], "-d") == 0) {
766 argc--;
767 argv++;
768
769 if (argc > 1)
770 decrementer_usec = atoi(argv[1]);
771 else
772 exit_usage();
773 #if defined(__i386__)
774 /* ignore this option - setting the decrementer has no effect */
775 decrementer_usec = 0;
776 #endif
777 }
778 else if (strcmp(argv[1], "-n") == 0) {
779 argc--;
780 argv++;
781
782 if (argc > 1)
783 kernelpath = argv[1];
784 else
785 exit_usage();
786 } else
787 exit_usage();
788
789 argc--;
790 argv++;
791 }
792
793 if ( geteuid() != 0 ) {
794 printf("'latency' must be run as root...\n");
795 exit(1);
796 }
797
798 if (kernelpath == (char *) 0)
799 kernelpath = "/mach_kernel";
800
801 if (code_file == (char *) 0)
802 code_file = "/usr/share/misc/trace.codes";
803
804 do_kernel_nm();
805
806 sample_sc_now = 25000 / num_of_usecs_to_sleep;
807
808 getdivisor();
809 decrementer_val = decrementer_usec * divisor;
810
811        /* get the cpu count for the DECR_TRAP array */
812 host = mach_host_self();
813 size = sizeof(hi)/sizeof(int);
814 ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
815 if (ret != KERN_SUCCESS) {
816 mach_error(argv[0], ret);
817 exit(EXIT_FAILURE);
818 }
819
820 if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
821 quit("can't allocate memory for decrementer tracing info\n");
822
823 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
824 fdelay = nanosecs_to_sleep * (divisor /1000);
825 adelay = (uint64_t)fdelay;
826
827 init_code_file();
828
829 /*
830 When the decrementer isn't set in the options,
831 decval will be zero and this call will reset
832 the system default ...
833 */
834 set_rtcdec(decrementer_val);
835
836 if (initscr() == (WINDOW *) 0)
837 {
838 printf("Unrecognized TERM type, try vt100\n");
839 exit(1);
840 }
841
842 clear();
843 refresh();
844 signal(SIGWINCH, sigwinch);
845 signal(SIGINT, sigintr);
846 signal(SIGQUIT, leave);
847 signal(SIGTERM, leave);
848 signal(SIGHUP, leave);
849
850
851 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
852 quit("can't allocate memory for tracing info\n");
853 set_remove();
854 set_numbufs(SAMPLE_SIZE);
855 set_enable(0);
856 if(log_fp)
857 set_init_logging();
858 else
859 set_init_nologging();
860 set_pidexclude(getpid(), 1);
861 set_enable(1);
862 trace_enabled = 1;
863 need_new_map = 1;
864
865 loop_cnt = 0;
866 start_time = time((long *)0);
867 refresh_time = start_time;
868
869 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
870 {
871 /* the realtime band */
872 if(set_time_constraint_policy() != KERN_SUCCESS)
873 quit("Failed to set realtime policy.\n");
874 }
875
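/*
 * Measurement loop: request a sleep of num_of_usecs_to_sleep with
 * mach_wait_until(), then treat any overshoot beyond the requested
 * sleep as scheduler latency and bin it.  Interrupt latency is
 * gathered separately from DECR_TRAP entries in handle_decrementer().
 */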
876 for (;;) {
877 curr_time = time((long *)0);
878
879 if (curr_time >= refresh_time) {
880 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
881 {
882 /* set standard timeshare policy during screen update */
883 if(set_standard_policy() != KERN_SUCCESS)
884 quit("Failed to set standard policy.\n");
885 }
886 screen_update((FILE *)0);
887 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
888 {
889 /* set back to realtime band */
890 if(set_time_constraint_policy() != KERN_SUCCESS)
891 quit("Failed to set time_constraint policy.\n");
892 }
893 refresh_time = curr_time + 1;
894 }
895
896 timestamp1 = mach_absolute_time();
897 adeadline = timestamp1 + adelay;
898 mach_wait_until(adeadline);
899 timestamp2 = mach_absolute_time();
900
901 start = timestamp1;
902
903 stop = timestamp2;
904
905 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
906
907 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
908 continue;
909
910 if (elapsed_usecs < 100)
911 s_usec_10_bins[elapsed_usecs/10]++;
912 if (elapsed_usecs < 1000)
913 s_usec_100_bins[elapsed_usecs/100]++;
914 else if (elapsed_usecs < 10000)
915 s_msec_1_bins[elapsed_usecs/1000]++;
916 else if (elapsed_usecs < 50000)
917 s_msec_10_bins[elapsed_usecs/10000]++;
918 else
919 s_too_slow++;
920
921 if (elapsed_usecs > s_max_latency)
922 s_max_latency = elapsed_usecs;
923 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
924 s_min_latency = elapsed_usecs;
925 s_total_latency += elapsed_usecs;
926 s_total_samples++;
927
928 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
929 s_exceeded_threshold++;
930 loop_cnt++;
931
932 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
933 sample_sc(start, stop);
934 else {
935 if (loop_cnt >= sample_sc_now) {
936 sample_sc((long long)0, (long long)0);
937 loop_cnt = 0;
938 }
939 }
940 if (gotSIGWINCH) {
941 /*
942 No need to check for initscr error return.
943 We won't get here if it fails on the first call.
944 */
945 endwin();
946 clear();
947 refresh();
948
949 gotSIGWINCH = 0;
950 }
951 }
952 }
953
954
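/*
 * Compute the factor that converts mach_absolute_time() units to
 * microseconds:  elapsed_usecs = elapsed_abstime / divisor.
 */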
955 void getdivisor()
956 {
957 mach_timebase_info_data_t info;
958
959 (void) mach_timebase_info (&info);
960
961 divisor = ( (double)info.denom / (double)info.numer) * 1000;
962
963 }
964
965 /* This is the realtime band */
966 static kern_return_t
967 set_time_constraint_policy()
968 {
969 kern_return_t result;
970 thread_time_constraint_policy_data_t info;
971 mach_msg_type_number_t count;
972 boolean_t get_default;
973
974 get_default = TRUE;
975 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
976 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
977 (thread_policy_t)&info, &count, &get_default);
978 if (result != KERN_SUCCESS)
979 return (result);
980
981 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
982 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
983
984 return (result);
985 }
986
987 /* This is the timeshare mode */
988 static kern_return_t
989 set_standard_policy()
990 {
991 kern_return_t result;
992 thread_standard_policy_data_t info;
993 mach_msg_type_number_t count;
994 boolean_t get_default;
995
996 get_default = TRUE;
997 count = THREAD_STANDARD_POLICY_COUNT;
998 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
999 (thread_policy_t)&info, &count, &get_default);
1000 if (result != KERN_SUCCESS)
1001 return (result);
1002
1003 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
1004 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
1005
1006 return (result);
1007 }
1008
1009
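/*
 * Fetch the kernel's thread-to-command map (KERN_KDTHRMAP) so that
 * trace entries can be labelled with the owning process name.
 * Failure is non-fatal; we simply lose the command names.
 */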
1010 void read_command_map()
1011 {
1012 size_t size;
1013 int mib[6];
1014
1015 if (mapptr) {
1016 free(mapptr);
1017 mapptr = 0;
1018 }
1019 total_threads = bufinfo.nkdthreads;
1020 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1021 if (size)
1022 {
1023 if ((mapptr = (kd_threadmap *) malloc(size)))
1024 bzero (mapptr, size);
1025 else
1026 {
1027 printf("Thread map is not initialized -- this is not fatal\n");
1028 return;
1029 }
1030 }
1031
1032 /* Now read the threadmap */
1033 mib[0] = CTL_KERN;
1034 mib[1] = KERN_KDEBUG;
1035 mib[2] = KERN_KDTHRMAP;
1036 mib[3] = 0;
1037 mib[4] = 0;
1038 mib[5] = 0; /* no flags */
1039 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1040 {
1041            /* This is not fatal -- just means I can't map command strings */
1042
1043 printf("Can't read the thread map -- this is not fatal\n");
1044 free(mapptr);
1045 mapptr = 0;
1046 return;
1047 }
1048 return;
1049 }
1050
1051
1052 void create_map_entry(int thread, char *command)
1053 {
1054 int i, n;
1055 kd_threadmap *map;
1056
1057 if (!mapptr)
1058 return;
1059
1060 for (i = 0, map = 0; !map && i < total_threads; i++)
1061 {
1062 if (mapptr[i].thread == thread )
1063 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1064 }
1065
1066        if (!map)    /* look for invalid entries that I can reuse */
1067 {
1068 for (i = 0, map = 0; !map && i < total_threads; i++)
1069 {
1070 if (mapptr[i].valid == 0 )
1071 map = &mapptr[i]; /* Reuse this invalid entry */
1072 }
1073 }
1074
1075 if (!map)
1076 {
1077            /* If we reach here, then this is a new thread and
1078 * there are no invalid entries to reuse
1079 * Double the size of the thread map table.
1080 */
1081
1082 n = total_threads * 2;
1083 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1084 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1085 map = &mapptr[total_threads];
1086 total_threads = n;
1087 #if 0
1088 if (log_fp)
1089 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1090 #endif
1091 }
1092 #if 0
1093 if (log_fp)
1094 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1095 #endif
1096 map->valid = 1;
1097 map->thread = thread;
1098 /*
1099 The trace entry that returns the command name will hold
1100          at most MAXCOMLEN chars, and in that case is not
1101 guaranteed to be null terminated.
1102 */
1103 (void)strncpy (map->command, command, MAXCOMLEN);
1104 map->command[MAXCOMLEN] = '\0';
1105 }
1106
1107
1108 kd_threadmap *find_thread_map(int thread)
1109 {
1110 int i;
1111 kd_threadmap *map;
1112
1113 if (!mapptr)
1114 return((kd_threadmap *)0);
1115
1116 for (i = 0; i < total_threads; i++)
1117 {
1118 map = &mapptr[i];
1119 if (map->valid && (map->thread == thread))
1120 {
1121 return(map);
1122 }
1123 }
1124 return ((kd_threadmap *)0);
1125 }
1126
1127 void
1128 kill_thread_map(int thread)
1129 {
1130 kd_threadmap *map;
1131
1132 if ((map = find_thread_map(thread))) {
1133
1134 #if 0
1135 if (log_fp)
1136 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1137 #endif
1138 map->valid = 0;
1139 map->thread = 0;
1140 map->command[0] = '\0';
1141 }
1142 }
1143
1144
1145 struct th_info *find_thread(int thread, int type1, int type2) {
1146 struct th_info *ti;
1147
1148 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1149 if (ti->thread == thread) {
1150 if (type1 == 0)
1151 return(ti);
1152 if (type1 == ti->type)
1153 return(ti);
1154 if (type2 == ti->type)
1155 return(ti);
1156 }
1157 }
1158 return ((struct th_info *)0);
1159 }
1160
1161
1162 char *find_code(type)
1163 {
1164 int i;
1165
1166 for (i = 0; i < num_of_codes; i++) {
1167 if (codes_tab[i].type == type)
1168 return(codes_tab[i].name);
1169 }
1170 return ((char *)0);
1171 }
1172
1173
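/*
 * Drain the kernel trace buffer (KERN_KDREADTR).  Entries that fall
 * between 'start' and 'stop' are formatted to the log (when one is open);
 * entries outside that window still update the per-cpu DECR_TRAP
 * bookkeeping, and every DECR_TRAP is fed to handle_decrementer().
 */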
1174 void sample_sc(uint64_t start, uint64_t stop)
1175 {
1176 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1177 uint64_t now;
1178 int count, i;
1179 int first_entry = 1;
1180 double timestamp = 0.0;
1181 double last_timestamp = 0.0;
1182 double delta = 0.0;
1183 double start_bias = 0.0;
1184 char command[32];
1185 void read_command_map();
1186
1187 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1188 {
1189 /* set standard timeshare policy when logging */
1190 if(set_standard_policy() != KERN_SUCCESS)
1191 quit("Failed to set standard policy.\n");
1192 }
1193
1194 /* Get kernel buffer information */
1195 get_bufinfo(&bufinfo);
1196
1197 if (need_new_map) {
1198 read_command_map();
1199 need_new_map = 0;
1200 }
1201 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1202 mib[0] = CTL_KERN;
1203 mib[1] = KERN_KDEBUG;
1204 mib[2] = KERN_KDREADTR;
1205 mib[3] = 0;
1206 mib[4] = 0;
1207 mib[5] = 0; /* no flags */
1208
1209 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1210 quit("trace facility failure, KERN_KDREADTR\n");
1211
1212 count = needed;
1213
1214 if (bufinfo.flags & KDBG_WRAPPED) {
1215 for (i = 0; i < cur_max; i++) {
1216 th_state[i].thread = 0;
1217 th_state[i].type = -1;
1218 th_state[i].pathptr = (long *)NULL;
1219 th_state[i].pathname[0] = 0;
1220 }
1221 cur_max = 0;
1222 need_new_map = 1;
1223
1224 set_enable(0);
1225 set_enable(1);
1226
1227 if (log_fp) {
1228 double latency;
1229
1230 latency = (double)(stop - start) / divisor;
1231 latency -= (double)num_of_usecs_to_sleep;
1232
1233 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1234 &(ctime(&curr_time)[0]), latency, count);
1235 }
1236 }
1237 end_of_sample = &((kd_buf *)my_buffer)[count];
1238
1239 /* Always reinitialize the DECR_TRAP array */
1240 for (i=0; i < hi.avail_cpus; i++)
1241 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1242
1243 last_mach_sched = (kd_buf *)0;
1244
1245 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1246 int debugid, thread, cpunum;
1247 int type, clen, mode;
1248 int len;
1249 char *p;
1250 long *sargptr;
1251 kd_buf *cur_kd;
1252 double i_latency = 0.0;
1253 struct th_info *ti;
1254 char command1[32];
1255 char sched_info[64];
1256 kd_threadmap *map;
1257 kd_threadmap *find_thread_map();
1258 double handle_decrementer();
1259 kd_buf *log_decrementer();
1260 int check_for_thread_update();
1261 void enter_syscall();
1262 void exit_syscall();
1263 void print_entry();
1264
1265 thread = kd->arg5;
1266 cpunum = CPU_NUMBER(kd->timestamp);
1267 debugid = kd->debugid;
1268 type = kd->debugid & DBG_FUNC_MASK;
1269
1270 if (check_for_thread_update(thread, type, kd))
1271 continue;
1272
1273 if (type == DECR_TRAP)
1274 i_latency = handle_decrementer(kd);
1275
1276 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1277
1278 timestamp = ((double)now) / divisor;
1279
1280 if (now < start || now > stop) {
1281 if (debugid & DBG_FUNC_START)
1282 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1283 else if (debugid & DBG_FUNC_END)
1284 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1285 else if (type == DECR_TRAP) {
1286 cur_kd = kd;
1287 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1288 start_kd = last_decrementer_kd[cpunum];
1289 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1290 if (kd >= end_of_sample)
1291 break;
1292 }
1293 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1294 {
1295 cpunum = CPU_NUMBER(kd->timestamp);
1296 last_decrementer_kd[cpunum] = kd;
1297 }
1298 else
1299 last_decrementer_kd[cpunum] = cur_kd;
1300 }
1301 continue;
1302 }
1303 if (first_entry) {
1304 double latency;
1305 char buf1[132];
1306 char buf2[132];
1307
1308 latency = (double)(stop - start) / divisor;
1309 latency -= (double)num_of_usecs_to_sleep;
1310
1311 if (my_pri == -1)
1312 sprintf(buf2, "default");
1313 else
1314 sprintf(buf2, "%d", my_pri);
1315 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1316 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1317 clen = strlen(buf1);
1318 memset(buf2, '-', clen);
1319 buf2[clen] = 0;
1320
1321 if (log_fp) {
1322 fprintf(log_fp, "\n\n%s\n", buf2);
1323 fprintf(log_fp, "%s\n\n", buf1);
1324 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1325 }
1326 start_bias = ((double)start) / divisor;
1327 last_timestamp = timestamp;
1328 first_entry = 0;
1329 }
1330 delta = timestamp - last_timestamp;
1331
1332 if ((map = find_thread_map(thread)))
1333 strcpy(command, map->command);
1334 else
1335 command[0] = 0;
1336
1337 switch (type) {
1338
1339 case CQ_action:
1340 if (log_fp) {
1341 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1342 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1343 }
1344 last_timestamp = timestamp;
1345 break;
1346
1347 case TES_action:
1348 if (log_fp) {
1349 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1350 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1351 }
1352
1353 last_timestamp = timestamp;
1354 break;
1355
1356 case IES_action:
1357 if (log_fp) {
1358 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1359 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1360 }
1361
1362 last_timestamp = timestamp;
1363 break;
1364
1365 case IES_filter:
1366 if (log_fp) {
1367 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1368 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1369 }
1370
1371 last_timestamp = timestamp;
1372 break;
1373
1374 case DECR_TRAP:
1375 last_decrementer_kd[cpunum] = kd;
1376
1377 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1378 p = "*";
1379 else
1380 p = " ";
1381
1382 mode = 1;
1383
1384 if ((ti = find_thread(kd->arg5, 0, 0))) {
1385 if (ti->type == -1 && strcmp(command, "kernel_task"))
1386 mode = 0;
1387 }
1388
1389 if (log_fp) {
1390 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1391 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1392 }
1393
1394 last_timestamp = timestamp;
1395 break;
1396
1397 case DECR_SET:
1398 if (log_fp) {
1399 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1400 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1401 }
1402
1403 last_timestamp = timestamp;
1404 break;
1405
1406 case MACH_sched:
1407 case MACH_stkhandoff:
1408 last_mach_sched = kd;
1409
1410 if ((map = find_thread_map(kd->arg2)))
1411 strcpy(command1, map->command);
1412 else
1413 sprintf(command1, "%-8x", kd->arg2);
1414
1415 if ((ti = find_thread(kd->arg2, 0, 0))) {
1416 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1417 p = "U";
1418 else
1419 p = "K";
1420 } else
1421 p = "*";
1422 memset(sched_info, ' ', sizeof(sched_info));
1423
1424 sprintf(sched_info, "%14.14s", command);
1425 clen = strlen(sched_info);
1426 sched_info[clen] = ' ';
1427
1428 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1429 clen = strlen(sched_info);
1430 sched_info[clen] = ' ';
1431
1432 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1433
1434 if (log_fp) {
1435 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1436 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1437 }
1438
1439 last_timestamp = timestamp;
1440 break;
1441
1442 case VFS_LOOKUP:
1443 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1444 if (cur_max >= MAX_THREADS)
1445 continue;
1446 ti = &th_state[cur_max++];
1447
1448 ti->thread = thread;
1449 ti->type = -1;
1450 ti->pathptr = (long *)0;
1451 ti->child_thread = 0;
1452 }
1453 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1454 {
1455 if (ti->pathptr == NULL) {
1456 ti->arg1 = kd->arg1;
1457 sargptr = ti->pathname;
1458
1459 *sargptr++ = kd->arg2;
1460 *sargptr++ = kd->arg3;
1461 *sargptr++ = kd->arg4;
1462 /*
1463 * NULL terminate the 'string'
1464 */
1465 *sargptr = 0;
1466 ti->pathptr = sargptr;
1467
1468 } else {
1469 sargptr = ti->pathptr;
1470
1471 /*
1472 We don't want to overrun our pathname buffer if the
1473 kernel sends us more VFS_LOOKUP entries than we can
1474 handle.
1475 */
1476
1477 if (sargptr >= &ti->pathname[NUMPARMS])
1478 {
1479 kd++;
1480 continue;
1481 }
1482
1483 /*
1484 We need to detect consecutive vfslookup entries.
1485 So, if we get here and find a START entry,
1486 fake the pathptr so we can bypass all further
1487 vfslookup entries.
1488 */
1489
1490 if (kd->debugid & DBG_FUNC_START)
1491 {
1492 ti->pathptr = &ti->pathname[NUMPARMS];
1493 }
1494 else
1495 {
1496 *sargptr++ = kd->arg1;
1497 *sargptr++ = kd->arg2;
1498 *sargptr++ = kd->arg3;
1499 *sargptr++ = kd->arg4;
1500 /*
1501 * NULL terminate the 'string'
1502 */
1503 *sargptr = 0;
1504
1505 ti->pathptr = sargptr;
1506 }
1507 }
1508 kd++;
1509 }
1510 p = (char *)ti->pathname;
1511
1512 kd--;
1513
1514 /* print the tail end of the pathname */
1515 len = strlen(p);
1516 if (len > 42)
1517 len -= 42;
1518 else
1519 len = 0;
1520
1521 if (log_fp) {
1522 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1523 timestamp - start_bias, delta, "VFS_LOOKUP",
1524 &p[len], ti->arg1, thread, cpunum, command);
1525 }
1526
1527 last_timestamp = timestamp;
1528 break;
1529
1530 default:
1531 if (debugid & DBG_FUNC_START)
1532 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1533 else if (debugid & DBG_FUNC_END)
1534 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1535 else
1536 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1537
1538 last_timestamp = timestamp;
1539 break;
1540 }
1541 }
1542 if (last_mach_sched && log_fp)
1543 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1544 #if 0
1545 if (first_entry == 0 && log_fp)
1546 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1547 #endif
1548 if (log_fp)
1549 fflush(log_fp);
1550
1551 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1552 {
1553 /* set back to realtime band */
1554 if(set_time_constraint_policy() != KERN_SUCCESS)
1555 quit("Failed to set time_constraint policy.\n");
1556 }
1557 }
1558
1559 void
1560 enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1561 {
1562 struct th_info *ti;
1563 int i;
1564 int cpunum;
1565 char *p;
1566
1567 cpunum = CPU_NUMBER(kd->timestamp);
1568
1569 if (print_info && fp) {
1570 if ((p = find_code(type))) {
1571 if (type == INTERRUPT) {
1572 int mode = 1;
1573
1574 if ((ti = find_thread(kd->arg5, 0, 0))) {
1575 if (ti->type == -1 && strcmp(command, "kernel_task"))
1576 mode = 0;
1577 }
1578
1579 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1580 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1581 } else if (type == MACH_vmfault) {
1582 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1583 timestamp - bias, delta, p, thread, cpunum, command);
1584 } else {
1585 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1586 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1587 thread, cpunum, command);
1588 }
1589 } else {
1590 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1591 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1592 thread, cpunum, command);
1593 }
1594 }
1595 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1596 if (cur_max >= MAX_THREADS) {
1597 static int do_this_once = 1;
1598
1599 if (do_this_once) {
1600 for (i = 0; i < cur_max; i++) {
1601 if (!fp)
1602 break;
1603 fprintf(fp, "thread = %x, type = %x\n",
1604 th_state[i].thread, th_state[i].type);
1605 }
1606 do_this_once = 0;
1607 }
1608 return;
1609
1610 }
1611 ti = &th_state[cur_max++];
1612
1613 ti->thread = thread;
1614 ti->child_thread = 0;
1615 }
1616 if (type != BSC_exit)
1617 ti->type = type;
1618 else
1619 ti->type = -1;
1620 ti->stime = timestamp;
1621 ti->pathptr = (long *)NULL;
1622
1623 #if 0
1624 if (print_info && fp)
1625 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1626 #endif
1627 }
1628
1629
1630 void
1631 exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1632 {
1633 struct th_info *ti;
1634 int cpunum;
1635 char *p;
1636
1637 cpunum = CPU_NUMBER(kd->timestamp);
1638
1639 ti = find_thread(thread, type, type);
1640 #if 0
1641 if (print_info && fp)
1642 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1643 #endif
1644 if (print_info && fp) {
1645 if (ti)
1646 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1647 else
1648 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1649
1650 if ((p = find_code(type))) {
1651 if (type == INTERRUPT) {
1652 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1653 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1654 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1655 p, fault_name[kd->arg2], kd->arg1,
1656 thread, cpunum, command);
1657 } else {
1658 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1659 p, kd->arg1, kd->arg2,
1660 thread, cpunum, command);
1661 }
1662 } else {
1663 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1664 type, kd->arg1, kd->arg2,
1665 thread, cpunum, command);
1666 }
1667 }
1668 if (ti == (struct th_info *)0) {
1669 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1670 if (cur_max >= MAX_THREADS)
1671 return;
1672 ti = &th_state[cur_max++];
1673
1674 ti->thread = thread;
1675 ti->child_thread = 0;
1676 ti->pathptr = (long *)NULL;
1677 }
1678 }
1679 ti->type = -1;
1680 }
1681
1682 void
1683 print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1684 {
1685 char *p;
1686 int cpunum;
1687
1688 if (!fp)
1689 return;
1690
1691 cpunum = CPU_NUMBER(kd->timestamp);
1692 #if 0
1693 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1694 #endif
1695 if ((p = find_code(type))) {
1696 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1697 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1698 thread, cpunum, command);
1699 } else {
1700 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1701 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1702 thread, cpunum, command);
1703 }
1704 }
1705
1706 int
1707 check_for_thread_update(int thread, int type, kd_buf *kd)
1708 {
1709 struct th_info *ti;
1710 void create_map_entry();
1711
1712 switch (type) {
1713
1714 case TRACE_DATA_NEWTHREAD:
1715 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1716 if (cur_max >= MAX_THREADS)
1717 return (1);
1718 ti = &th_state[cur_max++];
1719
1720 ti->thread = thread;
1721 ti->type = -1;
1722 ti->pathptr = (long *)NULL;
1723 }
1724 ti->child_thread = kd->arg1;
1725 return (1);
1726
1727 case TRACE_STRING_NEWTHREAD:
1728 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1729 return (1);
1730 if (ti->child_thread == 0)
1731 return (1);
1732 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1733
1734 ti->child_thread = 0;
1735 return (1);
1736
1737 case TRACE_STRING_EXEC:
1738 create_map_entry(thread, (char *)&kd->arg1);
1739 return (1);
1740
1741 }
1742 return (0);
1743 }
1744
1745
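/*
 * Called when a DECR_TRAP's latency exceeds the -it threshold: dump a
 * bounded window of trace entries surrounding the offending trap to the
 * log file and return the last entry consumed, so the caller can resume
 * scanning after it.
 */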
1746 kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1747 {
1748 kd_buf *kd, *kd_start, *kd_stop;
1749 int kd_count; /* Limit the boundary of kd_start */
1750 double timestamp = 0.0;
1751 double last_timestamp = 0.0;
1752 double delta = 0.0;
1753 double start_bias = 0.0;
1754 int thread, cpunum;
1755 int debugid, type, clen;
1756 int len;
1757 uint64_t now;
1758 struct th_info *ti;
1759 long *sargptr;
1760 char *p;
1761 char command[32];
1762 char command1[32];
1763 char sched_info[64];
1764 char buf1[128];
1765 char buf2[128];
1766 kd_threadmap *map;
1767 kd_threadmap *find_thread_map();
1768
1769 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1770 clen = strlen(buf1);
1771 memset(buf2, '-', clen);
1772 buf2[clen] = 0;
1773 fprintf(log_fp, "\n\n%s\n", buf2);
1774 fprintf(log_fp, "%s\n\n", buf1);
1775
1776 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1777
1778 thread = kd_beg->arg5;
1779 cpunum = CPU_NUMBER(kd_end->timestamp);
1780
1781 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1782 if (kd_count == MAX_LOG_COUNT)
1783 break;
1784
1785 if (CPU_NUMBER(kd_start->timestamp) != cpunum)
1786 continue;
1787
1788 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1789 break;
1790
1791 if (kd_start->arg5 != thread)
1792 break;
1793 }
1794
1795 if (kd_start < (kd_buf *)my_buffer)
1796 kd_start = (kd_buf *)my_buffer;
1797
1798 thread = kd_end->arg5;
1799
1800 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1801
1802 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1803 break;
1804
1805 if (CPU_NUMBER(kd_stop->timestamp) != cpunum)
1806 continue;
1807
1808 if (kd_stop->arg5 != thread)
1809 break;
1810 }
1811
1812 if (kd_stop >= end_of_sample)
1813 kd_stop = end_of_sample - 1;
1814
1815 now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
1816 timestamp = ((double)now) / divisor;
1817
1818 for (kd = kd_start; kd <= kd_stop; kd++) {
1819 type = kd->debugid & DBG_FUNC_MASK;
1820
1821 if ((ti = find_thread(kd->arg5, type, type))) {
1822 if (ti->stime >= timestamp)
1823 ti->type = -1;
1824 }
1825 }
1826 for (kd = kd_start; kd <= kd_stop; kd++) {
1827 int mode;
1828
1829 thread = kd->arg5;
1830 cpunum = CPU_NUMBER(kd->timestamp);
1831 debugid = kd->debugid;
1832 type = kd->debugid & DBG_FUNC_MASK;
1833
1834 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1835
1836 timestamp = ((double)now) / divisor;
1837
1838 if (kd == kd_start) {
1839 start_bias = timestamp;
1840 last_timestamp = timestamp;
1841 }
1842 delta = timestamp - last_timestamp;
1843
1844 if ((map = find_thread_map(thread)))
1845 strcpy(command, map->command);
1846 else
1847 command[0] = 0;
1848
1849
1850 switch (type) {
1851
1852 case CQ_action:
1853 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1854 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1855
1856 last_timestamp = timestamp;
1857 break;
1858
1859 case DECR_TRAP:
1860 if ((int)(kd->arg1) >= 0)
1861 i_latency = 0;
1862 else
1863 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1864
1865 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1866 p = "*";
1867 else
1868 p = " ";
1869
1870 mode = 1;
1871
1872 if ((ti = find_thread(kd->arg5, 0, 0))) {
1873 if (ti->type == -1 && strcmp(command, "kernel_task"))
1874 mode = 0;
1875 }
1876 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1877 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1878
1879 last_timestamp = timestamp;
1880 break;
1881
1882 case DECR_SET:
1883 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1884 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1885 "DECR_SET", thread, cpunum, command);
1886
1887 last_timestamp = timestamp;
1888 break;
1889
1890 case MACH_sched:
1891 case MACH_stkhandoff:
1892 if ((map = find_thread_map(kd->arg2)))
1893 strcpy(command1, map->command);
1894 else
1895 sprintf(command1, "%-8x", kd->arg2);
1896
1897 if ((ti = find_thread(kd->arg2, 0, 0))) {
1898 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1899 p = "U";
1900 else
1901 p = "K";
1902 } else
1903 p = "*";
1904 memset(sched_info, ' ', sizeof(sched_info));
1905
1906 sprintf(sched_info, "%14.14s", command);
1907 clen = strlen(sched_info);
1908 sched_info[clen] = ' ';
1909
1910 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1911 clen = strlen(sched_info);
1912 sched_info[clen] = ' ';
1913
1914 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1915
1916 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1917 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1918
1919 last_timestamp = timestamp;
1920 break;
1921
1922 case VFS_LOOKUP:
1923 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1924 if (cur_max >= MAX_THREADS)
1925 continue;
1926 ti = &th_state[cur_max++];
1927
1928 ti->thread = thread;
1929 ti->type = -1;
1930 ti->pathptr = (long *)NULL;
1931 ti->child_thread = 0;
1932 }
1933
1934 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1935 {
1936 if (ti->pathptr == NULL) {
1937 ti->arg1 = kd->arg1;
1938 sargptr = ti->pathname;
1939
1940 *sargptr++ = kd->arg2;
1941 *sargptr++ = kd->arg3;
1942 *sargptr++ = kd->arg4;
1943 /*
1944 * NULL terminate the 'string'
1945 */
1946 *sargptr = 0;
1947
1948 ti->pathptr = sargptr;
1949
1950 } else {
1951 sargptr = ti->pathptr;
1952
1953 /*
1954 We don't want to overrun our pathname buffer if the
1955 kernel sends us more VFS_LOOKUP entries than we can
1956 handle.
1957 */
1958
1959 if (sargptr >= &ti->pathname[NUMPARMS])
1960 {
1961 kd++;
1962 continue;
1963 }
1964
1965 /*
1966 We need to detect consecutive vfslookup entries.
1967 So, if we get here and find a START entry,
1968 fake the pathptr so we can bypass all further
1969 vfslookup entries.
1970 */
1971
1972 if (kd->debugid & DBG_FUNC_START)
1973 {
1974 ti->pathptr = &ti->pathname[NUMPARMS];
1975 }
1976 else
1977 {
1978 *sargptr++ = kd->arg1;
1979 *sargptr++ = kd->arg2;
1980 *sargptr++ = kd->arg3;
1981 *sargptr++ = kd->arg4;
1982 /*
1983 * NULL terminate the 'string'
1984 */
1985 *sargptr = 0;
1986
1987 ti->pathptr = sargptr;
1988 }
1989 }
1990 kd++;
1991 }
1992 p = (char *)ti->pathname;
1993
1994 kd--;
1995 /* print the tail end of the pathname */
1996 len = strlen(p);
1997 if (len > 42)
1998 len -= 42;
1999 else
2000 len = 0;
2001
2002 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
2003 timestamp - start_bias, delta, "VFS_LOOKUP",
2004 &p[len], ti->arg1, thread, cpunum, command);
2005
2006 last_timestamp = timestamp;
2007 break;
2008
2009 default:
2010 if (debugid & DBG_FUNC_START)
2011 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
2012 else if (debugid & DBG_FUNC_END)
2013 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
2014 else
2015 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
2016
2017 last_timestamp = timestamp;
2018 break;
2019 }
2020 }
2021 return(kd_stop);
2022 }
2023
2024
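/*
 * A DECR_TRAP entry's arg1 is treated here as how late the decrementer
 * fired: a negative value is the lateness in mach time units.  Convert
 * it to microseconds, update the interrupt-latency bins and the
 * min/max/total counters, and return the latency.
 */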
2025 double handle_decrementer(kd_buf *kd)
2026 {
2027 double latency;
2028 int elapsed_usecs;
2029
2030 if ((int)(kd->arg1) >= 0)
2031 latency = 1;
2032 else
2033 latency = (((double)(-1 - kd->arg1)) / divisor);
2034 elapsed_usecs = (int)latency;
2035
2036 if (elapsed_usecs < 100)
2037 i_usec_10_bins[elapsed_usecs/10]++;
2038 if (elapsed_usecs < 1000)
2039 i_usec_100_bins[elapsed_usecs/100]++;
2040 else if (elapsed_usecs < 10000)
2041 i_msec_1_bins[elapsed_usecs/1000]++;
2042 else if (elapsed_usecs < 50000)
2043 i_msec_10_bins[elapsed_usecs/10000]++;
2044 else
2045 i_too_slow++;
2046
2047 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2048 i_exceeded_threshold++;
2049 if (elapsed_usecs > i_max_latency)
2050 i_max_latency = elapsed_usecs;
2051 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2052 i_min_latency = elapsed_usecs;
2053 i_total_latency += elapsed_usecs;
2054 i_total_samples++;
2055
2056 return (latency);
2057 }
2058
2059
2060 void init_code_file()
2061 {
2062 FILE *fp;
2063 int i, n, cnt, code;
2064 char name[128];
2065
2066 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2067 if (log_fp)
2068 fprintf(log_fp, "open of %s failed\n", code_file);
2069 return;
2070 }
2071 n = fscanf(fp, "%d\n", &cnt);
2072
2073 if (n != 1) {
2074 if (log_fp)
2075 fprintf(log_fp, "bad format found in %s\n", code_file);
2076 return;
2077 }
2078 for (i = 0; i < MAX_ENTRIES; i++) {
2079 n = fscanf(fp, "%x%s\n", &code, name);
2080
2081 if (n != 2)
2082 break;
2083
2084         strncpy(codes_tab[i].name, name, 31);  /* leave room for the terminating NUL */
2085 codes_tab[i].type = code;
2086 }
2087 num_of_codes = i;
2088
2089 fclose(fp);
2090 }
2091
2092
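/*
 * Build the kernel symbol table by running /usr/bin/nm on the kernel
 * image into a temporary file and parsing the result into kern_sym_tbl[].
 * pc_to_string() uses this table to turn kernel PCs into symbol+offset.
 */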
2093 void
2094 do_kernel_nm()
2095 {
2096 int i, len;
2097 FILE *fp = (FILE *)0;
2098 char tmp_nm_file[128];
2099 char tmpstr[1024];
2100   char inchr;
       int c;    /* int, so the comparison against EOF below is reliable */
2101
2102 bzero(tmp_nm_file, 128);
2103 bzero(tmpstr, 1024);
2104
2105 /* Build the temporary nm file path */
2106 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2107
2108 /* Build the nm command and create a tmp file with the output*/
2109 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2110 kernelpath, tmp_nm_file);
2111 system(tmpstr);
2112
2113 /* Parse the output from the nm command */
2114 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2115 {
2116 /* Hmmm, let's not treat this as fatal */
2117 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2118 return;
2119 }
2120
2121 /* Count the number of symbols in the nm symbol table */
2122 kern_sym_count=0;
2123   while ( (c = getc(fp)) != EOF)
2124     {
2125       if (c == '\n')
2126 kern_sym_count++;
2127 }
2128
2129 rewind(fp);
2130
2131 /* Malloc the space for symbol table */
2132 if (kern_sym_count > 0)
2133 {
2134 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2135 if (!kern_sym_tbl)
2136 {
2137          /* Hmmm, let's not treat this as fatal */
2138 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2139 }
2140 else
2141 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2142 }
2143 else
2144 {
2145       /* Hmmm, let's not treat this as fatal */
2146 fprintf(stderr, "No kernel symbol table \n");
2147 }
2148
2149 for (i=0; i<kern_sym_count; i++)
2150 {
2151 bzero(tmpstr, 1024);
2152 if (fscanf(fp, "%lx %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2153 break;
2154 else
2155 {
2156 len = strlen(tmpstr);
2157 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2158
2159 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2160 {
2161 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2162 kern_sym_tbl[i].k_sym_name = (char *)0;
2163 len = 0;
2164 }
2165 else
2166 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2167
2168 kern_sym_tbl[i].k_sym_len = len;
2169 }
2170 } /* end for */
2171
2172 if (i != kern_sym_count)
2173 {
2174 /* Hmmm, didn't build up entire table from nm */
2175 /* scrap the entire thing */
2176 if (kern_sym_tbl)
2177 free (kern_sym_tbl);
2178 kern_sym_tbl = (kern_sym_t *)0;
2179 kern_sym_count = 0;
2180 }
2181
2182 fclose(fp);
2183
2184 /* Remove the temporary nm file */
2185 unlink(tmp_nm_file);
2186
2187 #if 0
2188 /* Dump the kernel symbol table */
2189 for (i=0; i < kern_sym_count; i++)
2190 {
2191 if (kern_sym_tbl[i].k_sym_name)
2192 printf ("[%d] 0x%x %s\n", i,
2193 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2194 else
2195 printf ("[%d] 0x%x %s\n", i,
2196 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2197 }
2198 #endif
2199 }
2200
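/*
 * Map a program counter to a printable string: user-mode addresses are
 * shown as raw hex, kernel addresses are resolved to symbol+offset via
 * a binary search of kern_sym_tbl[], truncated to fit in max_len chars.
 */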
2201 char *
2202 pc_to_string(unsigned int pc, int max_len, int mode)
2203 {
2204 int ret;
2205 int len;
2206
2207 int binary_search();
2208
2209 if (mode == 0)
2210 {
2211 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2212 return(pcstring);
2213 }
2214
2215 ret=0;
2216 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2217
2218 if (ret == -1)
2219 {
2220 sprintf(pcstring, "0x%x", pc);
2221 return(pcstring);
2222 }
2223 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2224 {
2225 sprintf(pcstring, "0x%x", pc);
2226 return(pcstring);
2227 }
2228 else
2229 {
2230 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2231 len = max_len - 8;
2232
2233 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2234 sprintf(&pcstring[len], "+0x%-5lx", pc - kern_sym_tbl[ret].k_sym_addr);
2235
2236 return (pcstring);
2237 }
2238 }
2239
2240
2241 /* Return -1 if not found, else return index */
2242 int binary_search(list, low, high, addr)
2243 kern_sym_t *list;
2244 int low, high;
2245 unsigned int addr;
2246 {
2247 int mid;
2248
2249 mid = (low + high) / 2;
2250
2251 if (low > high)
2252 return (-1); /* failed */
2253 else if (low + 1 == high)
2254 {
2255 if (list[low].k_sym_addr <= addr &&
2256 addr < list[high].k_sym_addr)
2257 {
2258 /* We have a range match */
2259 return(low);
2260 }
2261 else if (list[high].k_sym_addr <= addr)
2262 {
2263 return(high);
2264 }
2265 else
2266 return(-1); /* Failed */
2267 }
2268 else if (addr < list[mid].k_sym_addr)
2269 {
2270 return(binary_search (list, low, mid, addr));
2271 }
2272 else
2273 {
2274 return(binary_search (list, mid, high, addr));
2275 }
2276 }
2277
2278 void
2279 open_logfile(char *path)
2280 {
2281 log_fp = fopen(path, "a");
2282
2283 if (!log_fp)
2284 {
2285 /* failed to open path */
2286 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2287 exit_usage();
2288 }
2289 }