1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 /*
27 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
28 */
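/*
 * latency measures two things by way of the kernel kdebug trace facility:
 *
 *  - scheduling latency: the tool sleeps for a fixed interval with
 *    mach_wait_until() and records how far past the requested wakeup
 *    time it actually ran (the SCHEDULER column).
 *
 *  - interrupt latency: DECR_TRAP trace entries report how late the
 *    decrementer (timer) interrupt was serviced (the INTERRUPTS column).
 *
 * Samples are accumulated into histogram bins and displayed with curses;
 * when a threshold is exceeded and a log file was given, the trace
 * entries surrounding the event are decoded and appended to the log.
 */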
29
30 #include <mach/mach.h>
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <signal.h>
34 #include <strings.h>
35 #include <nlist.h>
36 #include <fcntl.h>
37 #include <string.h>
38
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/time.h>
42
43 #include <libc.h>
44 #include <termios.h>
45 #include <curses.h>
46 #include <sys/ioctl.h>
47
48 #ifndef KERNEL_PRIVATE
49 #define KERNEL_PRIVATE
50 #include <sys/kdebug.h>
51 #undef KERNEL_PRIVATE
52 #else
53 #include <sys/kdebug.h>
54 #endif /*KERNEL_PRIVATE*/
55
56 #include <sys/sysctl.h>
57 #include <errno.h>
58 #include <err.h>
59
60 #include <mach/host_info.h>
61 #include <mach/mach_error.h>
62 #include <mach/mach_types.h>
63 #include <mach/message.h>
64 #include <mach/mach_syscalls.h>
65 #include <mach/clock_types.h>
66 #include <mach/mach_time.h>
67
68 #include <libkern/OSTypes.h>
69
70 extern mach_port_t clock_port;
71
72 #define KERN_KDPIDEX 14
73
74
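/*
 * Latency histograms, kept separately for the scheduler path (s_*) and
 * the interrupt/decrementer path (i_*): 10-usec buckets below 100us,
 * 100-usec buckets below 1ms, 1-msec buckets below 10ms, 10-msec
 * buckets below 50ms, and a single "too slow" count beyond that.
 */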
75 int s_usec_10_bins[10];
76 int s_usec_100_bins[10];
77 int s_msec_1_bins[10];
78 int s_msec_10_bins[5];
79 int s_too_slow;
80 int s_max_latency;
81 int s_min_latency = 0;
82 long long s_total_latency = 0;
83 int s_total_samples;
84 long s_thresh_hold;
85 int s_exceeded_threshold = 0;
86
87 int i_usec_10_bins[10];
88 int i_usec_100_bins[10];
89 int i_msec_1_bins[10];
90 int i_msec_10_bins[5];
91 int i_too_slow;
92 int i_max_latency;
93 int i_min_latency = 0;
94 long long i_total_latency = 0;
95 int i_total_samples;
96 long i_thresh_hold;
97 int i_exceeded_threshold = 0;
98
99 long start_time;
100 long curr_time;
101 long refresh_time;
102
103 char *policy_name;
104 int my_policy;
105 int my_pri = -1;
106 int num_of_usecs_to_sleep = 1000;
107
108 char *kernelpath = (char *)0;
109 char *code_file = (char *)0;
110
111 typedef struct {
112 u_long k_sym_addr; /* kernel symbol address from nm */
113 u_int k_sym_len; /* length of kernel symbol string */
114 char *k_sym_name; /* kernel symbol string from nm */
115 } kern_sym_t;
116
117 kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
118 int kern_sym_count; /* number of entries in nm table */
119 char pcstring[128];
120
121 #define UNKNOWN "Can't find symbol name"
122
123
124 double divisor;
125 int gotSIGWINCH = 0;
126 int trace_enabled = 0;
127 struct host_basic_info hi;
128
129
130 #define SAMPLE_SIZE 300000
131
132 int mib[6];
133 size_t needed;
134 char *my_buffer;
135
136 kbufinfo_t bufinfo = {0, 0, 0};
137
138 FILE *log_fp = (FILE *)0;
139 int num_of_codes = 0;
140 int need_new_map = 0;
141 int total_threads = 0;
142 kd_threadmap *mapptr = 0;
143
144 #define MAX_ENTRIES 1024
145 struct ct {
146 int type;
147 char name[32];
148 } codes_tab[MAX_ENTRIES];
149
150 /* If NUMPARMS changes in the kernel, then PATHLENGTH will also reflect the change */
151 #define NUMPARMS 23
152 #define PATHLENGTH (NUMPARMS*sizeof(long))
153
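/*
 * Per-thread decode state used while walking the trace buffer: the
 * syscall/trap type currently in progress and its start time, the
 * child thread id reported by TRACE_DATA_NEWTHREAD, and a scratch
 * buffer for reassembling VFS_LOOKUP pathnames, which arrive a few
 * longs at a time.
 */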
154 struct th_info {
155 int thread;
156 int type;
157 int child_thread;
158 int arg1;
159 double stime;
160 long *pathptr;
161 char pathname[PATHLENGTH + 1];
162 };
163
164 #define MAX_THREADS 512
165 struct th_info th_state[MAX_THREADS];
166
167 int cur_max = 0;
168
169 #define TRACE_DATA_NEWTHREAD 0x07000004
170 #define TRACE_STRING_NEWTHREAD 0x07010004
171 #define TRACE_STRING_EXEC 0x07010008
172
173 #define INTERRUPT 0x01050000
174 #define DECR_TRAP 0x01090000
175 #define DECR_SET 0x01090004
176 #define MACH_vmfault 0x01300000
177 #define MACH_sched 0x01400000
178 #define MACH_stkhandoff 0x01400008
179 #define VFS_LOOKUP 0x03010090
180 #define BSC_exit 0x040C0004
181 #define IES_action 0x050b0018
182 #define IES_filter 0x050b001c
183 #define TES_action 0x050c0010
184 #define CQ_action 0x050d0018
185
186
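/*
 * A kdebug entry's tracepoint id lives in the upper bits of kd->debugid;
 * the low two bits are the DBG_FUNC_START/DBG_FUNC_END flags, so masking
 * with DBG_FUNC_MASK recovers the bare code.  The cpu number is packed
 * into the upper bits of the 64-bit timestamp and extracted with
 * CPU_NUMBER(), while KDBG_TIMESTAMP_MASK isolates the time itself.
 */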
187 #define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
188 #define DBG_FUNC_MASK 0xfffffffc
189
190 #define CPU_NUMBER(ts) ((ts & KDBG_CPU_MASK) >> KDBG_CPU_SHIFT)
191
192 #define DBG_ZERO_FILL_FAULT 1
193 #define DBG_PAGEIN_FAULT 2
194 #define DBG_COW_FAULT 3
195 #define DBG_CACHE_HIT_FAULT 4
196
197 char *fault_name[5] = {
198 "",
199 "ZeroFill",
200 "PageIn",
201 "COW",
202 "CacheHit",
203 };
204
205 char *pc_to_string();
206 static kern_return_t set_time_constraint_policy(void);
207 static kern_return_t set_standard_policy(void);
208
209 int decrementer_val = 0; /* Value used to reset decrementer */
210 int set_remove_flag = 1; /* By default, remove trace buffer */
211
212 kd_buf **last_decrementer_kd; /* last DECR_TRAP per cpu */
213 #define MAX_LOG_COUNT 30 /* limits the number of entries dumped in log_decrementer */
214
215 int
216 quit(s)
217 char *s;
218 {
219 void set_enable();
220 void set_rtcdec();
221 void set_remove();
222
223 if (trace_enabled)
224 set_enable(0);
225
226 /*
227 This flag is turned off when calling
228 quit() due to a set_remove() failure.
229 */
230 if (set_remove_flag)
231 set_remove();
232
233 if (decrementer_val)
234 set_rtcdec(0);
235
236 printf("latency: ");
237 if (s)
238 printf("%s", s);
239
240 exit(1);
241 }
242
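/*
 * The kdebug control operations below all share the same sysctl shape:
 * { CTL_KERN, KERN_KDEBUG, <op>, ... }.  Op-specific arguments either
 * ride in the remaining mib words (KERN_KDENABLE, KERN_KDSETBUF) or in
 * a kd_regtype passed through the old-value buffer (KERN_KDSETREG,
 * KERN_KDSETRTCDEC, KERN_KDPIDEX).
 */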
243 void
244 set_enable(int val)
245 {
246 mib[0] = CTL_KERN;
247 mib[1] = KERN_KDEBUG;
248 mib[2] = KERN_KDENABLE; /* protocol */
249 mib[3] = val;
250 mib[4] = 0;
251 mib[5] = 0; /* no flags */
252
253 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
254 quit("trace facility failure, KERN_KDENABLE\n");
255 }
256
257 void
258 set_numbufs(int nbufs)
259 {
260 mib[0] = CTL_KERN;
261 mib[1] = KERN_KDEBUG;
262 mib[2] = KERN_KDSETBUF;
263 mib[3] = nbufs;
264 mib[4] = 0;
265 mib[5] = 0; /* no flags */
266 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
267 quit("trace facility failure, KERN_KDSETBUF\n");
268
269 mib[0] = CTL_KERN;
270 mib[1] = KERN_KDEBUG;
271 mib[2] = KERN_KDSETUP;
272 mib[3] = 0;
273 mib[4] = 0;
274 mib[5] = 0; /* no flags */
275 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
276 quit("trace facility failure, KERN_KDSETUP\n");
277
278 }
279
280 void
281 set_pidexclude(int pid, int on_off)
282 {
283 kd_regtype kr;
284
285 kr.type = KDBG_TYPENONE;
286 kr.value1 = pid;
287 kr.value2 = on_off;
288 needed = sizeof(kd_regtype);
289 mib[0] = CTL_KERN;
290 mib[1] = KERN_KDEBUG;
291 mib[2] = KERN_KDPIDEX;
292 mib[3] = 0;
293 mib[4] = 0;
294 mib[5] = 0;
295
296 sysctl(mib, 3, &kr, &needed, NULL, 0);
297 }
298
299 void set_rtcdec(decval)
300 int decval;
301 {kd_regtype kr;
302 int ret;
303 extern int errno;
304
305 kr.type = KDBG_TYPENONE;
306 kr.value1 = decval;
307 needed = sizeof(kd_regtype);
308 mib[0] = CTL_KERN;
309 mib[1] = KERN_KDEBUG;
310 mib[2] = KERN_KDSETRTCDEC; /* protocol */
311 mib[3] = 0; /* wildcard address family */
312 mib[4] = 0;
313 mib[5] = 0; /* no flags */
314
315 errno = 0;
316 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
317 {
318 decrementer_val = 0;
319 /* ignore this sysctl error if it's not supported */
320 if (errno == ENOENT)
321 return;
322 else
323 quit("trace facility failure, KERN_KDSETRTCDEC\n");
324 }
325 }
326
327
328 void
329 get_bufinfo(kbufinfo_t *val)
330 {
331 needed = sizeof (*val);
332 mib[0] = CTL_KERN;
333 mib[1] = KERN_KDEBUG;
334 mib[2] = KERN_KDGETBUF;
335 mib[3] = 0;
336 mib[4] = 0;
337 mib[5] = 0; /* no flags */
338
339 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
340 quit("trace facility failure, KERN_KDGETBUF\n");
341
342 }
343
344 void
345 set_remove()
346 {
347 extern int errno;
348
349 mib[0] = CTL_KERN;
350 mib[1] = KERN_KDEBUG;
351 mib[2] = KERN_KDREMOVE; /* protocol */
352 mib[3] = 0;
353 mib[4] = 0;
354 mib[5] = 0; /* no flags */
355
356 errno = 0;
357
358 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
359 {
360 set_remove_flag = 0;
361 if(errno == EBUSY)
362 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
363 else
364 quit("trace facility failure, KERN_KDREMOVE\n");
365 }
366 }
367
368 void
369 set_init_nologging()
370 {
371 /* When we aren't logging, only collect the DECR_TRAP trace points */
372 kd_regtype kr;
373 kr.type = KDBG_VALCHECK;
374 kr.value1 = DECR_TRAP;
375 kr.value2 = 0;
376 kr.value3 = 0;
377 kr.value4 = 0;
378 needed = sizeof(kd_regtype);
379 mib[0] = CTL_KERN;
380 mib[1] = KERN_KDEBUG;
381 mib[2] = KERN_KDSETREG;
382 mib[3] = 0;
383 mib[4] = 0;
384 mib[5] = 0; /* no flags */
385 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
386 quit("trace facility failure, KERN_KDSETREG\n");
387
388 mib[0] = CTL_KERN;
389 mib[1] = KERN_KDEBUG;
390 mib[2] = KERN_KDSETUP;
391 mib[3] = 0;
392 mib[4] = 0;
393 mib[5] = 0; /* no flags */
394
395 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
396 quit("trace facility failure, KERN_KDSETUP\n");
397 }
398
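/* When logging, capture every tracepoint by checking against an all-inclusive value range (0 through -1). */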
399 void
400 set_init_logging()
401 { kd_regtype kr;
402
403 kr.type = KDBG_RANGETYPE;
404 kr.value1 = 0;
405 kr.value2 = -1;
406 needed = sizeof(kd_regtype);
407 mib[0] = CTL_KERN;
408 mib[1] = KERN_KDEBUG;
409 mib[2] = KERN_KDSETREG;
410 mib[3] = 0;
411 mib[4] = 0;
412 mib[5] = 0; /* no flags */
413
414 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
415 quit("trace facility failure, KERN_KDSETREG\n");
416
417 mib[0] = CTL_KERN;
418 mib[1] = KERN_KDEBUG;
419 mib[2] = KERN_KDSETUP;
420 mib[3] = 0;
421 mib[4] = 0;
422 mib[5] = 0; /* no flags */
423
424 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
425 quit("trace facility failure, KERN_KDSETUP\n");
426 }
427
428
429 void sigwinch()
430 {
431 gotSIGWINCH = 1;
432 }
433
434 void sigintr()
435 {
436 void screen_update();
437
438 set_enable(0);
439 set_pidexclude(getpid(), 0);
440 screen_update(log_fp);
441 endwin();
442 set_rtcdec(0);
443 set_remove();
444
445 exit(1);
446 }
447
448 void leave() /* exit under normal conditions -- signal handler */
449 {
450 set_enable(0);
451 set_pidexclude(getpid(), 0);
452 endwin();
453 set_rtcdec(0);
454 set_remove();
455
456 exit(1);
457 }
458
459 void
460 screen_update(FILE *fp)
461 {
462 int i;
463 int itotal, stotal;
464 int elapsed_secs;
465 int elapsed_mins;
466 int elapsed_hours;
467 unsigned int average_s_latency;
468 unsigned int average_i_latency;
469 char tbuf[256];
470
471 if (fp == (FILE *)0) {
472 erase();
473 move(0, 0);
474 } else
475 fprintf(fp,"\n\n===================================================================================================\n");
476 /*
477 * Display the current time.
478 * "ctime" always returns a string that looks like this:
479 *
480 * Sun Sep 16 01:03:52 1973
481 * 012345678901234567890123
482 * 1 2
483 *
484 * Here the first 19 characters (through the seconds field) are printed.
485 */
486 elapsed_secs = curr_time - start_time;
487 elapsed_hours = elapsed_secs / 3600;
488 elapsed_secs -= elapsed_hours * 3600;
489 elapsed_mins = elapsed_secs / 60;
490 elapsed_secs -= elapsed_mins * 60;
491
492 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
493 (long)elapsed_hours, (long)elapsed_mins, (long)elapsed_secs);
494 if (fp)
495 fprintf(fp, "%s", tbuf);
496 else
497 printw(tbuf);
498
499
500
501 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
502
503 if (fp)
504 fprintf(fp, "%s", tbuf);
505 else
506 printw(tbuf);
507
508 sprintf(tbuf, "---------------------------------------------\n");
509
510 if (fp)
511 fprintf(fp, "%s", tbuf);
512 else
513 printw(tbuf);
514
515
516
517 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
518
519 if (fp)
520 fprintf(fp, "%s", tbuf);
521 else
522 printw(tbuf);
523
524
525
526 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
527 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
528
529 if (fp)
530 fprintf(fp, "%s", tbuf);
531 else
532 printw(tbuf);
533
534 stotal += s_usec_10_bins[i];
535 itotal += i_usec_10_bins[i];
536 }
537 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
538
539 if (fp)
540 fprintf(fp, "%s", tbuf);
541 else
542 printw(tbuf);
543
544
545
546 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
547 if (i < 9)
548 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
549 else
550 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
551
552 if (fp)
553 fprintf(fp, "%s", tbuf);
554 else
555 printw(tbuf);
556
557 stotal += s_usec_100_bins[i];
558 itotal += i_usec_100_bins[i];
559 }
560 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
561
562 if (fp)
563 fprintf(fp, "%s", tbuf);
564 else
565 printw(tbuf);
566
567
568
569 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
570 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
571
572 if (fp)
573 fprintf(fp, "%s", tbuf);
574 else
575 printw(tbuf);
576
577 stotal += s_msec_1_bins[i];
578 itotal += i_msec_1_bins[i];
579 }
580 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
581
582 if (fp)
583 fprintf(fp, "%s", tbuf);
584 else
585 printw(tbuf);
586
587
588
589
590 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
591 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
592
593 if (fp)
594 fprintf(fp, "%s", tbuf);
595 else
596 printw(tbuf);
597
598 stotal += s_msec_10_bins[i];
599 itotal += i_msec_10_bins[i];
600 }
601 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
602
603 if (fp)
604 fprintf(fp, "%s", tbuf);
605 else
606 printw(tbuf);
607
608 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
609
610 if (fp)
611 fprintf(fp, "%s", tbuf);
612 else
613 printw(tbuf);
614
615
616
617 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
618
619 if (fp)
620 fprintf(fp, "%s", tbuf);
621 else
622 printw(tbuf);
623
624 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
625
626 if (fp)
627 fprintf(fp, "%s", tbuf);
628 else
629 printw(tbuf);
630
631 if (s_total_samples)
632 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
633 else
634 average_s_latency = 0;
635
636 if (i_total_samples)
637 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
638 else
639 average_i_latency = 0;
640
641 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
642
643 if (fp)
644 fprintf(fp, "%s", tbuf);
645 else
646 printw(tbuf);
647
648 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
649
650 if (fp)
651 fprintf(fp, "%s", tbuf);
652 else
653 printw(tbuf);
654
655 if (fp == (FILE *)0)
656 refresh();
657 else
658 fflush(fp);
659 }
660
661 int
662 exit_usage()
663 {
664
665 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
666
667 #if defined (__i386__)
668 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs] [-n kernel]\n\n");
669 #else
670 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
671 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
672 #endif
673
674
675 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
676 fprintf(stderr, " -c specify name of codes file\n");
677 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
678 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
679 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
680 fprintf(stderr, " -s set sleep time in microseconds\n");
681 #if !defined (__i386__)
682 fprintf(stderr, " -d set decrementer in microseconds.\n");
683 #endif
684 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
685
686 fprintf(stderr, "\nlatency must be run as root\n\n");
687
688 exit(1);
689 }
690
691
692 int
693 main(argc, argv)
694 int argc;
695 char *argv[];
696 {
697 uint64_t start, stop;
698 uint64_t timestamp1;
699 uint64_t timestamp2;
700 uint64_t adeadline, adelay;
701 double fdelay;
702 int elapsed_usecs;
703 double nanosecs_to_sleep;
704 int loop_cnt, sample_sc_now;
705 int decrementer_usec = 0;
706 kern_return_t ret;
707 mach_msg_type_number_t size;
708 host_name_port_t host;
709 void getdivisor();
710 void sample_sc();
711 void init_code_file();
712 void do_kernel_nm();
713 void open_logfile();
714
715 my_policy = THREAD_STANDARD_POLICY;
716 policy_name = "TIMESHARE";
717
718 while (argc > 1) {
719 if (strcmp(argv[1], "-rt") == 0) {
720 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
721 policy_name = "REALTIME";
722
723 } else if (strcmp(argv[1], "-st") == 0) {
724 argc--;
725 argv++;
726
727 if (argc > 1)
728 s_thresh_hold = atoi(argv[1]);
729 else
730 exit_usage();
731
732 } else if (strcmp(argv[1], "-it") == 0) {
733 argc--;
734 argv++;
735
736 if (argc > 1)
737 i_thresh_hold = atoi(argv[1]);
738 else
739 exit_usage();
740 } else if (strcmp(argv[1], "-c") == 0) {
741 argc--;
742 argv++;
743
744 if (argc > 1)
745 code_file = argv[1];
746 else
747 exit_usage();
748 } else if (strcmp(argv[1], "-l") == 0) {
749 argc--;
750 argv++;
751
752 if (argc > 1)
753 open_logfile(argv[1]);
754 else
755 exit_usage();
756
757 } else if (strcmp(argv[1], "-s") == 0) {
758 argc--;
759 argv++;
760
761 if (argc > 1)
762 num_of_usecs_to_sleep = atoi(argv[1]);
763 else
764 exit_usage();
765 }
766 else if (strcmp(argv[1], "-d") == 0) {
767 argc--;
768 argv++;
769
770 if (argc > 1)
771 decrementer_usec = atoi(argv[1]);
772 else
773 exit_usage();
774 #if defined(__i386__)
775 /* ignore this option - setting the decrementer has no effect */
776 decrementer_usec = 0;
777 #endif
778 }
779 else if (strcmp(argv[1], "-n") == 0) {
780 argc--;
781 argv++;
782
783 if (argc > 1)
784 kernelpath = argv[1];
785 else
786 exit_usage();
787 } else
788 exit_usage();
789
790 argc--;
791 argv++;
792 }
793
794 if ( geteuid() != 0 ) {
795 printf("'latency' must be run as root...\n");
796 exit(1);
797 }
798
799 if (kernelpath == (char *) 0)
800 kernelpath = "/mach_kernel";
801
802 if (code_file == (char *) 0)
803 code_file = "/usr/share/misc/trace.codes";
804
805 do_kernel_nm();
806
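/*
 * Even when no threshold has been exceeded, the trace buffer is drained
 * periodically; loop_cnt reaches sample_sc_now roughly once per 25ms of
 * requested sleep time.
 */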
807 sample_sc_now = 25000 / num_of_usecs_to_sleep;
808
809 getdivisor();
810 decrementer_val = decrementer_usec * divisor;
811
812 /* get the cpu count for the DECR_TRAP array */
813 host = mach_host_self();
814 size = sizeof(hi)/sizeof(int);
815 ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
816 if (ret != KERN_SUCCESS) {
817 mach_error(argv[0], ret);
818 exit(EXIT_FAILURE);
819 }
820
821 if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
822 quit("can't allocate memory for decrementer tracing info\n");
823
824 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
825 fdelay = nanosecs_to_sleep * (divisor /1000);
826 adelay = (uint64_t)fdelay;
827
828 init_code_file();
829
830 /*
831 When the decrementer isn't set in the options,
832 decval will be zero and this call will reset
833 the system default ...
834 */
835 set_rtcdec(decrementer_val);
836
837 if (initscr() == (WINDOW *) 0)
838 {
839 printf("Unrecognized TERM type, try vt100\n");
840 exit(1);
841 }
842
843 clear();
844 refresh();
845 signal(SIGWINCH, sigwinch);
846 signal(SIGINT, sigintr);
847 signal(SIGQUIT, leave);
848 signal(SIGTERM, leave);
849 signal(SIGHUP, leave);
850
851
852 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
853 quit("can't allocate memory for tracing info\n");
854 set_remove();
855 set_numbufs(SAMPLE_SIZE);
856 set_enable(0);
857 if(log_fp)
858 set_init_logging();
859 else
860 set_init_nologging();
861 set_pidexclude(getpid(), 1);
862 set_enable(1);
863 trace_enabled = 1;
864 need_new_map = 1;
865
866 loop_cnt = 0;
867 start_time = time((long *)0);
868 refresh_time = start_time;
869
870 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
871 {
872 /* the realtime band */
873 if(set_time_constraint_policy() != KERN_SUCCESS)
874 quit("Failed to set realtime policy.\n");
875 }
876
877 for (;;) {
878 curr_time = time((long *)0);
879
880 if (curr_time >= refresh_time) {
881 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
882 {
883 /* set standard timeshare policy during screen update */
884 if(set_standard_policy() != KERN_SUCCESS)
885 quit("Failed to set standard policy.\n");
886 }
887 screen_update((FILE *)0);
888 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
889 {
890 /* set back to realtime band */
891 if(set_time_constraint_policy() != KERN_SUCCESS)
892 quit("Failed to set time_constraint policy.\n");
893 }
894 refresh_time = curr_time + 1;
895 }
896
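/*
 * Measure scheduling latency: request an absolute-deadline sleep of
 * num_of_usecs_to_sleep and see how far past the deadline the thread
 * actually woke up.  Everything is kept in mach_absolute_time() units
 * and converted to microseconds with 'divisor' below.
 */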
897 timestamp1 = mach_absolute_time();
898 adeadline = timestamp1 + adelay;
899 mach_wait_until(adeadline);
900 timestamp2 = mach_absolute_time();
901
902 start = timestamp1;
903
904 stop = timestamp2;
905
906 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
907
908 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
909 continue;
910
911 if (elapsed_usecs < 100)
912 s_usec_10_bins[elapsed_usecs/10]++;
913 if (elapsed_usecs < 1000)
914 s_usec_100_bins[elapsed_usecs/100]++;
915 else if (elapsed_usecs < 10000)
916 s_msec_1_bins[elapsed_usecs/1000]++;
917 else if (elapsed_usecs < 50000)
918 s_msec_10_bins[elapsed_usecs/10000]++;
919 else
920 s_too_slow++;
921
922 if (elapsed_usecs > s_max_latency)
923 s_max_latency = elapsed_usecs;
924 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
925 s_min_latency = elapsed_usecs;
926 s_total_latency += elapsed_usecs;
927 s_total_samples++;
928
929 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
930 s_exceeded_threshold++;
931 loop_cnt++;
932
933 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
934 sample_sc(start, stop);
935 else {
936 if (loop_cnt >= sample_sc_now) {
937 sample_sc((long long)0, (long long)0);
938 loop_cnt = 0;
939 }
940 }
941 if (gotSIGWINCH) {
942 /*
943 No need to check for initscr error return.
944 We won't get here if it fails on the first call.
945 */
946 endwin();
947 clear();
948 refresh();
949
950 gotSIGWINCH = 0;
951 }
952 }
953 }
954
955
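/*
 * divisor converts mach_absolute_time() ticks to microseconds:
 * mach_timebase_info() defines nanoseconds = ticks * numer / denom,
 * so (denom / numer) * 1000 is ticks-per-microsecond.
 */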
956 void getdivisor()
957 {
958 mach_timebase_info_data_t info;
959
960 (void) mach_timebase_info (&info);
961
962 divisor = ( (double)info.denom / (double)info.numer) * 1000;
963
964 }
965
966 /* This is the realtime band */
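/*
 * Fetch the system default TIME_CONSTRAINT parameters and apply them to
 * this thread, so that the sampling loop runs in the realtime band when
 * -rt is requested.
 */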
967 static kern_return_t
968 set_time_constraint_policy()
969 {
970 kern_return_t result;
971 thread_time_constraint_policy_data_t info;
972 mach_msg_type_number_t count;
973 boolean_t get_default;
974
975 get_default = TRUE;
976 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
977 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
978 (thread_policy_t)&info, &count, &get_default);
979 if (result != KERN_SUCCESS)
980 return (result);
981
982 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
983 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
984
985 return (result);
986 }
987
988 /* This is the timeshare mode */
989 static kern_return_t
990 set_standard_policy()
991 {
992 kern_return_t result;
993 thread_standard_policy_data_t info;
994 mach_msg_type_number_t count;
995 boolean_t get_default;
996
997 get_default = TRUE;
998 count = THREAD_STANDARD_POLICY_COUNT;
999 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
1000 (thread_policy_t)&info, &count, &get_default);
1001 if (result != KERN_SUCCESS)
1002 return (result);
1003
1004 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
1005 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
1006
1007 return (result);
1008 }
1009
1010
1011 void read_command_map()
1012 {
1013 size_t size;
1014 int mib[6];
1015
1016 if (mapptr) {
1017 free(mapptr);
1018 mapptr = 0;
1019 }
1020 total_threads = bufinfo.nkdthreads;
1021 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1022 if (size)
1023 {
1024 if ((mapptr = (kd_threadmap *) malloc(size)))
1025 bzero (mapptr, size);
1026 else
1027 {
1028 printf("Thread map is not initialized -- this is not fatal\n");
1029 return;
1030 }
1031 }
1032
1033 /* Now read the threadmap */
1034 mib[0] = CTL_KERN;
1035 mib[1] = KERN_KDEBUG;
1036 mib[2] = KERN_KDTHRMAP;
1037 mib[3] = 0;
1038 mib[4] = 0;
1039 mib[5] = 0; /* no flags */
1040 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1041 {
1042 /* This is not fatal -- just means I can't map command strings */
1043
1044 printf("Can't read the thread map -- this is not fatal\n");
1045 free(mapptr);
1046 mapptr = 0;
1047 return;
1048 }
1049 return;
1050 }
1051
1052
1053 void create_map_entry(int thread, char *command)
1054 {
1055 int i, n;
1056 kd_threadmap *map;
1057
1058 if (!mapptr)
1059 return;
1060
1061 for (i = 0, map = 0; !map && i < total_threads; i++)
1062 {
1063 if (mapptr[i].thread == thread )
1064 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1065 }
1066
1067 if (!map) /* look for invalid entries that I can reuse*/
1068 {
1069 for (i = 0, map = 0; !map && i < total_threads; i++)
1070 {
1071 if (mapptr[i].valid == 0 )
1072 map = &mapptr[i]; /* Reuse this invalid entry */
1073 }
1074 }
1075
1076 if (!map)
1077 {
1078 /* If we reach here, this is a new thread and there
1079 * are no invalid entries to reuse, so double the size
1080 * of the thread map table.
1081 */
1082
1083 n = total_threads * 2;
1084 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1085 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1086 map = &mapptr[total_threads];
1087 total_threads = n;
1088 #if 0
1089 if (log_fp)
1090 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1091 #endif
1092 }
1093 #if 0
1094 if (log_fp)
1095 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1096 #endif
1097 map->valid = 1;
1098 map->thread = thread;
1099 /*
1100 The trace entry that returns the command name holds at
1101 most MAXCOMLEN chars and, in that case, is not
1102 guaranteed to be null terminated.
1103 */
1104 (void)strncpy (map->command, command, MAXCOMLEN);
1105 map->command[MAXCOMLEN] = '\0';
1106 }
1107
1108
1109 kd_threadmap *find_thread_map(int thread)
1110 {
1111 int i;
1112 kd_threadmap *map;
1113
1114 if (!mapptr)
1115 return((kd_threadmap *)0);
1116
1117 for (i = 0; i < total_threads; i++)
1118 {
1119 map = &mapptr[i];
1120 if (map->valid && (map->thread == thread))
1121 {
1122 return(map);
1123 }
1124 }
1125 return ((kd_threadmap *)0);
1126 }
1127
1128 void
1129 kill_thread_map(int thread)
1130 {
1131 kd_threadmap *map;
1132
1133 if ((map = find_thread_map(thread))) {
1134
1135 #if 0
1136 if (log_fp)
1137 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1138 #endif
1139 map->valid = 0;
1140 map->thread = 0;
1141 map->command[0] = '\0';
1142 }
1143 }
1144
1145
1146 struct th_info *find_thread(int thread, int type1, int type2) {
1147 struct th_info *ti;
1148
1149 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1150 if (ti->thread == thread) {
1151 if (type1 == 0)
1152 return(ti);
1153 if (type1 == ti->type)
1154 return(ti);
1155 if (type2 == ti->type)
1156 return(ti);
1157 }
1158 }
1159 return ((struct th_info *)0);
1160 }
1161
1162
1163 char *find_code(type)
1164 {
1165 int i;
1166
1167 for (i = 0; i < num_of_codes; i++) {
1168 if (codes_tab[i].type == type)
1169 return(codes_tab[i].name);
1170 }
1171 return ((char *)0);
1172 }
1173
1174
1175 void sample_sc(uint64_t start, uint64_t stop)
1176 {
1177 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1178 uint64_t now;
1179 int count, i;
1180 int first_entry = 1;
1181 double timestamp = 0.0;
1182 double last_timestamp = 0.0;
1183 double delta = 0.0;
1184 double start_bias = 0.0;
1185 char command[32];
1186 void read_command_map();
1187
1188 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1189 {
1190 /* set standard timeshare policy when logging */
1191 if(set_standard_policy() != KERN_SUCCESS)
1192 quit("Failed to set standard policy.\n");
1193 }
1194
1195 /* Get kernel buffer information */
1196 get_bufinfo(&bufinfo);
1197
1198 if (need_new_map) {
1199 read_command_map();
1200 need_new_map = 0;
1201 }
1202 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1203 mib[0] = CTL_KERN;
1204 mib[1] = KERN_KDEBUG;
1205 mib[2] = KERN_KDREADTR;
1206 mib[3] = 0;
1207 mib[4] = 0;
1208 mib[5] = 0; /* no flags */
1209
1210 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1211 quit("trace facility failure, KERN_KDREADTR\n");
1212
1213 count = needed;
1214
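/*
 * If the kernel trace buffer wrapped, per-thread state inferred from
 * earlier entries is stale; throw it away, re-read the thread map,
 * and restart the trace.
 */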
1215 if (bufinfo.flags & KDBG_WRAPPED) {
1216 for (i = 0; i < cur_max; i++) {
1217 th_state[i].thread = 0;
1218 th_state[i].type = -1;
1219 th_state[i].pathptr = (long *)0;
1220 th_state[i].pathname[0] = 0;
1221 }
1222 cur_max = 0;
1223 need_new_map = 1;
1224
1225 set_enable(0);
1226 set_enable(1);
1227
1228 if (log_fp) {
1229 double latency;
1230
1231 latency = (double)(stop - start) / divisor;
1232 latency -= (double)num_of_usecs_to_sleep;
1233
1234 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1235 &(ctime(&curr_time)[0]), latency, count);
1236 }
1237 }
1238 end_of_sample = &((kd_buf *)my_buffer)[count];
1239
1240 /* Always reinitialize the DECR_TRAP array */
1241 for (i=0; i < hi.avail_cpus; i++)
1242 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1243
1244 last_mach_sched = (kd_buf *)0;
1245
1246 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1247 int debugid, thread, cpunum;
1248 int type, clen, mode;
1249 int len;
1250 char *p;
1251 long *sargptr;
1252 kd_buf *cur_kd;
1253 double i_latency = 0.0;
1254 struct th_info *ti;
1255 char command1[32];
1256 char sched_info[64];
1257 kd_threadmap *map;
1258 kd_threadmap *find_thread_map();
1259 double handle_decrementer();
1260 kd_buf *log_decrementer();
1261 int check_for_thread_update();
1262 void enter_syscall();
1263 void exit_syscall();
1264 void print_entry();
1265
1266 thread = kd->arg5;
1267 cpunum = CPU_NUMBER(kd->timestamp);
1268 debugid = kd->debugid;
1269 type = kd->debugid & DBG_FUNC_MASK;
1270
1271 if (check_for_thread_update(thread, type, kd))
1272 continue;
1273
1274 if (type == DECR_TRAP)
1275 i_latency = handle_decrementer(kd);
1276
1277 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1278
1279 timestamp = ((double)now) / divisor;
1280
1281 if (now < start || now > stop) {
1282 if (debugid & DBG_FUNC_START)
1283 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1284 else if (debugid & DBG_FUNC_END)
1285 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1286 else if (type == DECR_TRAP) {
1287 cur_kd = kd;
1288 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1289 start_kd = last_decrementer_kd[cpunum];
1290 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1291 if (kd >= end_of_sample)
1292 break;
1293 }
1294 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1295 {
1296 cpunum = CPU_NUMBER(kd->timestamp);
1297 last_decrementer_kd[cpunum] = kd;
1298 }
1299 else
1300 last_decrementer_kd[cpunum] = cur_kd;
1301 }
1302 continue;
1303 }
1304 if (first_entry) {
1305 double latency;
1306 char buf1[132];
1307 char buf2[132];
1308
1309 latency = (double)(stop - start) / divisor;
1310 latency -= (double)num_of_usecs_to_sleep;
1311
1312 if (my_pri == -1)
1313 sprintf(buf2, "default");
1314 else
1315 sprintf(buf2, "%d", my_pri);
1316 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1317 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1318 clen = strlen(buf1);
1319 memset(buf2, '-', clen);
1320 buf2[clen] = 0;
1321
1322 if (log_fp) {
1323 fprintf(log_fp, "\n\n%s\n", buf2);
1324 fprintf(log_fp, "%s\n\n", buf1);
1325 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1326 }
1327 start_bias = ((double)start) / divisor;
1328 last_timestamp = timestamp;
1329 first_entry = 0;
1330 }
1331 delta = timestamp - last_timestamp;
1332
1333 if ((map = find_thread_map(thread)))
1334 strcpy(command, map->command);
1335 else
1336 command[0] = 0;
1337
1338 switch (type) {
1339
1340 case CQ_action:
1341 if (log_fp) {
1342 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1343 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1344 }
1345 last_timestamp = timestamp;
1346 break;
1347
1348 case TES_action:
1349 if (log_fp) {
1350 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1351 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1352 }
1353
1354 last_timestamp = timestamp;
1355 break;
1356
1357 case IES_action:
1358 if (log_fp) {
1359 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1360 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1361 }
1362
1363 last_timestamp = timestamp;
1364 break;
1365
1366 case IES_filter:
1367 if (log_fp) {
1368 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1369 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1370 }
1371
1372 last_timestamp = timestamp;
1373 break;
1374
1375 case DECR_TRAP:
1376 last_decrementer_kd[cpunum] = kd;
1377
1378 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1379 p = "*";
1380 else
1381 p = " ";
1382
1383 mode = 1;
1384
1385 if ((ti = find_thread(kd->arg5, 0, 0))) {
1386 if (ti->type == -1 && strcmp(command, "kernel_task"))
1387 mode = 0;
1388 }
1389
1390 if (log_fp) {
1391 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1392 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1393 }
1394
1395 last_timestamp = timestamp;
1396 break;
1397
1398 case DECR_SET:
1399 if (log_fp) {
1400 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1401 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1402 }
1403
1404 last_timestamp = timestamp;
1405 break;
1406
1407 case MACH_sched:
1408 case MACH_stkhandoff:
1409 last_mach_sched = kd;
1410
1411 if ((map = find_thread_map(kd->arg2)))
1412 strcpy(command1, map->command);
1413 else
1414 sprintf(command1, "%-8x", kd->arg2);
1415
1416 if ((ti = find_thread(kd->arg2, 0, 0))) {
1417 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1418 p = "U";
1419 else
1420 p = "K";
1421 } else
1422 p = "*";
1423 memset(sched_info, ' ', sizeof(sched_info));
1424
1425 sprintf(sched_info, "%14.14s", command);
1426 clen = strlen(sched_info);
1427 sched_info[clen] = ' ';
1428
1429 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1430 clen = strlen(sched_info);
1431 sched_info[clen] = ' ';
1432
1433 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1434
1435 if (log_fp) {
1436 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1437 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1438 }
1439
1440 last_timestamp = timestamp;
1441 break;
1442
1443 case VFS_LOOKUP:
1444 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1445 if (cur_max >= MAX_THREADS)
1446 continue;
1447 ti = &th_state[cur_max++];
1448
1449 ti->thread = thread;
1450 ti->type = -1;
1451 ti->pathptr = (long *)0;
1452 ti->child_thread = 0;
1453 }
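/*
 * VFS_LOOKUP entries deliver the pathname a few argument words at a
 * time; the first entry's arg1 is saved and later printed next to the
 * assembled path.  Keep consuming entries until the debugid changes or
 * the pathname buffer fills up.
 */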
1454 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1455 {
1456 if (!ti->pathptr) {
1457 ti->arg1 = kd->arg1;
1458 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1459 sargptr = (long *)&ti->pathname[0];
1460
1461 *sargptr++ = kd->arg2;
1462 *sargptr++ = kd->arg3;
1463 *sargptr++ = kd->arg4;
1464 ti->pathptr = sargptr;
1465
1466 } else {
1467 sargptr = ti->pathptr;
1468
1469 /*
1470 We don't want to overrun our pathname buffer if the
1471 kernel sends us more VFS_LOOKUP entries than we can
1472 handle.
1473 */
1474
1475 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1476 {
1477 kd++;
1478 continue;
1479 }
1480
1481 /*
1482 We need to detect consecutive vfslookup entries.
1483 So, if we get here and find a START entry,
1484 fake the pathptr so we can bypass all further
1485 vfslookup entries.
1486 */
1487
1488 if (kd->debugid & DBG_FUNC_START)
1489 {
1490 ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1491 }
1492 else
1493 {
1494 *sargptr++ = kd->arg1;
1495 *sargptr++ = kd->arg2;
1496 *sargptr++ = kd->arg3;
1497 *sargptr++ = kd->arg4;
1498 ti->pathptr = sargptr;
1499 }
1500 }
1501 kd++;
1502 }
1503
1504 kd--;
1505
1506 /* print the tail end of the pathname */
1507 len = strlen(ti->pathname);
1508 if (len > 42)
1509 len -= 42;
1510 else
1511 len = 0;
1512
1513 if (log_fp) {
1514 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1515 timestamp - start_bias, delta, "VFS_LOOKUP",
1516 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1517 }
1518
1519 last_timestamp = timestamp;
1520 break;
1521
1522 default:
1523 if (debugid & DBG_FUNC_START)
1524 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1525 else if (debugid & DBG_FUNC_END)
1526 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1527 else
1528 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1529
1530 last_timestamp = timestamp;
1531 break;
1532 }
1533 }
1534 if (last_mach_sched && log_fp)
1535 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1536 #if 0
1537 if (first_entry == 0 && log_fp)
1538 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1539 #endif
1540 if (log_fp)
1541 fflush(log_fp);
1542
1543 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1544 {
1545 /* set back to realtime band */
1546 if(set_time_constraint_policy() != KERN_SUCCESS)
1547 quit("Failed to set time_constraint policy.\n");
1548 }
1549 }
1550
1551 void
1552 enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1553 {
1554 struct th_info *ti;
1555 int i;
1556 int cpunum;
1557 char *p;
1558
1559 cpunum = CPU_NUMBER(kd->timestamp);
1560
1561 if (print_info && fp) {
1562 if ((p = find_code(type))) {
1563 if (type == INTERRUPT) {
1564 int mode = 1;
1565
1566 if ((ti = find_thread(kd->arg5, 0, 0))) {
1567 if (ti->type == -1 && strcmp(command, "kernel_task"))
1568 mode = 0;
1569 }
1570
1571 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1572 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1573 } else if (type == MACH_vmfault) {
1574 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1575 timestamp - bias, delta, p, thread, cpunum, command);
1576 } else {
1577 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1578 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1579 thread, cpunum, command);
1580 }
1581 } else {
1582 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1583 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1584 thread, cpunum, command);
1585 }
1586 }
1587 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1588 if (cur_max >= MAX_THREADS) {
1589 static int do_this_once = 1;
1590
1591 if (do_this_once) {
1592 for (i = 0; i < cur_max; i++) {
1593 if (!fp)
1594 break;
1595 fprintf(fp, "thread = %x, type = %x\n",
1596 th_state[i].thread, th_state[i].type);
1597 }
1598 do_this_once = 0;
1599 }
1600 return;
1601
1602 }
1603 ti = &th_state[cur_max++];
1604
1605 ti->thread = thread;
1606 ti->child_thread = 0;
1607 }
1608 if (type != BSC_exit)
1609 ti->type = type;
1610 else
1611 ti->type = -1;
1612 ti->stime = timestamp;
1613 ti->pathptr = (long *)0;
1614
1615 #if 0
1616 if (print_info && fp)
1617 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1618 #endif
1619 }
1620
1621
1622 void
1623 exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1624 {
1625 struct th_info *ti;
1626 int cpunum;
1627 char *p;
1628
1629 cpunum = CPU_NUMBER(kd->timestamp);
1630
1631 ti = find_thread(thread, type, type);
1632 #if 0
1633 if (print_info && fp)
1634 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1635 #endif
1636 if (print_info && fp) {
1637 if (ti)
1638 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1639 else
1640 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1641
1642 if ((p = find_code(type))) {
1643 if (type == INTERRUPT) {
1644 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1645 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1646 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1647 p, fault_name[kd->arg2], kd->arg1,
1648 thread, cpunum, command);
1649 } else {
1650 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1651 p, kd->arg1, kd->arg2,
1652 thread, cpunum, command);
1653 }
1654 } else {
1655 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1656 type, kd->arg1, kd->arg2,
1657 thread, cpunum, command);
1658 }
1659 }
1660 if (ti == (struct th_info *)0) {
1661 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1662 if (cur_max >= MAX_THREADS)
1663 return;
1664 ti = &th_state[cur_max++];
1665
1666 ti->thread = thread;
1667 ti->child_thread = 0;
1668 ti->pathptr = (long *)0;
1669 }
1670 }
1671 ti->type = -1;
1672 }
1673
1674 void
1675 print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1676 {
1677 char *p;
1678 int cpunum;
1679
1680 if (!fp)
1681 return;
1682
1683 cpunum = CPU_NUMBER(kd->timestamp);
1684 #if 0
1685 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1686 #endif
1687 if ((p = find_code(type))) {
1688 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1689 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1690 thread, cpunum, command);
1691 } else {
1692 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1693 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1694 thread, cpunum, command);
1695 }
1696 }
1697
1698 int
1699 check_for_thread_update(int thread, int type, kd_buf *kd)
1700 {
1701 struct th_info *ti;
1702 void create_map_entry();
1703
1704 switch (type) {
1705
1706 case TRACE_DATA_NEWTHREAD:
1707 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1708 if (cur_max >= MAX_THREADS)
1709 return (1);
1710 ti = &th_state[cur_max++];
1711
1712 ti->thread = thread;
1713 ti->type = -1;
1714 ti->pathptr = (long *)0;
1715 }
1716 ti->child_thread = kd->arg1;
1717 return (1);
1718
1719 case TRACE_STRING_NEWTHREAD:
1720 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1721 return (1);
1722 if (ti->child_thread == 0)
1723 return (1);
1724 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1725
1726 ti->child_thread = 0;
1727 return (1);
1728
1729 case TRACE_STRING_EXEC:
1730 create_map_entry(thread, (char *)&kd->arg1);
1731 return (1);
1732
1733 }
1734 return (0);
1735 }
1736
1737
1738 kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1739 {
1740 kd_buf *kd, *kd_start, *kd_stop;
1741 int kd_count; /* Limit the boundary of kd_start */
1742 double timestamp = 0.0;
1743 double last_timestamp = 0.0;
1744 double delta = 0.0;
1745 double start_bias = 0.0;
1746 int thread, cpunum;
1747 int debugid, type, clen;
1748 int len;
1749 uint64_t now;
1750 struct th_info *ti;
1751 long *sargptr;
1752 char *p;
1753 char command[32];
1754 char command1[32];
1755 char sched_info[64];
1756 char buf1[128];
1757 char buf2[128];
1758 kd_threadmap *map;
1759 kd_threadmap *find_thread_map();
1760
1761 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1762 clen = strlen(buf1);
1763 memset(buf2, '-', clen);
1764 buf2[clen] = 0;
1765 fprintf(log_fp, "\n\n%s\n", buf2);
1766 fprintf(log_fp, "%s\n\n", buf1);
1767
1768 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1769
1770 thread = kd_beg->arg5;
1771 cpunum = CPU_NUMBER(kd_end->timestamp);
1772
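/*
 * Walk backwards from the late DECR_TRAP, collecting up to MAX_LOG_COUNT
 * earlier entries on the same cpu and thread, so the log shows what led
 * up to the delayed interrupt.
 */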
1773 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1774 if (kd_count == MAX_LOG_COUNT)
1775 break;
1776
1777 if (CPU_NUMBER(kd_start->timestamp) != cpunum)
1778 continue;
1779
1780 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1781 break;
1782
1783 if (kd_start->arg5 != thread)
1784 break;
1785 }
1786
1787 if (kd_start < (kd_buf *)my_buffer)
1788 kd_start = (kd_buf *)my_buffer;
1789
1790 thread = kd_end->arg5;
1791
1792 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1793
1794 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1795 break;
1796
1797 if (CPU_NUMBER(kd_stop->timestamp) != cpunum)
1798 continue;
1799
1800 if (kd_stop->arg5 != thread)
1801 break;
1802 }
1803
1804 if (kd_stop >= end_of_sample)
1805 kd_stop = end_of_sample - 1;
1806
1807 now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
1808 timestamp = ((double)now) / divisor;
1809
1810 for (kd = kd_start; kd <= kd_stop; kd++) {
1811 type = kd->debugid & DBG_FUNC_MASK;
1812
1813 if ((ti = find_thread(kd->arg5, type, type))) {
1814 if (ti->stime >= timestamp)
1815 ti->type = -1;
1816 }
1817 }
1818 for (kd = kd_start; kd <= kd_stop; kd++) {
1819 int mode;
1820
1821 thread = kd->arg5;
1822 cpunum = CPU_NUMBER(kd->timestamp);
1823 debugid = kd->debugid;
1824 type = kd->debugid & DBG_FUNC_MASK;
1825
1826 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1827
1828 timestamp = ((double)now) / divisor;
1829
1830 if (kd == kd_start) {
1831 start_bias = timestamp;
1832 last_timestamp = timestamp;
1833 }
1834 delta = timestamp - last_timestamp;
1835
1836 if ((map = find_thread_map(thread)))
1837 strcpy(command, map->command);
1838 else
1839 command[0] = 0;
1840
1841
1842 switch (type) {
1843
1844 case CQ_action:
1845 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1846 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1847
1848 last_timestamp = timestamp;
1849 break;
1850
1851 case DECR_TRAP:
1852 if ((int)(kd->arg1) >= 0)
1853 i_latency = 0;
1854 else
1855 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1856
1857 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1858 p = "*";
1859 else
1860 p = " ";
1861
1862 mode = 1;
1863
1864 if ((ti = find_thread(kd->arg5, 0, 0))) {
1865 if (ti->type == -1 && strcmp(command, "kernel_task"))
1866 mode = 0;
1867 }
1868 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1869 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1870
1871 last_timestamp = timestamp;
1872 break;
1873
1874 case DECR_SET:
1875 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1876 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1877 "DECR_SET", thread, cpunum, command);
1878
1879 last_timestamp = timestamp;
1880 break;
1881
1882 case MACH_sched:
1883 case MACH_stkhandoff:
1884 if ((map = find_thread_map(kd->arg2)))
1885 strcpy(command1, map->command);
1886 else
1887 sprintf(command1, "%-8x", kd->arg2);
1888
1889 if ((ti = find_thread(kd->arg2, 0, 0))) {
1890 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1891 p = "U";
1892 else
1893 p = "K";
1894 } else
1895 p = "*";
1896 memset(sched_info, ' ', sizeof(sched_info));
1897
1898 sprintf(sched_info, "%14.14s", command);
1899 clen = strlen(sched_info);
1900 sched_info[clen] = ' ';
1901
1902 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1903 clen = strlen(sched_info);
1904 sched_info[clen] = ' ';
1905
1906 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1907
1908 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1909 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1910
1911 last_timestamp = timestamp;
1912 break;
1913
1914 case VFS_LOOKUP:
1915 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1916 if (cur_max >= MAX_THREADS)
1917 continue;
1918 ti = &th_state[cur_max++];
1919
1920 ti->thread = thread;
1921 ti->type = -1;
1922 ti->pathptr = (long *)0;
1923 ti->child_thread = 0;
1924 }
1925
1926 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1927 {
1928 if (!ti->pathptr) {
1929 ti->arg1 = kd->arg1;
1930 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1931 sargptr = (long *)&ti->pathname[0];
1932
1933 *sargptr++ = kd->arg2;
1934 *sargptr++ = kd->arg3;
1935 *sargptr++ = kd->arg4;
1936 ti->pathptr = sargptr;
1937
1938 } else {
1939 sargptr = ti->pathptr;
1940
1941 /*
1942 We don't want to overrun our pathname buffer if the
1943 kernel sends us more VFS_LOOKUP entries than we can
1944 handle.
1945 */
1946
1947 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1948 {
1949 kd++;
1950 continue;
1951 }
1952
1953 /*
1954 We need to detect consecutive vfslookup entries.
1955 So, if we get here and find a START entry,
1956 fake the pathptr so we can bypass all further
1957 vfslookup entries.
1958 */
1959
1960 if (kd->debugid & DBG_FUNC_START)
1961 {
1962 ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1963 }
1964 else
1965 {
1966 *sargptr++ = kd->arg1;
1967 *sargptr++ = kd->arg2;
1968 *sargptr++ = kd->arg3;
1969 *sargptr++ = kd->arg4;
1970 ti->pathptr = sargptr;
1971 }
1972 }
1973 kd++;
1974 }
1975
1976 kd--;
1977 /* print the tail end of the pathname */
1978 len = strlen(ti->pathname);
1979 if (len > 42)
1980 len -= 42;
1981 else
1982 len = 0;
1983
1984 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1985 timestamp - start_bias, delta, "VFS_LOOKUP",
1986 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1987
1988 last_timestamp = timestamp;
1989 break;
1990
1991 default:
1992 if (debugid & DBG_FUNC_START)
1993 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1994 else if (debugid & DBG_FUNC_END)
1995 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1996 else
1997 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1998
1999 last_timestamp = timestamp;
2000 break;
2001 }
2002 }
2003 return(kd_stop);
2004 }
2005
2006
2007 double handle_decrementer(kd_buf *kd)
2008 {
2009 double latency;
2010 int elapsed_usecs;
2011
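/*
 * arg1 of a DECR_TRAP entry appears to hold the decrementer value seen
 * at trap time; when it has gone negative, its magnitude is treated as
 * how many timebase ticks late the interrupt was serviced, and is
 * converted to microseconds via divisor.
 */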
2012 if ((int)(kd->arg1) >= 0)
2013 latency = 1;
2014 else
2015 latency = (((double)(-1 - kd->arg1)) / divisor);
2016 elapsed_usecs = (int)latency;
2017
2018 if (elapsed_usecs < 100)
2019 i_usec_10_bins[elapsed_usecs/10]++;
2020 if (elapsed_usecs < 1000)
2021 i_usec_100_bins[elapsed_usecs/100]++;
2022 else if (elapsed_usecs < 10000)
2023 i_msec_1_bins[elapsed_usecs/1000]++;
2024 else if (elapsed_usecs < 50000)
2025 i_msec_10_bins[elapsed_usecs/10000]++;
2026 else
2027 i_too_slow++;
2028
2029 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2030 i_exceeded_threshold++;
2031 if (elapsed_usecs > i_max_latency)
2032 i_max_latency = elapsed_usecs;
2033 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2034 i_min_latency = elapsed_usecs;
2035 i_total_latency += elapsed_usecs;
2036 i_total_samples++;
2037
2038 return (latency);
2039 }
2040
2041
2042 void init_code_file()
2043 {
2044 FILE *fp;
2045 int i, n, cnt, code;
2046 char name[128];
2047
2048 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2049 if (log_fp)
2050 fprintf(log_fp, "open of %s failed\n", code_file);
2051 return;
2052 }
2053 n = fscanf(fp, "%d\n", &cnt);
2054
2055 if (n != 1) {
2056 if (log_fp)
2057 fprintf(log_fp, "bad format found in %s\n", code_file);
2058 return;
2059 }
2060 for (i = 0; i < MAX_ENTRIES; i++) {
2061 n = fscanf(fp, "%x%s\n", &code, name);
2062
2063 if (n != 2)
2064 break;
2065
2066 strncpy(codes_tab[i].name, name, 32);
2067 codes_tab[i].type = code;
2068 }
2069 num_of_codes = i;
2070
2071 fclose(fp);
2072 }
2073
2074
2075 void
2076 do_kernel_nm()
2077 {
2078 int i, len;
2079 FILE *fp = (FILE *)0;
2080 char tmp_nm_file[128];
2081 char tmpstr[1024];
2082 char inchr; int ch; /* keep getc() result in an int so EOF compares correctly */
2083
2084 bzero(tmp_nm_file, 128);
2085 bzero(tmpstr, 1024);
2086
2087 /* Build the temporary nm file path */
2088 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2089
2090 /* Build the nm command and create a tmp file with the output */
2091 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2092 kernelpath, tmp_nm_file);
2093 system(tmpstr);
2094
2095 /* Parse the output from the nm command */
2096 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2097 {
2098 /* Hmmm, let's not treat this as fatal */
2099 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2100 return;
2101 }
2102
2103 /* Count the number of symbols in the nm symbol table */
2104 kern_sym_count=0;
2105 while ( (ch = getc(fp)) != EOF)
2106 {
2107 if (ch == '\n')
2108 kern_sym_count++;
2109 }
2110
2111 rewind(fp);
2112
2113 /* Malloc the space for symbol table */
2114 if (kern_sym_count > 0)
2115 {
2116 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2117 if (!kern_sym_tbl)
2118 {
2119 /* Hmmm, let's not treat this as fatal */
2120 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2121 }
2122 else
2123 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2124 }
2125 else
2126 {
2127 /* Hmmm, let's not treat this as fatal */
2128 fprintf(stderr, "No kernel symbol table\n");
2129 }
2130
2131 for (i=0; i<kern_sym_count; i++)
2132 {
2133 bzero(tmpstr, 1024);
2134 if (fscanf(fp, "%lx %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2135 break;
2136 else
2137 {
2138 len = strlen(tmpstr);
2139 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2140
2141 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2142 {
2143 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2144 kern_sym_tbl[i].k_sym_name = (char *)0;
2145 len = 0;
2146 }
2147 else
2148 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2149
2150 kern_sym_tbl[i].k_sym_len = len;
2151 }
2152 } /* end for */
2153
2154 if (i != kern_sym_count)
2155 {
2156 /* Hmmm, we didn't build up the entire table from nm, */
2157 /* so scrap the whole thing */
2158 if (kern_sym_tbl)
2159 free (kern_sym_tbl);
2160 kern_sym_tbl = (kern_sym_t *)0;
2161 kern_sym_count = 0;
2162 }
2163
2164 fclose(fp);
2165
2166 /* Remove the temporary nm file */
2167 unlink(tmp_nm_file);
2168
2169 #if 0
2170 /* Dump the kernel symbol table */
2171 for (i=0; i < kern_sym_count; i++)
2172 {
2173 if (kern_sym_tbl[i].k_sym_name)
2174 printf ("[%d] 0x%x %s\n", i,
2175 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2176 else
2177 printf ("[%d] 0x%x %s\n", i,
2178 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2179 }
2180 #endif
2181 }
2182
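/*
 * Map a kernel pc to a "symbol+offset" string using the nm-derived
 * table (binary search over the sorted addresses).  mode == 0 marks a
 * user-space address, for which no symbols are available.
 */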
2183 char *
2184 pc_to_string(unsigned int pc, int max_len, int mode)
2185 {
2186 int ret;
2187 int len;
2188
2189 int binary_search();
2190
2191 if (mode == 0)
2192 {
2193 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2194 return(pcstring);
2195 }
2196
2197 ret=0;
2198 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2199
2200 if (ret == -1)
2201 {
2202 sprintf(pcstring, "0x%x", pc);
2203 return(pcstring);
2204 }
2205 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2206 {
2207 sprintf(pcstring, "0x%x", pc);
2208 return(pcstring);
2209 }
2210 else
2211 {
2212 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2213 len = max_len - 8;
2214
2215 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2216 sprintf(&pcstring[len], "+0x%-5lx", pc - kern_sym_tbl[ret].k_sym_addr);
2217
2218 return (pcstring);
2219 }
2220 }
2221
2222
2223 /* Return -1 if not found, else return index */
2224 int binary_search(list, low, high, addr)
2225 kern_sym_t *list;
2226 int low, high;
2227 unsigned int addr;
2228 {
2229 int mid;
2230
2231 mid = (low + high) / 2;
2232
2233 if (low > high)
2234 return (-1); /* failed */
2235 else if (low + 1 == high)
2236 {
2237 if (list[low].k_sym_addr <= addr &&
2238 addr < list[high].k_sym_addr)
2239 {
2240 /* We have a range match */
2241 return(low);
2242 }
2243 else if (list[high].k_sym_addr <= addr)
2244 {
2245 return(high);
2246 }
2247 else
2248 return(-1); /* Failed */
2249 }
2250 else if (addr < list[mid].k_sym_addr)
2251 {
2252 return(binary_search (list, low, mid, addr));
2253 }
2254 else
2255 {
2256 return(binary_search (list, mid, high, addr));
2257 }
2258 }
2259
2260 void
2261 open_logfile(char *path)
2262 {
2263 log_fp = fopen(path, "a");
2264
2265 if (!log_fp)
2266 {
2267 /* failed to open path */
2268 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2269 exit_usage();
2270 }
2271 }