latency.tproj/latency.c (from apple/system_cmds, release 433.1)
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 /*
27 cc -I. -DPRIVATE -D__APPLE_PRIVATE -O -o latency latency.c -lncurses
28 */
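/*
 * Illustrative invocation (hypothetical values and log path, based on the
 * options documented in exit_usage() below; the tool must be run as root):
 *
 *	sudo ./latency -rt -st 1000 -l /tmp/latency.log
 *
 * i.e. use the realtime scheduling policy, treat scheduling latencies above
 * 1000 microseconds as threshold violations, and append the trace dumps for
 * those violations to /tmp/latency.log.
 */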
29
30 #include <mach/mach.h>
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <signal.h>
35 #include <strings.h>
36 #include <nlist.h>
37 #include <fcntl.h>
38 #include <string.h>
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/time.h>
43
44 #include <libc.h>
45 #include <termios.h>
46 #include <curses.h>
47 #include <sys/ioctl.h>
48
49 #ifndef KERNEL_PRIVATE
50 #define KERNEL_PRIVATE
51 #include <sys/kdebug.h>
52 #undef KERNEL_PRIVATE
53 #else
54 #include <sys/kdebug.h>
55 #endif /*KERNEL_PRIVATE*/
56
57 #include <sys/sysctl.h>
58 #include <errno.h>
59 #include <err.h>
60
61 #include <mach/host_info.h>
62 #include <mach/mach_error.h>
63 #include <mach/mach_types.h>
64 #include <mach/message.h>
65 #include <mach/mach_syscalls.h>
66 #include <mach/clock_types.h>
67 #include <mach/mach_time.h>
68
69 #include <libkern/OSTypes.h>
70
71 extern mach_port_t clock_port;
72
73 #define KERN_KDPIDEX 14
74
75
76 int s_usec_10_bins[10];
77 int s_usec_100_bins[10];
78 int s_msec_1_bins[10];
79 int s_msec_10_bins[5];
80 int s_too_slow;
81 int s_max_latency;
82 int s_min_latency = 0;
83 long long s_total_latency = 0;
84 int s_total_samples;
85 long s_thresh_hold;
86 int s_exceeded_threshold = 0;
87
88 int i_usec_10_bins[10];
89 int i_usec_100_bins[10];
90 int i_msec_1_bins[10];
91 int i_msec_10_bins[5];
92 int i_too_slow;
93 int i_max_latency;
94 int i_min_latency = 0;
95 long long i_total_latency = 0;
96 int i_total_samples;
97 long i_thresh_hold;
98 int i_exceeded_threshold = 0;
99
100 long start_time;
101 long curr_time;
102 long refresh_time;
103
104 char *policy_name;
105 int my_policy;
106 int my_pri = -1;
107 int num_of_usecs_to_sleep = 1000;
108
109 char *kernelpath = (char *)0;
110 char *code_file = (char *)0;
111
112 typedef struct {
113 u_long k_sym_addr; /* kernel symbol address from nm */
114 u_int k_sym_len; /* length of kernel symbol string */
115 char *k_sym_name; /* kernel symbol string from nm */
116 } kern_sym_t;
117
118 kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
119 int kern_sym_count; /* number of entries in nm table */
120 char pcstring[128];
121
122 #define UNKNOWN "Can't find symbol name"
123
124
125 double divisor;
126 int gotSIGWINCH = 0;
127 int trace_enabled = 0;
128 struct host_basic_info hi;
129
130
131 #define SAMPLE_SIZE 300000
132
133 int mib[6];
134 size_t needed;
135 char *my_buffer;
136
137 kbufinfo_t bufinfo = {0, 0, 0};
138
139 FILE *log_fp = (FILE *)0;
140 int num_of_codes = 0;
141 int need_new_map = 0;
142 int total_threads = 0;
143 kd_threadmap *mapptr = 0;
144
145 #define MAX_ENTRIES 4096
146 struct ct {
147 int type;
148 char name[32];
149 } codes_tab[MAX_ENTRIES];
150
151
152 #define NUMPARMS 23
153
154 struct th_info {
155 int thread;
156 int type;
157 int child_thread;
158 int arg1;
159 double stime;
160 long *pathptr;
161 long pathname[NUMPARMS + 1];
162 };
163
164 #define MAX_THREADS 512
165 struct th_info th_state[MAX_THREADS];
166
167 int cur_max = 0;
168
169 #define TRACE_DATA_NEWTHREAD 0x07000004
170 #define TRACE_STRING_NEWTHREAD 0x07010004
171 #define TRACE_STRING_EXEC 0x07010008
172
173 #define INTERRUPT 0x01050000
174 #define DECR_TRAP 0x01090000
175 #define DECR_SET 0x01090004
176 #define MACH_vmfault 0x01300008
177 #define MACH_sched 0x01400000
178 #define MACH_stkhandoff 0x01400008
179 #define VFS_LOOKUP 0x03010090
180 #define BSC_exit 0x040C0004
181 #define IES_action 0x050b0018
182 #define IES_filter 0x050b001c
183 #define TES_action 0x050c0010
184 #define CQ_action 0x050d0018
185
186
187 #define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
188 #define DBG_FUNC_MASK 0xfffffffc
189
190 #define CPU_NUMBER(ts) ((ts & KDBG_CPU_MASK) >> KDBG_CPU_SHIFT)
191
192 #define DBG_ZERO_FILL_FAULT 1
193 #define DBG_PAGEIN_FAULT 2
194 #define DBG_COW_FAULT 3
195 #define DBG_CACHE_HIT_FAULT 4
196
197 char *fault_name[5] = {
198 "",
199 "ZeroFill",
200 "PageIn",
201 "COW",
202 "CacheHit",
203 };
204
205 char *pc_to_string();
206 static kern_return_t set_time_constraint_policy(void);
207 static kern_return_t set_standard_policy(void);
208
209 int decrementer_val = 0; /* Value used to reset decrementer */
210 int set_remove_flag = 1; /* By default, remove trace buffer */
211
212 kd_buf **last_decrementer_kd; /* last DECR_TRAP per cpu */
213 #define MAX_LOG_COUNT 30 /* limits the number of entries dumped in log_decrementer */
214
215 int
216 quit(s)
217 char *s;
218 {
219 void set_enable();
220 void set_rtcdec();
221 void set_remove();
222
223 if (trace_enabled)
224 set_enable(0);
225
226 /*
227 This flag is turned off when calling
228 quit() due to a set_remove() failure.
229 */
230 if (set_remove_flag)
231 set_remove();
232
233 if (decrementer_val)
234 set_rtcdec(0);
235
236 printf("latency: ");
237 if (s)
238 printf("%s", s);
239
240 exit(1);
241 }
242
243 void
244 set_enable(int val)
245 {
246 mib[0] = CTL_KERN;
247 mib[1] = KERN_KDEBUG;
248 mib[2] = KERN_KDENABLE; /* protocol */
249 mib[3] = val;
250 mib[4] = 0;
251 mib[5] = 0; /* no flags */
252
253 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
254 quit("trace facility failure, KERN_KDENABLE\n");
255 }
256
257 void
258 set_numbufs(int nbufs)
259 {
260 mib[0] = CTL_KERN;
261 mib[1] = KERN_KDEBUG;
262 mib[2] = KERN_KDSETBUF;
263 mib[3] = nbufs;
264 mib[4] = 0;
265 mib[5] = 0; /* no flags */
266 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
267 quit("trace facility failure, KERN_KDSETBUF\n");
268
269 mib[0] = CTL_KERN;
270 mib[1] = KERN_KDEBUG;
271 mib[2] = KERN_KDSETUP;
272 mib[3] = 0;
273 mib[4] = 0;
274 mib[5] = 0; /* no flags */
275 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
276 quit("trace facility failure, KERN_KDSETUP\n");
277
278 }
279
280 void
281 set_pidexclude(int pid, int on_off)
282 {
283 kd_regtype kr;
284
285 kr.type = KDBG_TYPENONE;
286 kr.value1 = pid;
287 kr.value2 = on_off;
288 needed = sizeof(kd_regtype);
289 mib[0] = CTL_KERN;
290 mib[1] = KERN_KDEBUG;
291 mib[2] = KERN_KDPIDEX;
292 mib[3] = 0;
293 mib[4] = 0;
294 mib[5] = 0;
295
296 sysctl(mib, 3, &kr, &needed, NULL, 0);
297 }
298
299 void set_rtcdec(decval)
300 int decval;
301 {kd_regtype kr;
302 int ret;
303 extern int errno;
304
305 kr.type = KDBG_TYPENONE;
306 kr.value1 = decval;
307 needed = sizeof(kd_regtype);
308 mib[0] = CTL_KERN;
309 mib[1] = KERN_KDEBUG;
310 mib[2] = KERN_KDSETRTCDEC; /* protocol */
311 mib[3] = 0; /* wildcard address family */
312 mib[4] = 0;
313 mib[5] = 0; /* no flags */
314
315 errno = 0;
316 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
317 {
318 decrementer_val = 0;
319 /* ignore this sysctl error if it's not supported */
320 if (errno == ENOENT)
321 return;
322 else
323 quit("trace facility failure, KERN_KDSETRTCDEC\n");
324 }
325 }
326
327
328 void
329 get_bufinfo(kbufinfo_t *val)
330 {
331 needed = sizeof (*val);
332 mib[0] = CTL_KERN;
333 mib[1] = KERN_KDEBUG;
334 mib[2] = KERN_KDGETBUF;
335 mib[3] = 0;
336 mib[4] = 0;
337 mib[5] = 0; /* no flags */
338
339 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
340 quit("trace facility failure, KERN_KDGETBUF\n");
341
342 }
343
344 void
345 set_remove()
346 {
347 extern int errno;
348
349 mib[0] = CTL_KERN;
350 mib[1] = KERN_KDEBUG;
351 mib[2] = KERN_KDREMOVE; /* protocol */
352 mib[3] = 0;
353 mib[4] = 0;
354 mib[5] = 0; /* no flags */
355
356 errno = 0;
357
358 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
359 {
360 set_remove_flag = 0;
361 if(errno == EBUSY)
362 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
363 else
364 quit("trace facility failure, KERN_KDREMOVE\n");
365 }
366 }
367
368 void
369 set_init_nologging()
370 {
371 /* When we aren't logging, only collect the DECR_TRAP trace points */
372 kd_regtype kr;
373 kr.type = KDBG_VALCHECK;
374 kr.value1 = DECR_TRAP;
375 kr.value2 = 0;
376 kr.value3 = 0;
377 kr.value4 = 0;
378 needed = sizeof(kd_regtype);
379 mib[0] = CTL_KERN;
380 mib[1] = KERN_KDEBUG;
381 mib[2] = KERN_KDSETREG;
382 mib[3] = 0;
383 mib[4] = 0;
384 mib[5] = 0; /* no flags */
385 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
386 quit("trace facility failure, KERN_KDSETREG\n");
387
388 mib[0] = CTL_KERN;
389 mib[1] = KERN_KDEBUG;
390 mib[2] = KERN_KDSETUP;
391 mib[3] = 0;
392 mib[4] = 0;
393 mib[5] = 0; /* no flags */
394
395 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
396 quit("trace facility failure, KERN_KDSETUP\n");
397 }
398
399 void
400 set_init_logging()
401 { kd_regtype kr;
402
403 kr.type = KDBG_RANGETYPE;
404 kr.value1 = 0;
405 kr.value2 = -1;
406 needed = sizeof(kd_regtype);
407 mib[0] = CTL_KERN;
408 mib[1] = KERN_KDEBUG;
409 mib[2] = KERN_KDSETREG;
410 mib[3] = 0;
411 mib[4] = 0;
412 mib[5] = 0; /* no flags */
413
414 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
415 quit("trace facility failure, KERN_KDSETREG\n");
416
417 mib[0] = CTL_KERN;
418 mib[1] = KERN_KDEBUG;
419 mib[2] = KERN_KDSETUP;
420 mib[3] = 0;
421 mib[4] = 0;
422 mib[5] = 0; /* no flags */
423
424 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
425 quit("trace facility failure, KERN_KDSETUP\n");
426 }
427
428
429 void sigwinch()
430 {
431 gotSIGWINCH = 1;
432 }
433
434 void sigintr()
435 {
436 void screen_update();
437
438 set_enable(0);
439 set_pidexclude(getpid(), 0);
440 screen_update(log_fp);
441 endwin();
442 set_rtcdec(0);
443 set_remove();
444
445 exit(1);
446 }
447
448 void leave() /* exit under normal conditions -- signal handler */
449 {
450 set_enable(0);
451 set_pidexclude(getpid(), 0);
452 endwin();
453 set_rtcdec(0);
454 set_remove();
455
456 exit(1);
457 }
458
459 void
460 screen_update(FILE *fp)
461 {
462 int i;
463 int itotal, stotal;
464 int elapsed_secs;
465 int elapsed_mins;
466 int elapsed_hours;
467 unsigned int average_s_latency;
468 unsigned int average_i_latency;
469 char tbuf[256];
470
471 if (fp == (FILE *)0) {
472 erase();
473 move(0, 0);
474 } else
475 fprintf(fp,"\n\n===================================================================================================\n");
476 /*
477 * Display the current time.
478 * "ctime" always returns a string that looks like this:
479 *
480 * Sun Sep 16 01:03:52 1973
481 * 012345678901234567890123
482 * 1 2
483 *
484 * We want indices 11 thru 18 (length 8).
485 */
486 elapsed_secs = curr_time - start_time;
487 elapsed_hours = elapsed_secs / 3600;
488 elapsed_secs -= elapsed_hours * 3600;
489 elapsed_mins = elapsed_secs / 60;
490 elapsed_secs -= elapsed_mins * 60;
491
492 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
493 (long)elapsed_hours, (long)elapsed_mins, (long)elapsed_secs);
494 if (fp)
495 fprintf(fp, "%s", tbuf);
496 else
497 printw(tbuf);
498
499
500
501 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
502
503 if (fp)
504 fprintf(fp, "%s", tbuf);
505 else
506 printw(tbuf);
507
508 sprintf(tbuf, "---------------------------------------------\n");
509
510 if (fp)
511 fprintf(fp, "%s", tbuf);
512 else
513 printw(tbuf);
514
515
516
517 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
518
519 if (fp)
520 fprintf(fp, "%s", tbuf);
521 else
522 printw(tbuf);
523
524
525
526 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
527 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
528
529 if (fp)
530 fprintf(fp, "%s", tbuf);
531 else
532 printw(tbuf);
533
534 stotal += s_usec_10_bins[i];
535 itotal += i_usec_10_bins[i];
536 }
537 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
538
539 if (fp)
540 fprintf(fp, "%s", tbuf);
541 else
542 printw(tbuf);
543
544
545
546 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
547 if (i < 9)
548 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
549 else
550 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
551
552 if (fp)
553 fprintf(fp, "%s", tbuf);
554 else
555 printw(tbuf);
556
557 stotal += s_usec_100_bins[i];
558 itotal += i_usec_100_bins[i];
559 }
560 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
561
562 if (fp)
563 fprintf(fp, "%s", tbuf);
564 else
565 printw(tbuf);
566
567
568
569 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
570 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
571
572 if (fp)
573 fprintf(fp, "%s", tbuf);
574 else
575 printw(tbuf);
576
577 stotal += s_msec_1_bins[i];
578 itotal += i_msec_1_bins[i];
579 }
580 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
581
582 if (fp)
583 fprintf(fp, "%s", tbuf);
584 else
585 printw(tbuf);
586
587
588
589
590 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
591 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
592
593 if (fp)
594 fprintf(fp, "%s", tbuf);
595 else
596 printw(tbuf);
597
598 stotal += s_msec_10_bins[i];
599 itotal += i_msec_10_bins[i];
600 }
601 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
602
603 if (fp)
604 fprintf(fp, "%s", tbuf);
605 else
606 printw(tbuf);
607
608 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
609
610 if (fp)
611 fprintf(fp, "%s", tbuf);
612 else
613 printw(tbuf);
614
615
616
617 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
618
619 if (fp)
620 fprintf(fp, "%s", tbuf);
621 else
622 printw(tbuf);
623
624 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
625
626 if (fp)
627 fprintf(fp, "%s", tbuf);
628 else
629 printw(tbuf);
630
631 if (s_total_samples)
632 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
633 else
634 average_s_latency = 0;
635
636 if (i_total_samples)
637 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
638 else
639 average_i_latency = 0;
640
641 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
642
643 if (fp)
644 fprintf(fp, "%s", tbuf);
645 else
646 printw(tbuf);
647
648 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
649
650 if (fp)
651 fprintf(fp, "%s", tbuf);
652 else
653 printw(tbuf);
654
655 if (fp == (FILE *)0)
656 refresh();
657 else
658 fflush(fp);
659 }
660
661 int
662 exit_usage()
663 {
664
665 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
666
667 #if defined (__i386__)
668 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs] [-n kernel]\n\n");
669 #else
670 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
671 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
672 #endif
673
674
675 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
676 fprintf(stderr, " -c specify name of codes file\n");
677 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
678 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
679 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
680 fprintf(stderr, " -s set sleep time in microseconds\n");
681 #if !defined (__i386__)
682 fprintf(stderr, " -d set decrementer in microseconds.\n");
683 #endif
684 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
685
686 fprintf(stderr, "\nlatency must be run as root\n\n");
687
688 exit(1);
689 }
690
691
692 int
693 main(argc, argv)
694 int argc;
695 char *argv[];
696 {
697 uint64_t start, stop;
698 uint64_t timestamp1;
699 uint64_t timestamp2;
700 uint64_t adeadline, adelay;
701 double fdelay;
702 int elapsed_usecs;
703 double nanosecs_to_sleep;
704 int loop_cnt, sample_sc_now;
705 int decrementer_usec = 0;
706 kern_return_t ret;
707 unsigned int size;
708 host_name_port_t host;
709 void getdivisor();
710 void sample_sc();
711 void init_code_file();
712 void do_kernel_nm();
713 void open_logfile();
714
715 my_policy = THREAD_STANDARD_POLICY;
716 policy_name = "TIMESHARE";
717
718 while (argc > 1) {
719 if (strcmp(argv[1], "-rt") == 0) {
720 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
721 policy_name = "REALTIME";
722
723 } else if (strcmp(argv[1], "-st") == 0) {
724 argc--;
725 argv++;
726
727 if (argc > 1)
728 s_thresh_hold = atoi(argv[1]);
729 else
730 exit_usage();
731
732 } else if (strcmp(argv[1], "-it") == 0) {
733 argc--;
734 argv++;
735
736 if (argc > 1)
737 i_thresh_hold = atoi(argv[1]);
738 else
739 exit_usage();
740 } else if (strcmp(argv[1], "-c") == 0) {
741 argc--;
742 argv++;
743
744 if (argc > 1)
745 code_file = argv[1];
746 else
747 exit_usage();
748 } else if (strcmp(argv[1], "-l") == 0) {
749 argc--;
750 argv++;
751
752 if (argc > 1)
753 open_logfile(argv[1]);
754 else
755 exit_usage();
756
757 } else if (strcmp(argv[1], "-s") == 0) {
758 argc--;
759 argv++;
760
761 if (argc > 1)
762 num_of_usecs_to_sleep = atoi(argv[1]);
763 else
764 exit_usage();
765 }
766 else if (strcmp(argv[1], "-d") == 0) {
767 argc--;
768 argv++;
769
770 if (argc > 1)
771 decrementer_usec = atoi(argv[1]);
772 else
773 exit_usage();
774 #if defined(__i386__)
775 /* ignore this option - setting the decrementer has no effect */
776 decrementer_usec = 0;
777 #endif
778 }
779 else if (strcmp(argv[1], "-n") == 0) {
780 argc--;
781 argv++;
782
783 if (argc > 1)
784 kernelpath = argv[1];
785 else
786 exit_usage();
787 } else
788 exit_usage();
789
790 argc--;
791 argv++;
792 }
793
794 if ( geteuid() != 0 ) {
795 printf("'latency' must be run as root...\n");
796 exit(1);
797 }
798
799 if (kernelpath == (char *) 0)
800 kernelpath = "/mach_kernel";
801
802 if (code_file == (char *) 0)
803 code_file = "/usr/share/misc/trace.codes";
804
805 do_kernel_nm();
806
807 sample_sc_now = 25000 / num_of_usecs_to_sleep;
808
809 getdivisor();
810 decrementer_val = decrementer_usec * divisor;
811
812        /* get the cpu count for the DECR_TRAP array */
813 host = mach_host_self();
814 size = sizeof(hi)/sizeof(int);
815 ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
816 if (ret != KERN_SUCCESS) {
817 mach_error(argv[0], ret);
818 exit(EXIT_FAILURE);
819 }
820
821 if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
822 quit("can't allocate memory for decrementer tracing info\n");
823
824 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
825 fdelay = nanosecs_to_sleep * (divisor /1000);
826 adelay = (uint64_t)fdelay;
827
828 init_code_file();
829
830 /*
831 When the decrementer isn't set in the options,
832 decval will be zero and this call will reset
833 the system default ...
834 */
835 set_rtcdec(decrementer_val);
836
837 if (initscr() == (WINDOW *) 0)
838 {
839 printf("Unrecognized TERM type, try vt100\n");
840 exit(1);
841 }
842
843 clear();
844 refresh();
845 signal(SIGWINCH, sigwinch);
846 signal(SIGINT, sigintr);
847 signal(SIGQUIT, leave);
848 signal(SIGTERM, leave);
849 signal(SIGHUP, leave);
850
851
852 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
853 quit("can't allocate memory for tracing info\n");
854 set_remove();
855 set_numbufs(SAMPLE_SIZE);
856 set_enable(0);
857 if(log_fp)
858 set_init_logging();
859 else
860 set_init_nologging();
861 set_pidexclude(getpid(), 1);
862 set_enable(1);
863 trace_enabled = 1;
864 need_new_map = 1;
865
866 loop_cnt = 0;
867 start_time = time((long *)0);
868 refresh_time = start_time;
869
870 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
871 {
872 /* the realtime band */
873 if(set_time_constraint_policy() != KERN_SUCCESS)
874 quit("Failed to set realtime policy.\n");
875 }
876
877 for (;;) {
878 curr_time = time((long *)0);
879
880 if (curr_time >= refresh_time) {
881 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
882 {
883 /* set standard timeshare policy during screen update */
884 if(set_standard_policy() != KERN_SUCCESS)
885 quit("Failed to set standard policy.\n");
886 }
887 screen_update((FILE *)0);
888 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
889 {
890 /* set back to realtime band */
891 if(set_time_constraint_policy() != KERN_SUCCESS)
892 quit("Failed to set time_constraint policy.\n");
893 }
894 refresh_time = curr_time + 1;
895 }
896
897 timestamp1 = mach_absolute_time();
898 adeadline = timestamp1 + adelay;
899 mach_wait_until(adeadline);
900 timestamp2 = mach_absolute_time();
901
902 start = timestamp1;
903
904 stop = timestamp2;
905
906 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
907
908 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
909 continue;
910
911 if (elapsed_usecs < 100)
912 s_usec_10_bins[elapsed_usecs/10]++;
913 if (elapsed_usecs < 1000)
914 s_usec_100_bins[elapsed_usecs/100]++;
915 else if (elapsed_usecs < 10000)
916 s_msec_1_bins[elapsed_usecs/1000]++;
917 else if (elapsed_usecs < 50000)
918 s_msec_10_bins[elapsed_usecs/10000]++;
919 else
920 s_too_slow++;
921
922 if (elapsed_usecs > s_max_latency)
923 s_max_latency = elapsed_usecs;
924 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
925 s_min_latency = elapsed_usecs;
926 s_total_latency += elapsed_usecs;
927 s_total_samples++;
928
929 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
930 s_exceeded_threshold++;
931 loop_cnt++;
932
933 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
934 sample_sc(start, stop);
935 else {
936 if (loop_cnt >= sample_sc_now) {
937 sample_sc((long long)0, (long long)0);
938 loop_cnt = 0;
939 }
940 }
941 if (gotSIGWINCH) {
942 /*
943 No need to check for initscr error return.
944 We won't get here if it fails on the first call.
945 */
946 endwin();
947 clear();
948 refresh();
949
950 gotSIGWINCH = 0;
951 }
952 }
953 }
954
955
956 void getdivisor()
957 {
958 mach_timebase_info_data_t info;
959
960 (void) mach_timebase_info (&info);
961
962 divisor = ( (double)info.denom / (double)info.numer) * 1000;
963
964 }
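/*
 * How the divisor above is used (a worked example with illustrative numbers):
 * mach_timebase_info() reports numer/denom such that
 *
 *	nanoseconds  = mach_absolute_time() ticks * numer / denom
 *
 * so with divisor = (denom / numer) * 1000,
 *
 *	microseconds = ticks / divisor
 *
 * For a hypothetical 25 MHz timebase (numer/denom == 40), divisor == 25,
 * i.e. 25 ticks per microsecond; 2500 elapsed ticks would report as 100us.
 */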
965
966 /* This is the realtime band */
967 static kern_return_t
968 set_time_constraint_policy()
969 {
970 kern_return_t result;
971 thread_time_constraint_policy_data_t info;
972 mach_msg_type_number_t count;
973 boolean_t get_default;
974
975 get_default = TRUE;
976 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
977 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
978 (thread_policy_t)&info, &count, &get_default);
979 if (result != KERN_SUCCESS)
980 return (result);
981
982 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
983 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
984
985 return (result);
986 }
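/*
 * Note that set_time_constraint_policy() simply re-applies the default
 * constraint values fetched with thread_policy_get(). A minimal sketch of
 * requesting explicit values instead (the numbers are hypothetical and not
 * taken from this tool; units are mach_absolute_time ticks, so microseconds
 * are scaled by 'divisor'):
 *
 *	info.period      = (uint32_t)(5000 * divisor);	/* 5 ms period */
 *	info.computation = (uint32_t)( 500 * divisor);	/* 0.5 ms of work per period */
 *	info.constraint  = (uint32_t)(1000 * divisor);	/* finish within 1 ms */
 *	info.preemptible = TRUE;
 *	result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
 *				   (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
 */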
987
988 /* This is the timeshare mode */
989 static kern_return_t
990 set_standard_policy()
991 {
992 kern_return_t result;
993 thread_standard_policy_data_t info;
994 mach_msg_type_number_t count;
995 boolean_t get_default;
996
997 get_default = TRUE;
998 count = THREAD_STANDARD_POLICY_COUNT;
999 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
1000 (thread_policy_t)&info, &count, &get_default);
1001 if (result != KERN_SUCCESS)
1002 return (result);
1003
1004 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
1005 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
1006
1007 return (result);
1008 }
1009
1010
1011 void read_command_map()
1012 {
1013 size_t size;
1014 int mib[6];
1015
1016 if (mapptr) {
1017 free(mapptr);
1018 mapptr = 0;
1019 }
1020 total_threads = bufinfo.nkdthreads;
1021 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1022 if (size)
1023 {
1024 if ((mapptr = (kd_threadmap *) malloc(size)))
1025 bzero (mapptr, size);
1026 else
1027 {
1028 printf("Thread map is not initialized -- this is not fatal\n");
1029 return;
1030 }
1031 }
1032
1033 /* Now read the threadmap */
1034 mib[0] = CTL_KERN;
1035 mib[1] = KERN_KDEBUG;
1036 mib[2] = KERN_KDTHRMAP;
1037 mib[3] = 0;
1038 mib[4] = 0;
1039 mib[5] = 0; /* no flags */
1040 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1041 {
1042        /* This is not fatal -- just means I can't map command strings */
1043
1044 printf("Can't read the thread map -- this is not fatal\n");
1045 free(mapptr);
1046 mapptr = 0;
1047 return;
1048 }
1049 return;
1050 }
1051
1052
1053 void create_map_entry(int thread, char *command)
1054 {
1055 int i, n;
1056 kd_threadmap *map;
1057
1058 if (!mapptr)
1059 return;
1060
1061 for (i = 0, map = 0; !map && i < total_threads; i++)
1062 {
1063 if (mapptr[i].thread == thread )
1064 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1065 }
1066
1067     if (!map)   /* look for invalid entries that I can reuse */
1068 {
1069 for (i = 0, map = 0; !map && i < total_threads; i++)
1070 {
1071 if (mapptr[i].valid == 0 )
1072 map = &mapptr[i]; /* Reuse this invalid entry */
1073 }
1074 }
1075
1076 if (!map)
1077 {
1078         /* If we reach here, then this is a new thread and
1079 * there are no invalid entries to reuse
1080 * Double the size of the thread map table.
1081 */
1082
1083 n = total_threads * 2;
1084 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1085 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1086 map = &mapptr[total_threads];
1087 total_threads = n;
1088 #if 0
1089 if (log_fp)
1090 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1091 #endif
1092 }
1093 #if 0
1094 if (log_fp)
1095 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1096 #endif
1097 map->valid = 1;
1098 map->thread = thread;
1099 /*
1100 The trace entry that returns the command name will hold
1101       at most MAXCOMLEN chars and, in that case, is not
1102 guaranteed to be null terminated.
1103 */
1104 (void)strncpy (map->command, command, MAXCOMLEN);
1105 map->command[MAXCOMLEN] = '\0';
1106 }
1107
1108
1109 kd_threadmap *find_thread_map(int thread)
1110 {
1111 int i;
1112 kd_threadmap *map;
1113
1114 if (!mapptr)
1115 return((kd_threadmap *)0);
1116
1117 for (i = 0; i < total_threads; i++)
1118 {
1119 map = &mapptr[i];
1120 if (map->valid && (map->thread == thread))
1121 {
1122 return(map);
1123 }
1124 }
1125 return ((kd_threadmap *)0);
1126 }
1127
1128 void
1129 kill_thread_map(int thread)
1130 {
1131 kd_threadmap *map;
1132
1133 if ((map = find_thread_map(thread))) {
1134
1135 #if 0
1136 if (log_fp)
1137 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1138 #endif
1139 map->valid = 0;
1140 map->thread = 0;
1141 map->command[0] = '\0';
1142 }
1143 }
1144
1145
1146 struct th_info *find_thread(int thread, int type1, int type2) {
1147 struct th_info *ti;
1148
1149 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1150 if (ti->thread == thread) {
1151 if (type1 == 0)
1152 return(ti);
1153 if (type1 == ti->type)
1154 return(ti);
1155 if (type2 == ti->type)
1156 return(ti);
1157 }
1158 }
1159 return ((struct th_info *)0);
1160 }
1161
1162
1163 char *find_code(type)
1164 {
1165 int i;
1166
1167 for (i = 0; i < num_of_codes; i++) {
1168 if (codes_tab[i].type == type)
1169 return(codes_tab[i].name);
1170 }
1171 return ((char *)0);
1172 }
1173
1174
1175 void sample_sc(uint64_t start, uint64_t stop)
1176 {
1177 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1178 uint64_t now;
1179 int count, i;
1180 int first_entry = 1;
1181 double timestamp = 0.0;
1182 double last_timestamp = 0.0;
1183 double delta = 0.0;
1184 double start_bias = 0.0;
1185 char command[32];
1186 void read_command_map();
1187
1188 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1189 {
1190 /* set standard timeshare policy when logging */
1191 if(set_standard_policy() != KERN_SUCCESS)
1192 quit("Failed to set standard policy.\n");
1193 }
1194
1195 /* Get kernel buffer information */
1196 get_bufinfo(&bufinfo);
1197
1198 if (need_new_map) {
1199 read_command_map();
1200 need_new_map = 0;
1201 }
1202 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1203 mib[0] = CTL_KERN;
1204 mib[1] = KERN_KDEBUG;
1205 mib[2] = KERN_KDREADTR;
1206 mib[3] = 0;
1207 mib[4] = 0;
1208 mib[5] = 0; /* no flags */
1209
1210 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1211 quit("trace facility failure, KERN_KDREADTR\n");
1212
1213 count = needed;
1214
1215 if (bufinfo.flags & KDBG_WRAPPED) {
1216 for (i = 0; i < cur_max; i++) {
1217 th_state[i].thread = 0;
1218 th_state[i].type = -1;
1219 th_state[i].pathptr = (long *)NULL;
1220 th_state[i].pathname[0] = 0;
1221 }
1222 cur_max = 0;
1223 need_new_map = 1;
1224
1225 set_enable(0);
1226 set_enable(1);
1227
1228 if (log_fp) {
1229 double latency;
1230
1231 latency = (double)(stop - start) / divisor;
1232 latency -= (double)num_of_usecs_to_sleep;
1233
1234 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1235 &(ctime(&curr_time)[0]), latency, count);
1236 }
1237 }
1238 end_of_sample = &((kd_buf *)my_buffer)[count];
1239
1240 /* Always reinitialize the DECR_TRAP array */
1241 for (i=0; i < hi.avail_cpus; i++)
1242 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1243
1244 last_mach_sched = (kd_buf *)0;
1245
1246 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1247 int debugid, thread, cpunum;
1248 int type, clen, mode;
1249 int len;
1250 char *p;
1251 long *sargptr;
1252 kd_buf *cur_kd;
1253 double i_latency = 0.0;
1254 struct th_info *ti;
1255 char command1[32];
1256 char sched_info[64];
1257 kd_threadmap *map;
1258 kd_threadmap *find_thread_map();
1259 double handle_decrementer();
1260 kd_buf *log_decrementer();
1261 int check_for_thread_update();
1262 void enter_syscall();
1263 void exit_syscall();
1264 void print_entry();
1265
1266 thread = kd->arg5;
1267 cpunum = CPU_NUMBER(kd->timestamp);
1268 debugid = kd->debugid;
1269 type = kd->debugid & DBG_FUNC_MASK;
1270
1271 if (check_for_thread_update(thread, type, kd))
1272 continue;
1273
1274 if (type == DECR_TRAP)
1275 i_latency = handle_decrementer(kd);
1276
1277 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1278
1279 timestamp = ((double)now) / divisor;
1280
1281 if (now < start || now > stop) {
1282 if (debugid & DBG_FUNC_START)
1283 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1284 else if (debugid & DBG_FUNC_END)
1285 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1286 else if (type == DECR_TRAP) {
1287 cur_kd = kd;
1288 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1289 start_kd = last_decrementer_kd[cpunum];
1290 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1291 if (kd >= end_of_sample)
1292 break;
1293 }
1294 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1295 {
1296 cpunum = CPU_NUMBER(kd->timestamp);
1297 last_decrementer_kd[cpunum] = kd;
1298 }
1299 else
1300 last_decrementer_kd[cpunum] = cur_kd;
1301 }
1302 continue;
1303 }
1304 if (first_entry) {
1305 double latency;
1306 char buf1[132];
1307 char buf2[132];
1308
1309 latency = (double)(stop - start) / divisor;
1310 latency -= (double)num_of_usecs_to_sleep;
1311
1312 if (my_pri == -1)
1313 sprintf(buf2, "default");
1314 else
1315 sprintf(buf2, "%d", my_pri);
1316 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1317 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1318 clen = strlen(buf1);
1319 memset(buf2, '-', clen);
1320 buf2[clen] = 0;
1321
1322 if (log_fp) {
1323 fprintf(log_fp, "\n\n%s\n", buf2);
1324 fprintf(log_fp, "%s\n\n", buf1);
1325 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1326 }
1327 start_bias = ((double)start) / divisor;
1328 last_timestamp = timestamp;
1329 first_entry = 0;
1330 }
1331 delta = timestamp - last_timestamp;
1332
1333 if ((map = find_thread_map(thread)))
1334 strcpy(command, map->command);
1335 else
1336 command[0] = 0;
1337
1338 switch (type) {
1339
1340 case CQ_action:
1341 if (log_fp) {
1342 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1343 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1344 }
1345 last_timestamp = timestamp;
1346 break;
1347
1348 case TES_action:
1349 if (log_fp) {
1350 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1351 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1352 }
1353
1354 last_timestamp = timestamp;
1355 break;
1356
1357 case IES_action:
1358 if (log_fp) {
1359 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1360 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1361 }
1362
1363 last_timestamp = timestamp;
1364 break;
1365
1366 case IES_filter:
1367 if (log_fp) {
1368 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1369 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1370 }
1371
1372 last_timestamp = timestamp;
1373 break;
1374
1375 case DECR_TRAP:
1376 last_decrementer_kd[cpunum] = kd;
1377
1378 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1379 p = "*";
1380 else
1381 p = " ";
1382
1383 mode = 1;
1384
1385 if ((ti = find_thread(kd->arg5, 0, 0))) {
1386 if (ti->type == -1 && strcmp(command, "kernel_task"))
1387 mode = 0;
1388 }
1389
1390 if (log_fp) {
1391 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1392 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1393 }
1394
1395 last_timestamp = timestamp;
1396 break;
1397
1398 case DECR_SET:
1399 if (log_fp) {
1400 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1401 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1402 }
1403
1404 last_timestamp = timestamp;
1405 break;
1406
1407 case MACH_sched:
1408 case MACH_stkhandoff:
1409 last_mach_sched = kd;
1410
1411 if ((map = find_thread_map(kd->arg2)))
1412 strcpy(command1, map->command);
1413 else
1414 sprintf(command1, "%-8x", kd->arg2);
1415
1416 if ((ti = find_thread(kd->arg2, 0, 0))) {
1417 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1418 p = "U";
1419 else
1420 p = "K";
1421 } else
1422 p = "*";
1423 memset(sched_info, ' ', sizeof(sched_info));
1424
1425 sprintf(sched_info, "%14.14s", command);
1426 clen = strlen(sched_info);
1427 sched_info[clen] = ' ';
1428
1429 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1430 clen = strlen(sched_info);
1431 sched_info[clen] = ' ';
1432
1433 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1434
1435 if (log_fp) {
1436 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1437 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1438 }
1439
1440 last_timestamp = timestamp;
1441 break;
1442
1443 case VFS_LOOKUP:
1444 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1445 if (cur_max >= MAX_THREADS)
1446 continue;
1447 ti = &th_state[cur_max++];
1448
1449 ti->thread = thread;
1450 ti->type = -1;
1451 ti->pathptr = (long *)0;
1452 ti->child_thread = 0;
1453 }
1454 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1455 {
1456 if (ti->pathptr == NULL) {
1457 ti->arg1 = kd->arg1;
1458 sargptr = ti->pathname;
1459
1460 *sargptr++ = kd->arg2;
1461 *sargptr++ = kd->arg3;
1462 *sargptr++ = kd->arg4;
1463 /*
1464 * NULL terminate the 'string'
1465 */
1466 *sargptr = 0;
1467 ti->pathptr = sargptr;
1468
1469 } else {
1470 sargptr = ti->pathptr;
1471
1472 /*
1473 We don't want to overrun our pathname buffer if the
1474 kernel sends us more VFS_LOOKUP entries than we can
1475 handle.
1476 */
1477
1478 if (sargptr >= &ti->pathname[NUMPARMS])
1479 {
1480 kd++;
1481 continue;
1482 }
1483
1484 /*
1485 We need to detect consecutive vfslookup entries.
1486 So, if we get here and find a START entry,
1487 fake the pathptr so we can bypass all further
1488 vfslookup entries.
1489 */
1490
1491 if (kd->debugid & DBG_FUNC_START)
1492 {
1493 ti->pathptr = &ti->pathname[NUMPARMS];
1494 }
1495 else
1496 {
1497 *sargptr++ = kd->arg1;
1498 *sargptr++ = kd->arg2;
1499 *sargptr++ = kd->arg3;
1500 *sargptr++ = kd->arg4;
1501 /*
1502 * NULL terminate the 'string'
1503 */
1504 *sargptr = 0;
1505
1506 ti->pathptr = sargptr;
1507 }
1508 }
1509 kd++;
1510 }
1511 p = (char *)ti->pathname;
1512
1513 kd--;
1514
1515 /* print the tail end of the pathname */
1516 len = strlen(p);
1517 if (len > 42)
1518 len -= 42;
1519 else
1520 len = 0;
1521
1522 if (log_fp) {
1523 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1524 timestamp - start_bias, delta, "VFS_LOOKUP",
1525 &p[len], ti->arg1, thread, cpunum, command);
1526 }
1527
1528 last_timestamp = timestamp;
1529 break;
1530
1531 default:
1532 if (debugid & DBG_FUNC_START)
1533 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1534 else if (debugid & DBG_FUNC_END)
1535 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1536 else
1537 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1538
1539 last_timestamp = timestamp;
1540 break;
1541 }
1542 }
1543 if (last_mach_sched && log_fp)
1544 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1545 #if 0
1546 if (first_entry == 0 && log_fp)
1547 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1548 #endif
1549 if (log_fp)
1550 fflush(log_fp);
1551
1552 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1553 {
1554 /* set back to realtime band */
1555 if(set_time_constraint_policy() != KERN_SUCCESS)
1556 quit("Failed to set time_constraint policy.\n");
1557 }
1558 }
1559
1560 void
1561 enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1562 {
1563 struct th_info *ti;
1564 int i;
1565 int cpunum;
1566 char *p;
1567
1568 cpunum = CPU_NUMBER(kd->timestamp);
1569
1570 if (print_info && fp) {
1571 if ((p = find_code(type))) {
1572 if (type == INTERRUPT) {
1573 int mode = 1;
1574
1575 if ((ti = find_thread(kd->arg5, 0, 0))) {
1576 if (ti->type == -1 && strcmp(command, "kernel_task"))
1577 mode = 0;
1578 }
1579
1580 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1581 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1582 } else if (type == MACH_vmfault) {
1583 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1584 timestamp - bias, delta, p, thread, cpunum, command);
1585 } else {
1586 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1587 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1588 thread, cpunum, command);
1589 }
1590 } else {
1591 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1592 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1593 thread, cpunum, command);
1594 }
1595 }
1596 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1597 if (cur_max >= MAX_THREADS) {
1598 static int do_this_once = 1;
1599
1600 if (do_this_once) {
1601 for (i = 0; i < cur_max; i++) {
1602 if (!fp)
1603 break;
1604 fprintf(fp, "thread = %x, type = %x\n",
1605 th_state[i].thread, th_state[i].type);
1606 }
1607 do_this_once = 0;
1608 }
1609 return;
1610
1611 }
1612 ti = &th_state[cur_max++];
1613
1614 ti->thread = thread;
1615 ti->child_thread = 0;
1616 }
1617 if (type != BSC_exit)
1618 ti->type = type;
1619 else
1620 ti->type = -1;
1621 ti->stime = timestamp;
1622 ti->pathptr = (long *)NULL;
1623
1624 #if 0
1625 if (print_info && fp)
1626 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1627 #endif
1628 }
1629
1630
1631 void
1632 exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1633 {
1634 struct th_info *ti;
1635 int cpunum;
1636 char *p;
1637 uint64_t user_addr;
1638
1639 cpunum = CPU_NUMBER(kd->timestamp);
1640
1641 ti = find_thread(thread, type, type);
1642 #if 0
1643 if (print_info && fp)
1644 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1645 #endif
1646 if (print_info && fp) {
1647 if (ti)
1648 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1649 else
1650 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1651
1652 if ((p = find_code(type))) {
1653 if (type == INTERRUPT) {
1654 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1655 } else if (type == MACH_vmfault && kd->arg4 <= DBG_CACHE_HIT_FAULT) {
1656 user_addr = ((uint64_t)kd->arg1 << 32) | (uint32_t)kd->arg2;
1657
1658 fprintf(fp, "%-28.28s %-8.8s %-16qx %-8x %d %s\n",
1659 p, fault_name[kd->arg4], user_addr,
1660 thread, cpunum, command);
1661 } else {
1662 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1663 p, kd->arg1, kd->arg2,
1664 thread, cpunum, command);
1665 }
1666 } else {
1667 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1668 type, kd->arg1, kd->arg2,
1669 thread, cpunum, command);
1670 }
1671 }
1672 if (ti == (struct th_info *)0) {
1673 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1674 if (cur_max >= MAX_THREADS)
1675 return;
1676 ti = &th_state[cur_max++];
1677
1678 ti->thread = thread;
1679 ti->child_thread = 0;
1680 ti->pathptr = (long *)NULL;
1681 }
1682 }
1683 ti->type = -1;
1684 }
1685
1686 void
1687 print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1688 {
1689 char *p;
1690 int cpunum;
1691
1692 if (!fp)
1693 return;
1694
1695 cpunum = CPU_NUMBER(kd->timestamp);
1696 #if 0
1697 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1698 #endif
1699 if ((p = find_code(type))) {
1700 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1701 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1702 thread, cpunum, command);
1703 } else {
1704 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1705 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1706 thread, cpunum, command);
1707 }
1708 }
1709
1710 int
1711 check_for_thread_update(int thread, int type, kd_buf *kd)
1712 {
1713 struct th_info *ti;
1714 void create_map_entry();
1715
1716 switch (type) {
1717
1718 case TRACE_DATA_NEWTHREAD:
1719 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1720 if (cur_max >= MAX_THREADS)
1721 return (1);
1722 ti = &th_state[cur_max++];
1723
1724 ti->thread = thread;
1725 ti->type = -1;
1726 ti->pathptr = (long *)NULL;
1727 }
1728 ti->child_thread = kd->arg1;
1729 return (1);
1730
1731 case TRACE_STRING_NEWTHREAD:
1732 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1733 return (1);
1734 if (ti->child_thread == 0)
1735 return (1);
1736 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1737
1738 ti->child_thread = 0;
1739 return (1);
1740
1741 case TRACE_STRING_EXEC:
1742 create_map_entry(thread, (char *)&kd->arg1);
1743 return (1);
1744
1745 }
1746 return (0);
1747 }
1748
1749
1750 kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1751 {
1752 kd_buf *kd, *kd_start, *kd_stop;
1753 int kd_count; /* Limit the boundary of kd_start */
1754 double timestamp = 0.0;
1755 double last_timestamp = 0.0;
1756 double delta = 0.0;
1757 double start_bias = 0.0;
1758 int thread, cpunum;
1759 int debugid, type, clen;
1760 int len;
1761 uint64_t now;
1762 struct th_info *ti;
1763 long *sargptr;
1764 char *p;
1765 char command[32];
1766 char command1[32];
1767 char sched_info[64];
1768 char buf1[128];
1769 char buf2[128];
1770 kd_threadmap *map;
1771 kd_threadmap *find_thread_map();
1772
1773 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1774 clen = strlen(buf1);
1775 memset(buf2, '-', clen);
1776 buf2[clen] = 0;
1777 fprintf(log_fp, "\n\n%s\n", buf2);
1778 fprintf(log_fp, "%s\n\n", buf1);
1779
1780 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1781
1782 thread = kd_beg->arg5;
1783 cpunum = CPU_NUMBER(kd_end->timestamp);
1784
1785 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1786 if (kd_count == MAX_LOG_COUNT)
1787 break;
1788
1789 if (CPU_NUMBER(kd_start->timestamp) != cpunum)
1790 continue;
1791
1792 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1793 break;
1794
1795 if (kd_start->arg5 != thread)
1796 break;
1797 }
1798
1799 if (kd_start < (kd_buf *)my_buffer)
1800 kd_start = (kd_buf *)my_buffer;
1801
1802 thread = kd_end->arg5;
1803
1804 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1805
1806 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1807 break;
1808
1809 if (CPU_NUMBER(kd_stop->timestamp) != cpunum)
1810 continue;
1811
1812 if (kd_stop->arg5 != thread)
1813 break;
1814 }
1815
1816 if (kd_stop >= end_of_sample)
1817 kd_stop = end_of_sample - 1;
1818
1819 now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
1820 timestamp = ((double)now) / divisor;
1821
1822 for (kd = kd_start; kd <= kd_stop; kd++) {
1823 type = kd->debugid & DBG_FUNC_MASK;
1824
1825 if ((ti = find_thread(kd->arg5, type, type))) {
1826 if (ti->stime >= timestamp)
1827 ti->type = -1;
1828 }
1829 }
1830 for (kd = kd_start; kd <= kd_stop; kd++) {
1831 int mode;
1832
1833 thread = kd->arg5;
1834 cpunum = CPU_NUMBER(kd->timestamp);
1835 debugid = kd->debugid;
1836 type = kd->debugid & DBG_FUNC_MASK;
1837
1838 now = kd->timestamp & KDBG_TIMESTAMP_MASK;
1839
1840 timestamp = ((double)now) / divisor;
1841
1842 if (kd == kd_start) {
1843 start_bias = timestamp;
1844 last_timestamp = timestamp;
1845 }
1846 delta = timestamp - last_timestamp;
1847
1848 if ((map = find_thread_map(thread)))
1849 strcpy(command, map->command);
1850 else
1851 command[0] = 0;
1852
1853
1854 switch (type) {
1855
1856 case CQ_action:
1857 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1858 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1859
1860 last_timestamp = timestamp;
1861 break;
1862
1863 case DECR_TRAP:
1864 if ((int)(kd->arg1) >= 0)
1865 i_latency = 0;
1866 else
1867 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1868
1869 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1870 p = "*";
1871 else
1872 p = " ";
1873
1874 mode = 1;
1875
1876 if ((ti = find_thread(kd->arg5, 0, 0))) {
1877 if (ti->type == -1 && strcmp(command, "kernel_task"))
1878 mode = 0;
1879 }
1880 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1881 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1882
1883 last_timestamp = timestamp;
1884 break;
1885
1886 case DECR_SET:
1887 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1888 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1889 "DECR_SET", thread, cpunum, command);
1890
1891 last_timestamp = timestamp;
1892 break;
1893
1894 case MACH_sched:
1895 case MACH_stkhandoff:
1896 if ((map = find_thread_map(kd->arg2)))
1897 strcpy(command1, map->command);
1898 else
1899 sprintf(command1, "%-8x", kd->arg2);
1900
1901 if ((ti = find_thread(kd->arg2, 0, 0))) {
1902 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1903 p = "U";
1904 else
1905 p = "K";
1906 } else
1907 p = "*";
1908 memset(sched_info, ' ', sizeof(sched_info));
1909
1910 sprintf(sched_info, "%14.14s", command);
1911 clen = strlen(sched_info);
1912 sched_info[clen] = ' ';
1913
1914 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1915 clen = strlen(sched_info);
1916 sched_info[clen] = ' ';
1917
1918 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1919
1920 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1921 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1922
1923 last_timestamp = timestamp;
1924 break;
1925
1926 case VFS_LOOKUP:
1927 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1928 if (cur_max >= MAX_THREADS)
1929 continue;
1930 ti = &th_state[cur_max++];
1931
1932 ti->thread = thread;
1933 ti->type = -1;
1934 ti->pathptr = (long *)NULL;
1935 ti->child_thread = 0;
1936 }
1937
1938 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1939 {
1940 if (ti->pathptr == NULL) {
1941 ti->arg1 = kd->arg1;
1942 sargptr = ti->pathname;
1943
1944 *sargptr++ = kd->arg2;
1945 *sargptr++ = kd->arg3;
1946 *sargptr++ = kd->arg4;
1947 /*
1948 * NULL terminate the 'string'
1949 */
1950 *sargptr = 0;
1951
1952 ti->pathptr = sargptr;
1953
1954 } else {
1955 sargptr = ti->pathptr;
1956
1957 /*
1958 We don't want to overrun our pathname buffer if the
1959 kernel sends us more VFS_LOOKUP entries than we can
1960 handle.
1961 */
1962
1963 if (sargptr >= &ti->pathname[NUMPARMS])
1964 {
1965 kd++;
1966 continue;
1967 }
1968
1969 /*
1970 We need to detect consecutive vfslookup entries.
1971 So, if we get here and find a START entry,
1972 fake the pathptr so we can bypass all further
1973 vfslookup entries.
1974 */
1975
1976 if (kd->debugid & DBG_FUNC_START)
1977 {
1978 ti->pathptr = &ti->pathname[NUMPARMS];
1979 }
1980 else
1981 {
1982 *sargptr++ = kd->arg1;
1983 *sargptr++ = kd->arg2;
1984 *sargptr++ = kd->arg3;
1985 *sargptr++ = kd->arg4;
1986 /*
1987 * NULL terminate the 'string'
1988 */
1989 *sargptr = 0;
1990
1991 ti->pathptr = sargptr;
1992 }
1993 }
1994 kd++;
1995 }
1996 p = (char *)ti->pathname;
1997
1998 kd--;
1999 /* print the tail end of the pathname */
2000 len = strlen(p);
2001 if (len > 42)
2002 len -= 42;
2003 else
2004 len = 0;
2005
2006 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
2007 timestamp - start_bias, delta, "VFS_LOOKUP",
2008 &p[len], ti->arg1, thread, cpunum, command);
2009
2010 last_timestamp = timestamp;
2011 break;
2012
2013 default:
2014 if (debugid & DBG_FUNC_START)
2015 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
2016 else if (debugid & DBG_FUNC_END)
2017 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
2018 else
2019 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
2020
2021 last_timestamp = timestamp;
2022 break;
2023 }
2024 }
2025 return(kd_stop);
2026 }
2027
2028
2029 double handle_decrementer(kd_buf *kd)
2030 {
2031 double latency;
2032 int elapsed_usecs;
2033
2034 if ((int)(kd->arg1) >= 0)
2035 latency = 1;
2036 else
2037 latency = (((double)(-1 - kd->arg1)) / divisor);
2038 elapsed_usecs = (int)latency;
2039
2040 if (elapsed_usecs < 100)
2041 i_usec_10_bins[elapsed_usecs/10]++;
2042 if (elapsed_usecs < 1000)
2043 i_usec_100_bins[elapsed_usecs/100]++;
2044 else if (elapsed_usecs < 10000)
2045 i_msec_1_bins[elapsed_usecs/1000]++;
2046 else if (elapsed_usecs < 50000)
2047 i_msec_10_bins[elapsed_usecs/10000]++;
2048 else
2049 i_too_slow++;
2050
2051 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2052 i_exceeded_threshold++;
2053 if (elapsed_usecs > i_max_latency)
2054 i_max_latency = elapsed_usecs;
2055 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2056 i_min_latency = elapsed_usecs;
2057 i_total_latency += elapsed_usecs;
2058 i_total_samples++;
2059
2060 return (latency);
2061 }
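/*
 * Worked example for handle_decrementer() (illustrative values): the
 * decrementer trace point reports the residual count in kd->arg1, and a
 * negative value means the trap fired late. With kd->arg1 == -251 and
 * divisor == 25 ticks per microsecond (see getdivisor()):
 *
 *	latency = (-1 - (-251)) / 25 = 250 / 25 = 10.0 usecs
 *
 * so elapsed_usecs == 10, which is counted in i_usec_10_bins[1], the
 * "delays <  20 usecs" row of the INTERRUPTS column in screen_update().
 */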
2062
2063
2064 void init_code_file()
2065 {
2066 FILE *fp;
2067 int i, n, cnt, code;
2068 char name[128];
2069
2070 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2071 if (log_fp)
2072 fprintf(log_fp, "open of %s failed\n", code_file);
2073 return;
2074 }
2075 n = fscanf(fp, "%d\n", &cnt);
2076
2077 if (n != 1) {
2078 if (log_fp)
2079 fprintf(log_fp, "bad format found in %s\n", code_file);
2080 return;
2081 }
2082 for (i = 0; i < MAX_ENTRIES; i++) {
2083 n = fscanf(fp, "%x%127s\n", &code, name);
2084
2085 if (n != 2)
2086 break;
2087
2088 strncpy(codes_tab[i].name, name, 32);
2089 codes_tab[i].type = code;
2090 }
2091 num_of_codes = i;
2092
2093 fclose(fp);
2094 }
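/*
 * The codes file parsed above is expected to contain an entry count on the
 * first line, then one "hex-debugid  name" pair per line. An illustrative
 * excerpt (not the actual contents of /usr/share/misc/trace.codes; the names
 * here mirror the #defines at the top of this file):
 *
 *	4
 *	0x1090000	DECR_TRAP
 *	0x1400000	MACH_sched
 *	0x3010090	VFS_LOOKUP
 *	0x40c0004	BSC_exit
 *
 * Each name is copied with strncpy() into the fixed 32-byte codes_tab[].name field.
 */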
2095
2096
2097 void
2098 do_kernel_nm()
2099 {
2100 int i, len;
2101 FILE *fp = (FILE *)0;
2102 char tmp_nm_file[128];
2103 char tmpstr[1024];
2104 char inchr;
2105
2106 bzero(tmp_nm_file, 128);
2107 bzero(tmpstr, 1024);
2108
2109 /* Build the temporary nm file path */
2110 strcpy(tmp_nm_file,"/tmp/knm.out.XXXXXX");
2111 if (!mktemp(tmp_nm_file)) {
2112 fprintf(stderr, "Error in mktemp call\n");
2113 return;
2114 }
2115
2116     /* Build the nm command and create a tmp file with the output */
2117 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2118 kernelpath, tmp_nm_file);
2119 system(tmpstr);
2120
2121 /* Parse the output from the nm command */
2122 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2123 {
2124 /* Hmmm, let's not treat this as fatal */
2125 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2126 return;
2127 }
2128
2129 /* Count the number of symbols in the nm symbol table */
2130 kern_sym_count=0;
2131 while ( (inchr = getc(fp)) != -1)
2132 {
2133 if (inchr == '\n')
2134 kern_sym_count++;
2135 }
2136
2137 rewind(fp);
2138
2139 /* Malloc the space for symbol table */
2140 if (kern_sym_count > 0)
2141 {
2142 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2143 if (!kern_sym_tbl)
2144 {
2145             /* Hmmm, let's not treat this as fatal */
2146 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2147 }
2148 else
2149 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2150 }
2151 else
2152 {
2153         /* Hmmm, let's not treat this as fatal */
2154 fprintf(stderr, "No kernel symbol table \n");
2155 }
2156
2157 for (i=0; i<kern_sym_count; i++)
2158 {
2159 bzero(tmpstr, 1024);
2160 if (fscanf(fp, "%lx %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2161 break;
2162 else
2163 {
2164 len = strlen(tmpstr);
2165 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2166
2167 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2168 {
2169 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2170 kern_sym_tbl[i].k_sym_name = (char *)0;
2171 len = 0;
2172 }
2173 else
2174 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2175
2176 kern_sym_tbl[i].k_sym_len = len;
2177 }
2178 } /* end for */
2179
2180 if (i != kern_sym_count)
2181 {
2182 /* Hmmm, didn't build up entire table from nm */
2183 /* scrap the entire thing */
2184 if (kern_sym_tbl)
2185 free (kern_sym_tbl);
2186 kern_sym_tbl = (kern_sym_t *)0;
2187 kern_sym_count = 0;
2188 }
2189
2190 fclose(fp);
2191
2192 /* Remove the temporary nm file */
2193 unlink(tmp_nm_file);
2194
2195 #if 0
2196 /* Dump the kernel symbol table */
2197 for (i=0; i < kern_sym_count; i++)
2198 {
2199 if (kern_sym_tbl[i].k_sym_name)
2200 printf ("[%d] 0x%x %s\n", i,
2201 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2202 else
2203 printf ("[%d] 0x%x %s\n", i,
2204 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2205 }
2206 #endif
2207 }
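/*
 * The fscanf() loop above expects classic nm(1) output, one symbol per line
 * in "address type name" form, e.g. (hypothetical address and symbol):
 *
 *	0002d5f0 T _thread_invoke
 *
 * The address fills k_sym_addr, the single type character is read and
 * discarded, and the name is duplicated into k_sym_name, so that
 * pc_to_string()/binary_search() below can map trace PCs back to symbols.
 */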
2208
2209 char *
2210 pc_to_string(unsigned int pc, int max_len, int mode)
2211 {
2212 int ret;
2213 int len;
2214
2215 int binary_search();
2216
2217 if (mode == 0)
2218 {
2219 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2220 return(pcstring);
2221 }
2222
2223 ret=0;
2224 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2225
2226 if (ret == -1)
2227 {
2228 sprintf(pcstring, "0x%x", pc);
2229 return(pcstring);
2230 }
2231 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2232 {
2233 sprintf(pcstring, "0x%x", pc);
2234 return(pcstring);
2235 }
2236 else
2237 {
2238 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2239 len = max_len - 8;
2240
2241 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2242 sprintf(&pcstring[len], "+0x%-5lx", pc - kern_sym_tbl[ret].k_sym_addr);
2243
2244 return (pcstring);
2245 }
2246 }
2247
2248
2249 /* Return -1 if not found, else return index */
2250 int binary_search(list, low, high, addr)
2251 kern_sym_t *list;
2252 int low, high;
2253 unsigned int addr;
2254 {
2255 int mid;
2256
2257 mid = (low + high) / 2;
2258
2259 if (low > high)
2260 return (-1); /* failed */
2261 else if (low + 1 == high)
2262 {
2263 if (list[low].k_sym_addr <= addr &&
2264 addr < list[high].k_sym_addr)
2265 {
2266 /* We have a range match */
2267 return(low);
2268 }
2269 else if (list[high].k_sym_addr <= addr)
2270 {
2271 return(high);
2272 }
2273 else
2274 return(-1); /* Failed */
2275 }
2276 else if (addr < list[mid].k_sym_addr)
2277 {
2278 return(binary_search (list, low, mid, addr));
2279 }
2280 else
2281 {
2282 return(binary_search (list, mid, high, addr));
2283 }
2284 }
2285
2286 void
2287 open_logfile(char *path)
2288 {
2289 log_fp = fopen(path, "a");
2290
2291 if (!log_fp)
2292 {
2293 /* failed to open path */
2294 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2295 exit_usage();
2296 }
2297 }