]> git.saurik.com Git - apple/system_cmds.git/blob - latency.tproj/latency.c
f9bb800af1213050585c3f75c46d7c13650c6c76
[apple/system_cmds.git] / latency.tproj / latency.c
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 /*
27 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
28 */
29
30 #include <mach/mach.h>
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <signal.h>
34 #include <strings.h>
35 #include <nlist.h>
36 #include <fcntl.h>
37 #include <string.h>
38
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/time.h>
42
43 #include <libc.h>
44 #include <termios.h>
45 #include <curses.h>
46 #include <sys/ioctl.h>
47
48 #ifndef KERNEL_PRIVATE
49 #define KERNEL_PRIVATE
50 #include <sys/kdebug.h>
51 #undef KERNEL_PRIVATE
52 #else
53 #include <sys/kdebug.h>
54 #endif /*KERNEL_PRIVATE*/
55
56 #include <sys/sysctl.h>
57 #include <errno.h>
58 #include <err.h>
59
60 #include <mach/host_info.h>
61 #include <mach/mach_error.h>
62 #include <mach/mach_types.h>
63 #include <mach/message.h>
64 #include <mach/mach_syscalls.h>
65 #include <mach/clock_types.h>
66 #include <mach/mach_time.h>
67
68 #include <libkern/OSTypes.h>
69
extern mach_port_t clock_port;

#define KERN_KDPIDEX 14


/*
 * Scheduler (s_*) latency histogram.  Bin layout used by main() and
 * screen_update(): <100us in 10us steps, <1ms in 100us steps, <10ms in
 * 1ms steps, <50ms in 10ms steps; anything slower counts in s_too_slow.
 */
int s_usec_10_bins[10];
int s_usec_100_bins[10];
int s_msec_1_bins[10];
int s_msec_10_bins[5];
int s_too_slow;
int s_max_latency;
int s_min_latency = 0;
long long s_total_latency = 0;
int s_total_samples;
long s_thresh_hold;          /* -st option: log trace when exceeded (0 = off) */
int s_exceeded_threshold = 0;

/* Interrupt (i_*, decrementer) latency histogram -- same bin layout. */
int i_usec_10_bins[10];
int i_usec_100_bins[10];
int i_msec_1_bins[10];
int i_msec_10_bins[5];
int i_too_slow;
int i_max_latency;
int i_min_latency = 0;
long long i_total_latency = 0;
int i_total_samples;
long i_thresh_hold;          /* -it option: log trace when exceeded (0 = off) */
int i_exceeded_threshold = 0;

long start_time;             /* time() when sampling began */
long curr_time;              /* time() at the top of each sampling iteration */
long refresh_time;           /* next time the screen should be redrawn */

char *policy_name;           /* "TIMESHARE" or "REALTIME" */
int my_policy;
int my_pri = -1;
int num_of_usecs_to_sleep = 1000;   /* -s option: requested sleep per sample */

char *kernelpath = (char *)0;   /* -n option; defaults to /mach_kernel */
char *code_file = (char *)0;    /* -c option; defaults to trace.codes */

/* One entry per kernel symbol, loaded from `nm` output by do_kernel_nm(). */
typedef struct {
	u_long k_sym_addr;        /* kernel symbol address from nm */
	u_int  k_sym_len;         /* length of kernel symbol string */
	char  *k_sym_name;        /* kernel symbol string from nm */
} kern_sym_t;

kern_sym_t *kern_sym_tbl;    /* pointer to the nm table */
int kern_sym_count;          /* number of entries in nm table */
char pcstring[128];          /* scratch buffer used by pc_to_string() */

#define UNKNOWN "Can't find symbol name"


double divisor;              /* mach_absolute_time ticks per microsecond */
int gotSIGWINCH = 0;         /* set by sigwinch(); main loop reinits curses */
int trace_enabled = 0;
struct host_basic_info hi;   /* used for the per-cpu DECR_TRAP array size */


#define SAMPLE_SIZE 300000   /* kernel trace buffer entries requested */

int mib[6];                  /* shared sysctl name buffer */
size_t needed;               /* shared sysctl length in/out */
char *my_buffer;             /* user-space copy of the kernel trace buffer */

kbufinfo_t bufinfo = {0, 0, 0};

FILE *log_fp = (FILE *)0;    /* -l option: trace log destination */
int num_of_codes = 0;
int need_new_map = 0;
int total_threads = 0;
kd_threadmap *mapptr = 0;    /* thread id -> command name map */

/* Table mapping trace debugids to names, loaded from code_file. */
#define MAX_ENTRIES 1024
struct ct {
	int type;
	char name[32];
} codes_tab[MAX_ENTRIES];

/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
#define NUMPARMS 23
#define PATHLENGTH (NUMPARMS*sizeof(long))

/* Per-thread state accumulated while scanning trace entries. */
struct th_info {
	int thread;
	int type;
	int child_thread;
	int arg1;
	double stime;
	long *pathptr;                    /* next slot while assembling a VFS_LOOKUP path */
	char pathname[PATHLENGTH + 1];
};

#define MAX_THREADS 512
struct th_info th_state[MAX_THREADS];

int cur_max = 0;             /* number of th_state entries in use */

/* Trace debugid constants (class/subclass/code encodings from the kernel). */
#define TRACE_DATA_NEWTHREAD 0x07000004
#define TRACE_STRING_NEWTHREAD 0x07010004
#define TRACE_STRING_EXEC 0x07010008

#define INTERRUPT 0x01050000
#define DECR_TRAP 0x01090000
#define DECR_SET 0x01090004
#define MACH_vmfault 0x01300000
#define MACH_sched 0x01400000
#define MACH_stkhandoff 0x01400008
#define VFS_LOOKUP 0x03010090
#define BSC_exit 0x040C0004
#define IES_action 0x050b0018
#define IES_filter 0x050b001c
#define TES_action 0x050c0010
#define CQ_action 0x050d0018


#define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
#define DBG_FUNC_MASK 0xfffffffc

/* vm_fault subtypes, indexes into fault_name[] */
#define DBG_ZERO_FILL_FAULT 1
#define DBG_PAGEIN_FAULT 2
#define DBG_COW_FAULT 3
#define DBG_CACHE_HIT_FAULT 4

char *fault_name[5] = {
	"",
	"ZeroFill",
	"PageIn",
	"COW",
	"CacheHit",
};

char *pc_to_string();
static kern_return_t set_time_constraint_policy(void);
static kern_return_t set_standard_policy(void);

int decrementer_val = 0;     /* Value used to reset decrementer */
int set_remove_flag = 1;     /* By default, remove trace buffer */

kd_buf **last_decrementer_kd;   /* last DECR_TRAP per cpu */
#define MAX_LOG_COUNT 30     /* limits the number of entries dumped in log_decrementer */
212
213 int
214 quit(s)
215 char *s;
216 {
217 void set_enable();
218 void set_rtcdec();
219 void set_remove();
220
221 if (trace_enabled)
222 set_enable(0);
223
224 /*
225 This flag is turned off when calling
226 quit() due to a set_remove() failure.
227 */
228 if (set_remove_flag)
229 set_remove();
230
231 if (decrementer_val)
232 set_rtcdec(0);
233
234 printf("latency: ");
235 if (s)
236 printf("%s", s);
237
238 exit(1);
239 }
240
/*
 * Turn kernel trace collection on (val != 0) or off (val == 0) via the
 * KERN_KDENABLE sysctl.  Note the name length is 4 here (unlike the
 * other kdebug calls, which use 3) because val rides in mib[3].
 */
void
set_enable(int val)
{
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDENABLE;		/* protocol */
	mib[3] = val;
	mib[4] = 0;
	mib[5] = 0;		        /* no flags */

	if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDENABLE\n");
}
254
/*
 * Ask the kernel for a trace buffer of 'nbufs' entries (KERN_KDSETBUF,
 * nbufs passed in mib[3], hence name length 4) and then allocate it
 * with KERN_KDSETUP.  Any failure aborts via quit().
 */
void
set_numbufs(int nbufs)
{
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETBUF;
	mib[3] = nbufs;
	mib[4] = 0;
	mib[5] = 0;		        /* no flags */
	if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETBUF\n");

	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		        /* no flags */
	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");

}
277
/*
 * Exclude (on_off != 0) or re-include (on_off == 0) a pid from kernel
 * tracing, so latency's own activity doesn't pollute the sample.
 * The sysctl result is deliberately ignored -- this is best-effort
 * (NOTE(review): appears intentional; callers never check a result).
 */
void
set_pidexclude(int pid, int on_off)
{
	kd_regtype kr;

	kr.type = KDBG_TYPENONE;
	kr.value1 = pid;
	kr.value2 = on_off;
	needed = sizeof(kd_regtype);
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDPIDEX;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;

	sysctl(mib, 3, &kr, &needed, NULL, 0);
}
296
297 set_rtcdec(decval)
298 int decval;
299 {kd_regtype kr;
300 int ret;
301 extern int errno;
302
303 kr.type = KDBG_TYPENONE;
304 kr.value1 = decval;
305 needed = sizeof(kd_regtype);
306 mib[0] = CTL_KERN;
307 mib[1] = KERN_KDEBUG;
308 mib[2] = KERN_KDSETRTCDEC; /* protocol */
309 mib[3] = 0; /* wildcard address family */
310 mib[4] = 0;
311 mib[5] = 0; /* no flags */
312
313 errno = 0;
314
315 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
316 {
317 decrementer_val = 0;
318 quit("trace facility failure, KERN_KDSETRTCDEC\n");
319 }
320 }
321
322
/*
 * Read the current kernel trace buffer state (entry count, thread map
 * size, wrap flag, ...) into *val via KERN_KDGETBUF.
 */
void
get_bufinfo(kbufinfo_t *val)
{
	needed = sizeof (*val);
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDGETBUF;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */

	if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
		quit("trace facility failure, KERN_KDGETBUF\n");

}
338
339 void
340 set_remove()
341 {
342 extern int errno;
343
344 mib[0] = CTL_KERN;
345 mib[1] = KERN_KDEBUG;
346 mib[2] = KERN_KDREMOVE; /* protocol */
347 mib[3] = 0;
348 mib[4] = 0;
349 mib[5] = 0; /* no flags */
350
351 errno = 0;
352
353 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
354 {
355 set_remove_flag = 0;
356 if(errno == EBUSY)
357 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
358 else
359 quit("trace facility failure, KERN_KDREMOVE\n");
360 }
361 }
362
void
set_init_nologging()
{
	/* When we aren't logging, only collect the DECR_TRAP trace points */
	kd_regtype kr;
	kr.type = KDBG_VALCHECK;
	kr.value1 = DECR_TRAP;
	kr.value2 = 0;
	kr.value3 = 0;
	kr.value4 = 0;
	needed = sizeof(kd_regtype);
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETREG;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */
	if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETREG\n");

	/* (re)allocate the trace buffer with the new filter in effect */
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");
}
393
/*
 * When logging (-l), collect every trace point: set a KDBG_RANGETYPE
 * filter of [0, -1] (i.e. everything), then set up the trace buffer.
 */
void
set_init_logging()
{	kd_regtype kr;

	kr.type = KDBG_RANGETYPE;
	kr.value1 = 0;
	kr.value2 = -1;
	needed = sizeof(kd_regtype);
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETREG;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */

	if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETREG\n");

	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");
}
422
423
/* SIGWINCH handler: just set a flag; the main loop reinitializes curses. */
void sigwinch()
{
	gotSIGWINCH = 1;
}
428
/*
 * SIGINT handler: print a final report, disable tracing, restore the
 * decrementer and pid-exclusion, release the trace buffer, and exit.
 * NOTE(review): calls stdio/curses/sysctl from a signal handler, which
 * is not async-signal-safe -- preserved as the original design.
 */
void sigintr()
{
	void screen_update();

	set_enable(0);
	set_pidexclude(getpid(), 0);
	screen_update(log_fp);
	set_rtcdec(0);
	set_remove();

	exit(1);
}
441
/* exit under normal conditions -- signal handler (SIGQUIT/SIGTERM/SIGHUP);
   same teardown as sigintr() but without the final screen_update. */
void leave()
{
	set_enable(0);
	set_pidexclude(getpid(), 0);
	set_rtcdec(0);
	set_remove();

	exit(1);
}
451
452 void
453 screen_update(FILE *fp)
454 {
455 int i;
456 int itotal, stotal;
457 int elapsed_secs;
458 int elapsed_mins;
459 int elapsed_hours;
460 unsigned int average_s_latency;
461 unsigned int average_i_latency;
462 char tbuf[256];
463
464 if (fp == (FILE *)0) {
465 erase();
466 move(0, 0);
467 } else
468 fprintf(fp,"\n\n===================================================================================================\n");
469 /*
470 * Display the current time.
471 * "ctime" always returns a string that looks like this:
472 *
473 * Sun Sep 16 01:03:52 1973
474 * 012345678901234567890123
475 * 1 2
476 *
477 * We want indices 11 thru 18 (length 8).
478 */
479 elapsed_secs = curr_time - start_time;
480 elapsed_hours = elapsed_secs / 3600;
481 elapsed_secs -= elapsed_hours * 3600;
482 elapsed_mins = elapsed_secs / 60;
483 elapsed_secs -= elapsed_mins * 60;
484
485 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
486 elapsed_hours, elapsed_mins, elapsed_secs);
487 if (fp)
488 fprintf(fp, "%s", tbuf);
489 else
490 printw(tbuf);
491
492
493
494 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
495
496 if (fp)
497 fprintf(fp, "%s", tbuf);
498 else
499 printw(tbuf);
500
501 sprintf(tbuf, "---------------------------------------------\n");
502
503 if (fp)
504 fprintf(fp, "%s", tbuf);
505 else
506 printw(tbuf);
507
508
509
510 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
511
512 if (fp)
513 fprintf(fp, "%s", tbuf);
514 else
515 printw(tbuf);
516
517
518
519 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
520 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
521
522 if (fp)
523 fprintf(fp, "%s", tbuf);
524 else
525 printw(tbuf);
526
527 stotal += s_usec_10_bins[i];
528 itotal += i_usec_10_bins[i];
529 }
530 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
531
532 if (fp)
533 fprintf(fp, "%s", tbuf);
534 else
535 printw(tbuf);
536
537
538
539 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
540 if (i < 9)
541 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
542 else
543 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
544
545 if (fp)
546 fprintf(fp, "%s", tbuf);
547 else
548 printw(tbuf);
549
550 stotal += s_usec_100_bins[i];
551 itotal += i_usec_100_bins[i];
552 }
553 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
554
555 if (fp)
556 fprintf(fp, "%s", tbuf);
557 else
558 printw(tbuf);
559
560
561
562 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
563 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
564
565 if (fp)
566 fprintf(fp, "%s", tbuf);
567 else
568 printw(tbuf);
569
570 stotal += s_msec_1_bins[i];
571 itotal += i_msec_1_bins[i];
572 }
573 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
574
575 if (fp)
576 fprintf(fp, "%s", tbuf);
577 else
578 printw(tbuf);
579
580
581
582
583 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
584 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
585
586 if (fp)
587 fprintf(fp, "%s", tbuf);
588 else
589 printw(tbuf);
590
591 stotal += s_msec_10_bins[i];
592 itotal += i_msec_10_bins[i];
593 }
594 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
595
596 if (fp)
597 fprintf(fp, "%s", tbuf);
598 else
599 printw(tbuf);
600
601 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
602
603 if (fp)
604 fprintf(fp, "%s", tbuf);
605 else
606 printw(tbuf);
607
608
609
610 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
611
612 if (fp)
613 fprintf(fp, "%s", tbuf);
614 else
615 printw(tbuf);
616
617 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
618
619 if (fp)
620 fprintf(fp, "%s", tbuf);
621 else
622 printw(tbuf);
623
624 if (s_total_samples)
625 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
626 else
627 average_s_latency = 0;
628
629 if (i_total_samples)
630 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
631 else
632 average_i_latency = 0;
633
634 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
635
636 if (fp)
637 fprintf(fp, "%s", tbuf);
638 else
639 printw(tbuf);
640
641 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
642
643 if (fp)
644 fprintf(fp, "%s", tbuf);
645 else
646 printw(tbuf);
647
648 if (fp == (FILE *)0)
649 refresh();
650 else
651 fflush(fp);
652 }
653
/*
 * Print the command-line usage summary to stderr and exit(1).
 * The text is kept in a table and emitted in order.
 */
int
exit_usage()
{
	static const char * const usage_text[] = {
		"Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n",
		" [-it threshold] [-s sleep_in_usecs]\n",
		" [-d decrementer_in_usecs] [-n kernel]\n\n",
		" -rt Set realtime scheduling policy. Default is timeshare.\n",
		" -c specify name of codes file\n",
		" -l specify name of file to log trace entries to when threshold is exceeded\n",
		" -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n",
		" -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n",
		" -s set sleep time in microseconds\n",
		" -d set decrementer in microseconds.\n",
		" -n specify kernel, default is /mach_kernel\n",
		"\nlatency must be run as root\n\n",
	};
	int n;

	for (n = 0; n < (int)(sizeof(usage_text) / sizeof(usage_text[0])); n++)
		fprintf(stderr, "%s", usage_text[n]);

	exit(1);
}
675
676
677
/*
 * latency: measure scheduler wakeup latency (and, via the kernel trace
 * facility, interrupt/decrementer latency).  The main loop repeatedly
 * asks to sleep num_of_usecs_to_sleep microseconds with mk_wait_until()
 * and bins how much longer than requested the wakeup actually took.
 */
main(argc, argv)
	int  argc;
	char *argv[];
{
	mach_timespec_t remain;       /* NOTE(review): unused */
	uint64_t start, stop;
	uint64_t timestamp1;
	uint64_t timestamp2;
	uint64_t adeadline, adelay;   /* sleep interval in mach_absolute_time ticks */
	double fdelay;
	int      elapsed_usecs;
	double   nanosecs_to_sleep;
	int      loop_cnt, sample_sc_now;
	int      decrementer_usec = 0;
	kern_return_t ret;
	int size;
	int i, count;
	host_name_port_t host;
	void getdivisor();
	void sample_sc();
	void init_code_file();
	void do_kernel_nm();
	void open_logfile();

	my_policy = THREAD_STANDARD_POLICY;
	policy_name = "TIMESHARE";

	/* Parse options; each value-taking flag consumes one extra argv slot. */
	while (argc > 1) {
		if (strcmp(argv[1], "-rt") == 0) {
			my_policy = THREAD_TIME_CONSTRAINT_POLICY;	/* the real time band */
			policy_name = "REALTIME";

		} else if (strcmp(argv[1], "-st") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				s_thresh_hold = atoi(argv[1]);
			else
				exit_usage();

		} else if (strcmp(argv[1], "-it") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				i_thresh_hold = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-c") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				code_file = argv[1];
			else
				exit_usage();
		} else if (strcmp(argv[1], "-l") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				open_logfile(argv[1]);
			else
				exit_usage();

		} else if (strcmp(argv[1], "-s") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				num_of_usecs_to_sleep = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-d") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				decrementer_usec = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-n") == 0) {
			argc--;
			argv++;

			if (argc > 1)
				kernelpath = argv[1];
			else
				exit_usage();
		} else
			exit_usage();

		argc--;
		argv++;
	}

	/* the kdebug sysctls and thread policy calls require root */
	if ( geteuid() != 0 ) {
		printf("'latency' must be run as root...\n");
		exit(1);
	}

	if (kernelpath == (char *) 0)
		kernelpath = "/mach_kernel";

	if (code_file == (char *) 0)
		code_file = "/usr/share/misc/trace.codes";

	do_kernel_nm();

	/* sample the trace buffer roughly every 25ms of accumulated sleep */
	sample_sc_now = 25000 / num_of_usecs_to_sleep;

	getdivisor();
	decrementer_val = decrementer_usec * divisor;

	/* get the cpu count for the DECR_TRAP array */
	host = mach_host_self();
	size = sizeof(hi)/sizeof(int);
	ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
		quit("can't allocate memory for decrementer tracing info\n");

	/* convert the requested sleep into mach_absolute_time ticks */
	nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
	fdelay = nanosecs_to_sleep * (divisor /1000);
	adelay = (uint64_t)fdelay;

	init_code_file();

	/*
	   When the decrementer isn't set in the options,
	   decval will be zero and this call will reset
	   the system default ...
	*/
	set_rtcdec(decrementer_val);

	if (initscr() == (WINDOW *) 0)
	{
		printf("Unrecognized TERM type, try vt100\n");
		exit(1);
	}

	clear();
	refresh();
	signal(SIGWINCH, sigwinch);
	signal(SIGINT, sigintr);
	signal(SIGQUIT, leave);
	signal(SIGTERM, leave);
	signal(SIGHUP, leave);


	/* set up the kernel trace facility, excluding our own pid */
	if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
		quit("can't allocate memory for tracing info\n");
	set_remove();
	set_numbufs(SAMPLE_SIZE);
	set_enable(0);
	if(log_fp)
		set_init_logging();
	else
		set_init_nologging();
	set_pidexclude(getpid(), 1);
	set_enable(1);
	trace_enabled = 1;
	need_new_map = 1;

	loop_cnt = 0;
	start_time = time((long *)0);
	refresh_time = start_time;

	if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
	{
		/* the realtime band */
		if(set_time_constraint_policy() != KERN_SUCCESS)
			quit("Failed to set realtime policy.\n");
	}

	for (;;) {
		curr_time = time((long *)0);

		/* redraw at most once per second; drop to timeshare while drawing
		   so the screen update itself doesn't run in the realtime band */
		if (curr_time >= refresh_time) {
			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
			{
				/* set standard timeshare policy during screen update */
				if(set_standard_policy() != KERN_SUCCESS)
					quit("Failed to set standard policy.\n");
			}
			screen_update((FILE *)0);
			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
			{
				/* set back to realtime band */
				if(set_time_constraint_policy() != KERN_SUCCESS)
					quit("Failed to set time_constraint policy.\n");
			}
			refresh_time = curr_time + 1;
		}

		/* the measured sleep itself */
		timestamp1 = mach_absolute_time();
		adeadline = timestamp1 + adelay;
		mk_wait_until(adeadline);
		timestamp2 = mach_absolute_time();

		start = timestamp1;

		stop = timestamp2;

		elapsed_usecs = (int)(((double)(stop - start)) / divisor);

		/* latency = actual sleep minus requested sleep; skip non-positive */
		if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
			continue;

		/* NOTE: a <100us sample lands in both a 10us bin and
		   s_usec_100_bins[0]; screen_update compensates by starting
		   its 100us loop at index 1. */
		if (elapsed_usecs < 100)
			s_usec_10_bins[elapsed_usecs/10]++;
		if (elapsed_usecs < 1000)
			s_usec_100_bins[elapsed_usecs/100]++;
		else if (elapsed_usecs < 10000)
			s_msec_1_bins[elapsed_usecs/1000]++;
		else if (elapsed_usecs < 50000)
			s_msec_10_bins[elapsed_usecs/10000]++;
		else
			s_too_slow++;

		if (elapsed_usecs > s_max_latency)
			s_max_latency = elapsed_usecs;
		if (elapsed_usecs < s_min_latency || s_total_samples == 0)
			s_min_latency = elapsed_usecs;
		s_total_latency += elapsed_usecs;
		s_total_samples++;

		if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
			s_exceeded_threshold++;
		loop_cnt++;

		/* log the trace window when over threshold; otherwise drain the
		   trace buffer periodically so it doesn't wrap */
		if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
			sample_sc(start, stop);
		else {
			if (loop_cnt >= sample_sc_now) {
				sample_sc((long long)0, (long long)0);
				loop_cnt = 0;
			}
		}
		if (gotSIGWINCH) {
			/*
			   No need to check for initscr error return.
			   We won't get here if it fails on the first call.
			*/
			endwin();
			clear();
			refresh();

			gotSIGWINCH = 0;
		}
	}
}
935
936
/*
 * Compute the global 'divisor': mach_absolute_time() ticks per
 * microsecond.  Timebase gives ns = ticks * numer/denom, so
 * ticks/usec = (denom/numer) * 1000.
 */
void getdivisor()
{
	mach_timebase_info_data_t info;

	(void) mach_timebase_info (&info);

	divisor = ( (double)info.denom / (double)info.numer) * 1000;

}
946
/* This is the realtime band: fetch the system default time-constraint
   parameters for this thread and apply them, moving the thread into
   the realtime scheduling band.  Returns the first failing kern_return_t. */
static kern_return_t
set_time_constraint_policy()
{
	kern_return_t				result;
	thread_time_constraint_policy_data_t	info;
	mach_msg_type_number_t			count;
	boolean_t				get_default;

	get_default = TRUE;
	count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
	result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
				   (thread_policy_t)&info, &count, &get_default);
	if (result != KERN_SUCCESS)
		return (result);

	result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
				   (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);

	return (result);
}
968
/* This is the timeshare mode: fetch the default standard-policy
   parameters and apply them, returning the thread to timesharing.
   Returns the first failing kern_return_t. */
static kern_return_t
set_standard_policy()
{
	kern_return_t			result;
	thread_standard_policy_data_t	info;
	mach_msg_type_number_t		count;
	boolean_t			get_default;

	get_default = TRUE;
	count = THREAD_STANDARD_POLICY_COUNT;
	result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
				   (thread_policy_t)&info, &count, &get_default);
	if (result != KERN_SUCCESS)
		return (result);

	result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
				   (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);

	return (result);
}
990
991
/*
 * (Re)load the kernel's thread-id -> command-name map into the globals
 * mapptr/total_threads via KERN_KDTHRMAP.  Failure is non-fatal: the
 * tool merely loses the ability to print command names.
 */
void read_command_map()
{
	size_t size;
	int mib[6];	/* local array intentionally shadows the global mib */

	if (mapptr) {
		free(mapptr);
		mapptr = 0;
	}
	total_threads = bufinfo.nkdthreads;
	size = bufinfo.nkdthreads * sizeof(kd_threadmap);
	if (size)
	{
		if (mapptr = (kd_threadmap *) malloc(size))
			bzero (mapptr, size);
		else
		{
			printf("Thread map is not initialized -- this is not fatal\n");
			return;
		}
	}

	/* Now read the threadmap */
	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDTHRMAP;
	mib[3] = 0;
	mib[4] = 0;
	mib[5] = 0;		/* no flags */
	if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
	{
		/* This is not fatal -- just means I cant map command strings */

		printf("Can't read the thread map -- this is not fatal\n");
		free(mapptr);
		mapptr = 0;
		return;
	}
	return;
}
1032
1033
1034 void create_map_entry(int thread, char *command)
1035 {
1036 int i, n;
1037 kd_threadmap *map;
1038
1039 if (!mapptr)
1040 return;
1041
1042 for (i = 0, map = 0; !map && i < total_threads; i++)
1043 {
1044 if (mapptr[i].thread == thread )
1045 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1046 }
1047
1048 if (!map) /* look for invalid entries that I can reuse*/
1049 {
1050 for (i = 0, map = 0; !map && i < total_threads; i++)
1051 {
1052 if (mapptr[i].valid == 0 )
1053 map = &mapptr[i]; /* Reuse this invalid entry */
1054 }
1055 }
1056
1057 if (!map)
1058 {
1059 /* If reach here, then this is a new thread and
1060 * there are no invalid entries to reuse
1061 * Double the size of the thread map table.
1062 */
1063
1064 n = total_threads * 2;
1065 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1066 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1067 map = &mapptr[total_threads];
1068 total_threads = n;
1069 #if 0
1070 if (log_fp)
1071 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1072 #endif
1073 }
1074 #if 0
1075 if (log_fp)
1076 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1077 #endif
1078 map->valid = 1;
1079 map->thread = thread;
1080 /*
1081 The trace entry that returns the command name will hold
1082 at most, MAXCOMLEN chars, and in that case, is not
1083 guaranteed to be null terminated.
1084 */
1085 (void)strncpy (map->command, command, MAXCOMLEN);
1086 map->command[MAXCOMLEN] = '\0';
1087 }
1088
1089
1090 kd_threadmap *find_thread_map(int thread)
1091 {
1092 int i;
1093 kd_threadmap *map;
1094
1095 if (!mapptr)
1096 return((kd_threadmap *)0);
1097
1098 for (i = 0; i < total_threads; i++)
1099 {
1100 map = &mapptr[i];
1101 if (map->valid && (map->thread == thread))
1102 {
1103 return(map);
1104 }
1105 }
1106 return ((kd_threadmap *)0);
1107 }
1108
1109 void
1110 kill_thread_map(int thread)
1111 {
1112 kd_threadmap *map;
1113
1114 if (map = find_thread_map(thread)) {
1115
1116 #if 0
1117 if (log_fp)
1118 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1119 #endif
1120 map->valid = 0;
1121 map->thread = 0;
1122 map->command[0] = '\0';
1123 }
1124 }
1125
1126
1127 struct th_info *find_thread(int thread, int type1, int type2) {
1128 struct th_info *ti;
1129
1130 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1131 if (ti->thread == thread) {
1132 if (type1 == 0)
1133 return(ti);
1134 if (type1 == ti->type)
1135 return(ti);
1136 if (type2 == ti->type)
1137 return(ti);
1138 }
1139 }
1140 return ((struct th_info *)0);
1141 }
1142
1143
1144 char *find_code(type)
1145 {
1146 int i;
1147
1148 for (i = 0; i < num_of_codes; i++) {
1149 if (codes_tab[i].type == type)
1150 return(codes_tab[i].name);
1151 }
1152 return ((char *)0);
1153 }
1154
1155
1156 void sample_sc(uint64_t start, uint64_t stop)
1157 {
1158 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1159 uint64_t now;
1160 int count, i;
1161 int first_entry = 1;
1162 char command[32];
1163 double timestamp, last_timestamp, delta, start_bias;
1164 void read_command_map();
1165
1166 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1167 {
1168 /* set standard timeshare policy when logging */
1169 if(set_standard_policy() != KERN_SUCCESS)
1170 quit("Failed to set standard policy.\n");
1171 }
1172
1173 /* Get kernel buffer information */
1174 get_bufinfo(&bufinfo);
1175
1176 if (need_new_map) {
1177 read_command_map();
1178 need_new_map = 0;
1179 }
1180 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1181 mib[0] = CTL_KERN;
1182 mib[1] = KERN_KDEBUG;
1183 mib[2] = KERN_KDREADTR;
1184 mib[3] = 0;
1185 mib[4] = 0;
1186 mib[5] = 0; /* no flags */
1187
1188 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1189 quit("trace facility failure, KERN_KDREADTR\n");
1190
1191 count = needed;
1192
1193 if (bufinfo.flags & KDBG_WRAPPED) {
1194 for (i = 0; i < cur_max; i++) {
1195 th_state[i].thread = 0;
1196 th_state[i].type = -1;
1197 th_state[i].pathptr = (long *)0;
1198 th_state[i].pathname[0] = 0;
1199 }
1200 cur_max = 0;
1201 need_new_map = 1;
1202
1203 set_enable(0);
1204 set_enable(1);
1205
1206 if (log_fp) {
1207 double latency;
1208
1209 latency = (double)(stop - start) / divisor;
1210 latency -= (double)num_of_usecs_to_sleep;
1211
1212 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1213 &(ctime(&curr_time)[0]), latency, count);
1214 }
1215 }
1216 end_of_sample = &((kd_buf *)my_buffer)[count];
1217
1218 /* Always reinitialize the DECR_TRAP array */
1219 for (i=0; i < hi.avail_cpus; i++)
1220 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1221
1222 last_mach_sched = (kd_buf *)0;
1223
1224 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1225 int debugid, thread, cpunum;
1226 int type, clen, mode;
1227 int len;
1228 char *p;
1229 long *sargptr;
1230 kd_buf *cur_kd;
1231 double i_latency;
1232 struct th_info *ti;
1233 char command1[32];
1234 char sched_info[64];
1235 kd_threadmap *map;
1236 kd_threadmap *find_thread_map();
1237 double handle_decrementer();
1238 kd_buf *log_decrementer();
1239 int check_for_thread_update();
1240 void enter_syscall();
1241 void exit_syscall();
1242 void print_entry();
1243
1244 thread = kd->arg5 & KDBG_THREAD_MASK;
1245 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1246 debugid = kd->debugid;
1247 type = kd->debugid & DBG_FUNC_MASK;
1248
1249 if (check_for_thread_update(thread, type, kd))
1250 continue;
1251
1252 if (type == DECR_TRAP)
1253 i_latency = handle_decrementer(kd);
1254
1255 now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
1256 (uint64_t)((unsigned int)(kd->timestamp.tv_nsec));
1257
1258 timestamp = ((double)now) / divisor;
1259
1260 if (now < start || now > stop) {
1261 if (debugid & DBG_FUNC_START)
1262 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1263 else if (debugid & DBG_FUNC_END)
1264 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1265 else if (type == DECR_TRAP) {
1266 cur_kd = kd;
1267 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1268 start_kd = last_decrementer_kd[cpunum];
1269 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1270 if (kd >= end_of_sample)
1271 break;
1272 }
1273 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1274 {
1275 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1276 last_decrementer_kd[cpunum] = kd;
1277 }
1278 else
1279 last_decrementer_kd[cpunum] = cur_kd;
1280 }
1281 continue;
1282 }
1283 if (first_entry) {
1284 double latency;
1285 char buf1[132];
1286 char buf2[132];
1287
1288 latency = (double)(stop - start) / divisor;
1289 latency -= (double)num_of_usecs_to_sleep;
1290
1291 if (my_pri == -1)
1292 sprintf(buf2, "default");
1293 else
1294 sprintf(buf2, "%d", my_pri);
1295 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1296 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1297 clen = strlen(buf1);
1298 memset(buf2, '-', clen);
1299 buf2[clen] = 0;
1300
1301 if (log_fp) {
1302 fprintf(log_fp, "\n\n%s\n", buf2);
1303 fprintf(log_fp, "%s\n\n", buf1);
1304 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1305 }
1306 start_bias = ((double)start) / divisor;
1307 last_timestamp = timestamp;
1308 first_entry = 0;
1309 }
1310 delta = timestamp - last_timestamp;
1311
1312 if (map = find_thread_map(thread))
1313 strcpy(command, map->command);
1314 else
1315 command[0] = 0;
1316
1317 switch (type) {
1318
1319 case CQ_action:
1320 if (log_fp) {
1321 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1322 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1323 }
1324 last_timestamp = timestamp;
1325 break;
1326
1327 case TES_action:
1328 if (log_fp) {
1329 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1330 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1331 }
1332
1333 last_timestamp = timestamp;
1334 break;
1335
1336 case IES_action:
1337 if (log_fp) {
1338 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1339 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1340 }
1341
1342 last_timestamp = timestamp;
1343 break;
1344
1345 case IES_filter:
1346 if (log_fp) {
1347 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1348 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1349 }
1350
1351 last_timestamp = timestamp;
1352 break;
1353
1354 case DECR_TRAP:
1355 last_decrementer_kd[cpunum] = kd;
1356
1357 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1358 p = "*";
1359 else
1360 p = " ";
1361
1362 mode = 1;
1363
1364 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1365 if (ti->type == -1 && strcmp(command, "kernel_task"))
1366 mode = 0;
1367 }
1368
1369 if (log_fp) {
1370 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1371 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1372 }
1373
1374 last_timestamp = timestamp;
1375 break;
1376
1377 case DECR_SET:
1378 if (log_fp) {
1379 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1380 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1381 }
1382
1383 last_timestamp = timestamp;
1384 break;
1385
1386 case MACH_sched:
1387 case MACH_stkhandoff:
1388 last_mach_sched = kd;
1389
1390 if (map = find_thread_map(kd->arg2))
1391 strcpy(command1, map->command);
1392 else
1393 sprintf(command1, "%-8x", kd->arg2);
1394
1395 if (ti = find_thread(kd->arg2, 0, 0)) {
1396 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1397 p = "U";
1398 else
1399 p = "K";
1400 } else
1401 p = "*";
1402 memset(sched_info, ' ', sizeof(sched_info));
1403
1404 sprintf(sched_info, "%14.14s", command);
1405 clen = strlen(sched_info);
1406 sched_info[clen] = ' ';
1407
1408 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1409 clen = strlen(sched_info);
1410 sched_info[clen] = ' ';
1411
1412 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1413
1414 if (log_fp) {
1415 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1416 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1417 }
1418
1419 last_timestamp = timestamp;
1420 break;
1421
1422 case VFS_LOOKUP:
1423 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1424 if (cur_max >= MAX_THREADS)
1425 continue;
1426 ti = &th_state[cur_max++];
1427
1428 ti->thread = thread;
1429 ti->type = -1;
1430 ti->pathptr = (long *)0;
1431 ti->child_thread = 0;
1432 }
1433 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1434 {
1435 if (!ti->pathptr) {
1436 ti->arg1 = kd->arg1;
1437 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1438 sargptr = (long *)&ti->pathname[0];
1439
1440 *sargptr++ = kd->arg2;
1441 *sargptr++ = kd->arg3;
1442 *sargptr++ = kd->arg4;
1443 ti->pathptr = sargptr;
1444
1445 } else {
1446 sargptr = ti->pathptr;
1447
1448 /*
1449 We don't want to overrun our pathname buffer if the
1450 kernel sends us more VFS_LOOKUP entries than we can
1451 handle.
1452 */
1453
1454 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1455 {
1456 kd++;
1457 continue;
1458 }
1459
1460 /*
1461 We need to detect consecutive vfslookup entries.
1462 So, if we get here and find a START entry,
1463 fake the pathptr so we can bypass all further
1464 vfslookup entries.
1465 */
1466
1467 if (kd->debugid & DBG_FUNC_START)
1468 {
1469 (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1470 }
1471 else
1472 {
1473 *sargptr++ = kd->arg1;
1474 *sargptr++ = kd->arg2;
1475 *sargptr++ = kd->arg3;
1476 *sargptr++ = kd->arg4;
1477 ti->pathptr = sargptr;
1478 }
1479 }
1480 kd++;
1481 }
1482
1483 kd--;
1484
1485 /* print the tail end of the pathname */
1486 len = strlen(ti->pathname);
1487 if (len > 42)
1488 len -= 42;
1489 else
1490 len = 0;
1491
1492 if (log_fp) {
1493 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1494 timestamp - start_bias, delta, "VFS_LOOKUP",
1495 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1496 }
1497
1498 last_timestamp = timestamp;
1499 break;
1500
1501 default:
1502 if (debugid & DBG_FUNC_START)
1503 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1504 else if (debugid & DBG_FUNC_END)
1505 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1506 else
1507 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1508
1509 last_timestamp = timestamp;
1510 break;
1511 }
1512 }
1513 if (last_mach_sched && log_fp)
1514 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1515 #if 0
1516 if (first_entry == 0 && log_fp)
1517 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1518 #endif
1519 if (log_fp)
1520 fflush(log_fp);
1521
1522 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1523 {
1524 /* set back to realtime band */
1525 if(set_time_constraint_policy() != KERN_SUCCESS)
1526 quit("Failed to set time_constraint policy.\n");
1527 }
1528 }
1529
1530 void
1531 enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1532 {
1533 struct th_info *ti;
1534 int i;
1535 int cpunum;
1536 char *p;
1537
1538 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1539
1540 if (print_info && fp) {
1541 if (p = find_code(type)) {
1542 if (type == INTERRUPT) {
1543 int mode = 1;
1544
1545 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1546 if (ti->type == -1 && strcmp(command, "kernel_task"))
1547 mode = 0;
1548 }
1549
1550 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1551 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1552 } else if (type == MACH_vmfault) {
1553 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1554 timestamp - bias, delta, p, thread, cpunum, command);
1555 } else {
1556 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1557 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1558 thread, cpunum, command);
1559 }
1560 } else {
1561 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1562 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1563 thread, cpunum, command);
1564 }
1565 }
1566 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1567 if (cur_max >= MAX_THREADS) {
1568 static int do_this_once = 1;
1569
1570 if (do_this_once) {
1571 for (i = 0; i < cur_max; i++) {
1572 if (!fp)
1573 break;
1574 fprintf(fp, "thread = %x, type = %x\n",
1575 th_state[i].thread, th_state[i].type);
1576 }
1577 do_this_once = 0;
1578 }
1579 return;
1580
1581 }
1582 ti = &th_state[cur_max++];
1583
1584 ti->thread = thread;
1585 ti->child_thread = 0;
1586 }
1587 if (type != BSC_exit)
1588 ti->type = type;
1589 else
1590 ti->type = -1;
1591 ti->stime = timestamp;
1592 ti->pathptr = (long *)0;
1593
1594 #if 0
1595 if (print_info && fp)
1596 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1597 #endif
1598 }
1599
1600
1601 void
1602 exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1603 {
1604 struct th_info *ti;
1605 int cpunum;
1606 char *p;
1607
1608 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1609 ti = find_thread(thread, type, type);
1610 #if 0
1611 if (print_info && fp)
1612 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1613 #endif
1614 if (print_info && fp) {
1615 if (ti)
1616 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1617 else
1618 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1619
1620 if (p = find_code(type)) {
1621 if (type == INTERRUPT) {
1622 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1623 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1624 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1625 p, fault_name[kd->arg2], kd->arg1,
1626 thread, cpunum, command);
1627 } else {
1628 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1629 p, kd->arg1, kd->arg2,
1630 thread, cpunum, command);
1631 }
1632 } else {
1633 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1634 type, kd->arg1, kd->arg2,
1635 thread, cpunum, command);
1636 }
1637 }
1638 if (ti == (struct th_info *)0) {
1639 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1640 if (cur_max >= MAX_THREADS)
1641 return;
1642 ti = &th_state[cur_max++];
1643
1644 ti->thread = thread;
1645 ti->child_thread = 0;
1646 ti->pathptr = (long *)0;
1647 }
1648 }
1649 ti->type = -1;
1650 }
1651
1652 void
1653 print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1654 {
1655 char *p;
1656 int cpunum;
1657
1658 if (!fp)
1659 return;
1660
1661 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1662
1663 #if 0
1664 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1665 #endif
1666 if (p = find_code(type)) {
1667 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1668 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1669 thread, cpunum, command);
1670 } else {
1671 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1672 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1673 thread, cpunum, command);
1674 }
1675 }
1676
1677 int
1678 check_for_thread_update(int thread, int type, kd_buf *kd)
1679 {
1680 struct th_info *ti;
1681 void create_map_entry();
1682
1683 switch (type) {
1684
1685 case TRACE_DATA_NEWTHREAD:
1686 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1687 if (cur_max >= MAX_THREADS)
1688 return (1);
1689 ti = &th_state[cur_max++];
1690
1691 ti->thread = thread;
1692 ti->type = -1;
1693 ti->pathptr = (long *)0;
1694 }
1695 ti->child_thread = kd->arg1;
1696 return (1);
1697
1698 case TRACE_STRING_NEWTHREAD:
1699 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1700 return (1);
1701 if (ti->child_thread == 0)
1702 return (1);
1703 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1704
1705 ti->child_thread = 0;
1706 return (1);
1707
1708 case TRACE_STRING_EXEC:
1709 create_map_entry(thread, (char *)&kd->arg1);
1710 return (1);
1711
1712 }
1713 return (0);
1714 }
1715
1716
/*
 * Dump the trace records surrounding an over-threshold interrupt
 * (decrementer) latency to log_fp.
 *
 * kd_beg .. kd_end bound the event of interest; the function scans
 * backwards from kd_beg (at most MAX_LOG_COUNT records, staying on the
 * same cpu/thread, stopping at the previous DECR_TRAP) and forwards
 * from kd_end (until the next DECR_TRAP or a cpu/thread change) to
 * pick the window [kd_start, kd_stop], then pretty-prints every record
 * in that window.  Returns kd_stop so the caller can resume after the
 * logged window.
 */
kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
{
	kd_buf *kd, *kd_start, *kd_stop;
	int kd_count;     /* Limit the boundary of kd_start */
	double timestamp, last_timestamp, delta, start_bias;
	int thread, cpunum;
	int debugid, type, clen;
	int len;
	uint64_t now;
	struct th_info *ti;
	long *sargptr;
	char *p;
	char command[32];
	char command1[32];
	char sched_info[64];
	char buf1[128];
	char buf2[128];
	kd_threadmap *map;
	kd_threadmap *find_thread_map();

	/* banner: timestamp + measured interrupt latency, underlined */
	sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
	clen = strlen(buf1);
	memset(buf2, '-', clen);
	buf2[clen] = 0;
	fprintf(log_fp, "\n\n%s\n", buf2);
	fprintf(log_fp, "%s\n\n", buf1);

	fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

	thread = kd_beg->arg5 & KDBG_THREAD_MASK;
	cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1: 0;

	/* walk backwards to find the start of the window */
	for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
		if (kd_count == MAX_LOG_COUNT)
			break;

		/* NOTE(review): this compares the raw masked bits against 0/1 —
		   only correct if KDBG_CPU_MASK is bit 0; confirm the mask value */
		if((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)
			continue;

		if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
			break;

		if((kd_start->arg5 & KDBG_THREAD_MASK) != thread)
			break;
	}

	if (kd_start < (kd_buf *)my_buffer)
		kd_start = (kd_buf *)my_buffer;

	thread = kd_end->arg5 & KDBG_THREAD_MASK;

	/* walk forwards to find the end of the window */
	for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {

		if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
			break;

		/* NOTE(review): same raw-mask-vs-0/1 comparison as above */
		if((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)
			continue;

		if((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)
			break;
	}

	if (kd_stop >= end_of_sample)
		kd_stop = end_of_sample - 1;

	/* timestamps are packed as (secs << 32) | nsecs */
	now = (((uint64_t)kd_start->timestamp.tv_sec) << 32) |
		(uint64_t)((unsigned int)(kd_start->timestamp.tv_nsec));
	timestamp = ((double)now) / divisor;

	/* first pass: any thread whose recorded syscall start time is at or
	   after the window start cannot really be "in" that syscall here —
	   reset it to user mode so symbolication picks the right mode */
	for (kd = kd_start; kd <= kd_stop; kd++) {
		type = kd->debugid & DBG_FUNC_MASK;

		if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
			if (ti->stime >= timestamp)
				ti->type = -1;
		}
	}
	/* second pass: print every record in the window */
	for (kd = kd_start; kd <= kd_stop; kd++) {
		int mode;

		thread = kd->arg5 & KDBG_THREAD_MASK;
		cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
		debugid = kd->debugid;
		type = kd->debugid & DBG_FUNC_MASK;

		now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
			(uint64_t)((unsigned int)(kd->timestamp.tv_nsec));

		timestamp = ((double)now) / divisor;

		if (kd == kd_start) {
			start_bias = timestamp;
			last_timestamp = timestamp;
		}
		delta = timestamp - last_timestamp;

		if (map = find_thread_map(thread))
			strcpy(command, map->command);
		else
			command[0] = 0;


		switch (type) {

		case CQ_action:
			fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);

			last_timestamp = timestamp;
			break;

		case DECR_TRAP:
			/* arg1 < 0 encodes how far past the deadline we fired */
			if ((int)(kd->arg1) >= 0)
				i_latency = 0;
			else
				i_latency = (((double)(-1 - kd->arg1)) / divisor);

			if (i_thresh_hold && (int)i_latency > i_thresh_hold)
				p = "*";
			else
				p = " ";

			mode = 1;

			if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
				if (ti->type == -1 && strcmp(command, "kernel_task"))
					mode = 0;
			}
			fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);

			last_timestamp = timestamp;
			break;

		case DECR_SET:
			fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
				timestamp - start_bias, delta, (double)kd->arg1/divisor,
				"DECR_SET", thread, cpunum, command);

			last_timestamp = timestamp;
			break;

		case MACH_sched:
		case MACH_stkhandoff:
			/* arg2 = thread being switched to, arg3/arg4 = priorities */
			if (map = find_thread_map(kd->arg2))
				strcpy(command1, map->command);
			else
				sprintf(command1, "%-8x", kd->arg2);

			if (ti = find_thread(kd->arg2, 0, 0)) {
				if (ti->type == -1 && strcmp(command1, "kernel_task"))
					p = "U";
				else
					p = "K";
			} else
				p = "*";
			memset(sched_info, ' ', sizeof(sched_info));

			/* build "oldcmd @ pri N --> newcmd @ pri M[UK*]" in pieces;
			   each strlen/overwrite pair removes the NUL left by sprintf */
			sprintf(sched_info, "%14.14s", command);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);

			fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
				timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);

			last_timestamp = timestamp;
			break;

		case VFS_LOOKUP:
			if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
				if (cur_max >= MAX_THREADS)
					continue;
				ti = &th_state[cur_max++];

				ti->thread = thread;
				ti->type = -1;
				ti->pathptr = (long *)0;
				ti->child_thread = 0;
			}

			/* a pathname is delivered as a run of VFS_LOOKUP records,
			   4 longs of path bytes per record (3 in the first, which
			   carries the vnode address in arg1) */
			while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
			{
				if (!ti->pathptr) {
					ti->arg1 = kd->arg1;
					memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
					sargptr = (long *)&ti->pathname[0];

					*sargptr++ = kd->arg2;
					*sargptr++ = kd->arg3;
					*sargptr++ = kd->arg4;
					ti->pathptr = sargptr;

				} else {
					sargptr = ti->pathptr;

					/*
					We don't want to overrun our pathname buffer if the
					kernel sends us more VFS_LOOKUP entries than we can
					handle.
					*/

					if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
					{
						kd++;
						continue;
					}

					/*
					We need to detect consecutive vfslookup entries.
					So, if we get here and find a START entry,
					fake the pathptr so we can bypass all further
					vfslookup entries.
					*/

					if (kd->debugid & DBG_FUNC_START)
					{
						/* NOTE(review): cast-as-lvalue is a legacy GCC
						   extension; modern compilers reject it */
						(long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
					}
					else
					{
						*sargptr++ = kd->arg1;
						*sargptr++ = kd->arg2;
						*sargptr++ = kd->arg3;
						*sargptr++ = kd->arg4;
						ti->pathptr = sargptr;
					}
				}
				kd++;
			}

			/* back up one: the for loop's kd++ will step past the run */
			kd--;
			/* print the tail end of the pathname */
			len = strlen(ti->pathname);
			if (len > 42)
				len -= 42;
			else
				len = 0;

			fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
				timestamp - start_bias, delta, "VFS_LOOKUP",
				&ti->pathname[len], ti->arg1, thread, cpunum, command);

			last_timestamp = timestamp;
			break;

		default:
			if (debugid & DBG_FUNC_START)
				enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
			else if (debugid & DBG_FUNC_END)
				exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
			else
				print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);

			last_timestamp = timestamp;
			break;
		}
	}
	return(kd_stop);
}
1983
1984
1985 double handle_decrementer(kd_buf *kd)
1986 {
1987 double latency;
1988 int elapsed_usecs;
1989
1990 if ((int)(kd->arg1) >= 0)
1991 latency = 1;
1992 else
1993 latency = (((double)(-1 - kd->arg1)) / divisor);
1994 elapsed_usecs = (int)latency;
1995
1996 if (elapsed_usecs < 100)
1997 i_usec_10_bins[elapsed_usecs/10]++;
1998 if (elapsed_usecs < 1000)
1999 i_usec_100_bins[elapsed_usecs/100]++;
2000 else if (elapsed_usecs < 10000)
2001 i_msec_1_bins[elapsed_usecs/1000]++;
2002 else if (elapsed_usecs < 50000)
2003 i_msec_10_bins[elapsed_usecs/10000]++;
2004 else
2005 i_too_slow++;
2006
2007 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2008 i_exceeded_threshold++;
2009 if (elapsed_usecs > i_max_latency)
2010 i_max_latency = elapsed_usecs;
2011 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2012 i_min_latency = elapsed_usecs;
2013 i_total_latency += elapsed_usecs;
2014 i_total_samples++;
2015
2016 return (latency);
2017 }
2018
2019
2020 void init_code_file()
2021 {
2022 FILE *fp;
2023 int i, n, cnt, code;
2024 char name[128];
2025
2026 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2027 if (log_fp)
2028 fprintf(log_fp, "open of %s failed\n", code_file);
2029 return;
2030 }
2031 n = fscanf(fp, "%d\n", &cnt);
2032
2033 if (n != 1) {
2034 if (log_fp)
2035 fprintf(log_fp, "bad format found in %s\n", code_file);
2036 return;
2037 }
2038 for (i = 0; i < MAX_ENTRIES; i++) {
2039 n = fscanf(fp, "%x%s\n", &code, name);
2040
2041 if (n != 2)
2042 break;
2043
2044 strncpy(codes_tab[i].name, name, 32);
2045 codes_tab[i].type = code;
2046 }
2047 num_of_codes = i;
2048
2049 fclose(fp);
2050 }
2051
2052
2053 void
2054 do_kernel_nm()
2055 {
2056 int i, len;
2057 FILE *fp = (FILE *)0;
2058 char tmp_nm_file[128];
2059 char tmpstr[1024];
2060 int inchr;
2061
2062 bzero(tmp_nm_file, 128);
2063 bzero(tmpstr, 1024);
2064
2065 /* Build the temporary nm file path */
2066 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2067
2068 /* Build the nm command and create a tmp file with the output*/
2069 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2070 kernelpath, tmp_nm_file);
2071 system(tmpstr);
2072
2073 /* Parse the output from the nm command */
2074 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2075 {
2076 /* Hmmm, let's not treat this as fatal */
2077 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2078 return;
2079 }
2080
2081 /* Count the number of symbols in the nm symbol table */
2082 kern_sym_count=0;
2083 while ( (inchr = getc(fp)) != -1)
2084 {
2085 if (inchr == '\n')
2086 kern_sym_count++;
2087 }
2088
2089 rewind(fp);
2090
2091 /* Malloc the space for symbol table */
2092 if (kern_sym_count > 0)
2093 {
2094 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2095 if (!kern_sym_tbl)
2096 {
2097 /* Hmmm, lets not treat this as fatal */
2098 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2099 }
2100 else
2101 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2102 }
2103 else
2104 {
2105 /* Hmmm, lets not treat this as fatal */
2106 fprintf(stderr, "No kernel symbol table \n");
2107 }
2108
2109 for (i=0; i<kern_sym_count; i++)
2110 {
2111 bzero(tmpstr, 1024);
2112 if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2113 break;
2114 else
2115 {
2116 len = strlen(tmpstr);
2117 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2118
2119 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2120 {
2121 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2122 kern_sym_tbl[i].k_sym_name = (char *)0;
2123 len = 0;
2124 }
2125 else
2126 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2127
2128 kern_sym_tbl[i].k_sym_len = len;
2129 }
2130 } /* end for */
2131
2132 if (i != kern_sym_count)
2133 {
2134 /* Hmmm, didn't build up entire table from nm */
2135 /* scrap the entire thing */
2136 if (kern_sym_tbl)
2137 free (kern_sym_tbl);
2138 kern_sym_tbl = (kern_sym_t *)0;
2139 kern_sym_count = 0;
2140 }
2141
2142 fclose(fp);
2143
2144 /* Remove the temporary nm file */
2145 unlink(tmp_nm_file);
2146
2147 #if 0
2148 /* Dump the kernel symbol table */
2149 for (i=0; i < kern_sym_count; i++)
2150 {
2151 if (kern_sym_tbl[i].k_sym_name)
2152 printf ("[%d] 0x%x %s\n", i,
2153 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2154 else
2155 printf ("[%d] 0x%x %s\n", i,
2156 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2157 }
2158 #endif
2159 }
2160
2161 char *
2162 pc_to_string(unsigned int pc, int max_len, int mode)
2163 {
2164 int ret;
2165 int len;
2166
2167 int binary_search();
2168
2169 if (mode == 0)
2170 {
2171 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2172 return(pcstring);
2173 }
2174
2175 ret=0;
2176 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2177
2178 if (ret == -1)
2179 {
2180 sprintf(pcstring, "0x%x", pc);
2181 return(pcstring);
2182 }
2183 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2184 {
2185 sprintf(pcstring, "0x%x", pc);
2186 return(pcstring);
2187 }
2188 else
2189 {
2190 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2191 len = max_len - 8;
2192
2193 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2194 sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
2195
2196 return (pcstring);
2197 }
2198 }
2199
2200
2201 /* Return -1 if not found, else return index */
2202 int binary_search(list, low, high, addr)
2203 kern_sym_t *list;
2204 int low, high;
2205 unsigned int addr;
2206 {
2207 int mid;
2208
2209 mid = (low + high) / 2;
2210
2211 if (low > high)
2212 return (-1); /* failed */
2213 else if (low + 1 == high)
2214 {
2215 if (list[low].k_sym_addr <= addr &&
2216 addr < list[high].k_sym_addr)
2217 {
2218 /* We have a range match */
2219 return(low);
2220 }
2221 else if (list[high].k_sym_addr <= addr)
2222 {
2223 return(high);
2224 }
2225 else
2226 return(-1); /* Failed */
2227 }
2228 else if (addr < list[mid].k_sym_addr)
2229 {
2230 return(binary_search (list, low, mid, addr));
2231 }
2232 else
2233 {
2234 return(binary_search (list, mid, high, addr));
2235 }
2236 }
2237
2238 void
2239 open_logfile(char *path)
2240 {
2241 log_fp = fopen(path, "a");
2242
2243 if (!log_fp)
2244 {
2245 /* failed to open path */
2246 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2247 exit_usage();
2248 }
2249 }