]> git.saurik.com Git - apple/system_cmds.git/blob - latency.tproj/latency.c
system_cmds-279.tar.gz
[apple/system_cmds.git] / latency.tproj / latency.c
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26
27 /*
28 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
29 */
30
31 #include <mach/mach.h>
32 #include <stdlib.h>
33 #include <stdio.h>
34 #include <signal.h>
35 #include <strings.h>
36 #include <nlist.h>
37 #include <fcntl.h>
38 #include <string.h>
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/time.h>
43
44 #include <libc.h>
45 #include <termios.h>
46 #include <curses.h>
47 #include <sys/ioctl.h>
48
49 #ifndef KERNEL_PRIVATE
50 #define KERNEL_PRIVATE
51 #include <sys/kdebug.h>
52 #undef KERNEL_PRIVATE
53 #else
54 #include <sys/kdebug.h>
55 #endif /*KERNEL_PRIVATE*/
56
57 #include <sys/sysctl.h>
58 #include <errno.h>
59 #include <err.h>
60
61 #include <mach/host_info.h>
62 #include <mach/mach_error.h>
63 #include <mach/mach_types.h>
64 #include <mach/message.h>
65 #include <mach/mach_syscalls.h>
66 #include <mach/clock_types.h>
67 #include <mach/mach_time.h>
68
69 #include <libkern/OSTypes.h>
70
extern mach_port_t clock_port;

#define KERN_KDPIDEX 14


/*
 * Scheduler-latency statistics ("s_" prefix): histograms of how far a
 * timed sleep overshot its requested duration, plus running extremes
 * and totals.  Updated in main(), displayed by screen_update().
 */
int s_usec_10_bins[10];		/* 0-100us, 10us buckets */
int s_usec_100_bins[10];	/* up to 1ms, 100us buckets (bin 0 shown in the table above it) */
int s_msec_1_bins[10];		/* up to 10ms, 1ms buckets */
int s_msec_10_bins[5];		/* up to 50ms, 10ms buckets */
int s_too_slow;			/* samples over 50ms */
int s_max_latency;
int s_min_latency = 0;
long long s_total_latency = 0;
int s_total_samples;
long s_thresh_hold;		/* -st option: log a trace sample when exceeded (usecs) */
int s_exceeded_threshold = 0;

/*
 * Interrupt (decrementer) latency statistics ("i_" prefix); same layout
 * as the scheduler set above.
 */
int i_usec_10_bins[10];
int i_usec_100_bins[10];
int i_msec_1_bins[10];
int i_msec_10_bins[5];
int i_too_slow;
int i_max_latency;
int i_min_latency = 0;
long long i_total_latency = 0;
int i_total_samples;
long i_thresh_hold;		/* -it option: log a trace sample when exceeded (usecs) */
int i_exceeded_threshold = 0;

long start_time;		/* time() when sampling began */
long curr_time;			/* time() at the top of the current loop pass */
long refresh_time;		/* next time the screen should be redrawn */

char *policy_name;		/* "TIMESHARE" or "REALTIME" */
int my_policy;			/* THREAD_STANDARD_POLICY or THREAD_TIME_CONSTRAINT_POLICY */
int my_pri = -1;		/* NOTE(review): read in sample_sc but never assigned in this file */
int num_of_usecs_to_sleep = 1000;	/* -s option: requested sleep interval */

char *kernelpath = (char *)0;	/* -n option: kernel image for symbol lookup */
char *code_file = (char *)0;	/* -c option: trace codes file */

/* One entry of the kernel symbol table built by do_kernel_nm(). */
typedef struct {
	u_long k_sym_addr;	/* kernel symbol address from nm */
	u_int k_sym_len;	/* length of kernel symbol string */
	char *k_sym_name;	/* kernel symbol string from nm */
} kern_sym_t;

kern_sym_t *kern_sym_tbl;	/* pointer to the nm table */
int kern_sym_count;		/* number of entries in nm table */
char pcstring[128];		/* scratch buffer, presumably for pc_to_string() -- defined past this chunk */

#define UNKNOWN "Can't find symbol name"


double divisor;			/* mach_absolute_time() ticks per microsecond; see getdivisor() */
int gotSIGWINCH = 0;		/* set by sigwinch(); main loop rebuilds the curses screen */
int trace_enabled = 0;		/* nonzero once kernel tracing has been turned on */
struct host_basic_info hi;	/* filled by host_info(); avail_cpus sizes last_decrementer_kd */


#define SAMPLE_SIZE 300000	/* kernel trace buffer size, in kd_buf entries */

int mib[6];			/* shared sysctl name buffer used by all the set_*/get_* helpers */
size_t needed;			/* shared sysctl length argument */
char *my_buffer;		/* user-side copy of the kernel trace buffer */

kbufinfo_t bufinfo = {0, 0, 0};

FILE *log_fp = (FILE *)0;	/* -l option: destination for trace logging */
int num_of_codes = 0;		/* entries used in codes_tab */
int need_new_map = 0;		/* nonzero when the threadmap must be re-read */
int total_threads = 0;		/* entries in mapptr */
kd_threadmap *mapptr = 0;	/* thread id -> command-name map */

#define MAX_ENTRIES 1024
/* trace debugid -> printable name, loaded from the codes file */
struct ct {
	int type;
	char name[32];
} codes_tab[MAX_ENTRIES];

/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
#define NUMPARMS 23
#define PATHLENGTH (NUMPARMS*sizeof(long))

/* Per-thread in-flight syscall/fault state, keyed by thread id. */
struct th_info {
	int thread;
	int type;		/* trace type of the entry event; -1 when idle */
	int child_thread;
	int arg1;
	double stime;
	long *pathptr;		/* next slot while assembling pathname from VFS_LOOKUP records */
	char pathname[PATHLENGTH + 1];
};

#define MAX_THREADS 512
struct th_info th_state[MAX_THREADS];

int cur_max = 0;		/* number of th_state entries in use */

/* kdebug trace codes matched against kd_buf.debugid (see sys/kdebug.h) */
#define TRACE_DATA_NEWTHREAD 0x07000004
#define TRACE_STRING_NEWTHREAD 0x07010004
#define TRACE_STRING_EXEC 0x07010008

#define INTERRUPT 0x01050000
#define DECR_TRAP 0x01090000
#define DECR_SET 0x01090004
#define MACH_vmfault 0x01300000
#define MACH_sched 0x01400000
#define MACH_stkhandoff 0x01400008
#define VFS_LOOKUP 0x03010090
#define BSC_exit 0x040C0004
#define IES_action 0x050b0018
#define IES_filter 0x050b001c
#define TES_action 0x050c0010
#define CQ_action 0x050d0018


#define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
#define DBG_FUNC_MASK 0xfffffffc

/* vm_fault subtype codes, indices into fault_name[] */
#define DBG_ZERO_FILL_FAULT 1
#define DBG_PAGEIN_FAULT 2
#define DBG_COW_FAULT 3
#define DBG_CACHE_HIT_FAULT 4

char *fault_name[5] = {
	"",
	"ZeroFill",
	"PageIn",
	"COW",
	"CacheHit",
};

char *pc_to_string();
static kern_return_t set_time_constraint_policy(void);
static kern_return_t set_standard_policy(void);

int decrementer_val = 0;	/* Value used to reset decrementer */
int set_remove_flag = 1;	/* By default, remove trace buffer */

kd_buf **last_decrementer_kd;	/* last DECR_TRAP per cpu */
#define MAX_LOG_COUNT 30	/* limits the number of entries dumped in log_decrementer */
213
214 int
215 quit(s)
216 char *s;
217 {
218 void set_enable();
219 void set_rtcdec();
220 void set_remove();
221
222 if (trace_enabled)
223 set_enable(0);
224
225 /*
226 This flag is turned off when calling
227 quit() due to a set_remove() failure.
228 */
229 if (set_remove_flag)
230 set_remove();
231
232 if (decrementer_val)
233 set_rtcdec(0);
234
235 printf("latency: ");
236 if (s)
237 printf("%s", s);
238
239 exit(1);
240 }
241
242 void
243 set_enable(int val)
244 {
245 mib[0] = CTL_KERN;
246 mib[1] = KERN_KDEBUG;
247 mib[2] = KERN_KDENABLE; /* protocol */
248 mib[3] = val;
249 mib[4] = 0;
250 mib[5] = 0; /* no flags */
251
252 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
253 quit("trace facility failure, KERN_KDENABLE\n");
254 }
255
256 void
257 set_numbufs(int nbufs)
258 {
259 mib[0] = CTL_KERN;
260 mib[1] = KERN_KDEBUG;
261 mib[2] = KERN_KDSETBUF;
262 mib[3] = nbufs;
263 mib[4] = 0;
264 mib[5] = 0; /* no flags */
265 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
266 quit("trace facility failure, KERN_KDSETBUF\n");
267
268 mib[0] = CTL_KERN;
269 mib[1] = KERN_KDEBUG;
270 mib[2] = KERN_KDSETUP;
271 mib[3] = 0;
272 mib[4] = 0;
273 mib[5] = 0; /* no flags */
274 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
275 quit("trace facility failure, KERN_KDSETUP\n");
276
277 }
278
279 void
280 set_pidexclude(int pid, int on_off)
281 {
282 kd_regtype kr;
283
284 kr.type = KDBG_TYPENONE;
285 kr.value1 = pid;
286 kr.value2 = on_off;
287 needed = sizeof(kd_regtype);
288 mib[0] = CTL_KERN;
289 mib[1] = KERN_KDEBUG;
290 mib[2] = KERN_KDPIDEX;
291 mib[3] = 0;
292 mib[4] = 0;
293 mib[5] = 0;
294
295 sysctl(mib, 3, &kr, &needed, NULL, 0);
296 }
297
298 set_rtcdec(decval)
299 int decval;
300 {kd_regtype kr;
301 int ret;
302 extern int errno;
303
304 kr.type = KDBG_TYPENONE;
305 kr.value1 = decval;
306 needed = sizeof(kd_regtype);
307 mib[0] = CTL_KERN;
308 mib[1] = KERN_KDEBUG;
309 mib[2] = KERN_KDSETRTCDEC; /* protocol */
310 mib[3] = 0; /* wildcard address family */
311 mib[4] = 0;
312 mib[5] = 0; /* no flags */
313
314 errno = 0;
315
316 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
317 {
318 decrementer_val = 0;
319 quit("trace facility failure, KERN_KDSETRTCDEC\n");
320 }
321 }
322
323
324 void
325 get_bufinfo(kbufinfo_t *val)
326 {
327 needed = sizeof (*val);
328 mib[0] = CTL_KERN;
329 mib[1] = KERN_KDEBUG;
330 mib[2] = KERN_KDGETBUF;
331 mib[3] = 0;
332 mib[4] = 0;
333 mib[5] = 0; /* no flags */
334
335 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
336 quit("trace facility failure, KERN_KDGETBUF\n");
337
338 }
339
340 void
341 set_remove()
342 {
343 extern int errno;
344
345 mib[0] = CTL_KERN;
346 mib[1] = KERN_KDEBUG;
347 mib[2] = KERN_KDREMOVE; /* protocol */
348 mib[3] = 0;
349 mib[4] = 0;
350 mib[5] = 0; /* no flags */
351
352 errno = 0;
353
354 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
355 {
356 set_remove_flag = 0;
357 if(errno == EBUSY)
358 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
359 else
360 quit("trace facility failure, KERN_KDREMOVE\n");
361 }
362 }
363
364 void
365 set_init_nologging()
366 {
367 /* When we aren't logging, only collect the DECR_TRAP trace points */
368 kd_regtype kr;
369 kr.type = KDBG_VALCHECK;
370 kr.value1 = DECR_TRAP;
371 kr.value2 = 0;
372 kr.value3 = 0;
373 kr.value4 = 0;
374 needed = sizeof(kd_regtype);
375 mib[0] = CTL_KERN;
376 mib[1] = KERN_KDEBUG;
377 mib[2] = KERN_KDSETREG;
378 mib[3] = 0;
379 mib[4] = 0;
380 mib[5] = 0; /* no flags */
381 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
382 quit("trace facility failure, KERN_KDSETREG\n");
383
384 mib[0] = CTL_KERN;
385 mib[1] = KERN_KDEBUG;
386 mib[2] = KERN_KDSETUP;
387 mib[3] = 0;
388 mib[4] = 0;
389 mib[5] = 0; /* no flags */
390
391 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
392 quit("trace facility failure, KERN_KDSETUP\n");
393 }
394
395 void
396 set_init_logging()
397 { kd_regtype kr;
398
399 kr.type = KDBG_RANGETYPE;
400 kr.value1 = 0;
401 kr.value2 = -1;
402 needed = sizeof(kd_regtype);
403 mib[0] = CTL_KERN;
404 mib[1] = KERN_KDEBUG;
405 mib[2] = KERN_KDSETREG;
406 mib[3] = 0;
407 mib[4] = 0;
408 mib[5] = 0; /* no flags */
409
410 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
411 quit("trace facility failure, KERN_KDSETREG\n");
412
413 mib[0] = CTL_KERN;
414 mib[1] = KERN_KDEBUG;
415 mib[2] = KERN_KDSETUP;
416 mib[3] = 0;
417 mib[4] = 0;
418 mib[5] = 0; /* no flags */
419
420 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
421 quit("trace facility failure, KERN_KDSETUP\n");
422 }
423
424
/*
 * SIGWINCH handler: just note that the terminal was resized; the main
 * loop tears down and rebuilds the curses screen when it sees the flag.
 */
void sigwinch()
{
	gotSIGWINCH = 1;
}
429
/*
 * SIGINT handler: emit one final report (to the log file when logging,
 * otherwise to the screen), then disable tracing, restore the terminal
 * and decrementer, remove the trace buffer, and exit.
 */
void sigintr()
{
	void screen_update();

	set_enable(0);
	set_pidexclude(getpid(), 0);	/* stop excluding ourselves from the trace */
	screen_update(log_fp);		/* final statistics */
	endwin();
	set_rtcdec(0);			/* restore default decrementer value */
	set_remove();

	exit(1);
}
443
void leave()	/* exit under normal conditions -- signal handler */
{
	set_enable(0);
	set_pidexclude(getpid(), 0);	/* stop excluding ourselves from the trace */
	endwin();
	set_rtcdec(0);			/* restore default decrementer value */
	set_remove();			/* release the kernel trace buffer */

	exit(1);
}
454
455 void
456 screen_update(FILE *fp)
457 {
458 int i;
459 int itotal, stotal;
460 int elapsed_secs;
461 int elapsed_mins;
462 int elapsed_hours;
463 unsigned int average_s_latency;
464 unsigned int average_i_latency;
465 char tbuf[256];
466
467 if (fp == (FILE *)0) {
468 erase();
469 move(0, 0);
470 } else
471 fprintf(fp,"\n\n===================================================================================================\n");
472 /*
473 * Display the current time.
474 * "ctime" always returns a string that looks like this:
475 *
476 * Sun Sep 16 01:03:52 1973
477 * 012345678901234567890123
478 * 1 2
479 *
480 * We want indices 11 thru 18 (length 8).
481 */
482 elapsed_secs = curr_time - start_time;
483 elapsed_hours = elapsed_secs / 3600;
484 elapsed_secs -= elapsed_hours * 3600;
485 elapsed_mins = elapsed_secs / 60;
486 elapsed_secs -= elapsed_mins * 60;
487
488 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
489 elapsed_hours, elapsed_mins, elapsed_secs);
490 if (fp)
491 fprintf(fp, "%s", tbuf);
492 else
493 printw(tbuf);
494
495
496
497 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
498
499 if (fp)
500 fprintf(fp, "%s", tbuf);
501 else
502 printw(tbuf);
503
504 sprintf(tbuf, "---------------------------------------------\n");
505
506 if (fp)
507 fprintf(fp, "%s", tbuf);
508 else
509 printw(tbuf);
510
511
512
513 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
514
515 if (fp)
516 fprintf(fp, "%s", tbuf);
517 else
518 printw(tbuf);
519
520
521
522 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
523 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
524
525 if (fp)
526 fprintf(fp, "%s", tbuf);
527 else
528 printw(tbuf);
529
530 stotal += s_usec_10_bins[i];
531 itotal += i_usec_10_bins[i];
532 }
533 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
534
535 if (fp)
536 fprintf(fp, "%s", tbuf);
537 else
538 printw(tbuf);
539
540
541
542 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
543 if (i < 9)
544 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
545 else
546 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
547
548 if (fp)
549 fprintf(fp, "%s", tbuf);
550 else
551 printw(tbuf);
552
553 stotal += s_usec_100_bins[i];
554 itotal += i_usec_100_bins[i];
555 }
556 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
557
558 if (fp)
559 fprintf(fp, "%s", tbuf);
560 else
561 printw(tbuf);
562
563
564
565 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
566 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
567
568 if (fp)
569 fprintf(fp, "%s", tbuf);
570 else
571 printw(tbuf);
572
573 stotal += s_msec_1_bins[i];
574 itotal += i_msec_1_bins[i];
575 }
576 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
577
578 if (fp)
579 fprintf(fp, "%s", tbuf);
580 else
581 printw(tbuf);
582
583
584
585
586 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
587 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
588
589 if (fp)
590 fprintf(fp, "%s", tbuf);
591 else
592 printw(tbuf);
593
594 stotal += s_msec_10_bins[i];
595 itotal += i_msec_10_bins[i];
596 }
597 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
598
599 if (fp)
600 fprintf(fp, "%s", tbuf);
601 else
602 printw(tbuf);
603
604 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
605
606 if (fp)
607 fprintf(fp, "%s", tbuf);
608 else
609 printw(tbuf);
610
611
612
613 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
614
615 if (fp)
616 fprintf(fp, "%s", tbuf);
617 else
618 printw(tbuf);
619
620 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
621
622 if (fp)
623 fprintf(fp, "%s", tbuf);
624 else
625 printw(tbuf);
626
627 if (s_total_samples)
628 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
629 else
630 average_s_latency = 0;
631
632 if (i_total_samples)
633 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
634 else
635 average_i_latency = 0;
636
637 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
638
639 if (fp)
640 fprintf(fp, "%s", tbuf);
641 else
642 printw(tbuf);
643
644 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
645
646 if (fp)
647 fprintf(fp, "%s", tbuf);
648 else
649 printw(tbuf);
650
651 if (fp == (FILE *)0)
652 refresh();
653 else
654 fflush(fp);
655 }
656
/*
 * Print command-line usage to stderr and exit(1).
 */
int
exit_usage()
{
	static const char * const usage_text[] = {
		"Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n",
		" [-it threshold] [-s sleep_in_usecs]\n",
		" [-d decrementer_in_usecs] [-n kernel]\n\n",
		" -rt Set realtime scheduling policy. Default is timeshare.\n",
		" -c specify name of codes file\n",
		" -l specify name of file to log trace entries to when threshold is exceeded\n",
		" -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n",
		" -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n",
		" -s set sleep time in microseconds\n",
		" -d set decrementer in microseconds.\n",
		" -n specify kernel, default is /mach_kernel\n",
		"\nlatency must be run as root\n\n",
	};
	int i;

	for (i = 0; i < (int)(sizeof(usage_text) / sizeof(usage_text[0])); i++)
		fputs(usage_text[i], stderr);

	exit(1);
}
678
679
680
/*
 * Entry point: parse options, load kernel symbols and trace codes,
 * configure the kernel trace facility, then loop forever: sleep a
 * fixed interval, measure how far the sleep overshot (scheduler
 * latency), bin the result, and periodically read/log the kernel
 * trace buffer (which also yields interrupt latency via DECR_TRAP
 * records).  Never returns; exits via signal handlers or quit().
 */
main(argc, argv)
int argc;
char *argv[];
{
	mach_timespec_t remain;		/* NOTE(review): appears unused in this function */
	uint64_t start, stop;
	uint64_t timestamp1;
	uint64_t timestamp2;
	uint64_t adeadline, adelay;
	double fdelay;
	int elapsed_usecs;
	double nanosecs_to_sleep;
	int loop_cnt, sample_sc_now;
	int decrementer_usec = 0;
	kern_return_t ret;
	int size;
	int i, count;
	host_name_port_t host;
	void getdivisor();
	void sample_sc();
	void init_code_file();
	void do_kernel_nm();
	void open_logfile();

	/* default scheduling policy is plain timeshare */
	my_policy = THREAD_STANDARD_POLICY;
	policy_name = "TIMESHARE";

	/* option parsing: consume argv one token at a time */
	while (argc > 1) {
		if (strcmp(argv[1], "-rt") == 0) {
			my_policy = THREAD_TIME_CONSTRAINT_POLICY;	/* the real time band */
			policy_name = "REALTIME";

		} else if (strcmp(argv[1], "-st") == 0) {	/* scheduler latency threshold (usecs) */
			argc--;
			argv++;

			if (argc > 1)
				s_thresh_hold = atoi(argv[1]);
			else
				exit_usage();

		} else if (strcmp(argv[1], "-it") == 0) {	/* interrupt latency threshold (usecs) */
			argc--;
			argv++;

			if (argc > 1)
				i_thresh_hold = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-c") == 0) {	/* trace codes file */
			argc--;
			argv++;

			if (argc > 1)
				code_file = argv[1];
			else
				exit_usage();
		} else if (strcmp(argv[1], "-l") == 0) {	/* log file for trace dumps */
			argc--;
			argv++;

			if (argc > 1)
				open_logfile(argv[1]);
			else
				exit_usage();

		} else if (strcmp(argv[1], "-s") == 0) {	/* sleep interval (usecs) */
			argc--;
			argv++;

			if (argc > 1)
				num_of_usecs_to_sleep = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-d") == 0) {	/* decrementer period (usecs) */
			argc--;
			argv++;

			if (argc > 1)
				decrementer_usec = atoi(argv[1]);
			else
				exit_usage();
		} else if (strcmp(argv[1], "-n") == 0) {	/* kernel image for symbol lookup */
			argc--;
			argv++;

			if (argc > 1)
				kernelpath = argv[1];
			else
				exit_usage();
		} else
			exit_usage();

		argc--;
		argv++;
	}

	/* the kdebug sysctls require root */
	if ( geteuid() != 0 ) {
		printf("'latency' must be run as root...\n");
		exit(1);
	}

	if (kernelpath == (char *) 0)
		kernelpath = "/mach_kernel";

	if (code_file == (char *) 0)
		code_file = "/usr/share/misc/trace.codes";

	do_kernel_nm();		/* build kern_sym_tbl for pc_to_string() */

	/* take a periodic trace sample roughly every 25ms worth of sleeps */
	sample_sc_now = 25000 / num_of_usecs_to_sleep;

	getdivisor();
	decrementer_val = decrementer_usec * divisor;

	/* get the cpu count for the DECR_TRAP array */
	host = mach_host_self();
	size = sizeof(hi)/sizeof(int);
	ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
		quit("can't allocate memory for decrementer tracing info\n");

	/* convert the requested sleep into mach_absolute_time() ticks */
	nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
	fdelay = nanosecs_to_sleep * (divisor /1000);
	adelay = (uint64_t)fdelay;

	init_code_file();

	/*
	 When the decrementer isn't set in the options,
	 decval will be zero and this call will reset
	 the system default ...
	*/
	set_rtcdec(decrementer_val);

	if (initscr() == (WINDOW *) 0)
	{
		printf("Unrecognized TERM type, try vt100\n");
		exit(1);
	}

	clear();
	refresh();
	signal(SIGWINCH, sigwinch);
	signal(SIGINT, sigintr);
	signal(SIGQUIT, leave);
	signal(SIGTERM, leave);
	signal(SIGHUP, leave);

	/* set up the kernel trace facility */
	if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
		quit("can't allocate memory for tracing info\n");
	set_remove();
	set_numbufs(SAMPLE_SIZE);
	set_enable(0);
	if(log_fp)
		set_init_logging();
	else
		set_init_nologging();
	set_pidexclude(getpid(), 1);	/* don't trace ourselves */
	set_enable(1);
	trace_enabled = 1;
	need_new_map = 1;

	loop_cnt = 0;
	start_time = time((long *)0);
	refresh_time = start_time;

	if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
	{
		/* the realtime band */
		if(set_time_constraint_policy() != KERN_SUCCESS)
			quit("Failed to set realtime policy.\n");
	}

	for (;;) {
		curr_time = time((long *)0);

		/* redraw the screen about once per second */
		if (curr_time >= refresh_time) {
			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
			{
				/* set standard timeshare policy during screen update */
				if(set_standard_policy() != KERN_SUCCESS)
					quit("Failed to set standard policy.\n");
			}
			screen_update((FILE *)0);
			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
			{
				/* set back to realtime band */
				if(set_time_constraint_policy() != KERN_SUCCESS)
					quit("Failed to set time_constraint policy.\n");
			}
			refresh_time = curr_time + 1;
		}

		/* sleep for the requested interval and time how long it really took */
		timestamp1 = mach_absolute_time();
		adeadline = timestamp1 + adelay;
		mk_wait_until(adeadline);
		timestamp2 = mach_absolute_time();

		start = timestamp1;

		stop = timestamp2;

		elapsed_usecs = (int)(((double)(stop - start)) / divisor);

		/* the overshoot beyond the requested sleep is the scheduler latency */
		if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
			continue;

		/* NOTE(review): a sample < 100us increments both the 10us and 100us tables */
		if (elapsed_usecs < 100)
			s_usec_10_bins[elapsed_usecs/10]++;
		if (elapsed_usecs < 1000)
			s_usec_100_bins[elapsed_usecs/100]++;
		else if (elapsed_usecs < 10000)
			s_msec_1_bins[elapsed_usecs/1000]++;
		else if (elapsed_usecs < 50000)
			s_msec_10_bins[elapsed_usecs/10000]++;
		else
			s_too_slow++;

		if (elapsed_usecs > s_max_latency)
			s_max_latency = elapsed_usecs;
		if (elapsed_usecs < s_min_latency || s_total_samples == 0)
			s_min_latency = elapsed_usecs;
		s_total_latency += elapsed_usecs;
		s_total_samples++;

		if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
			s_exceeded_threshold++;
		loop_cnt++;

		/* dump the trace when the threshold tripped; otherwise sample periodically */
		if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
			sample_sc(start, stop);
		else {
			if (loop_cnt >= sample_sc_now) {
				sample_sc((long long)0, (long long)0);
				loop_cnt = 0;
			}
		}
		if (gotSIGWINCH) {
			/*
			 No need to check for initscr error return.
			 We won't get here if it fails on the first call.
			*/
			endwin();
			clear();
			refresh();

			gotSIGWINCH = 0;
		}
	}
}
938
939
940 void getdivisor()
941 {
942 mach_timebase_info_data_t info;
943
944 (void) mach_timebase_info (&info);
945
946 divisor = ( (double)info.denom / (double)info.numer) * 1000;
947
948 }
949
950 /* This is the realtime band */
951 static kern_return_t
952 set_time_constraint_policy()
953 {
954 kern_return_t result;
955 thread_time_constraint_policy_data_t info;
956 mach_msg_type_number_t count;
957 boolean_t get_default;
958
959 get_default = TRUE;
960 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
961 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
962 (thread_policy_t)&info, &count, &get_default);
963 if (result != KERN_SUCCESS)
964 return (result);
965
966 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
967 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
968
969 return (result);
970 }
971
972 /* This is the timeshare mode */
973 static kern_return_t
974 set_standard_policy()
975 {
976 kern_return_t result;
977 thread_standard_policy_data_t info;
978 mach_msg_type_number_t count;
979 boolean_t get_default;
980
981 get_default = TRUE;
982 count = THREAD_STANDARD_POLICY_COUNT;
983 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
984 (thread_policy_t)&info, &count, &get_default);
985 if (result != KERN_SUCCESS)
986 return (result);
987
988 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
989 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
990
991 return (result);
992 }
993
994
995 void read_command_map()
996 {
997 size_t size;
998 int mib[6];
999
1000 if (mapptr) {
1001 free(mapptr);
1002 mapptr = 0;
1003 }
1004 total_threads = bufinfo.nkdthreads;
1005 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1006 if (size)
1007 {
1008 if (mapptr = (kd_threadmap *) malloc(size))
1009 bzero (mapptr, size);
1010 else
1011 {
1012 printf("Thread map is not initialized -- this is not fatal\n");
1013 return;
1014 }
1015 }
1016
1017 /* Now read the threadmap */
1018 mib[0] = CTL_KERN;
1019 mib[1] = KERN_KDEBUG;
1020 mib[2] = KERN_KDTHRMAP;
1021 mib[3] = 0;
1022 mib[4] = 0;
1023 mib[5] = 0; /* no flags */
1024 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1025 {
1026 /* This is not fatal -- just means I cant map command strings */
1027
1028 printf("Can't read the thread map -- this is not fatal\n");
1029 free(mapptr);
1030 mapptr = 0;
1031 return;
1032 }
1033 return;
1034 }
1035
1036
1037 void create_map_entry(int thread, char *command)
1038 {
1039 int i, n;
1040 kd_threadmap *map;
1041
1042 if (!mapptr)
1043 return;
1044
1045 for (i = 0, map = 0; !map && i < total_threads; i++)
1046 {
1047 if (mapptr[i].thread == thread )
1048 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1049 }
1050
1051 if (!map) /* look for invalid entries that I can reuse*/
1052 {
1053 for (i = 0, map = 0; !map && i < total_threads; i++)
1054 {
1055 if (mapptr[i].valid == 0 )
1056 map = &mapptr[i]; /* Reuse this invalid entry */
1057 }
1058 }
1059
1060 if (!map)
1061 {
1062 /* If reach here, then this is a new thread and
1063 * there are no invalid entries to reuse
1064 * Double the size of the thread map table.
1065 */
1066
1067 n = total_threads * 2;
1068 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1069 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1070 map = &mapptr[total_threads];
1071 total_threads = n;
1072 #if 0
1073 if (log_fp)
1074 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1075 #endif
1076 }
1077 #if 0
1078 if (log_fp)
1079 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1080 #endif
1081 map->valid = 1;
1082 map->thread = thread;
1083 /*
1084 The trace entry that returns the command name will hold
1085 at most, MAXCOMLEN chars, and in that case, is not
1086 guaranteed to be null terminated.
1087 */
1088 (void)strncpy (map->command, command, MAXCOMLEN);
1089 map->command[MAXCOMLEN] = '\0';
1090 }
1091
1092
1093 kd_threadmap *find_thread_map(int thread)
1094 {
1095 int i;
1096 kd_threadmap *map;
1097
1098 if (!mapptr)
1099 return((kd_threadmap *)0);
1100
1101 for (i = 0; i < total_threads; i++)
1102 {
1103 map = &mapptr[i];
1104 if (map->valid && (map->thread == thread))
1105 {
1106 return(map);
1107 }
1108 }
1109 return ((kd_threadmap *)0);
1110 }
1111
1112 void
1113 kill_thread_map(int thread)
1114 {
1115 kd_threadmap *map;
1116
1117 if (map = find_thread_map(thread)) {
1118
1119 #if 0
1120 if (log_fp)
1121 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1122 #endif
1123 map->valid = 0;
1124 map->thread = 0;
1125 map->command[0] = '\0';
1126 }
1127 }
1128
1129
1130 struct th_info *find_thread(int thread, int type1, int type2) {
1131 struct th_info *ti;
1132
1133 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1134 if (ti->thread == thread) {
1135 if (type1 == 0)
1136 return(ti);
1137 if (type1 == ti->type)
1138 return(ti);
1139 if (type2 == ti->type)
1140 return(ti);
1141 }
1142 }
1143 return ((struct th_info *)0);
1144 }
1145
1146
1147 char *find_code(type)
1148 {
1149 int i;
1150
1151 for (i = 0; i < num_of_codes; i++) {
1152 if (codes_tab[i].type == type)
1153 return(codes_tab[i].name);
1154 }
1155 return ((char *)0);
1156 }
1157
1158
1159 void sample_sc(uint64_t start, uint64_t stop)
1160 {
1161 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1162 uint64_t now;
1163 int count, i;
1164 int first_entry = 1;
1165 char command[32];
1166 double timestamp, last_timestamp, delta, start_bias;
1167 void read_command_map();
1168
1169 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1170 {
1171 /* set standard timeshare policy when logging */
1172 if(set_standard_policy() != KERN_SUCCESS)
1173 quit("Failed to set standard policy.\n");
1174 }
1175
1176 /* Get kernel buffer information */
1177 get_bufinfo(&bufinfo);
1178
1179 if (need_new_map) {
1180 read_command_map();
1181 need_new_map = 0;
1182 }
1183 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1184 mib[0] = CTL_KERN;
1185 mib[1] = KERN_KDEBUG;
1186 mib[2] = KERN_KDREADTR;
1187 mib[3] = 0;
1188 mib[4] = 0;
1189 mib[5] = 0; /* no flags */
1190
1191 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1192 quit("trace facility failure, KERN_KDREADTR\n");
1193
1194 count = needed;
1195
1196 if (bufinfo.flags & KDBG_WRAPPED) {
1197 for (i = 0; i < cur_max; i++) {
1198 th_state[i].thread = 0;
1199 th_state[i].type = -1;
1200 th_state[i].pathptr = (long *)0;
1201 th_state[i].pathname[0] = 0;
1202 }
1203 cur_max = 0;
1204 need_new_map = 1;
1205
1206 set_enable(0);
1207 set_enable(1);
1208
1209 if (log_fp) {
1210 double latency;
1211
1212 latency = (double)(stop - start) / divisor;
1213 latency -= (double)num_of_usecs_to_sleep;
1214
1215 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1216 &(ctime(&curr_time)[0]), latency, count);
1217 }
1218 }
1219 end_of_sample = &((kd_buf *)my_buffer)[count];
1220
1221 /* Always reinitialize the DECR_TRAP array */
1222 for (i=0; i < hi.avail_cpus; i++)
1223 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1224
1225 last_mach_sched = (kd_buf *)0;
1226
1227 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1228 int debugid, thread, cpunum;
1229 int type, clen, mode;
1230 int len;
1231 char *p;
1232 long *sargptr;
1233 kd_buf *cur_kd;
1234 double i_latency;
1235 struct th_info *ti;
1236 char command1[32];
1237 char sched_info[64];
1238 kd_threadmap *map;
1239 kd_threadmap *find_thread_map();
1240 double handle_decrementer();
1241 kd_buf *log_decrementer();
1242 int check_for_thread_update();
1243 void enter_syscall();
1244 void exit_syscall();
1245 void print_entry();
1246
1247 thread = kd->arg5 & KDBG_THREAD_MASK;
1248 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1249 debugid = kd->debugid;
1250 type = kd->debugid & DBG_FUNC_MASK;
1251
1252 if (check_for_thread_update(thread, type, kd))
1253 continue;
1254
1255 if (type == DECR_TRAP)
1256 i_latency = handle_decrementer(kd);
1257
1258 now = kd->timestamp;
1259
1260 timestamp = ((double)now) / divisor;
1261
1262 if (now < start || now > stop) {
1263 if (debugid & DBG_FUNC_START)
1264 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1265 else if (debugid & DBG_FUNC_END)
1266 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1267 else if (type == DECR_TRAP) {
1268 cur_kd = kd;
1269 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1270 start_kd = last_decrementer_kd[cpunum];
1271 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1272 if (kd >= end_of_sample)
1273 break;
1274 }
1275 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1276 {
1277 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1278 last_decrementer_kd[cpunum] = kd;
1279 }
1280 else
1281 last_decrementer_kd[cpunum] = cur_kd;
1282 }
1283 continue;
1284 }
1285 if (first_entry) {
1286 double latency;
1287 char buf1[132];
1288 char buf2[132];
1289
1290 latency = (double)(stop - start) / divisor;
1291 latency -= (double)num_of_usecs_to_sleep;
1292
1293 if (my_pri == -1)
1294 sprintf(buf2, "default");
1295 else
1296 sprintf(buf2, "%d", my_pri);
1297 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1298 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1299 clen = strlen(buf1);
1300 memset(buf2, '-', clen);
1301 buf2[clen] = 0;
1302
1303 if (log_fp) {
1304 fprintf(log_fp, "\n\n%s\n", buf2);
1305 fprintf(log_fp, "%s\n\n", buf1);
1306 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1307 }
1308 start_bias = ((double)start) / divisor;
1309 last_timestamp = timestamp;
1310 first_entry = 0;
1311 }
1312 delta = timestamp - last_timestamp;
1313
1314 if (map = find_thread_map(thread))
1315 strcpy(command, map->command);
1316 else
1317 command[0] = 0;
1318
1319 switch (type) {
1320
1321 case CQ_action:
1322 if (log_fp) {
1323 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1324 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1325 }
1326 last_timestamp = timestamp;
1327 break;
1328
1329 case TES_action:
1330 if (log_fp) {
1331 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1332 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1333 }
1334
1335 last_timestamp = timestamp;
1336 break;
1337
1338 case IES_action:
1339 if (log_fp) {
1340 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1341 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1342 }
1343
1344 last_timestamp = timestamp;
1345 break;
1346
1347 case IES_filter:
1348 if (log_fp) {
1349 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1350 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1351 }
1352
1353 last_timestamp = timestamp;
1354 break;
1355
1356 case DECR_TRAP:
1357 last_decrementer_kd[cpunum] = kd;
1358
1359 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1360 p = "*";
1361 else
1362 p = " ";
1363
1364 mode = 1;
1365
1366 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1367 if (ti->type == -1 && strcmp(command, "kernel_task"))
1368 mode = 0;
1369 }
1370
1371 if (log_fp) {
1372 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1373 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1374 }
1375
1376 last_timestamp = timestamp;
1377 break;
1378
1379 case DECR_SET:
1380 if (log_fp) {
1381 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1382 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1383 }
1384
1385 last_timestamp = timestamp;
1386 break;
1387
1388 case MACH_sched:
1389 case MACH_stkhandoff:
1390 last_mach_sched = kd;
1391
1392 if (map = find_thread_map(kd->arg2))
1393 strcpy(command1, map->command);
1394 else
1395 sprintf(command1, "%-8x", kd->arg2);
1396
1397 if (ti = find_thread(kd->arg2, 0, 0)) {
1398 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1399 p = "U";
1400 else
1401 p = "K";
1402 } else
1403 p = "*";
1404 memset(sched_info, ' ', sizeof(sched_info));
1405
1406 sprintf(sched_info, "%14.14s", command);
1407 clen = strlen(sched_info);
1408 sched_info[clen] = ' ';
1409
1410 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1411 clen = strlen(sched_info);
1412 sched_info[clen] = ' ';
1413
1414 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1415
1416 if (log_fp) {
1417 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1418 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1419 }
1420
1421 last_timestamp = timestamp;
1422 break;
1423
1424 case VFS_LOOKUP:
1425 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1426 if (cur_max >= MAX_THREADS)
1427 continue;
1428 ti = &th_state[cur_max++];
1429
1430 ti->thread = thread;
1431 ti->type = -1;
1432 ti->pathptr = (long *)0;
1433 ti->child_thread = 0;
1434 }
1435 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1436 {
1437 if (!ti->pathptr) {
1438 ti->arg1 = kd->arg1;
1439 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1440 sargptr = (long *)&ti->pathname[0];
1441
1442 *sargptr++ = kd->arg2;
1443 *sargptr++ = kd->arg3;
1444 *sargptr++ = kd->arg4;
1445 ti->pathptr = sargptr;
1446
1447 } else {
1448 sargptr = ti->pathptr;
1449
1450 /*
1451 We don't want to overrun our pathname buffer if the
1452 kernel sends us more VFS_LOOKUP entries than we can
1453 handle.
1454 */
1455
1456 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1457 {
1458 kd++;
1459 continue;
1460 }
1461
1462 /*
1463 We need to detect consecutive vfslookup entries.
1464 So, if we get here and find a START entry,
1465 fake the pathptr so we can bypass all further
1466 vfslookup entries.
1467 */
1468
1469 if (kd->debugid & DBG_FUNC_START)
1470 {
1471 (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1472 }
1473 else
1474 {
1475 *sargptr++ = kd->arg1;
1476 *sargptr++ = kd->arg2;
1477 *sargptr++ = kd->arg3;
1478 *sargptr++ = kd->arg4;
1479 ti->pathptr = sargptr;
1480 }
1481 }
1482 kd++;
1483 }
1484
1485 kd--;
1486
1487 /* print the tail end of the pathname */
1488 len = strlen(ti->pathname);
1489 if (len > 42)
1490 len -= 42;
1491 else
1492 len = 0;
1493
1494 if (log_fp) {
1495 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1496 timestamp - start_bias, delta, "VFS_LOOKUP",
1497 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1498 }
1499
1500 last_timestamp = timestamp;
1501 break;
1502
1503 default:
1504 if (debugid & DBG_FUNC_START)
1505 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1506 else if (debugid & DBG_FUNC_END)
1507 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1508 else
1509 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1510
1511 last_timestamp = timestamp;
1512 break;
1513 }
1514 }
1515 if (last_mach_sched && log_fp)
1516 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1517 #if 0
1518 if (first_entry == 0 && log_fp)
1519 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1520 #endif
1521 if (log_fp)
1522 fflush(log_fp);
1523
1524 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1525 {
1526 /* set back to realtime band */
1527 if(set_time_constraint_policy() != KERN_SUCCESS)
1528 quit("Failed to set time_constraint policy.\n");
1529 }
1530 }
1531
/*
 * Record that 'thread' has entered a syscall/trap of the given 'type',
 * creating a th_state[] slot for the thread if one doesn't exist yet,
 * and stamping the entry time so exit_syscall() can report elapsed time.
 * When 'print_info' is set and 'fp' is open, also emit one formatted log
 * line describing the event (INTERRUPT / vmfault / named code / raw hex).
 *
 * fp         - log stream (may be NULL; printing is then skipped)
 * kd         - raw kdebug record; arg5 carries the thread id + CPU bit
 * command    - name of the command owning 'thread' (for display only)
 * timestamp  - event time in usecs; 'bias' is subtracted for display
 * delta      - usecs since the previously printed event
 */
void
enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
{
	struct th_info *ti;
	int    i;
	int    cpunum;
	char  *p;

	/* arg5's CPU bit selects cpu 1 vs cpu 0 (two-way systems at most) */
	cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;

	if (print_info && fp) {
		if (p = find_code(type)) {
			if (type == INTERRUPT) {
				int mode = 1;

				/* mode 0 => treat the pc as a user-space address: the
				   interrupted thread is known, marked as out of the
				   kernel (type == -1), and isn't kernel_task */
				if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
					if (ti->type == -1 && strcmp(command, "kernel_task"))
						mode = 0;
				}

				fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
					timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
			} else if (type == MACH_vmfault) {
				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
					timestamp - bias, delta, p, thread, cpunum, command);
			} else {
				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
					timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
					thread, cpunum, command);
			}
		} else {
			/* no symbolic name for this debugid: dump it raw */
			fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
				timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
				thread, cpunum, command);
		}
	}
	if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
		if (cur_max >= MAX_THREADS) {
			static int do_this_once = 1;

			/* thread table is full: dump it to the log once so the
			   overflow is visible, then drop this event */
			if (do_this_once) {
				for (i = 0; i < cur_max; i++) {
					if (!fp)
						break;
					fprintf(fp, "thread = %x, type = %x\n",
						th_state[i].thread, th_state[i].type);
				}
				do_this_once = 0;
			}
			return;

		}
		ti = &th_state[cur_max++];

		ti->thread = thread;
		ti->child_thread = 0;
	}
	/* BSC_exit means the thread is leaving the kernel: type -1 marks
	   "no syscall in progress" */
	if (type != BSC_exit)
		ti->type = type;
	else
		ti->type = -1;
	ti->stime = timestamp;
	ti->pathptr = (long *)0;

#if 0
	if (print_info && fp)
		fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
#endif
}
1601
1602
/*
 * Record that 'thread' has returned from a syscall/trap of 'type' and
 * clear its in-kernel state.  When 'print_info' is set and 'fp' is open,
 * emit one log line; if the matching entry event was seen, the elapsed
 * time (timestamp - ti->stime) is printed in parentheses.
 *
 * Parameters mirror enter_syscall(); see that function.
 */
void
exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
{
	struct th_info *ti;
	int    cpunum;
	char  *p;

	cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
	/* look for the th_info slot recorded by enter_syscall for this type */
	ti = find_thread(thread, type, type);
#if 0
	if (print_info && fp)
		fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
#endif
	if (print_info && fp) {
		/* elapsed time is only known when the entry event was captured */
		if (ti)
			fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
		else
			fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);

		if (p = find_code(type)) {
			if (type == INTERRUPT) {
				fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
			} else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
				/* arg2 indexes fault_name[]; only in-range fault kinds
				   get a symbolic name */
				fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
					p, fault_name[kd->arg2], kd->arg1,
					thread, cpunum, command);
			} else {
				fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
					p, kd->arg1, kd->arg2,
					thread, cpunum, command);
			}
		} else {
			/* unnamed debugid: dump raw values */
			fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
				type, kd->arg1, kd->arg2,
				thread, cpunum, command);
		}
	}
	if (ti == (struct th_info *)0) {
		/* no slot for this exact type; fall back to any slot for the
		   thread, creating one if there's room */
		if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
			if (cur_max >= MAX_THREADS)
				return;
			ti = &th_state[cur_max++];

			ti->thread = thread;
			ti->child_thread = 0;
			ti->pathptr = (long *)0;
		}
	}
	/* mark the thread as no longer in a syscall */
	ti->type = -1;
}
1653
1654 void
1655 print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1656 {
1657 char *p;
1658 int cpunum;
1659
1660 if (!fp)
1661 return;
1662
1663 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1664
1665 #if 0
1666 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1667 #endif
1668 if (p = find_code(type)) {
1669 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1670 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1671 thread, cpunum, command);
1672 } else {
1673 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1674 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1675 thread, cpunum, command);
1676 }
1677 }
1678
1679 int
1680 check_for_thread_update(int thread, int type, kd_buf *kd)
1681 {
1682 struct th_info *ti;
1683 void create_map_entry();
1684
1685 switch (type) {
1686
1687 case TRACE_DATA_NEWTHREAD:
1688 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1689 if (cur_max >= MAX_THREADS)
1690 return (1);
1691 ti = &th_state[cur_max++];
1692
1693 ti->thread = thread;
1694 ti->type = -1;
1695 ti->pathptr = (long *)0;
1696 }
1697 ti->child_thread = kd->arg1;
1698 return (1);
1699
1700 case TRACE_STRING_NEWTHREAD:
1701 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1702 return (1);
1703 if (ti->child_thread == 0)
1704 return (1);
1705 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1706
1707 ti->child_thread = 0;
1708 return (1);
1709
1710 case TRACE_STRING_EXEC:
1711 create_map_entry(thread, (char *)&kd->arg1);
1712 return (1);
1713
1714 }
1715 return (0);
1716 }
1717
1718
1719 kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1720 {
1721 kd_buf *kd, *kd_start, *kd_stop;
1722 int kd_count; /* Limit the boundary of kd_start */
1723 double timestamp, last_timestamp, delta, start_bias;
1724 int thread, cpunum;
1725 int debugid, type, clen;
1726 int len;
1727 uint64_t now;
1728 struct th_info *ti;
1729 long *sargptr;
1730 char *p;
1731 char command[32];
1732 char command1[32];
1733 char sched_info[64];
1734 char buf1[128];
1735 char buf2[128];
1736 kd_threadmap *map;
1737 kd_threadmap *find_thread_map();
1738
1739 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1740 clen = strlen(buf1);
1741 memset(buf2, '-', clen);
1742 buf2[clen] = 0;
1743 fprintf(log_fp, "\n\n%s\n", buf2);
1744 fprintf(log_fp, "%s\n\n", buf1);
1745
1746 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1747
1748 thread = kd_beg->arg5 & KDBG_THREAD_MASK;
1749 cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1: 0;
1750
1751 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1752 if (kd_count == MAX_LOG_COUNT)
1753 break;
1754
1755 if((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)
1756 continue;
1757
1758 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1759 break;
1760
1761 if((kd_start->arg5 & KDBG_THREAD_MASK) != thread)
1762 break;
1763 }
1764
1765 if (kd_start < (kd_buf *)my_buffer)
1766 kd_start = (kd_buf *)my_buffer;
1767
1768 thread = kd_end->arg5 & KDBG_THREAD_MASK;
1769
1770 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1771
1772 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1773 break;
1774
1775 if((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)
1776 continue;
1777
1778 if((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)
1779 break;
1780 }
1781
1782 if (kd_stop >= end_of_sample)
1783 kd_stop = end_of_sample - 1;
1784
1785 now = kd_start->timestamp;
1786 timestamp = ((double)now) / divisor;
1787
1788 for (kd = kd_start; kd <= kd_stop; kd++) {
1789 type = kd->debugid & DBG_FUNC_MASK;
1790
1791 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
1792 if (ti->stime >= timestamp)
1793 ti->type = -1;
1794 }
1795 }
1796 for (kd = kd_start; kd <= kd_stop; kd++) {
1797 int mode;
1798
1799 thread = kd->arg5 & KDBG_THREAD_MASK;
1800 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1801 debugid = kd->debugid;
1802 type = kd->debugid & DBG_FUNC_MASK;
1803
1804 now = kd->timestamp;
1805
1806 timestamp = ((double)now) / divisor;
1807
1808 if (kd == kd_start) {
1809 start_bias = timestamp;
1810 last_timestamp = timestamp;
1811 }
1812 delta = timestamp - last_timestamp;
1813
1814 if (map = find_thread_map(thread))
1815 strcpy(command, map->command);
1816 else
1817 command[0] = 0;
1818
1819
1820 switch (type) {
1821
1822 case CQ_action:
1823 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1824 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1825
1826 last_timestamp = timestamp;
1827 break;
1828
1829 case DECR_TRAP:
1830 if ((int)(kd->arg1) >= 0)
1831 i_latency = 0;
1832 else
1833 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1834
1835 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1836 p = "*";
1837 else
1838 p = " ";
1839
1840 mode = 1;
1841
1842 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1843 if (ti->type == -1 && strcmp(command, "kernel_task"))
1844 mode = 0;
1845 }
1846 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1847 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1848
1849 last_timestamp = timestamp;
1850 break;
1851
1852 case DECR_SET:
1853 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1854 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1855 "DECR_SET", thread, cpunum, command);
1856
1857 last_timestamp = timestamp;
1858 break;
1859
1860 case MACH_sched:
1861 case MACH_stkhandoff:
1862 if (map = find_thread_map(kd->arg2))
1863 strcpy(command1, map->command);
1864 else
1865 sprintf(command1, "%-8x", kd->arg2);
1866
1867 if (ti = find_thread(kd->arg2, 0, 0)) {
1868 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1869 p = "U";
1870 else
1871 p = "K";
1872 } else
1873 p = "*";
1874 memset(sched_info, ' ', sizeof(sched_info));
1875
1876 sprintf(sched_info, "%14.14s", command);
1877 clen = strlen(sched_info);
1878 sched_info[clen] = ' ';
1879
1880 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1881 clen = strlen(sched_info);
1882 sched_info[clen] = ' ';
1883
1884 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1885
1886 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1887 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1888
1889 last_timestamp = timestamp;
1890 break;
1891
1892 case VFS_LOOKUP:
1893 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1894 if (cur_max >= MAX_THREADS)
1895 continue;
1896 ti = &th_state[cur_max++];
1897
1898 ti->thread = thread;
1899 ti->type = -1;
1900 ti->pathptr = (long *)0;
1901 ti->child_thread = 0;
1902 }
1903
1904 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1905 {
1906 if (!ti->pathptr) {
1907 ti->arg1 = kd->arg1;
1908 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1909 sargptr = (long *)&ti->pathname[0];
1910
1911 *sargptr++ = kd->arg2;
1912 *sargptr++ = kd->arg3;
1913 *sargptr++ = kd->arg4;
1914 ti->pathptr = sargptr;
1915
1916 } else {
1917 sargptr = ti->pathptr;
1918
1919 /*
1920 We don't want to overrun our pathname buffer if the
1921 kernel sends us more VFS_LOOKUP entries than we can
1922 handle.
1923 */
1924
1925 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1926 {
1927 kd++;
1928 continue;
1929 }
1930
1931 /*
1932 We need to detect consecutive vfslookup entries.
1933 So, if we get here and find a START entry,
1934 fake the pathptr so we can bypass all further
1935 vfslookup entries.
1936 */
1937
1938 if (kd->debugid & DBG_FUNC_START)
1939 {
1940 (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1941 }
1942 else
1943 {
1944 *sargptr++ = kd->arg1;
1945 *sargptr++ = kd->arg2;
1946 *sargptr++ = kd->arg3;
1947 *sargptr++ = kd->arg4;
1948 ti->pathptr = sargptr;
1949 }
1950 }
1951 kd++;
1952 }
1953
1954 kd--;
1955 /* print the tail end of the pathname */
1956 len = strlen(ti->pathname);
1957 if (len > 42)
1958 len -= 42;
1959 else
1960 len = 0;
1961
1962 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1963 timestamp - start_bias, delta, "VFS_LOOKUP",
1964 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1965
1966 last_timestamp = timestamp;
1967 break;
1968
1969 default:
1970 if (debugid & DBG_FUNC_START)
1971 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1972 else if (debugid & DBG_FUNC_END)
1973 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1974 else
1975 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1976
1977 last_timestamp = timestamp;
1978 break;
1979 }
1980 }
1981 return(kd_stop);
1982 }
1983
1984
1985 double handle_decrementer(kd_buf *kd)
1986 {
1987 double latency;
1988 int elapsed_usecs;
1989
1990 if ((int)(kd->arg1) >= 0)
1991 latency = 1;
1992 else
1993 latency = (((double)(-1 - kd->arg1)) / divisor);
1994 elapsed_usecs = (int)latency;
1995
1996 if (elapsed_usecs < 100)
1997 i_usec_10_bins[elapsed_usecs/10]++;
1998 if (elapsed_usecs < 1000)
1999 i_usec_100_bins[elapsed_usecs/100]++;
2000 else if (elapsed_usecs < 10000)
2001 i_msec_1_bins[elapsed_usecs/1000]++;
2002 else if (elapsed_usecs < 50000)
2003 i_msec_10_bins[elapsed_usecs/10000]++;
2004 else
2005 i_too_slow++;
2006
2007 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2008 i_exceeded_threshold++;
2009 if (elapsed_usecs > i_max_latency)
2010 i_max_latency = elapsed_usecs;
2011 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2012 i_min_latency = elapsed_usecs;
2013 i_total_latency += elapsed_usecs;
2014 i_total_samples++;
2015
2016 return (latency);
2017 }
2018
2019
2020 void init_code_file()
2021 {
2022 FILE *fp;
2023 int i, n, cnt, code;
2024 char name[128];
2025
2026 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2027 if (log_fp)
2028 fprintf(log_fp, "open of %s failed\n", code_file);
2029 return;
2030 }
2031 n = fscanf(fp, "%d\n", &cnt);
2032
2033 if (n != 1) {
2034 if (log_fp)
2035 fprintf(log_fp, "bad format found in %s\n", code_file);
2036 return;
2037 }
2038 for (i = 0; i < MAX_ENTRIES; i++) {
2039 n = fscanf(fp, "%x%s\n", &code, name);
2040
2041 if (n != 2)
2042 break;
2043
2044 strncpy(codes_tab[i].name, name, 32);
2045 codes_tab[i].type = code;
2046 }
2047 num_of_codes = i;
2048
2049 fclose(fp);
2050 }
2051
2052
2053 void
2054 do_kernel_nm()
2055 {
2056 int i, len;
2057 FILE *fp = (FILE *)0;
2058 char tmp_nm_file[128];
2059 char tmpstr[1024];
2060 int inchr;
2061
2062 bzero(tmp_nm_file, 128);
2063 bzero(tmpstr, 1024);
2064
2065 /* Build the temporary nm file path */
2066 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2067
2068 /* Build the nm command and create a tmp file with the output*/
2069 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2070 kernelpath, tmp_nm_file);
2071 system(tmpstr);
2072
2073 /* Parse the output from the nm command */
2074 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2075 {
2076 /* Hmmm, let's not treat this as fatal */
2077 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2078 return;
2079 }
2080
2081 /* Count the number of symbols in the nm symbol table */
2082 kern_sym_count=0;
2083 while ( (inchr = getc(fp)) != -1)
2084 {
2085 if (inchr == '\n')
2086 kern_sym_count++;
2087 }
2088
2089 rewind(fp);
2090
2091 /* Malloc the space for symbol table */
2092 if (kern_sym_count > 0)
2093 {
2094 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2095 if (!kern_sym_tbl)
2096 {
2097 /* Hmmm, lets not treat this as fatal */
2098 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2099 }
2100 else
2101 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2102 }
2103 else
2104 {
2105 /* Hmmm, lets not treat this as fatal */
2106 fprintf(stderr, "No kernel symbol table \n");
2107 }
2108
2109 for (i=0; i<kern_sym_count; i++)
2110 {
2111 bzero(tmpstr, 1024);
2112 if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2113 break;
2114 else
2115 {
2116 len = strlen(tmpstr);
2117 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2118
2119 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2120 {
2121 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2122 kern_sym_tbl[i].k_sym_name = (char *)0;
2123 len = 0;
2124 }
2125 else
2126 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2127
2128 kern_sym_tbl[i].k_sym_len = len;
2129 }
2130 } /* end for */
2131
2132 if (i != kern_sym_count)
2133 {
2134 /* Hmmm, didn't build up entire table from nm */
2135 /* scrap the entire thing */
2136 if (kern_sym_tbl)
2137 free (kern_sym_tbl);
2138 kern_sym_tbl = (kern_sym_t *)0;
2139 kern_sym_count = 0;
2140 }
2141
2142 fclose(fp);
2143
2144 /* Remove the temporary nm file */
2145 unlink(tmp_nm_file);
2146
2147 #if 0
2148 /* Dump the kernel symbol table */
2149 for (i=0; i < kern_sym_count; i++)
2150 {
2151 if (kern_sym_tbl[i].k_sym_name)
2152 printf ("[%d] 0x%x %s\n", i,
2153 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2154 else
2155 printf ("[%d] 0x%x %s\n", i,
2156 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2157 }
2158 #endif
2159 }
2160
2161 char *
2162 pc_to_string(unsigned int pc, int max_len, int mode)
2163 {
2164 int ret;
2165 int len;
2166
2167 int binary_search();
2168
2169 if (mode == 0)
2170 {
2171 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2172 return(pcstring);
2173 }
2174
2175 ret=0;
2176 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2177
2178 if (ret == -1)
2179 {
2180 sprintf(pcstring, "0x%x", pc);
2181 return(pcstring);
2182 }
2183 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2184 {
2185 sprintf(pcstring, "0x%x", pc);
2186 return(pcstring);
2187 }
2188 else
2189 {
2190 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2191 len = max_len - 8;
2192
2193 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2194 sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
2195
2196 return (pcstring);
2197 }
2198 }
2199
2200
2201 /* Return -1 if not found, else return index */
2202 int binary_search(list, low, high, addr)
2203 kern_sym_t *list;
2204 int low, high;
2205 unsigned int addr;
2206 {
2207 int mid;
2208
2209 mid = (low + high) / 2;
2210
2211 if (low > high)
2212 return (-1); /* failed */
2213 else if (low + 1 == high)
2214 {
2215 if (list[low].k_sym_addr <= addr &&
2216 addr < list[high].k_sym_addr)
2217 {
2218 /* We have a range match */
2219 return(low);
2220 }
2221 else if (list[high].k_sym_addr <= addr)
2222 {
2223 return(high);
2224 }
2225 else
2226 return(-1); /* Failed */
2227 }
2228 else if (addr < list[mid].k_sym_addr)
2229 {
2230 return(binary_search (list, low, mid, addr));
2231 }
2232 else
2233 {
2234 return(binary_search (list, mid, high, addr));
2235 }
2236 }
2237
2238 void
2239 open_logfile(char *path)
2240 {
2241 log_fp = fopen(path, "a");
2242
2243 if (!log_fp)
2244 {
2245 /* failed to open path */
2246 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2247 exit_usage();
2248 }
2249 }