1/*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26/*
27 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
28*/
29
30#include <mach/mach.h>
31#include <stdlib.h>
32#include <stdio.h>
33#include <signal.h>
34#include <strings.h>
35#include <nlist.h>
36#include <fcntl.h>
37#include <string.h>
38
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/time.h>
42
43#include <libc.h>
44#include <termios.h>
45#include <curses.h>
46#include <sys/ioctl.h>
47
48#ifndef KERNEL_PRIVATE
49#define KERNEL_PRIVATE
50#include <sys/kdebug.h>
51#undef KERNEL_PRIVATE
52#else
53#include <sys/kdebug.h>
54#endif /*KERNEL_PRIVATE*/
55
56#include <sys/sysctl.h>
57#include <errno.h>
58#include <err.h>
59
60#include <mach/host_info.h>
61#include <mach/mach_error.h>
62#include <mach/mach_types.h>
63#include <mach/message.h>
64#include <mach/mach_syscalls.h>
65#include <mach/clock_types.h>
66#include <mach/mach_time.h>
67
68#include <libkern/OSTypes.h>
69
70extern mach_port_t clock_port;
71
72#define KERN_KDPIDEX 14
73
74
75int s_usec_10_bins[10];
76int s_usec_100_bins[10];
77int s_msec_1_bins[10];
78int s_msec_10_bins[5];
79int s_too_slow;
80int s_max_latency;
81int s_min_latency = 0;
82long long s_total_latency = 0;
83int s_total_samples;
84long s_thresh_hold;
85int s_exceeded_threshold = 0;
86
87int i_usec_10_bins[10];
88int i_usec_100_bins[10];
89int i_msec_1_bins[10];
90int i_msec_10_bins[5];
91int i_too_slow;
92int i_max_latency;
93int i_min_latency = 0;
94long long i_total_latency = 0;
95int i_total_samples;
96long i_thresh_hold;
97int i_exceeded_threshold = 0;
98
99long start_time;
100long curr_time;
101long refresh_time;
102
103char *policy_name;
104int my_policy;
105int my_pri = -1;
106int num_of_usecs_to_sleep = 1000;
107
108char *kernelpath = (char *)0;
109char *code_file = (char *)0;
110
111typedef struct {
112 u_long k_sym_addr; /* kernel symbol address from nm */
113 u_int k_sym_len; /* length of kernel symbol string */
114 char *k_sym_name; /* kernel symbol string from nm */
115} kern_sym_t;
116
117kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
118int kern_sym_count; /* number of entries in nm table */
119char pcstring[128];
120
121#define UNKNOWN "Can't find symbol name"
122
123
124double divisor;
125int gotSIGWINCH = 0;
126int trace_enabled = 0;
127struct host_basic_info hi;
128
129
130#define SAMPLE_SIZE 300000
131
132int mib[6];
133size_t needed;
134char *my_buffer;
135
136kbufinfo_t bufinfo = {0, 0, 0};
137
138FILE *log_fp = (FILE *)0;
139int num_of_codes = 0;
140int need_new_map = 0;
141int total_threads = 0;
142kd_threadmap *mapptr = 0;
143
144#define MAX_ENTRIES 1024
145struct ct {
146 int type;
147 char name[32];
148} codes_tab[MAX_ENTRIES];
149
150/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
151#define NUMPARMS 23
152#define PATHLENGTH (NUMPARMS*sizeof(long))
153
154struct th_info {
155 int thread;
156 int type;
157 int child_thread;
158 int arg1;
159 double stime;
160 long *pathptr;
161 char pathname[PATHLENGTH + 1];
162};
163
164#define MAX_THREADS 512
165struct th_info th_state[MAX_THREADS];
166
167int cur_max = 0;
168
169#define TRACE_DATA_NEWTHREAD 0x07000004
170#define TRACE_STRING_NEWTHREAD 0x07010004
171#define TRACE_STRING_EXEC 0x07010008
172
173#define INTERRUPT 0x01050000
174#define DECR_TRAP 0x01090000
175#define DECR_SET 0x01090004
176#define MACH_vmfault 0x01300000
177#define MACH_sched 0x01400000
178#define MACH_stkhandoff 0x01400008
179#define VFS_LOOKUP 0x03010090
180#define BSC_exit 0x040C0004
181#define IES_action 0x050b0018
182#define IES_filter 0x050b001c
183#define TES_action 0x050c0010
184#define CQ_action 0x050d0018
185
186
187#define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
188#define DBG_FUNC_MASK 0xfffffffc
189
190#define DBG_ZERO_FILL_FAULT 1
191#define DBG_PAGEIN_FAULT 2
192#define DBG_COW_FAULT 3
193#define DBG_CACHE_HIT_FAULT 4
194
195char *fault_name[5] = {
196 "",
197 "ZeroFill",
198 "PageIn",
199 "COW",
200 "CacheHit",
201};
202
203char *pc_to_string();
204static kern_return_t set_time_constraint_policy(void);
205static kern_return_t set_standard_policy(void);
206
207int decrementer_val = 0; /* Value used to reset decrementer */
208int set_remove_flag = 1; /* By default, remove trace buffer */
209
210kd_buf **last_decrementer_kd; /* last DECR_TRAP per cpu */
211#define MAX_LOG_COUNT 30 /* limits the number of entries dumped in log_decrementer */
212
213int
214quit(s)
215char *s;
216{
217 void set_enable();
218 void set_rtcdec();
219 void set_remove();
220
221 if (trace_enabled)
222 set_enable(0);
223
224 /*
225 This flag is turned off when calling
226 quit() due to a set_remove() failure.
227 */
228 if (set_remove_flag)
229 set_remove();
230
231 if (decrementer_val)
232 set_rtcdec(0);
233
234 printf("latency: ");
235 if (s)
236 printf("%s", s);
237
238 exit(1);
239}
240
241void
242set_enable(int val)
243{
244 mib[0] = CTL_KERN;
245 mib[1] = KERN_KDEBUG;
246 mib[2] = KERN_KDENABLE; /* protocol */
247 mib[3] = val;
248 mib[4] = 0;
249 mib[5] = 0; /* no flags */
250
251 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
252 quit("trace facility failure, KERN_KDENABLE\n");
253}
254
255void
256set_numbufs(int nbufs)
257{
258 mib[0] = CTL_KERN;
259 mib[1] = KERN_KDEBUG;
260 mib[2] = KERN_KDSETBUF;
261 mib[3] = nbufs;
262 mib[4] = 0;
263 mib[5] = 0; /* no flags */
264 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
265 quit("trace facility failure, KERN_KDSETBUF\n");
266
267 mib[0] = CTL_KERN;
268 mib[1] = KERN_KDEBUG;
269 mib[2] = KERN_KDSETUP;
270 mib[3] = 0;
271 mib[4] = 0;
272 mib[5] = 0; /* no flags */
273 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
274 quit("trace facility failure, KERN_KDSETUP\n");
275
276}
277
278void
279set_pidexclude(int pid, int on_off)
280{
281 kd_regtype kr;
282
283 kr.type = KDBG_TYPENONE;
284 kr.value1 = pid;
285 kr.value2 = on_off;
286 needed = sizeof(kd_regtype);
287 mib[0] = CTL_KERN;
288 mib[1] = KERN_KDEBUG;
289 mib[2] = KERN_KDPIDEX;
290 mib[3] = 0;
291 mib[4] = 0;
292 mib[5] = 0;
293
294 sysctl(mib, 3, &kr, &needed, NULL, 0);
295}
296
297set_rtcdec(decval)
298int decval;
299{kd_regtype kr;
300 int ret;
301 extern int errno;
302
303 kr.type = KDBG_TYPENONE;
304 kr.value1 = decval;
305 needed = sizeof(kd_regtype);
306 mib[0] = CTL_KERN;
307 mib[1] = KERN_KDEBUG;
308 mib[2] = KERN_KDSETRTCDEC; /* protocol */
309 mib[3] = 0; /* wildcard address family */
310 mib[4] = 0;
311 mib[5] = 0; /* no flags */
312
313 errno = 0;
314
315 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
316 {
317 decrementer_val = 0;
318 quit("trace facility failure, KERN_KDSETRTCDEC\n");
319 }
320}
321
322
323void
324get_bufinfo(kbufinfo_t *val)
325{
326 needed = sizeof (*val);
327 mib[0] = CTL_KERN;
328 mib[1] = KERN_KDEBUG;
329 mib[2] = KERN_KDGETBUF;
330 mib[3] = 0;
331 mib[4] = 0;
332 mib[5] = 0; /* no flags */
333
334 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
335 quit("trace facility failure, KERN_KDGETBUF\n");
336
337}
338
339void
340set_remove()
341{
342 extern int errno;
343
344 mib[0] = CTL_KERN;
345 mib[1] = KERN_KDEBUG;
346 mib[2] = KERN_KDREMOVE; /* protocol */
347 mib[3] = 0;
348 mib[4] = 0;
349 mib[5] = 0; /* no flags */
350
351 errno = 0;
352
353 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
354 {
355 set_remove_flag = 0;
356 if(errno == EBUSY)
357 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
358 else
359 quit("trace facility failure, KERN_KDREMOVE\n");
360 }
361}
362
363void
364set_init_nologging()
365{
366 /* When we aren't logging, only collect the DECR_TRAP trace points */
367 kd_regtype kr;
368 kr.type = KDBG_VALCHECK;
369 kr.value1 = DECR_TRAP;
370 kr.value2 = 0;
371 kr.value3 = 0;
372 kr.value4 = 0;
373 needed = sizeof(kd_regtype);
374 mib[0] = CTL_KERN;
375 mib[1] = KERN_KDEBUG;
376 mib[2] = KERN_KDSETREG;
377 mib[3] = 0;
378 mib[4] = 0;
379 mib[5] = 0; /* no flags */
380 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
381 quit("trace facility failure, KERN_KDSETREG\n");
382
383 mib[0] = CTL_KERN;
384 mib[1] = KERN_KDEBUG;
385 mib[2] = KERN_KDSETUP;
386 mib[3] = 0;
387 mib[4] = 0;
388 mib[5] = 0; /* no flags */
389
390 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
391 quit("trace facility failure, KERN_KDSETUP\n");
392}
393
394void
395set_init_logging()
396{ kd_regtype kr;
397
398 kr.type = KDBG_RANGETYPE;
399 kr.value1 = 0;
400 kr.value2 = -1;
401 needed = sizeof(kd_regtype);
402 mib[0] = CTL_KERN;
403 mib[1] = KERN_KDEBUG;
404 mib[2] = KERN_KDSETREG;
405 mib[3] = 0;
406 mib[4] = 0;
407 mib[5] = 0; /* no flags */
408
409 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
410 quit("trace facility failure, KERN_KDSETREG\n");
411
412 mib[0] = CTL_KERN;
413 mib[1] = KERN_KDEBUG;
414 mib[2] = KERN_KDSETUP;
415 mib[3] = 0;
416 mib[4] = 0;
417 mib[5] = 0; /* no flags */
418
419 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
420 quit("trace facility failure, KERN_KDSETUP\n");
421}
422
423
424void sigwinch()
425{
426 gotSIGWINCH = 1;
427}
428
429void sigintr()
430{
431 void screen_update();
432
433 set_enable(0);
434 set_pidexclude(getpid(), 0);
435 screen_update(log_fp);
436 endwin();
437 set_rtcdec(0);
438 set_remove();
439
440 exit(1);
441}
442
443void leave() /* exit under normal conditions -- signal handler */
444{
445 set_enable(0);
446 set_pidexclude(getpid(), 0);
447 endwin();
448 set_rtcdec(0);
449 set_remove();
450
451 exit(1);
452}
453
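/*
   screen_update: print the scheduler and interrupt latency histograms,
   min/max/average latencies, and threshold-exceeded counts. Writes to the
   curses screen when fp is NULL, otherwise appends to the log file.
*/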
454void
455screen_update(FILE *fp)
456{
457 int i;
458 int itotal, stotal;
459 int elapsed_secs;
460 int elapsed_mins;
461 int elapsed_hours;
462 unsigned int average_s_latency;
463 unsigned int average_i_latency;
464 char tbuf[256];
465
466 if (fp == (FILE *)0) {
467 erase();
468 move(0, 0);
469 } else
470 fprintf(fp,"\n\n===================================================================================================\n");
471 /*
472 * Display the current time.
473 * "ctime" always returns a string that looks like this:
474 *
475 * Sun Sep 16 01:03:52 1973
476 * 012345678901234567890123
477 * 1 2
478 *
479 * We want indices 11 thru 18 (length 8).
480 */
481 elapsed_secs = curr_time - start_time;
482 elapsed_hours = elapsed_secs / 3600;
483 elapsed_secs -= elapsed_hours * 3600;
484 elapsed_mins = elapsed_secs / 60;
485 elapsed_secs -= elapsed_mins * 60;
486
487 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
488 elapsed_hours, elapsed_mins, elapsed_secs);
489 if (fp)
490 fprintf(fp, "%s", tbuf);
491 else
492 printw(tbuf);
493
494
495
496 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
497
498 if (fp)
499 fprintf(fp, "%s", tbuf);
500 else
501 printw(tbuf);
502
503 sprintf(tbuf, "---------------------------------------------\n");
504
505 if (fp)
506 fprintf(fp, "%s", tbuf);
507 else
508 printw(tbuf);
509
510
511
512 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
513
514 if (fp)
515 fprintf(fp, "%s", tbuf);
516 else
517 printw(tbuf);
518
519
520
521 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
522 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
523
524 if (fp)
525 fprintf(fp, "%s", tbuf);
526 else
527 printw(tbuf);
528
529 stotal += s_usec_10_bins[i];
530 itotal += i_usec_10_bins[i];
531 }
532 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
533
534 if (fp)
535 fprintf(fp, "%s", tbuf);
536 else
537 printw(tbuf);
538
539
540
541 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
542 if (i < 9)
543 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
544 else
545 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
546
547 if (fp)
548 fprintf(fp, "%s", tbuf);
549 else
550 printw(tbuf);
551
552 stotal += s_usec_100_bins[i];
553 itotal += i_usec_100_bins[i];
554 }
555 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
556
557 if (fp)
558 fprintf(fp, "%s", tbuf);
559 else
560 printw(tbuf);
561
562
563
564 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
565 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
566
567 if (fp)
568 fprintf(fp, "%s", tbuf);
569 else
570 printw(tbuf);
571
572 stotal += s_msec_1_bins[i];
573 itotal += i_msec_1_bins[i];
574 }
575 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
576
577 if (fp)
578 fprintf(fp, "%s", tbuf);
579 else
580 printw(tbuf);
581
582
583
584
585 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
586 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
587
588 if (fp)
589 fprintf(fp, "%s", tbuf);
590 else
591 printw(tbuf);
592
593 stotal += s_msec_10_bins[i];
594 itotal += i_msec_10_bins[i];
595 }
596 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
597
598 if (fp)
599 fprintf(fp, "%s", tbuf);
600 else
601 printw(tbuf);
602
603 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
604
605 if (fp)
606 fprintf(fp, "%s", tbuf);
607 else
608 printw(tbuf);
609
610
611
612 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
613
614 if (fp)
615 fprintf(fp, "%s", tbuf);
616 else
617 printw(tbuf);
618
619 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
620
621 if (fp)
622 fprintf(fp, "%s", tbuf);
623 else
624 printw(tbuf);
625
626 if (s_total_samples)
627 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
628 else
629 average_s_latency = 0;
630
631 if (i_total_samples)
632 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
633 else
634 average_i_latency = 0;
635
636 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
637
638 if (fp)
639 fprintf(fp, "%s", tbuf);
640 else
641 printw(tbuf);
642
643 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
644
645 if (fp)
646 fprintf(fp, "%s", tbuf);
647 else
648 printw(tbuf);
649
650 if (fp == (FILE *)0)
651 refresh();
652 else
653 fflush(fp);
654}
655
656int
657exit_usage()
658{
659
660 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
661 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
662 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
663
664 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
665 fprintf(stderr, " -c specify name of codes file\n");
666 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
667 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
668 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
669 fprintf(stderr, " -s set sleep time in microseconds\n");
670 fprintf(stderr, " -d set decrementer in microseconds.\n");
671 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
672
673 fprintf(stderr, "\nlatency must be run as root\n\n");
674
675 exit(1);
676}
677
678
679
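/*
   main: parse options, load the kernel symbol table and trace codes, set up
   the kdebug trace facility, then loop: sleep for the requested interval via
   mk_wait_until(), measure how late the wakeup was, bin that scheduling
   latency, and periodically sample (and optionally log) the trace buffer.
*/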
680main(argc, argv)
681int argc;
682char *argv[];
683{
684 mach_timespec_t remain;
685 uint64_t start, stop;
686 uint64_t timestamp1;
687 uint64_t timestamp2;
688 uint64_t adeadline, adelay;
689 double fdelay;
690 int elapsed_usecs;
691 double nanosecs_to_sleep;
692 int loop_cnt, sample_sc_now;
693 int decrementer_usec = 0;
694 kern_return_t ret;
695 int size;
696 int i, count;
697 host_name_port_t host;
698 void getdivisor();
699 void sample_sc();
700 void init_code_file();
701 void do_kernel_nm();
702 void open_logfile();
703
704 my_policy = THREAD_STANDARD_POLICY;
705 policy_name = "TIMESHARE";
706
707 while (argc > 1) {
708 if (strcmp(argv[1], "-rt") == 0) {
709 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
710 policy_name = "REALTIME";
711
712 } else if (strcmp(argv[1], "-st") == 0) {
713 argc--;
714 argv++;
715
716 if (argc > 1)
717 s_thresh_hold = atoi(argv[1]);
718 else
719 exit_usage();
720
721 } else if (strcmp(argv[1], "-it") == 0) {
722 argc--;
723 argv++;
724
725 if (argc > 1)
726 i_thresh_hold = atoi(argv[1]);
727 else
728 exit_usage();
729 } else if (strcmp(argv[1], "-c") == 0) {
730 argc--;
731 argv++;
732
733 if (argc > 1)
734 code_file = argv[1];
735 else
736 exit_usage();
737 } else if (strcmp(argv[1], "-l") == 0) {
738 argc--;
739 argv++;
740
741 if (argc > 1)
742 open_logfile(argv[1]);
743 else
744 exit_usage();
745
746 } else if (strcmp(argv[1], "-s") == 0) {
747 argc--;
748 argv++;
749
750 if (argc > 1)
751 num_of_usecs_to_sleep = atoi(argv[1]);
752 else
753 exit_usage();
754 } else if (strcmp(argv[1], "-d") == 0) {
755 argc--;
756 argv++;
757
758 if (argc > 1)
759 decrementer_usec = atoi(argv[1]);
760 else
761 exit_usage();
762 } else if (strcmp(argv[1], "-n") == 0) {
763 argc--;
764 argv++;
765
766 if (argc > 1)
767 kernelpath = argv[1];
768 else
769 exit_usage();
770 } else
771 exit_usage();
772
773 argc--;
774 argv++;
775 }
776
777 if ( geteuid() != 0 ) {
778 printf("'latency' must be run as root...\n");
779 exit(1);
780 }
781
782 if (kernelpath == (char *) 0)
783 kernelpath = "/mach_kernel";
784
785 if (code_file == (char *) 0)
786 code_file = "/usr/share/misc/trace.codes";
787
788 do_kernel_nm();
789
790 sample_sc_now = 25000 / num_of_usecs_to_sleep;
791
792 getdivisor();
793 decrementer_val = decrementer_usec * divisor;
794
795 /* get the cpu count for the DECR_TRAP array */
796 host = mach_host_self();
797 size = sizeof(hi)/sizeof(int);
798 ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
799 if (ret != KERN_SUCCESS) {
800 mach_error(argv[0], ret);
801 exit(EXIT_FAILURE);
802 }
803
804 if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
805 quit("can't allocate memory for decrementer tracing info\n");
806
807 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
808 fdelay = nanosecs_to_sleep * (divisor /1000);
809 adelay = (uint64_t)fdelay;
810
811 init_code_file();
812
813 /*
814 When the decrementer isn't set in the options,
815 decval will be zero and this call will reset
816 the system default ...
817 */
818 set_rtcdec(decrementer_val);
819
820 if (initscr() == (WINDOW *) 0)
821 {
822 printf("Unrecognized TERM type, try vt100\n");
823 exit(1);
824 }
825
826 clear();
827 refresh();
828 signal(SIGWINCH, sigwinch);
829 signal(SIGINT, sigintr);
830 signal(SIGQUIT, leave);
831 signal(SIGTERM, leave);
832 signal(SIGHUP, leave);
833
834
835 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
836 quit("can't allocate memory for tracing info\n");
837 set_remove();
838 set_numbufs(SAMPLE_SIZE);
839 set_enable(0);
840 if(log_fp)
841 set_init_logging();
842 else
843 set_init_nologging();
844 set_pidexclude(getpid(), 1);
845 set_enable(1);
846 trace_enabled = 1;
847 need_new_map = 1;
848
849 loop_cnt = 0;
850 start_time = time((long *)0);
851 refresh_time = start_time;
852
853 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
854 {
855 /* the realtime band */
856 if(set_time_constraint_policy() != KERN_SUCCESS)
857 quit("Failed to set realtime policy.\n");
858 }
859
860 for (;;) {
861 curr_time = time((long *)0);
862
863 if (curr_time >= refresh_time) {
864 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
865 {
866 /* set standard timeshare policy during screen update */
867 if(set_standard_policy() != KERN_SUCCESS)
868 quit("Failed to set standard policy.\n");
869 }
870 screen_update((FILE *)0);
871 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
872 {
873 /* set back to realtime band */
874 if(set_time_constraint_policy() != KERN_SUCCESS)
875 quit("Failed to set time_constraint policy.\n");
876 }
877 refresh_time = curr_time + 1;
878 }
879
880 timestamp1 = mach_absolute_time();
881 adeadline = timestamp1 + adelay;
882 mk_wait_until(adeadline);
883 timestamp2 = mach_absolute_time();
884
885 start = timestamp1;
886
887 stop = timestamp2;
888
889 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
890
891 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
892 continue;
893
894 if (elapsed_usecs < 100)
895 s_usec_10_bins[elapsed_usecs/10]++;
896 if (elapsed_usecs < 1000)
897 s_usec_100_bins[elapsed_usecs/100]++;
898 else if (elapsed_usecs < 10000)
899 s_msec_1_bins[elapsed_usecs/1000]++;
900 else if (elapsed_usecs < 50000)
901 s_msec_10_bins[elapsed_usecs/10000]++;
902 else
903 s_too_slow++;
904
905 if (elapsed_usecs > s_max_latency)
906 s_max_latency = elapsed_usecs;
907 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
908 s_min_latency = elapsed_usecs;
909 s_total_latency += elapsed_usecs;
910 s_total_samples++;
911
912 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
913 s_exceeded_threshold++;
914 loop_cnt++;
915
916 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
917 sample_sc(start, stop);
918 else {
919 if (loop_cnt >= sample_sc_now) {
920 sample_sc((long long)0, (long long)0);
921 loop_cnt = 0;
922 }
923 }
924 if (gotSIGWINCH) {
925 /*
926 No need to check for initscr error return.
927 We won't get here if it fails on the first call.
928 */
929 endwin();
930 clear();
931 refresh();
932
933 gotSIGWINCH = 0;
934 }
935 }
936}
937
938
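/* getdivisor: compute the factor that converts mach_absolute_time() ticks into microseconds, using mach_timebase_info(). */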
939void getdivisor()
940{
941 mach_timebase_info_data_t info;
942
943 (void) mach_timebase_info (&info);
944
945 divisor = ( (double)info.denom / (double)info.numer) * 1000;
946
947}
948
949/* This is the realtime band */
950static kern_return_t
951set_time_constraint_policy()
952{
953 kern_return_t result;
954 thread_time_constraint_policy_data_t info;
955 mach_msg_type_number_t count;
956 boolean_t get_default;
957
958 get_default = TRUE;
959 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
960 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
961 (thread_policy_t)&info, &count, &get_default);
962 if (result != KERN_SUCCESS)
963 return (result);
964
965 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
966 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
967
968 return (result);
969}
970
971/* This is the timeshare mode */
972static kern_return_t
973set_standard_policy()
974{
975 kern_return_t result;
976 thread_standard_policy_data_t info;
977 mach_msg_type_number_t count;
978 boolean_t get_default;
979
980 get_default = TRUE;
981 count = THREAD_STANDARD_POLICY_COUNT;
982 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
983 (thread_policy_t)&info, &count, &get_default);
984 if (result != KERN_SUCCESS)
985 return (result);
986
987 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
988 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
989
990 return (result);
991}
992
993
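/* read_command_map: fetch the kernel's thread-to-command-name map (KERN_KDTHRMAP) so trace entries can be labeled with process names. */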
994void read_command_map()
995{
996 size_t size;
997 int mib[6];
998
999 if (mapptr) {
1000 free(mapptr);
1001 mapptr = 0;
1002 }
1003 total_threads = bufinfo.nkdthreads;
1004 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1005 if (size)
1006 {
1007 if (mapptr = (kd_threadmap *) malloc(size))
1008 bzero (mapptr, size);
1009 else
1010 {
1011 printf("Thread map is not initialized -- this is not fatal\n");
1012 return;
1013 }
1014 }
1015
1016 /* Now read the threadmap */
1017 mib[0] = CTL_KERN;
1018 mib[1] = KERN_KDEBUG;
1019 mib[2] = KERN_KDTHRMAP;
1020 mib[3] = 0;
1021 mib[4] = 0;
1022 mib[5] = 0; /* no flags */
1023 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1024 {
1025 /* This is not fatal -- just means I can't map command strings */
1026
1027 printf("Can't read the thread map -- this is not fatal\n");
1028 free(mapptr);
1029 mapptr = 0;
1030 return;
1031 }
1032 return;
1033}
1034
1035
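/* create_map_entry: record a thread/command-name pair in the map, reusing an existing or invalid slot when possible and growing the table otherwise. */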
1036void create_map_entry(int thread, char *command)
1037{
1038 int i, n;
1039 kd_threadmap *map;
1040
1041 if (!mapptr)
1042 return;
1043
1044 for (i = 0, map = 0; !map && i < total_threads; i++)
1045 {
1046 if (mapptr[i].thread == thread )
1047 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1048 }
1049
1050 if (!map) /* look for invalid entries that I can reuse*/
1051 {
1052 for (i = 0, map = 0; !map && i < total_threads; i++)
1053 {
1054 if (mapptr[i].valid == 0 )
1055 map = &mapptr[i]; /* Reuse this invalid entry */
1056 }
1057 }
1058
1059 if (!map)
1060 {
1061 /* If we reach here, then this is a new thread and
1062 * there are no invalid entries to reuse
1063 * Double the size of the thread map table.
1064 */
1065
1066 n = total_threads * 2;
1067 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1068 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1069 map = &mapptr[total_threads];
1070 total_threads = n;
1071#if 0
1072 if (log_fp)
1073 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1074#endif
1075 }
1076#if 0
1077 if (log_fp)
1078 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1079#endif
1080 map->valid = 1;
1081 map->thread = thread;
1082 /*
1083 The trace entry that returns the command name will hold
1084 at most, MAXCOMLEN chars, and in that case, is not
1085 guaranteed to be null terminated.
1086 */
1087 (void)strncpy (map->command, command, MAXCOMLEN);
1088 map->command[MAXCOMLEN] = '\0';
1089}
1090
1091
1092kd_threadmap *find_thread_map(int thread)
1093{
1094 int i;
1095 kd_threadmap *map;
1096
1097 if (!mapptr)
1098 return((kd_threadmap *)0);
1099
1100 for (i = 0; i < total_threads; i++)
1101 {
1102 map = &mapptr[i];
1103 if (map->valid && (map->thread == thread))
1104 {
1105 return(map);
1106 }
1107 }
1108 return ((kd_threadmap *)0);
1109}
1110
1111void
1112kill_thread_map(int thread)
1113{
1114 kd_threadmap *map;
1115
1116 if (map = find_thread_map(thread)) {
1117
1118#if 0
1119 if (log_fp)
1120 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1121#endif
1122 map->valid = 0;
1123 map->thread = 0;
1124 map->command[0] = '\0';
1125 }
1126}
1127
1128
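/* find_thread: look up the th_info entry for a thread, optionally restricted to one of two event types (type1 == 0 matches any type). */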
1129struct th_info *find_thread(int thread, int type1, int type2) {
1130 struct th_info *ti;
1131
1132 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1133 if (ti->thread == thread) {
1134 if (type1 == 0)
1135 return(ti);
1136 if (type1 == ti->type)
1137 return(ti);
1138 if (type2 == ti->type)
1139 return(ti);
1140 }
1141 }
1142 return ((struct th_info *)0);
1143}
1144
1145
1146char *find_code(type)
1147{
1148 int i;
1149
1150 for (i = 0; i < num_of_codes; i++) {
1151 if (codes_tab[i].type == type)
1152 return(codes_tab[i].name);
1153 }
1154 return ((char *)0);
1155}
1156
1157
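/*
   sample_sc: read the kdebug trace buffer and walk its entries; events that
   fall inside the late-wakeup window [start, stop] are decoded and, when
   logging, written to the log file along with the interrupt latencies
   measured at DECR_TRAP.
*/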
1158void sample_sc(uint64_t start, uint64_t stop)
1159{
1160 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1161 uint64_t now;
1162 int count, i;
1163 int first_entry = 1;
1164 char command[32];
1165 double timestamp, last_timestamp, delta, start_bias;
1166 void read_command_map();
1167
1168 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1169 {
1170 /* set standard timeshare policy when logging */
1171 if(set_standard_policy() != KERN_SUCCESS)
1172 quit("Failed to set standard policy.\n");
1173 }
1174
1175 /* Get kernel buffer information */
1176 get_bufinfo(&bufinfo);
1177
1178 if (need_new_map) {
1179 read_command_map();
1180 need_new_map = 0;
1181 }
1182 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1183 mib[0] = CTL_KERN;
1184 mib[1] = KERN_KDEBUG;
1185 mib[2] = KERN_KDREADTR;
1186 mib[3] = 0;
1187 mib[4] = 0;
1188 mib[5] = 0; /* no flags */
1189
1190 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1191 quit("trace facility failure, KERN_KDREADTR\n");
1192
1193 count = needed;
1194
1195 if (bufinfo.flags & KDBG_WRAPPED) {
1196 for (i = 0; i < cur_max; i++) {
1197 th_state[i].thread = 0;
1198 th_state[i].type = -1;
1199 th_state[i].pathptr = (long *)0;
1200 th_state[i].pathname[0] = 0;
1201 }
1202 cur_max = 0;
1203 need_new_map = 1;
1204
1205 set_enable(0);
1206 set_enable(1);
1207
1208 if (log_fp) {
1209 double latency;
1210
1211 latency = (double)(stop - start) / divisor;
1212 latency -= (double)num_of_usecs_to_sleep;
1213
1214 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1215 &(ctime(&curr_time)[0]), latency, count);
1216 }
1217 }
1218 end_of_sample = &((kd_buf *)my_buffer)[count];
1219
1220 /* Always reinitialize the DECR_TRAP array */
1221 for (i=0; i < hi.avail_cpus; i++)
1222 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1223
1224 last_mach_sched = (kd_buf *)0;
1225
1226 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1227 int debugid, thread, cpunum;
1228 int type, clen, mode;
1229 int len;
1230 char *p;
1231 long *sargptr;
1232 kd_buf *cur_kd;
1233 double i_latency;
1234 struct th_info *ti;
1235 char command1[32];
1236 char sched_info[64];
1237 kd_threadmap *map;
1238 kd_threadmap *find_thread_map();
1239 double handle_decrementer();
1240 kd_buf *log_decrementer();
1241 int check_for_thread_update();
1242 void enter_syscall();
1243 void exit_syscall();
1244 void print_entry();
1245
1246 thread = kd->arg5 & KDBG_THREAD_MASK;
1247 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1248 debugid = kd->debugid;
1249 type = kd->debugid & DBG_FUNC_MASK;
1250
1251 if (check_for_thread_update(thread, type, kd))
1252 continue;
1253
1254 if (type == DECR_TRAP)
1255 i_latency = handle_decrementer(kd);
1256
1257 now = kd->timestamp;
1258
1259 timestamp = ((double)now) / divisor;
1260
1261 if (now < start || now > stop) {
1262 if (debugid & DBG_FUNC_START)
1263 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1264 else if (debugid & DBG_FUNC_END)
1265 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1266 else if (type == DECR_TRAP) {
1267 cur_kd = kd;
1268 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1269 start_kd = last_decrementer_kd[cpunum];
1270 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1271 if (kd >= end_of_sample)
1272 break;
1273 }
1274 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1275 {
1276 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1277 last_decrementer_kd[cpunum] = kd;
1278 }
1279 else
1280 last_decrementer_kd[cpunum] = cur_kd;
1281 }
1282 continue;
1283 }
1284 if (first_entry) {
1285 double latency;
1286 char buf1[132];
1287 char buf2[132];
1288
1289 latency = (double)(stop - start) / divisor;
1290 latency -= (double)num_of_usecs_to_sleep;
1291
1292 if (my_pri == -1)
1293 sprintf(buf2, "default");
1294 else
1295 sprintf(buf2, "%d", my_pri);
1296 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1297 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1298 clen = strlen(buf1);
1299 memset(buf2, '-', clen);
1300 buf2[clen] = 0;
1301
1302 if (log_fp) {
1303 fprintf(log_fp, "\n\n%s\n", buf2);
1304 fprintf(log_fp, "%s\n\n", buf1);
1305 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1306 }
1307 start_bias = ((double)start) / divisor;
1308 last_timestamp = timestamp;
1309 first_entry = 0;
1310 }
1311 delta = timestamp - last_timestamp;
1312
1313 if (map = find_thread_map(thread))
1314 strcpy(command, map->command);
1315 else
1316 command[0] = 0;
1317
1318 switch (type) {
1319
1320 case CQ_action:
1321 if (log_fp) {
1322 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1323 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1324 }
1325 last_timestamp = timestamp;
1326 break;
1327
1328 case TES_action:
1329 if (log_fp) {
1330 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1331 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1332 }
1333
1334 last_timestamp = timestamp;
1335 break;
1336
1337 case IES_action:
1338 if (log_fp) {
1339 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1340 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1341 }
1342
1343 last_timestamp = timestamp;
1344 break;
1345
1346 case IES_filter:
1347 if (log_fp) {
1348 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1349 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1350 }
1351
1352 last_timestamp = timestamp;
1353 break;
1354
1355 case DECR_TRAP:
1356 last_decrementer_kd[cpunum] = kd;
1357
1358 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1359 p = "*";
1360 else
1361 p = " ";
1362
1363 mode = 1;
1364
1365 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1366 if (ti->type == -1 && strcmp(command, "kernel_task"))
1367 mode = 0;
1368 }
1369
1370 if (log_fp) {
1371 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1372 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1373 }
1374
1375 last_timestamp = timestamp;
1376 break;
1377
1378 case DECR_SET:
1379 if (log_fp) {
1380 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1381 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1382 }
1383
1384 last_timestamp = timestamp;
1385 break;
1386
1387 case MACH_sched:
1388 case MACH_stkhandoff:
1389 last_mach_sched = kd;
1390
1391 if (map = find_thread_map(kd->arg2))
1392 strcpy(command1, map->command);
1393 else
1394 sprintf(command1, "%-8x", kd->arg2);
1395
1396 if (ti = find_thread(kd->arg2, 0, 0)) {
1397 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1398 p = "U";
1399 else
1400 p = "K";
1401 } else
1402 p = "*";
1403 memset(sched_info, ' ', sizeof(sched_info));
1404
1405 sprintf(sched_info, "%14.14s", command);
1406 clen = strlen(sched_info);
1407 sched_info[clen] = ' ';
1408
1409 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1410 clen = strlen(sched_info);
1411 sched_info[clen] = ' ';
1412
1413 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1414
1415 if (log_fp) {
1416 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1417 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1418 }
1419
1420 last_timestamp = timestamp;
1421 break;
1422
1423 case VFS_LOOKUP:
1424 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1425 if (cur_max >= MAX_THREADS)
1426 continue;
1427 ti = &th_state[cur_max++];
1428
1429 ti->thread = thread;
1430 ti->type = -1;
1431 ti->pathptr = (long *)0;
1432 ti->child_thread = 0;
1433 }
1434 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1435 {
1436 if (!ti->pathptr) {
1437 ti->arg1 = kd->arg1;
1438 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1439 sargptr = (long *)&ti->pathname[0];
1440
1441 *sargptr++ = kd->arg2;
1442 *sargptr++ = kd->arg3;
1443 *sargptr++ = kd->arg4;
1444 ti->pathptr = sargptr;
1445
1446 } else {
1447 sargptr = ti->pathptr;
1448
1449 /*
1450 We don't want to overrun our pathname buffer if the
1451 kernel sends us more VFS_LOOKUP entries than we can
1452 handle.
1453 */
1454
1455 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1456 {
1457 kd++;
1458 continue;
1459 }
1460
1461 /*
1462 We need to detect consecutive vfslookup entries.
1463 So, if we get here and find a START entry,
1464 fake the pathptr so we can bypass all further
1465 vfslookup entries.
1466 */
1467
1468 if (kd->debugid & DBG_FUNC_START)
1469 {
1470 ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1471 }
1472 else
1473 {
1474 *sargptr++ = kd->arg1;
1475 *sargptr++ = kd->arg2;
1476 *sargptr++ = kd->arg3;
1477 *sargptr++ = kd->arg4;
1478 ti->pathptr = sargptr;
1479 }
1480 }
1481 kd++;
1482 }
1483
1484 kd--;
1485
1486 /* print the tail end of the pathname */
1487 len = strlen(ti->pathname);
1488 if (len > 42)
1489 len -= 42;
1490 else
1491 len = 0;
1492
1493 if (log_fp) {
1494 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1495 timestamp - start_bias, delta, "VFS_LOOKUP",
1496 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1497 }
1498
1499 last_timestamp = timestamp;
1500 break;
1501
1502 default:
1503 if (debugid & DBG_FUNC_START)
1504 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1505 else if (debugid & DBG_FUNC_END)
1506 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1507 else
1508 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1509
1510 last_timestamp = timestamp;
1511 break;
1512 }
1513 }
1514 if (last_mach_sched && log_fp)
1515 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1516#if 0
1517 if (first_entry == 0 && log_fp)
1518 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1519#endif
1520 if (log_fp)
1521 fflush(log_fp);
1522
1523 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1524 {
1525 /* set back to realtime band */
1526 if(set_time_constraint_policy() != KERN_SUCCESS)
1527 quit("Failed to set time_constraint policy.\n");
1528 }
1529}
1530
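/* enter_syscall: handle a DBG_FUNC_START event -- optionally print it, then remember the event type and start time in the thread's th_info entry. */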
1531void
1532enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1533{
1534 struct th_info *ti;
1535 int i;
1536 int cpunum;
1537 char *p;
1538
1539 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1540
1541 if (print_info && fp) {
1542 if (p = find_code(type)) {
1543 if (type == INTERRUPT) {
1544 int mode = 1;
1545
1546 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1547 if (ti->type == -1 && strcmp(command, "kernel_task"))
1548 mode = 0;
1549 }
1550
1551 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1552 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1553 } else if (type == MACH_vmfault) {
1554 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1555 timestamp - bias, delta, p, thread, cpunum, command);
1556 } else {
1557 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1558 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1559 thread, cpunum, command);
1560 }
1561 } else {
1562 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1563 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1564 thread, cpunum, command);
1565 }
1566 }
1567 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1568 if (cur_max >= MAX_THREADS) {
1569 static int do_this_once = 1;
1570
1571 if (do_this_once) {
1572 for (i = 0; i < cur_max; i++) {
1573 if (!fp)
1574 break;
1575 fprintf(fp, "thread = %x, type = %x\n",
1576 th_state[i].thread, th_state[i].type);
1577 }
1578 do_this_once = 0;
1579 }
1580 return;
1581
1582 }
1583 ti = &th_state[cur_max++];
1584
1585 ti->thread = thread;
1586 ti->child_thread = 0;
1587 }
1588 if (type != BSC_exit)
1589 ti->type = type;
1590 else
1591 ti->type = -1;
1592 ti->stime = timestamp;
1593 ti->pathptr = (long *)0;
1594
1595#if 0
1596 if (print_info && fp)
1597 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1598#endif
1599}
1600
1601
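/* exit_syscall: handle a DBG_FUNC_END event -- optionally print it with the elapsed time since the matching start, then clear the thread's pending type. */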
1602void
1603exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1604{
1605 struct th_info *ti;
1606 int cpunum;
1607 char *p;
1608
1609 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1610 ti = find_thread(thread, type, type);
1611#if 0
1612 if (print_info && fp)
1613 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1614#endif
1615 if (print_info && fp) {
1616 if (ti)
1617 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1618 else
1619 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1620
1621 if (p = find_code(type)) {
1622 if (type == INTERRUPT) {
1623 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1624 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1625 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1626 p, fault_name[kd->arg2], kd->arg1,
1627 thread, cpunum, command);
1628 } else {
1629 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1630 p, kd->arg1, kd->arg2,
1631 thread, cpunum, command);
1632 }
1633 } else {
1634 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1635 type, kd->arg1, kd->arg2,
1636 thread, cpunum, command);
1637 }
1638 }
1639 if (ti == (struct th_info *)0) {
1640 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1641 if (cur_max >= MAX_THREADS)
1642 return;
1643 ti = &th_state[cur_max++];
1644
1645 ti->thread = thread;
1646 ti->child_thread = 0;
1647 ti->pathptr = (long *)0;
1648 }
1649 }
1650 ti->type = -1;
1651}
1652
1653void
1654print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1655{
1656 char *p;
1657 int cpunum;
1658
1659 if (!fp)
1660 return;
1661
1662 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1663
1664#if 0
1665 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1666#endif
1667 if (p = find_code(type)) {
1668 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1669 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1670 thread, cpunum, command);
1671 } else {
1672 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1673 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1674 thread, cpunum, command);
1675 }
1676}
1677
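/* check_for_thread_update: consume the thread-creation and exec trace points that carry command names, updating the thread map; returns 1 if the event was handled here. */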
1678int
1679check_for_thread_update(int thread, int type, kd_buf *kd)
1680{
1681 struct th_info *ti;
1682 void create_map_entry();
1683
1684 switch (type) {
1685
1686 case TRACE_DATA_NEWTHREAD:
1687 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1688 if (cur_max >= MAX_THREADS)
1689 return (1);
1690 ti = &th_state[cur_max++];
1691
1692 ti->thread = thread;
1693 ti->type = -1;
1694 ti->pathptr = (long *)0;
1695 }
1696 ti->child_thread = kd->arg1;
1697 return (1);
1698
1699 case TRACE_STRING_NEWTHREAD:
1700 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1701 return (1);
1702 if (ti->child_thread == 0)
1703 return (1);
1704 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1705
1706 ti->child_thread = 0;
1707 return (1);
1708
1709 case TRACE_STRING_EXEC:
1710 create_map_entry(thread, (char *)&kd->arg1);
1711 return (1);
1712
1713 }
1714 return (0);
1715}
1716
1717
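/*
   log_decrementer: when an interrupt latency exceeds the threshold, dump the
   trace entries leading up to and following the DECR_TRAP (bounded by
   MAX_LOG_COUNT) to the log file.
*/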
1718kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1719{
1720 kd_buf *kd, *kd_start, *kd_stop;
1721 int kd_count; /* Limit the boundary of kd_start */
1722 double timestamp, last_timestamp, delta, start_bias;
1723 int thread, cpunum;
1724 int debugid, type, clen;
1725 int len;
1726 uint64_t now;
1727 struct th_info *ti;
1728 long *sargptr;
1729 char *p;
1730 char command[32];
1731 char command1[32];
1732 char sched_info[64];
1733 char buf1[128];
1734 char buf2[128];
1735 kd_threadmap *map;
1736 kd_threadmap *find_thread_map();
1737
1738 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1739 clen = strlen(buf1);
1740 memset(buf2, '-', clen);
1741 buf2[clen] = 0;
1742 fprintf(log_fp, "\n\n%s\n", buf2);
1743 fprintf(log_fp, "%s\n\n", buf1);
1744
1745 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1746
1747 thread = kd_beg->arg5 & KDBG_THREAD_MASK;
1748 cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1: 0;
1749
1750 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1751 if (kd_count == MAX_LOG_COUNT)
1752 break;
1753
1754 if((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)
1755 continue;
1756
1757 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1758 break;
1759
1760 if((kd_start->arg5 & KDBG_THREAD_MASK) != thread)
1815bff5
A
1761 break;
1762 }
1763
1764 if (kd_start < (kd_buf *)my_buffer)
1765 kd_start = (kd_buf *)my_buffer;
1766
1767 thread = kd_end->arg5 & KDBG_THREAD_MASK;
1768
1769 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1770
1771 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1772 break;
1773
1774 if((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)
1775 continue;
1776
1777 if((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)
1778 break;
1779 }
1780
1781 if (kd_stop >= end_of_sample)
1782 kd_stop = end_of_sample - 1;
1783
1784 now = kd_start->timestamp;
1785 timestamp = ((double)now) / divisor;
1786
1787 for (kd = kd_start; kd <= kd_stop; kd++) {
1788 type = kd->debugid & DBG_FUNC_MASK;
1789
1790 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
1791 if (ti->stime >= timestamp)
1792 ti->type = -1;
1793 }
1794 }
1795 for (kd = kd_start; kd <= kd_stop; kd++) {
1796 int mode;
1797
1798 thread = kd->arg5 & KDBG_THREAD_MASK;
1799 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1800 debugid = kd->debugid;
1801 type = kd->debugid & DBG_FUNC_MASK;
1802
1803 now = kd->timestamp;
1804
1805 timestamp = ((double)now) / divisor;
1806
1807 if (kd == kd_start) {
1808 start_bias = timestamp;
1809 last_timestamp = timestamp;
1810 }
1811 delta = timestamp - last_timestamp;
1812
1813 if (map = find_thread_map(thread))
1814 strcpy(command, map->command);
1815 else
1816 command[0] = 0;
1817
1818
1819 switch (type) {
1820
1821 case CQ_action:
1822 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1823 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1824
1825 last_timestamp = timestamp;
1826 break;
1827
1828 case DECR_TRAP:
1829 if ((int)(kd->arg1) >= 0)
1830 i_latency = 0;
1831 else
1832 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1833
1834 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1835 p = "*";
1836 else
1837 p = " ";
1838
1839 mode = 1;
1840
1841 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1842 if (ti->type == -1 && strcmp(command, "kernel_task"))
1843 mode = 0;
1844 }
1845 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1846 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1847
1848 last_timestamp = timestamp;
1849 break;
1850
1851 case DECR_SET:
1852 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1853 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1854 "DECR_SET", thread, cpunum, command);
1855
1856 last_timestamp = timestamp;
1857 break;
1858
1859 case MACH_sched:
1860 case MACH_stkhandoff:
1861 if (map = find_thread_map(kd->arg2))
1862 strcpy(command1, map->command);
1863 else
1864 sprintf(command1, "%-8x", kd->arg2);
1865
1866 if (ti = find_thread(kd->arg2, 0, 0)) {
1867 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1868 p = "U";
1869 else
1870 p = "K";
1871 } else
1872 p = "*";
1873 memset(sched_info, ' ', sizeof(sched_info));
1874
1875 sprintf(sched_info, "%14.14s", command);
1876 clen = strlen(sched_info);
1877 sched_info[clen] = ' ';
1878
1879 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1880 clen = strlen(sched_info);
1881 sched_info[clen] = ' ';
1882
1883 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1884
1885 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1886 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1887
1888 last_timestamp = timestamp;
1889 break;
1890
1891 case VFS_LOOKUP:
1892 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1893 if (cur_max >= MAX_THREADS)
1894 continue;
1895 ti = &th_state[cur_max++];
1896
1897 ti->thread = thread;
1898 ti->type = -1;
1899 ti->pathptr = (long *)0;
1900 ti->child_thread = 0;
1901 }
1902
1903 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1904 {
1905 if (!ti->pathptr) {
1906 ti->arg1 = kd->arg1;
1907 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1908 sargptr = (long *)&ti->pathname[0];
1909
1910 *sargptr++ = kd->arg2;
1911 *sargptr++ = kd->arg3;
1912 *sargptr++ = kd->arg4;
1913 ti->pathptr = sargptr;
1914
1915 } else {
1916 sargptr = ti->pathptr;
1917
1918 /*
1919 We don't want to overrun our pathname buffer if the
1920 kernel sends us more VFS_LOOKUP entries than we can
1921 handle.
1922 */
1923
1924 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1925 {
1926 kd++;
1927 continue;
1928 }
1929
1930 /*
1931 We need to detect consecutive vfslookup entries.
1932 So, if we get here and find a START entry,
1933 fake the pathptr so we can bypass all further
1934 vfslookup entries.
1935 */
1936
1937 if (kd->debugid & DBG_FUNC_START)
1938 {
1939 ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1940 }
1941 else
1942 {
1943 *sargptr++ = kd->arg1;
1944 *sargptr++ = kd->arg2;
1945 *sargptr++ = kd->arg3;
1946 *sargptr++ = kd->arg4;
1947 ti->pathptr = sargptr;
1948 }
1949 }
1950 kd++;
1951 }
1952
1953 kd--;
1954 /* print the tail end of the pathname */
1955 len = strlen(ti->pathname);
1956 if (len > 42)
1957 len -= 42;
1958 else
1959 len = 0;
1960
1961 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1962 timestamp - start_bias, delta, "VFS_LOOKUP",
1963 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1964
1965 last_timestamp = timestamp;
1966 break;
1967
1968 default:
1969 if (debugid & DBG_FUNC_START)
1970 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1971 else if (debugid & DBG_FUNC_END)
1972 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1973 else
1974 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1975
1976 last_timestamp = timestamp;
1977 break;
1978 }
1979 }
1980 return(kd_stop);
1981}
1982
1983
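/* handle_decrementer: convert a DECR_TRAP entry's argument into an interrupt latency in microseconds and update the interrupt-side histogram and statistics. */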
1984double handle_decrementer(kd_buf *kd)
1985{
1986 double latency;
1987 int elapsed_usecs;
1988
1989 if ((int)(kd->arg1) >= 0)
1990 latency = 1;
1991 else
1992 latency = (((double)(-1 - kd->arg1)) / divisor);
1993 elapsed_usecs = (int)latency;
1994
1995 if (elapsed_usecs < 100)
1996 i_usec_10_bins[elapsed_usecs/10]++;
1997 if (elapsed_usecs < 1000)
1998 i_usec_100_bins[elapsed_usecs/100]++;
1999 else if (elapsed_usecs < 10000)
2000 i_msec_1_bins[elapsed_usecs/1000]++;
2001 else if (elapsed_usecs < 50000)
2002 i_msec_10_bins[elapsed_usecs/10000]++;
2003 else
2004 i_too_slow++;
2005
2006 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2007 i_exceeded_threshold++;
2008 if (elapsed_usecs > i_max_latency)
2009 i_max_latency = elapsed_usecs;
2010 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2011 i_min_latency = elapsed_usecs;
2012 i_total_latency += elapsed_usecs;
2013 i_total_samples++;
2014
2015 return (latency);
2016}
2017
2018
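/* init_code_file: load the debugid-to-name table from the trace codes file into codes_tab. */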
2019void init_code_file()
2020{
2021 FILE *fp;
2022 int i, n, cnt, code;
2023 char name[128];
2024
2025 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2026 if (log_fp)
2027 fprintf(log_fp, "open of %s failed\n", code_file);
2028 return;
2029 }
2030 n = fscanf(fp, "%d\n", &cnt);
2031
2032 if (n != 1) {
2033 if (log_fp)
2034 fprintf(log_fp, "bad format found in %s\n", code_file);
2035 return;
2036 }
2037 for (i = 0; i < MAX_ENTRIES; i++) {
2038 n = fscanf(fp, "%x%s\n", &code, name);
2039
2040 if (n != 2)
2041 break;
2042
2043 strncpy(codes_tab[i].name, name, 32);
2044 codes_tab[i].type = code;
2045 }
2046 num_of_codes = i;
2047
2048 fclose(fp);
2049}
2050
2051
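/*
   do_kernel_nm: run nm on the kernel image, parse the output into
   kern_sym_tbl, and remove the temporary file; the table is used later to
   translate kernel PCs into symbol names.
*/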
2052void
2053do_kernel_nm()
2054{
2055 int i, len;
2056 FILE *fp = (FILE *)0;
2057 char tmp_nm_file[128];
2058 char tmpstr[1024];
2059 int inchr;
2060
2061 bzero(tmp_nm_file, 128);
2062 bzero(tmpstr, 1024);
2063
2064 /* Build the temporary nm file path */
2065 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2066
2067 /* Build the nm command and create a tmp file with the output*/
2068 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2069 kernelpath, tmp_nm_file);
2070 system(tmpstr);
2071
2072 /* Parse the output from the nm command */
2073 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2074 {
2075 /* Hmmm, let's not treat this as fatal */
2076 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2077 return;
2078 }
2079
2080 /* Count the number of symbols in the nm symbol table */
2081 kern_sym_count=0;
2082 while ( (inchr = getc(fp)) != -1)
2083 {
2084 if (inchr == '\n')
2085 kern_sym_count++;
2086 }
2087
2088 rewind(fp);
2089
2090 /* Malloc the space for symbol table */
2091 if (kern_sym_count > 0)
2092 {
2093 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2094 if (!kern_sym_tbl)
2095 {
2096 /* Hmmm, let's not treat this as fatal */
2097 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2098 }
2099 else
2100 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2101 }
2102 else
2103 {
2104 /* Hmmm, let's not treat this as fatal */
2105 fprintf(stderr, "No kernel symbol table \n");
2106 }
2107
2108 for (i=0; i<kern_sym_count; i++)
2109 {
2110 bzero(tmpstr, 1024);
2111 if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2112 break;
2113 else
2114 {
2115 len = strlen(tmpstr);
2116 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2117
2118 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2119 {
2120 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2121 kern_sym_tbl[i].k_sym_name = (char *)0;
2122 len = 0;
2123 }
2124 else
2125 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2126
2127 kern_sym_tbl[i].k_sym_len = len;
2128 }
2129 } /* end for */
2130
2131 if (i != kern_sym_count)
2132 {
2133 /* Hmmm, didn't build up entire table from nm */
2134 /* scrap the entire thing */
2135 if (kern_sym_tbl)
2136 free (kern_sym_tbl);
2137 kern_sym_tbl = (kern_sym_t *)0;
2138 kern_sym_count = 0;
2139 }
2140
2141 fclose(fp);
2142
2143 /* Remove the temporary nm file */
2144 unlink(tmp_nm_file);
2145
2146#if 0
2147 /* Dump the kernel symbol table */
2148 for (i=0; i < kern_sym_count; i++)
2149 {
2150 if (kern_sym_tbl[i].k_sym_name)
2151 printf ("[%d] 0x%x %s\n", i,
2152 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2153 else
2154 printf ("[%d] 0x%x %s\n", i,
2155 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2156 }
2157#endif
2158}
2159
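/* pc_to_string: translate a kernel PC into "symbol+offset" form via binary search of kern_sym_tbl; usermode addresses (mode == 0) are simply printed as hex. */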
2160char *
2161pc_to_string(unsigned int pc, int max_len, int mode)
2162{
2163 int ret;
2164 int len;
2165
2166 int binary_search();
2167
2168 if (mode == 0)
2169 {
2170 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2171 return(pcstring);
2172 }
2173
2174 ret=0;
2175 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2176
2177 if (ret == -1)
2178 {
2179 sprintf(pcstring, "0x%x", pc);
2180 return(pcstring);
2181 }
2182 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2183 {
2184 sprintf(pcstring, "0x%x", pc);
2185 return(pcstring);
2186 }
2187 else
2188 {
2189 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2190 len = max_len - 8;
2191
2192 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2193 sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
2194
2195 return (pcstring);
2196 }
2197}
2198
2199
2200/* Return -1 if not found, else return index */
2201int binary_search(list, low, high, addr)
2202kern_sym_t *list;
2203int low, high;
2204unsigned int addr;
2205{
2206 int mid;
2207
2208 mid = (low + high) / 2;
2209
2210 if (low > high)
2211 return (-1); /* failed */
2212 else if (low + 1 == high)
2213 {
2214 if (list[low].k_sym_addr <= addr &&
2215 addr < list[high].k_sym_addr)
2216 {
2217 /* We have a range match */
2218 return(low);
2219 }
2220 else if (list[high].k_sym_addr <= addr)
2221 {
2222 return(high);
2223 }
2224 else
2225 return(-1); /* Failed */
2226 }
2227 else if (addr < list[mid].k_sym_addr)
2228 {
2229 return(binary_search (list, low, mid, addr));
2230 }
2231 else
2232 {
2233 return(binary_search (list, mid, high, addr));
2234 }
2235}
2236
2237void
2238open_logfile(char *path)
2239{
2240 log_fp = fopen(path, "a");
2241
2242 if (!log_fp)
2243 {
2244 /* failed to open path */
2245 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2246 exit_usage();
2247 }
2248}