1/*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26
27/*
28 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
29*/
30
31#include <mach/mach.h>
32#include <stdlib.h>
33#include <stdio.h>
34#include <signal.h>
35#include <strings.h>
36#include <nlist.h>
37#include <fcntl.h>
38#include <string.h>
39
40#include <sys/types.h>
41#include <sys/param.h>
42#include <sys/time.h>
43
44#include <libc.h>
45#include <termios.h>
46#include <curses.h>
47#include <sys/ioctl.h>
48
49#ifndef KERNEL_PRIVATE
50#define KERNEL_PRIVATE
51#include <sys/kdebug.h>
52#undef KERNEL_PRIVATE
53#else
54#include <sys/kdebug.h>
55#endif /*KERNEL_PRIVATE*/
56
57#include <sys/sysctl.h>
58#include <errno.h>
59#include <err.h>
60
61#include <mach/host_info.h>
62#include <mach/mach_error.h>
63#include <mach/mach_types.h>
64#include <mach/message.h>
65#include <mach/mach_syscalls.h>
66#include <mach/clock_types.h>
67#include <mach/mach_time.h>
68
69#include <libkern/OSTypes.h>
70
71extern mach_port_t clock_port;
72
73#define KERN_KDPIDEX 14
74
75
76int s_usec_10_bins[10];
77int s_usec_100_bins[10];
78int s_msec_1_bins[10];
79int s_msec_10_bins[5];
80int s_too_slow;
81int s_max_latency;
82int s_min_latency = 0;
83long long s_total_latency = 0;
84int s_total_samples;
85long s_thresh_hold;
86int s_exceeded_threshold = 0;
87
88int i_usec_10_bins[10];
89int i_usec_100_bins[10];
90int i_msec_1_bins[10];
91int i_msec_10_bins[5];
92int i_too_slow;
93int i_max_latency;
94int i_min_latency = 0;
95long long i_total_latency = 0;
96int i_total_samples;
97long i_thresh_hold;
98int i_exceeded_threshold = 0;
99
100long start_time;
101long curr_time;
102long refresh_time;
103
104char *policy_name;
105int my_policy;
106int my_pri = -1;
107int num_of_usecs_to_sleep = 1000;
108
109char *kernelpath = (char *)0;
110char *code_file = (char *)0;
111
112typedef struct {
113 u_long k_sym_addr; /* kernel symbol address from nm */
114 u_int k_sym_len; /* length of kernel symbol string */
115 char *k_sym_name; /* kernel symbol string from nm */
116} kern_sym_t;
117
118kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
119int kern_sym_count; /* number of entries in nm table */
120char pcstring[128];
121
122#define UNKNOWN "Can't find symbol name"
123
124
125double divisor;
126int gotSIGWINCH = 0;
127int trace_enabled = 0;
128struct host_basic_info hi;
129
130
131#define SAMPLE_SIZE 300000
132
133int mib[6];
134size_t needed;
135char *my_buffer;
136
137kbufinfo_t bufinfo = {0, 0, 0};
138
139FILE *log_fp = (FILE *)0;
140int num_of_codes = 0;
141int need_new_map = 0;
142int total_threads = 0;
143kd_threadmap *mapptr = 0;
144
145#define MAX_ENTRIES 1024
146struct ct {
147 int type;
148 char name[32];
149} codes_tab[MAX_ENTRIES];
150
151/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
152#define NUMPARMS 23
153#define PATHLENGTH (NUMPARMS*sizeof(long))
154
155struct th_info {
156 int thread;
157 int type;
158 int child_thread;
159 int arg1;
160 double stime;
161 long *pathptr;
162 char pathname[PATHLENGTH + 1];
163};
164
165#define MAX_THREADS 512
166struct th_info th_state[MAX_THREADS];
167
168int cur_max = 0;
169
170#define TRACE_DATA_NEWTHREAD 0x07000004
171#define TRACE_STRING_NEWTHREAD 0x07010004
172#define TRACE_STRING_EXEC 0x07010008
173
174#define INTERRUPT 0x01050000
175#define DECR_TRAP 0x01090000
176#define DECR_SET 0x01090004
177#define MACH_vmfault 0x01300000
178#define MACH_sched 0x01400000
179#define MACH_stkhandoff 0x01400008
180#define VFS_LOOKUP 0x03010090
181#define BSC_exit 0x040C0004
182#define IES_action 0x050b0018
183#define IES_filter 0x050b001c
184#define TES_action 0x050c0010
185#define CQ_action 0x050d0018
186
187
188#define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
189#define DBG_FUNC_MASK 0xfffffffc
190
191#define DBG_ZERO_FILL_FAULT 1
192#define DBG_PAGEIN_FAULT 2
193#define DBG_COW_FAULT 3
194#define DBG_CACHE_HIT_FAULT 4
195
196char *fault_name[5] = {
197 "",
198 "ZeroFill",
199 "PageIn",
200 "COW",
201 "CacheHit",
202};
203
204char *pc_to_string();
205static kern_return_t set_time_constraint_policy(void);
206static kern_return_t set_standard_policy(void);
207
208int decrementer_val = 0; /* Value used to reset decrementer */
209int set_remove_flag = 1; /* By default, remove trace buffer */
210
211kd_buf **last_decrementer_kd; /* last DECR_TRAP per cpu */
212#define MAX_LOG_COUNT 30 /* limits the number of entries dumped in log_decrementer */
213
214int
215quit(s)
216char *s;
217{
218 void set_enable();
219 void set_rtcdec();
220 void set_remove();
221
222 if (trace_enabled)
223 set_enable(0);
224
225 /*
226 This flag is turned off when calling
227 quit() due to a set_remove() failure.
228 */
229 if (set_remove_flag)
230 set_remove();
231
232 if (decrementer_val)
233 set_rtcdec(0);
234
235 printf("latency: ");
236 if (s)
237 printf("%s", s);
238
239 exit(1);
240}
241
242void
243set_enable(int val)
244{
245 mib[0] = CTL_KERN;
246 mib[1] = KERN_KDEBUG;
247 mib[2] = KERN_KDENABLE; /* protocol */
248 mib[3] = val;
249 mib[4] = 0;
250 mib[5] = 0; /* no flags */
251
252 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
253 quit("trace facility failure, KERN_KDENABLE\n");
254}
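/*
 * All of the kdebug control wrappers in this file drive the same sysctl:
 * { CTL_KERN, KERN_KDEBUG, <operation>, <value>, 0, 0 }, with mib[2]
 * selecting the operation (enable, set buffer count, setup, remove, ...).
 * A minimal sketch of the shared shape -- kdebug_op() is illustrative only,
 * not a helper that exists in this tool:
 *
 *	static int kdebug_op(int op, int value, void *buf, size_t *sizep)
 *	{
 *		int name[6] = { CTL_KERN, KERN_KDEBUG, op, value, 0, 0 };
 *		return sysctl(name, 3, buf, sizep, NULL, 0);
 *	}
 */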
255
256void
257set_numbufs(int nbufs)
258{
259 mib[0] = CTL_KERN;
260 mib[1] = KERN_KDEBUG;
261 mib[2] = KERN_KDSETBUF;
262 mib[3] = nbufs;
263 mib[4] = 0;
264 mib[5] = 0; /* no flags */
265 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
266 quit("trace facility failure, KERN_KDSETBUF\n");
267
268 mib[0] = CTL_KERN;
269 mib[1] = KERN_KDEBUG;
270 mib[2] = KERN_KDSETUP;
271 mib[3] = 0;
272 mib[4] = 0;
273 mib[5] = 0; /* no flags */
274 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
275 quit("trace facility failure, KERN_KDSETUP\n");
276
277}
278
279void
280set_pidexclude(int pid, int on_off)
281{
282 kd_regtype kr;
283
284 kr.type = KDBG_TYPENONE;
285 kr.value1 = pid;
286 kr.value2 = on_off;
287 needed = sizeof(kd_regtype);
288 mib[0] = CTL_KERN;
289 mib[1] = KERN_KDEBUG;
290 mib[2] = KERN_KDPIDEX;
291 mib[3] = 0;
292 mib[4] = 0;
293 mib[5] = 0;
294
295 sysctl(mib, 3, &kr, &needed, NULL, 0);
296}
297
298set_rtcdec(decval)
299int decval;
300{kd_regtype kr;
301 int ret;
302 extern int errno;
303
304 kr.type = KDBG_TYPENONE;
305 kr.value1 = decval;
306 needed = sizeof(kd_regtype);
307 mib[0] = CTL_KERN;
308 mib[1] = KERN_KDEBUG;
309 mib[2] = KERN_KDSETRTCDEC; /* protocol */
310 mib[3] = 0; /* wildcard address family */
311 mib[4] = 0;
312 mib[5] = 0; /* no flags */
313
314 errno = 0;
315
316 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
317 {
318 decrementer_val = 0;
319 quit("trace facility failure, KERN_KDSETRTCDEC\n");
320 }
321}
322
323
324void
325get_bufinfo(kbufinfo_t *val)
326{
327 needed = sizeof (*val);
328 mib[0] = CTL_KERN;
329 mib[1] = KERN_KDEBUG;
330 mib[2] = KERN_KDGETBUF;
331 mib[3] = 0;
332 mib[4] = 0;
333 mib[5] = 0; /* no flags */
334
335 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
336 quit("trace facility failure, KERN_KDGETBUF\n");
337
338}
339
340void
341set_remove()
342{
343 extern int errno;
344
345 mib[0] = CTL_KERN;
346 mib[1] = KERN_KDEBUG;
347 mib[2] = KERN_KDREMOVE; /* protocol */
348 mib[3] = 0;
349 mib[4] = 0;
350 mib[5] = 0; /* no flags */
351
352 errno = 0;
353
354 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
355 {
356 set_remove_flag = 0;
357 if(errno == EBUSY)
358 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
359 else
360 quit("trace facility failure, KERN_KDREMOVE\n");
361 }
362}
363
364void
365set_init_nologging()
366{
367 /* When we aren't logging, only collect the DECR_TRAP trace points */
368 kd_regtype kr;
369 kr.type = KDBG_VALCHECK;
370 kr.value1 = DECR_TRAP;
371 kr.value2 = 0;
372 kr.value3 = 0;
373 kr.value4 = 0;
374 needed = sizeof(kd_regtype);
375 mib[0] = CTL_KERN;
376 mib[1] = KERN_KDEBUG;
377 mib[2] = KERN_KDSETREG;
378 mib[3] = 0;
379 mib[4] = 0;
380 mib[5] = 0; /* no flags */
381 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
382 quit("trace facility failure, KERN_KDSETREG\n");
383
384 mib[0] = CTL_KERN;
385 mib[1] = KERN_KDEBUG;
386 mib[2] = KERN_KDSETUP;
387 mib[3] = 0;
388 mib[4] = 0;
389 mib[5] = 0; /* no flags */
390
391 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
392 quit("trace facility failure, KERN_KDSETUP\n");
393}
394
395void
396set_init_logging()
397{ kd_regtype kr;
398
399 kr.type = KDBG_RANGETYPE;
400 kr.value1 = 0;
401 kr.value2 = -1;
402 needed = sizeof(kd_regtype);
403 mib[0] = CTL_KERN;
404 mib[1] = KERN_KDEBUG;
405 mib[2] = KERN_KDSETREG;
406 mib[3] = 0;
407 mib[4] = 0;
408 mib[5] = 0; /* no flags */
409
410 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
411 quit("trace facility failure, KERN_KDSETREG\n");
412
413 mib[0] = CTL_KERN;
414 mib[1] = KERN_KDEBUG;
415 mib[2] = KERN_KDSETUP;
416 mib[3] = 0;
417 mib[4] = 0;
418 mib[5] = 0; /* no flags */
419
420 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
421 quit("trace facility failure, KERN_KDSETUP\n");
422}
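/*
 * The two init routines above pick between this tool's trace modes:
 * set_init_nologging() installs a KDBG_VALCHECK filter so the kernel records
 * only DECR_TRAP events (enough to keep the interrupt-latency histogram fed),
 * while set_init_logging() installs a KDBG_RANGETYPE filter covering every
 * debugid (0 through -1), so a full trace is available to dump whenever a
 * latency threshold is exceeded.
 */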
423
424
425void sigwinch()
426{
427 gotSIGWINCH = 1;
428}
429
430void sigintr()
431{
432 void screen_update();
433
434 set_enable(0);
435 set_pidexclude(getpid(), 0);
436 screen_update(log_fp);
437 set_rtcdec(0);
438 set_remove();
439
440 exit(1);
441}
442
443void leave() /* exit under normal conditions -- signal handler */
444{
445 set_enable(0);
446 set_pidexclude(getpid(), 0);
447 set_rtcdec(0);
448 set_remove();
449
450 exit(1);
451}
452
453void
454screen_update(FILE *fp)
455{
456 int i;
457 int itotal, stotal;
458 int elapsed_secs;
459 int elapsed_mins;
460 int elapsed_hours;
461 unsigned int average_s_latency;
462 unsigned int average_i_latency;
463 char tbuf[256];
464
465 if (fp == (FILE *)0) {
466 erase();
467 move(0, 0);
468 } else
469 fprintf(fp,"\n\n===================================================================================================\n");
470 /*
471 * Display the current time.
472 * "ctime" always returns a string that looks like this:
473 *
474 * Sun Sep 16 01:03:52 1973
475 * 012345678901234567890123
476 * 1 2
477 *
478 * We want indices 11 thru 18 (length 8).
479 */
480 elapsed_secs = curr_time - start_time;
481 elapsed_hours = elapsed_secs / 3600;
482 elapsed_secs -= elapsed_hours * 3600;
483 elapsed_mins = elapsed_secs / 60;
484 elapsed_secs -= elapsed_mins * 60;
485
486 sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
487 elapsed_hours, elapsed_mins, elapsed_secs);
488 if (fp)
489 fprintf(fp, "%s", tbuf);
490 else
491 printw(tbuf);
492
493
494
495 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
496
497 if (fp)
498 fprintf(fp, "%s", tbuf);
499 else
500 printw(tbuf);
501
502 sprintf(tbuf, "---------------------------------------------\n");
503
504 if (fp)
505 fprintf(fp, "%s", tbuf);
506 else
507 printw(tbuf);
508
509
510
511 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
512
513 if (fp)
514 fprintf(fp, "%s", tbuf);
515 else
516 printw(tbuf);
517
518
519
520 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
521 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
522
523 if (fp)
524 fprintf(fp, "%s", tbuf);
525 else
526 printw(tbuf);
527
528 stotal += s_usec_10_bins[i];
529 itotal += i_usec_10_bins[i];
530 }
531 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
532
533 if (fp)
534 fprintf(fp, "%s", tbuf);
535 else
536 printw(tbuf);
537
538
539
540 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
541 if (i < 9)
542 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
543 else
544 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
545
546 if (fp)
547 fprintf(fp, "%s", tbuf);
548 else
549 printw(tbuf);
550
551 stotal += s_usec_100_bins[i];
552 itotal += i_usec_100_bins[i];
553 }
554 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
555
556 if (fp)
557 fprintf(fp, "%s", tbuf);
558 else
559 printw(tbuf);
560
561
562
563 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
564 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
565
566 if (fp)
567 fprintf(fp, "%s", tbuf);
568 else
569 printw(tbuf);
570
571 stotal += s_msec_1_bins[i];
572 itotal += i_msec_1_bins[i];
573 }
574 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
575
576 if (fp)
577 fprintf(fp, "%s", tbuf);
578 else
579 printw(tbuf);
580
581
582
583
584 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
585 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
586
587 if (fp)
588 fprintf(fp, "%s", tbuf);
589 else
590 printw(tbuf);
591
592 stotal += s_msec_10_bins[i];
593 itotal += i_msec_10_bins[i];
594 }
595 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
596
597 if (fp)
598 fprintf(fp, "%s", tbuf);
599 else
600 printw(tbuf);
601
602 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
603
604 if (fp)
605 fprintf(fp, "%s", tbuf);
606 else
607 printw(tbuf);
608
609
610
611 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
612
613 if (fp)
614 fprintf(fp, "%s", tbuf);
615 else
616 printw(tbuf);
617
618 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
619
620 if (fp)
621 fprintf(fp, "%s", tbuf);
622 else
623 printw(tbuf);
624
625 if (s_total_samples)
626 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
627 else
628 average_s_latency = 0;
629
630 if (i_total_samples)
631 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
632 else
633 average_i_latency = 0;
634
635 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
636
637 if (fp)
638 fprintf(fp, "%s", tbuf);
639 else
640 printw(tbuf);
641
642 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
643
644 if (fp)
645 fprintf(fp, "%s", tbuf);
646 else
647 printw(tbuf);
648
649 if (fp == (FILE *)0)
650 refresh();
651 else
652 fflush(fp);
653}
654
655int
656exit_usage()
657{
658
659 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
660 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
661 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
662
663 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
664 fprintf(stderr, " -c specify name of codes file\n");
665 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
666 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
667 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
668 fprintf(stderr, " -s set sleep time in microseconds\n");
669 fprintf(stderr, " -d set decrementer in microseconds.\n");
670 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
671
672 fprintf(stderr, "\nlatency must be run as root\n\n");
673
674 exit(1);
675}
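/*
 * Illustrative invocations only (the threshold values here are made up):
 *
 *	latency -s 1000
 *		timeshare policy, 1ms sleep requests, screen output only
 *
 *	latency -rt -l /tmp/latency.log -st 120 -it 60
 *		realtime policy; log the trace whenever scheduling latency
 *		exceeds 120us or interrupt latency exceeds 60us
 */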
676
677
678
679main(argc, argv)
680int argc;
681char *argv[];
682{
683 mach_timespec_t remain;
684 uint64_t start, stop;
685 uint64_t timestamp1;
686 uint64_t timestamp2;
687 uint64_t adeadline, adelay;
688 double fdelay;
689 int elapsed_usecs;
690 double nanosecs_to_sleep;
691 int loop_cnt, sample_sc_now;
692 int decrementer_usec = 0;
693 kern_return_t ret;
694 int size;
695 int i, count;
696 host_name_port_t host;
697 void getdivisor();
698 void sample_sc();
699 void init_code_file();
700 void do_kernel_nm();
701 void open_logfile();
702
703 my_policy = THREAD_STANDARD_POLICY;
704 policy_name = "TIMESHARE";
705
706 while (argc > 1) {
707 if (strcmp(argv[1], "-rt") == 0) {
708 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
709 policy_name = "REALTIME";
710
711 } else if (strcmp(argv[1], "-st") == 0) {
712 argc--;
713 argv++;
714
715 if (argc > 1)
716 s_thresh_hold = atoi(argv[1]);
717 else
718 exit_usage();
719
720 } else if (strcmp(argv[1], "-it") == 0) {
721 argc--;
722 argv++;
723
724 if (argc > 1)
725 i_thresh_hold = atoi(argv[1]);
726 else
727 exit_usage();
728 } else if (strcmp(argv[1], "-c") == 0) {
729 argc--;
730 argv++;
731
732 if (argc > 1)
733 code_file = argv[1];
734 else
735 exit_usage();
736 } else if (strcmp(argv[1], "-l") == 0) {
737 argc--;
738 argv++;
739
740 if (argc > 1)
741 open_logfile(argv[1]);
742 else
743 exit_usage();
744
745 } else if (strcmp(argv[1], "-s") == 0) {
746 argc--;
747 argv++;
748
749 if (argc > 1)
750 num_of_usecs_to_sleep = atoi(argv[1]);
751 else
752 exit_usage();
753 } else if (strcmp(argv[1], "-d") == 0) {
754 argc--;
755 argv++;
756
757 if (argc > 1)
758 decrementer_usec = atoi(argv[1]);
759 else
760 exit_usage();
761 } else if (strcmp(argv[1], "-n") == 0) {
762 argc--;
763 argv++;
764
765 if (argc > 1)
766 kernelpath = argv[1];
767 else
768 exit_usage();
769 } else
770 exit_usage();
771
772 argc--;
773 argv++;
774 }
775
776 if ( geteuid() != 0 ) {
777 printf("'latency' must be run as root...\n");
778 exit(1);
779 }
780
781 if (kernelpath == (char *) 0)
782 kernelpath = "/mach_kernel";
783
784 if (code_file == (char *) 0)
785 code_file = "/usr/share/misc/trace.codes";
786
787 do_kernel_nm();
788
789 sample_sc_now = 25000 / num_of_usecs_to_sleep;
790
791 getdivisor();
792 decrementer_val = decrementer_usec * divisor;
793
794 /* get the cpu count for the DECR_TRAP array */
795 host = mach_host_self();
796 size = sizeof(hi)/sizeof(int);
797 ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
798 if (ret != KERN_SUCCESS) {
799 mach_error(argv[0], ret);
800 exit(EXIT_FAILURE);
801 }
802
803 if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
804 quit("can't allocate memory for decrementer tracing info\n");
805
806 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
807 fdelay = nanosecs_to_sleep * (divisor /1000);
808 adelay = (uint64_t)fdelay;
809
810 init_code_file();
811
812 /*
813 When the decrementer isn't set in the options,
814 decval will be zero and this call will reset
815 the system default ...
816 */
817 set_rtcdec(decrementer_val);
818
819 if (initscr() == (WINDOW *) 0)
820 {
821 printf("Unrecognized TERM type, try vt100\n");
822 exit(1);
823 }
824
825 clear();
826 refresh();
827 signal(SIGWINCH, sigwinch);
828 signal(SIGINT, sigintr);
829 signal(SIGQUIT, leave);
830 signal(SIGTERM, leave);
831 signal(SIGHUP, leave);
832
833
834 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
835 quit("can't allocate memory for tracing info\n");
836 set_remove();
837 set_numbufs(SAMPLE_SIZE);
838 set_enable(0);
839 if(log_fp)
840 set_init_logging();
841 else
842 set_init_nologging();
843 set_pidexclude(getpid(), 1);
844 set_enable(1);
845 trace_enabled = 1;
846 need_new_map = 1;
847
848 loop_cnt = 0;
849 start_time = time((long *)0);
850 refresh_time = start_time;
851
852 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
853 {
854 /* the realtime band */
855 if(set_time_constraint_policy() != KERN_SUCCESS)
856 quit("Failed to set realtime policy.\n");
857 }
858
859 for (;;) {
860 curr_time = time((long *)0);
861
862 if (curr_time >= refresh_time) {
863 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
864 {
865 /* set standard timeshare policy during screen update */
866 if(set_standard_policy() != KERN_SUCCESS)
867 quit("Failed to set standard policy.\n");
868 }
869 screen_update((FILE *)0);
870 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
871 {
872 /* set back to realtime band */
873 if(set_time_constraint_policy() != KERN_SUCCESS)
874 quit("Failed to set time_constraint policy.\n");
875 }
876 refresh_time = curr_time + 1;
877 }
878
879 timestamp1 = mach_absolute_time();
880 adeadline = timestamp1 + adelay;
881 mk_wait_until(adeadline);
882 timestamp2 = mach_absolute_time();
883
884 start = timestamp1;
885
886 stop = timestamp2;
887
888 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
889
890 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
891 continue;
892
893 if (elapsed_usecs < 100)
894 s_usec_10_bins[elapsed_usecs/10]++;
895 if (elapsed_usecs < 1000)
896 s_usec_100_bins[elapsed_usecs/100]++;
897 else if (elapsed_usecs < 10000)
898 s_msec_1_bins[elapsed_usecs/1000]++;
899 else if (elapsed_usecs < 50000)
900 s_msec_10_bins[elapsed_usecs/10000]++;
901 else
902 s_too_slow++;
903
904 if (elapsed_usecs > s_max_latency)
905 s_max_latency = elapsed_usecs;
906 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
907 s_min_latency = elapsed_usecs;
908 s_total_latency += elapsed_usecs;
909 s_total_samples++;
910
911 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
912 s_exceeded_threshold++;
913 loop_cnt++;
914
915 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
916 sample_sc(start, stop);
917 else {
918 if (loop_cnt >= sample_sc_now) {
919 sample_sc((long long)0, (long long)0);
920 loop_cnt = 0;
921 }
922 }
923 if (gotSIGWINCH) {
924 /*
925 No need to check for initscr error return.
926 We won't get here if it fails on the first call.
927 */
928 endwin();
929 clear();
930 refresh();
931
932 gotSIGWINCH = 0;
933 }
934 }
935}
936
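/*
 * The measurement loop in main() is deliberately simple: it waits on an
 * absolute deadline and treats any overshoot beyond the requested sleep as
 * scheduling latency.  Roughly (a sketch only; the real loop also drives the
 * curses display and the kdebug sampling):
 *
 *	start = mach_absolute_time();
 *	mk_wait_until(start + num_of_usecs_to_sleep * divisor);
 *	stop = mach_absolute_time();
 *	latency_us = (stop - start) / divisor - num_of_usecs_to_sleep;
 */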
937
938void getdivisor()
939{
940 mach_timebase_info_data_t info;
941
942 (void) mach_timebase_info (&info);
943
944 divisor = ( (double)info.denom / (double)info.numer) * 1000;
945
946}
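/*
 * divisor converts mach_absolute_time() units to microseconds: with
 * mach_timebase_info() giving numer/denom such that
 * nanoseconds = abstime * numer / denom, it follows that
 * microseconds = abstime / ((denom / numer) * 1000) = abstime / divisor.
 */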
947
948/* This is the realtime band */
949static kern_return_t
950set_time_constraint_policy()
951{
952 kern_return_t result;
953 thread_time_constraint_policy_data_t info;
954 mach_msg_type_number_t count;
955 boolean_t get_default;
956
957 get_default = TRUE;
958 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
959 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
960 (thread_policy_t)&info, &count, &get_default);
961 if (result != KERN_SUCCESS)
962 return (result);
963
964 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
965 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
966
967 return (result);
968}
969
970/* This is the timeshare mode */
971static kern_return_t
972set_standard_policy()
973{
974 kern_return_t result;
975 thread_standard_policy_data_t info;
976 mach_msg_type_number_t count;
977 boolean_t get_default;
978
979 get_default = TRUE;
980 count = THREAD_STANDARD_POLICY_COUNT;
981 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
982 (thread_policy_t)&info, &count, &get_default);
983 if (result != KERN_SUCCESS)
984 return (result);
985
986 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
987 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
988
989 return (result);
990}
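/*
 * Both policy helpers follow the same thread_policy_get()/thread_policy_set()
 * pattern: fetch the default parameters for the flavor (get_default = TRUE)
 * and immediately apply them to the current thread.  For the time-constraint
 * flavor this moves the thread into the realtime band using the system
 * default period/computation/constraint values rather than hand-tuned ones.
 */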
991
992
993void read_command_map()
994{
995 size_t size;
996 int mib[6];
997
998 if (mapptr) {
999 free(mapptr);
1000 mapptr = 0;
1001 }
1002 total_threads = bufinfo.nkdthreads;
1003 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1004 if (size)
1005 {
1006 if (mapptr = (kd_threadmap *) malloc(size))
1007 bzero (mapptr, size);
1008 else
1009 {
1010 printf("Thread map is not initialized -- this is not fatal\n");
1011 return;
1012 }
1013 }
1014
1015 /* Now read the threadmap */
1016 mib[0] = CTL_KERN;
1017 mib[1] = KERN_KDEBUG;
1018 mib[2] = KERN_KDTHRMAP;
1019 mib[3] = 0;
1020 mib[4] = 0;
1021 mib[5] = 0; /* no flags */
1022 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1023 {
1024 /* This is not fatal -- just means I can't map command strings */
1025
1026 printf("Can't read the thread map -- this is not fatal\n");
1027 free(mapptr);
1028 mapptr = 0;
1029 return;
1030 }
1031 return;
1032}
1033
1034
1035void create_map_entry(int thread, char *command)
1036{
1037 int i, n;
1038 kd_threadmap *map;
1039
1040 if (!mapptr)
1041 return;
1042
1043 for (i = 0, map = 0; !map && i < total_threads; i++)
1044 {
1045 if (mapptr[i].thread == thread )
1046 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1047 }
1048
1049 if (!map) /* look for invalid entries that I can reuse*/
1050 {
1051 for (i = 0, map = 0; !map && i < total_threads; i++)
1052 {
1053 if (mapptr[i].valid == 0 )
1054 map = &mapptr[i]; /* Reuse this invalid entry */
1055 }
1056 }
1057
1058 if (!map)
1059 {
1060 /* If we reach here, then this is a new thread and
1061 * there are no invalid entries to reuse
1062 * Double the size of the thread map table.
1063 */
1064
1065 n = total_threads * 2;
1066 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1067 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1068 map = &mapptr[total_threads];
1069 total_threads = n;
1070#if 0
1071 if (log_fp)
1072 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1073#endif
1074 }
1075#if 0
1076 if (log_fp)
1077 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1078#endif
1079 map->valid = 1;
1080 map->thread = thread;
1081 /*
1082 The trace entry that returns the command name will hold
1083 at most MAXCOMLEN chars and, in that case, is not
1084 guaranteed to be null terminated.
1085 */
1086 (void)strncpy (map->command, command, MAXCOMLEN);
1087 map->command[MAXCOMLEN] = '\0';
1088}
1089
1090
1091kd_threadmap *find_thread_map(int thread)
1092{
1093 int i;
1094 kd_threadmap *map;
1095
1096 if (!mapptr)
1097 return((kd_threadmap *)0);
1098
1099 for (i = 0; i < total_threads; i++)
1100 {
1101 map = &mapptr[i];
1102 if (map->valid && (map->thread == thread))
1103 {
1104 return(map);
1105 }
1106 }
1107 return ((kd_threadmap *)0);
1108}
1109
1110void
1111kill_thread_map(int thread)
1112{
1113 kd_threadmap *map;
1114
1115 if (map = find_thread_map(thread)) {
1116
1117#if 0
1118 if (log_fp)
1119 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1120#endif
1121 map->valid = 0;
1122 map->thread = 0;
1123 map->command[0] = '\0';
1124 }
1125}
1126
1127
1128struct th_info *find_thread(int thread, int type1, int type2) {
1129 struct th_info *ti;
1130
1131 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1132 if (ti->thread == thread) {
1133 if (type1 == 0)
1134 return(ti);
1135 if (type1 == ti->type)
1136 return(ti);
1137 if (type2 == ti->type)
1138 return(ti);
1139 }
1140 }
1141 return ((struct th_info *)0);
1142}
1143
1144
1145char *find_code(type)
1146{
1147 int i;
1148
1149 for (i = 0; i < num_of_codes; i++) {
1150 if (codes_tab[i].type == type)
1151 return(codes_tab[i].name);
1152 }
1153 return ((char *)0);
1154}
1155
1156
1157void sample_sc(uint64_t start, uint64_t stop)
1158{
1159 kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
1160 uint64_t now;
1161 int count, i;
1162 int first_entry = 1;
1163 char command[32];
1164 double timestamp, last_timestamp, delta, start_bias;
1165 void read_command_map();
1166
1167 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1168 {
1169 /* set standard timeshare policy when logging */
1170 if(set_standard_policy() != KERN_SUCCESS)
1171 quit("Failed to set standard policy.\n");
1172 }
1173
1174 /* Get kernel buffer information */
1175 get_bufinfo(&bufinfo);
1176
1177 if (need_new_map) {
1178 read_command_map();
1179 need_new_map = 0;
1180 }
1181 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1182 mib[0] = CTL_KERN;
1183 mib[1] = KERN_KDEBUG;
1184 mib[2] = KERN_KDREADTR;
1185 mib[3] = 0;
1186 mib[4] = 0;
1187 mib[5] = 0; /* no flags */
1188
1189 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1190 quit("trace facility failure, KERN_KDREADTR\n");
1191
1192 count = needed;
1193
1194 if (bufinfo.flags & KDBG_WRAPPED) {
1195 for (i = 0; i < cur_max; i++) {
1196 th_state[i].thread = 0;
1197 th_state[i].type = -1;
1198 th_state[i].pathptr = (long *)0;
1199 th_state[i].pathname[0] = 0;
1200 }
1201 cur_max = 0;
1202 need_new_map = 1;
1203
1204 set_enable(0);
1205 set_enable(1);
1206
1207 if (log_fp) {
1208 double latency;
1209
1210 latency = (double)(stop - start) / divisor;
1211 latency -= (double)num_of_usecs_to_sleep;
1212
1213 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1214 &(ctime(&curr_time)[0]), latency, count);
1215 }
1216 }
1217 end_of_sample = &((kd_buf *)my_buffer)[count];
1218
1219 /* Always reinitialize the DECR_TRAP array */
1220 for (i=0; i < hi.avail_cpus; i++)
1221 last_decrementer_kd[i] = (kd_buf *)my_buffer;
1222
1223 last_mach_sched = (kd_buf *)0;
1224
1225 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1226 int debugid, thread, cpunum;
1227 int type, clen, mode;
1228 int len;
1229 char *p;
1230 long *sargptr;
1231 kd_buf *cur_kd;
1232 double i_latency;
1233 struct th_info *ti;
1234 char command1[32];
1235 char sched_info[64];
1236 kd_threadmap *map;
1237 kd_threadmap *find_thread_map();
1238 double handle_decrementer();
1239 kd_buf *log_decrementer();
1240 int check_for_thread_update();
1241 void enter_syscall();
1242 void exit_syscall();
1243 void print_entry();
1244
1245 thread = kd->arg5 & KDBG_THREAD_MASK;
1246 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1247 debugid = kd->debugid;
1248 type = kd->debugid & DBG_FUNC_MASK;
1249
1250 if (check_for_thread_update(thread, type, kd))
1251 continue;
1252
1253 if (type == DECR_TRAP)
1254 i_latency = handle_decrementer(kd);
1255
1256 now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
1257 (uint64_t)((unsigned int)(kd->timestamp.tv_nsec));
1258
1259 timestamp = ((double)now) / divisor;
1260
1261 if (now < start || now > stop) {
1262 if (debugid & DBG_FUNC_START)
1263 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1264 else if (debugid & DBG_FUNC_END)
1265 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1266 else if (type == DECR_TRAP) {
1267 cur_kd = kd;
1268 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1269 start_kd = last_decrementer_kd[cpunum];
1270 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1271 if (kd >= end_of_sample)
1272 break;
1273 }
1274 if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1275 {
1276 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1277 last_decrementer_kd[cpunum] = kd;
1278 }
1279 else
1280 last_decrementer_kd[cpunum] = cur_kd;
1281 }
1282 continue;
1283 }
1284 if (first_entry) {
1285 double latency;
1286 char buf1[132];
1287 char buf2[132];
1288
1289 latency = (double)(stop - start) / divisor;
1290 latency -= (double)num_of_usecs_to_sleep;
1291
1292 if (my_pri == -1)
1293 sprintf(buf2, "default");
1294 else
1295 sprintf(buf2, "%d", my_pri);
1296 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1297 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1298 clen = strlen(buf1);
1299 memset(buf2, '-', clen);
1300 buf2[clen] = 0;
1301
1302 if (log_fp) {
1303 fprintf(log_fp, "\n\n%s\n", buf2);
1304 fprintf(log_fp, "%s\n\n", buf1);
1305 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1306 }
1307 start_bias = ((double)start) / divisor;
1308 last_timestamp = timestamp;
1309 first_entry = 0;
1310 }
1311 delta = timestamp - last_timestamp;
1312
1313 if (map = find_thread_map(thread))
1314 strcpy(command, map->command);
1315 else
1316 command[0] = 0;
1317
1318 switch (type) {
1319
1320 case CQ_action:
1321 if (log_fp) {
1322 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1323 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1324 }
1325 last_timestamp = timestamp;
1326 break;
1327
1328 case TES_action:
1329 if (log_fp) {
1330 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1331 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1332 }
1333
1334 last_timestamp = timestamp;
1335 break;
1336
1337 case IES_action:
1338 if (log_fp) {
1339 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1340 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1341 }
1342
1343 last_timestamp = timestamp;
1344 break;
1345
1346 case IES_filter:
1347 if (log_fp) {
1348 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1349 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1350 }
1351
1352 last_timestamp = timestamp;
1353 break;
1354
1355 case DECR_TRAP:
1356 last_decrementer_kd[cpunum] = kd;
1357
1358 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1359 p = "*";
1360 else
1361 p = " ";
1362
1363 mode = 1;
1364
1365 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1366 if (ti->type == -1 && strcmp(command, "kernel_task"))
1367 mode = 0;
1368 }
1369
1370 if (log_fp) {
1371 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1372 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1373 }
1374
1375 last_timestamp = timestamp;
1376 break;
1377
1378 case DECR_SET:
1379 if (log_fp) {
1380 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1381 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1382 }
1383
1384 last_timestamp = timestamp;
1385 break;
1386
1387 case MACH_sched:
1388 case MACH_stkhandoff:
1389 last_mach_sched = kd;
1390
1391 if (map = find_thread_map(kd->arg2))
1392 strcpy(command1, map->command);
1393 else
1394 sprintf(command1, "%-8x", kd->arg2);
1395
1396 if (ti = find_thread(kd->arg2, 0, 0)) {
1397 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1398 p = "U";
1399 else
1400 p = "K";
1401 } else
1402 p = "*";
1403 memset(sched_info, ' ', sizeof(sched_info));
1404
1405 sprintf(sched_info, "%14.14s", command);
1406 clen = strlen(sched_info);
1407 sched_info[clen] = ' ';
1408
1409 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1410 clen = strlen(sched_info);
1411 sched_info[clen] = ' ';
1412
1413 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1414
1415 if (log_fp) {
1416 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1417 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1418 }
1419
1420 last_timestamp = timestamp;
1421 break;
1422
1423 case VFS_LOOKUP:
1424 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1425 if (cur_max >= MAX_THREADS)
1426 continue;
1427 ti = &th_state[cur_max++];
1428
1429 ti->thread = thread;
1430 ti->type = -1;
1431 ti->pathptr = (long *)0;
1432 ti->child_thread = 0;
1433 }
1434 while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
1435 {
1436 if (!ti->pathptr) {
1437 ti->arg1 = kd->arg1;
1438 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1439 sargptr = (long *)&ti->pathname[0];
1440
1441 *sargptr++ = kd->arg2;
1442 *sargptr++ = kd->arg3;
1443 *sargptr++ = kd->arg4;
1444 ti->pathptr = sargptr;
1445
1446 } else {
1447 sargptr = ti->pathptr;
1448
1449 /*
1450 We don't want to overrun our pathname buffer if the
1451 kernel sends us more VFS_LOOKUP entries than we can
1452 handle.
1453 */
1454
1455 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1456 {
1457 kd++;
1458 continue;
1459 }
1460
1461 /*
1462 We need to detect consecutive vfslookup entries.
1463 So, if we get here and find a START entry,
1464 fake the pathptr so we can bypass all further
1465 vfslookup entries.
1466 */
1467
1468 if (kd->debugid & DBG_FUNC_START)
1469 {
1470 (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1471 }
1472 else
1473 {
1474 *sargptr++ = kd->arg1;
1475 *sargptr++ = kd->arg2;
1476 *sargptr++ = kd->arg3;
1477 *sargptr++ = kd->arg4;
1478 ti->pathptr = sargptr;
1479 }
1480 }
1481 kd++;
1482 }
1483
1484 kd--;
1485
1486 /* print the tail end of the pathname */
1487 len = strlen(ti->pathname);
1488 if (len > 42)
1489 len -= 42;
1490 else
1491 len = 0;
1492
1493 if (log_fp) {
1494 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1495 timestamp - start_bias, delta, "VFS_LOOKUP",
1496 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1497 }
1498
1499 last_timestamp = timestamp;
1500 break;
1501
1502 default:
1503 if (debugid & DBG_FUNC_START)
1504 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1505 else if (debugid & DBG_FUNC_END)
1506 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1507 else
1508 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1509
1510 last_timestamp = timestamp;
1511 break;
1512 }
1513 }
1514 if (last_mach_sched && log_fp)
1515 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1516#if 0
1517 if (first_entry == 0 && log_fp)
1518 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1519#endif
1520 if (log_fp)
1521 fflush(log_fp);
1522
1523 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1524 {
1525 /* set back to realtime band */
1526 if(set_time_constraint_policy() != KERN_SUCCESS)
1527 quit("Failed to set time_constraint policy.\n");
1528 }
1529}
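/*
 * sample_sc() drains the kernel trace buffer via KERN_KDREADTR into my_buffer
 * and walks the kd_buf entries.  Note that kd->timestamp is not a normal
 * timespec here: the 64-bit timebase value is split across tv_sec (high 32
 * bits) and tv_nsec (low 32 bits), which is why it is reassembled with a
 * shift and OR before being divided by divisor.
 */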
1530
1531void
1532enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1533{
1534 struct th_info *ti;
1535 int i;
1536 int cpunum;
1537 char *p;
1538
1539 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1540
1541 if (print_info && fp) {
1542 if (p = find_code(type)) {
1543 if (type == INTERRUPT) {
1544 int mode = 1;
1545
1546 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1547 if (ti->type == -1 && strcmp(command, "kernel_task"))
1548 mode = 0;
1549 }
1550
1551 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1552 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1553 } else if (type == MACH_vmfault) {
1554 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1555 timestamp - bias, delta, p, thread, cpunum, command);
1556 } else {
1557 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1558 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1559 thread, cpunum, command);
1560 }
1561 } else {
1562 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1563 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1564 thread, cpunum, command);
1565 }
1566 }
1567 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1568 if (cur_max >= MAX_THREADS) {
1569 static int do_this_once = 1;
1570
1571 if (do_this_once) {
1572 for (i = 0; i < cur_max; i++) {
1573 if (!fp)
1574 break;
1575 fprintf(fp, "thread = %x, type = %x\n",
1576 th_state[i].thread, th_state[i].type);
1577 }
1578 do_this_once = 0;
1579 }
1580 return;
1581
1582 }
1583 ti = &th_state[cur_max++];
1584
1585 ti->thread = thread;
1586 ti->child_thread = 0;
1587 }
1588 if (type != BSC_exit)
1589 ti->type = type;
1590 else
1591 ti->type = -1;
1592 ti->stime = timestamp;
1593 ti->pathptr = (long *)0;
1594
1595#if 0
1596 if (print_info && fp)
1597 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1598#endif
1599}
1600
1601
1602void
1603exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1604{
1605 struct th_info *ti;
1606 int cpunum;
1607 char *p;
1608
1609 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1610 ti = find_thread(thread, type, type);
1611#if 0
1612 if (print_info && fp)
1613 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1614#endif
1615 if (print_info && fp) {
1616 if (ti)
1617 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1618 else
1619 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1620
1621 if (p = find_code(type)) {
1622 if (type == INTERRUPT) {
1623 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1624 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1625 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1626 p, fault_name[kd->arg2], kd->arg1,
1627 thread, cpunum, command);
1628 } else {
1629 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1630 p, kd->arg1, kd->arg2,
1631 thread, cpunum, command);
1632 }
1633 } else {
1634 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1635 type, kd->arg1, kd->arg2,
1636 thread, cpunum, command);
1637 }
1638 }
1639 if (ti == (struct th_info *)0) {
1640 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1641 if (cur_max >= MAX_THREADS)
1642 return;
1643 ti = &th_state[cur_max++];
1644
1645 ti->thread = thread;
1646 ti->child_thread = 0;
1647 ti->pathptr = (long *)0;
1648 }
1649 }
1650 ti->type = -1;
1651}
1652
1653void
1654print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1655{
1656 char *p;
1657 int cpunum;
1658
1659 if (!fp)
1660 return;
1661
1662 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1663
1664#if 0
1665 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1666#endif
1667 if (p = find_code(type)) {
1668 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1669 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1670 thread, cpunum, command);
1671 } else {
1672 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1673 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1674 thread, cpunum, command);
1675 }
1676}
1677
1678int
1679check_for_thread_update(int thread, int type, kd_buf *kd)
1680{
1681 struct th_info *ti;
1682 void create_map_entry();
1683
1684 switch (type) {
1685
1686 case TRACE_DATA_NEWTHREAD:
1687 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1688 if (cur_max >= MAX_THREADS)
1689 return (1);
1690 ti = &th_state[cur_max++];
1691
1692 ti->thread = thread;
1693 ti->type = -1;
1694 ti->pathptr = (long *)0;
1695 }
1696 ti->child_thread = kd->arg1;
1697 return (1);
1698
1699 case TRACE_STRING_NEWTHREAD:
1700 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1701 return (1);
1702 if (ti->child_thread == 0)
1703 return (1);
1704 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1705
1706 ti->child_thread = 0;
1707 return (1);
1708
1709 case TRACE_STRING_EXEC:
1710 create_map_entry(thread, (char *)&kd->arg1);
1711 return (1);
1712
1713 }
1714 return (0);
1715}
1716
1717
1718kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1719{
1720 kd_buf *kd, *kd_start, *kd_stop;
1721 int kd_count; /* Limit the boundary of kd_start */
1722 double timestamp, last_timestamp, delta, start_bias;
1723 int thread, cpunum;
1724 int debugid, type, clen;
1725 int len;
1726 uint64_t now;
1727 struct th_info *ti;
1728 long *sargptr;
1729 char *p;
1730 char command[32];
1731 char command1[32];
1732 char sched_info[64];
1733 char buf1[128];
1734 char buf2[128];
1735 kd_threadmap *map;
1736 kd_threadmap *find_thread_map();
1737
1738 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1739 clen = strlen(buf1);
1740 memset(buf2, '-', clen);
1741 buf2[clen] = 0;
1742 fprintf(log_fp, "\n\n%s\n", buf2);
1743 fprintf(log_fp, "%s\n\n", buf1);
1744
1745 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1746
1747 thread = kd_beg->arg5 & KDBG_THREAD_MASK;
1748 cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1: 0;
1749
1750 for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
1751 if (kd_count == MAX_LOG_COUNT)
1752 break;
1753
1754 if((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)
1755 continue;
1756
1757 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1758 break;
1759
1760 if((kd_start->arg5 & KDBG_THREAD_MASK) != thread)
1761 break;
1762 }
1763
1764 if (kd_start < (kd_buf *)my_buffer)
1765 kd_start = (kd_buf *)my_buffer;
1766
1767 thread = kd_end->arg5 & KDBG_THREAD_MASK;
1768
1769 for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
1770
1771 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1772 break;
1773
1774 if((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)
1775 continue;
1776
1777 if((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)
1778 break;
1779 }
1780
1781 if (kd_stop >= end_of_sample)
1782 kd_stop = end_of_sample - 1;
1783
1784 now = (((uint64_t)kd_start->timestamp.tv_sec) << 32) |
1785 (uint64_t)((unsigned int)(kd_start->timestamp.tv_nsec));
1786 timestamp = ((double)now) / divisor;
1787
1788 for (kd = kd_start; kd <= kd_stop; kd++) {
1789 type = kd->debugid & DBG_FUNC_MASK;
1790
1791 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
1792 if (ti->stime >= timestamp)
1793 ti->type = -1;
1794 }
1795 }
1796 for (kd = kd_start; kd <= kd_stop; kd++) {
1797 int mode;
1798
1799 thread = kd->arg5 & KDBG_THREAD_MASK;
1800 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1801 debugid = kd->debugid;
1802 type = kd->debugid & DBG_FUNC_MASK;
1803
1804 now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
1805 (uint64_t)((unsigned int)(kd->timestamp.tv_nsec));
1806
1807 timestamp = ((double)now) / divisor;
1808
1809 if (kd == kd_start) {
1810 start_bias = timestamp;
1811 last_timestamp = timestamp;
1812 }
1813 delta = timestamp - last_timestamp;
1814
1815 if (map = find_thread_map(thread))
1816 strcpy(command, map->command);
1817 else
1818 command[0] = 0;
1819
1820
1821 switch (type) {
1822
1823 case CQ_action:
1824 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1825 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1826
1827 last_timestamp = timestamp;
1828 break;
1829
1830 case DECR_TRAP:
1831 if ((int)(kd->arg1) >= 0)
1832 i_latency = 0;
1833 else
1834 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1835
1836 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1837 p = "*";
1838 else
1839 p = " ";
1840
1841 mode = 1;
1842
1843 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1844 if (ti->type == -1 && strcmp(command, "kernel_task"))
1845 mode = 0;
1846 }
1847 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1848 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1849
1850 last_timestamp = timestamp;
1851 break;
1852
1853 case DECR_SET:
1854 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1855 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1856 "DECR_SET", thread, cpunum, command);
1857
1858 last_timestamp = timestamp;
1859 break;
1860
1861 case MACH_sched:
1862 case MACH_stkhandoff:
1863 if (map = find_thread_map(kd->arg2))
1864 strcpy(command1, map->command);
1865 else
1866 sprintf(command1, "%-8x", kd->arg2);
1867
1868 if (ti = find_thread(kd->arg2, 0, 0)) {
1869 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1870 p = "U";
1871 else
1872 p = "K";
1873 } else
1874 p = "*";
1875 memset(sched_info, ' ', sizeof(sched_info));
1876
1877 sprintf(sched_info, "%14.14s", command);
1878 clen = strlen(sched_info);
1879 sched_info[clen] = ' ';
1880
1881 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1882 clen = strlen(sched_info);
1883 sched_info[clen] = ' ';
1884
1885 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1886
1887 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1888 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1889
1890 last_timestamp = timestamp;
1891 break;
1892
1893 case VFS_LOOKUP:
1894 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1895 if (cur_max >= MAX_THREADS)
1896 continue;
1897 ti = &th_state[cur_max++];
1898
1899 ti->thread = thread;
1900 ti->type = -1;
1901 ti->pathptr = (long *)0;
1902 ti->child_thread = 0;
1903 }
1904
1905 while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
1906 {
1907 if (!ti->pathptr) {
1908 ti->arg1 = kd->arg1;
1909 memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
1910 sargptr = (long *)&ti->pathname[0];
1911
1912 *sargptr++ = kd->arg2;
1913 *sargptr++ = kd->arg3;
1914 *sargptr++ = kd->arg4;
1915 ti->pathptr = sargptr;
1916
1917 } else {
1918 sargptr = ti->pathptr;
1919
1920 /*
1921 We don't want to overrun our pathname buffer if the
1922 kernel sends us more VFS_LOOKUP entries than we can
1923 handle.
1924 */
1925
1926 if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])
1927 {
1928 kd++;
1929 continue;
1930 }
1931
1932 /*
1933 We need to detect consecutive vfslookup entries.
1934 So, if we get here and find a START entry,
1935 fake the pathptr so we can bypass all further
1936 vfslookup entries.
1937 */
1938
1939 if (kd->debugid & DBG_FUNC_START)
1940 {
1941 (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];
1942 }
1943 else
1944 {
1945 *sargptr++ = kd->arg1;
1946 *sargptr++ = kd->arg2;
1947 *sargptr++ = kd->arg3;
1948 *sargptr++ = kd->arg4;
1949 ti->pathptr = sargptr;
1950 }
1951 }
1952 kd++;
1953 }
1954
1955 kd--;
1956 /* print the tail end of the pathname */
1957 len = strlen(ti->pathname);
1958 if (len > 42)
1959 len -= 42;
1960 else
1961 len = 0;
1962
1963 fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
1964 timestamp - start_bias, delta, "VFS_LOOKUP",
1965 &ti->pathname[len], ti->arg1, thread, cpunum, command);
1966
1967 last_timestamp = timestamp;
1968 break;
1969
1970 default:
1971 if (debugid & DBG_FUNC_START)
1972 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1973 else if (debugid & DBG_FUNC_END)
1974 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1975 else
1976 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1977
1978 last_timestamp = timestamp;
1979 break;
1980 }
1981 }
1982 return(kd_stop);
1983}
1984
1985
1986double handle_decrementer(kd_buf *kd)
1987{
1988 double latency;
1989 int elapsed_usecs;
1990
1991 if ((int)(kd->arg1) >= 0)
1992 latency = 1;
1993 else
1994 latency = (((double)(-1 - kd->arg1)) / divisor);
1995 elapsed_usecs = (int)latency;
1996
1997 if (elapsed_usecs < 100)
1998 i_usec_10_bins[elapsed_usecs/10]++;
1999 if (elapsed_usecs < 1000)
2000 i_usec_100_bins[elapsed_usecs/100]++;
2001 else if (elapsed_usecs < 10000)
2002 i_msec_1_bins[elapsed_usecs/1000]++;
2003 else if (elapsed_usecs < 50000)
2004 i_msec_10_bins[elapsed_usecs/10000]++;
2005 else
2006 i_too_slow++;
2007
2008 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
2009 i_exceeded_threshold++;
2010 if (elapsed_usecs > i_max_latency)
2011 i_max_latency = elapsed_usecs;
2012 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
2013 i_min_latency = elapsed_usecs;
2014 i_total_latency += elapsed_usecs;
2015 i_total_samples++;
2016
2017 return (latency);
2018}
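/*
 * DECR_TRAP entries encode lateness in arg1: a non-negative value means the
 * decrementer fired on time, while a negative value is the number of timebase
 * ticks past the deadline, so the interrupt latency in microseconds is
 * (-1 - arg1) / divisor, as computed in handle_decrementer() above.
 */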
2019
2020
2021void init_code_file()
2022{
2023 FILE *fp;
2024 int i, n, cnt, code;
2025 char name[128];
2026
2027 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
2028 if (log_fp)
2029 fprintf(log_fp, "open of %s failed\n", code_file);
2030 return;
2031 }
2032 n = fscanf(fp, "%d\n", &cnt);
2033
2034 if (n != 1) {
2035 if (log_fp)
2036 fprintf(log_fp, "bad format found in %s\n", code_file);
2037 return;
2038 }
2039 for (i = 0; i < MAX_ENTRIES; i++) {
2040 n = fscanf(fp, "%x%s\n", &code, name);
2041
2042 if (n != 2)
2043 break;
2044
2045 strncpy(codes_tab[i].name, name, 32);
2046 codes_tab[i].type = code;
2047 }
2048 num_of_codes = i;
2049
2050 fclose(fp);
2051}
2052
2053
2054void
2055do_kernel_nm()
2056{
2057 int i, len;
2058 FILE *fp = (FILE *)0;
2059 char tmp_nm_file[128];
2060 char tmpstr[1024];
2061 int inchr;
2062
2063 bzero(tmp_nm_file, 128);
2064 bzero(tmpstr, 1024);
2065
2066 /* Build the temporary nm file path */
2067 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
2068
2069 /* Build the nm command and create a tmp file with the output*/
2070 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
2071 kernelpath, tmp_nm_file);
2072 system(tmpstr);
2073
2074 /* Parse the output from the nm command */
2075 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
2076 {
2077 /* Hmmm, let's not treat this as fatal */
2078 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
2079 return;
2080 }
2081
2082 /* Count the number of symbols in the nm symbol table */
2083 kern_sym_count=0;
2084 while ( (inchr = getc(fp)) != -1)
2085 {
2086 if (inchr == '\n')
2087 kern_sym_count++;
2088 }
2089
2090 rewind(fp);
2091
2092 /* Malloc the space for symbol table */
2093 if (kern_sym_count > 0)
2094 {
2095 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
2096 if (!kern_sym_tbl)
2097 {
2098 /* Hmmm, let's not treat this as fatal */
2099 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2100 }
2101 else
2102 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2103 }
2104 else
2105 {
2106 /* Hmmm, let's not treat this as fatal */
2107 fprintf(stderr, "No kernel symbol table \n");
2108 }
2109
2110 for (i=0; i<kern_sym_count; i++)
2111 {
2112 bzero(tmpstr, 1024);
2113 if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2114 break;
2115 else
2116 {
2117 len = strlen(tmpstr);
2118 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2119
2120 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2121 {
2122 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2123 kern_sym_tbl[i].k_sym_name = (char *)0;
2124 len = 0;
2125 }
2126 else
2127 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2128
2129 kern_sym_tbl[i].k_sym_len = len;
2130 }
2131 } /* end for */
2132
2133 if (i != kern_sym_count)
2134 {
2135 /* Hmmm, didn't build up entire table from nm */
2136 /* scrap the entire thing */
2137 if (kern_sym_tbl)
2138 free (kern_sym_tbl);
2139 kern_sym_tbl = (kern_sym_t *)0;
2140 kern_sym_count = 0;
2141 }
2142
2143 fclose(fp);
2144
2145 /* Remove the temporary nm file */
2146 unlink(tmp_nm_file);
2147
2148#if 0
2149 /* Dump the kernel symbol table */
2150 for (i=0; i < kern_sym_count; i++)
2151 {
2152 if (kern_sym_tbl[i].k_sym_name)
2153 printf ("[%d] 0x%x %s\n", i,
2154 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2155 else
2156 printf ("[%d] 0x%x %s\n", i,
2157 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2158 }
2159#endif
2160}
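/*
 * do_kernel_nm() shells out to nm(1) and parses lines of the form
 * "<hex address> <type char> <symbol name>" into kern_sym_tbl[].  The -n flag
 * keeps the table sorted by address, which is what makes the binary_search()
 * range lookup below valid.
 */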
2161
2162char *
2163pc_to_string(unsigned int pc, int max_len, int mode)
2164{
2165 int ret;
2166 int len;
2167
2168 int binary_search();
2169
2170 if (mode == 0)
2171 {
2172 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2173 return(pcstring);
2174 }
2175
2176 ret=0;
2177 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2178
2179 if (ret == -1)
2180 {
2181 sprintf(pcstring, "0x%x", pc);
2182 return(pcstring);
2183 }
2184 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2185 {
2186 sprintf(pcstring, "0x%x", pc);
2187 return(pcstring);
2188 }
2189 else
2190 {
2191 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2192 len = max_len - 8;
2193
2194 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2195 sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
2196
2197 return (pcstring);
2198 }
2199}
2200
2201
2202/* Return -1 if not found, else return index */
2203int binary_search(list, low, high, addr)
2204kern_sym_t *list;
2205int low, high;
2206unsigned int addr;
2207{
2208 int mid;
2209
2210 mid = (low + high) / 2;
2211
2212 if (low > high)
2213 return (-1); /* failed */
2214 else if (low + 1 == high)
2215 {
2216 if (list[low].k_sym_addr <= addr &&
2217 addr < list[high].k_sym_addr)
2218 {
2219 /* We have a range match */
2220 return(low);
2221 }
2222 else if (list[high].k_sym_addr <= addr)
2223 {
2224 return(high);
2225 }
2226 else
2227 return(-1); /* Failed */
2228 }
2229 else if (addr < list[mid].k_sym_addr)
2230 {
2231 return(binary_search (list, low, mid, addr));
2232 }
2233 else
2234 {
2235 return(binary_search (list, mid, high, addr));
2236 }
2237}
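/*
 * binary_search() is a range lookup rather than an exact match: it returns
 * the index of the symbol with the greatest address <= addr, so pc_to_string()
 * can render any PC inside a function's body as "symbol+0xoffset".
 */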
2238
2239void
2240open_logfile(char *path)
2241{
2242 log_fp = fopen(path, "a");
2243
2244 if (!log_fp)
2245 {
2246 /* failed to open path */
2247 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2248 exit_usage();
2249 }
2250}