1/*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26/*
27 cc -I. -DKERNEL_PRIVATE -O -o latency latency.c
28*/
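/*
 * latency samples its own wakeup delay (scheduler latency) and decrementer
 * overruns (interrupt latency) using the kernel kdebug trace facility,
 * bins the results into histograms for a curses display, and can log the
 * raw trace entries surrounding any sample that exceeds a user-supplied
 * threshold.
 */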
29
30#include <mach/mach.h>
31#include <stdlib.h>
32#include <stdio.h>
33#include <signal.h>
34#include <strings.h>
35#include <nlist.h>
36#include <fcntl.h>
37#include <string.h>
38
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/time.h>
42
43#include <libc.h>
44#include <termios.h>
45#include <bsd/curses.h>
46#include <sys/ioctl.h>
47
48#ifndef KERNEL_PRIVATE
49#define KERNEL_PRIVATE
50#include <sys/kdebug.h>
51#undef KERNEL_PRIVATE
52#else
53#include <sys/kdebug.h>
54#endif /*KERNEL_PRIVATE*/
55
56#include <sys/sysctl.h>
57#include <errno.h>
58#include <err.h>
59
60#include <mach/host_info.h>
61#include <mach/mach_error.h>
62#include <mach/mach_types.h>
63#include <mach/message.h>
64#include <mach/mach_syscalls.h>
65#include <mach/clock.h>
66#include <mach/clock_types.h>
67
68#include <libkern/OSTypes.h>
69
70extern mach_port_t clock_port;
71
72#define KERN_KDPIDEX 14
73
74
75int s_usec_10_bins[10];
76int s_usec_100_bins[10];
77int s_msec_1_bins[10];
78int s_msec_10_bins[5];
79int s_too_slow;
80int s_max_latency;
81int s_min_latency = 0;
82long long s_total_latency = 0;
83int s_total_samples;
84long s_thresh_hold;
85int s_exceeded_threshold = 0;
86
87int i_usec_10_bins[10];
88int i_usec_100_bins[10];
89int i_msec_1_bins[10];
90int i_msec_10_bins[5];
91int i_too_slow;
92int i_max_latency;
93int i_min_latency = 0;
94long long i_total_latency = 0;
95int i_total_samples;
96long i_thresh_hold;
97int i_exceeded_threshold = 0;
98
99long start_time;
100long curr_time;
101long refresh_time;
102
103char *policy_name;
104int my_policy;
105int my_pri = -1;
106int num_of_usecs_to_sleep = 1000;
107
108char *kernelpath = (char *)0;
109char *code_file = (char *)0;
110
111typedef struct {
112 u_long k_sym_addr; /* kernel symbol address from nm */
113 u_int k_sym_len; /* length of kernel symbol string */
114 char *k_sym_name; /* kernel symbol string from nm */
115} kern_sym_t;
116
117kern_sym_t *kern_sym_tbl; /* pointer to the nm table */
118int kern_sym_count; /* number of entries in nm table */
119char pcstring[128];
120
121#define UNKNOWN "Can't find symbol name"
122
123
124double divisor;
125int gotSIGWINCH = 0;
126int trace_enabled = 0;
127
128#define SAMPLE_SIZE 300000
129
130int mib[6];
131size_t needed;
132char *my_buffer;
133
134kbufinfo_t bufinfo = {0, 0, 0};
135
136FILE *log_fp = (FILE *)0;
137int num_of_codes = 0;
138int need_new_map = 0;
139int total_threads = 0;
140kd_threadmap *mapptr = 0;
141
142#define MAX_ENTRIES 1024
143struct ct {
144 int type;
145 char name[32];
146} codes_tab[MAX_ENTRIES];
147
148
149struct th_info {
150 int thread;
151 int type;
152 int child_thread;
153 int vfslookup;
154 int arg1;
155 double stime;
156 char pathname[32];
157};
158
159#define MAX_THREADS 512
160struct th_info th_state[MAX_THREADS];
161
162int cur_max = 0;
163
164#define TRACE_DATA_NEWTHREAD 0x07000004
165#define TRACE_STRING_NEWTHREAD 0x07010004
166#define TRACE_STRING_EXEC 0x07010008
167
168#define INTERRUPT 0x01050000
169#define DECR_TRAP 0x01090000
170#define DECR_SET 0x01090004
171#define MACH_vmfault 0x01300000
172#define MACH_sched 0x01400000
173#define MACH_stkhandoff 0x01400008
174#define VFS_LOOKUP 0x03010090
175#define BSC_exit 0x040C0004
176#define IES_action 0x050b0018
177#define IES_filter 0x050b001c
178#define TES_action 0x050c0010
179#define CQ_action 0x050d0018
180
181
182#define DBG_FUNC_ALL (DBG_FUNC_START | DBG_FUNC_END)
183#define DBG_FUNC_MASK 0xfffffffc
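/*
 * kdebug debugids carry the event class/subclass/code in their upper bits,
 * with the low-order bits reserved for the DBG_FUNC_START/DBG_FUNC_END
 * qualifiers; DBG_FUNC_MASK strips those qualifiers so an entry/exit pair
 * compares equal to the event codes defined above.
 */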
184
185#define DBG_ZERO_FILL_FAULT 1
186#define DBG_PAGEIN_FAULT 2
187#define DBG_COW_FAULT 3
188#define DBG_CACHE_HIT_FAULT 4
189
190char *fault_name[5] = {
191 "",
192 "ZeroFill",
193 "PageIn",
194 "COW",
195 "CacheHit",
196};
197
198char *pc_to_string();
199static kern_return_t set_time_constraint_policy(void);
200static kern_return_t set_standard_policy(void);
201
202int decrementer_val = 0; /* Value used to reset decrementer */
203int set_remove_flag = 1; /* By default, remove trace buffer */
204
205/* raw read of the timebase register */
206void clock_get_uptime( register AbsoluteTime *result)
207{
208#ifdef __ppc__
209
210 register UInt32 hic;
211 do {
212 asm volatile(" mftbu %0" : "=r" (result->hi));
213 asm volatile(" mftb %0" : "=r" (result->lo));
214 asm volatile(" mftbu %0" : "=r" (hic));
215 } while (hic != result->hi);
216
217#else
218 result->lo = 0;
219 result->hi = 0;
220#endif /* __ppc__ */
221
222}
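/*
 * The mftbu/mftb/mftbu sequence above guards against a carry between the
 * two halves of the timebase: if the upper word changed while the lower
 * word was being read, the whole sample is retried, so hi/lo always form
 * a consistent 64-bit timestamp.
 */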
223
224typedef unsigned long long abstime_scalar_t;
225
226#define AbsoluteTime_to_scalar(x) \
227 (*(abstime_scalar_t *)(x))
228
229/* t1 += t2 */
230#define ADD_ABSOLUTETIME(t1, t2) \
231 (AbsoluteTime_to_scalar(t1) += \
232 AbsoluteTime_to_scalar(t2))
233
234/* t1 -= t2 */
235#define SUB_ABSOLUTETIME(t1, t2) \
236 (AbsoluteTime_to_scalar(t1) -= \
237 AbsoluteTime_to_scalar(t2))
238
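/*
 * These macros treat the hi/lo AbsoluteTime pair as one unsigned 64-bit
 * scalar so deadlines can be computed with plain integer arithmetic
 * (e.g. adeadline = timestamp1 + adelay in main()).
 */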
239int
240quit(s)
241char *s;
242{
243 void set_enable();
244 void set_rtcdec();
245 void set_remove();
246
247 if (trace_enabled)
248 set_enable(0);
249
250 /*
251 This flag is turned off when calling
252 quit() due to a set_remove() failure.
253 */
254 if (set_remove_flag)
255 set_remove();
256
257 if (decrementer_val)
258 set_rtcdec(0);
259
260 printf("latency: ");
261 if (s)
262 printf("%s", s);
263
264 exit(1);
265}
266
267void
268set_enable(int val)
269{
270 mib[0] = CTL_KERN;
271 mib[1] = KERN_KDEBUG;
272 mib[2] = KERN_KDENABLE; /* protocol */
273 mib[3] = val;
274 mib[4] = 0;
275 mib[5] = 0; /* no flags */
276
277 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
278 quit("trace facility failure, KERN_KDENABLE\n");
279}
280
281void
282set_numbufs(int nbufs)
283{
284 mib[0] = CTL_KERN;
285 mib[1] = KERN_KDEBUG;
286 mib[2] = KERN_KDSETBUF;
287 mib[3] = nbufs;
288 mib[4] = 0;
289 mib[5] = 0; /* no flags */
290 if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
291 quit("trace facility failure, KERN_KDSETBUF\n");
292
293 mib[0] = CTL_KERN;
294 mib[1] = KERN_KDEBUG;
295 mib[2] = KERN_KDSETUP;
296 mib[3] = 0;
297 mib[4] = 0;
298 mib[5] = 0; /* no flags */
299 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
300 quit("trace facility failure, KERN_KDSETUP\n");
301
302}
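/*
 * KERN_KDSETBUF only records the requested number of trace entries; the
 * KERN_KDSETUP call that follows is what actually allocates the kernel
 * buffers (as implied by the way this file always pairs the two calls).
 */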
303
304void
305set_pidexclude(int pid, int on_off)
306{
307 kd_regtype kr;
308
309 kr.type = KDBG_TYPENONE;
310 kr.value1 = pid;
311 kr.value2 = on_off;
312 needed = sizeof(kd_regtype);
313 mib[0] = CTL_KERN;
314 mib[1] = KERN_KDEBUG;
315 mib[2] = KERN_KDPIDEX;
316 mib[3] = 0;
317 mib[4] = 0;
318 mib[5] = 0;
319
320 sysctl(mib, 3, &kr, &needed, NULL, 0);
321}
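/*
 * KERN_KDPIDEX toggles per-pid exclusion from the trace stream; latency
 * excludes its own pid while sampling (set_pidexclude(getpid(), 1)) and
 * re-includes it from the signal handlers before exiting.
 */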
322
323set_rtcdec(decval)
324int decval;
325{kd_regtype kr;
326 int ret;
327 extern int errno;
328
329 kr.type = KDBG_TYPENONE;
330 kr.value1 = decval;
331 needed = sizeof(kd_regtype);
332 mib[0] = CTL_KERN;
333 mib[1] = KERN_KDEBUG;
334 mib[2] = KERN_KDSETRTCDEC; /* protocol */
335 mib[3] = 0; /* wildcard address family */
336 mib[4] = 0;
337 mib[5] = 0; /* no flags */
338
339 errno = 0;
340
341 if ((ret=sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
342 {
343 decrementer_val = 0;
344 quit("trace facility failure, KERN_KDSETRTCDEC\n");
345 }
346}
347
348
349void
350get_bufinfo(kbufinfo_t *val)
351{
352 needed = sizeof (*val);
353 mib[0] = CTL_KERN;
354 mib[1] = KERN_KDEBUG;
355 mib[2] = KERN_KDGETBUF;
356 mib[3] = 0;
357 mib[4] = 0;
358 mib[5] = 0; /* no flags */
359
360 if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
361 quit("trace facility failure, KERN_KDGETBUF\n");
362
363}
364
365void
366set_remove()
367{
368 extern int errno;
369
370 mib[0] = CTL_KERN;
371 mib[1] = KERN_KDEBUG;
372 mib[2] = KERN_KDREMOVE; /* protocol */
373 mib[3] = 0;
374 mib[4] = 0;
375 mib[5] = 0; /* no flags */
376
377 errno = 0;
378
379 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
380 {
381 set_remove_flag = 0;
382 if(errno == EBUSY)
383 quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
384 else
385 quit("trace facility failure, KERN_KDREMOVE\n");
386 }
387}
388
389void
390set_init_nologging()
391{
392 /* When we aren't logging, only collect the DECR_TRAP trace points */
393 kd_regtype kr;
394 kr.type = KDBG_VALCHECK;
395 kr.value1 = DECR_TRAP;
396 kr.value2 = 0;
397 kr.value3 = 0;
398 kr.value4 = 0;
399 needed = sizeof(kd_regtype);
400 mib[0] = CTL_KERN;
401 mib[1] = KERN_KDEBUG;
402 mib[2] = KERN_KDSETREG;
403 mib[3] = 0;
404 mib[4] = 0;
405 mib[5] = 0; /* no flags */
406 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
407 quit("trace facility failure, KERN_KDSETREG\n");
408
409 mib[0] = CTL_KERN;
410 mib[1] = KERN_KDEBUG;
411 mib[2] = KERN_KDSETUP;
412 mib[3] = 0;
413 mib[4] = 0;
414 mib[5] = 0; /* no flags */
415
416 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
417 quit("trace facility failure, KERN_KDSETUP\n");
418}
419
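/*
 * When logging, accept every event: KDBG_RANGETYPE with value1 = 0 and
 * value2 = -1 (all bits set) appears to select the full range of debugids.
 */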
420void
421set_init_logging()
422{ kd_regtype kr;
423
424 kr.type = KDBG_RANGETYPE;
425 kr.value1 = 0;
426 kr.value2 = -1;
427 needed = sizeof(kd_regtype);
428 mib[0] = CTL_KERN;
429 mib[1] = KERN_KDEBUG;
430 mib[2] = KERN_KDSETREG;
431 mib[3] = 0;
432 mib[4] = 0;
433 mib[5] = 0; /* no flags */
434
435 if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
436 quit("trace facility failure, KERN_KDSETREG\n");
437
438 mib[0] = CTL_KERN;
439 mib[1] = KERN_KDEBUG;
440 mib[2] = KERN_KDSETUP;
441 mib[3] = 0;
442 mib[4] = 0;
443 mib[5] = 0; /* no flags */
444
445 if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
446 quit("trace facility failure, KERN_KDSETUP\n");
447}
448
449
450void sigwinch()
451{
452 gotSIGWINCH = 1;
453}
454
455void sigintr()
456{
457 void screen_update();
458
459 set_enable(0);
460 set_pidexclude(getpid(), 0);
461 screen_update(log_fp);
462 set_rtcdec(0);
463 set_remove();
464
465 exit(1);
466}
467
468void sigquit()
469{
470 set_enable(0);
471 set_pidexclude(getpid(), 0);
472 set_rtcdec(0);
473 set_remove();
474
475 exit(1);
476}
477
478void sigterm()
479{
480 set_enable(0);
481 set_pidexclude(getpid(), 0);
482 set_rtcdec(0);
483 set_remove();
484
485 exit(1);
486}
487
488
489void
490screen_update(FILE *fp)
491{
492 int i;
493 int itotal, stotal;
494 int elapsed_secs;
495 int elapsed_mins;
496 int elapsed_hours;
497 unsigned int average_s_latency;
498 unsigned int average_i_latency;
499 char tbuf[256];
500
501 if (fp == (FILE *)0) {
502 erase();
503 move(0, 0);
504 } else
505 fprintf(fp,"\n\n===================================================================================================\n");
506 /*
507 * Display the current time.
508 * "ctime" always returns a string that looks like this:
509 *
510 * Sun Sep 16 01:03:52 1973
511 * 012345678901234567890123
512 * 1 2
513 *
514 * We print the first 19 characters (weekday, date, and time).
515 */
516 elapsed_secs = curr_time - start_time;
517 elapsed_hours = elapsed_secs / 3600;
518 elapsed_secs -= elapsed_hours * 3600;
519 elapsed_mins = elapsed_secs / 60;
520 elapsed_secs -= elapsed_mins * 60;
521
522 sprintf(tbuf, "%-19.19s %2d:%02d:%02d\n", &(ctime(&curr_time)[0]),
523 elapsed_hours, elapsed_mins, elapsed_secs);
524 if (fp)
525 fprintf(fp, "%s", tbuf);
526 else
527 printw(tbuf);
528
529
530
531 sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
532
533 if (fp)
534 fprintf(fp, "%s", tbuf);
535 else
536 printw(tbuf);
537
538 sprintf(tbuf, "---------------------------------------------\n");
539
540 if (fp)
541 fprintf(fp, "%s", tbuf);
542 else
543 printw(tbuf);
544
545
546
547 sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
548
549 if (fp)
550 fprintf(fp, "%s", tbuf);
551 else
552 printw(tbuf);
553
554
555
556 for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
557 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
558
559 if (fp)
560 fprintf(fp, "%s", tbuf);
561 else
562 printw(tbuf);
563
564 stotal += s_usec_10_bins[i];
565 itotal += i_usec_10_bins[i];
566 }
567 sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
568
569 if (fp)
570 fprintf(fp, "%s", tbuf);
571 else
572 printw(tbuf);
573
574
575
576 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
577 if (i < 9)
578 sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
579 else
580 sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
581
582 if (fp)
583 fprintf(fp, "%s", tbuf);
584 else
585 printw(tbuf);
586
587 stotal += s_usec_100_bins[i];
588 itotal += i_usec_100_bins[i];
589 }
590 sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
591
592 if (fp)
593 fprintf(fp, "%s", tbuf);
594 else
595 printw(tbuf);
596
597
598
599 for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
600 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
601
602 if (fp)
603 fprintf(fp, "%s", tbuf);
604 else
605 printw(tbuf);
606
607 stotal += s_msec_1_bins[i];
608 itotal += i_msec_1_bins[i];
609 }
610 sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
611
612 if (fp)
613 fprintf(fp, "%s", tbuf);
614 else
615 printw(tbuf);
616
617
618
619
620 for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
621 sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
622
623 if (fp)
624 fprintf(fp, "%s", tbuf);
625 else
626 printw(tbuf);
627
628 stotal += s_msec_10_bins[i];
629 itotal += i_msec_10_bins[i];
630 }
631 sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
632
633 if (fp)
634 fprintf(fp, "%s", tbuf);
635 else
636 printw(tbuf);
637
638 sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
639
640 if (fp)
641 fprintf(fp, "%s", tbuf);
642 else
643 printw(tbuf);
644
645
646
647 sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
648
649 if (fp)
650 fprintf(fp, "%s", tbuf);
651 else
652 printw(tbuf);
653
654 sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
655
656 if (fp)
657 fprintf(fp, "%s", tbuf);
658 else
659 printw(tbuf);
660
661 if (s_total_samples)
662 average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
663 else
664 average_s_latency = 0;
665
666 if (i_total_samples)
667 average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
668 else
669 average_i_latency = 0;
670
671 sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
672
673 if (fp)
674 fprintf(fp, "%s", tbuf);
675 else
676 printw(tbuf);
677
678 sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
679
680 if (fp)
681 fprintf(fp, "%s", tbuf);
682 else
683 printw(tbuf);
684
685 if (fp == (FILE *)0)
686 refresh();
687 else
688 fflush(fp);
689}
690
691int
692exit_usage()
693{
694
695 fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
696 fprintf(stderr, " [-it threshold] [-s sleep_in_usecs]\n");
697 fprintf(stderr, " [-d decrementer_in_usecs] [-n kernel]\n\n");
698
699 fprintf(stderr, " -rt Set realtime scheduling policy. Default is timeshare.\n");
700 fprintf(stderr, " -c specify name of codes file\n");
701 fprintf(stderr, " -l specify name of file to log trace entries to when threshold is exceeded\n");
702 fprintf(stderr, " -st set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
703 fprintf(stderr, " -it set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
704 fprintf(stderr, " -s set sleep time in microseconds\n");
705 fprintf(stderr, " -d set decrementer in microseconds.\n");
706 fprintf(stderr, " -n specify kernel, default is /mach_kernel\n");
707
708 fprintf(stderr, "\nlatency must be run as root\n\n");
709
710 exit(1);
711}
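/*
 * Example invocation (illustrative values only): run with the realtime
 * policy, sleep 1000 usecs per sample, and log any trace where the
 * scheduler latency exceeds 100 usecs:
 *
 * sudo latency -rt -s 1000 -st 100 -l /tmp/latency.log
 */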
712
713
714
715main(argc, argv)
716int argc;
717char *argv[];
718{
719 mach_timespec_t remain;
720 unsigned long long start, stop;
721 AbsoluteTime timestamp1;
722 AbsoluteTime timestamp2;
723 AbsoluteTime adeadline, adelay;
724 double fdelay;
725 int elapsed_usecs;
726 double nanosecs_to_sleep;
727 int loop_cnt, sample_sc_now;
728 int decrementer_usec = 0;
729 void getdivisor();
730 void sample_sc();
731 void init_code_file();
732 void do_kernel_nm();
733 void open_logfile();
734
735 my_policy = THREAD_STANDARD_POLICY;
736 policy_name = "TIMESHARE";
737
738 while (argc > 1) {
739 if (strcmp(argv[1], "-rt") == 0) {
740 my_policy = THREAD_TIME_CONSTRAINT_POLICY; /* the real time band */
741 policy_name = "REALTIME";
742
743 } else if (strcmp(argv[1], "-st") == 0) {
744 argc--;
745 argv++;
746
747 if (argc > 1)
748 s_thresh_hold = atoi(argv[1]);
749 else
750 exit_usage();
751
752 } else if (strcmp(argv[1], "-it") == 0) {
753 argc--;
754 argv++;
755
756 if (argc > 1)
757 i_thresh_hold = atoi(argv[1]);
758 else
759 exit_usage();
760 } else if (strcmp(argv[1], "-c") == 0) {
761 argc--;
762 argv++;
763
764 if (argc > 1)
765 code_file = argv[1];
766 else
767 exit_usage();
768 } else if (strcmp(argv[1], "-l") == 0) {
769 argc--;
770 argv++;
771
772 if (argc > 1)
773 open_logfile(argv[1]);
774 else
775 exit_usage();
776
777 } else if (strcmp(argv[1], "-s") == 0) {
778 argc--;
779 argv++;
780
781 if (argc > 1)
782 num_of_usecs_to_sleep = atoi(argv[1]);
783 else
784 exit_usage();
785 } else if (strcmp(argv[1], "-d") == 0) {
786 argc--;
787 argv++;
788
789 if (argc > 1)
790 decrementer_usec = atoi(argv[1]);
791 else
792 exit_usage();
793 } else if (strcmp(argv[1], "-n") == 0) {
794 argc--;
795 argv++;
796
797 if (argc > 1)
798 kernelpath = argv[1];
799 else
800 exit_usage();
801 } else
802 exit_usage();
803
804 argc--;
805 argv++;
806 }
807
808 if ( geteuid() != 0 ) {
809 printf("'latency' must be run as root...\n");
810 exit(1);
811 }
812
813 if (kernelpath == (char *) 0)
814 kernelpath = "/mach_kernel";
815
816 if (code_file == (char *) 0)
817 code_file = "/usr/share/misc/trace.codes";
818
819 do_kernel_nm();
820
821 sample_sc_now = 25000 / num_of_usecs_to_sleep;
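 /* drain and process the trace buffer after roughly 25 ms worth of sleep requests */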
822
823 getdivisor();
824 decrementer_val = decrementer_usec * divisor;
825
826 nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
827 fdelay = nanosecs_to_sleep * (divisor /1000);
828 AbsoluteTime_to_scalar(&adelay) = (abstime_scalar_t)fdelay;
829
830 init_code_file();
831
832 /*
833 When the decrementer isn't set in the options,
834 decval will be zero and this call will reset
835 the system default ...
836 */
837 set_rtcdec(decrementer_val);
838
839 initscr();
840 clear();
841 refresh();
842 signal(SIGWINCH, sigwinch);
843 signal(SIGINT, sigintr);
844 signal(SIGQUIT, sigquit);
845 signal(SIGTERM, sigterm);
846
847
848 if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
849 quit("can't allocate memory for tracing info\n");
850 set_remove();
851 set_numbufs(SAMPLE_SIZE);
852 set_enable(0);
853 if(log_fp)
854 set_init_logging();
855 else
856 set_init_nologging();
857 set_pidexclude(getpid(), 1);
858 set_enable(1);
859 trace_enabled = 1;
860 need_new_map = 1;
861
862 loop_cnt = 0;
863 start_time = time((long *)0);
864 refresh_time = start_time;
865
866 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
867 {
868 /* the realtime band */
869 if(set_time_constraint_policy() != KERN_SUCCESS)
870 quit("Failed to set realtime policy.\n");
871 }
872
873 for (;;) {
874 curr_time = time((long *)0);
875
876 if (curr_time >= refresh_time) {
877 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
878 {
879 /* set standard timeshare policy during screen update */
880 if(set_standard_policy() != KERN_SUCCESS)
881 quit("Failed to set standard policy.\n");
882 }
883 screen_update((FILE *)0);
884 if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
885 {
886 /* set back to realtime band */
887 if(set_time_constraint_policy() != KERN_SUCCESS)
888 quit("Failed to set time_constraint policy.\n");
889 }
890 refresh_time = curr_time + 1;
891 }
892
893 clock_get_uptime(&timestamp1);
894 adeadline = timestamp1;
895 ADD_ABSOLUTETIME(&adeadline, &adelay);
896 mk_wait_until(adeadline);
897 clock_get_uptime(&timestamp2);
898
899 start = (((unsigned long long)timestamp1.hi) << 32) |
900 (unsigned long long)((unsigned int)(timestamp1.lo));
901
902 stop = (((unsigned long long)timestamp2.hi) << 32) |
903 (unsigned long long)((unsigned int)(timestamp2.lo));
904
905 elapsed_usecs = (int)(((double)(stop - start)) / divisor);
906
907 if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)
908 continue;
909
910 if (elapsed_usecs < 100)
911 s_usec_10_bins[elapsed_usecs/10]++;
912 if (elapsed_usecs < 1000)
913 s_usec_100_bins[elapsed_usecs/100]++;
914 else if (elapsed_usecs < 10000)
915 s_msec_1_bins[elapsed_usecs/1000]++;
916 else if (elapsed_usecs < 50000)
917 s_msec_10_bins[elapsed_usecs/10000]++;
918 else
919 s_too_slow++;
920
921 if (elapsed_usecs > s_max_latency)
922 s_max_latency = elapsed_usecs;
923 if (elapsed_usecs < s_min_latency || s_total_samples == 0)
924 s_min_latency = elapsed_usecs;
925 s_total_latency += elapsed_usecs;
926 s_total_samples++;
927
928 if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
929 s_exceeded_threshold++;
930 loop_cnt++;
931
932 if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
933 sample_sc(start, stop);
934 else {
935 if (loop_cnt >= sample_sc_now) {
936 sample_sc((long long)0, (long long)0);
937 loop_cnt = 0;
938 }
939 }
940 if (gotSIGWINCH) {
941 initscr();
942 clear();
943 refresh();
944
945 gotSIGWINCH = 0;
946 }
947 }
948 }
949
950
951
952void getdivisor()
953{
954
955 unsigned int delta;
956 unsigned int abs_to_ns_num;
957 unsigned int abs_to_ns_denom;
958 unsigned int proc_to_abs_num;
959 unsigned int proc_to_abs_denom;
960
961 (void)MKGetTimeBaseInfo (&delta, &abs_to_ns_num, &abs_to_ns_denom,
962 &proc_to_abs_num, &proc_to_abs_denom);
963
964 divisor = ((double)abs_to_ns_denom / (double)abs_to_ns_num) * 1000;
965}
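/*
 * divisor ends up in timebase ticks per microsecond: abs_to_ns_num over
 * abs_to_ns_denom converts ticks to nanoseconds, so the inverse ratio
 * times 1000 converts microseconds back to ticks. It is used both to turn
 * tick deltas into microseconds and to scale the requested sleep time
 * into an absolute-time delay.
 */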
966
967/* This is the realtime band */
968static kern_return_t
969set_time_constraint_policy()
970{
971 kern_return_t result;
972 thread_time_constraint_policy_data_t info;
973 mach_msg_type_number_t count;
974 boolean_t get_default;
975
976 get_default = TRUE;
977 count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
978 result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
979 (thread_policy_t)&info, &count, &get_default);
980 if (result != KERN_SUCCESS)
981 return (result);
982
983 result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
984 (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
985
986 return (result);
987}
988
989/* This is the timeshare mode */
990static kern_return_t
991set_standard_policy()
992{
993 kern_return_t result;
994 thread_standard_policy_data_t info;
995 mach_msg_type_number_t count;
996 boolean_t get_default;
997
998 get_default = TRUE;
999 count = THREAD_STANDARD_POLICY_COUNT;
1000 result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
1001 (thread_policy_t)&info, &count, &get_default);
1002 if (result != KERN_SUCCESS)
1003 return (result);
1004
1005 result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
1006 (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
1007
1008 return (result);
1009}
1010
1011
1012void read_command_map()
1013{
1014 size_t size;
1015 int mib[6];
1016
1017 if (mapptr) {
1018 free(mapptr);
1019 mapptr = 0;
1020 }
1021 total_threads = bufinfo.nkdthreads;
1022 size = bufinfo.nkdthreads * sizeof(kd_threadmap);
1023 if (size)
1024 {
1025 if (mapptr = (kd_threadmap *) malloc(size))
1026 bzero (mapptr, size);
1027 else
1028 {
1029 printf("Thread map is not initialized -- this is not fatal\n");
1030 return;
1031 }
1032 }
1033
1034 /* Now read the threadmap */
1035 mib[0] = CTL_KERN;
1036 mib[1] = KERN_KDEBUG;
1037 mib[2] = KERN_KDTHRMAP;
1038 mib[3] = 0;
1039 mib[4] = 0;
1040 mib[5] = 0; /* no flags */
1041 if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
1042 {
1043 /* This is not fatal -- just means I can't map command strings */
1044
1045 printf("Can't read the thread map -- this is not fatal\n");
1046 free(mapptr);
1047 mapptr = 0;
1048 return;
1049 }
1050 return;
1051}
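/*
 * KERN_KDTHRMAP returns the trace facility's thread-to-command-name map
 * (one kd_threadmap per known thread); it is what lets the log show
 * process names instead of bare thread ids.
 */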
1052
1053
1054void create_map_entry(int thread, char *command)
1055{
1056 int i, n;
1057 kd_threadmap *map;
1058
1059 if (!mapptr)
1060 return;
1061
1062 for (i = 0, map = 0; !map && i < total_threads; i++)
1063 {
1064 if (mapptr[i].thread == thread )
1065 map = &mapptr[i]; /* Reuse this entry, the thread has been reassigned */
1066 }
1067
1068 if (!map) /* look for invalid entries that I can reuse*/
1069 {
1070 for (i = 0, map = 0; !map && i < total_threads; i++)
1071 {
1072 if (mapptr[i].valid == 0 )
1073 map = &mapptr[i]; /* Reuse this invalid entry */
1074 }
1075 }
1076
1077 if (!map)
1078 {
1079 /* If we reach here, this is a new thread and
1080 * there are no invalid entries to reuse,
1081 * so double the size of the thread map table.
1082 */
1083
1084 n = total_threads * 2;
1085 mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
1086 bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
1087 map = &mapptr[total_threads];
1088 total_threads = n;
1089#if 0
1090 if (log_fp)
1091 fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);
1092#endif
1093 }
1094#if 0
1095 if (log_fp)
1096 fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);
1097#endif
1098 map->valid = 1;
1099 map->thread = thread;
1100 (void)strncpy (map->command, command, sizeof(map->command));
1101 map->command[sizeof(map->command)-1] = '\0';
1102}
1103
1104
1105kd_threadmap *find_thread_map(int thread)
1106{
1107 int i;
1108 kd_threadmap *map;
1109
1110 if (!mapptr)
1111 return((kd_threadmap *)0);
1112
1113 for (i = 0; i < total_threads; i++)
1114 {
1115 map = &mapptr[i];
1116 if (map->valid && (map->thread == thread))
1117 {
1118 return(map);
1119 }
1120 }
1121 return ((kd_threadmap *)0);
1122}
1123
1124void
1125kill_thread_map(int thread)
1126{
1127 kd_threadmap *map;
1128
1129 if (map = find_thread_map(thread)) {
1130
1131#if 0
1132 if (log_fp)
1133 fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);
1134#endif
1135 map->valid = 0;
1136 map->thread = 0;
1137 map->command[0] = '\0';
1138 }
1139}
1140
1141
1142struct th_info *find_thread(int thread, int type1, int type2) {
1143 struct th_info *ti;
1144
1145 for (ti = th_state; ti < &th_state[cur_max]; ti++) {
1146 if (ti->thread == thread) {
1147 if (type1 == 0)
1148 return(ti);
1149 if (type1 == ti->type)
1150 return(ti);
1151 if (type2 == ti->type)
1152 return(ti);
1153 }
1154 }
1155 return ((struct th_info *)0);
1156}
1157
1158
1159char *find_code(type)
1160{
1161 int i;
1162
1163 for (i = 0; i < num_of_codes; i++) {
1164 if (codes_tab[i].type == type)
1165 return(codes_tab[i].name);
1166 }
1167 return ((char *)0);
1168}
1169
1170
1171void sample_sc(long long start, long long stop)
1172{
1173 kd_buf *kd, *last_mach_sched, *last_decrementer_kd, *start_kd, *end_of_sample;
1174 unsigned long long now;
1175 int count;
1176 int first_entry = 1;
1177 char command[32];
1178 double timestamp, last_timestamp, delta, start_bias;
1179 void read_command_map();
1180
1181 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1182 {
1183 /* set standard timeshare policy when logging */
1184 if(set_standard_policy() != KERN_SUCCESS)
1185 quit("Failed to set standard policy.\n");
1186 }
1187
1188 /* Get kernel buffer information */
1189 get_bufinfo(&bufinfo);
1190
1191 if (need_new_map) {
1192 read_command_map();
1193 need_new_map = 0;
1194 }
1195 needed = bufinfo.nkdbufs * sizeof(kd_buf);
1196 mib[0] = CTL_KERN;
1197 mib[1] = KERN_KDEBUG;
1198 mib[2] = KERN_KDREADTR;
1199 mib[3] = 0;
1200 mib[4] = 0;
1201 mib[5] = 0; /* no flags */
1202
1203 if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
1204 quit("trace facility failure, KERN_KDREADTR\n");
1205
1206 count = needed;
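 /*
 * On return from KERN_KDREADTR, "needed" holds the number of kd_buf
 * entries returned rather than a byte count, which is why it is used
 * directly as an entry count for end_of_sample below.
 */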
1207
1208 if (bufinfo.flags & KDBG_WRAPPED) {
1209 int i;
1210
1211 for (i = 0; i < cur_max; i++) {
1212 th_state[i].thread = 0;
1213 th_state[i].type = -1;
1214 th_state[i].vfslookup = 0;
1215 th_state[i].pathname[0] = 0;
1216 }
1217 cur_max = 0;
1218 need_new_map = 1;
1219
1220 set_enable(0);
1221 set_enable(1);
1222
1223 if (log_fp) {
1224 double latency;
1225
1226 latency = (double)(stop - start) / divisor;
1227 latency -= (double)num_of_usecs_to_sleep;
1228
1229 fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
1230 &(ctime(&curr_time)[0]), latency, count);
1231 }
1232 }
1233 end_of_sample = &((kd_buf *)my_buffer)[count];
1234 last_decrementer_kd = (kd_buf *)my_buffer;
1235 last_mach_sched = (kd_buf *)0;
1236
1237 for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
1238 int debugid, thread, cpunum;
1239 int type, clen, mode;
1240 char *p;
1241 long *sargptr;
1242 double i_latency;
1243 struct th_info *ti;
1244 char command1[32];
1245 char sched_info[64];
1246 kd_threadmap *map;
1247 kd_threadmap *find_thread_map();
1248 double handle_decrementer();
1249 kd_buf *log_decrementer();
1250 int check_for_thread_update();
1251 void enter_syscall();
1252 void exit_syscall();
1253 void print_entry();
1254
1255 thread = kd->arg5 & KDBG_THREAD_MASK;
1256 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1257 debugid = kd->debugid;
1258 type = kd->debugid & DBG_FUNC_MASK;
1259
1260 if (check_for_thread_update(thread, type, kd))
1261 continue;
1262
1263 if (type == DECR_TRAP)
1264 i_latency = handle_decrementer(kd);
1265
1266 now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
1267 (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));
1268
1269 timestamp = ((double)now) / divisor;
1270
1271 if (now < start || now > stop) {
1272 if (debugid & DBG_FUNC_START)
1273 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1274 else if (debugid & DBG_FUNC_END)
1275 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
1276 else if (type == DECR_TRAP) {
1277 if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
1278 start_kd = last_decrementer_kd;
1279 kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
1280
1281 if (kd >= end_of_sample)
1282 break;
1283 }
1284 last_decrementer_kd = kd;
1285 }
1286 continue;
1287 }
1288 if (first_entry) {
1289 double latency;
1290 char buf1[128];
1291 char buf2[128];
1292
1293 latency = (double)(stop - start) / divisor;
1294 latency -= (double)num_of_usecs_to_sleep;
1295
1296 if (my_pri == -1)
1297 sprintf(buf2, "default");
1298 else
1299 sprintf(buf2, "%d", my_pri);
1300 sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
1301 &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
1302 clen = strlen(buf1);
1303 memset(buf2, '-', clen);
1304 buf2[clen] = 0;
1305
1306 if (log_fp) {
1307 fprintf(log_fp, "\n\n%s\n", buf2);
1308 fprintf(log_fp, "%s\n\n", buf1);
1309 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1310 }
1311 start_bias = ((double)start) / divisor;
1312 last_timestamp = timestamp;
1313 first_entry = 0;
1314 }
1315 delta = timestamp - last_timestamp;
1316
1317 if (map = find_thread_map(thread))
1318 strcpy(command, map->command);
1319 else
1320 command[0] = 0;
1321
1322 switch (type) {
1323
1324 case CQ_action:
1325 if (log_fp) {
1326 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1327 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1328 }
1329 last_timestamp = timestamp;
1330 break;
1331
1332 case TES_action:
1333 if (log_fp) {
1334 fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
1335 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1336 }
1337
1338 last_timestamp = timestamp;
1339 break;
1340
1341 case IES_action:
1342 if (log_fp) {
1343 fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
1344 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1345 }
1346
1347 last_timestamp = timestamp;
1348 break;
1349
1350 case IES_filter:
1351 if (log_fp) {
1352 fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
1353 timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
1354 }
1355
1356 last_timestamp = timestamp;
1357 break;
1358
1359 case DECR_TRAP:
1360 last_decrementer_kd = kd;
1361
1362 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1363 p = "*";
1364 else
1365 p = " ";
1366
1367 mode = 1;
1368
1369 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1370 if (ti->type == -1 && strcmp(command, "kernel_task"))
1371 mode = 0;
1372 }
1373
1374 if (log_fp) {
1375 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1376 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1377 }
1378
1379 last_timestamp = timestamp;
1380 break;
1381
1382 case DECR_SET:
1383 if (log_fp) {
1384 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1385 timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
1386 }
1387
1388 last_timestamp = timestamp;
1389 break;
1390
1391 case MACH_sched:
1392 case MACH_stkhandoff:
1393 last_mach_sched = kd;
1394
1395 if (map = find_thread_map(kd->arg2))
1396 strcpy(command1, map->command);
1397 else
1398 sprintf(command1, "%-8x", kd->arg2);
1399
1400 if (ti = find_thread(kd->arg2, 0, 0)) {
1401 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1402 p = "U";
1403 else
1404 p = "K";
1405 } else
1406 p = "*";
1407 memset(sched_info, ' ', sizeof(sched_info));
1408
1409 sprintf(sched_info, "%14.14s", command);
1410 clen = strlen(sched_info);
1411 sched_info[clen] = ' ';
1412
1413 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1414 clen = strlen(sched_info);
1415 sched_info[clen] = ' ';
1416
1417 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1418
1419 if (log_fp) {
1420 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1421 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1422 }
1423
1424 last_timestamp = timestamp;
1425 break;
1426
1427 case VFS_LOOKUP:
1428 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1429 if (cur_max >= MAX_THREADS)
1430 continue;
1431 ti = &th_state[cur_max++];
1432
1433 ti->thread = thread;
1434 ti->type = -1;
1435 ti->vfslookup = 0;
1436 ti->child_thread = 0;
1437 }
1438 if (ti->vfslookup == 0) {
1439 ti->vfslookup = 1;
1440 ti->arg1 = kd->arg1;
1441 memset(&ti->pathname[0], 0, 32);
1442 sargptr = (long *)&ti->pathname[0];
1443
1444 *sargptr++ = kd->arg2;
1445 *sargptr++ = kd->arg3;
1446 *sargptr++ = kd->arg4;
1447
1448 } else if (ti->vfslookup == 1) {
1449 ti->vfslookup = 0;
1450
1451 sargptr = (long *)&ti->pathname[12];
1452 *sargptr++ = kd->arg1;
1453 *sargptr++ = kd->arg2;
1454 *sargptr++ = kd->arg3;
1455 *sargptr++ = kd->arg4;
1456
1457 if (log_fp) {
1458 fprintf(log_fp, "%9.1f %8.1f\t\t%-28.28s %-28s %-8x %-8x %d %s\n",
1459 timestamp - start_bias, delta, "VFS_LOOKUP",
1460 ti->pathname, ti->arg1, thread, cpunum, command);
1461 }
1462 }
1463 last_timestamp = timestamp;
1464 break;
1465
1466 default:
1467 if (debugid & DBG_FUNC_START)
1468 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1469 else if (debugid & DBG_FUNC_END)
1470 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1471 else
1472 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1473
1474 last_timestamp = timestamp;
1475 break;
1476 }
1477 }
1478 if (last_mach_sched && log_fp)
1479 fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);
1480#if 0
1481 if (first_entry == 0 && log_fp)
1482 fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);
1483#endif
1484 if (log_fp)
1485 fflush(log_fp);
1486
1487 if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
1488 {
1489 /* set back to realtime band */
1490 if(set_time_constraint_policy() != KERN_SUCCESS)
1491 quit("Failed to set time_constraint policy.\n");
1492 }
1493}
1494
1495void
1496enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1497{
1498 struct th_info *ti;
1499 int i;
1500 int cpunum;
1501 char *p;
1502
1503 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1504
1505 if (print_info && fp) {
1506 if (p = find_code(type)) {
1507 if (type == INTERRUPT) {
1508 int mode = 1;
1509
1510 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1511 if (ti->type == -1 && strcmp(command, "kernel_task"))
1512 mode = 0;
1513 }
1514
1515 fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
1516 timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
1517 } else if (type == MACH_vmfault) {
1518 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
1519 timestamp - bias, delta, p, thread, cpunum, command);
1520 } else {
1521 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1522 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1523 thread, cpunum, command);
1524 }
1525 } else {
1526 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1527 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1528 thread, cpunum, command);
1529 }
1530 }
1531 if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
1532 if (cur_max >= MAX_THREADS) {
1533 static int do_this_once = 1;
1534
1535 if (do_this_once) {
1536 for (i = 0; i < cur_max; i++) {
1537 if (!fp)
1538 break;
1539 fprintf(fp, "thread = %x, type = %x\n",
1540 th_state[i].thread, th_state[i].type);
1541 }
1542 do_this_once = 0;
1543 }
1544 return;
1545
1546 }
1547 ti = &th_state[cur_max++];
1548
1549 ti->thread = thread;
1550 ti->child_thread = 0;
1551 }
1552 if (type != BSC_exit)
1553 ti->type = type;
1554 else
1555 ti->type = -1;
1556 ti->stime = timestamp;
1557 ti->vfslookup = 0;
1558#if 0
1559 if (print_info && fp)
1560 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
1561#endif
1562}
1563
1564
1565void
1566exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
1567{
1568 struct th_info *ti;
1569 int cpunum;
1570 char *p;
1571
1572 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1573 ti = find_thread(thread, type, type);
1574#if 0
1575 if (print_info && fp)
1576 fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);
1577#endif
1578 if (print_info && fp) {
1579 if (ti)
1580 fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
1581 else
1582 fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);
1583
1584 if (p = find_code(type)) {
1585 if (type == INTERRUPT) {
1586 fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
1587 } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
1588 fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
1589 p, fault_name[kd->arg2], kd->arg1,
1590 thread, cpunum, command);
1591 } else {
1592 fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
1593 p, kd->arg1, kd->arg2,
1594 thread, cpunum, command);
1595 }
1596 } else {
1597 fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
1598 type, kd->arg1, kd->arg2,
1599 thread, cpunum, command);
1600 }
1601 }
1602 if (ti == (struct th_info *)0) {
1603 if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
1604 if (cur_max >= MAX_THREADS)
1605 return;
1606 ti = &th_state[cur_max++];
1607
1608 ti->thread = thread;
1609 ti->child_thread = 0;
1610 ti->vfslookup = 0;
1611 }
1612 }
1613 ti->type = -1;
1614}
1615
1616void
1617print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
1618{
1619 char *p;
1620 int cpunum;
1621
1622 if (!fp)
1623 return;
1624
1625 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1626
1627#if 0
1628 fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);
1629#endif
1630 if (p = find_code(type)) {
1631 fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
1632 timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1633 thread, cpunum, command);
1634 } else {
1635 fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
1636 timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
1637 thread, cpunum, command);
1638 }
1639}
1640
1641int
1642check_for_thread_update(int thread, int type, kd_buf *kd)
1643{
1644 struct th_info *ti;
1645 void create_map_entry();
1646
1647 switch (type) {
1648
1649 case TRACE_DATA_NEWTHREAD:
1650 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1651 if (cur_max >= MAX_THREADS)
1652 return (1);
1653 ti = &th_state[cur_max++];
1654
1655 ti->thread = thread;
1656 ti->type = -1;
1657 ti->vfslookup = 0;
1658 }
1659 ti->child_thread = kd->arg1;
1660 return (1);
1661
1662 case TRACE_STRING_NEWTHREAD:
1663 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
1664 return (1);
1665 if (ti->child_thread == 0)
1666 return (1);
1667 create_map_entry(ti->child_thread, (char *)&kd->arg1);
1668
1669 ti->child_thread = 0;
1670 return (1);
1671
1672 case TRACE_STRING_EXEC:
1673 create_map_entry(thread, (char *)&kd->arg1);
1674 return (1);
1675
1676 }
1677 return (0);
1678}
1679
1680
1681kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
1682{
1683 kd_buf *kd, *kd_start, *kd_stop;
1684 double timestamp, last_timestamp, delta, start_bias;
1685 int thread, cpunum;
1686 int debugid, type, clen;
1687 unsigned long long now;
1688 struct th_info *ti;
1689 long *sargptr;
1690 char *p;
1691 char command[32];
1692 char command1[32];
1693 char sched_info[64];
1694 char buf1[128];
1695 char buf2[128];
1696 kd_threadmap *map;
1697 kd_threadmap *find_thread_map();
1698
1699 sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
1700 clen = strlen(buf1);
1701 memset(buf2, '-', clen);
1702 buf2[clen] = 0;
1703 fprintf(log_fp, "\n\n%s\n", buf2);
1704 fprintf(log_fp, "%s\n\n", buf1);
1705
1706 fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");
1707
1708 thread = kd_beg->arg5 & KDBG_THREAD_MASK;
1709
1710 for (kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer) && (kd_start->arg5 & KDBG_THREAD_MASK) == thread; kd_start--) {
1711 if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1712 break;
1713 }
1714 if (kd_start < (kd_buf *)my_buffer)
1715 kd_start = (kd_buf *)my_buffer;
1716
1717 for (kd_stop = kd_end + 1; kd_stop < end_of_sample && (kd_stop->arg5 & KDBG_THREAD_MASK) == thread; kd_stop++) {
1718 if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
1719 break;
1720 }
1721 if (kd_stop >= end_of_sample)
1722 kd_stop = end_of_sample - 1;
1723
1724 now = (((unsigned long long)kd_start->timestamp.tv_sec) << 32) |
1725 (unsigned long long)((unsigned int)(kd_start->timestamp.tv_nsec));
1726 timestamp = ((double)now) / divisor;
1727
1728 for (kd = kd_start; kd <= kd_stop; kd++) {
1729 type = kd->debugid & DBG_FUNC_MASK;
1730
1731 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
1732 if (ti->stime >= timestamp)
1733 ti->type = -1;
1734 }
1735 }
1736 for (kd = kd_start; kd <= kd_stop; kd++) {
1737 int mode;
1738
1739 thread = kd->arg5 & KDBG_THREAD_MASK;
1740 cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
1741 debugid = kd->debugid;
1742 type = kd->debugid & DBG_FUNC_MASK;
1743
1744 now = (((unsigned long long)kd->timestamp.tv_sec) << 32) |
1745 (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec));
1746
1747 timestamp = ((double)now) / divisor;
1748
1749 if (kd == kd_start) {
1750 start_bias = timestamp;
1751 last_timestamp = timestamp;
1752 }
1753 delta = timestamp - last_timestamp;
1754
1755 if (map = find_thread_map(thread))
1756 strcpy(command, map->command);
1757 else
1758 command[0] = 0;
1759
1760
1761 switch (type) {
1762
1763 case CQ_action:
1764 fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
1765 timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
1766
1767 last_timestamp = timestamp;
1768 break;
1769
1770 case DECR_TRAP:
1771 if ((int)(kd->arg1) >= 0)
1772 i_latency = 0;
1773 else
1774 i_latency = (((double)(-1 - kd->arg1)) / divisor);
1775
1776 if (i_thresh_hold && (int)i_latency > i_thresh_hold)
1777 p = "*";
1778 else
1779 p = " ";
1780
1781 mode = 1;
1782
1783 if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
1784 if (ti->type == -1 && strcmp(command, "kernel_task"))
1785 mode = 0;
1786 }
1787 fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
1788 timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);
1789
1790 last_timestamp = timestamp;
1791 break;
1792
1793 case DECR_SET:
1794 fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
1795 timestamp - start_bias, delta, (double)kd->arg1/divisor,
1796 "DECR_SET", thread, cpunum, command);
1797
1798 last_timestamp = timestamp;
1799 break;
1800
1801 case MACH_sched:
1802 case MACH_stkhandoff:
1803 if (map = find_thread_map(kd->arg2))
1804 strcpy(command1, map->command);
1805 else
1806 sprintf(command1, "%-8x", kd->arg2);
1807
1808 if (ti = find_thread(kd->arg2, 0, 0)) {
1809 if (ti->type == -1 && strcmp(command1, "kernel_task"))
1810 p = "U";
1811 else
1812 p = "K";
1813 } else
1814 p = "*";
1815 memset(sched_info, ' ', sizeof(sched_info));
1816
1817 sprintf(sched_info, "%14.14s", command);
1818 clen = strlen(sched_info);
1819 sched_info[clen] = ' ';
1820
1821 sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
1822 clen = strlen(sched_info);
1823 sched_info[clen] = ' ';
1824
1825 sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);
1826
1827 fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
1828 timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);
1829
1830 last_timestamp = timestamp;
1831 break;
1832
1833 case VFS_LOOKUP:
1834 if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
1835 if (cur_max >= MAX_THREADS)
1836 continue;
1837 ti = &th_state[cur_max++];
1838
1839 ti->thread = thread;
1840 ti->type = -1;
1841 ti->vfslookup = 0;
1842 ti->child_thread = 0;
1843 }
1844 if (ti->vfslookup == 0) {
1845 ti->vfslookup = 1;
1846 ti->arg1 = kd->arg1;
1847 memset(&ti->pathname[0], 0, 32);
1848 sargptr = (long *)&ti->pathname[0];
1849
1850 *sargptr++ = kd->arg2;
1851 *sargptr++ = kd->arg3;
1852 *sargptr++ = kd->arg4;
1853
1854 } else if (ti->vfslookup == 1) {
1855 ti->vfslookup = 0;
1856
1857 sargptr = (long *)&ti->pathname[12];
1858 *sargptr++ = kd->arg1;
1859 *sargptr++ = kd->arg2;
1860 *sargptr++ = kd->arg3;
1861 *sargptr++ = kd->arg4;
1862
1863 fprintf(log_fp, "%9.1f %8.1f\t\t%-28.28s %-28s %-8x %-8x %d %s\n",
1864 timestamp - start_bias, delta, "VFS_LOOKUP",
1865 ti->pathname, ti->arg1, thread, cpunum, command);
1866 }
1867 last_timestamp = timestamp;
1868 break;
1869
1870 default:
1871 if (debugid & DBG_FUNC_START)
1872 enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1873 else if (debugid & DBG_FUNC_END)
1874 exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
1875 else
1876 print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);
1877
1878 last_timestamp = timestamp;
1879 break;
1880 }
1881 }
1882 return(kd_stop);
1883}
1884
1885
1886double handle_decrementer(kd_buf *kd)
1887{
1888 double latency;
1889 int elapsed_usecs;
1890
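 /*
 * For DECR_TRAP entries, arg1 appears to carry how far past its deadline
 * the decrementer fired, as a negative tick count; a non-negative value
 * is treated as no measurable overrun (reading inferred from this
 * decoding, not from kernel headers).
 */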
1891 if ((int)(kd->arg1) >= 0)
1892 latency = 1;
1893 else
1894 latency = (((double)(-1 - kd->arg1)) / divisor);
1895 elapsed_usecs = (int)latency;
1896
1897 if (elapsed_usecs < 100)
1898 i_usec_10_bins[elapsed_usecs/10]++;
1899 if (elapsed_usecs < 1000)
1900 i_usec_100_bins[elapsed_usecs/100]++;
1901 else if (elapsed_usecs < 10000)
1902 i_msec_1_bins[elapsed_usecs/1000]++;
1903 else if (elapsed_usecs < 50000)
1904 i_msec_10_bins[elapsed_usecs/10000]++;
1905 else
1906 i_too_slow++;
1907
1908 if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
1909 i_exceeded_threshold++;
1910 if (elapsed_usecs > i_max_latency)
1911 i_max_latency = elapsed_usecs;
1912 if (elapsed_usecs < i_min_latency || i_total_samples == 0)
1913 i_min_latency = elapsed_usecs;
1914 i_total_latency += elapsed_usecs;
1915 i_total_samples++;
1916
1917 return (latency);
1918}
1919
1920
1921void init_code_file()
1922{
1923 FILE *fp;
1924 int i, n, cnt, code;
1925 char name[128];
1926
1927 if ((fp = fopen(code_file, "r")) == (FILE *)0) {
1928 if (log_fp)
1929 fprintf(log_fp, "open of %s failed\n", code_file);
1930 return;
1931 }
1932 n = fscanf(fp, "%d\n", &cnt);
1933
1934 if (n != 1) {
1935 if (log_fp)
1936 fprintf(log_fp, "bad format found in %s\n", code_file);
1937 return;
1938 }
1939 for (i = 0; i < MAX_ENTRIES; i++) {
1940 n = fscanf(fp, "%x%s\n", &code, name);
1941
1942 if (n != 2)
1943 break;
1944
1945 strncpy(codes_tab[i].name, name, 32);
1946 codes_tab[i].type = code;
1947 }
1948 num_of_codes = i;
1949
1950 fclose(fp);
1951}
1952
1953
1954void
1955do_kernel_nm()
1956{
1957 int i, len;
1958 FILE *fp = (FILE *)0;
1959 char tmp_nm_file[128];
1960 char tmpstr[1024];
1961 int inchr;
1962
1963 bzero(tmp_nm_file, 128);
1964 bzero(tmpstr, 1024);
1965
1966 /* Build the temporary nm file path */
1967 sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());
1968
1969 /* Build the nm command and create a tmp file with the output*/
1970 sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
1971 kernelpath, tmp_nm_file);
1972 system(tmpstr);
1973
1974 /* Parse the output from the nm command */
1975 if ((fp=fopen(tmp_nm_file, "r")) == (FILE *)0)
1976 {
1977 /* Hmmm, let's not treat this as fatal */
1978 fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
1979 return;
1980 }
1981
1982 /* Count the number of symbols in the nm symbol table */
1983 kern_sym_count=0;
1984 while ( (inchr = getc(fp)) != -1)
1985 {
1986 if (inchr == '\n')
1987 kern_sym_count++;
1988 }
1989
1990 rewind(fp);
1991
1992 /* Malloc the space for symbol table */
1993 if (kern_sym_count > 0)
1994 {
1995 kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
1996 if (!kern_sym_tbl)
1997 {
1998 /* Hmmm, let's not treat this as fatal */
1999 fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
2000 }
2001 else
2002 bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
2003 }
2004 else
2005 {
2006 /* Hmmm, let's not treat this as fatal */
2007 fprintf(stderr, "No kernel symbol table\n");
2008 }
2009
2010 for (i=0; i<kern_sym_count; i++)
2011 {
2012 bzero(tmpstr, 1024);
2013 if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)
2014 break;
2015 else
2016 {
2017 len = strlen(tmpstr);
2018 kern_sym_tbl[i].k_sym_name = malloc(len + 1);
2019
2020 if (kern_sym_tbl[i].k_sym_name == (char *)0)
2021 {
2022 fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
2023 kern_sym_tbl[i].k_sym_name = (char *)0;
2024 len = 0;
2025 }
2026 else
2027 strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
2028
2029 kern_sym_tbl[i].k_sym_len = len;
2030 }
2031 } /* end for */
2032
2033 if (i != kern_sym_count)
2034 {
2035 /* Hmmm, didn't build up entire table from nm */
2036 /* scrap the entire thing */
2037 if (kern_sym_tbl)
2038 free (kern_sym_tbl);
2039 kern_sym_tbl = (kern_sym_t *)0;
2040 kern_sym_count = 0;
2041 }
2042
2043 fclose(fp);
2044
2045 /* Remove the temporary nm file */
2046 unlink(tmp_nm_file);
2047
2048#if 0
2049 /* Dump the kernel symbol table */
2050 for (i=0; i < kern_sym_count; i++)
2051 {
2052 if (kern_sym_tbl[i].k_sym_name)
2053 printf ("[%d] 0x%x %s\n", i,
2054 kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
2055 else
2056 printf ("[%d] 0x%x %s\n", i,
2057 kern_sym_tbl[i].k_sym_addr, "No symbol name");
2058 }
2059#endif
2060}
2061
2062char *
2063pc_to_string(unsigned int pc, int max_len, int mode)
2064{
2065 int ret;
2066 int len;
2067
2068 int binary_search();
2069
2070 if (mode == 0)
2071 {
2072 sprintf(pcstring, "0x%-8x [usermode addr]", pc);
2073 return(pcstring);
2074 }
2075
2076 ret=0;
2077 ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);
2078
2079 if (ret == -1)
2080 {
2081 sprintf(pcstring, "0x%x", pc);
2082 return(pcstring);
2083 }
2084 else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
2085 {
2086 sprintf(pcstring, "0x%x", pc);
2087 return(pcstring);
2088 }
2089 else
2090 {
2091 if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))
2092 len = max_len - 8;
2093
2094 memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
2095 sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
2096
2097 return (pcstring);
2098 }
2099}
2100
2101
2102/* Return -1 if not found, else return index */
2103int binary_search(list, low, high, addr)
2104kern_sym_t *list;
2105int low, high;
2106unsigned int addr;
2107{
2108 int mid;
2109
2110 mid = (low + high) / 2;
2111
2112 if (low > high)
2113 return (-1); /* failed */
2114 else if (low + 1 == high)
2115 {
2116 if (list[low].k_sym_addr <= addr &&
2117 addr < list[high].k_sym_addr)
2118 {
2119 /* We have a range match */
2120 return(low);
2121 }
2122 else if (list[high].k_sym_addr <= addr)
2123 {
2124 return(high);
2125 }
2126 else
2127 return(-1); /* Failed */
2128 }
2129 else if (addr < list[mid].k_sym_addr)
2130 {
2131 return(binary_search (list, low, mid, addr));
2132 }
2133 else
2134 {
2135 return(binary_search (list, mid, high, addr));
2136 }
2137}
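/*
 * binary_search() is a range lookup rather than an exact match: it returns
 * the index of the symbol with the greatest address <= addr, so that
 * pc_to_string() can print "symbol+offset" for any kernel text address.
 */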
2138
2139void
2140open_logfile(char *path)
2141{
2142 log_fp = fopen(path, "a");
2143
2144 if (!log_fp)
2145 {
2146 /* failed to open path */
2147 fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
2148 exit_usage();
2149 }
2150}