/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
 * Reserved.  This file contains Original Code and/or Modifications of
 * Original Code as defined in and that are subject to the Apple Public
 * Source License Version 1.0 (the 'License').  You may not use this file
 * except in compliance with the License.  Please obtain a copy of the
 * License at http://www.apple.com/publicsource and read it before using
 * this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License."
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
   cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
*/

#include <mach/mach.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/ioctl.h>

#ifndef KERNEL_PRIVATE
#define KERNEL_PRIVATE
#include <sys/kdebug.h>
#undef KERNEL_PRIVATE
#else
#include <sys/kdebug.h>
#endif /*KERNEL_PRIVATE*/

#include <sys/sysctl.h>

#include <mach/host_info.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/mach_syscalls.h>
#include <mach/clock_types.h>
#include <mach/mach_time.h>

#include <libkern/OSTypes.h>

extern mach_port_t clock_port;

#define KERN_KDPIDEX 14

int	s_usec_10_bins[10];
int	s_usec_100_bins[10];
int	s_msec_1_bins[10];
int	s_msec_10_bins[5];
int	s_min_latency = 0;
long long s_total_latency = 0;
int	s_exceeded_threshold = 0;

int	i_usec_10_bins[10];
int	i_usec_100_bins[10];
int	i_msec_1_bins[10];
int	i_msec_10_bins[5];
int	i_min_latency = 0;
long long i_total_latency = 0;
int	i_exceeded_threshold = 0;

int	num_of_usecs_to_sleep = 1000;

char	*kernelpath = (char *)0;
char	*code_file = (char *)0;

u_long	k_sym_addr;		/* kernel symbol address from nm */
u_int	k_sym_len;		/* length of kernel symbol string */
char	*k_sym_name;		/* kernel symbol string from nm */

kern_sym_t *kern_sym_tbl;	/* pointer to the nm table */
int	kern_sym_count;		/* number of entries in nm table */

#define UNKNOWN "Can't find symbol name"

int	trace_enabled = 0;
struct host_basic_info hi;

#define SAMPLE_SIZE 300000

kbufinfo_t bufinfo = {0, 0, 0};

FILE	*log_fp = (FILE *)0;
int	num_of_codes = 0;
int	need_new_map = 0;
int	total_threads = 0;
kd_threadmap *mapptr = 0;

#define MAX_ENTRIES 1024
struct ct {
	int	type;
	char	name[32];
} codes_tab[MAX_ENTRIES];

/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
#define PATHLENGTH (NUMPARMS*sizeof(long))

char	pathname[PATHLENGTH + 1];

#define MAX_THREADS 512
struct th_info th_state[MAX_THREADS];

#define TRACE_DATA_NEWTHREAD	0x07000004
#define TRACE_STRING_NEWTHREAD	0x07010004
#define TRACE_STRING_EXEC	0x07010008

#define INTERRUPT		0x01050000
#define DECR_TRAP		0x01090000
#define DECR_SET		0x01090004
#define MACH_vmfault		0x01300000
#define MACH_sched		0x01400000
#define MACH_stkhandoff		0x01400008
#define VFS_LOOKUP		0x03010090
#define BSC_exit		0x040C0004
#define IES_action		0x050b0018
#define IES_filter		0x050b001c
#define TES_action		0x050c0010
#define CQ_action		0x050d0018

#define DBG_FUNC_ALL	(DBG_FUNC_START | DBG_FUNC_END)
#define DBG_FUNC_MASK	0xfffffffc

#define DBG_ZERO_FILL_FAULT	1
#define DBG_PAGEIN_FAULT	2
#define DBG_COW_FAULT		3
#define DBG_CACHE_HIT_FAULT	4

char *fault_name[5] = {
	"", "ZeroFill", "PageIn", "COW", "CacheHit" };

char *pc_to_string();
static kern_return_t set_time_constraint_policy(void);
static kern_return_t set_standard_policy(void);

int decrementer_val = 0;	/* Value used to reset decrementer */
int set_remove_flag = 1;	/* By default, remove trace buffer */

kd_buf **last_decrementer_kd;	/* last DECR_TRAP per cpu */

#define MAX_LOG_COUNT 30	/* limits the number of entries dumped in log_decrementer */

/*
   This flag is turned off when calling
   quit() due to a set_remove() failure.
*/

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDENABLE;		/* protocol */
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDENABLE\n");

set_numbufs(int nbufs)

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETBUF;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETBUF\n");

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");

set_pidexclude(int pid, int on_off)

	kr.type = KDBG_TYPENONE;
	needed = sizeof(kd_regtype);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDPIDEX;

	sysctl(mib, 3, &kr, &needed, NULL, 0);

	kr.type = KDBG_TYPENONE;
	needed = sizeof(kd_regtype);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETRTCDEC;	/* protocol */
	mib[3] = 0;			/* wildcard address family */
	mib[5] = 0;			/* no flags */

	if ((ret = sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
		quit("trace facility failure, KERN_KDSETRTCDEC\n");

get_bufinfo(kbufinfo_t *val)

	needed = sizeof (*val);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDGETBUF;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
		quit("trace facility failure, KERN_KDGETBUF\n");

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDREMOVE;		/* protocol */
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");

		quit("trace facility failure, KERN_KDREMOVE\n");

	/* When we aren't logging, only collect the DECR_TRAP trace points */
	kr.type = KDBG_VALCHECK;
	kr.value1 = DECR_TRAP;
	needed = sizeof(kd_regtype);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETREG;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETREG\n");

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");

	kr.type = KDBG_RANGETYPE;
	needed = sizeof(kd_regtype);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETREG;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETREG\n");

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDSETUP;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDSETUP\n");

	void screen_update();

	set_pidexclude(getpid(), 0);
	screen_update(log_fp);

void leave()			/* exit under normal conditions -- signal handler */

	set_pidexclude(getpid(), 0);

screen_update(FILE *fp)

	unsigned int average_s_latency;
	unsigned int average_i_latency;

	if (fp == (FILE *)0) {

	fprintf(fp, "\n\n===================================================================================================\n");

	/*
	 *  Display the current time.
	 *  "ctime" always returns a string that looks like this:
	 *
	 *	Sun Sep 16 01:03:52 1973
	 *	012345678901234567890123
	 *
	 *  We want indices 11 thru 18 (length 8).
	 */
	elapsed_secs = curr_time - start_time;
	elapsed_hours = elapsed_secs / 3600;
	elapsed_secs -= elapsed_hours * 3600;
	elapsed_mins = elapsed_secs / 60;
	elapsed_secs -= elapsed_mins * 60;

	sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
		elapsed_hours, elapsed_mins, elapsed_secs);
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "---------------------------------------------\n");
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
	fprintf(fp, "%s", tbuf);

	for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
		sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
		fprintf(fp, "%s", tbuf);

		stotal += s_usec_10_bins[i];
		itotal += i_usec_10_bins[i];

	sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
	fprintf(fp, "%s", tbuf);

	for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
		if (i < 9)
			sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
		else
			sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
		fprintf(fp, "%s", tbuf);

		stotal += s_usec_100_bins[i];
		itotal += i_usec_100_bins[i];

	sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
	fprintf(fp, "%s", tbuf);

	for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
		sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
		fprintf(fp, "%s", tbuf);

		stotal += s_msec_1_bins[i];
		itotal += i_msec_1_bins[i];

	sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
	fprintf(fp, "%s", tbuf);

	for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
		sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
		fprintf(fp, "%s", tbuf);

		stotal += s_msec_10_bins[i];
		itotal += i_msec_10_bins[i];

	sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
	fprintf(fp, "%s", tbuf);

	if (s_total_samples)
		average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
	else
		average_s_latency = 0;

	if (i_total_samples)
		average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
	else
		average_i_latency = 0;

	sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
	fprintf(fp, "%s", tbuf);

	fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
	fprintf(stderr, "               [-it threshold] [-s sleep_in_usecs]\n");
	fprintf(stderr, "               [-d decrementer_in_usecs] [-n kernel]\n\n");

	fprintf(stderr, "  -rt   Set realtime scheduling policy. Default is timeshare.\n");
	fprintf(stderr, "  -c    specify name of codes file\n");
	fprintf(stderr, "  -l    specify name of file to log trace entries to when threshold is exceeded\n");
	fprintf(stderr, "  -st   set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
	fprintf(stderr, "  -it   set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
	fprintf(stderr, "  -s    set sleep time in microseconds\n");
	fprintf(stderr, "  -d    set decrementer in microseconds.\n");
	fprintf(stderr, "  -n    specify kernel, default is /mach_kernel\n");

	fprintf(stderr, "\nlatency must be run as root\n\n");

	mach_timespec_t remain;
	uint64_t start, stop;
	uint64_t adeadline, adelay;
	double nanosecs_to_sleep;
	int loop_cnt, sample_sc_now;
	int decrementer_usec = 0;
	host_name_port_t host;
	void init_code_file();

	my_policy = THREAD_STANDARD_POLICY;
	policy_name = "TIMESHARE";

		if (strcmp(argv[1], "-rt") == 0) {
			my_policy = THREAD_TIME_CONSTRAINT_POLICY;	/* the real time band */
			policy_name = "REALTIME";

		} else if (strcmp(argv[1], "-st") == 0) {
			s_thresh_hold = atoi(argv[1]);

		} else if (strcmp(argv[1], "-it") == 0) {
			i_thresh_hold = atoi(argv[1]);

		} else if (strcmp(argv[1], "-c") == 0) {

		} else if (strcmp(argv[1], "-l") == 0) {
			open_logfile(argv[1]);

		} else if (strcmp(argv[1], "-s") == 0) {
			num_of_usecs_to_sleep = atoi(argv[1]);

		} else if (strcmp(argv[1], "-d") == 0) {
			decrementer_usec = atoi(argv[1]);

		} else if (strcmp(argv[1], "-n") == 0) {
			kernelpath = argv[1];

	if ( geteuid() != 0 ) {
		printf("'latency' must be run as root...\n");

	if (kernelpath == (char *) 0)
		kernelpath = "/mach_kernel";

	if (code_file == (char *) 0)
		code_file = "/usr/share/misc/trace.codes";

	sample_sc_now = 25000 / num_of_usecs_to_sleep;

	decrementer_val = decrementer_usec * divisor;

	/* get the cpu count for the DECR_TRAP array */
	host = mach_host_self();
	size = sizeof(hi)/sizeof(int);
	ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);

	if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
		quit("can't allocate memory for decrementer tracing info\n");

	nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
	fdelay = nanosecs_to_sleep * (divisor /1000);
	adelay = (uint64_t)fdelay;

	/*
	   When the decrementer isn't set in the options,
	   decval will be zero and this call will reset
	   the system default ...
	*/
	set_rtcdec(decrementer_val);

	if (initscr() == (WINDOW *) 0)
		printf("Unrecognized TERM type, try vt100\n");

	signal(SIGWINCH, sigwinch);
	signal(SIGINT, sigintr);
	signal(SIGQUIT, leave);
	signal(SIGTERM, leave);
	signal(SIGHUP, leave);

	if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
		quit("can't allocate memory for tracing info\n");

	set_numbufs(SAMPLE_SIZE);

	set_init_nologging();
	set_pidexclude(getpid(), 1);

	start_time = time((long *)0);
	refresh_time = start_time;

	if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
		/* the realtime band */
		if(set_time_constraint_policy() != KERN_SUCCESS)
			quit("Failed to set realtime policy.\n");

		curr_time = time((long *)0);

		if (curr_time >= refresh_time) {
			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
				/* set standard timeshare policy during screen update */
				if(set_standard_policy() != KERN_SUCCESS)
					quit("Failed to set standard policy.\n");

			screen_update((FILE *)0);

			if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
				/* set back to realtime band */
				if(set_time_constraint_policy() != KERN_SUCCESS)
					quit("Failed to set time_constraint policy.\n");

			refresh_time = curr_time + 1;

		timestamp1 = mach_absolute_time();
		adeadline = timestamp1 + adelay;
		mk_wait_until(adeadline);
		timestamp2 = mach_absolute_time();

		elapsed_usecs = (int)(((double)(stop - start)) / divisor);

		if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)

		if (elapsed_usecs < 100)
			s_usec_10_bins[elapsed_usecs/10]++;
		if (elapsed_usecs < 1000)
			s_usec_100_bins[elapsed_usecs/100]++;
		else if (elapsed_usecs < 10000)
			s_msec_1_bins[elapsed_usecs/1000]++;
		else if (elapsed_usecs < 50000)
			s_msec_10_bins[elapsed_usecs/10000]++;

		if (elapsed_usecs > s_max_latency)
			s_max_latency = elapsed_usecs;
		if (elapsed_usecs < s_min_latency || s_total_samples == 0)
			s_min_latency = elapsed_usecs;
		s_total_latency += elapsed_usecs;

		if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
			s_exceeded_threshold++;

		if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
			sample_sc(start, stop);

		if (loop_cnt >= sample_sc_now) {
			sample_sc((long long)0, (long long)0);
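
		/*
		 * Each pass above takes an absolute timestamp, sleeps to a
		 * deadline with mk_wait_until(), takes a second timestamp and
		 * treats any overshoot beyond the requested sleep as scheduling
		 * latency before binning it; a stand-alone sketch of this
		 * measurement follows the timebase setup below.
		 */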

	/*
	   No need to check for initscr error return.
	   We won't get here if it fails on the first call.
	*/

	mach_timebase_info_data_t info;

	(void) mach_timebase_info (&info);

	divisor = ( (double)info.denom / (double)info.numer ) * 1000;
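
/*
 * Stand-alone sketch of the sampling step performed in the main loop and of
 * the unit conversion implied by the divisor computed above: ticks * numer /
 * denom is nanoseconds, so (denom / numer) * 1000 is ticks-per-microsecond.
 * measure_oversleep_usecs() is an illustrative helper, not part of this tool.
 */
static int
measure_oversleep_usecs(int requested_usecs)
{
	mach_timebase_info_data_t	tb;
	double				ticks_per_usec;
	uint64_t			deadline, t1, t2;

	(void) mach_timebase_info(&tb);
	ticks_per_usec = ((double)tb.denom / (double)tb.numer) * 1000.0;

	t1 = mach_absolute_time();
	deadline = t1 + (uint64_t)((double)requested_usecs * ticks_per_usec);
	mk_wait_until(deadline);		/* sleep to an absolute deadline */
	t2 = mach_absolute_time();

	/* anything beyond the requested sleep is scheduling latency */
	return ((int)(((double)(t2 - t1)) / ticks_per_usec) - requested_usecs);
}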

/* This is the realtime band */
set_time_constraint_policy()

	kern_return_t				result;
	thread_time_constraint_policy_data_t	info;
	mach_msg_type_number_t			count;
	boolean_t				get_default;

	count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
	result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
				   (thread_policy_t)&info, &count, &get_default);
	if (result != KERN_SUCCESS)

	result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
				   (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);

/* This is the timeshare mode */
set_standard_policy()

	kern_return_t			result;
	thread_standard_policy_data_t	info;
	mach_msg_type_number_t		count;
	boolean_t			get_default;

	count = THREAD_STANDARD_POLICY_COUNT;
	result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
				   (thread_policy_t)&info, &count, &get_default);
	if (result != KERN_SUCCESS)

	result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
				   (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
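
/*
 * A minimal sketch of how the two helpers above are used elsewhere in this
 * file (around screen updates and log writes): drop to the timeshare policy,
 * do the slow work, then re-enter the realtime band.  run_in_timeshare() is
 * an illustrative helper, not part of this tool.
 */
static void
run_in_timeshare(void (*slow_work)(void))
{
	if (set_standard_policy() != KERN_SUCCESS)
		quit("Failed to set standard policy.\n");

	slow_work();			/* e.g. a screen update or log write */

	if (set_time_constraint_policy() != KERN_SUCCESS)
		quit("Failed to set time_constraint policy.\n");
}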

void read_command_map()

	total_threads = bufinfo.nkdthreads;
	size = bufinfo.nkdthreads * sizeof(kd_threadmap);

	if (mapptr = (kd_threadmap *) malloc(size))
		bzero (mapptr, size);

		printf("Thread map is not initialized -- this is not fatal\n");

	/* Now read the threadmap */
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDTHRMAP;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
		/* This is not fatal -- just means I cant map command strings */
		printf("Can't read the thread map -- this is not fatal\n");

void create_map_entry(int thread, char *command)

	for (i = 0, map = 0; !map && i < total_threads; i++)
		if (mapptr[i].thread == thread)
			map = &mapptr[i];	/* Reuse this entry, the thread has been reassigned */

	if (!map)	/* look for invalid entries that I can reuse*/
		for (i = 0, map = 0; !map && i < total_threads; i++)
			if (mapptr[i].valid == 0 )
				map = &mapptr[i];	/* Reuse this invalid entry */

	/*
	   If reach here, then this is a new thread and
	   there are no invalid entries to reuse
	   Double the size of the thread map table.
	*/
	n = total_threads * 2;
	mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
	bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
	map = &mapptr[total_threads];

	fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);

	fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);

	map->thread = thread;
	/*
	   The trace entry that returns the command name will hold
	   at most, MAXCOMLEN chars, and in that case, is not
	   guaranteed to be null terminated.
	*/
	(void)strncpy (map->command, command, MAXCOMLEN);
	map->command[MAXCOMLEN] = '\0';

kd_threadmap *find_thread_map(int thread)

		return((kd_threadmap *)0);

	for (i = 0; i < total_threads; i++)
		if (map->valid && (map->thread == thread))

	return ((kd_threadmap *)0);

kill_thread_map(int thread)

	if (map = find_thread_map(thread)) {

		fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);

		map->command[0] = '\0';

struct th_info *find_thread(int thread, int type1, int type2) {

	for (ti = th_state; ti < &th_state[cur_max]; ti++) {
		if (ti->thread == thread) {
			if (type1 == ti->type)
			if (type2 == ti->type)

	return ((struct th_info *)0);

char *find_code(type)

	for (i = 0; i < num_of_codes; i++) {
		if (codes_tab[i].type == type)
			return(codes_tab[i].name);

void sample_sc(uint64_t start, uint64_t stop)

	kd_buf	*kd, *last_mach_sched, *start_kd, *end_of_sample;
	int	first_entry = 1;
	double	timestamp, last_timestamp, delta, start_bias;
	void	read_command_map();

	if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
		/* set standard timeshare policy when logging */
		if(set_standard_policy() != KERN_SUCCESS)
			quit("Failed to set standard policy.\n");

	/* Get kernel buffer information */
	get_bufinfo(&bufinfo);

	needed = bufinfo.nkdbufs * sizeof(kd_buf);

	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDREADTR;
	mib[5] = 0;			/* no flags */

	if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
		quit("trace facility failure, KERN_KDREADTR\n");

	if (bufinfo.flags & KDBG_WRAPPED) {
		for (i = 0; i < cur_max; i++) {
			th_state[i].thread = 0;
			th_state[i].type = -1;
			th_state[i].pathptr = (long *)0;
			th_state[i].pathname[0] = 0;

		latency = (double)(stop - start) / divisor;
		latency -= (double)num_of_usecs_to_sleep;

		fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
			&(ctime(&curr_time)[0]), latency, count);

	end_of_sample = &((kd_buf *)my_buffer)[count];

	/* Always reinitialize the DECR_TRAP array */
	for (i=0; i < hi.avail_cpus; i++)
		last_decrementer_kd[i] = (kd_buf *)my_buffer;

	last_mach_sched = (kd_buf *)0;

	for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
		int debugid, thread, cpunum;
		int type, clen, mode;
		char sched_info[64];
		kd_threadmap *find_thread_map();
		double handle_decrementer();
		kd_buf *log_decrementer();
		int check_for_thread_update();
		void enter_syscall();
		void exit_syscall();
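
		/*
		 * In this kd_buf layout, arg5 packs both identifiers for the
		 * entry: KDBG_THREAD_MASK extracts the thread and KDBG_CPU_MASK
		 * selects cpu 1 versus cpu 0, as decoded below.
		 */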
		thread  = kd->arg5 & KDBG_THREAD_MASK;
		cpunum  = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;
		debugid = kd->debugid;
		type    = kd->debugid & DBG_FUNC_MASK;

		if (check_for_thread_update(thread, type, kd))

		if (type == DECR_TRAP)
			i_latency = handle_decrementer(kd);

		now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
			(uint64_t)((unsigned int)(kd->timestamp.tv_nsec));

		timestamp = ((double)now) / divisor;

		if (now < start || now > stop) {
			if (debugid & DBG_FUNC_START)
				enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
			else if (debugid & DBG_FUNC_END)
				exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
			else if (type == DECR_TRAP) {

				if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
					start_kd = last_decrementer_kd[cpunum];
					kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);

					if (kd >= end_of_sample)

				if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
					cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;
					last_decrementer_kd[cpunum] = kd;

				last_decrementer_kd[cpunum] = cur_kd;

		latency = (double)(stop - start) / divisor;
		latency -= (double)num_of_usecs_to_sleep;

			sprintf(buf2, "default");
			sprintf(buf2, "%d", my_pri);

		sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
			&(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
		clen = strlen(buf1);
		memset(buf2, '-', clen);

		fprintf(log_fp, "\n\n%s\n", buf2);
		fprintf(log_fp, "%s\n\n", buf1);
		fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

		start_bias = ((double)start) / divisor;
		last_timestamp = timestamp;
		delta = timestamp - last_timestamp;

		if (map = find_thread_map(thread))
			strcpy(command, map->command);

			fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);

			last_timestamp = timestamp;

			fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);

			last_timestamp = timestamp;

			fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);

			last_timestamp = timestamp;

			fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);

			last_timestamp = timestamp;

			last_decrementer_kd[cpunum] = kd;

			if (i_thresh_hold && (int)i_latency > i_thresh_hold)

			if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
				if (ti->type == -1 && strcmp(command, "kernel_task"))

			fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);

			last_timestamp = timestamp;

			fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
				timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);

			last_timestamp = timestamp;

		case MACH_stkhandoff:
			last_mach_sched = kd;

			if (map = find_thread_map(kd->arg2))
				strcpy(command1, map->command);
				sprintf(command1, "%-8x", kd->arg2);

			if (ti = find_thread(kd->arg2, 0, 0)) {
				if (ti->type == -1 && strcmp(command1, "kernel_task"))

			memset(sched_info, ' ', sizeof(sched_info));

			sprintf(sched_info, "%14.14s", command);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);

			fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
				timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);

			last_timestamp = timestamp;
			if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
				if (cur_max >= MAX_THREADS)

				ti = &th_state[cur_max++];

				ti->thread = thread;
				ti->pathptr = (long *)0;
				ti->child_thread = 0;

			while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))

					ti->arg1 = kd->arg1;
					memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
					sargptr = (long *)&ti->pathname[0];

					*sargptr++ = kd->arg2;
					*sargptr++ = kd->arg3;
					*sargptr++ = kd->arg4;
					ti->pathptr = sargptr;

					sargptr = ti->pathptr;

					/*
					   We don't want to overrun our pathname buffer if the
					   kernel sends us more VFS_LOOKUP entries than we can
					   handle.
					*/
					if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])

					/*
					   We need to detect consecutive vfslookup entries.
					   So, if we get here and find a START entry,
					   fake the pathptr so we can bypass all further
					   vfslookup entries.
					*/
					if (kd->debugid & DBG_FUNC_START)
						(long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];

					*sargptr++ = kd->arg1;
					*sargptr++ = kd->arg2;
					*sargptr++ = kd->arg3;
					*sargptr++ = kd->arg4;
					ti->pathptr = sargptr;

			/* print the tail end of the pathname */
			len = strlen(ti->pathname);

			fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
				timestamp - start_bias, delta, "VFS_LOOKUP",
				&ti->pathname[len], ti->arg1, thread, cpunum, command);

			last_timestamp = timestamp;
			if (debugid & DBG_FUNC_START)
				enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
			else if (debugid & DBG_FUNC_END)
				exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
				print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);

			last_timestamp = timestamp;

	if (last_mach_sched && log_fp)
		fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);

	if (first_entry == 0 && log_fp)
		fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);

	if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
		/* set back to realtime band */
		if(set_time_constraint_policy() != KERN_SUCCESS)
			quit("Failed to set time_constraint policy.\n");

enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)

	cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;

	if (print_info && fp) {
		if (p = find_code(type)) {
			if (type == INTERRUPT) {

				if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
					if (ti->type == -1 && strcmp(command, "kernel_task"))

				fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
					timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
			} else if (type == MACH_vmfault) {
				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
					timestamp - bias, delta, p, thread, cpunum, command);

				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
					timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
					thread, cpunum, command);

			fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
				timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
				thread, cpunum, command);

	if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
		if (cur_max >= MAX_THREADS) {
			static int do_this_once = 1;

			for (i = 0; i < cur_max; i++) {
				fprintf(fp, "thread = %x, type = %x\n",
					th_state[i].thread, th_state[i].type);

		ti = &th_state[cur_max++];

		ti->thread = thread;
		ti->child_thread = 0;

	if (type != BSC_exit)

	ti->stime = timestamp;
	ti->pathptr = (long *)0;

	if (print_info && fp)
		fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);

exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)

	cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;

	ti = find_thread(thread, type, type);

	if (print_info && fp)
		fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);

	if (print_info && fp) {
			fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
			fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);

		if (p = find_code(type)) {
			if (type == INTERRUPT) {
				fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
			} else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
				fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
					p, fault_name[kd->arg2], kd->arg1,
					thread, cpunum, command);

				fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
					p, kd->arg1, kd->arg2,
					thread, cpunum, command);

			fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
				type, kd->arg1, kd->arg2,
				thread, cpunum, command);

	if (ti == (struct th_info *)0) {
		if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
			if (cur_max >= MAX_THREADS)

			ti = &th_state[cur_max++];

			ti->thread = thread;
			ti->child_thread = 0;
			ti->pathptr = (long *)0;

print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)

	cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;

	fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);

	if (p = find_code(type)) {
		fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
			timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
			thread, cpunum, command);

		fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
			timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
			thread, cpunum, command);

check_for_thread_update(int thread, int type, kd_buf *kd)

	void create_map_entry();

	case TRACE_DATA_NEWTHREAD:
		if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
			if (cur_max >= MAX_THREADS)

			ti = &th_state[cur_max++];

			ti->thread = thread;
			ti->pathptr = (long *)0;

		ti->child_thread = kd->arg1;

	case TRACE_STRING_NEWTHREAD:
		if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)

		if (ti->child_thread == 0)

		create_map_entry(ti->child_thread, (char *)&kd->arg1);

		ti->child_thread = 0;

	case TRACE_STRING_EXEC:
		create_map_entry(thread, (char *)&kd->arg1);

kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)

	kd_buf	*kd, *kd_start, *kd_stop;
	int	kd_count;		/* Limit the boundary of kd_start */
	double	timestamp, last_timestamp, delta, start_bias;
	int	debugid, type, clen;
	char	sched_info[64];
	kd_threadmap *find_thread_map();

	sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
	clen = strlen(buf1);
	memset(buf2, '-', clen);

	fprintf(log_fp, "\n\n%s\n", buf2);
	fprintf(log_fp, "%s\n\n", buf1);
	fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

	thread = kd_beg->arg5 & KDBG_THREAD_MASK;
	cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1 : 0;

	for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
		if (kd_count == MAX_LOG_COUNT)

		if ((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)

		if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)

		if ((kd_start->arg5 & KDBG_THREAD_MASK) != thread)

	if (kd_start < (kd_buf *)my_buffer)
		kd_start = (kd_buf *)my_buffer;

	thread = kd_end->arg5 & KDBG_THREAD_MASK;

	for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
		if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)

		if ((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)

		if ((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)

	if (kd_stop >= end_of_sample)
		kd_stop = end_of_sample - 1;

	now = (((uint64_t)kd_start->timestamp.tv_sec) << 32) |
		(uint64_t)((unsigned int)(kd_start->timestamp.tv_nsec));
	timestamp = ((double)now) / divisor;

	for (kd = kd_start; kd <= kd_stop; kd++) {
		type = kd->debugid & DBG_FUNC_MASK;

		if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
			if (ti->stime >= timestamp)

	for (kd = kd_start; kd <= kd_stop; kd++) {

		thread = kd->arg5 & KDBG_THREAD_MASK;
		cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1 : 0;
		debugid = kd->debugid;
		type = kd->debugid & DBG_FUNC_MASK;

		now = (((uint64_t)kd->timestamp.tv_sec) << 32) |
			(uint64_t)((unsigned int)(kd->timestamp.tv_nsec));

		timestamp = ((double)now) / divisor;

		if (kd == kd_start) {
			start_bias = timestamp;
			last_timestamp = timestamp;

		delta = timestamp - last_timestamp;

		if (map = find_thread_map(thread))
			strcpy(command, map->command);

			fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);

			last_timestamp = timestamp;

			if ((int)(kd->arg1) >= 0)

			i_latency = (((double)(-1 - kd->arg1)) / divisor);

			if (i_thresh_hold && (int)i_latency > i_thresh_hold)

			if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
				if (ti->type == -1 && strcmp(command, "kernel_task"))

			fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
				timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);

			last_timestamp = timestamp;

			fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
				timestamp - start_bias, delta, (double)kd->arg1/divisor,
				"DECR_SET", thread, cpunum, command);

			last_timestamp = timestamp;

		case MACH_stkhandoff:
			if (map = find_thread_map(kd->arg2))
				strcpy(command1, map->command);
				sprintf(command1, "%-8x", kd->arg2);

			if (ti = find_thread(kd->arg2, 0, 0)) {
				if (ti->type == -1 && strcmp(command1, "kernel_task"))

			memset(sched_info, ' ', sizeof(sched_info));

			sprintf(sched_info, "%14.14s", command);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
			clen = strlen(sched_info);
			sched_info[clen] = ' ';

			sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);

			fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
				timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);

			last_timestamp = timestamp;
		if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
			if (cur_max >= MAX_THREADS)

			ti = &th_state[cur_max++];

			ti->thread = thread;
			ti->pathptr = (long *)0;
			ti->child_thread = 0;

		while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)

				ti->arg1 = kd->arg1;
				memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
				sargptr = (long *)&ti->pathname[0];

				*sargptr++ = kd->arg2;
				*sargptr++ = kd->arg3;
				*sargptr++ = kd->arg4;
				ti->pathptr = sargptr;

				sargptr = ti->pathptr;

				/*
				   We don't want to overrun our pathname buffer if the
				   kernel sends us more VFS_LOOKUP entries than we can
				   handle.
				*/
				if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])

				/*
				   We need to detect consecutive vfslookup entries.
				   So, if we get here and find a START entry,
				   fake the pathptr so we can bypass all further
				   vfslookup entries.
				*/
				if (kd->debugid & DBG_FUNC_START)
					(long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];

				*sargptr++ = kd->arg1;
				*sargptr++ = kd->arg2;
				*sargptr++ = kd->arg3;
				*sargptr++ = kd->arg4;
				ti->pathptr = sargptr;

		/* print the tail end of the pathname */
		len = strlen(ti->pathname);

		fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
			timestamp - start_bias, delta, "VFS_LOOKUP",
			&ti->pathname[len], ti->arg1, thread, cpunum, command);

		last_timestamp = timestamp;

		if (debugid & DBG_FUNC_START)
			enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
		else if (debugid & DBG_FUNC_END)
			exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
			print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);

		last_timestamp = timestamp;

double handle_decrementer(kd_buf *kd)
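
	/*
	 * For a DECR_TRAP entry, arg1 appears to carry the signed decrementer
	 * value observed at trap time: a non-negative value means the trap was
	 * on time, while a negative value means it fired late and -1 - arg1 is
	 * the number of timebase ticks past the deadline, which divisor then
	 * converts to microseconds.
	 */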
	if ((int)(kd->arg1) >= 0)

	latency = (((double)(-1 - kd->arg1)) / divisor);
	elapsed_usecs = (int)latency;

	if (elapsed_usecs < 100)
		i_usec_10_bins[elapsed_usecs/10]++;
	if (elapsed_usecs < 1000)
		i_usec_100_bins[elapsed_usecs/100]++;
	else if (elapsed_usecs < 10000)
		i_msec_1_bins[elapsed_usecs/1000]++;
	else if (elapsed_usecs < 50000)
		i_msec_10_bins[elapsed_usecs/10000]++;

	if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
		i_exceeded_threshold++;
	if (elapsed_usecs > i_max_latency)
		i_max_latency = elapsed_usecs;
	if (elapsed_usecs < i_min_latency || i_total_samples == 0)
		i_min_latency = elapsed_usecs;
	i_total_latency += elapsed_usecs;

void init_code_file()

	int	i, n, cnt, code;
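
	/*
	 * The codes file is expected to begin with a decimal entry count,
	 * followed by one "<hex code> <name>" pair per line, which is the
	 * layout the fscanf calls below assume.  For example (hypothetical
	 * contents):
	 *
	 *	2
	 *	0x01090000	DECR_TRAP
	 *	0x040C0004	BSC_exit
	 */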

	if ((fp = fopen(code_file, "r")) == (FILE *)0) {
		fprintf(log_fp, "open of %s failed\n", code_file);

	n = fscanf(fp, "%d\n", &cnt);

		fprintf(log_fp, "bad format found in %s\n", code_file);

	for (i = 0; i < MAX_ENTRIES; i++) {
		n = fscanf(fp, "%x%s\n", &code, name);

		strncpy(codes_tab[i].name, name, 32);
		codes_tab[i].type = code;

	FILE	*fp = (FILE *)0;
	char	tmp_nm_file[128];

	bzero(tmp_nm_file, 128);
	bzero(tmpstr, 1024);

	/* Build the temporary nm file path */
	sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());

	/* Build the nm command and create a tmp file with the output*/
	sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
		 kernelpath, tmp_nm_file);

	/* Parse the output from the nm command */
	if ((fp = fopen(tmp_nm_file, "r")) == (FILE *)0)
		/* Hmmm, let's not treat this as fatal */
		fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);

	/* Count the number of symbols in the nm symbol table */
	while ( (inchr = getc(fp)) != -1)

	/* Malloc the space for symbol table */
	if (kern_sym_count > 0)
		kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));

		/* Hmmm, lets not treat this as fatal */
		fprintf(stderr, "Can't allocate memory for kernel symbol table\n");

	bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));

		/* Hmmm, lets not treat this as fatal */
		fprintf(stderr, "No kernel symbol table \n");

	for (i = 0; i < kern_sym_count; i++)

		bzero(tmpstr, 1024);
		if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)

		len = strlen(tmpstr);
		kern_sym_tbl[i].k_sym_name = malloc(len + 1);

		if (kern_sym_tbl[i].k_sym_name == (char *)0)
			fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
			kern_sym_tbl[i].k_sym_name = (char *)0;

		strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
		kern_sym_tbl[i].k_sym_len = len;

	if (i != kern_sym_count)
		/* Hmmm, didn't build up entire table from nm */
		/* scrap the entire thing */
		free (kern_sym_tbl);
		kern_sym_tbl = (kern_sym_t *)0;

	/* Remove the temporary nm file */
	unlink(tmp_nm_file);

	/* Dump the kernel symbol table */
	for (i = 0; i < kern_sym_count; i++)

		if (kern_sym_tbl[i].k_sym_name)
			printf ("[%d] 0x%x %s\n", i,
				kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);

			printf ("[%d] 0x%x %s\n", i,
				kern_sym_tbl[i].k_sym_addr, "No symbol name");

pc_to_string(unsigned int pc, int max_len, int mode)

	int binary_search();

		sprintf(pcstring, "0x%-8x [usermode addr]", pc);

	ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);

		sprintf(pcstring, "0x%x", pc);

	else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
		sprintf(pcstring, "0x%x", pc);

	if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))

	memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
	sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);

/* Return -1 if not found, else return index */
int binary_search(list, low, high, addr)

	mid = (low + high) / 2;

		return (-1);		/* failed */
	else if (low + 1 == high)
		if (list[low].k_sym_addr <= addr &&
		    addr < list[high].k_sym_addr)
			/* We have a range match */

		else if (list[high].k_sym_addr <= addr)

		return(-1);		/* Failed */
	else if (addr < list[mid].k_sym_addr)
		return(binary_search (list, low, mid, addr));

		return(binary_search (list, mid, high, addr));
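
/*
 * Note that the recursive calls above pass mid itself rather than mid +/- 1;
 * termination relies on the low + 1 == high base case, whose half-open check
 * list[low].k_sym_addr <= addr < list[high].k_sym_addr maps an address to the
 * symbol that starts at or below it (the match pc_to_string() relies on when
 * it prints "symbol+offset").
 */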

open_logfile(char *path)

	log_fp = fopen(path, "a");

		/* failed to open path */
		fprintf(stderr, "latency: failed to open logfile [%s]\n", path);