/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
   cc -I. -DKERNEL_PRIVATE -O -o latency latency.c -lncurses
*/
#include <mach/mach.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/ioctl.h>
#ifndef KERNEL_PRIVATE
#define KERNEL_PRIVATE
#include <sys/kdebug.h>
#undef KERNEL_PRIVATE
#else
#include <sys/kdebug.h>
#endif /*KERNEL_PRIVATE*/
#include <sys/sysctl.h>

#include <mach/host_info.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/mach_syscalls.h>
#include <mach/clock_types.h>
#include <mach/mach_time.h>

#include <libkern/OSTypes.h>
extern mach_port_t clock_port;

#define KERN_KDPIDEX 14
int s_usec_10_bins[10];
int s_usec_100_bins[10];
int s_msec_1_bins[10];
int s_msec_10_bins[5];
int s_min_latency = 0;
long long s_total_latency = 0;
int s_exceeded_threshold = 0;

int i_usec_10_bins[10];
int i_usec_100_bins[10];
int i_msec_1_bins[10];
int i_msec_10_bins[5];
int i_min_latency = 0;
long long i_total_latency = 0;
int i_exceeded_threshold = 0;
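/*
 * Descriptive note (added commentary): the s_* arrays histogram scheduling
 * latency and the i_* arrays histogram interrupt (DECR_TRAP) latency.  As the
 * binning code later in this file shows, *_usec_10_bins cover 0-100us in 10us
 * steps, *_usec_100_bins cover up to 1ms in 100us steps, *_msec_1_bins cover
 * up to 10ms in 1ms steps and *_msec_10_bins cover 10-50ms in 10ms steps;
 * anything slower is counted in the s_too_slow / i_too_slow totals reported
 * by screen_update().
 */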
int num_of_usecs_to_sleep = 1000;

char *kernelpath = (char *)0;
char *code_file = (char *)0;
typedef struct {
    u_long  k_sym_addr;     /* kernel symbol address from nm */
    u_int   k_sym_len;      /* length of kernel symbol string */
    char   *k_sym_name;     /* kernel symbol string from nm */
} kern_sym_t;

kern_sym_t *kern_sym_tbl;   /* pointer to the nm table */
int kern_sym_count;         /* number of entries in nm table */
#define UNKNOWN "Can't find symbol name"

int trace_enabled = 0;
struct host_basic_info hi;

#define SAMPLE_SIZE 300000

kbufinfo_t bufinfo = {0, 0, 0};

FILE *log_fp = (FILE *)0;
int num_of_codes = 0;
int need_new_map = 0;
int total_threads = 0;
kd_threadmap *mapptr = 0;
#define MAX_ENTRIES 1024
struct {
    int  type;
    char name[32];
} codes_tab[MAX_ENTRIES];
/* If NUMPARMS changes from the kernel, then PATHLENGTH will also reflect the change */
#define PATHLENGTH (NUMPARMS*sizeof(long))

char pathname[PATHLENGTH + 1];

#define MAX_THREADS 512
struct th_info th_state[MAX_THREADS];
#define TRACE_DATA_NEWTHREAD   0x07000004
#define TRACE_STRING_NEWTHREAD 0x07010004
#define TRACE_STRING_EXEC      0x07010008

#define INTERRUPT       0x01050000
#define DECR_TRAP       0x01090000
#define DECR_SET        0x01090004
#define MACH_vmfault    0x01300000
#define MACH_sched      0x01400000
#define MACH_stkhandoff 0x01400008
#define VFS_LOOKUP      0x03010090
#define BSC_exit        0x040C0004
#define IES_action      0x050b0018
#define IES_filter      0x050b001c
#define TES_action      0x050c0010
#define CQ_action       0x050d0018

#define DBG_FUNC_ALL  (DBG_FUNC_START | DBG_FUNC_END)
#define DBG_FUNC_MASK 0xfffffffc

#define DBG_ZERO_FILL_FAULT 1
#define DBG_PAGEIN_FAULT    2
#define DBG_COW_FAULT       3
#define DBG_CACHE_HIT_FAULT 4
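/*
 * Added note: a kdebug debugid packs the trace class/subclass/code into the
 * upper bits and reserves the low two bits for DBG_FUNC_START and DBG_FUNC_END,
 * which is why events are matched against (kd->debugid & DBG_FUNC_MASK)
 * throughout this file (DBG_FUNC_MASK is 0xfffffffc, i.e. the low two bits
 * stripped).
 */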
char *fault_name[5] = {

char *pc_to_string();
static kern_return_t set_time_constraint_policy(void);
static kern_return_t set_standard_policy(void);

int decrementer_val = 0;    /* Value used to reset decrementer */
int set_remove_flag = 1;    /* By default, remove trace buffer */

kd_buf **last_decrementer_kd;    /* last DECR_TRAP per cpu */

#define MAX_LOG_COUNT 30    /* limits the number of entries dumped in log_decrementer */
/*
   This flag is turned off when calling
   quit() due to a set_remove() failure.
*/
    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDENABLE;    /* protocol */
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDENABLE\n");
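/*
 * Added sketch (assumption, not original code): the mib[0]/mib[3]/mib[4]
 * assignments are not part of this fragment, but the kdebug sysctl calls in
 * this file normally fill the MIB as { CTL_KERN, KERN_KDEBUG, <op>, <value>,
 * 0, 0 }, with mib[3] carrying the operation's argument for the 4-level calls,
 * e.g.:
 *
 *    mib[0] = CTL_KERN;
 *    mib[1] = KERN_KDEBUG;
 *    mib[2] = KERN_KDENABLE;       -- operation
 *    mib[3] = value;               -- e.g. 1 = enable, 0 = disable
 *    mib[4] = 0;
 *    mib[5] = 0;                   -- no flags
 *    if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
 *        quit("trace facility failure, KERN_KDENABLE\n");
 */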
set_numbufs(int nbufs)
    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETBUF;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETBUF\n");

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETUP;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETUP\n");
set_pidexclude(int pid, int on_off)
    kr.type = KDBG_TYPENONE;
    needed = sizeof(kd_regtype);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDPIDEX;

    sysctl(mib, 3, &kr, &needed, NULL, 0);
    kr.type = KDBG_TYPENONE;
    needed = sizeof(kd_regtype);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETRTCDEC;    /* protocol */
    mib[3] = 0;                   /* wildcard address family */
    mib[5] = 0;                   /* no flags */

    if ((ret = sysctl(mib, 3, &kr, &needed, NULL, 0)) < 0)
        quit("trace facility failure, KERN_KDSETRTCDEC\n");
get_bufinfo(kbufinfo_t *val)
    needed = sizeof (*val);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDGETBUF;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, val, &needed, 0, 0) < 0)
        quit("trace facility failure, KERN_KDGETBUF\n");
    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDREMOVE;    /* protocol */
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
        quit("the trace facility is currently in use...\n fs_usage, sc_usage, and latency use this feature.\n\n");
        quit("trace facility failure, KERN_KDREMOVE\n");
    /* When we aren't logging, only collect the DECR_TRAP trace points */
    kr.type = KDBG_VALCHECK;
    kr.value1 = DECR_TRAP;
    needed = sizeof(kd_regtype);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETREG;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETREG\n");

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETUP;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETUP\n");
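/*
 * Added note: KDBG_VALCHECK with value1 = DECR_TRAP asks the kernel to keep
 * only decrementer-trap events while nothing is being logged, which keeps the
 * trace buffer cheap between samples; the logging setup below switches to a
 * KDBG_RANGETYPE registration so a full event stream is captured when a
 * latency threshold is exceeded.
 */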
    kr.type = KDBG_RANGETYPE;
    needed = sizeof(kd_regtype);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETREG;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, &kr, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETREG\n");

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDSETUP;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, NULL, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDSETUP\n");
    void screen_update();

    set_pidexclude(getpid(), 0);
    screen_update(log_fp);

void leave()    /* exit under normal conditions -- signal handler */
    set_pidexclude(getpid(), 0);
screen_update(FILE *fp)
    unsigned int average_s_latency;
    unsigned int average_i_latency;

    if (fp == (FILE *)0) {

    fprintf(fp, "\n\n===================================================================================================\n");

    /*
     * Display the current time.
     * "ctime" always returns a string that looks like this:
     *
     *    Sun Sep 16 01:03:52 1973
     *    012345678901234567890123
     *
     * We want indices 11 thru 18 (length 8).
     */
    elapsed_secs = curr_time - start_time;
    elapsed_hours = elapsed_secs / 3600;
    elapsed_secs -= elapsed_hours * 3600;
    elapsed_mins = elapsed_secs / 60;
    elapsed_secs -= elapsed_mins * 60;

    sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
        elapsed_hours, elapsed_mins, elapsed_secs);
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, " SCHEDULER INTERRUPTS\n");
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "---------------------------------------------\n");
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "total_samples %10d %10d\n\n", s_total_samples, i_total_samples);
    fprintf(fp, "%s", tbuf);

    for (itotal = 0, stotal = 0, i = 0; i < 10; i++) {
        sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 10, s_usec_10_bins[i], i_usec_10_bins[i]);
        fprintf(fp, "%s", tbuf);

        stotal += s_usec_10_bins[i];
        itotal += i_usec_10_bins[i];

    sprintf(tbuf, "total < 100 usecs %10d %10d\n\n", stotal, itotal);
    fprintf(fp, "%s", tbuf);

    for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
        sprintf(tbuf, "delays < %3d usecs %10d %10d\n", (i + 1) * 100, s_usec_100_bins[i], i_usec_100_bins[i]);
        sprintf(tbuf, "delays < 1 msec %10d %10d\n", s_usec_100_bins[i], i_usec_100_bins[i]);
        fprintf(fp, "%s", tbuf);

        stotal += s_usec_100_bins[i];
        itotal += i_usec_100_bins[i];

    sprintf(tbuf, "total < 1 msec %10d %10d\n\n", stotal, itotal);
    fprintf(fp, "%s", tbuf);

    for (itotal = 0, stotal = 0, i = 1; i < 10; i++) {
        sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1), s_msec_1_bins[i], i_msec_1_bins[i]);
        fprintf(fp, "%s", tbuf);

        stotal += s_msec_1_bins[i];
        itotal += i_msec_1_bins[i];

    sprintf(tbuf, "total < 10 msecs %10d %10d\n\n", stotal, itotal);
    fprintf(fp, "%s", tbuf);

    for (itotal = 0, stotal = 0, i = 1; i < 5; i++) {
        sprintf(tbuf, "delays < %3d msecs %10d %10d\n", (i + 1)*10, s_msec_10_bins[i], i_msec_10_bins[i]);
        fprintf(fp, "%s", tbuf);

        stotal += s_msec_10_bins[i];
        itotal += i_msec_10_bins[i];

    sprintf(tbuf, "total < 50 msecs %10d %10d\n\n", stotal, itotal);
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "delays > 50 msecs %10d %10d\n", s_too_slow, i_too_slow);
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "\nminimum latency(usecs) %7d %7d\n", s_min_latency, i_min_latency);
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "maximum latency(usecs) %7d %7d\n", s_max_latency, i_max_latency);
    fprintf(fp, "%s", tbuf);

        average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
        average_s_latency = 0;

        average_i_latency = (unsigned int)(i_total_latency/i_total_samples);
        average_i_latency = 0;

    sprintf(tbuf, "average latency(usecs) %7d %7d\n", average_s_latency, average_i_latency);
    fprintf(fp, "%s", tbuf);

    sprintf(tbuf, "exceeded threshold %7d %7d\n", s_exceeded_threshold, i_exceeded_threshold);
    fprintf(fp, "%s", tbuf);
    fprintf(stderr, "Usage: latency [-rt] [-c codefile] [-l logfile] [-st threshold]\n");
    fprintf(stderr, "               [-it threshold] [-s sleep_in_usecs]\n");
    fprintf(stderr, "               [-d decrementer_in_usecs] [-n kernel]\n\n");

    fprintf(stderr, "  -rt   Set realtime scheduling policy. Default is timeshare.\n");
    fprintf(stderr, "  -c    specify name of codes file\n");
    fprintf(stderr, "  -l    specify name of file to log trace entries to when threshold is exceeded\n");
    fprintf(stderr, "  -st   set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
    fprintf(stderr, "  -it   set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
    fprintf(stderr, "  -s    set sleep time in microseconds\n");
    fprintf(stderr, "  -d    set decrementer in microseconds.\n");
    fprintf(stderr, "  -n    specify kernel, default is /mach_kernel\n");

    fprintf(stderr, "\nlatency must be run as root\n\n");
    mach_timespec_t remain;
    uint64_t start, stop;
    uint64_t adeadline, adelay;
    double nanosecs_to_sleep;
    int loop_cnt, sample_sc_now;
    int decrementer_usec = 0;
    host_name_port_t host;
    void init_code_file();

    my_policy = THREAD_STANDARD_POLICY;
    policy_name = "TIMESHARE";

        if (strcmp(argv[1], "-rt") == 0) {
            my_policy = THREAD_TIME_CONSTRAINT_POLICY;    /* the real time band */
            policy_name = "REALTIME";
        } else if (strcmp(argv[1], "-st") == 0) {
            s_thresh_hold = atoi(argv[1]);
        } else if (strcmp(argv[1], "-it") == 0) {
            i_thresh_hold = atoi(argv[1]);
        } else if (strcmp(argv[1], "-c") == 0) {
        } else if (strcmp(argv[1], "-l") == 0) {
            open_logfile(argv[1]);
        } else if (strcmp(argv[1], "-s") == 0) {
            num_of_usecs_to_sleep = atoi(argv[1]);
        } else if (strcmp(argv[1], "-d") == 0) {
            decrementer_usec = atoi(argv[1]);
        } else if (strcmp(argv[1], "-n") == 0) {
            kernelpath = argv[1];
    if ( geteuid() != 0 ) {
        printf("'latency' must be run as root...\n");

    if (kernelpath == (char *) 0)
        kernelpath = "/mach_kernel";

    if (code_file == (char *) 0)
        code_file = "/usr/share/misc/trace.codes";

    sample_sc_now = 25000 / num_of_usecs_to_sleep;

    decrementer_val = decrementer_usec * divisor;

    /* get the cpu count for the DECR_TRAP array */
    host = mach_host_self();
    size = sizeof(hi)/sizeof(int);
    ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
    if (ret != KERN_SUCCESS) {
        mach_error(argv[0], ret);

    if ((last_decrementer_kd = (kd_buf **)malloc(hi.avail_cpus * sizeof(kd_buf *))) == (kd_buf **)0)
        quit("can't allocate memory for decrementer tracing info\n");

    nanosecs_to_sleep = (double)(num_of_usecs_to_sleep * 1000);
    fdelay = nanosecs_to_sleep * (divisor /1000);
    adelay = (uint64_t)fdelay;
    /*
       When the decrementer isn't set in the options,
       decval will be zero and this call will reset
       the system default ...
    */
    set_rtcdec(decrementer_val);

    if (initscr() == (WINDOW *) 0)
        printf("Unrecognized TERM type, try vt100\n");

    signal(SIGWINCH, sigwinch);
    signal(SIGINT, sigintr);
    signal(SIGQUIT, leave);
    signal(SIGTERM, leave);
    signal(SIGHUP, leave);

    if ((my_buffer = malloc(SAMPLE_SIZE * sizeof(kd_buf))) == (char *)0)
        quit("can't allocate memory for tracing info\n");

    set_numbufs(SAMPLE_SIZE);
    set_init_nologging();
    set_pidexclude(getpid(), 1);
    start_time = time((long *)0);
    refresh_time = start_time;

    if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
        /* the realtime band */
        if (set_time_constraint_policy() != KERN_SUCCESS)
            quit("Failed to set realtime policy.\n");

        curr_time = time((long *)0);

        if (curr_time >= refresh_time) {
            if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
                /* set standard timeshare policy during screen update */
                if (set_standard_policy() != KERN_SUCCESS)
                    quit("Failed to set standard policy.\n");
            screen_update((FILE *)0);
            if (my_policy == THREAD_TIME_CONSTRAINT_POLICY)
                /* set back to realtime band */
                if (set_time_constraint_policy() != KERN_SUCCESS)
                    quit("Failed to set time_constraint policy.\n");
            refresh_time = curr_time + 1;

        timestamp1 = mach_absolute_time();
        adeadline = timestamp1 + adelay;
        mk_wait_until(adeadline);
        timestamp2 = mach_absolute_time();
        elapsed_usecs = (int)(((double)(stop - start)) / divisor);

        if ((elapsed_usecs -= num_of_usecs_to_sleep) <= 0)

        if (elapsed_usecs < 100)
            s_usec_10_bins[elapsed_usecs/10]++;
        if (elapsed_usecs < 1000)
            s_usec_100_bins[elapsed_usecs/100]++;
        else if (elapsed_usecs < 10000)
            s_msec_1_bins[elapsed_usecs/1000]++;
        else if (elapsed_usecs < 50000)
            s_msec_10_bins[elapsed_usecs/10000]++;

        if (elapsed_usecs > s_max_latency)
            s_max_latency = elapsed_usecs;
        if (elapsed_usecs < s_min_latency || s_total_samples == 0)
            s_min_latency = elapsed_usecs;
        s_total_latency += elapsed_usecs;

        if (s_thresh_hold && elapsed_usecs > s_thresh_hold)
            s_exceeded_threshold++;

        if (log_fp && s_thresh_hold && elapsed_usecs > s_thresh_hold)
            sample_sc(start, stop);

        if (loop_cnt >= sample_sc_now) {
            sample_sc((long long)0, (long long)0);
    /*
       No need to check for initscr error return.
       We won't get here if it fails on the first call.
    */

    mach_timebase_info_data_t info;

    (void) mach_timebase_info(&info);

    divisor = ( (double)info.denom / (double)info.numer) * 1000;
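/*
 * Added note: mach_timebase_info() returns the numer/denom fraction that
 * converts mach_absolute_time() ticks to nanoseconds, so the divisor computed
 * above is the number of ticks per microsecond ((denom/numer) * 1000).
 * Dividing a tick delta by divisor -- as the sampling loop and
 * handle_decrementer() do -- therefore yields elapsed microseconds.
 */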
/* This is the realtime band */
set_time_constraint_policy()
    kern_return_t result;
    thread_time_constraint_policy_data_t info;
    mach_msg_type_number_t count;
    boolean_t get_default;

    count = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
    result = thread_policy_get(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
                               (thread_policy_t)&info, &count, &get_default);
    if (result != KERN_SUCCESS)

    result = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
                               (thread_policy_t)&info, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
/* This is the timeshare mode */
set_standard_policy()
    kern_return_t result;
    thread_standard_policy_data_t info;
    mach_msg_type_number_t count;
    boolean_t get_default;

    count = THREAD_STANDARD_POLICY_COUNT;
    result = thread_policy_get(mach_thread_self(), THREAD_STANDARD_POLICY,
                               (thread_policy_t)&info, &count, &get_default);
    if (result != KERN_SUCCESS)

    result = thread_policy_set(mach_thread_self(), THREAD_STANDARD_POLICY,
                               (thread_policy_t)&info, THREAD_STANDARD_POLICY_COUNT);
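/*
 * Added note: both policy helpers use the same get-then-set pattern --
 * thread_policy_get() fills in the current (or default) parameters for the
 * requested policy flavor and thread_policy_set() re-applies them, which is
 * how the tool hops between the timeshare band and the time-constraint
 * (realtime) band around screen updates and logging.
 */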
void read_command_map()
    total_threads = bufinfo.nkdthreads;
    size = bufinfo.nkdthreads * sizeof(kd_threadmap);

    if (mapptr = (kd_threadmap *) malloc(size))
        bzero (mapptr, size);
        printf("Thread map is not initialized -- this is not fatal\n");

    /* Now read the threadmap */
    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDTHRMAP;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, mapptr, &size, NULL, 0) < 0)
        /* This is not fatal -- just means I cant map command strings */
        printf("Can't read the thread map -- this is not fatal\n");
void create_map_entry(int thread, char *command)
    for (i = 0, map = 0; !map && i < total_threads; i++)
        if (mapptr[i].thread == thread)
            map = &mapptr[i];    /* Reuse this entry, the thread has been reassigned */

    if (!map)    /* look for invalid entries that I can reuse*/
        for (i = 0, map = 0; !map && i < total_threads; i++)
            if (mapptr[i].valid == 0 )
                map = &mapptr[i];    /* Reuse this invalid entry */

        /*
           If reach here, then this is a new thread and
           there are no invalid entries to reuse
           Double the size of the thread map table.
        */
        n = total_threads * 2;
        mapptr = (kd_threadmap *) realloc(mapptr, n * sizeof(kd_threadmap));
        bzero(&mapptr[total_threads], total_threads*sizeof(kd_threadmap));
        map = &mapptr[total_threads];

            fprintf(log_fp, "MAP: increasing thread map to %d entries\n", total_threads);

        fprintf(log_fp, "MAP: adding thread %x with name %s\n", thread, command);

    map->thread = thread;
    /*
       The trace entry that returns the command name will hold
       at most, MAXCOMLEN chars, and in that case, is not
       guaranteed to be null terminated.
    */
    (void)strncpy (map->command, command, MAXCOMLEN);
    map->command[MAXCOMLEN] = '\0';
kd_threadmap *find_thread_map(int thread)
        return((kd_threadmap *)0);

    for (i = 0; i < total_threads; i++)
        if (map->valid && (map->thread == thread))

    return ((kd_threadmap *)0);

kill_thread_map(int thread)
    if (map = find_thread_map(thread)) {
            fprintf(log_fp, "MAP: deleting thread %x with name %s\n", thread, map->command);

        map->command[0] = '\0';
struct th_info *find_thread(int thread, int type1, int type2) {
    for (ti = th_state; ti < &th_state[cur_max]; ti++) {
        if (ti->thread == thread) {
            if (type1 == ti->type)
            if (type2 == ti->type)

    return ((struct th_info *)0);

char *find_code(type)
    for (i = 0; i < num_of_codes; i++) {
        if (codes_tab[i].type == type)
            return(codes_tab[i].name);
void sample_sc(uint64_t start, uint64_t stop)
    kd_buf *kd, *last_mach_sched, *start_kd, *end_of_sample;
    int first_entry = 1;
    double timestamp, last_timestamp, delta, start_bias;
    void read_command_map();

    if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
        /* set standard timeshare policy when logging */
        if (set_standard_policy() != KERN_SUCCESS)
            quit("Failed to set standard policy.\n");

    /* Get kernel buffer information */
    get_bufinfo(&bufinfo);

    needed = bufinfo.nkdbufs * sizeof(kd_buf);

    mib[1] = KERN_KDEBUG;
    mib[2] = KERN_KDREADTR;
    mib[5] = 0;                /* no flags */

    if (sysctl(mib, 3, my_buffer, &needed, NULL, 0) < 0)
        quit("trace facility failure, KERN_KDREADTR\n");

    if (bufinfo.flags & KDBG_WRAPPED) {
        for (i = 0; i < cur_max; i++) {
            th_state[i].thread = 0;
            th_state[i].type = -1;
            th_state[i].pathptr = (long *)0;
            th_state[i].pathname[0] = 0;

        latency = (double)(stop - start) / divisor;
        latency -= (double)num_of_usecs_to_sleep;

        fprintf(log_fp, "\n\n%-19.19s scheduling latency = %.1fus num_of_traces = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
            &(ctime(&curr_time)[0]), latency, count);

    end_of_sample = &((kd_buf *)my_buffer)[count];

    /* Always reinitialize the DECR_TRAP array */
    for (i = 0; i < hi.avail_cpus; i++)
        last_decrementer_kd[i] = (kd_buf *)my_buffer;

    last_mach_sched = (kd_buf *)0;

    for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
        int debugid, thread, cpunum;
        int type, clen, mode;
        char sched_info[64];
        kd_threadmap *find_thread_map();
        double handle_decrementer();
        kd_buf *log_decrementer();
        int check_for_thread_update();
        void enter_syscall();
        void exit_syscall();

        thread  = kd->arg5 & KDBG_THREAD_MASK;
        cpunum  = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
        debugid = kd->debugid;
        type    = kd->debugid & DBG_FUNC_MASK;

        if (check_for_thread_update(thread, type, kd))

        if (type == DECR_TRAP)
            i_latency = handle_decrementer(kd);

        now = kd->timestamp;

        timestamp = ((double)now) / divisor;

        if (now < start || now > stop) {
            if (debugid & DBG_FUNC_START)
                enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
            else if (debugid & DBG_FUNC_END)
                exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 0);
            else if (type == DECR_TRAP) {
                if (log_fp && i_thresh_hold && (int)i_latency > i_thresh_hold) {
                    start_kd = last_decrementer_kd[cpunum];
                    kd = log_decrementer(start_kd, kd, end_of_sample, i_latency);
                    if (kd >= end_of_sample)

                if ((kd->debugid & DBG_FUNC_MASK) == DECR_TRAP)
                    cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
                    last_decrementer_kd[cpunum] = kd;

                last_decrementer_kd[cpunum] = cur_kd;

    latency = (double)(stop - start) / divisor;
    latency -= (double)num_of_usecs_to_sleep;

        sprintf(buf2, "default");
        sprintf(buf2, "%d", my_pri);

    sprintf(buf1, "%-19.19s scheduling latency = %.1fus sleep_request = %dus policy = %s priority = %s",
        &(ctime(&curr_time)[0]), latency, num_of_usecs_to_sleep, policy_name, buf2);
    clen = strlen(buf1);
    memset(buf2, '-', clen);

    fprintf(log_fp, "\n\n%s\n", buf2);
    fprintf(log_fp, "%s\n\n", buf1);
    fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

    start_bias = ((double)start) / divisor;
    last_timestamp = timestamp;

        delta = timestamp - last_timestamp;

        if (map = find_thread_map(thread))
            strcpy(command, map->command);

            fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
                timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
            last_timestamp = timestamp;

            fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-58.58s %-8x %d %s\n",
                timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
            last_timestamp = timestamp;

            fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-58.58s %-8x %d %s\n",
                timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
            last_timestamp = timestamp;

            fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-58.58s %-8x %d %s\n",
                timestamp - start_bias, delta, pc_to_string(kd->arg1, 58, 1) , thread, cpunum, command);
            last_timestamp = timestamp;

            last_decrementer_kd[cpunum] = kd;

            if (i_thresh_hold && (int)i_latency > i_thresh_hold)

            if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
                if (ti->type == -1 && strcmp(command, "kernel_task"))

            fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
                timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);

            last_timestamp = timestamp;

            fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
                timestamp - start_bias, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);

            last_timestamp = timestamp;

        case MACH_stkhandoff:
            last_mach_sched = kd;

            if (map = find_thread_map(kd->arg2))
                strcpy(command1, map->command);
                sprintf(command1, "%-8x", kd->arg2);

            if (ti = find_thread(kd->arg2, 0, 0)) {
                if (ti->type == -1 && strcmp(command1, "kernel_task"))

            memset(sched_info, ' ', sizeof(sched_info));

            sprintf(sched_info, "%14.14s", command);
            clen = strlen(sched_info);
            sched_info[clen] = ' ';

            sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
            clen = strlen(sched_info);
            sched_info[clen] = ' ';

            sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);

            fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
                timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);

            last_timestamp = timestamp;

            if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
                if (cur_max >= MAX_THREADS)
                ti = &th_state[cur_max++];

                ti->thread = thread;
                ti->pathptr = (long *)0;
                ti->child_thread = 0;

            while ( (kd < end_of_sample) && ((kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP))
                    ti->arg1 = kd->arg1;
                    memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
                    sargptr = (long *)&ti->pathname[0];

                    *sargptr++ = kd->arg2;
                    *sargptr++ = kd->arg3;
                    *sargptr++ = kd->arg4;
                    ti->pathptr = sargptr;

                    sargptr = ti->pathptr;

                    /*
                       We don't want to overrun our pathname buffer if the
                       kernel sends us more VFS_LOOKUP entries than we can
                    */
                    if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])

                    /*
                       We need to detect consecutive vfslookup entries.
                       So, if we get here and find a START entry,
                       fake the pathptr so we can bypass all further
                    */
                    if (kd->debugid & DBG_FUNC_START)
                        (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];

                    *sargptr++ = kd->arg1;
                    *sargptr++ = kd->arg2;
                    *sargptr++ = kd->arg3;
                    *sargptr++ = kd->arg4;
                    ti->pathptr = sargptr;

            /* print the tail end of the pathname */
            len = strlen(ti->pathname);

            fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
                timestamp - start_bias, delta, "VFS_LOOKUP",
                &ti->pathname[len], ti->arg1, thread, cpunum, command);

            last_timestamp = timestamp;

            if (debugid & DBG_FUNC_START)
                enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
            else if (debugid & DBG_FUNC_END)
                exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
            print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);

            last_timestamp = timestamp;

    if (last_mach_sched && log_fp)
        fprintf(log_fp, "\nblocked by %s @ priority %d\n", command, last_mach_sched->arg3);

    if (first_entry == 0 && log_fp)
        fprintf(log_fp, "\n start = %qd stop = %qd count = %d now = %qd\n", start, stop, count, now);

    if (log_fp && (my_policy == THREAD_TIME_CONSTRAINT_POLICY))
        /* set back to realtime band */
        if (set_time_constraint_policy() != KERN_SUCCESS)
            quit("Failed to set time_constraint policy.\n");
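/*
 * Added summary (commentary, not original): sample_sc() drains the kernel
 * trace buffer with KERN_KDREADTR into my_buffer and walks the kd_buf records.
 * DECR_TRAP entries feed the interrupt-latency histograms via
 * handle_decrementer(), thread-map events are folded in through
 * check_for_thread_update(), and when a logfile is active and a threshold was
 * exceeded the window of events between start and stop is pretty-printed with
 * symbolicated PCs.
 */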
enter_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
    cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;

    if (print_info && fp) {
        if (p = find_code(type)) {
            if (type == INTERRUPT) {
                if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
                    if (ti->type == -1 && strcmp(command, "kernel_task"))

                fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT @ %-59.59s %-8x %d %s\n",
                    timestamp - bias, delta, pc_to_string(kd->arg2, 59, mode), thread, cpunum, command);
            } else if (type == MACH_vmfault) {
                fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %d %s\n",
                    timestamp - bias, delta, p, thread, cpunum, command);
                fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
                    timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
                    thread, cpunum, command);
            fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
                timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
                thread, cpunum, command);

    if ((ti = find_thread(thread, -1, type)) == (struct th_info *)0) {
        if (cur_max >= MAX_THREADS) {
            static int do_this_once = 1;

            for (i = 0; i < cur_max; i++) {
                fprintf(fp, "thread = %x, type = %x\n",
                    th_state[i].thread, th_state[i].type);

        ti = &th_state[cur_max++];

        ti->thread = thread;
        ti->child_thread = 0;

    if (type != BSC_exit)
        ti->stime = timestamp;
    ti->pathptr = (long *)0;

    if (print_info && fp)
        fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, ti->type, ti->thread);
exit_syscall(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias, int print_info)
    cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
    ti = find_thread(thread, type, type);

    if (print_info && fp)
        fprintf(fp, "cur_max = %d, ti = %x, type = %x, thread = %x\n", cur_max, ti, type, thread);

    if (print_info && fp) {
            fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp - bias, delta, timestamp - ti->stime);
            fprintf(fp, "%9.1f %8.1f() \t", timestamp - bias, delta);

        if (p = find_code(type)) {
            if (type == INTERRUPT) {
                fprintf(fp, "INTERRUPT %-8x %d %s\n", thread, cpunum, command);
            } else if (type == MACH_vmfault && kd->arg2 <= DBG_CACHE_HIT_FAULT) {
                fprintf(fp, "%-28.28s %-8.8s %-8x %-8x %d %s\n",
                    p, fault_name[kd->arg2], kd->arg1,
                    thread, cpunum, command);
                fprintf(fp, "%-28.28s %-8x %-8x %-8x %d %s\n",
                    p, kd->arg1, kd->arg2,
                    thread, cpunum, command);
            fprintf(fp, "%-8x %-8x %-8x %-8x %d %s\n",
                type, kd->arg1, kd->arg2,
                thread, cpunum, command);

    if (ti == (struct th_info *)0) {
        if ((ti = find_thread(thread, -1, -1)) == (struct th_info *)0) {
            if (cur_max >= MAX_THREADS)
            ti = &th_state[cur_max++];

            ti->thread = thread;
            ti->child_thread = 0;
            ti->pathptr = (long *)0;
print_entry(FILE *fp, kd_buf *kd, int thread, int type, char *command, double timestamp, double delta, double bias)
    cpunum = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;

        fprintf(fp, "cur_max = %d, type = %x, thread = %x, cpunum = %d\n", cur_max, type, thread, cpunum);

    if (p = find_code(type)) {
        fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-8x %-8x %-8x %-8x %-8x %d %s\n",
            timestamp - bias, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
            thread, cpunum, command);
        fprintf(fp, "%9.1f %8.1f\t\t%-8x %-8x %-8x %-8x %-8x %-8x %d %s\n",
            timestamp - bias, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
            thread, cpunum, command);
check_for_thread_update(int thread, int type, kd_buf *kd)
    void create_map_entry();

    case TRACE_DATA_NEWTHREAD:
        if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
            if (cur_max >= MAX_THREADS)
            ti = &th_state[cur_max++];

            ti->thread = thread;
            ti->pathptr = (long *)0;

        ti->child_thread = kd->arg1;

    case TRACE_STRING_NEWTHREAD:
        if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0)
        if (ti->child_thread == 0)

        create_map_entry(ti->child_thread, (char *)&kd->arg1);

        ti->child_thread = 0;

    case TRACE_STRING_EXEC:
        create_map_entry(thread, (char *)&kd->arg1);
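/*
 * Added note (inference from the code above): TRACE_DATA_NEWTHREAD records the
 * newly created thread's id in arg1 against the creating thread; the matching
 * TRACE_STRING_NEWTHREAD then carries the command name (the string bytes start
 * at arg1), which is attached to the saved child_thread; TRACE_STRING_EXEC
 * renames the current thread after an exec.
 */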
kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
    kd_buf *kd, *kd_start, *kd_stop;
    int kd_count;        /* Limit the boundary of kd_start */
    double timestamp, last_timestamp, delta, start_bias;
    int debugid, type, clen;
    char sched_info[64];
    kd_threadmap *find_thread_map();

    sprintf(buf1, "%-19.19s interrupt latency = %.1fus", &(ctime(&curr_time)[0]), i_latency);
    clen = strlen(buf1);
    memset(buf2, '-', clen);

    fprintf(log_fp, "\n\n%s\n", buf2);
    fprintf(log_fp, "%s\n\n", buf1);
    fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

    thread = kd_beg->arg5 & KDBG_THREAD_MASK;
    cpunum = (kd_end->arg5 & KDBG_CPU_MASK) ? 1: 0;

    for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
        if (kd_count == MAX_LOG_COUNT)
        if ((kd_start->arg5 & KDBG_CPU_MASK) != cpunum)
        if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP)
        if ((kd_start->arg5 & KDBG_THREAD_MASK) != thread)

    if (kd_start < (kd_buf *)my_buffer)
        kd_start = (kd_buf *)my_buffer;

    thread = kd_end->arg5 & KDBG_THREAD_MASK;

    for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
        if ((kd_stop->debugid & DBG_FUNC_MASK) == DECR_TRAP)
        if ((kd_stop->arg5 & KDBG_CPU_MASK) != cpunum)
        if ((kd_stop->arg5 & KDBG_THREAD_MASK) != thread)

    if (kd_stop >= end_of_sample)
        kd_stop = end_of_sample - 1;

    now = kd_start->timestamp;
    timestamp = ((double)now) / divisor;

    for (kd = kd_start; kd <= kd_stop; kd++) {
        type = kd->debugid & DBG_FUNC_MASK;

        if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), type, type)) {
            if (ti->stime >= timestamp)

    for (kd = kd_start; kd <= kd_stop; kd++) {
        thread  = kd->arg5 & KDBG_THREAD_MASK;
        cpunum  = (kd->arg5 & KDBG_CPU_MASK) ? 1: 0;
        debugid = kd->debugid;
        type    = kd->debugid & DBG_FUNC_MASK;

        now = kd->timestamp;

        timestamp = ((double)now) / divisor;

        if (kd == kd_start) {
            start_bias = timestamp;
            last_timestamp = timestamp;

        delta = timestamp - last_timestamp;

        if (map = find_thread_map(thread))
            strcpy(command, map->command);

            fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-59.59s %-8x %d %s\n",
                timestamp - start_bias, delta, pc_to_string(kd->arg1, 59, 1) , thread, cpunum, command);
            last_timestamp = timestamp;

            if ((int)(kd->arg1) >= 0)
                i_latency = (((double)(-1 - kd->arg1)) / divisor);

            if (i_thresh_hold && (int)i_latency > i_thresh_hold)

            if (ti = find_thread((kd->arg5 & KDBG_THREAD_MASK), 0, 0)) {
                if (ti->type == -1 && strcmp(command, "kernel_task"))

            fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-59.59s %-8x %d %s\n",
                timestamp - start_bias, delta, i_latency, p, pc_to_string(kd->arg2, 59, mode) , thread, cpunum, command);

            last_timestamp = timestamp;

            fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %-8x %d %s\n",
                timestamp - start_bias, delta, (double)kd->arg1/divisor,
                "DECR_SET", thread, cpunum, command);

            last_timestamp = timestamp;

        case MACH_stkhandoff:
            if (map = find_thread_map(kd->arg2))
                strcpy(command1, map->command);
                sprintf(command1, "%-8x", kd->arg2);

            if (ti = find_thread(kd->arg2, 0, 0)) {
                if (ti->type == -1 && strcmp(command1, "kernel_task"))

            memset(sched_info, ' ', sizeof(sched_info));

            sprintf(sched_info, "%14.14s", command);
            clen = strlen(sched_info);
            sched_info[clen] = ' ';

            sprintf(&sched_info[14], " @ pri %3d --> %14.14s", kd->arg3, command1);
            clen = strlen(sched_info);
            sched_info[clen] = ' ';

            sprintf(&sched_info[45], " @ pri %3d%s", kd->arg4, p);

            fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s %s %-8x %d\n",
                timestamp - start_bias, delta, "MACH_SCHED", sched_info, thread, cpunum);

            last_timestamp = timestamp;

            if ((ti = find_thread(thread, 0, 0)) == (struct th_info *)0) {
                if (cur_max >= MAX_THREADS)
                ti = &th_state[cur_max++];

                ti->thread = thread;
                ti->pathptr = (long *)0;
                ti->child_thread = 0;

            while ( (kd <= kd_stop) && (kd->debugid & DBG_FUNC_MASK) == VFS_LOOKUP)
                    ti->arg1 = kd->arg1;
                    memset(&ti->pathname[0], 0, (PATHLENGTH + 1));
                    sargptr = (long *)&ti->pathname[0];

                    *sargptr++ = kd->arg2;
                    *sargptr++ = kd->arg3;
                    *sargptr++ = kd->arg4;
                    ti->pathptr = sargptr;

                    sargptr = ti->pathptr;

                    /*
                       We don't want to overrun our pathname buffer if the
                       kernel sends us more VFS_LOOKUP entries than we can
                    */
                    if ((long *)sargptr >= (long *)&ti->pathname[PATHLENGTH])

                    /*
                       We need to detect consecutive vfslookup entries.
                       So, if we get here and find a START entry,
                       fake the pathptr so we can bypass all further
                    */
                    if (kd->debugid & DBG_FUNC_START)
                        (long *)ti->pathptr = (long *)&ti->pathname[PATHLENGTH];

                    *sargptr++ = kd->arg1;
                    *sargptr++ = kd->arg2;
                    *sargptr++ = kd->arg3;
                    *sargptr++ = kd->arg4;
                    ti->pathptr = sargptr;

            /* print the tail end of the pathname */
            len = strlen(ti->pathname);

            fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-42s %-8x %-8x %d %s\n",
                timestamp - start_bias, delta, "VFS_LOOKUP",
                &ti->pathname[len], ti->arg1, thread, cpunum, command);

            last_timestamp = timestamp;

            if (debugid & DBG_FUNC_START)
                enter_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
            else if (debugid & DBG_FUNC_END)
                exit_syscall(log_fp, kd, thread, type, command, timestamp, delta, start_bias, 1);
            print_entry(log_fp, kd, thread, type, command, timestamp, delta, start_bias);

            last_timestamp = timestamp;
double handle_decrementer(kd_buf *kd)
    if ((int)(kd->arg1) >= 0)

    latency = (((double)(-1 - kd->arg1)) / divisor);
    elapsed_usecs = (int)latency;

    if (elapsed_usecs < 100)
        i_usec_10_bins[elapsed_usecs/10]++;
    if (elapsed_usecs < 1000)
        i_usec_100_bins[elapsed_usecs/100]++;
    else if (elapsed_usecs < 10000)
        i_msec_1_bins[elapsed_usecs/1000]++;
    else if (elapsed_usecs < 50000)
        i_msec_10_bins[elapsed_usecs/10000]++;

    if (i_thresh_hold && elapsed_usecs > i_thresh_hold)
        i_exceeded_threshold++;
    if (elapsed_usecs > i_max_latency)
        i_max_latency = elapsed_usecs;
    if (elapsed_usecs < i_min_latency || i_total_samples == 0)
        i_min_latency = elapsed_usecs;
    i_total_latency += elapsed_usecs;
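/*
 * Added note: for DECR_TRAP events the lateness of the decrementer appears in
 * arg1 -- a non-negative value is treated as "on time", while a negative value
 * is decoded as (-1 - arg1) ticks and divided by divisor to get microseconds,
 * which are then binned exactly like the scheduling-latency samples.
 */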
void init_code_file()
    int i, n, cnt, code;

    if ((fp = fopen(code_file, "r")) == (FILE *)0) {
            fprintf(log_fp, "open of %s failed\n", code_file);

    n = fscanf(fp, "%d\n", &cnt);
            fprintf(log_fp, "bad format found in %s\n", code_file);

    for (i = 0; i < MAX_ENTRIES; i++) {
        n = fscanf(fp, "%x%s\n", &code, name);

        strncpy(codes_tab[i].name, name, 32);
        codes_tab[i].type = code;
    FILE *fp = (FILE *)0;
    char tmp_nm_file[128];

    bzero(tmp_nm_file, 128);
    bzero(tmpstr, 1024);

    /* Build the temporary nm file path */
    sprintf(tmp_nm_file, "/tmp/knm.out.%d", getpid());

    /* Build the nm command and create a tmp file with the output*/
    sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
        kernelpath, tmp_nm_file);

    /* Parse the output from the nm command */
    if ((fp = fopen(tmp_nm_file, "r")) == (FILE *)0)
        /* Hmmm, let's not treat this as fatal */
        fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);

    /* Count the number of symbols in the nm symbol table */
    while ( (inchr = getc(fp)) != -1)

    /* Malloc the space for symbol table */
    if (kern_sym_count > 0)
        kern_sym_tbl = (kern_sym_t *)malloc(kern_sym_count * sizeof (kern_sym_t));
        /* Hmmm, lets not treat this as fatal */
        fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
        bzero(kern_sym_tbl, (kern_sym_count * sizeof(kern_sym_t)));
        /* Hmmm, lets not treat this as fatal */
        fprintf(stderr, "No kernel symbol table \n");

    for (i = 0; i < kern_sym_count; i++)
        bzero(tmpstr, 1024);
        if (fscanf(fp, "%x %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3)

        len = strlen(tmpstr);
        kern_sym_tbl[i].k_sym_name = malloc(len + 1);

        if (kern_sym_tbl[i].k_sym_name == (char *)0)
            fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
            kern_sym_tbl[i].k_sym_name = (char *)0;

            strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);

        kern_sym_tbl[i].k_sym_len = len;

    if (i != kern_sym_count)
        /* Hmmm, didn't build up entire table from nm */
        /* scrap the entire thing */
        free (kern_sym_tbl);
        kern_sym_tbl = (kern_sym_t *)0;

    /* Remove the temporary nm file */
    unlink(tmp_nm_file);
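/*
 * Added note: the kernel symbol table is built by running /usr/bin/nm on the
 * kernel image (address-sorted via -n), redirecting the output to a per-pid
 * temp file (presumably via system(); that call is not part of this fragment),
 * and parsing each "address type name" line into kern_sym_tbl.  The table is
 * later consumed by pc_to_string()/binary_search() to turn trace PCs into
 * symbol+offset strings.
 */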
    /* Dump the kernel symbol table */
    for (i = 0; i < kern_sym_count; i++)
        if (kern_sym_tbl[i].k_sym_name)
            printf ("[%d] 0x%x %s\n", i,
                kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
            printf ("[%d] 0x%x %s\n", i,
                kern_sym_tbl[i].k_sym_addr, "No symbol name");
pc_to_string(unsigned int pc, int max_len, int mode)
    int binary_search();

    sprintf(pcstring, "0x%-8x [usermode addr]", pc);

    ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);

        sprintf(pcstring, "0x%x", pc);
    else if (kern_sym_tbl[ret].k_sym_name == (char *)0)
        sprintf(pcstring, "0x%x", pc);

    if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8))

    memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);
    sprintf(&pcstring[len], "+0x%-5x", pc - kern_sym_tbl[ret].k_sym_addr);
/* Return -1 if not found, else return index */
int binary_search(list, low, high, addr)
    mid = (low + high) / 2;

        return (-1);    /* failed */
    else if (low + 1 == high)
        if (list[low].k_sym_addr <= addr &&
            addr < list[high].k_sym_addr)
            /* We have a range match */
        else if (list[high].k_sym_addr <= addr)

        return(-1);    /* Failed */
    else if (addr < list[mid].k_sym_addr)
        return(binary_search (list, low, mid, addr));
        return(binary_search (list, mid, high, addr));
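/*
 * Added note: this is a range-match binary search over the address-sorted nm
 * symbol table -- it returns the index i such that
 * list[i].k_sym_addr <= addr < list[i+1].k_sym_addr, or -1 when addr falls
 * outside the table, so pc_to_string() can print the enclosing symbol plus an
 * offset.
 */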
open_logfile(char *path)
    log_fp = fopen(path, "a");

        /* failed to open path */
        fprintf(stderr, "latency: failed to open logfile [%s]\n", path);