/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
   cc -I/System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders -DPRIVATE -D__APPLE_PRIVATE -arch x86_64 -arch i386 -O -o latency latency.c -lncurses -lutil
*/
#include <mach/mach.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>

#ifndef KERNEL_PRIVATE
#define KERNEL_PRIVATE
#include <sys/kdebug.h>
#undef KERNEL_PRIVATE
#else
#include <sys/kdebug.h>
#endif /*KERNEL_PRIVATE*/

#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/mach_syscalls.h>
#include <mach/clock_types.h>
#include <mach/mach_time.h>

#include <libkern/OSTypes.h>
int s_usec_10_bins[10];
int s_usec_100_bins[10];
int s_msec_1_bins[10];
int s_msec_10_bins[5];

int s_min_latency = 0;
long long s_total_latency = 0;
int s_total_samples = 0;

int s_exceeded_threshold = 0;

#define N_HIGH_RES_BINS 500
int use_high_res_bins = false;
struct i_latencies {
	int i_usec_10_bins[10];
	int i_usec_100_bins[10];
	int i_msec_1_bins[10];
	int i_msec_10_bins[5];
	long i_min_latency;
	long i_max_latency;
	int i_total_samples;
	int i_total;
	int i_too_slow;
	int i_exceeded_threshold;
	uint64_t i_total_latency;
};

struct i_latencies *i_lat;
boolean_t i_latency_per_cpu = FALSE;

int i_high_res_bins[N_HIGH_RES_BINS];
int watch_priority_min = 97;
int watch_priority_max = 97;
char *kernelpath = NULL;

typedef struct {
	void   *k_sym_addr;	/* kernel symbol address from nm */
	size_t  k_sym_len;	/* length of kernel symbol string */
	char   *k_sym_name;	/* kernel symbol string from nm */
} kern_sym_t;

kern_sym_t *kern_sym_tbl;	/* pointer to the nm table       */
int kern_sym_count;		/* number of entries in nm table */

#define MAX_ENTRIES 4096

struct ct {
	int type;
	char name[32];
} codes_tab[MAX_ENTRIES];

char *code_file = NULL;
int num_of_codes = 0;
sig_atomic_t gotSIGWINCH = 0;
int trace_enabled = 0;
int need_new_map = 1;
int set_remove_flag = 1;	/* By default, remove trace buffer */

uint64_t first_now = 0;
uint64_t last_now = 0;

#define SAMPLE_TIME_USECS 50000
#define SAMPLE_SIZE 300000
#define MAX_LOG_COUNT 30	/* limits the number of entries dumped in log_decrementer */
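/*
 * Sampling cadence: the tool wakes up every SAMPLE_TIME_USECS (50ms) to
 * drain the trace buffer, and main() sizes the kernel trace buffer as
 * SAMPLE_SIZE * num_cpus via set_numbufs() so a full sample fits.
 */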
kbufinfo_t bufinfo = {0, 0, 0};

uint64_t sample_TOD_secs;
uint32_t sample_TOD_usecs;

int sample_generation = 0;
int num_i_latency_cpus = 1;

kd_buf **last_decrementer_kd;	/* last DECR_TRAP per cpu */
typedef struct event *event_t;

struct event {
	event_t  ev_next;
	uint64_t ev_thread;
	int      ev_type;
	uint64_t ev_timestamp;
};

typedef struct lookup *lookup_t;

struct lookup {
	lookup_t lk_next;
	uint64_t lk_thread;
	uint64_t lk_dvp;
	int64_t *lk_pathptr;
	int64_t  lk_pathname[NUMPARMS + 1];
};

typedef struct threadmap *threadmap_t;

struct threadmap {
	threadmap_t tm_next;
	uint64_t    tm_thread;
	uint64_t    tm_pthread;
	char        tm_command[MAXCOMLEN + 1];
	char        tm_orig_command[MAXCOMLEN + 1];
};

typedef struct threadrun *threadrun_t;

struct threadrun {
	threadrun_t tr_next;
	uint64_t    tr_thread;
	int         tr_priority;
	uint64_t    tr_timestamp;
	kd_buf     *tr_entry;
};

typedef struct thread_entry *thread_entry_t;

struct thread_entry {
	thread_entry_t te_next;
	uint64_t       te_thread;
};
#define HASH_SIZE 1024
#define HASH_MASK 1023

event_t     event_hash[HASH_SIZE];
lookup_t    lookup_hash[HASH_SIZE];
threadmap_t threadmap_hash[HASH_SIZE];
threadrun_t threadrun_hash[HASH_SIZE];

event_t        event_freelist;
lookup_t       lookup_freelist;
threadrun_t    threadrun_freelist;
threadmap_t    threadmap_freelist;
threadmap_t    threadmap_temp;

thread_entry_t thread_entry_freelist;
thread_entry_t thread_delete_list;
thread_entry_t thread_reset_list;
thread_entry_t thread_event_list;
thread_entry_t thread_lookup_list;
thread_entry_t thread_run_list;
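/*
 * Each hash table above is an array of HASH_SIZE singly-linked chains
 * indexed by (thread & HASH_MASK); HASH_MASK == HASH_SIZE - 1 and
 * HASH_SIZE is a power of two, so the AND is a cheap modulo.  Retired
 * nodes are pushed onto the matching freelist rather than free()d, and
 * the thread_* lists remember which chains need garbage collection.
 */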
#define RAW_VERSION0 0x55aa0000
#define RAW_VERSION1 0x55aa0101

#define KERNEL_MODE 1

#define INTERRUPT         0x01050000
#define DECR_TRAP         0x01090000
#define DECR_SET          0x01090004
#define MACH_vmfault      0x01300008
#define MACH_sched        0x01400000
#define MACH_stkhandoff   0x01400008
#define MACH_makerunnable 0x01400018
#define MACH_idle         0x01400024
#define IES_action        0x050b0018
#define IES_filter        0x050b001c
#define TES_action        0x050c0010
#define CQ_action         0x050d0018
#define CPUPM_CPUSTER_RUNCOUNT 0x05310144

#define BSC_exit             0x040C0004
#define BSC_thread_terminate 0x040c05a4

#define DBG_FUNC_MASK ~(DBG_FUNC_START | DBG_FUNC_END)

#define CPU_NUMBER(kp) kdbg_get_cpu(kp)

#define EMPTYSTRING ""
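/*
 * The hex constants above are kdebug debugids: class/subclass/code packed
 * into the upper bits, with the DBG_FUNC_START/DBG_FUNC_END qualifiers in
 * the low bits.  Masking an event's debugid with DBG_FUNC_MASK strips the
 * qualifiers so the START and END of the same event compare equal.
 */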
const char *fault_name[] = {

const char *sched_reasons[] = {

#define ARRAYSIZE(x) ((int)(sizeof(x) / sizeof(*x)))
#define MAX_REASON ARRAYSIZE(sched_reasons)
static double handle_decrementer(kd_buf *, int);
static kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency);
static void read_command_map(void);
static void enter_syscall(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info);
static void exit_syscall(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info);
static void print_entry(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd_note);
static void log_info(uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd, kd_buf *kd_note);
static char *find_code(int);
static void pc_to_string(char *pcstring, uint64_t pc, int max_len, int mode);
static void getdivisor(void);
static int sample_sc(void);
static void init_code_file(void);
static void do_kernel_nm(void);
static void open_logfile(const char*);
static int binary_search(kern_sym_t *list, int low, int high, uint64_t addr);

static void create_map_entry(uint64_t, char *);
static void check_for_thread_update(uint64_t thread, int debugid_base, kd_buf *kbufp, char **command);
static void log_scheduler(kd_buf *kd_start, kd_buf *kd_stop, kd_buf *end_of_sample, int s_priority, double s_latency, uint64_t thread);
static int check_for_scheduler_latency(int type, uint64_t *thread, uint64_t now, kd_buf *kd, kd_buf **kd_start, int *priority, double *latency);
static void open_rawfile(const char *path);

static void screen_update(FILE *);

static void set_enable(int);
static void set_remove(void);
static void
quit(char *s)
{
	/*
	 * This flag is turned off when calling
	 * quit() due to a set_remove() failure.
	 */
	if (set_remove_flag) {
		set_remove();
	}
	printf("latency: %s", s);

	exit(1);
}
static void
set_enable(int val)
{
	int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, val };
	size_t needed;

	if (sysctl(mib, ARRAYSIZE(mib), NULL, &needed, NULL, 0) < 0) {
		quit("trace facility failure, KERN_KDENABLE\n");
	}
}
static void
set_numbufs(int nbufs)
{
	int mib1[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, nbufs };
	int mib2[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETUP };
	size_t needed;

	if (sysctl(mib1, ARRAYSIZE(mib1), NULL, &needed, NULL, 0) < 0) {
		quit("trace facility failure, KERN_KDSETBUF\n");
	}
	if (sysctl(mib2, ARRAYSIZE(mib2), NULL, &needed, NULL, 0) < 0) {
		quit("trace facility failure, KERN_KDSETUP\n");
	}
}
static void
set_pidexclude(int pid, int on_off)
{
	int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDPIDEX };
	size_t needed = sizeof(kd_regtype);

	kd_regtype kr = {
		.type = KDBG_TYPENONE,
		.value1 = pid,
		.value2 = on_off,
	};
	sysctl(mib, ARRAYSIZE(mib), &kr, &needed, NULL, 0);
}
static void
get_bufinfo(kbufinfo_t *val)
{
	int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF };
	size_t needed = sizeof (*val);

	if (sysctl(mib, ARRAYSIZE(mib), val, &needed, 0, 0) < 0) {
		quit("trace facility failure, KERN_KDGETBUF\n");
	}
}
static void
set_remove(void)
{
	int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE };
	size_t needed;

	if (sysctl(mib, ARRAYSIZE(mib), NULL, &needed, NULL, 0) < 0) {
		set_remove_flag = 0;

		if (errno == EBUSY) {
			quit("the trace facility is currently in use...\n          fs_usage, sc_usage, and latency use this feature.\n\n");
		} else {
			quit("trace facility failure, KERN_KDREMOVE\n");
		}
	}
}
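/*
 * The set_ and get_ helpers above drive the kernel trace facility through
 * sysctl(CTL_KERN, KERN_KDEBUG, <op>): KDSETBUF/KDSETUP size and create
 * the trace buffer, KDENABLE starts or stops collection, KDPIDEX excludes
 * a pid (this tool excludes itself), KDGETBUF queries the configuration,
 * and KDREMOVE tears the buffer down -- failing with EBUSY when another
 * trace client such as fs_usage or sc_usage owns it.
 */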
static void
write_high_res_latencies(void)
{
	int i;
	FILE *f;

	if (use_high_res_bins) {
		if ((f = fopen("latencies.csv","w"))) {
			for (i = 0; i < N_HIGH_RES_BINS; i++) {
				fprintf(f, "%d,%d\n", i, i_high_res_bins[i]);
			}
			fclose(f);
		}
	}
}
void
sigintr(int signo __attribute__((unused)))
{
	write_high_res_latencies();

	set_pidexclude(getpid(), 0);
	screen_update(log_fp);

	exit(1);
}
/* exit under normal conditions -- signal handler */
void
leave(int signo __attribute__((unused)))
{
	write_high_res_latencies();

	set_pidexclude(getpid(), 0);

	exit(0);
}
void
sigwinch(int signo __attribute__((unused)))
{
	gotSIGWINCH = 1;
}
static void
print_total(FILE *fp, char *s, int total)
{
	int cpu;
	int clen;
	int itotal;
	struct i_latencies *il;
	char tbuf[512];

	for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		itotal += il->i_total;
	}
	clen = sprintf(tbuf, "%s %10d %9d", s, total, itotal);

	for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		if (i_latency_per_cpu == TRUE) {
			clen += sprintf(&tbuf[clen], " %9d", il->i_total);
		}
		il->i_total = 0;
	}
	sprintf(&tbuf[clen], "\n");

	fprintf(fp, "%s", tbuf);
}
static void
screen_update(FILE *fp)
{
	int i;
	int cpu;
	int clen;
	int itotal, stotal;
	uint64_t tot_lat;
	long elapsed_secs;
	long elapsed_mins;
	long elapsed_hours;
	long min_lat, max_lat;

	unsigned int average_s_latency;
	unsigned int average_i_latency;
	struct i_latencies *il;
	char tbuf[1024];

	fprintf(fp,"\n\n===================================================================================================\n");
	/*
	 * Display the current time.
	 * "ctime" always returns a string that looks like this:
	 *
	 *	Sun Sep 16 01:03:52 1973
	 *	012345678901234567890123
	 *
	 * We want indices 11 thru 18 (length 8).
	 */
	if (RAW_flag) {
		curr_time = (unsigned long)sample_TOD_secs;
		elapsed_secs = ((last_now - first_now) / divisor) / 1000000;
	} else {
		curr_time = time(NULL);
		elapsed_secs = curr_time - start_time;
	}

	elapsed_hours = elapsed_secs / 3600;
	elapsed_secs -= elapsed_hours * 3600;
	elapsed_mins = elapsed_secs / 60;
	elapsed_secs -= elapsed_mins * 60;

	sprintf(tbuf, "%-19.19s %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
		(long)elapsed_hours, (long)elapsed_mins, (long)elapsed_secs);

	fprintf(fp, "%s", tbuf);

	sprintf(tbuf, " SCHEDULER INTERRUPTS\n");

	fprintf(fp, "%s", tbuf);
	if (i_latency_per_cpu == TRUE) {
		clen = sprintf(tbuf, " Total");

		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			if (cpu <= 9) {
				clen += sprintf(&tbuf[clen], "  CPU %d", cpu);
			} else {
				clen += sprintf(&tbuf[clen], " CPU %d", cpu);
			}
		}
		fprintf(fp, "%s", tbuf);

		clen = sprintf(tbuf, "\n-------------------------------------------------------");

		for (cpu = 1; cpu < num_i_latency_cpus; cpu++) {
			clen += sprintf(&tbuf[clen], "----------");
		}
		fprintf(fp, "%s", tbuf);
	} else {
		sprintf(tbuf, "---------------------------------------------");

		fprintf(fp, "%s", tbuf);
	}
	for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		itotal += il->i_total_samples;
	}
	clen = sprintf(tbuf, "\ntotal_samples %10d %9d", s_total_samples, itotal);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			clen += sprintf(&tbuf[clen], " %9d", il->i_total_samples);
		}
	}
	sprintf(&tbuf[clen], "\n");

	fprintf(fp, "%s", tbuf);
	for (stotal = 0, i = 0; i < 10; i++) {
		for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			itotal += il->i_usec_10_bins[i];
			il->i_total += il->i_usec_10_bins[i];
		}
		clen = sprintf(tbuf, "\ndelays < %3d usecs %10d %9d", (i + 1) * 10, s_usec_10_bins[i], itotal);

		stotal += s_usec_10_bins[i];

		if (i_latency_per_cpu == TRUE) {
			for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
				il = &i_lat[cpu];

				clen += sprintf(&tbuf[clen], " %9d", il->i_usec_10_bins[i]);
			}
		}
		fprintf(fp, "%s", tbuf);
	}
	print_total(fp, "\ntotal < 100 usecs", stotal);
	for (stotal = 0, i = 1; i < 10; i++) {
		for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			itotal += il->i_usec_100_bins[i];
			il->i_total += il->i_usec_100_bins[i];
		}
		if (i < 9) {
			clen = sprintf(tbuf, "\ndelays < %3d usecs %10d %9d", (i + 1) * 100, s_usec_100_bins[i], itotal);
		} else {
			clen = sprintf(tbuf, "\ndelays < 1 msec %10d %9d", s_usec_100_bins[i], itotal);
		}
		stotal += s_usec_100_bins[i];

		if (i_latency_per_cpu == TRUE) {
			for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
				il = &i_lat[cpu];

				clen += sprintf(&tbuf[clen], " %9d", il->i_usec_100_bins[i]);
			}
		}
		fprintf(fp, "%s", tbuf);
	}
	print_total(fp, "\ntotal < 1 msec ", stotal);
	for (stotal = 0, i = 1; i < 10; i++) {
		for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			itotal += il->i_msec_1_bins[i];
			il->i_total += il->i_msec_1_bins[i];
		}
		clen = sprintf(tbuf, "\ndelays < %3d msecs %10d %9d", (i + 1), s_msec_1_bins[i], itotal);

		stotal += s_msec_1_bins[i];

		if (i_latency_per_cpu == TRUE) {
			for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
				il = &i_lat[cpu];

				clen += sprintf(&tbuf[clen], " %9d", il->i_msec_1_bins[i]);
			}
		}
		fprintf(fp, "%s", tbuf);
	}
	print_total(fp, "\ntotal < 10 msecs", stotal);
	for (stotal = 0, i = 1; i < 5; i++) {
		for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			itotal += il->i_msec_10_bins[i];
			il->i_total += il->i_msec_10_bins[i];
		}
		clen = sprintf(tbuf, "\ndelays < %3d msecs %10d %9d", (i + 1)*10, s_msec_10_bins[i], itotal);

		stotal += s_msec_10_bins[i];

		if (i_latency_per_cpu == TRUE) {
			for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
				il = &i_lat[cpu];

				clen += sprintf(&tbuf[clen], " %9d", il->i_msec_10_bins[i]);
			}
		}
		fprintf(fp, "%s", tbuf);
	}
	print_total(fp, "\ntotal < 50 msecs", stotal);
	for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		itotal += il->i_too_slow;
	}
	clen = sprintf(tbuf, "\ndelays > 50 msecs %10d %9d", s_too_slow, itotal);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			clen += sprintf(&tbuf[clen], " %9d", il->i_too_slow);
		}
	}
	fprintf(fp, "%s", tbuf);
	for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		if (cpu == 0 || (il->i_min_latency < min_lat)) {
			min_lat = il->i_min_latency;
		}
	}
	clen = sprintf(tbuf, "\n\nminimum latency(usecs) %7d %9ld", s_min_latency, min_lat);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			clen += sprintf(&tbuf[clen], " %9ld", il->i_min_latency);
		}
	}
	fprintf(fp, "%s", tbuf);
	for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		if (cpu == 0 || (il->i_max_latency > max_lat)) {
			max_lat = il->i_max_latency;
		}
	}
	clen = sprintf(tbuf, "\nmaximum latency(usecs) %7d %9ld", s_max_latency, max_lat);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			clen += sprintf(&tbuf[clen], " %9ld", il->i_max_latency);
		}
	}
	fprintf(fp, "%s", tbuf);
	if (s_total_samples) {
		average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
	} else {
		average_s_latency = 0;
	}
	for (itotal = 0, tot_lat = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		itotal += il->i_total_samples;
		tot_lat += il->i_total_latency;
	}
	if (itotal) {
		average_i_latency = (unsigned)(tot_lat/itotal);
	} else {
		average_i_latency = 0;
	}
	clen = sprintf(tbuf, "\naverage latency(usecs) %7d %9d", average_s_latency, average_i_latency);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			if (il->i_total_samples) {
				average_i_latency = (unsigned int)(il->i_total_latency/il->i_total_samples);
			} else {
				average_i_latency = 0;
			}
			clen += sprintf(&tbuf[clen], " %9d", average_i_latency);
		}
	}
	fprintf(fp, "%s", tbuf);
	for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
		il = &i_lat[cpu];

		itotal += il->i_exceeded_threshold;
	}
	clen = sprintf(tbuf, "\nexceeded threshold %7d %9d", s_exceeded_threshold, itotal);

	if (i_latency_per_cpu == TRUE) {
		for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
			il = &i_lat[cpu];

			clen += sprintf(&tbuf[clen], " %9d", il->i_exceeded_threshold);
		}
	}
	sprintf(&tbuf[clen], "\n");

	fprintf(fp, "%s", tbuf);
}
static void
usage(void)
{
	fprintf(stderr, "Usage: latency [-p <priority>] [-h] [-m] [-st <threshold>] [-it <threshold>]\n");
	fprintf(stderr, "               [-c <codefile>] [-l <logfile>] [-R <rawfile>] [-n <kernel>]\n\n");

	fprintf(stderr, "  -p    specify scheduling priority to watch... default is realtime. Can also be a range, e.g. \"31-47\".\n");
	fprintf(stderr, "  -h    Display high resolution interrupt latencies and write them to latencies.csv (truncate existing file) upon exit.\n");
	fprintf(stderr, "  -st   set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
	fprintf(stderr, "  -m    specify per-CPU interrupt latency reporting\n");
	fprintf(stderr, "  -it   set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
	fprintf(stderr, "  -c    specify name of codes file... default is /usr/share/misc/trace.codes\n");
	fprintf(stderr, "  -l    specify name of file to log trace entries to when the specified threshold is exceeded\n");
	fprintf(stderr, "  -R    specify name of raw trace file to process\n");
	fprintf(stderr, "  -n    specify kernel... default is /System/Library/Kernels/kernel.development\n");

	fprintf(stderr, "\nlatency must be run as root\n\n");

	exit(1);
}
int
main(int argc, char *argv[])
{
	int i;

	if (0 != reexec_to_match_kernel()) {
		fprintf(stderr, "Could not re-execute: %d\n", errno);
		exit(1);
	}
	while (argc > 1) {

		if (strcmp(argv[1], "-R") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				open_rawfile(argv[1]);
			} else {
				usage();
			}
			RAW_flag = 1;

		} else if (strcmp(argv[1], "-p") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				if (2 == sscanf(argv[1], "%d-%d", &watch_priority_min, &watch_priority_max)) {
					if (watch_priority_min > watch_priority_max) {
						usage();
					} else if (watch_priority_min < 0) {
						usage();
					}
				} else {
					if (1 == sscanf(argv[1], "%d", &watch_priority_min)) {
						watch_priority_max = watch_priority_min;
					} else {
						usage();
					}
				}
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-st") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				s_thresh_hold = atoi(argv[1]);
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-it") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				i_thresh_hold = atoi(argv[1]);
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-c") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				code_file = argv[1];
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-l") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				open_logfile(argv[1]);
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-n") == 0) {
			argc--;
			argv++;

			if (argc > 1) {
				kernelpath = argv[1];
			} else {
				usage();
			}
		} else if (strcmp(argv[1], "-h") == 0) {
			use_high_res_bins = TRUE;

		} else if (strcmp(argv[1], "-m") == 0) {
			i_latency_per_cpu = TRUE;

		} else {
			usage();
		}
		argc--;
		argv++;
	}
	if (geteuid() != 0) {
		printf("'latency' must be run as root...\n");
		exit(1);
	}
	if (kernelpath == NULL) {
		kernelpath = "/System/Library/Kernels/kernel.development";
	}
	if (code_file == NULL) {
		code_file = "/usr/share/misc/trace.codes";
	}
	if (initscr() == NULL) {
		fprintf(stderr, "Unrecognized TERM type, try vt100\n");
		exit(1);
	}
	signal(SIGWINCH, sigwinch);
	signal(SIGINT, sigintr);
	signal(SIGQUIT, leave);
	signal(SIGTERM, leave);
	signal(SIGHUP, leave);
	/*
	 * grab the number of cpus and scale the buffer size
	 */
	int mib[] = { CTL_HW, HW_NCPU };
	size_t len = sizeof(num_cpus);

	sysctl(mib, ARRAYSIZE(mib), &num_cpus, &len, NULL, 0);

	set_numbufs(SAMPLE_SIZE * num_cpus);

	get_bufinfo(&bufinfo);
	set_pidexclude(getpid(), 1);

	if (!RAW_flag) {
		num_entries = bufinfo.nkdbufs;
	} else {
		num_entries = 50000;
	}
	for (cpu_mask = 0, i = 0; i < num_cpus; i++)
		cpu_mask |= ((uint64_t)1 << i);
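	/*
	 * cpu_mask ends up with one bit set per CPU; log_scheduler() walks
	 * backwards from a late wakeup OR-ing each event's CPU bit into a
	 * scratch mask until it matches cpu_mask, guaranteeing the log
	 * includes context from every CPU.
	 */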
	if ((my_buffer = malloc(num_entries * sizeof(kd_buf))) == NULL) {
		quit("can't allocate memory for tracing info\n");
	}
	if ((last_decrementer_kd = (kd_buf **)malloc(num_cpus * sizeof(kd_buf *))) == NULL) {
		quit("can't allocate memory for decrementer tracing info\n");
	}
	if (i_latency_per_cpu == FALSE) {
		num_i_latency_cpus = 1;
	} else {
		num_i_latency_cpus = num_cpus;
	}
	if ((i_lat = (struct i_latencies *)malloc(num_i_latency_cpus * sizeof(struct i_latencies))) == NULL) {
		quit("can't allocate memory for interrupt latency info\n");
	}
	bzero((char *)i_lat, num_i_latency_cpus * sizeof(struct i_latencies));
	if (RAW_flag) {
		while (sample_sc()) {
			continue;
		}
		if (log_fp) {
			screen_update(log_fp);
		}
		screen_update(stdout);

		exit(0);
	}
	double nanosecs_to_sleep;

	nanosecs_to_sleep = (double)(SAMPLE_TIME_USECS * 1000);
	fdelay = nanosecs_to_sleep * (divisor /1000);
	adelay = (uint64_t)fdelay;

	start_time = time(NULL);
	refresh_time = start_time;

	for (;;) {
		curr_time = time(NULL);

		if (curr_time >= refresh_time) {
			screen_update(NULL);
			refresh_time = curr_time + 1;
		}
		mach_wait_until(mach_absolute_time() + adelay);

		sample_sc();
	}
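	/*
	 * mach_wait_until() takes an absolute deadline in mach ticks, so the
	 * sample period is converted once up front: nanoseconds times
	 * (divisor / 1000), i.e. nanoseconds times ticks-per-nanosecond,
	 * since divisor is mach ticks per microsecond (see getdivisor()).
	 */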
	/*
	 * No need to check for initscr error return.
	 * We won't get here if it fails on the first call.
	 */
static void
read_command_map(void)
{
	kd_threadmap *mapptr = 0;
	int total_threads = 0;
	size_t size;
	off_t offset;
	int i;
	RAW_header header = {0};

	if (RAW_flag) {
		if (read(RAW_fd, &header, sizeof(RAW_header)) != sizeof(RAW_header)) {
			perror("read failed");
			exit(2);
		}
		if (header.version_no != RAW_VERSION1) {
			header.version_no = RAW_VERSION0;
			header.TOD_secs = time(NULL);
			header.TOD_usecs = 0;

			lseek(RAW_fd, (off_t)0, SEEK_SET);

			if (read(RAW_fd, &header.thread_count, sizeof(int)) != sizeof(int)) {
				perror("read failed");
				exit(2);
			}
		}
		total_threads = header.thread_count;

		sample_TOD_secs = header.TOD_secs;
		sample_TOD_usecs = header.TOD_usecs;

		if (total_threads == 0 && header.version_no != RAW_VERSION0) {
			offset = lseek(RAW_fd, (off_t)0, SEEK_CUR);
			offset = (offset + (4095)) & ~4095;	/* round up to a 4KB boundary */

			lseek(RAW_fd, offset, SEEK_SET);
		}
	} else {
		total_threads = bufinfo.nkdthreads;
	}
	size = total_threads * sizeof(kd_threadmap);

	if (size == 0 || ((mapptr = (kd_threadmap *) malloc(size)) == 0)) {
		return;
	}
	bzero (mapptr, size);

	/*
	 * Now read the threadmap
	 */
	if (RAW_flag) {
		if (read(RAW_fd, mapptr, size) != size) {
			printf("Can't read the thread map -- this is not fatal\n");
		}
		if (header.version_no != RAW_VERSION0) {
			offset = lseek(RAW_fd, (off_t)0, SEEK_CUR);
			offset = (offset + (4095)) & ~4095;	/* round up to a 4KB boundary */

			lseek(RAW_fd, offset, SEEK_SET);
		}
	} else {
		int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTHRMAP };
		if (sysctl(mib, ARRAYSIZE(mib), mapptr, &size, NULL, 0) < 0) {
			/*
			 * This is not fatal -- just means I cant map command strings
			 */
			printf("Can't read the thread map -- this is not fatal\n");
		}
	}
	for (i = 0; i < total_threads; i++) {
		create_map_entry(mapptr[i].thread, &mapptr[i].command[0]);
	}
	free(mapptr);
}
static void
create_map_entry(uint64_t thread, char *command)
{
	threadmap_t tme;

	if ((tme = threadmap_freelist)) {
		threadmap_freelist = tme->tm_next;
	} else {
		tme = (threadmap_t)malloc(sizeof(struct threadmap));
	}
	tme->tm_thread = thread;

	(void)strncpy (tme->tm_command, command, MAXCOMLEN);
	tme->tm_command[MAXCOMLEN] = '\0';
	tme->tm_orig_command[0] = '\0';

	int hashid = thread & HASH_MASK;

	tme->tm_next = threadmap_hash[hashid];
	threadmap_hash[hashid] = tme;
}
static void
delete_thread_entry(uint64_t thread)
{
	threadmap_t tme;

	int hashid = thread & HASH_MASK;

	if ((tme = threadmap_hash[hashid])) {
		if (tme->tm_thread == thread) {
			threadmap_hash[hashid] = tme->tm_next;
		} else {
			threadmap_t tme_prev = tme;

			for (tme = tme->tm_next; tme; tme = tme->tm_next) {
				if (tme->tm_thread == thread) {
					tme_prev->tm_next = tme->tm_next;
					break;
				}
				tme_prev = tme;
			}
		}
		if (tme) {
			tme->tm_next = threadmap_freelist;
			threadmap_freelist = tme;
		}
	}
}
static void
find_and_insert_tmp_map_entry(uint64_t pthread, char *command)
{
	threadmap_t tme;

	if ((tme = threadmap_temp)) {
		if (tme->tm_pthread == pthread) {
			threadmap_temp = tme->tm_next;
		} else {
			threadmap_t tme_prev = tme;

			for (tme = tme->tm_next; tme; tme = tme->tm_next) {
				if (tme->tm_pthread == pthread) {
					tme_prev->tm_next = tme->tm_next;
					break;
				}
				tme_prev = tme;
			}
		}
		if (tme) {
			(void)strncpy (tme->tm_command, command, MAXCOMLEN);
			tme->tm_command[MAXCOMLEN] = '\0';
			tme->tm_orig_command[0] = '\0';

			int hashid = tme->tm_thread & HASH_MASK;

			tme->tm_next = threadmap_hash[hashid];
			threadmap_hash[hashid] = tme;
		}
	}
}
static void
create_tmp_map_entry(uint64_t thread, uint64_t pthread)
{
	threadmap_t tme;

	if ((tme = threadmap_freelist)) {
		threadmap_freelist = tme->tm_next;
	} else {
		tme = malloc(sizeof(struct threadmap));
	}
	tme->tm_thread = thread;
	tme->tm_pthread = pthread;
	tme->tm_command[0] = '\0';
	tme->tm_orig_command[0] = '\0';

	tme->tm_next = threadmap_temp;
	threadmap_temp = tme;
}
static threadmap_t
find_thread_entry(uint64_t thread)
{
	threadmap_t tme;

	int hashid = thread & HASH_MASK;

	for (tme = threadmap_hash[hashid]; tme; tme = tme->tm_next) {
		if (tme->tm_thread == thread) {
			return tme;
		}
	}
	return 0;
}
static void
find_thread_name(uint64_t thread, char **command)
{
	threadmap_t tme;

	if ((tme = find_thread_entry(thread))) {
		*command = tme->tm_command;
	} else {
		*command = EMPTYSTRING;
	}
}
static void
add_thread_entry_to_list(thread_entry_t *list, uint64_t thread)
{
	thread_entry_t te;

	if ((te = thread_entry_freelist)) {
		thread_entry_freelist = te->te_next;
	} else {
		te = (thread_entry_t)malloc(sizeof(struct thread_entry));
	}
	te->te_thread = thread;
	te->te_next = *list;
	*list = te;
}
static void
exec_thread_entry(uint64_t thread, char *command)
{
	threadmap_t tme;

	if ((tme = find_thread_entry(thread))) {
		if (tme->tm_orig_command[0] == '\0') {
			(void)strncpy (tme->tm_orig_command, tme->tm_command, MAXCOMLEN);
			tme->tm_orig_command[MAXCOMLEN] = '\0';
		}
		(void)strncpy (tme->tm_command, command, MAXCOMLEN);
		tme->tm_command[MAXCOMLEN] = '\0';

		add_thread_entry_to_list(&thread_reset_list, thread);
	} else {
		create_map_entry(thread, command);
	}
}
static void
record_thread_entry_for_gc(uint64_t thread)
{
	add_thread_entry_to_list(&thread_delete_list, thread);
}
static void
gc_thread_entries(void)
{
	thread_entry_t te;
	thread_entry_t te_next;

	for (te = thread_delete_list; te; te = te_next) {
		delete_thread_entry(te->te_thread);

		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_delete_list = 0;
}
static void
gc_reset_entries(void)
{
	thread_entry_t te;
	thread_entry_t te_next;

	for (te = thread_reset_list; te; te = te_next) {
		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_reset_list = 0;
}
static void
reset_thread_names(void)
{
	thread_entry_t te;
	thread_entry_t te_next;

	for (te = thread_reset_list; te; te = te_next) {
		threadmap_t tme;

		if ((tme = find_thread_entry(te->te_thread))) {
			if (tme->tm_orig_command[0]) {
				(void)strncpy (tme->tm_command, tme->tm_orig_command, MAXCOMLEN);
				tme->tm_command[MAXCOMLEN] = '\0';
				tme->tm_orig_command[0] = '\0';
			}
		}
		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_reset_list = 0;
}
static void
delete_all_thread_entries(void)
{
	threadmap_t tme = 0;
	threadmap_t tme_next = 0;
	int i;

	for (i = 0; i < HASH_SIZE; i++) {
		for (tme = threadmap_hash[i]; tme; tme = tme_next) {
			tme_next = tme->tm_next;
			tme->tm_next = threadmap_freelist;
			threadmap_freelist = tme;
		}
		threadmap_hash[i] = 0;
	}
}
static void
insert_run_event(uint64_t thread, int priority, kd_buf *kd, uint64_t now)
{
	threadrun_t trp;

	int hashid = thread & HASH_MASK;

	for (trp = threadrun_hash[hashid]; trp; trp = trp->tr_next) {
		if (trp->tr_thread == thread) {
			break;
		}
	}
	if (trp == NULL) {
		if ((trp = threadrun_freelist)) {
			threadrun_freelist = trp->tr_next;
		} else {
			trp = (threadrun_t)malloc(sizeof(struct threadrun));
		}
		trp->tr_thread = thread;

		trp->tr_next = threadrun_hash[hashid];
		threadrun_hash[hashid] = trp;

		add_thread_entry_to_list(&thread_run_list, thread);
	}
	trp->tr_entry = kd;
	trp->tr_timestamp = now;
	trp->tr_priority = priority;
}
static threadrun_t
find_run_event(uint64_t thread)
{
	threadrun_t trp;

	int hashid = thread & HASH_MASK;

	for (trp = threadrun_hash[hashid]; trp; trp = trp->tr_next) {
		if (trp->tr_thread == thread) {
			return trp;
		}
	}
	return 0;
}
static void
delete_run_event(uint64_t thread)
{
	threadrun_t trp = 0;
	threadrun_t trp_prev;

	int hashid = thread & HASH_MASK;

	if ((trp = threadrun_hash[hashid])) {
		if (trp->tr_thread == thread) {
			threadrun_hash[hashid] = trp->tr_next;
		} else {
			trp_prev = trp;

			for (trp = trp->tr_next; trp; trp = trp->tr_next) {
				if (trp->tr_thread == thread) {
					trp_prev->tr_next = trp->tr_next;
					break;
				}
				trp_prev = trp;
			}
		}
		if (trp) {
			trp->tr_next = threadrun_freelist;
			threadrun_freelist = trp;
		}
	}
}
static void
gc_run_events(void)
{
	thread_entry_t te;
	thread_entry_t te_next;
	threadrun_t trp;
	threadrun_t trp_next;

	for (te = thread_run_list; te; te = te_next) {
		int hashid = te->te_thread & HASH_MASK;

		for (trp = threadrun_hash[hashid]; trp; trp = trp_next) {
			trp_next = trp->tr_next;
			trp->tr_next = threadrun_freelist;
			threadrun_freelist = trp;
		}
		threadrun_hash[hashid] = 0;

		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_run_list = 0;
}
static void
insert_start_event(uint64_t thread, int type, uint64_t now)
{
	event_t evp;

	int hashid = thread & HASH_MASK;

	for (evp = event_hash[hashid]; evp; evp = evp->ev_next) {
		if (evp->ev_thread == thread && evp->ev_type == type) {
			break;
		}
	}
	if (evp == NULL) {
		if ((evp = event_freelist)) {
			event_freelist = evp->ev_next;
		} else {
			evp = (event_t)malloc(sizeof(struct event));
		}
		evp->ev_thread = thread;
		evp->ev_type = type;

		evp->ev_next = event_hash[hashid];
		event_hash[hashid] = evp;

		add_thread_entry_to_list(&thread_event_list, thread);
	}
	evp->ev_timestamp = now;
}
static uint64_t
consume_start_event(uint64_t thread, int type, uint64_t now)
{
	event_t evp;
	event_t evp_prev;
	uint64_t elapsed = 0;

	int hashid = thread & HASH_MASK;

	if ((evp = event_hash[hashid])) {
		if (evp->ev_thread == thread && evp->ev_type == type) {
			event_hash[hashid] = evp->ev_next;
		} else {
			evp_prev = evp;

			for (evp = evp->ev_next; evp; evp = evp->ev_next) {
				if (evp->ev_thread == thread && evp->ev_type == type) {
					evp_prev->ev_next = evp->ev_next;
					break;
				}
				evp_prev = evp;
			}
		}
		if (evp) {
			elapsed = now - evp->ev_timestamp;

			if (now < evp->ev_timestamp) {
				printf("consume: now = %qd, timestamp = %qd\n", now, evp->ev_timestamp);
				elapsed = 0;
			}
			evp->ev_next = event_freelist;
			event_freelist = evp;
		}
	}
	return elapsed;
}
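/*
 * insert_start_event() and consume_start_event() pair DBG_FUNC_START and
 * DBG_FUNC_END records per (thread, type): consume returns the elapsed
 * mach ticks since the matching start (0 if none was seen), which is how
 * exit_syscall() computes the parenthesized elapsed time it prints.
 */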
static void
gc_start_events(void)
{
	thread_entry_t te;
	thread_entry_t te_next;
	event_t evp;
	event_t evp_next;
	int hashid;

	for (te = thread_event_list; te; te = te_next) {

		hashid = te->te_thread & HASH_MASK;

		for (evp = event_hash[hashid]; evp; evp = evp_next) {
			evp_next = evp->ev_next;
			evp->ev_next = event_freelist;
			event_freelist = evp;
		}
		event_hash[hashid] = 0;

		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_event_list = 0;
}
static int
thread_in_user_mode(uint64_t thread, char *command)
{
	event_t evp;

	if (strcmp(command, "kernel_task") == 0) {
		return 0;
	}
	int hashid = thread & HASH_MASK;

	for (evp = event_hash[hashid]; evp; evp = evp->ev_next) {
		if (evp->ev_thread == thread) {
			return 0;
		}
	}
	return 1;
}
static lookup_t
handle_lookup_event(uint64_t thread, int debugid, kd_buf *kdp)
{
	lookup_t lkp;
	boolean_t first_record = FALSE;

	int hashid = thread & HASH_MASK;

	if (debugid & DBG_FUNC_START) {
		first_record = TRUE;
	}
	for (lkp = lookup_hash[hashid]; lkp; lkp = lkp->lk_next) {
		if (lkp->lk_thread == thread) {
			break;
		}
	}
	if (lkp == NULL) {
		if (first_record == FALSE) {
			return 0;
		}
		if ((lkp = lookup_freelist)) {
			lookup_freelist = lkp->lk_next;
		} else {
			lkp = (lookup_t)malloc(sizeof(struct lookup));
		}
		lkp->lk_thread = thread;

		lkp->lk_next = lookup_hash[hashid];
		lookup_hash[hashid] = lkp;

		add_thread_entry_to_list(&thread_lookup_list, thread);
	}
	if (first_record == TRUE) {
		lkp->lk_pathptr = lkp->lk_pathname;
		lkp->lk_dvp = kdp->arg1;
	} else {
		if (lkp->lk_pathptr > &lkp->lk_pathname[NUMPARMS-4]) {
			return 0;
		}
		*lkp->lk_pathptr++ = kdp->arg1;
	}
	*lkp->lk_pathptr++ = kdp->arg2;
	*lkp->lk_pathptr++ = kdp->arg3;
	*lkp->lk_pathptr++ = kdp->arg4;
	*lkp->lk_pathptr = 0;

	if (debugid & DBG_FUNC_END) {
		return lkp;
	}
	return 0;
}
static void
delete_lookup_event(uint64_t thread, lookup_t lkp_to_delete)
{
	lookup_t lkp;
	lookup_t lkp_prev;
	int hashid;

	hashid = thread & HASH_MASK;

	if ((lkp = lookup_hash[hashid])) {
		if (lkp == lkp_to_delete) {
			lookup_hash[hashid] = lkp->lk_next;
		} else {
			lkp_prev = lkp;

			for (lkp = lkp->lk_next; lkp; lkp = lkp->lk_next) {
				if (lkp == lkp_to_delete) {
					lkp_prev->lk_next = lkp->lk_next;
					break;
				}
				lkp_prev = lkp;
			}
		}
		if (lkp) {
			lkp->lk_next = lookup_freelist;
			lookup_freelist = lkp;
		}
	}
}
static void
gc_lookup_events(void)
{
	thread_entry_t te;
	thread_entry_t te_next;
	lookup_t lkp;
	lookup_t lkp_next;
	int hashid;

	for (te = thread_lookup_list; te; te = te_next) {
		hashid = te->te_thread & HASH_MASK;

		for (lkp = lookup_hash[hashid]; lkp; lkp = lkp_next) {
			lkp_next = lkp->lk_next;
			lkp->lk_next = lookup_freelist;
			lookup_freelist = lkp;
		}
		lookup_hash[hashid] = 0;

		te_next = te->te_next;
		te->te_next = thread_entry_freelist;
		thread_entry_freelist = te;
	}
	thread_lookup_list = 0;
}
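/*
 * sample_sc() drains one sample's worth of trace data -- from the kernel
 * via KERN_KDREADTR, or from a raw file -- then walks the events looking
 * for interrupt latencies (DECR_TRAP) and scheduler wakeup-to-run
 * latencies that exceed their thresholds, logging a window of context
 * around each offender.
 */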
static int
sample_sc(void)
{
	kd_buf *kd, *end_of_sample;
	int keep_going = 1;
	int i;
	ssize_t count;

	if (!RAW_flag) {
		/*
		 * Get kernel buffer information
		 */
		get_bufinfo(&bufinfo);
	}
	if (need_new_map) {
		delete_all_thread_entries();
		read_command_map();
		need_new_map = 0;
	}
	if (RAW_flag) {
		ssize_t bytes_read;

		bytes_read = read(RAW_fd, my_buffer, num_entries * sizeof(kd_buf));

		if (bytes_read == -1) {
			perror("read failed");
			exit(2);
		}
		count = bytes_read / sizeof(kd_buf);

		if (count != num_entries) {
			keep_going = 0;
		}
		kd = (kd_buf *)my_buffer;
		first_now = kd->timestamp & KDBG_TIMESTAMP_MASK;
	} else {
		int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDREADTR };
		size_t needed = bufinfo.nkdbufs * sizeof(kd_buf);

		if (sysctl(mib, ARRAYSIZE(mib), my_buffer, &needed, NULL, 0) < 0) {
			quit("trace facility failure, KERN_KDREADTR\n");
		}
		count = needed;
		sample_generation++;

		if (bufinfo.flags & KDBG_WRAPPED) {
			need_new_map = 1;

			if (log_fp) {
				fprintf(log_fp, "\n\n%-19.19s sample = %d <<<<<<< trace buffer wrapped >>>>>>>\n\n",
					&(ctime(&curr_time)[0]), sample_generation);
			}
		}
	}
	end_of_sample = &((kd_buf *)my_buffer)[count];

	/*
	 * Always reinitialize the DECR_TRAP array
	 */
	for (i = 0; i < num_cpus; i++) {
		last_decrementer_kd[i] = (kd_buf *)my_buffer;
	}
	for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
		kd_buf *kd_start;
		uint64_t thread = kd->arg5;
		int type = kd->debugid & DBG_FUNC_MASK;

		(void)check_for_thread_update(thread, type, kd, NULL);

		uint64_t now = kd->timestamp & KDBG_TIMESTAMP_MASK;
		last_now = now;

		if (type == DECR_TRAP) {
			int cpunum = CPU_NUMBER(kd);
			double i_latency = handle_decrementer(kd, cpunum);

			if (i_thresh_hold && (int)i_latency > i_thresh_hold) {
				kd_start = last_decrementer_kd[cpunum];

				kd = log_decrementer(kd_start, kd, end_of_sample, i_latency);
			}
			last_decrementer_kd[cpunum] = kd;
		} else {
			int s_priority;
			double s_latency;

			if (check_for_scheduler_latency(type, &thread, now, kd, &kd_start, &s_priority, &s_latency)) {
				log_scheduler(kd_start, kd, end_of_sample, s_priority, s_latency, thread);
			}
		}
	}
	gc_thread_entries();
	gc_reset_entries();
	gc_run_events();

	return keep_going;
}
static void
enter_syscall(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info)
{
	char *p;
	double timestamp;
	double delta;
	char pcstring[128];

	int cpunum = CPU_NUMBER(kd);

	if (print_info && fp) {
		timestamp = (double)(now - start_bias) / divisor;
		delta = (double)idelta / divisor;

		if ((p = find_code(type))) {
			if (type == INTERRUPT) {
				int mode;

				if (kd->arg3) {
					mode = USER_MODE;
				} else {
					mode = KERNEL_MODE;
				}
				pc_to_string(&pcstring[0], kd->arg2, 58, mode);

				fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT[%2" PRIx64 "] @ %-58.58s %8" PRIx64 " %2d %s\n",
					timestamp, delta, (uint64_t)kd->arg1, &pcstring[0], thread, cpunum, command);
			} else if (type == MACH_vmfault) {
				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %8" PRIx64 " %2d %s\n",
					timestamp, delta, p, thread, cpunum, command);
			} else {
				fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
					timestamp, delta, p, (uint64_t)kd->arg1, (uint64_t)kd->arg2, (uint64_t)kd->arg3, (uint64_t)kd->arg4,
					thread, cpunum, command);
			}
		} else {
			fprintf(fp, "%9.1f %8.1f\t\t%-8x %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
				timestamp, delta, type, (uint64_t)kd->arg1, (uint64_t)kd->arg2, (uint64_t)kd->arg3, (uint64_t)kd->arg4,
				thread, cpunum, command);
		}
	}
	if (type != BSC_thread_terminate && type != BSC_exit) {
		insert_start_event(thread, type, now);
	}
}
static void
exit_syscall(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info)
{
	char *p;
	uint64_t user_addr;
	double timestamp;
	double delta;
	double elapsed_timestamp;

	elapsed_timestamp = (double)consume_start_event(thread, type, now) / divisor;

	if (print_info && fp) {
		int cpunum = CPU_NUMBER(kd);

		timestamp = (double)(now - start_bias) / divisor;
		delta = (double)idelta / divisor;

		fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp, delta, elapsed_timestamp);

		if ((p = find_code(type))) {
			if (type == INTERRUPT) {
				fprintf(fp, "INTERRUPT %8" PRIx64 " %2d %s\n", thread, cpunum, command);
			} else if (type == MACH_vmfault && kd->arg4 <= DBG_PAGEIND_FAULT) {
				user_addr = ((uint64_t)kd->arg1 << 32) | (uint32_t)kd->arg2;

				fprintf(fp, "%-28.28s %-10.10s %-16qx %8" PRIx64 " %2d %s\n",
					p, fault_name[kd->arg4], user_addr,
					thread, cpunum, command);
			} else {
				fprintf(fp, "%-28.28s %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
					p, (uint64_t)kd->arg1, (uint64_t)kd->arg2,
					thread, cpunum, command);
			}
		} else {
			fprintf(fp, "%-8x %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
				type, (uint64_t)kd->arg1, (uint64_t)kd->arg2,
				thread, cpunum, command);
		}
	}
}
static void
print_entry(FILE *fp, kd_buf *kd, uint64_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd_note)
{
	char *p;

	if (!fp) {
		return;
	}
	int cpunum = CPU_NUMBER(kd);

	double timestamp = (double)(now - start_bias) / divisor;
	double delta = (double)idelta / divisor;

	if ((p = find_code(type))) {
		if (kd == kd_note) {
			fprintf(fp, "%9.1f %8.1f\t**\t", timestamp, delta);
		} else {
			fprintf(fp, "%9.1f %8.1f\t\t", timestamp, delta);
		}
		fprintf(fp, "%-28.28s %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
			p, (uint64_t)kd->arg1, (uint64_t)kd->arg2, (uint64_t)kd->arg3, (uint64_t)kd->arg4, thread, cpunum, command);
	} else {
		fprintf(fp, "%9.1f %8.1f\t\t%-8x %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
			timestamp, delta, type, (uint64_t)kd->arg1, (uint64_t)kd->arg2, (uint64_t)kd->arg3, (uint64_t)kd->arg4,
			thread, cpunum, command);
	}
}
static void
check_for_thread_update(uint64_t thread, int debugid_base, kd_buf *kbufp, char **command)
{
	if (debugid_base == TRACE_DATA_NEWTHREAD) {
		/*
		 * Save the create thread data
		 */
		create_tmp_map_entry(kbufp->arg1, thread);
	} else if (debugid_base == TRACE_STRING_NEWTHREAD) {
		/*
		 * process new map entry
		 */
		find_and_insert_tmp_map_entry(thread, (char *)&kbufp->arg1);
	} else if (debugid_base == TRACE_STRING_EXEC) {
		exec_thread_entry(thread, (char *)&kbufp->arg1);
	} else {
		if (debugid_base == BSC_exit || debugid_base == BSC_thread_terminate) {
			record_thread_entry_for_gc(thread);
		}
		if (command) {
			find_thread_name(thread, command);
		}
	}
}
static void
log_info(uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd, kd_buf *kd_note)
{
	lookup_t lkp;
	int mode;
	uint64_t reason;
	char *p;
	char *command;
	char *command1;
	char command_buf[32];
	char sched_info[64];
	char pcstring[128];
	const char *sched_reason;
	double i_latency;
	double timestamp;
	double delta;
	char joe[32];

	uint64_t thread = kd->arg5;
	int cpunum = CPU_NUMBER(kd);
	int debugid = kd->debugid;
	int type = kd->debugid & DBG_FUNC_MASK;

	(void)check_for_thread_update(thread, type, kd, &command);

	if ((type >> 24) == DBG_TRACE) {
		if (((type >> 16) & 0xff) != DBG_TRACE_INFO) {
			return;
		}
	}
	timestamp = (double)(now - start_bias) / divisor;
	delta = (double)idelta / divisor;

	switch (type) {

	case CQ_action:
		pc_to_string(&pcstring[0], kd->arg1, 84, KERNEL_MODE);

		fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-84.84s %8" PRIx64 " %2d %s\n",
			timestamp, delta, &pcstring[0], thread, cpunum, command);
		break;

	case TES_action:
		pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

		fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-83.83s %8" PRIx64 " %2d %s\n",
			timestamp, delta, &pcstring[0], thread, cpunum, command);
		break;

	case IES_action:
		pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

		fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-83.83s %8" PRIx64 " %2d %s\n",
			timestamp, delta, &pcstring[0], thread, cpunum, command);
		break;

	case IES_filter:
		pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

		fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-83.83s %8" PRIx64 " %2d %s\n",
			timestamp, delta, &pcstring[0], thread, cpunum, command);
		break;

	case DECR_TRAP:
		if ((int)kd->arg1 >= 0) {
			i_latency = 0;
		} else {
			i_latency = (((double)(-1 - kd->arg1)) / divisor);
		}
		if (i_thresh_hold && (int)i_latency > i_thresh_hold) {
			p = "*";
		} else {
			p = " ";
		}
		if (kd->arg3) {
			mode = USER_MODE;
		} else {
			mode = KERNEL_MODE;
		}
		pc_to_string(&pcstring[0], kd->arg2, 84, mode);

		fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-84.84s %8" PRIx64 " %2d %s\n",
			timestamp, delta, i_latency, p, &pcstring[0], thread, cpunum, command);
		break;

	case DECR_SET:
		fprintf(log_fp, "%9.1f %8.1f[%.1f] \t%-28.28s %8" PRIx64 " %2d %s\n",
			timestamp, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
		break;

	case MACH_sched:
	case MACH_stkhandoff:

		find_thread_name(kd->arg2, &command1);

		if (command1 == EMPTYSTRING) {
			command1 = command_buf;

			sprintf(command1, "%-8" PRIx64, (uint64_t)kd->arg2);
		}
		if (thread_in_user_mode(kd->arg2, command1)) {
			p = "U";
		} else {
			p = "K";
		}
		reason = kd->arg1;

		if (reason > MAX_REASON) {
			sched_reason = "?";
		} else {
			sched_reason = sched_reasons[reason];
		}
		if (sched_reason[0] == '?') {
			sprintf(joe, "%" PRIx64, reason);
			sched_reason = joe;
		}
		sprintf(sched_info, "%16.16s @ pri %3" PRIu64 " --> %16.16s @ pri %3" PRIu64 "%s", command, (uint64_t)kd->arg3, command1, (uint64_t)kd->arg4, p);

		fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s[%s] %s %8" PRIx64 " %2d\n",
			timestamp, delta, "MACH_SCHED", sched_reason, sched_info, thread, cpunum);
		break;

	case VFS_LOOKUP:

		if ((lkp = handle_lookup_event(thread, debugid, kd))) {
			/*
			 * print the tail end of the pathname
			 */
			p = (char *)lkp->lk_pathname;
			size_t clen = strlen(p);

			if (clen > 45) {
				clen -= 45;
			} else {
				clen = 0;
			}
			fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-59s %-16" PRIx64 " %8" PRIx64 " %2d %s\n",
				timestamp, delta, "VFS_LOOKUP",
				&p[clen], lkp->lk_dvp, thread, cpunum, command);

			delete_lookup_event(thread, lkp);
		}
		break;

	default:
		if (debugid & DBG_FUNC_START) {
			enter_syscall(log_fp, kd, thread, type, command, now, idelta, start_bias, 1);
		} else if (debugid & DBG_FUNC_END) {
			exit_syscall(log_fp, kd, thread, type, command, now, idelta, start_bias, 1);
		} else {
			print_entry(log_fp, kd, thread, type, command, now, idelta, start_bias, kd_note);
		}
		break;
	}
}
static void
log_range(kd_buf *kd_buffer, kd_buf *kd_start, kd_buf *kd_stop, kd_buf *kd_note, char *buf1)
{
	uint64_t last_timestamp = 0;
	uint64_t delta = 0;
	uint64_t start_bias = 0;
	uint64_t now;
	kd_buf *kd;
	size_t clen;
	char buf2[128];

	clen = strlen(buf1);
	memset(buf2, '-', clen);
	buf2[clen] = 0;
	fprintf(log_fp, "\n\n%s\n", buf2);
	fprintf(log_fp, "%s\n\n", buf1);

	fprintf(log_fp, "RelTime(Us) Delta debugid arg1 arg2 arg3 arg4 thread cpu command\n\n");

	reset_thread_names();

	last_timestamp = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
	start_bias = last_timestamp;

	for (kd = kd_buffer; kd <= kd_stop; kd++) {
		now = kd->timestamp & KDBG_TIMESTAMP_MASK;

		if (kd >= kd_start) {
			delta = now - last_timestamp;

			log_info(now, delta, start_bias, kd, kd_note);

			last_timestamp = now;
		} else {
			int debugid = kd->debugid;
			uint64_t thread = kd->arg5;
			int type = kd->debugid & DBG_FUNC_MASK;

			if ((type >> 24) == DBG_TRACE) {
				if (((type >> 16) & 0xff) != DBG_TRACE_INFO) {
					continue;
				}
			}
			if (type == BSC_thread_terminate || type == BSC_exit) {
				continue;
			}
			if (debugid & DBG_FUNC_START) {
				insert_start_event(thread, type, now);
			} else if (debugid & DBG_FUNC_END) {
				(void)consume_start_event(thread, type, now);
			}
		}
	}
	gc_start_events();
	gc_lookup_events();
}
static kd_buf *
log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
{
	kd_buf *kd_start, *kd_stop;
	int kd_count;	/* Limit the boundary of kd_start */
	uint64_t now;
	double sample_timestamp;
	char buf1[128];

	uint64_t thread = kd_beg->arg5;
	int cpunum = CPU_NUMBER(kd_end);

	for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
		if (kd_count == MAX_LOG_COUNT) {
			break;
		}
		if (CPU_NUMBER(kd_start) != cpunum) {
			continue;
		}
		if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP) {
			break;
		}
		if (kd_start->arg5 != thread) {
			break;
		}
	}
	if (kd_start < (kd_buf *)my_buffer) {
		kd_start = (kd_buf *)my_buffer;
	}
	thread = kd_end->arg5;

	for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
		if (CPU_NUMBER(kd_stop) != cpunum) {
			continue;
		}
		if ((kd_stop->debugid & DBG_FUNC_MASK) == INTERRUPT) {
			break;
		}
		if (kd_stop->arg5 != thread) {
			break;
		}
	}
	if (kd_stop >= end_of_sample) {
		kd_stop = end_of_sample - 1;
	}
	if (RAW_flag) {
		time_t TOD_secs;
		uint64_t TOD_usecs;

		now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
		sample_timestamp = (double)(now - first_now) / divisor;

		TOD_usecs = (uint64_t)sample_timestamp;
		TOD_secs = (unsigned long)sample_TOD_secs + (unsigned long)((sample_TOD_usecs + TOD_usecs) / 1000000);

		sprintf(buf1, "%-19.19s interrupt latency = %.1fus [timestamp %.1f]", ctime(&TOD_secs), i_latency, sample_timestamp);
	} else {
		sprintf(buf1, "%-19.19s interrupt latency = %.1fus [sample %d]", &(ctime(&curr_time)[0]), i_latency, sample_generation);
	}
	log_range((kd_buf *)my_buffer, kd_start, kd_stop, 0, buf1);

	return kd_stop;
}
static void
log_scheduler(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, int s_priority, double s_latency, uint64_t thread)
{
	kd_buf *kd_start, *kd_stop;
	uint64_t now;
	int count;
	int cpunum;
	uint64_t cmask = 0;
	double sample_timestamp;
	char buf1[128];

	for (count = 0, kd_start = kd_beg; (kd_start >= (kd_buf *)my_buffer); kd_start--) {
		cpunum = CPU_NUMBER(kd_start);

		cmask |= ((uint64_t)1 << cpunum);

		if (cmask == cpu_mask) {
			if (++count == MAX_LOG_COUNT) {
				break;
			}
			cmask = 0;
		}
	}
	if (kd_start < (kd_buf *)my_buffer) {
		kd_start = (kd_buf *)my_buffer;
	}
	for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
		if (kd_stop->arg5 == thread) {
			break;
		}
	}
	if (kd_stop >= end_of_sample) {
		kd_stop = end_of_sample - 1;
	}
	if (RAW_flag) {
		time_t TOD_secs;
		uint64_t TOD_usecs;

		now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
		sample_timestamp = (double)(now - first_now) / divisor;

		TOD_usecs = (uint64_t)sample_timestamp;
		TOD_secs = (unsigned long)sample_TOD_secs + (unsigned long)((sample_TOD_usecs + TOD_usecs) / 1000000);

		sprintf(buf1, "%-19.19s priority = %d, scheduling latency = %.1fus [timestamp %.1f]", ctime(&TOD_secs), s_priority, s_latency, sample_timestamp);
	} else {
		sprintf(buf1, "%-19.19s priority = %d, scheduling latency = %.1fus [sample %d]", &(ctime(&curr_time)[0]), s_priority, s_latency, sample_generation);
	}
	log_range((kd_buf *)my_buffer, kd_start, kd_stop, kd_beg, buf1);
}
static int
check_for_scheduler_latency(int type, uint64_t *thread, uint64_t now, kd_buf *kd, kd_buf **kd_start, int *priority, double *latency)
{
	int found_latency = 0;

	if (type == MACH_makerunnable) {
		if (watch_priority_min <= kd->arg2 && kd->arg2 <= watch_priority_max) {
			insert_run_event(kd->arg1, (int)kd->arg2, kd, now);
		}
	} else if (type == MACH_sched || type == MACH_stkhandoff) {
		threadrun_t trp = find_run_event(kd->arg2);

		if (type == MACH_sched || type == MACH_stkhandoff) {
			*thread = kd->arg2;
		}
		if ((trp = find_run_event(*thread))) {
			double d_s_latency = (((double)(now - trp->tr_timestamp)) / divisor);
			int s_latency = (int)d_s_latency;

			if (s_latency < 100) {
				s_usec_10_bins[s_latency/10]++;
			}
			if (s_latency < 1000) {
				s_usec_100_bins[s_latency/100]++;
			} else if (s_latency < 10000) {
				s_msec_1_bins[s_latency/1000]++;
			} else if (s_latency < 50000) {
				s_msec_10_bins[s_latency/10000]++;
			} else {
				s_too_slow++;
			}
			if (s_latency > s_max_latency) {
				s_max_latency = s_latency;
			}
			if (s_latency < s_min_latency || s_total_samples == 0) {
				s_min_latency = s_latency;
			}
			s_total_latency += s_latency;
			s_total_samples++;

			if (s_thresh_hold && s_latency > s_thresh_hold) {
				s_exceeded_threshold++;

				*kd_start = trp->tr_entry;
				*priority = trp->tr_priority;
				*latency = d_s_latency;

				found_latency = 1;
			}
			delete_run_event(*thread);
		}
	}
	return found_latency;
}
static double
handle_decrementer(kd_buf *kd, int cpunum)
{
	struct i_latencies *il;
	double latency;
	long elapsed_usecs;

	if (i_latency_per_cpu == FALSE) {
		cpunum = 0;
	}
	il = &i_lat[cpunum];

	if ((long)(kd->arg1) >= 0) {
		latency = 1;
	} else {
		latency = (((double)(-1 - kd->arg1)) / divisor);
	}
	elapsed_usecs = (long)latency;

	if (elapsed_usecs < 100) {
		il->i_usec_10_bins[elapsed_usecs/10]++;
	}
	if (elapsed_usecs < 1000) {
		il->i_usec_100_bins[elapsed_usecs/100]++;
	} else if (elapsed_usecs < 10000) {
		il->i_msec_1_bins[elapsed_usecs/1000]++;
	} else if (elapsed_usecs < 50000) {
		il->i_msec_10_bins[elapsed_usecs/10000]++;
	} else {
		il->i_too_slow++;
	}
	if (use_high_res_bins && elapsed_usecs < N_HIGH_RES_BINS) {
		i_high_res_bins[elapsed_usecs]++;
	}
	if (i_thresh_hold && elapsed_usecs > i_thresh_hold) {
		il->i_exceeded_threshold++;
	}
	if (elapsed_usecs > il->i_max_latency) {
		il->i_max_latency = elapsed_usecs;
	}
	if (elapsed_usecs < il->i_min_latency || il->i_total_samples == 0) {
		il->i_min_latency = elapsed_usecs;
	}
	il->i_total_latency += elapsed_usecs;
	il->i_total_samples++;

	return latency;
}
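/*
 * Note on the encoding above: a non-negative arg1 on a DECR_TRAP means the
 * decrementer fired with time to spare, while a negative arg1 encodes the
 * lateness, so (-1 - arg1) recovers the latency in mach ticks and dividing
 * by `divisor` converts it to microseconds.
 */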
static char *
find_code(int type)
{
	int i;

	for (i = 0; i < num_of_codes; i++) {
		if (codes_tab[i].type == type) {
			return codes_tab[i].name;
		}
	}
	return NULL;
}
static void
init_code_file(void)
{
	FILE *fp;
	int i;

	if ((fp = fopen(code_file, "r")) == NULL) {
		if (log_fp) {
			fprintf(log_fp, "open of %s failed\n", code_file);
		}
		return;
	}
	for (i = 0; i < MAX_ENTRIES; i++) {
		int code;
		char name[128];
		int n = fscanf(fp, "%x%127s\n", &code, name);

		if (n == 1 && i == 0) {
			/*
			 * old code file format, just skip
			 */
			continue;
		}
		if (n != 2) {
			break;
		}
		strncpy(codes_tab[i].name, name, 32);
		codes_tab[i].type = code;
	}
	num_of_codes = i;

	fclose(fp);
}
static void
do_kernel_nm(void)
{
	int i;
	size_t len;
	FILE *fp = NULL;
	char tmp_nm_file[128];
	char tmpstr[1024];
	char inchr;

	bzero(tmp_nm_file, 128);
	bzero(tmpstr, 1024);

	/*
	 * Build the temporary nm file path
	 */
	strcpy(tmp_nm_file,"/tmp/knm.out.XXXXXX");

	if (!mktemp(tmp_nm_file)) {
		fprintf(stderr, "Error in mktemp call\n");
		return;
	}
	/*
	 * Build the nm command and create a tmp file with the output
	 */
	sprintf (tmpstr, "/usr/bin/nm -n %s -s __TEXT __text > %s",
		 kernelpath, tmp_nm_file);
	system(tmpstr);

	/*
	 * Parse the output from the nm command
	 */
	if ((fp = fopen(tmp_nm_file, "r")) == NULL) {
		/* Hmmm, let's not treat this as fatal */
		fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
		return;
	}
	/*
	 * Count the number of symbols in the nm symbol table
	 */
	kern_sym_count = 0;

	while ((inchr = getc(fp)) != -1) {
		if (inchr == '\n') {
			kern_sym_count++;
		}
	}
	rewind(fp);

	/*
	 * Malloc the space for symbol table
	 */
	if (kern_sym_count > 0) {
		kern_sym_tbl = malloc(kern_sym_count * sizeof(kern_sym_t));

		if (!kern_sym_tbl) {
			/*
			 * Hmmm, lets not treat this as fatal
			 */
			fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
		} else {
			bzero(kern_sym_tbl, kern_sym_count * sizeof(kern_sym_t));
		}
	} else {
		/*
		 * Hmmm, lets not treat this as fatal
		 */
		fprintf(stderr, "No kernel symbol table \n");
	}
	for (i = 0; i < kern_sym_count; i++) {
		bzero(tmpstr, 1024);

		if (fscanf(fp, "%p %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3) {
			break;
		}
		len = strlen(tmpstr);
		kern_sym_tbl[i].k_sym_name = malloc(len + 1);

		if (kern_sym_tbl[i].k_sym_name == NULL) {
			fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
			kern_sym_tbl[i].k_sym_name = NULL;
			len = 0;
		} else {
			strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
		}
		kern_sym_tbl[i].k_sym_len = len;
	}
	if (i != kern_sym_count) {
		/*
		 * Hmmm, didn't build up entire table from nm
		 * scrap the entire thing
		 */
		free(kern_sym_tbl);
		kern_sym_tbl = NULL;
		kern_sym_count = 0;
	}
	fclose(fp);

	/*
	 * Remove the temporary nm file
	 */
	unlink(tmp_nm_file);

#if 0
	/*
	 * Dump the kernel symbol table
	 */
	for (i = 0; i < kern_sym_count; i++) {
		if (kern_sym_tbl[i].k_sym_name) {
			printf ("[%d] %-16p %s\n", i,
				kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
		} else {
			printf ("[%d] %-16p %s\n", i,
				kern_sym_tbl[i].k_sym_addr, "No symbol name");
		}
	}
#endif
}
static void
pc_to_string(char *pcstring, uint64_t pc, int max_len, int mode)
{
	int ret;
	size_t len;

	if (mode == USER_MODE) {
		sprintf(pcstring, "%-16" PRIx64 " [usermode addr]", pc);
		return;
	}
	ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);

	if (ret == -1 || kern_sym_tbl[ret].k_sym_name == NULL) {
		sprintf(pcstring, "%-16" PRIx64, pc);
		return;
	}
	if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8)) {
		len = max_len - 8;
	}
	memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);

	sprintf(&pcstring[len], "+0x%-5" PRIx64, pc - (uint64_t)kern_sym_tbl[ret].k_sym_addr);
}
/*
 * Return -1 if not found, else return index
 */
static int
binary_search(kern_sym_t *list, int low, int high, uint64_t addr)
{
	int mid;

	if (kern_sym_count == 0) {
		return -1;
	}
	if (low > high) {
		return -1; /* failed */
	}
	if (low + 1 == high) {
		if ((uint64_t)list[low].k_sym_addr <= addr && addr < (uint64_t)list[high].k_sym_addr) {
			/*
			 * We have a range match
			 */
			return low;
		}
		if ((uint64_t)list[high].k_sym_addr <= addr) {
			return high;
		}
		return -1;
	}
	mid = (low + high) / 2;

	if (addr < (uint64_t)list[mid].k_sym_addr) {
		return binary_search(list, low, mid, addr);
	}
	return binary_search(list, mid, high, addr);
}
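/*
 * Illustrative use, with a hypothetical pc value: given the nm table is
 * sorted by address, binary_search() returns the index of the symbol whose
 * [k_sym_addr, next k_sym_addr) range contains pc, letting pc_to_string()
 * render "symbol+offset":
 *
 *	int ix = binary_search(kern_sym_tbl, 0, kern_sym_count - 1, pc);
 *	if (ix != -1 && kern_sym_tbl[ix].k_sym_name)
 *		printf("%s+0x%llx\n", kern_sym_tbl[ix].k_sym_name,
 *		    pc - (uint64_t)kern_sym_tbl[ix].k_sym_addr);
 */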
static void
open_logfile(const char *path)
{
	log_fp = fopen(path, "a");

	if (!log_fp) {
		/*
		 * failed to open path
		 */
		fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
		exit(1);
	}
}
static void
open_rawfile(const char *path)
{
	RAW_fd = open(path, O_RDONLY);

	if (RAW_fd == -1) {
		/*
		 * failed to open path
		 */
		fprintf(stderr, "latency: failed to open RAWfile [%s]\n", path);
		exit(1);
	}
}
static void
getdivisor(void)
{
	mach_timebase_info_data_t info;

	(void)mach_timebase_info(&info);

	divisor = ((double)info.denom / (double)info.numer) * 1000;
}
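/*
 * A minimal sketch of the same conversion, assuming only <mach/mach_time.h>:
 * mach_timebase_info() reports nanoseconds-per-tick as numer/denom, so
 * divisor == (denom/numer) * 1000 is mach ticks per microsecond and
 * usecs == ticks / divisor.  For example:
 *
 *	mach_timebase_info_data_t tb;
 *	(void)mach_timebase_info(&tb);
 *	double ticks_per_usec = ((double)tb.denom / (double)tb.numer) * 1000;
 *	uint64_t t0 = mach_absolute_time();
 *	// ... work ...
 *	double elapsed_usecs = (mach_absolute_time() - t0) / ticks_per_usec;
 */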