/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
   cc -I/System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders -DPRIVATE -D__APPLE_PRIVATE -arch x86_64 -arch i386 -O -o latency latency.c -lncurses -lutil
*/
#include <mach/mach.h>

/*
 * Standard headers used by the code below.  The original include list was
 * partially elided from this extract, so this set is restored from usage.
 */
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <strings.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <curses.h>
#include <stdbool.h>
#include <libutil.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>

#ifndef KERNEL_PRIVATE
#define KERNEL_PRIVATE
#include <sys/kdebug.h>
#undef KERNEL_PRIVATE
#else
#include <sys/kdebug.h>
#endif /*KERNEL_PRIVATE*/

#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/message.h>
#include <mach/mach_syscalls.h>
#include <mach/clock_types.h>
#include <mach/mach_time.h>

#include <libkern/OSTypes.h>
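/*
 * Scheduler-latency histogram: each observed latency is bucketed by
 * magnitude (10us, 100us, 1ms, and 10ms granularity) so screen_update()
 * can print a cumulative latency distribution alongside min/max/average.
 */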
int s_usec_10_bins[10];
int s_usec_100_bins[10];
int s_msec_1_bins[10];
int s_msec_10_bins[5];
int s_too_slow;           /* restored: referenced below, declaration elided from this extract */
int s_max_latency;        /* restored: referenced below */
int s_min_latency = 0;
long long s_total_latency = 0;
int s_total_samples = 0;
long s_thresh_hold;       /* restored: scheduler latency threshold in usecs (-st) */
int s_exceeded_threshold = 0;

#define N_HIGH_RES_BINS 500
int use_high_res_bins = false;
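/*
 * Per-CPU interrupt-latency accounting.  One i_latencies struct is
 * allocated per CPU when -m is given; otherwise a single struct
 * aggregates all CPUs (see num_i_latency_cpus in main()).
 */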
struct i_latencies {
    /*
     * The struct wrapper and the fields between the bins and
     * i_exceeded_threshold were elided from this extract; they are
     * restored here from how they are used below.
     */
    int i_usec_10_bins[10];
    int i_usec_100_bins[10];
    int i_msec_1_bins[10];
    int i_msec_10_bins[5];
    int i_too_slow;
    long i_max_latency;
    long i_min_latency;
    int i_total_samples;
    int i_total;
    int i_exceeded_threshold;
    uint64_t i_total_latency;
};

struct i_latencies *i_lat;
boolean_t i_latency_per_cpu = FALSE;

long i_thresh_hold;       /* restored: interrupt latency threshold in usecs (-it) */

int i_high_res_bins[N_HIGH_RES_BINS];

int watch_priority_min = 97;
int watch_priority_max = 97;
char *kernelpath = NULL;

typedef struct {
    /* typedef wrapper restored; it was elided from this extract */
    void   *k_sym_addr;   /* kernel symbol address from nm */
    size_t  k_sym_len;    /* length of kernel symbol string */
    char   *k_sym_name;   /* kernel symbol string from nm */
} kern_sym_t;

kern_sym_t *kern_sym_tbl;   /* pointer to the nm table */
int kern_sym_count;         /* number of entries in nm table */
#define MAX_ENTRIES 4096

struct ct {
    /* struct body restored; fields inferred from find_code()/init_code_file() below */
    int  type;
    char name[32];
} codes_tab[MAX_ENTRIES];

char *code_file = NULL;
int num_of_codes = 0;
sig_atomic_t gotSIGWINCH = 0;
int trace_enabled = 0;
int need_new_map = 1;
int set_remove_flag = 1;   /* By default, remove trace buffer */

/*
 * The declarations marked "restored" below are referenced later in this
 * file but their definitions were elided from this extract; their types
 * are inferred from usage.
 */
int RAW_flag = 0;          /* restored: nonzero when processing a raw trace file (-R) */
int RAW_fd = -1;           /* restored: fd for the raw trace file */

FILE *log_fp = NULL;       /* restored: trace log file (-l) */

double divisor;            /* restored: mach_absolute_time ticks per usec, set by getdivisor() */

time_t curr_time;          /* restored */
time_t start_time;         /* restored */
time_t refresh_time;       /* restored */

int num_cpus;              /* restored */
char *my_buffer;           /* restored: sample buffer of kd_buf entries */
int num_entries;           /* restored: capacity of my_buffer in kd_buf entries */

uint64_t first_now = 0;
uint64_t last_now = 0;

uint64_t cpu_mask;         /* restored: one bit per CPU, built in main() */

#define SAMPLE_TIME_USECS 50000
#define SAMPLE_SIZE 300000
#define MAX_LOG_COUNT 30   /* limits the number of entries dumped in log_decrementer */

kbufinfo_t bufinfo = {0, 0, 0};

uint64_t sample_TOD_secs;
uint32_t sample_TOD_usecs;

int sample_generation = 0;
int num_i_latency_cpus = 1;
kd_buf **last_decrementer_kd;   /* last DECR_TRAP per cpu */
typedef struct event *event_t;

struct event {
    /* struct body partially restored from usage; it was elided from this extract */
    event_t   ev_next;
    uintptr_t ev_thread;
    int       ev_type;
    uint64_t  ev_timestamp;
};
typedef struct lookup *lookup_t;

#define NUMPARMS 23   /* restored: assumed to match the VFS lookup tracing convention; the original #define was elided */

struct lookup {
    /* struct body partially restored from usage */
    lookup_t  lk_next;
    uintptr_t lk_thread;
    uintptr_t lk_dvp;
    long     *lk_pathptr;
    long      lk_pathname[NUMPARMS + 1];
};
typedef struct threadmap *threadmap_t;

struct threadmap {
    /* struct body partially restored from usage */
    threadmap_t tm_next;
    uintptr_t   tm_thread;
    uintptr_t   tm_pthread;
    char        tm_command[MAXCOMLEN + 1];
    char        tm_orig_command[MAXCOMLEN + 1];
};
typedef struct threadrun *threadrun_t;

struct threadrun {
    /* struct body partially restored from usage */
    threadrun_t tr_next;
    uintptr_t   tr_thread;
    kd_buf     *tr_entry;
    uint64_t    tr_timestamp;
    int         tr_priority;
};
typedef struct thread_entry *thread_entry_t;

struct thread_entry {
    thread_entry_t te_next;
    uintptr_t      te_thread;   /* restored from usage */
};
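/*
 * All four lookup structures live in small open-hash tables keyed by the
 * low bits of the thread ID; freed nodes are recycled through per-type
 * freelists rather than returned to malloc.
 */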
#define HASH_SIZE 1024
#define HASH_MASK 1023

event_t     event_hash[HASH_SIZE];
lookup_t    lookup_hash[HASH_SIZE];
threadmap_t threadmap_hash[HASH_SIZE];
threadrun_t threadrun_hash[HASH_SIZE];

event_t     event_freelist;
lookup_t    lookup_freelist;
threadrun_t threadrun_freelist;
threadmap_t threadmap_freelist;
threadmap_t threadmap_temp;

thread_entry_t thread_entry_freelist;
thread_entry_t thread_delete_list;
thread_entry_t thread_reset_list;
thread_entry_t thread_event_list;
thread_entry_t thread_lookup_list;
thread_entry_t thread_run_list;
#define RAW_VERSION0 0x55aa0000
#define RAW_VERSION1 0x55aa0101

#define USER_MODE   0   /* restored: referenced by pc_to_string() below; its #define was elided */
#define KERNEL_MODE 1

#define INTERRUPT         0x01050000
#define DECR_TRAP         0x01090000
#define DECR_SET          0x01090004
#define MACH_vmfault      0x01300008
#define MACH_sched        0x01400000
#define MACH_stkhandoff   0x01400008
#define MACH_makerunnable 0x01400018
#define MACH_idle         0x01400024
#define IES_action        0x050b0018
#define IES_filter        0x050b001c
#define TES_action        0x050c0010
#define CQ_action         0x050d0018
#define CPUPM_CPUSTER_RUNCOUNT 0x05310144

#define BSC_exit             0x040C0004
#define BSC_thread_terminate 0x040c05a4

#define VFS_LOOKUP 0x03010090   /* restored: used by log_info(); its #define was elided */

#define DBG_FUNC_MASK ~(DBG_FUNC_START | DBG_FUNC_END)

#define CPU_NUMBER(kp) kdbg_get_cpu(kp)

#define EMPTYSTRING ""
/*
 * The entries of both tables were elided from this extract; the values
 * below are restored to match the upstream latency.c tables.
 */
const char *fault_name[] = {
    "",
    "ZeroFill",
    "PageIn",
    "COW",
    "CacheHit",
    "NoZeroFill",
    "Guard",
    "PageInFile",
    "PageInAnon"
};

const char *sched_reasons[] = {
    "N",
    "P",
    "Q",
    "?",
    "u",
    "U",
    "?",
    "?",
    "H",
    "?",
    "?",
    "?",
    "?",
    "?",
    "?",
    "?",
    "Y"
};
#define ARRAYSIZE(x) ((int)(sizeof(x) / sizeof(*x)))
#define MAX_REASON ARRAYSIZE(sched_reasons)
static double handle_decrementer(kd_buf *, int);
static kd_buf *log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency);
static void read_command_map(void);
static void enter_syscall(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info);
static void exit_syscall(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info);
static void print_entry(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd_note);
static void log_info(uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd, kd_buf *kd_note);
static char *find_code(int);
static void pc_to_string(char *pcstring, uintptr_t pc, int max_len, int mode);
static void getdivisor(void);
static int sample_sc(void);
static void init_code_file(void);
static void do_kernel_nm(void);
static void open_logfile(const char*);
static int binary_search(kern_sym_t *list, int low, int high, uintptr_t addr);

static void create_map_entry(uintptr_t, char *);
static void check_for_thread_update(uintptr_t thread, int debugid_base, kd_buf *kbufp, char **command);
static void log_scheduler(kd_buf *kd_start, kd_buf *kd_stop, kd_buf *end_of_sample, int s_priority, double s_latency, uintptr_t thread);
static int check_for_scheduler_latency(int type, uintptr_t *thread, uint64_t now, kd_buf *kd, kd_buf **kd_start, int *priority, double *latency);
static void open_rawfile(const char *path);

static void screen_update(FILE *);

static void set_enable(int);
static void set_remove(void);
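/*
 * Teardown path: quit() disables tracing and removes the trace buffer
 * (unless set_remove() itself already failed), shuts down curses, prints
 * a message, and exits.  set_enable() toggles kdebug tracing on or off
 * via the kern.kdebug sysctl (KERN_KDENABLE).
 */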
static void
quit(char *s)
{
    /* most of quit()'s body was elided from this extract; teardown restored to match its callers */
    if (trace_enabled) {
        set_enable(0);
    }
    /*
     * This flag is turned off when calling
     * quit() due to a set_remove() failure.
     */
    if (set_remove_flag) {
        set_remove();
    }
    endwin();

    printf("latency: %s", s ? s : "");

    exit(1);
}

static void
set_enable(int val)
{
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, val };
    size_t needed;

    if (sysctl(mib, ARRAYSIZE(mib), NULL, &needed, NULL, 0) < 0) {
        quit("trace facility failure, KERN_KDENABLE\n");
    }
}
static void
set_numbufs(int nbufs)
{
    int mib1[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, nbufs };
    int mib2[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETUP };
    size_t needed;

    if (sysctl(mib1, ARRAYSIZE(mib1), NULL, &needed, NULL, 0) < 0) {
        quit("trace facility failure, KERN_KDSETBUF\n");
    }
    if (sysctl(mib2, ARRAYSIZE(mib2), NULL, &needed, NULL, 0) < 0) {
        quit("trace facility failure, KERN_KDSETUP\n");
    }
}
static void
set_pidexclude(int pid, int on_off)
{
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDPIDEX };
    size_t needed = sizeof(kd_regtype);

    kd_regtype kr = {
        .type = KDBG_TYPENONE,
        .value1 = pid,      /* restored: the value1/value2 initializers were elided */
        .value2 = on_off,
    };
    sysctl(mib, ARRAYSIZE(mib), &kr, &needed, NULL, 0);
}
static void
get_bufinfo(kbufinfo_t *val)
{
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF };
    size_t needed = sizeof (*val);

    if (sysctl(mib, ARRAYSIZE(mib), val, &needed, 0, 0) < 0) {
        quit("trace facility failure, KERN_KDGETBUF\n");
    }
}
static void
set_remove(void)
{
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE };
    size_t needed;

    errno = 0;

    if (sysctl(mib, ARRAYSIZE(mib), NULL, &needed, NULL, 0) < 0) {
        set_remove_flag = 0;

        if (errno == EBUSY) {
            quit("the trace facility is currently in use...\n          fs_usage, sc_usage, and latency use this feature.\n\n");
        } else {
            quit("trace facility failure, KERN_KDREMOVE\n");
        }
    }
}
static void
write_high_res_latencies(void)
{
    int i;
    FILE *f;

    if (use_high_res_bins) {
        if ((f = fopen("latencies.csv","w"))) {
            for (i = 0; i < N_HIGH_RES_BINS; i++) {
                fprintf(f, "%d,%d\n", i, i_high_res_bins[i]);
            }
            fclose(f);
        }
    }
}
static void
sigintr(int signo __attribute__((unused)))
{
    /* ^C handler; the teardown calls around the surviving lines were elided and are restored here */
    write_high_res_latencies();

    set_enable(0);
    set_pidexclude(getpid(), 0);
    screen_update(log_fp);
    endwin();
    set_remove();

    exit(1);
}
/* exit under normal conditions -- signal handler */
static void
leave(int signo __attribute__((unused)))
{
    write_high_res_latencies();

    set_enable(0);
    set_pidexclude(getpid(), 0);
    endwin();
    set_remove();

    exit(1);
}
static void
sigwinch(int signo __attribute__((unused)))
{
    gotSIGWINCH = 1;
}
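/*
 * Emit one summary row of the report: the scheduler-side total, the
 * interrupt-side total, and (with -m) one extra column per CPU.  Output
 * goes to fp when logging, or to the curses screen when fp is NULL.
 */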
static void
print_total(FILE *fp, char *s, int total)
{
    int cpu;
    int clen;
    int itotal;
    struct i_latencies *il;
    char tbuf[512];

    for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];
        itotal += il->i_total;
    }
    clen = sprintf(tbuf, "%s  %10d      %9d", s, total, itotal);

    for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];

        if (i_latency_per_cpu == TRUE) {
            clen += sprintf(&tbuf[clen], " %9d", il->i_total);
        }
        il->i_total = 0;   /* restored: per-sample accumulator reset; elided in this extract */
    }
    sprintf(&tbuf[clen], "\n");
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }
}
static void
screen_update(FILE *fp)
{
    int i;
    int cpu;
    int clen;
    int itotal, stotal;
    long elapsed_secs;
    long elapsed_mins;
    long elapsed_hours;
    long min_lat, max_lat;
    uint64_t tot_lat;
    unsigned int average_s_latency;
    unsigned int average_i_latency;
    struct i_latencies *il;
    char tbuf[1024];

    if (fp == NULL) {
        erase();
        move(0, 0);
    } else {
        fprintf(fp,"\n\n===================================================================================================\n");
    }
    /*
     * Display the current time.
     * "ctime" always returns a string that looks like this:
     *
     *	Sun Sep 16 01:03:52 1973
     *	012345678901234567890123
     *
     * We want indices 11 thru 18 (length 8).
     */
    if (RAW_flag) {
        curr_time = (unsigned long)sample_TOD_secs;
        elapsed_secs = ((last_now - first_now) / divisor) / 1000000;
    } else {
        elapsed_secs = curr_time - start_time;
    }

    elapsed_hours = elapsed_secs / 3600;
    elapsed_secs -= elapsed_hours * 3600;
    elapsed_mins = elapsed_secs / 60;
    elapsed_secs -= elapsed_mins * 60;

    sprintf(tbuf, "%-19.19s                            %2ld:%02ld:%02ld\n", &(ctime(&curr_time)[0]),
        (long)elapsed_hours, (long)elapsed_mins, (long)elapsed_secs);
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    sprintf(tbuf, "                                    SCHEDULER    INTERRUPTS\n");
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    if (i_latency_per_cpu == TRUE) {
        clen = sprintf(tbuf, "                                        Total");

        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            if (cpu <= 9) {
                clen += sprintf(&tbuf[clen], "     CPU %d", cpu);
            } else {
                clen += sprintf(&tbuf[clen], "    CPU %d", cpu);
            }
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }

        clen = sprintf(tbuf, "\n-------------------------------------------------------");

        for (cpu = 1; cpu < num_i_latency_cpus; cpu++) {
            clen += sprintf(&tbuf[clen], "----------");
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    } else {
        sprintf(tbuf, "---------------------------------------------");
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    }
    for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];
        itotal += il->i_total_samples;
    }
    clen = sprintf(tbuf, "\ntotal_samples       %10d      %9d", s_total_samples, itotal);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            clen += sprintf(&tbuf[clen], " %9d", il->i_total_samples);
        }
    }
    sprintf(&tbuf[clen], "\n");
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    for (stotal = 0, i = 0; i < 10; i++) {
        for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            itotal += il->i_usec_10_bins[i];
            il->i_total += il->i_usec_10_bins[i];
        }
        clen = sprintf(tbuf, "\ndelays < %3d usecs  %10d      %9d", (i + 1) * 10, s_usec_10_bins[i], itotal);

        stotal += s_usec_10_bins[i];

        if (i_latency_per_cpu == TRUE) {
            for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
                il = &i_lat[cpu];

                clen += sprintf(&tbuf[clen], " %9d", il->i_usec_10_bins[i]);
            }
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    }
    print_total(fp, "\ntotal  < 100 usecs", stotal);

    for (stotal = 0, i = 1; i < 10; i++) {
        for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            itotal += il->i_usec_100_bins[i];
            il->i_total += il->i_usec_100_bins[i];
        }
        if (i < 9) {
            clen = sprintf(tbuf, "\ndelays < %3d usecs  %10d      %9d", (i + 1) * 100, s_usec_100_bins[i], itotal);
        } else {
            clen = sprintf(tbuf, "\ndelays <   1 msec   %10d      %9d", s_usec_100_bins[i], itotal);
        }
        stotal += s_usec_100_bins[i];

        if (i_latency_per_cpu == TRUE) {
            for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
                il = &i_lat[cpu];

                clen += sprintf(&tbuf[clen], " %9d", il->i_usec_100_bins[i]);
            }
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    }
    print_total(fp, "\ntotal  <   1 msec ", stotal);

    for (stotal = 0, i = 1; i < 10; i++) {
        for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            itotal += il->i_msec_1_bins[i];
            il->i_total += il->i_msec_1_bins[i];
        }
        clen = sprintf(tbuf, "\ndelays < %3d msecs  %10d      %9d", (i + 1), s_msec_1_bins[i], itotal);

        stotal += s_msec_1_bins[i];

        if (i_latency_per_cpu == TRUE) {
            for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
                il = &i_lat[cpu];

                clen += sprintf(&tbuf[clen], " %9d", il->i_msec_1_bins[i]);
            }
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    }
    print_total(fp, "\ntotal  <  10 msecs", stotal);

    for (stotal = 0, i = 1; i < 5; i++) {
        for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            itotal += il->i_msec_10_bins[i];
            il->i_total += il->i_msec_10_bins[i];
        }
        clen = sprintf(tbuf, "\ndelays < %3d msecs  %10d      %9d", (i + 1)*10, s_msec_10_bins[i], itotal);

        stotal += s_msec_10_bins[i];

        if (i_latency_per_cpu == TRUE) {
            for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
                il = &i_lat[cpu];

                clen += sprintf(&tbuf[clen], " %9d", il->i_msec_10_bins[i]);
            }
        }
        if (fp) {
            fprintf(fp, "%s", tbuf);
        } else {
            printw(tbuf);
        }
    }
    print_total(fp, "\ntotal  <  50 msecs", stotal);

    for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];
        itotal += il->i_too_slow;
    }
    clen = sprintf(tbuf, "\ndelays >  50 msecs  %10d      %9d", s_too_slow, itotal);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            clen += sprintf(&tbuf[clen], " %9d", il->i_too_slow);
        }
    }
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];

        if (cpu == 0 || (il->i_min_latency < min_lat)) {
            min_lat = il->i_min_latency;
        }
    }
    clen = sprintf(tbuf, "\n\nminimum latency(usecs) %7d %9ld", s_min_latency, min_lat);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            clen += sprintf(&tbuf[clen], " %9ld", il->i_min_latency);
        }
    }
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];

        if (cpu == 0 || (il->i_max_latency > max_lat)) {
            max_lat = il->i_max_latency;
        }
    }
    clen = sprintf(tbuf, "\nmaximum latency(usecs) %7d %9ld", s_max_latency, max_lat);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            clen += sprintf(&tbuf[clen], " %9ld", il->i_max_latency);
        }
    }
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    if (s_total_samples) {
        average_s_latency = (unsigned int)(s_total_latency/s_total_samples);
    } else {
        average_s_latency = 0;
    }

    for (itotal = 0, tot_lat = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];

        itotal += il->i_total_samples;
        tot_lat += il->i_total_latency;
    }
    if (itotal) {
        average_i_latency = (unsigned)(tot_lat/itotal);
    } else {
        average_i_latency = 0;
    }
    clen = sprintf(tbuf, "\naverage latency(usecs) %7d %9d", average_s_latency, average_i_latency);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            if (il->i_total_samples) {
                average_i_latency = (unsigned int)(il->i_total_latency/il->i_total_samples);
            } else {
                average_i_latency = 0;
            }
            clen += sprintf(&tbuf[clen], " %9d", average_i_latency);
        }
    }
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    for (itotal = 0, cpu = 0; cpu < num_i_latency_cpus; cpu++) {
        il = &i_lat[cpu];
        itotal += il->i_exceeded_threshold;
    }
    clen = sprintf(tbuf, "\nexceeded threshold     %7d %9d", s_exceeded_threshold, itotal);

    if (i_latency_per_cpu == TRUE) {
        for (cpu = 0; cpu < num_i_latency_cpus; cpu++) {
            il = &i_lat[cpu];

            clen += sprintf(&tbuf[clen], " %9d", il->i_exceeded_threshold);
        }
    }
    sprintf(&tbuf[clen], "\n");
    if (fp) {
        fprintf(fp, "%s", tbuf);
    } else {
        printw(tbuf);
    }

    if (fp == NULL) {
        refresh();
    } else {
        fflush(fp);
    }
}
static void
usage(void)
{
    fprintf(stderr, "Usage: latency [-p <priority>] [-h] [-m] [-st <threshold>] [-it <threshold>]\n");
    fprintf(stderr, "               [-c <codefile>] [-l <logfile>] [-R <rawfile>] [-n <kernel>]\n\n");

    fprintf(stderr, "  -p    specify scheduling priority to watch... default is realtime. Can also be a range, e.g. \"31-47\".\n");
    fprintf(stderr, "  -h    Display high resolution interrupt latencies and write them to latencies.csv (truncate existing file) upon exit.\n");
    fprintf(stderr, "  -st   set scheduler latency threshold in microseconds... if latency exceeds this, then log trace\n");
    fprintf(stderr, "  -m    specify per-CPU interrupt latency reporting\n");
    fprintf(stderr, "  -it   set interrupt latency threshold in microseconds... if latency exceeds this, then log trace\n");
    fprintf(stderr, "  -c    specify name of codes file... default is /usr/share/misc/trace.codes\n");
    fprintf(stderr, "  -l    specify name of file to log trace entries to when the specified threshold is exceeded\n");
    fprintf(stderr, "  -R    specify name of raw trace file to process\n");
    fprintf(stderr, "  -n    specify kernel... default is /System/Library/Kernels/kernel.development\n");

    fprintf(stderr, "\nlatency must be run as root\n\n");

    exit(1);
}
int
main(int argc, char *argv[])
{
    int i;

    if (0 != reexec_to_match_kernel()) {
        fprintf(stderr, "Could not re-execute: %d\n", errno);
        exit(1);
    }
    /*
     * Option-parsing glue (argc--/argv++ advances and the usage() error
     * paths) was elided from this extract and is restored below.
     */
    while (argc > 1) {

        if (strcmp(argv[1], "-R") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                open_rawfile(argv[1]);
            } else {
                usage();
            }
            RAW_flag = 1;

        } else if (strcmp(argv[1], "-p") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                if (2 == sscanf(argv[1], "%d-%d", &watch_priority_min, &watch_priority_max)) {
                    if (watch_priority_min > watch_priority_max) {
                        usage();
                    } else if (watch_priority_min < 0) {
                        usage();
                    }
                } else {
                    if (1 == sscanf(argv[1], "%d", &watch_priority_min)) {
                        watch_priority_max = watch_priority_min;
                    } else {
                        usage();
                    }
                }
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-st") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                s_thresh_hold = atoi(argv[1]);
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-it") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                i_thresh_hold = atoi(argv[1]);
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-c") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                code_file = argv[1];
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-l") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                open_logfile(argv[1]);
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-n") == 0) {
            argc--;
            argv++;

            if (argc > 1) {
                kernelpath = argv[1];
            } else {
                usage();
            }
        } else if (strcmp(argv[1], "-h") == 0) {
            use_high_res_bins = TRUE;

        } else if (strcmp(argv[1], "-m") == 0) {
            i_latency_per_cpu = TRUE;

        } else {
            usage();
        }
        argc--;
        argv++;
    }
    if (!RAW_flag) {
        if (geteuid() != 0) {
            printf("'latency' must be run as root...\n");
            exit(1);
        }
    }
    if (kernelpath == NULL) {
        kernelpath = "/System/Library/Kernels/kernel.development";
    }
    if (code_file == NULL) {
        code_file = "/usr/share/misc/trace.codes";
    }

    /* setup calls restored from usage; elided in this extract */
    do_kernel_nm();
    getdivisor();
    init_code_file();

    if (!RAW_flag) {
        if (initscr() == NULL) {
            printf("Unrecognized TERM type, try vt100\n");
            exit(1);
        }
        clear();
        refresh();

        signal(SIGWINCH, sigwinch);
        signal(SIGINT, sigintr);
        signal(SIGQUIT, leave);
        signal(SIGTERM, leave);
        signal(SIGHUP, leave);

        /*
         * grab the number of cpus and scale the buffer size
         */
        int mib[] = { CTL_HW, HW_NCPU };
        size_t len = sizeof(num_cpus);

        sysctl(mib, ARRAYSIZE(mib), &num_cpus, &len, NULL, 0);

        set_remove();
        set_numbufs(SAMPLE_SIZE * num_cpus);

        get_bufinfo(&bufinfo);

        set_enable(0);

        set_pidexclude(getpid(), 1);
        set_enable(1);
        trace_enabled = 1;

        num_entries = bufinfo.nkdbufs;
    } else {
        num_entries = 50000;
        num_cpus = 128;
    }
    for (cpu_mask = 0, i = 0; i < num_cpus; i++)
        cpu_mask |= ((uint64_t)1 << i);

    if ((my_buffer = malloc(num_entries * sizeof(kd_buf))) == NULL) {
        quit("can't allocate memory for tracing info\n");
    }
    if ((last_decrementer_kd = (kd_buf **)malloc(num_cpus * sizeof(kd_buf *))) == NULL) {
        quit("can't allocate memory for decrementer tracing info\n");
    }
    if (i_latency_per_cpu == FALSE) {
        num_i_latency_cpus = 1;
    } else {
        num_i_latency_cpus = num_cpus;
    }
    if ((i_lat = (struct i_latencies *)malloc(num_i_latency_cpus * sizeof(struct i_latencies))) == NULL) {
        quit("can't allocate memory for interrupt latency info\n");
    }
    bzero((char *)i_lat, num_i_latency_cpus * sizeof(struct i_latencies));

    if (RAW_flag) {
        while (sample_sc()) {
            continue;
        }
        if (log_fp) {
            screen_update(log_fp);
        }
        screen_update(stdout);

        exit(0);
    } else {
        uint64_t adelay;
        double fdelay;
        double nanosecs_to_sleep;

        nanosecs_to_sleep = (double)(SAMPLE_TIME_USECS * 1000);
        fdelay = nanosecs_to_sleep * (divisor /1000);
        adelay = (uint64_t)fdelay;

        start_time = time(NULL);
        refresh_time = start_time;

        for (;;) {
            curr_time = time(NULL);

            if (curr_time >= refresh_time) {
                screen_update(NULL);
                refresh_time = curr_time + 1;
            }
            mach_wait_until(mach_absolute_time() + adelay);

            sample_sc();

            if (gotSIGWINCH) {
                /*
                 * No need to check for initscr error return.
                 * We won't get here if it fails on the first call.
                 */
                endwin();
                clear();
                refresh();

                gotSIGWINCH = 0;
            }
        }
    }
}
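/*
 * Load the thread -> command-name map, either from the raw file header
 * (with 4KB alignment padding for RAW_VERSION1 files) or live via the
 * KERN_KDTHRMAP sysctl, then populate the threadmap hash.
 */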
static void
read_command_map(void)
{
    kd_threadmap *mapptr = 0;
    int total_threads = 0;
    size_t size;
    off_t offset;
    int i;
    RAW_header header = {0};

    if (RAW_flag) {
        if (read(RAW_fd, &header, sizeof(RAW_header)) != sizeof(RAW_header)) {
            perror("read failed");
            exit(2);
        }
        if (header.version_no != RAW_VERSION1) {
            header.version_no = RAW_VERSION0;
            header.TOD_secs = time(NULL);
            header.TOD_usecs = 0;

            lseek(RAW_fd, (off_t)0, SEEK_SET);

            if (read(RAW_fd, &header.thread_count, sizeof(int)) != sizeof(int)) {
                perror("read failed");
                exit(2);
            }
        }
        total_threads = header.thread_count;

        sample_TOD_secs = header.TOD_secs;
        sample_TOD_usecs = header.TOD_usecs;

        if (total_threads == 0 && header.version_no != RAW_VERSION0) {
            offset = lseek(RAW_fd, (off_t)0, SEEK_CUR);
            offset = (offset + (4095)) & ~4095;

            lseek(RAW_fd, offset, SEEK_SET);
        }
    } else {
        total_threads = bufinfo.nkdthreads;
    }

    size = total_threads * sizeof(kd_threadmap);

    if (size == 0 || ((mapptr = (kd_threadmap *) malloc(size)) == 0)) {
        return;
    }
    bzero (mapptr, size);

    /*
     * Now read the threadmap
     */
    if (RAW_flag) {
        if (read(RAW_fd, mapptr, size) != size) {
            printf("Can't read the thread map -- this is not fatal\n");
        }
        if (header.version_no != RAW_VERSION0) {
            offset = lseek(RAW_fd, (off_t)0, SEEK_CUR);
            offset = (offset + (4095)) & ~4095;

            lseek(RAW_fd, offset, SEEK_SET);
        }
    } else {
        int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTHRMAP };
        if (sysctl(mib, ARRAYSIZE(mib), mapptr, &size, NULL, 0) < 0) {
            /*
             * This is not fatal -- just means I cant map command strings
             */
            printf("Can't read the thread map -- this is not fatal\n");
        }
    }
    for (i = 0; i < total_threads; i++) {
        create_map_entry(mapptr[i].thread, &mapptr[i].command[0]);
    }
    free(mapptr);
}
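/*
 * Thread-name tracking: map entries are created from the command map,
 * temporarily parked on threadmap_temp while a new thread is being
 * named, renamed on exec, and recycled through threadmap_freelist.
 */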
static void
create_map_entry(uintptr_t thread, char *command)
{
    threadmap_t tme;

    if ((tme = threadmap_freelist)) {
        threadmap_freelist = tme->tm_next;
    } else {
        tme = (threadmap_t)malloc(sizeof(struct threadmap));
    }
    tme->tm_thread = thread;

    (void)strncpy (tme->tm_command, command, MAXCOMLEN);
    tme->tm_command[MAXCOMLEN] = '\0';
    tme->tm_orig_command[0] = '\0';

    int hashid = thread & HASH_MASK;

    tme->tm_next = threadmap_hash[hashid];
    threadmap_hash[hashid] = tme;
}
static void
delete_thread_entry(uintptr_t thread)
{
    threadmap_t tme;

    int hashid = thread & HASH_MASK;

    if ((tme = threadmap_hash[hashid])) {
        if (tme->tm_thread == thread) {
            threadmap_hash[hashid] = tme->tm_next;
        } else {
            threadmap_t tme_prev = tme;

            for (tme = tme->tm_next; tme; tme = tme->tm_next) {
                if (tme->tm_thread == thread) {
                    tme_prev->tm_next = tme->tm_next;
                    break;
                }
                tme_prev = tme;
            }
        }
        if (tme) {
            tme->tm_next = threadmap_freelist;
            threadmap_freelist = tme;
        }
    }
}
static void
find_and_insert_tmp_map_entry(uintptr_t pthread, char *command)
{
    threadmap_t tme;

    if ((tme = threadmap_temp)) {
        if (tme->tm_pthread == pthread) {
            threadmap_temp = tme->tm_next;
        } else {
            threadmap_t tme_prev = tme;

            for (tme = tme->tm_next; tme; tme = tme->tm_next) {
                if (tme->tm_pthread == pthread) {
                    tme_prev->tm_next = tme->tm_next;
                    break;
                }
                tme_prev = tme;
            }
        }
        if (tme) {
            (void)strncpy (tme->tm_command, command, MAXCOMLEN);
            tme->tm_command[MAXCOMLEN] = '\0';
            tme->tm_orig_command[0] = '\0';

            int hashid = tme->tm_thread & HASH_MASK;

            tme->tm_next = threadmap_hash[hashid];
            threadmap_hash[hashid] = tme;
        }
    }
}
static void
create_tmp_map_entry(uintptr_t thread, uintptr_t pthread)
{
    threadmap_t tme;

    if ((tme = threadmap_freelist)) {
        threadmap_freelist = tme->tm_next;
    } else {
        tme = malloc(sizeof(struct threadmap));
    }
    tme->tm_thread = thread;
    tme->tm_pthread = pthread;
    tme->tm_command[0] = '\0';
    tme->tm_orig_command[0] = '\0';

    tme->tm_next = threadmap_temp;
    threadmap_temp = tme;
}
static threadmap_t
find_thread_entry(uintptr_t thread)
{
    threadmap_t tme;

    int hashid = thread & HASH_MASK;

    for (tme = threadmap_hash[hashid]; tme; tme = tme->tm_next) {
        if (tme->tm_thread == thread) {
            return tme;
        }
    }
    return 0;
}
static void
find_thread_name(uintptr_t thread, char **command)
{
    threadmap_t tme;

    if ((tme = find_thread_entry(thread))) {
        *command = tme->tm_command;
    } else {
        *command = EMPTYSTRING;
    }
}
static void
add_thread_entry_to_list(thread_entry_t *list, uintptr_t thread)
{
    thread_entry_t te;

    if ((te = thread_entry_freelist)) {
        thread_entry_freelist = te->te_next;
    } else {
        te = (thread_entry_t)malloc(sizeof(struct thread_entry));
    }
    te->te_thread = thread;
    te->te_next = *list;
    *list = te;
}
static void
exec_thread_entry(uintptr_t thread, char *command)
{
    threadmap_t tme;

    if ((tme = find_thread_entry(thread))) {
        if (tme->tm_orig_command[0] == '\0') {
            (void)strncpy (tme->tm_orig_command, tme->tm_command, MAXCOMLEN);
            tme->tm_orig_command[MAXCOMLEN] = '\0';
        }
        (void)strncpy (tme->tm_command, command, MAXCOMLEN);
        tme->tm_command[MAXCOMLEN] = '\0';

        add_thread_entry_to_list(&thread_reset_list, thread);
    } else {
        create_map_entry(thread, command);
    }
}
static void
record_thread_entry_for_gc(uintptr_t thread)
{
    add_thread_entry_to_list(&thread_delete_list, thread);
}
static void
gc_thread_entries(void)
{
    thread_entry_t te;
    thread_entry_t te_next;

    for (te = thread_delete_list; te; te = te_next) {
        delete_thread_entry(te->te_thread);

        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_delete_list = 0;
}
static void
gc_reset_entries(void)
{
    thread_entry_t te;
    thread_entry_t te_next;

    for (te = thread_reset_list; te; te = te_next) {
        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_reset_list = 0;
}
static void
reset_thread_names(void)
{
    thread_entry_t te;
    thread_entry_t te_next;

    for (te = thread_reset_list; te; te = te_next) {
        threadmap_t tme;

        if ((tme = find_thread_entry(te->te_thread))) {
            if (tme->tm_orig_command[0]) {
                (void)strncpy (tme->tm_command, tme->tm_orig_command, MAXCOMLEN);
                tme->tm_command[MAXCOMLEN] = '\0';
                tme->tm_orig_command[0] = '\0';
            }
        }
        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_reset_list = 0;
}
static void
delete_all_thread_entries(void)
{
    threadmap_t tme = 0;
    threadmap_t tme_next = 0;
    int i;

    for (i = 0; i < HASH_SIZE; i++) {
        for (tme = threadmap_hash[i]; tme; tme = tme_next) {
            tme_next = tme->tm_next;
            tme->tm_next = threadmap_freelist;
            threadmap_freelist = tme;
        }
        threadmap_hash[i] = 0;
    }
}
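/*
 * Scheduler-latency bookkeeping: a threadrun records when a thread in
 * the watched priority range was made runnable, so the MACH_sched/
 * MACH_stkhandoff handler can compute how long it waited for the CPU.
 */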
static void
insert_run_event(uintptr_t thread, int priority, kd_buf *kd, uint64_t now)
{
    threadrun_t trp;

    int hashid = thread & HASH_MASK;

    for (trp = threadrun_hash[hashid]; trp; trp = trp->tr_next) {
        if (trp->tr_thread == thread) {
            break;
        }
    }
    if (trp == NULL) {
        if ((trp = threadrun_freelist)) {
            threadrun_freelist = trp->tr_next;
        } else {
            trp = (threadrun_t)malloc(sizeof(struct threadrun));
        }
        trp->tr_thread = thread;

        trp->tr_next = threadrun_hash[hashid];
        threadrun_hash[hashid] = trp;

        add_thread_entry_to_list(&thread_run_list, thread);
    }
    trp->tr_entry = kd;   /* restored: the makerunnable kd_buf anchors the log range */
    trp->tr_timestamp = now;
    trp->tr_priority = priority;
}
static threadrun_t
find_run_event(uintptr_t thread)
{
    threadrun_t trp;

    int hashid = thread & HASH_MASK;

    for (trp = threadrun_hash[hashid]; trp; trp = trp->tr_next) {
        if (trp->tr_thread == thread) {
            return trp;
        }
    }
    return 0;
}
static void
delete_run_event(uintptr_t thread)
{
    threadrun_t trp = 0;
    threadrun_t trp_prev;

    int hashid = thread & HASH_MASK;

    if ((trp = threadrun_hash[hashid])) {
        if (trp->tr_thread == thread) {
            threadrun_hash[hashid] = trp->tr_next;
        } else {
            trp_prev = trp;

            for (trp = trp->tr_next; trp; trp = trp->tr_next) {
                if (trp->tr_thread == thread) {
                    trp_prev->tr_next = trp->tr_next;
                    break;
                }
                trp_prev = trp;
            }
        }
        if (trp) {
            trp->tr_next = threadrun_freelist;
            threadrun_freelist = trp;
        }
    }
}
static void
gc_run_events(void)
{
    thread_entry_t te;
    thread_entry_t te_next;
    threadrun_t trp;
    threadrun_t trp_next;

    for (te = thread_run_list; te; te = te_next) {
        int hashid = te->te_thread & HASH_MASK;

        for (trp = threadrun_hash[hashid]; trp; trp = trp_next) {
            trp_next = trp->tr_next;
            trp->tr_next = threadrun_freelist;
            threadrun_freelist = trp;
        }
        threadrun_hash[hashid] = 0;

        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_run_list = 0;
}
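/*
 * Start/stop event matching for syscall-style intervals: an event is
 * inserted on DBG_FUNC_START and consumed on the matching DBG_FUNC_END,
 * yielding the elapsed time between the two records.
 */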
static void
insert_start_event(uintptr_t thread, int type, uint64_t now)
{
    event_t evp;

    int hashid = thread & HASH_MASK;

    for (evp = event_hash[hashid]; evp; evp = evp->ev_next) {
        if (evp->ev_thread == thread && evp->ev_type == type) {
            break;
        }
    }
    if (evp == NULL) {
        if ((evp = event_freelist)) {
            event_freelist = evp->ev_next;
        } else {
            evp = (event_t)malloc(sizeof(struct event));
        }
        evp->ev_thread = thread;
        evp->ev_type = type;

        evp->ev_next = event_hash[hashid];
        event_hash[hashid] = evp;

        add_thread_entry_to_list(&thread_event_list, thread);
    }
    evp->ev_timestamp = now;
}
static uint64_t
consume_start_event(uintptr_t thread, int type, uint64_t now)
{
    event_t evp;
    event_t evp_prev;
    uint64_t elapsed = 0;

    int hashid = thread & HASH_MASK;

    if ((evp = event_hash[hashid])) {
        if (evp->ev_thread == thread && evp->ev_type == type) {
            event_hash[hashid] = evp->ev_next;
        } else {
            evp_prev = evp;

            for (evp = evp->ev_next; evp; evp = evp->ev_next) {
                if (evp->ev_thread == thread && evp->ev_type == type) {
                    evp_prev->ev_next = evp->ev_next;
                    break;
                }
                evp_prev = evp;
            }
        }
        if (evp) {
            elapsed = now - evp->ev_timestamp;

            if (now < evp->ev_timestamp) {
                printf("consume: now = %qd, timestamp = %qd\n", now, evp->ev_timestamp);
                elapsed = 0;
            }
            evp->ev_next = event_freelist;
            event_freelist = evp;
        }
    }
    return elapsed;
}
static void
gc_start_events(void)
{
    thread_entry_t te;
    thread_entry_t te_next;
    event_t evp;
    event_t evp_next;
    int hashid;

    for (te = thread_event_list; te; te = te_next) {

        hashid = te->te_thread & HASH_MASK;

        for (evp = event_hash[hashid]; evp; evp = evp_next) {
            evp_next = evp->ev_next;
            evp->ev_next = event_freelist;
            event_freelist = evp;
        }
        event_hash[hashid] = 0;

        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_event_list = 0;
}
static int
thread_in_user_mode(uintptr_t thread, char *command)
{
    event_t evp;

    if (strcmp(command, "kernel_task") == 0) {
        return 0;
    }
    int hashid = thread & HASH_MASK;

    for (evp = event_hash[hashid]; evp; evp = evp->ev_next) {
        if (evp->ev_thread == thread) {
            /* an outstanding start event means the thread is inside the kernel */
            return 0;
        }
    }
    return 1;
}
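/*
 * VFS_LOOKUP events arrive as a START record carrying the directory
 * vnode in arg1 plus packed pathname fragments in the remaining args of
 * the follow-on records; handle_lookup_event() accumulates them until
 * the END record completes the pathname.
 */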
static lookup_t
handle_lookup_event(uintptr_t thread, int debugid, kd_buf *kdp)
{
    lookup_t lkp;
    boolean_t first_record = FALSE;

    int hashid = thread & HASH_MASK;

    if (debugid & DBG_FUNC_START) {
        first_record = TRUE;
    }
    for (lkp = lookup_hash[hashid]; lkp; lkp = lkp->lk_next) {
        if (lkp->lk_thread == thread) {
            break;
        }
    }
    if (lkp == NULL) {
        if (first_record == FALSE) {
            return 0;
        }
        if ((lkp = lookup_freelist)) {
            lookup_freelist = lkp->lk_next;
        } else {
            lkp = (lookup_t)malloc(sizeof(struct lookup));
        }
        lkp->lk_thread = thread;

        lkp->lk_next = lookup_hash[hashid];
        lookup_hash[hashid] = lkp;

        add_thread_entry_to_list(&thread_lookup_list, thread);
    }
    if (first_record == TRUE) {
        lkp->lk_pathptr = lkp->lk_pathname;
        lkp->lk_dvp = kdp->arg1;
    } else {
        if (lkp->lk_pathptr > &lkp->lk_pathname[NUMPARMS-4]) {
            return lkp;
        }
        *lkp->lk_pathptr++ = kdp->arg1;
    }
    *lkp->lk_pathptr++ = kdp->arg2;
    *lkp->lk_pathptr++ = kdp->arg3;
    *lkp->lk_pathptr++ = kdp->arg4;
    *lkp->lk_pathptr = 0;

    if (debugid & DBG_FUNC_END) {
        return lkp;
    }
    return 0;
}
static void
delete_lookup_event(uintptr_t thread, lookup_t lkp_to_delete)
{
    lookup_t lkp;
    lookup_t lkp_prev;
    int hashid;

    hashid = thread & HASH_MASK;

    if ((lkp = lookup_hash[hashid])) {
        if (lkp == lkp_to_delete) {
            lookup_hash[hashid] = lkp->lk_next;
        } else {
            lkp_prev = lkp;

            for (lkp = lkp->lk_next; lkp; lkp = lkp->lk_next) {
                if (lkp == lkp_to_delete) {
                    lkp_prev->lk_next = lkp->lk_next;
                    break;
                }
                lkp_prev = lkp;
            }
        }
        if (lkp) {
            lkp->lk_next = lookup_freelist;
            lookup_freelist = lkp;
        }
    }
}
static void
gc_lookup_events(void)
{
    thread_entry_t te;
    thread_entry_t te_next;
    lookup_t lkp;
    lookup_t lkp_next;
    int hashid;

    for (te = thread_lookup_list; te; te = te_next) {
        hashid = te->te_thread & HASH_MASK;

        for (lkp = lookup_hash[hashid]; lkp; lkp = lkp_next) {
            lkp_next = lkp->lk_next;
            lkp->lk_next = lookup_freelist;
            lookup_freelist = lkp;
        }
        lookup_hash[hashid] = 0;

        te_next = te->te_next;
        te->te_next = thread_entry_freelist;
        thread_entry_freelist = te;
    }
    thread_lookup_list = 0;
}
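/*
 * One sampling pass: pull a buffer of kd_buf entries (from the raw file
 * or live via KERN_KDREADTR), track thread create/exec/exit updates, and
 * check every DECR_TRAP and scheduler event against the configured
 * latency thresholds, logging the surrounding trace range on violations.
 */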
static int
sample_sc(void)
{
    kd_buf *kd, *end_of_sample;
    int i;
    int count;
    int status = 1;   /* hypothetical name: the original end-of-file flag was elided from this extract */

    if (!RAW_flag) {
        /*
         * Get kernel buffer information
         */
        get_bufinfo(&bufinfo);
    }
    if (need_new_map) {
        delete_all_thread_entries();
        read_command_map();
        need_new_map = 0;
    }
    if (RAW_flag) {
        ssize_t bytes_read;

        bytes_read = read(RAW_fd, my_buffer, num_entries * sizeof(kd_buf));

        if (bytes_read == -1) {
            perror("read failed");
            exit(2);
        }
        count = bytes_read / sizeof(kd_buf);

        if (count != num_entries) {
            /* short read: this is the final sample in the raw file */
            status = 0;
        }
        if (first_now == 0) {
            kd = (kd_buf *)my_buffer;
            first_now = kd->timestamp & KDBG_TIMESTAMP_MASK;
        }
    } else {
        int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDREADTR };
        size_t needed = bufinfo.nkdbufs * sizeof(kd_buf);

        if (sysctl(mib, ARRAYSIZE(mib), my_buffer, &needed, NULL, 0) < 0) {
            quit("trace facility failure, KERN_KDREADTR\n");
        }
        count = needed;

        sample_generation++;

        if (bufinfo.flags & KDBG_WRAPPED) {
            need_new_map = 1;

            if (log_fp) {
                fprintf(log_fp, "\n\n%-19.19s   sample = %d   <<<<<<< trace buffer wrapped >>>>>>>\n\n",
                    &(ctime(&curr_time)[0]), sample_generation);
            }
            set_enable(0);
            set_enable(1);
        }
    }
    end_of_sample = &((kd_buf *)my_buffer)[count];

    /*
     * Always reinitialize the DECR_TRAP array
     */
    for (i = 0; i < num_cpus; i++) {
        last_decrementer_kd[i] = (kd_buf *)my_buffer;
    }

    for (kd = (kd_buf *)my_buffer; kd < end_of_sample; kd++) {
        kd_buf *kd_start;
        uintptr_t thread = kd->arg5;
        int type = kd->debugid & DBG_FUNC_MASK;

        (void)check_for_thread_update(thread, type, kd, NULL);

        uint64_t now = kd->timestamp & KDBG_TIMESTAMP_MASK;
        last_now = now;

        if (type == DECR_TRAP) {
            int cpunum = CPU_NUMBER(kd);
            double i_latency = handle_decrementer(kd, cpunum);

            if (log_fp) {
                if (i_thresh_hold && (int)i_latency > i_thresh_hold) {
                    kd_start = last_decrementer_kd[cpunum];

                    kd = log_decrementer(kd_start, kd, end_of_sample, i_latency);
                }
                last_decrementer_kd[cpunum] = kd;
            }
        } else {
            double s_latency;
            int s_priority;

            if (check_for_scheduler_latency(type, &thread, now, kd, &kd_start, &s_priority, &s_latency)) {
                log_scheduler(kd_start, kd, end_of_sample, s_priority, s_latency, thread);
            }
        }
    }
    gc_thread_entries();
    gc_reset_entries();
    gc_run_events();

    return status;
}
static void
enter_syscall(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info)
{
    char *p;
    double timestamp;
    double delta;
    char pcstring[84];

    int cpunum = CPU_NUMBER(kd);

    if (print_info && fp) {
        timestamp = (double)(now - start_bias) / divisor;
        delta = (double)idelta / divisor;

        if ((p = find_code(type))) {
            if (type == INTERRUPT) {
                int mode;

                /* restored: the exact user/kernel test was elided from this extract */
                if (kd->arg3) {
                    mode = USER_MODE;
                } else {
                    mode = KERNEL_MODE;
                }
                pc_to_string(&pcstring[0], kd->arg2, 58, mode);

                fprintf(fp, "%9.1f %8.1f\t\tINTERRUPT[%2lx] @ %-58.58s %8lx   %2d  %s\n",
                    timestamp, delta, kd->arg1, &pcstring[0], thread, cpunum, command);
            } else if (type == MACH_vmfault) {
                fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %8lx   %2d  %s\n",
                    timestamp, delta, p, thread, cpunum, command);
            } else {
                fprintf(fp, "%9.1f %8.1f\t\t%-28.28s %-16lx %-16lx %-16lx %-16lx %8lx   %2d  %s\n",
                    timestamp, delta, p, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
                    thread, cpunum, command);
            }
        } else {
            fprintf(fp, "%9.1f %8.1f\t\t%-8x %-16lx %-16lx %-16lx %-16lx %8lx   %2d  %s\n",
                timestamp, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
                thread, cpunum, command);
        }
    }
    if (type != BSC_thread_terminate && type != BSC_exit) {
        insert_start_event(thread, type, now);
    }
}
static void
exit_syscall(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, int print_info)
{
    char *p;
    uint64_t user_addr;
    double timestamp;
    double delta;
    double elapsed_timestamp;

    elapsed_timestamp = (double)consume_start_event(thread, type, now) / divisor;

    if (print_info && fp) {
        int cpunum = CPU_NUMBER(kd);

        timestamp = (double)(now - start_bias) / divisor;
        delta = (double)idelta / divisor;

        fprintf(fp, "%9.1f %8.1f(%.1f) \t", timestamp, delta, elapsed_timestamp);

        if ((p = find_code(type))) {
            if (type == INTERRUPT) {
                fprintf(fp, "INTERRUPT %8lx   %2d  %s\n", thread, cpunum, command);
            } else if (type == MACH_vmfault && kd->arg4 <= DBG_PAGEIND_FAULT) {
                user_addr = ((uint64_t)kd->arg1 << 32) | (uint32_t)kd->arg2;

                fprintf(fp, "%-28.28s %-10.10s  %-16qx %8lx   %2d  %s\n",
                    p, fault_name[kd->arg4], user_addr,
                    thread, cpunum, command);
            } else {
                fprintf(fp, "%-28.28s %-16lx %-16lx %8lx   %2d  %s\n",
                    p, kd->arg1, kd->arg2,
                    thread, cpunum, command);
            }
        } else {
            fprintf(fp, "%-8x %-16lx %-16lx %8lx   %2d  %s\n",
                type, kd->arg1, kd->arg2,
                thread, cpunum, command);
        }
    }
}
static void
print_entry(FILE *fp, kd_buf *kd, uintptr_t thread, int type, char *command, uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd_note)
{
    char *p;

    int cpunum = CPU_NUMBER(kd);

    double timestamp = (double)(now - start_bias) / divisor;
    double delta = (double)idelta / divisor;

    if ((p = find_code(type))) {
        if (kd == kd_note) {
            fprintf(fp, "%9.1f %8.1f\t**\t", timestamp, delta);
        } else {
            fprintf(fp, "%9.1f %8.1f\t\t", timestamp, delta);
        }
        fprintf(fp, "%-28.28s %-16lx %-16lx %-16lx %-16lx %8lx   %2d  %s\n",
            p, kd->arg1, kd->arg2, kd->arg3, kd->arg4, thread, cpunum, command);
    } else {
        fprintf(fp, "%9.1f %8.1f\t\t%-8x %-16lx %-16lx %-16lx %-16lx %8lx   %2d  %s\n",
            timestamp, delta, type, kd->arg1, kd->arg2, kd->arg3, kd->arg4,
            thread, cpunum, command);
    }
}
static void
check_for_thread_update(uintptr_t thread, int debugid_base, kd_buf *kbufp, char **command)
{
    if (debugid_base == TRACE_DATA_NEWTHREAD) {
        /*
         * Save the create thread data
         */
        create_tmp_map_entry(kbufp->arg1, thread);
    } else if (debugid_base == TRACE_STRING_NEWTHREAD) {
        /*
         * process new map entry
         */
        find_and_insert_tmp_map_entry(thread, (char *)&kbufp->arg1);
    } else if (debugid_base == TRACE_STRING_EXEC) {
        exec_thread_entry(thread, (char *)&kbufp->arg1);
    } else {
        if (debugid_base == BSC_exit || debugid_base == BSC_thread_terminate) {
            record_thread_entry_for_gc(thread);
        }
        if (command) {
            find_thread_name(thread, command);
        }
    }
}
static void
log_info(uint64_t now, uint64_t idelta, uint64_t start_bias, kd_buf *kd, kd_buf *kd_note)
{
    lookup_t lkp;
    int mode;
    uintptr_t reason;
    char *p;
    char *command;
    char *command1;
    char command_buf[32];
    char sched_info[64];
    char pcstring[128];
    char joe[32];
    const char *sched_reason;
    double i_latency;
    double timestamp;
    double delta;

    uintptr_t thread = kd->arg5;
    int cpunum = CPU_NUMBER(kd);
    int debugid = kd->debugid;
    int type = kd->debugid & DBG_FUNC_MASK;

    (void)check_for_thread_update(thread, type, kd, &command);

    if ((type >> 24) == DBG_TRACE) {
        if (((type >> 16) & 0xff) != DBG_TRACE_INFO) {
            return;
        }
    }
    timestamp = (double)(now - start_bias) / divisor;
    delta = (double)idelta / divisor;

    switch (type) {

    case CQ_action:
        pc_to_string(&pcstring[0], kd->arg1, 84, KERNEL_MODE);

        fprintf(log_fp, "%9.1f %8.1f\t\tCQ_action @ %-84.84s %8lx   %2d  %s\n",
            timestamp, delta, &pcstring[0], thread, cpunum, command);
        break;

    case TES_action:
        pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

        fprintf(log_fp, "%9.1f %8.1f\t\tTES_action @ %-83.83s %8lx   %2d  %s\n",
            timestamp, delta, &pcstring[0], thread, cpunum, command);
        break;

    case IES_action:
        pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

        fprintf(log_fp, "%9.1f %8.1f\t\tIES_action @ %-83.83s %8lx   %2d  %s\n",
            timestamp, delta, &pcstring[0], thread, cpunum, command);
        break;

    case IES_filter:
        pc_to_string(&pcstring[0], kd->arg1, 83, KERNEL_MODE);

        fprintf(log_fp, "%9.1f %8.1f\t\tIES_filter @ %-83.83s %8lx   %2d  %s\n",
            timestamp, delta, &pcstring[0], thread, cpunum, command);
        break;

    case DECR_TRAP:
        if ((int)kd->arg1 >= 0) {
            i_latency = 0;
        } else {
            i_latency = (((double)(-1 - kd->arg1)) / divisor);
        }
        if (i_thresh_hold && (int)i_latency > i_thresh_hold) {
            p = "*";
        } else {
            p = " ";
        }
        /* restored: the exact user/kernel test was elided from this extract */
        if (kd->arg3) {
            mode = USER_MODE;
        } else {
            mode = KERNEL_MODE;
        }
        pc_to_string(&pcstring[0], kd->arg2, 84, mode);

        fprintf(log_fp, "%9.1f %8.1f[%.1f]%s\tDECR_TRAP @ %-84.84s %8lx   %2d  %s\n",
            timestamp, delta, i_latency, p, &pcstring[0], thread, cpunum, command);
        break;

    case DECR_SET:
        fprintf(log_fp, "%9.1f %8.1f[%.1f]  \t%-28.28s %8lx   %2d  %s\n",
            timestamp, delta, (double)kd->arg1/divisor, "DECR_SET", thread, cpunum, command);
        break;

    case MACH_sched:
    case MACH_stkhandoff:

        find_thread_name(kd->arg2, &command1);

        if (command1 == EMPTYSTRING) {
            command1 = command_buf;
            sprintf(command1, "%-8lx", kd->arg2);
        }
        if (thread_in_user_mode(kd->arg2, command1)) {
            p = "U";
        } else {
            p = "K";
        }
        reason = kd->arg1;

        if (reason > MAX_REASON) {
            sched_reason = "?";
        } else {
            sched_reason = sched_reasons[reason];
        }
        if (sched_reason[0] == '?') {
            sprintf(joe, "%lx", reason);
            sched_reason = joe;
        }
        sprintf(sched_info, "%16.16s @ pri %3lu  -->  %16.16s @ pri %3lu%s", command, kd->arg3, command1, kd->arg4, p);

        fprintf(log_fp, "%9.1f %8.1f\t\t%-10.10s[%s]       %s %8lx   %2d\n",
            timestamp, delta, "MACH_SCHED", sched_reason, sched_info, thread, cpunum);
        break;

    case VFS_LOOKUP:
        if ((lkp = handle_lookup_event(thread, debugid, kd))) {
            /*
             * print the tail end of the pathname
             */
            p = (char *)lkp->lk_pathname;
            size_t clen = strlen(p);

            /* restored: the tail-trim arithmetic was elided from this extract */
            if (clen > 45) {
                clen -= 45;
            } else {
                clen = 0;
            }

            fprintf(log_fp, "%9.1f %8.1f\t\t%-14.14s %-59s    %-16lx %8lx   %2d  %s\n",
                timestamp, delta, "VFS_LOOKUP",
                &p[clen], lkp->lk_dvp, thread, cpunum, command);

            delete_lookup_event(thread, lkp);
        }
        break;

    default:
        if (debugid & DBG_FUNC_START) {
            enter_syscall(log_fp, kd, thread, type, command, now, idelta, start_bias, 1);
        } else if (debugid & DBG_FUNC_END) {
            exit_syscall(log_fp, kd, thread, type, command, now, idelta, start_bias, 1);
        } else {
            print_entry(log_fp, kd, thread, type, command, now, idelta, start_bias, kd_note);
        }
        break;
    }
}
static void
log_range(kd_buf *kd_buffer, kd_buf *kd_start, kd_buf *kd_stop, kd_buf *kd_note, char *buf1)
{
    uint64_t last_timestamp = 0;
    uint64_t delta = 0;
    uint64_t start_bias = 0;
    uint64_t now;
    kd_buf *kd;
    size_t clen;
    char buf2[128];

    clen = strlen(buf1);
    memset(buf2, '-', clen);
    buf2[clen] = 0;

    fprintf(log_fp, "\n\n%s\n", buf2);
    fprintf(log_fp, "%s\n\n", buf1);

    fprintf(log_fp, "RelTime(Us)  Delta              debugid                      arg1             arg2             arg3             arg4              thread   cpu  command\n\n");

    reset_thread_names();

    last_timestamp = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
    start_bias = last_timestamp;

    for (kd = kd_buffer; kd <= kd_stop; kd++) {
        now = kd->timestamp & KDBG_TIMESTAMP_MASK;

        if (kd >= kd_start) {
            delta = now - last_timestamp;

            log_info(now, delta, start_bias, kd, kd_note);

            last_timestamp = now;
        } else {
            int debugid = kd->debugid;
            uintptr_t thread = kd->arg5;
            int type = kd->debugid & DBG_FUNC_MASK;

            if ((type >> 24) == DBG_TRACE) {
                if (((type >> 16) & 0xff) != DBG_TRACE_INFO) {
                    continue;
                }
            }
            if (type == BSC_thread_terminate || type == BSC_exit) {
                continue;
            }
            if (debugid & DBG_FUNC_START) {
                insert_start_event(thread, type, now);
            } else if (debugid & DBG_FUNC_END) {
                (void)consume_start_event(thread, type, now);
            }
        }
    }
    gc_start_events();
    gc_lookup_events();
}
static kd_buf *
log_decrementer(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, double i_latency)
{
    kd_buf *kd_start, *kd_stop;
    int kd_count;   /* Limit the boundary of kd_start */
    uint64_t now;
    double sample_timestamp;
    char buf1[128];

    uintptr_t thread = kd_beg->arg5;
    int cpunum = CPU_NUMBER(kd_end);

    for (kd_count = 0, kd_start = kd_beg - 1; (kd_start >= (kd_buf *)my_buffer); kd_start--, kd_count++) {
        if (kd_count == MAX_LOG_COUNT) {
            break;
        }
        if (CPU_NUMBER(kd_start) != cpunum) {
            continue;
        }
        if ((kd_start->debugid & DBG_FUNC_MASK) == DECR_TRAP) {
            break;
        }
        if (kd_start->arg5 != thread) {
            break;
        }
    }
    if (kd_start < (kd_buf *)my_buffer) {
        kd_start = (kd_buf *)my_buffer;
    }
    thread = kd_end->arg5;

    for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
        if (CPU_NUMBER(kd_stop) != cpunum) {
            continue;
        }
        if ((kd_stop->debugid & DBG_FUNC_MASK) == INTERRUPT) {
            break;
        }
        if (kd_stop->arg5 != thread) {
            break;
        }
    }
    if (kd_stop >= end_of_sample) {
        kd_stop = end_of_sample - 1;
    }
    if (RAW_flag) {
        time_t TOD_secs;
        uint64_t TOD_usecs;

        now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
        sample_timestamp = (double)(now - first_now) / divisor;

        TOD_usecs = (uint64_t)sample_timestamp;
        TOD_secs = (unsigned long)sample_TOD_secs + (unsigned long)((sample_TOD_usecs + TOD_usecs) / 1000000);

        sprintf(buf1, "%-19.19s     interrupt latency = %.1fus [timestamp %.1f]", ctime(&TOD_secs), i_latency, sample_timestamp);
    } else {
        sprintf(buf1, "%-19.19s     interrupt latency = %.1fus [sample %d]", &(ctime(&curr_time)[0]), i_latency, sample_generation);
    }
    log_range((kd_buf *)my_buffer, kd_start, kd_stop, 0, buf1);

    return kd_stop;
}
static void
log_scheduler(kd_buf *kd_beg, kd_buf *kd_end, kd_buf *end_of_sample, int s_priority, double s_latency, uintptr_t thread)
{
    kd_buf *kd_start, *kd_stop;
    uint64_t now;
    int count;
    int cpunum;
    uint64_t cmask = 0;
    double sample_timestamp;
    char buf1[128];

    for (count = 0, kd_start = kd_beg; (kd_start >= (kd_buf *)my_buffer); kd_start--) {
        cpunum = CPU_NUMBER(kd_start);

        cmask |= ((uint64_t)1 << cpunum);

        if (cmask == cpu_mask) {
            /*
             * We've now seen an event from every CPU, so the range covers
             * the scheduling window.  (The original loop body here, which
             * used "count", was elided from this extract.)
             */
            break;
        }
    }
    if (kd_start < (kd_buf *)my_buffer) {
        kd_start = (kd_buf *)my_buffer;
    }

    for (kd_stop = kd_end + 1; kd_stop < end_of_sample; kd_stop++) {
        if (kd_stop->arg5 == thread) {
            break;
        }
    }
    if (kd_stop >= end_of_sample) {
        kd_stop = end_of_sample - 1;
    }
    if (RAW_flag) {
        time_t TOD_secs;
        uint64_t TOD_usecs;

        now = kd_start->timestamp & KDBG_TIMESTAMP_MASK;
        sample_timestamp = (double)(now - first_now) / divisor;

        TOD_usecs = (uint64_t)sample_timestamp;
        TOD_secs = (unsigned long)sample_TOD_secs + (unsigned long)((sample_TOD_usecs + TOD_usecs) / 1000000);

        sprintf(buf1, "%-19.19s     priority = %d,  scheduling latency = %.1fus [timestamp %.1f]", ctime(&TOD_secs), s_priority, s_latency, sample_timestamp);
    } else {
        sprintf(buf1, "%-19.19s     priority = %d,  scheduling latency = %.1fus [sample %d]", &(ctime(&curr_time)[0]), s_priority, s_latency, sample_generation);
    }

    log_range((kd_buf *)my_buffer, kd_start, kd_stop, kd_beg, buf1);
}
static int
check_for_scheduler_latency(int type, uintptr_t *thread, uint64_t now, kd_buf *kd, kd_buf **kd_start, int *priority, double *latency)
{
    int found_latency = 0;

    if (type == MACH_makerunnable) {
        if (watch_priority_min <= kd->arg2 && kd->arg2 <= watch_priority_max) {
            insert_run_event(kd->arg1, (int)kd->arg2, kd, now);
        }
    } else if (type == MACH_sched || type == MACH_stkhandoff) {
        threadrun_t trp = find_run_event(kd->arg2);

        if (type == MACH_sched || type == MACH_stkhandoff) {
            *thread = kd->arg2;
        }
        if ((trp = find_run_event(*thread))) {
            double d_s_latency = (((double)(now - trp->tr_timestamp)) / divisor);
            int s_latency = (int)d_s_latency;

            if (s_latency < 100) {
                s_usec_10_bins[s_latency/10]++;
            } else {
                if (s_latency < 1000) {
                    s_usec_100_bins[s_latency/100]++;
                } else if (s_latency < 10000) {
                    s_msec_1_bins[s_latency/1000]++;
                } else if (s_latency < 50000) {
                    s_msec_10_bins[s_latency/10000]++;
                } else {
                    s_too_slow++;
                }
            }
            if (s_latency > s_max_latency) {
                s_max_latency = s_latency;
            }
            if (s_latency < s_min_latency || s_total_samples == 0) {
                s_min_latency = s_latency;
            }
            s_total_latency += s_latency;
            s_total_samples++;

            if (s_thresh_hold && s_latency > s_thresh_hold) {
                s_exceeded_threshold++;

                if (log_fp) {
                    *kd_start = trp->tr_entry;
                    *priority = trp->tr_priority;
                    *latency = d_s_latency;

                    found_latency = 1;
                }
            }
            delete_run_event(*thread);
        }
    }
    return found_latency;
}
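/*
 * DECR_TRAP records encode lateness in arg1: a negative value is the
 * number of absolute-time ticks by which the decrementer interrupt fired
 * late.  Convert that to microseconds and bin it into the per-CPU
 * histogram (plus the high-resolution bins when -h is given).
 */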
static double
handle_decrementer(kd_buf *kd, int cpunum)
{
    struct i_latencies *il;
    double latency;
    long elapsed_usecs;

    if (i_latency_per_cpu == FALSE) {
        cpunum = 0;
    }
    il = &i_lat[cpunum];

    if ((long)(kd->arg1) >= 0) {
        latency = 1;
    } else {
        latency = (((double)(-1 - kd->arg1)) / divisor);
    }
    elapsed_usecs = (long)latency;

    if (elapsed_usecs < 100) {
        il->i_usec_10_bins[elapsed_usecs/10]++;
    } else {
        if (elapsed_usecs < 1000) {
            il->i_usec_100_bins[elapsed_usecs/100]++;
        } else if (elapsed_usecs < 10000) {
            il->i_msec_1_bins[elapsed_usecs/1000]++;
        } else if (elapsed_usecs < 50000) {
            il->i_msec_10_bins[elapsed_usecs/10000]++;
        } else {
            il->i_too_slow++;
        }
    }
    if (use_high_res_bins && elapsed_usecs < N_HIGH_RES_BINS) {
        i_high_res_bins[elapsed_usecs]++;
    }
    if (i_thresh_hold && elapsed_usecs > i_thresh_hold) {
        il->i_exceeded_threshold++;
    }
    if (elapsed_usecs > il->i_max_latency) {
        il->i_max_latency = elapsed_usecs;
    }
    if (elapsed_usecs < il->i_min_latency || il->i_total_samples == 0) {
        il->i_min_latency = elapsed_usecs;
    }
    il->i_total_latency += elapsed_usecs;
    il->i_total_samples++;

    return latency;
}
static char *
find_code(int type)
{
    int i;

    for (i = 0; i < num_of_codes; i++) {
        if (codes_tab[i].type == type) {
            return codes_tab[i].name;
        }
    }
    return NULL;
}
static void
init_code_file(void)
{
    FILE *fp;
    int i;

    if ((fp = fopen(code_file, "r")) == NULL) {
        if (log_fp) {
            fprintf(log_fp, "open of %s failed\n", code_file);
        }
        return;
    }
    for (i = 0; i < MAX_ENTRIES; i++) {
        int code;
        char name[128];

        int n = fscanf(fp, "%x%127s\n", &code, name);

        if (n == 1 && i == 0) {
            /*
             * old code file format, just skip
             */
            continue;
        }
        if (n != 2) {
            break;
        }

        strncpy(codes_tab[i].name, name, 32);
        codes_tab[i].type = code;
    }
    num_of_codes = i;

    fclose(fp);
}
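/*
 * Build an in-memory kernel symbol table by running nm(1) on the kernel
 * image and parsing its "address type name" lines; pc_to_string() then
 * resolves PCs against the sorted table via binary_search().
 */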
static void
do_kernel_nm(void)
{
    int i;
    size_t len;
    FILE *fp = NULL;
    char tmp_nm_file[128];
    char tmpstr[1024];
    char inchr;

    bzero(tmp_nm_file, 128);
    bzero(tmpstr, 1024);

    /*
     * Build the temporary nm file path
     */
    strcpy(tmp_nm_file,"/tmp/knm.out.XXXXXX");

    if (!mktemp(tmp_nm_file)) {
        fprintf(stderr, "Error in mktemp call\n");
        return;
    }

    /*
     * Build the nm command and create a tmp file with the output
     */
    sprintf (tmpstr, "/usr/bin/nm -f -n -s __TEXT __text %s > %s",
        kernelpath, tmp_nm_file);
    system(tmpstr);

    /*
     * Parse the output from the nm command
     */
    if ((fp = fopen(tmp_nm_file, "r")) == NULL) {
        /* Hmmm, let's not treat this as fatal */
        fprintf(stderr, "Failed to open nm symbol file [%s]\n", tmp_nm_file);
        return;
    }
    /*
     * Count the number of symbols in the nm symbol table
     */
    kern_sym_count = 0;

    while ((inchr = getc(fp)) != -1) {
        if (inchr == '\n') {
            kern_sym_count++;
        }
    }
    rewind(fp);

    /*
     * Malloc the space for symbol table
     */
    if (kern_sym_count > 0) {
        kern_sym_tbl = malloc(kern_sym_count * sizeof(kern_sym_t));

        if (!kern_sym_tbl) {
            /*
             * Hmmm, lets not treat this as fatal
             */
            fprintf(stderr, "Can't allocate memory for kernel symbol table\n");
        } else {
            bzero(kern_sym_tbl, kern_sym_count * sizeof(kern_sym_t));
        }
    } else {
        /*
         * Hmmm, lets not treat this as fatal
         */
        fprintf(stderr, "No kernel symbol table \n");
    }
    for (i = 0; i < kern_sym_count; i++) {
        bzero(tmpstr, 1024);

        if (fscanf(fp, "%p %c %s", &kern_sym_tbl[i].k_sym_addr, &inchr, tmpstr) != 3) {
            break;
        } else {
            len = strlen(tmpstr);
            kern_sym_tbl[i].k_sym_name = malloc(len + 1);

            if (kern_sym_tbl[i].k_sym_name == NULL) {
                fprintf(stderr, "Can't allocate memory for symbol name [%s]\n", tmpstr);
                kern_sym_tbl[i].k_sym_name = NULL;
                len = 0;
            } else {
                strcpy(kern_sym_tbl[i].k_sym_name, tmpstr);
            }

            kern_sym_tbl[i].k_sym_len = len;
        }
    }
    if (i != kern_sym_count) {
        /*
         * Hmmm, didn't build up entire table from nm
         * scrap the entire thing
         */
        free(kern_sym_tbl);
        kern_sym_tbl = NULL;
        kern_sym_count = 0;
    }
    fclose(fp);

    /*
     * Remove the temporary nm file
     */
    unlink(tmp_nm_file);

#if 0
    /*
     * Dump the kernel symbol table
     */
    for (i = 0; i < kern_sym_count; i++) {
        if (kern_sym_tbl[i].k_sym_name) {
            printf ("[%d] %-16p    %s\n", i,
                kern_sym_tbl[i].k_sym_addr, kern_sym_tbl[i].k_sym_name);
        } else {
            printf ("[%d] %-16p    %s\n", i,
                kern_sym_tbl[i].k_sym_addr, "No symbol name");
        }
    }
#endif
}
static void
pc_to_string(char *pcstring, uintptr_t pc, int max_len, int mode)
{
    int ret;
    size_t len;

    if (mode == USER_MODE) {
        sprintf(pcstring, "%-16lx [usermode addr]", pc);
        return;
    }
    ret = binary_search(kern_sym_tbl, 0, kern_sym_count-1, pc);

    if (ret == -1 || kern_sym_tbl[ret].k_sym_name == NULL) {
        sprintf(pcstring, "%-16lx", pc);
        return;
    }
    if ((len = kern_sym_tbl[ret].k_sym_len) > (max_len - 8)) {
        len = max_len - 8;
    }
    memcpy(pcstring, kern_sym_tbl[ret].k_sym_name, len);

    sprintf(&pcstring[len], "+0x%-5lx", pc - (uintptr_t)kern_sym_tbl[ret].k_sym_addr);
}
/*
 * Return -1 if not found, else return index
 */
static int
binary_search(kern_sym_t *list, int low, int high, uintptr_t addr)
{
    int mid;

    if (kern_sym_count == 0) {
        return -1;
    }
    if (low > high) {
        return -1; /* failed */
    }
    if (low + 1 == high) {
        if ((uintptr_t)list[low].k_sym_addr <= addr && addr < (uintptr_t)list[high].k_sym_addr) {
            /*
             * We have a range match
             */
            return low;
        }
        if ((uintptr_t)list[high].k_sym_addr <= addr) {
            return high;
        }
        /*
         * Failed
         */
        return -1;
    }
    mid = (low + high) / 2;

    if (addr < (uintptr_t)list[mid].k_sym_addr) {
        return binary_search(list, low, mid, addr);
    }
    return binary_search(list, mid, high, addr);
}
static void
open_logfile(const char *path)
{
    log_fp = fopen(path, "a");

    if (!log_fp) {
        /*
         * failed to open path
         */
        fprintf(stderr, "latency: failed to open logfile [%s]\n", path);
        exit(1);
    }
}
static void
open_rawfile(const char *path)
{
    RAW_fd = open(path, O_RDONLY);

    if (RAW_fd == -1) {
        /*
         * failed to open path
         */
        fprintf(stderr, "latency: failed to open RAWfile [%s]\n", path);
        exit(1);
    }
}
static void
getdivisor(void)
{
    mach_timebase_info_data_t info;

    (void)mach_timebase_info(&info);

    /* ticks-per-usec conversion factor used throughout */
    divisor = ((double)info.denom / (double)info.numer) * 1000;
}