// Created by James McIlree on 2/4/14.
// Copyright (c) 2014 Apple. All rights reserved.

// Force materialization of the ring buffer print methods,
// so they can be called from the debugger.
template class EventRingBuffer<Kernel32>;
template class EventRingBuffer<Kernel64>;

static bool shouldProcessEvents;
static uint32_t sigintCount;
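
// Configure and enable the kernel debug facility: reset any prior session, size the
// trace buffers, allow wrapping, allocate, and turn tracing on. Returns false on any failure.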
static bool start_live_tracing(Globals& globals)
{
	if (!KDBG::reset()) return false;
	if (!KDBG::set_buffer_capacity(globals.trace_buffer_size())) return false;
	if (!KDBG::set_nowrap(false)) return false;
	if (!KDBG::initialize_buffers()) return false;
	if (!KDBG::set_enabled(KDEBUG_ENABLE_TRACE)) return false;

	return true;
}

static void end_live_tracing(void)
{
	KDBG::reset();
}
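
// SIGINT handler: ask the event loop to exit cleanly; repeated presses force termination.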
static void signal_handler_ctrl_C(int sig)
{
	shouldProcessEvents = false;
	if (++sigintCount >= 5) {
		// Not responding, nuke it from orbit.
		exit(1);
	}
}
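
// Live event loop: drain the kernel trace buffer on a fixed interval, rebuild the
// Machine<SIZE> model from each batch of events, and emit output until interrupted.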
template <typename SIZE>
static void live_trace_event_loop(Globals& globals)
{
	shouldProcessEvents = true;

	while (shouldProcessEvents) {
		signal(SIGINT, signal_handler_ctrl_C);

		EventRingBuffer<SIZE> ring_buffer(globals, globals.trace_buffer_size() * 2);
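
		// Emit the header line for the live mach_msg output stream.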
		char buf[1024]; // size assumed; any buffer large enough for the header works
		char* buf_end = buf + sizeof(buf);
		print_mach_msg_header(buf, buf_end, globals);
		dprintf(globals.output_fd(), "%s", buf);
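
		// Scoped sysctl toggle (inferred from the type name): enables voucher content
		// tracing for this session when requested.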
		VoucherContentSysctl contents(globals.should_trace_voucher_contents());

		if (start_live_tracing(globals)) {
			// Okay, our goal is to hit specific timeposts.
			// IOW, if our target is every 10ms, and we spend 3ms doing work,
			// we only sleep for the remaining 7ms before the next update.
			AbsTime traceUpdateIntervalAbs = globals.live_update_interval();
			AbsTime now, next_trace_update = AbsTime::now();

			std::unique_ptr<Machine<SIZE>> machine, last_machine;
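
			// Per-task and per-thread policy/boost state accumulated across batches;
			// process_events() reads and updates these each pass.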
			std::unordered_map<pid_t, bool> task_appnap_state;
			std::unordered_map<pid_t, TaskRequestedPolicy> task_requested_state;
			std::unordered_map<typename SIZE::ptr_t, TaskRequestedPolicy> thread_requested_state;
			std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>> task_effective_state;
			std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>> thread_effective_state;
			std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>> task_boosts;

			while (shouldProcessEvents) {
				now = AbsTime::now();
				if (now >= next_trace_update) {
					std::size_t count, capacity;
					KDEvent<SIZE>* events;

					std::tie(events, count, capacity) = ring_buffer.read();
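
					// read() yields a pointer to the drained events, the number of events
					// read, and the capacity of the chunk. On the first pass the Machine
					// model is built from the kernel's cpumap/threadmap; afterwards it is
					// rebuilt from the previous snapshot plus the new events.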
					if (last_machine) {
						machine = std::make_unique<Machine<SIZE>>(*last_machine, events, count);
					} else {
						auto state = KDBG::state();
						auto threadmap = KDBG::threadmap<SIZE>(state);
						auto cpumap = KDBG::cpumap();
						machine = std::make_unique<Machine<SIZE>>(cpumap.data(), (uint32_t)cpumap.size(),
											  threadmap.data(), (uint32_t)threadmap.size(),
											  events, count);

						if (globals.should_zero_base_timestamps() && count) {
							globals.set_beginning_of_time(events[0].timestamp());
						} else {
							globals.set_beginning_of_time(AbsTime(0));
						}
					}

					if (!machine->lost_events()) {
						process_events(globals, *machine, task_appnap_state, task_requested_state, thread_requested_state, task_effective_state, thread_effective_state, task_boosts);

						// We read to the end of the ring buffer, and there are
						// more events to process. Do not risk an overflow, process
						// them immediately.
						//
						// If count == capacity, we read to the end of the ring buffer,
						// and should immediately re-read.
						if (count < capacity) {
							next_trace_update += traceUpdateIntervalAbs;
							if (next_trace_update <= now) {
								printf("WARNING - falling behind on event processing\n");
								// Reset so if we do catch up, we don't spin on a clock
								// that has fallen seconds behind.
								next_trace_update = AbsTime::now();
							}
						}
					} else {
						printf("LOST EVENTS, exiting...\n");
						shouldProcessEvents = false;
					}

					last_machine = std::move(machine);
				}
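
				// Sleep until the next absolute deadline; mach_wait_until() takes an
				// absolute mach time value, so catching up never oversleeps.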
				mach_wait_until(next_trace_update.value());
			}
		} else {
			printf("Unable to enable tracing.\n");
			shouldProcessEvents = false;
		}

		signal(SIGINT, SIG_DFL);

		// Final cleanup here to make sure partial initialization is
		// cleaned up.
		end_live_tracing();
	}
}

void LiveTraceAction::execute(Globals& globals) {
	// Initial state snapshot, is another program using the trace buffer, etc.
	try {
		KDState state = KDBG::state();
		if (state.is_initialized() || state.controlling_pid() > 0) {
			if (state.controlling_pid() != getpid()) {
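				// kill(pid, 0) sends no signal; ESRCH means the previous controlling
				// process is gone and the trace buffer can safely be reclaimed.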
				if (state.controlling_pid() > 0 && kill(state.controlling_pid(), 0) == -1 && errno == ESRCH) {
					if (globals.is_verbose()) {
						printf("Reclaiming trace buffer control from pid %d\n", state.controlling_pid());
					}
				} else {
					printf("Another process is using the trace facility, possibly pid %d\n", state.controlling_pid());
					exit(1);
				}
			}
		}
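
		// Kernel trace records differ between 32-bit and 64-bit kernels; dispatch on
		// the running kernel's pointer size.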
		if (state.is_lp64()) {
			live_trace_event_loop<Kernel64>(globals);
		} else {
			live_trace_event_loop<Kernel32>(globals);
		}
	} catch (const std::exception& e) {
		log_msg(ASL_LEVEL_WARNING, "Caught exception in %s:\n %s\n", __PRETTY_FUNCTION__, e.what());
	} catch (Exception& e) {
		printf("Unable to acquire trace buffer state. You must be root.\n");
	}
}