//
//  LiveTraceAction.cpp
//  msa
//
//  Created by James McIlree on 2/4/14.
//  Copyright (c) 2014 Apple. All rights reserved.
//

#include "global.h"

// Force materialization of the ring buffer print methods,
// so they can be called from the debugger.
template class EventRingBuffer<Kernel32>;
template class EventRingBuffer<Kernel64>;

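// Shared state between the SIGINT handler and the live trace event loop.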
static bool shouldProcessEvents;
static uint32_t sigintCount;

static bool start_live_tracing(Globals& globals)
{
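	// Bring the kdebug facility up from a clean state: clear any prior
	// configuration, size the kernel buffers, allow them to wrap,
	// allocate them, and enable tracing.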
	if (!KDBG::reset()) return false;
	if (!KDBG::set_buffer_capacity(globals.trace_buffer_size())) return false;
	if (!KDBG::set_nowrap(false)) return false;
	if (!KDBG::initialize_buffers()) return false;
	if (!KDBG::set_enabled(KDEBUG_ENABLE_TRACE)) return false;

	return true;
}

static void end_live_tracing(void)
{
	KDBG::reset();
}

static void signal_handler_ctrl_C(int sig)
{
	shouldProcessEvents = false;
	if (++sigintCount >= 5) {
		// Not responding, nuke it from orbit.
		exit(1);
	}
}

template <typename SIZE>
static void live_trace_event_loop(Globals& globals)
{
	// Handle ctrl-C
	shouldProcessEvents = true;
	sigintCount = 0;

	while (shouldProcessEvents) {
		signal(SIGINT, signal_handler_ctrl_C);

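		// The local ring buffer is sized at twice the kernel trace buffer,
		// presumably so a full kernel buffer can be drained without
		// overwriting events that have not yet been processed.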
		EventRingBuffer<SIZE> ring_buffer(globals, globals.trace_buffer_size() * 2);

		{
			char buf[PATH_MAX];
			char* buf_end = buf + sizeof(buf);
			print_mach_msg_header(buf, buf_end, globals);
			dprintf(globals.output_fd(), "%s", buf);
		}

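		// Presumably an RAII guard that toggles the voucher-contents
		// sysctl on for this scope when requested, and restores the
		// previous setting on destruction.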
		VoucherContentSysctl contents(globals.should_trace_voucher_contents());

		if (start_live_tracing(globals)) {

			// Our goal is to hit specific target times. In other words, if our
			// target is every 10ms and we spend 3ms doing work, we sleep 7ms.
			AbsTime traceUpdateIntervalAbs = globals.live_update_interval();
			AbsTime now, next_trace_update = AbsTime::now();
			std::unique_ptr<Machine<SIZE>> machine, last_machine;

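			// Per-task and per-thread policy/boost state carried across
			// update intervals and handed to process_events().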
			std::unordered_map<pid_t, bool> task_appnap_state;
			std::unordered_map<pid_t, TaskRequestedPolicy> task_requested_state;
			std::unordered_map<typename SIZE::ptr_t, TaskRequestedPolicy> thread_requested_state;
			std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>> task_effective_state;
			std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>> thread_effective_state;
			std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>> task_boosts;

			while (shouldProcessEvents) {
				now = AbsTime::now();
				if (now >= next_trace_update) {
					std::size_t count, capacity;
					KDEvent<SIZE>* events;

					std::tie(events, count, capacity) = ring_buffer.read();
					if (count) {
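						// Build the next Machine state incrementally from the
						// previous snapshot when one exists; otherwise bootstrap
						// it from the kernel's current cpumap and threadmap.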
						if (last_machine) {
							machine = std::make_unique<Machine<SIZE>>(*last_machine, events, count);
						} else {
							auto state = KDBG::state();
							auto threadmap = KDBG::threadmap<SIZE>(state);
							auto cpumap = KDBG::cpumap();
							machine = std::make_unique<Machine<SIZE>>(cpumap.data(), (uint32_t)cpumap.size(),
												  threadmap.data(), (uint32_t)threadmap.size(),
												  events, count);

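							// Anchor displayed timestamps to the first event
							// when zero-basing was requested.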
							if (globals.should_zero_base_timestamps() && count) {
								globals.set_beginning_of_time(events[0].timestamp());
							} else {
								globals.set_beginning_of_time(AbsTime(0));
							}
						}

						if (!machine->lost_events()) {
							process_events(globals, *machine, task_appnap_state, task_requested_state, thread_requested_state, task_effective_state, thread_effective_state, task_boosts);

							// If count == capacity, we read a completely full ring buffer
							// and more events may already be pending; skip the sleep and
							// re-read immediately rather than risk an overflow. Otherwise,
							// schedule the next update.
							if (count < capacity) {
								next_trace_update += traceUpdateIntervalAbs;
								if (next_trace_update <= now) {
									printf("WARNING - falling behind on event processing\n");
									// Reset so if we do catch up, we don't spin on a clock
									// that has fallen seconds behind.
									next_trace_update = AbsTime::now();
								}
							}
						} else {
							printf("LOST EVENTS, exiting...\n");
							shouldProcessEvents = false;
						}

						last_machine = std::move(machine);
					}
				}

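				// Sleep until the next scheduled update; mach_wait_until()
				// takes an absolute mach-time deadline.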
				mach_wait_until(next_trace_update.value());
			}
		} else {
			printf("Unable to enable tracing.\n");
			shouldProcessEvents = false;
		}

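		// Restore the default SIGINT disposition before the next pass
		// (or before leaving the loop).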
		signal(SIGINT, SIG_DFL);
	}

	// Final cleanup, ensuring any partial initialization is torn down.
	end_live_tracing();
}

void LiveTraceAction::execute(Globals& globals) {
	// Take an initial snapshot of the trace facility's state; among other
	// things, this tells us whether another program is using the trace buffer.
	try {
		KDState state = KDBG::state();
		if (state.is_initialized() || state.controlling_pid() > 0) {
			if (state.controlling_pid() != getpid()) {
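				// kill(pid, 0) probes for a process's existence without sending
				// a signal; ESRCH means the previous owner is gone and the
				// trace buffer can be reclaimed.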
				if (state.controlling_pid() > 0 && kill(state.controlling_pid(), 0) == -1 && errno == ESRCH) {
					if (globals.is_verbose()) {
						printf("Reclaiming trace buffer control from pid %d\n", state.controlling_pid());
					}
				} else {
					printf("Another process is using the trace facility, possibly pid %d\n", state.controlling_pid());
					exit(1);
				}
			}
		}

		try {
			if (state.is_lp64()) {
				live_trace_event_loop<Kernel64>(globals);
			} else {
				live_trace_event_loop<Kernel32>(globals);
			}
		} catch (const std::exception& e) {
			log_msg(ASL_LEVEL_WARNING, "Caught exception in %s:\n %s\n", __PRETTY_FUNCTION__, e.what());
			KDBG::reset();
		}

	} catch (Exception& e) {
		if (getuid() != 0) {
			printf("Unable to acquire trace buffer state. You must be root.\n");
			exit(1);
		} else {
			usage(e.what());
		}
	}
}