//
//  MachineCPU.mutable-impl.hpp
//  KDBG
//
//  Created by James McIlree on 11/7/12.
//  Copyright (c) 2014 Apple. All rights reserved.
//

template <typename SIZE>
void MachineCPU<SIZE>::set_idle(AbsTime timestamp) {
    ASSERT(is_idle_state_initialized(), "Setting idle before state was initialized");
    ASSERT(!is_intr(), "Setting idle while in interrupt");
    ASSERT(!is_idle(), "Setting idle while already idle");
    ASSERT(_begin_idle == 0, "Sanity");

    _begin_idle = timestamp;
    _flags |= (uint32_t)kMachineCPUFlag::IsStateIdle;
}

template <typename SIZE>
void MachineCPU<SIZE>::clear_idle(AbsTime timestamp) {
    ASSERT(is_idle_state_initialized(), "Clearing idle before state was initialized");
    ASSERT(!is_intr(), "Clearing idle while in interrupt");
    ASSERT(is_idle(), "Clearing idle while not idle");

    _cpu_idle.emplace_back(_begin_idle, timestamp - _begin_idle);
    DEBUG_ONLY(_begin_idle = AbsTime(0);)
    _flags &= ~(uint32_t)kMachineCPUFlag::IsStateIdle;
}
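
// A minimal usage sketch (hypothetical caller, not part of this file):
// a trace event consumer pairs these calls to record one idle span,
// assuming `cpu` is a MachineCPU<SIZE>& and t0 < t1 are event timestamps.
//
//     cpu.set_idle(t0);    // idle begins; t0 is remembered in _begin_idle
//     cpu.clear_idle(t1);  // idle ends; AbsInterval(t0, t1 - t0) is
//                          // appended to _cpu_idle and the flag is cleared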

template <typename SIZE>
void MachineCPU<SIZE>::set_deactivate_switch_to_idle_thread() {
    ASSERT(!is_deactivate_switch_to_idle_thread(), "State already set");
    ASSERT(!is_intr(), "This state should not occur during INTR");

    _flags |= (uint32_t)kMachineCPUFlag::IsStateDeactivatedForcedSwitchToIdleThread;
}

template <typename SIZE>
void MachineCPU<SIZE>::clear_deactivate_switch_to_idle_thread() {
    ASSERT(is_deactivate_switch_to_idle_thread(), "Clearing state when not set");
    ASSERT(!is_intr(), "This state transition should not occur during INTR");

    _flags &= ~(uint32_t)kMachineCPUFlag::IsStateDeactivatedForcedSwitchToIdleThread;
}

template <typename SIZE>
void MachineCPU<SIZE>::initialize_idle_state(bool is_idle, AbsTime timestamp) {
    ASSERT(!is_idle_state_initialized(), "Attempt to initialize Idle state more than once");
    ASSERT(!this->is_idle(), "Attempt to initialize Idle state while already idle");

    if (is_idle) {
        _begin_idle = timestamp;
        _flags |= (uint32_t)kMachineCPUFlag::IsStateIdle;
    }

    _flags |= (uint32_t)kMachineCPUFlag::IsStateIdleInitialized;
}

template <typename SIZE>
void MachineCPU<SIZE>::set_intr(AbsTime timestamp) {
    // We can take an INTR in state Unknown, IDLE, and RUNNING.
    ASSERT(is_intr_state_initialized(), "Setting INTR before state was initialized");
    ASSERT(!is_intr(), "Setting INTR when already in state INTR");
    ASSERT(_begin_intr == 0, "Sanity");

    _begin_intr = timestamp;
    _flags |= (uint32_t)kMachineCPUFlag::IsStateINTR;
}

template <typename SIZE>
void MachineCPU<SIZE>::clear_intr(AbsTime timestamp) {
    ASSERT(is_intr_state_initialized(), "Clearing INTR before state was initialized");
    ASSERT(is_intr(), "Clearing INTR when not in INTR");

    _cpu_intr.emplace_back(_begin_intr, timestamp - _begin_intr);
    DEBUG_ONLY(_begin_intr = AbsTime(0);)
    _flags &= ~(uint32_t)kMachineCPUFlag::IsStateINTR;
}

template <typename SIZE>
void MachineCPU<SIZE>::initialize_intr_state(bool is_intr, AbsTime timestamp) {
    ASSERT(!is_intr_state_initialized(), "Attempt to initialize INTR state more than once");
    ASSERT(!this->is_intr(), "Attempt to initialize INTR state while already INTR");

    if (is_intr) {
        _begin_intr = timestamp;
        _flags |= (uint32_t)kMachineCPUFlag::IsStateINTR;
    }

    _flags |= (uint32_t)kMachineCPUFlag::IsStateINTRInitialized;
}
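
// Sketch of the expected initialization ordering (hypothetical caller):
// each state machine is seeded exactly once before any set/clear calls,
// e.g. when the first event seen for this cpu arrives mid-interrupt:
//
//     cpu.initialize_idle_state(false, first_event_time);
//     cpu.initialize_intr_state(true, first_event_time); // opens an INTR
//     ...
//     cpu.clear_intr(intr_end_time); // closes the interval opened above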

template <typename SIZE>
void MachineCPU<SIZE>::initialize_thread_state(MachineThread<SIZE>* init_thread, AbsTime timestamp) {
    ASSERT(!is_thread_state_initialized(), "Attempt to initialize thread state more than once");
    ASSERT(!_thread, "Sanity");

    // When initializing the thread state, the TID lookup may fail. This
    // can happen if there wasn't a threadmap, or if the thread was created
    // later in the trace. We explicitly allow NULL as a valid value here.
    // NULL means "go ahead and set the init flag, but do not emit a runq
    // entry; the first entry will come from a real context switch later".

    _flags |= (uint32_t)kMachineCPUFlag::IsStateThreadInitialized;
    if (init_thread) {
        _cpu_runq.emplace_back(init_thread, true, timestamp);
        _thread = init_thread;
    }
}
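
// Hypothetical illustration of the NULL case described above:
//
//     cpu.initialize_thread_state(NULL, t0);    // init flag set, no runq entry
//     cpu.context_switch(threadB, threadA, t1); // first real runq entry; the
//                                               // from-thread check is relaxed
//                                               // because _thread is NULL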

template <typename SIZE>
void MachineCPU<SIZE>::context_switch(MachineThread<SIZE>* to_thread, MachineThread<SIZE>* from_thread, AbsTime timestamp) {
    //
    // We cannot context switch in INTR or Idle.
    //
    // The one exception: if the thread state was initialized with NULL,
    // the first context switch may arrive while the cpu is idle.
    ASSERT(!is_intr(), "May not context switch while in interrupt");
    ASSERT(!is_idle() || (_thread == NULL && is_thread_state_initialized()), "May not context switch while idle");
    ASSERT(to_thread, "May not context switch to NULL");

    // The threads should match, unless...
    //
    // 1) We're uninitialized; we don't know who was on cpu.
    // 2) VERY RARE: A process exec'd, and we made a new thread for the new
    //    process. The tids will still match, and the old thread should be
    //    marked as trace terminated.
    ASSERT(from_thread == _thread || _thread == NULL || (_thread->is_trace_terminated() && _thread->tid() == from_thread->tid()), "From thread does not match thread on cpu");

    // Very rarely, we init a cpu to a thread, and then event[0] is a mach_sched
    // or other context switch event. If that has happened, just discard the
    // init thread entry.
    if (_cpu_runq.size() == 1 &&
        _cpu_runq.back().is_event_zero_init_thread() &&
        timestamp == _cpu_runq.back().timestamp())
    {
        _cpu_runq.pop_back();
    }

    ASSERT(_cpu_runq.empty() || timestamp > _cpu_runq.back().timestamp(), "Out of order timestamps");
    ASSERT(_cpu_runq.size() < 2 || !_cpu_runq.back().is_event_zero_init_thread(), "Sanity");

    _cpu_runq.emplace_back(to_thread, false, timestamp);
    _thread = to_thread;
}
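
// Worked example of the event[0] discard above (hypothetical values):
// initialize_thread_state(threadA, t0) pushes an init entry at t0. If
// event[0] then context switches threadA -> threadB at that same t0,
// the init entry is popped first, so the runq ends up with a single
// entry for threadB at t0 rather than two entries with equal timestamps.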

template <typename SIZE>
void MachineCPU<SIZE>::post_initialize(AbsInterval events_timespan) {
#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
    // Make sure everything is sorted.
    if (_cpu_runq.size() > 1) {
        for (uint32_t i=1; i<_cpu_runq.size(); ++i) {
            ASSERT(_cpu_runq[i-1].timestamp() < _cpu_runq[i].timestamp(), "Out of order run events");
        }
    }
    if (_cpu_idle.size() > 1) {
        for (uint32_t i=1; i<_cpu_idle.size(); ++i) {
            ASSERT(_cpu_idle[i-1].max() < _cpu_idle[i].location(), "Out of order idle events");
        }
    }
    if (_cpu_intr.size() > 1) {
        for (uint32_t i=1; i<_cpu_intr.size(); ++i) {
            ASSERT(_cpu_intr[i-1].max() < _cpu_intr[i].location(), "Out of order intr events");
        }
    }
#endif

    // We do not need to flush the current thread on cpu; the cpu runq
    // only records "on" events, and assumes a duration of "until the
    // next thread arrives, or the end of time".

    // If we have a pending INTR state, flush it. The INTR must be
    // flushed first, so that the idle flush below does not trip
    // clear_idle's !is_intr() assert.
    if (is_intr())
        clear_intr(events_timespan.max());

    // If we have a pending idle state, flush it.
    if (is_idle())
        clear_idle(events_timespan.max());

    if (!_cpu_runq.empty() || !_cpu_idle.empty() || !_cpu_intr.empty()) {
        //
        // Collapse all the events into a single timeline
        //

        // Check this math once we're done building the timeline.
        size_t guessed_capacity = _cpu_runq.size() + _cpu_idle.size() * 2 + _cpu_intr.size() * 2;
        _timeline.reserve(guessed_capacity);

        auto runq_it = _cpu_runq.begin();
        auto idle_it = _cpu_idle.begin();
        auto intr_it = _cpu_intr.begin();

        // Starting these at 0 will force an update to valid values in
        // the first pass of the workloop.

        AbsInterval current_runq(AbsTime(0), AbsTime(0));
        AbsInterval current_idle(AbsTime(0), AbsTime(0));
        AbsInterval current_intr(AbsTime(0), AbsTime(0));

        MachineThread<SIZE>* current_thread = NULL;

        AbsTime cursor(events_timespan.location());
        while (events_timespan.contains(cursor)) {
            //
            // First we see if anyone needs updating with the next component.
            //
            if (cursor >= current_runq.max()) {
                if (runq_it != _cpu_runq.end()) {
                    AbsTime begin = runq_it->timestamp();
                    AbsTime end;
                    if (runq_it+1 != _cpu_runq.end())
                        end = (runq_it+1)->timestamp();
                    else
                        end = events_timespan.max();

                    current_runq = AbsInterval(begin, end - begin);
                    current_thread = runq_it->thread();
                    ++runq_it;
                } else {
                    // This will force future update checks to always fail.
                    current_runq = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
                    current_thread = NULL;
                }
            }

            if (cursor >= current_idle.max()) {
                if (idle_it != _cpu_idle.end()) {
                    current_idle = *idle_it;
                    ++idle_it;
                } else {
                    // This will force future update checks to always fail.
                    current_idle = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
                }
            }

            if (cursor >= current_intr.max()) {
                if (intr_it != _cpu_intr.end()) {
                    current_intr = *intr_it;
                    ++intr_it;
                } else {
                    // This will force future update checks to always fail.
                    current_intr = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
                }
            }

            //
            // Now we see what type of activity we will be recording.
            //
            // This is hierarchical: intr > idle > run > unknown.
            //
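            // For example (hypothetical instant): if the cursor falls inside
            // both the current idle and intr intervals, the checks below run
            // in ascending priority order and the last match wins, so the
            // activity is recorded as INTR.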

            kCPUActivity type = kCPUActivity::Unknown;

            if (current_runq.contains(cursor))
                type = kCPUActivity::Run;

            if (current_idle.contains(cursor))
                type = kCPUActivity::Idle;

            if (current_intr.contains(cursor))
                type = kCPUActivity::INTR;

            //
            // Now we know the type, and the starting location.
            // We must find the end.
            //
            // Since this is hierarchical, each type may end at
            // its own "end", or at the "begin" of a type higher than
            // itself. An idle can end at its own end, or at an intr begin.
            //

            AbsTime end;
            switch (type) {
                case kCPUActivity::Unknown:
                    end = std::min({ events_timespan.max(), current_runq.location(), current_idle.location(), current_intr.location() });
                    break;

                case kCPUActivity::Run:
                    end = std::min({ current_runq.max(), current_idle.location(), current_intr.location() });
                    break;

                case kCPUActivity::Idle:
                    end = std::min(current_idle.max(), current_intr.location());
                    break;

                case kCPUActivity::INTR:
                    end = current_intr.max();
                    break;
            }
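
            // For example (hypothetical values): an Idle interval covering
            // [10, 50) with an INTR interval beginning at 30 yields an Idle
            // activity for [10, 30) here; the next pass of the loop then
            // picks up the INTR at cursor == 30.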

            //
            // Now we drop in the new activity
            //
            if (type == kCPUActivity::Run) {
                ASSERT(current_thread, "Current thread is NULL");
                // It's a context switch if we are at the beginning of the runq interval.
                _timeline.emplace_back(current_thread, AbsInterval(cursor, end - cursor), current_runq.location() == cursor);
            } else
                _timeline.emplace_back(type, AbsInterval(cursor, end - cursor));

            //
            // And bump the cursor to the end...
            //
            cursor = end;
        }

#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
        for (auto it = _timeline.begin(); it != _timeline.end(); ++it) {
            auto next_it = it + 1;
            ASSERT(events_timespan.contains(*it), "activity not contained in events_timespan");
            if (next_it != _timeline.end()) {
                ASSERT(it->max() == next_it->location(), "activity not end to end");
                bool initial_idle_state = ((it == _timeline.begin()) && it->is_idle());
                ASSERT(!next_it->is_context_switch() || (it->is_run() || it->is_unknown() || initial_idle_state), "Context switch activity preceded by !run activity");
            }
        }
#endif
    }

    _cpu_runq.clear();
    _cpu_runq.shrink_to_fit();

    _cpu_idle.clear();
    _cpu_idle.shrink_to_fit();

    _cpu_intr.clear();
    _cpu_intr.shrink_to_fit();
}
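
// End-to-end sketch of the collapse above (hypothetical input for one cpu,
// half-open intervals):
//
//     runq:  threadA on cpu at t=0
//     idle:  [20, 40)
//     intr:  [30, 35)
//     events_timespan: [0, 60)
//
// yields an end-to-end timeline of activities:
//
//     [ 0, 20) Run(threadA, context switch)
//     [20, 30) Idle
//     [30, 35) INTR
//     [35, 40) Idle
//     [40, 60) Run(threadA)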