/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

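/*
 * Default no-op implementations of the performance-controller callouts.
 * They stand in until a performance controller registers real callbacks
 * via sched_perfcontrol_register_callbacks(); the callout sites below
 * compare against these defaults to skip argument setup when no
 * controller is listening.
 */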
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
	__unused struct perfcontrol_thread_data *oncore,
	__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
	__unused void *unused)
{
}

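/*
 * Live callout pointers. Each one starts out pointing at its no-op
 * default above and is replaced in sched_perfcontrol_register_callbacks().
 */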
sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

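/*
 * sched_perfcontrol_register_callbacks()
 * Install a performance controller's callbacks, or reset every callout
 * to its default when callbacks is NULL. Callbacks introduced after
 * version 2 are adopted only if the caller's advertised version is new
 * enough, so older controllers continue to work against newer kernels.
 */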
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {
		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
	}
}

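/*
 * Package the per-thread data handed to a perfcontrol callout. The
 * energy estimate starts at zero; the callout may fill it in, and the
 * callers below fold it back into the thread.
 */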
static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

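/*
 * Optional callout-cost instrumentation: when perfcontrol_callout_stats_enabled
 * is set, the fixed counters are sampled around every callout and the
 * cycle/instruction deltas are accumulated per callout type.
 */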
int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
	os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */

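/*
 * Average cost of one callout of the given type. Only meaningful once at
 * least one callout of that type has been recorded, since the callout
 * count is the divisor.
 */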
uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
	       os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}

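/*
 * Context-switch callout, invoked as the CPU switches from 'old' to 'new'.
 * The legacy switch callback sees only the two threads' perfcontrol state;
 * the csw callback additionally receives per-thread data and CPU counters,
 * and can report per-thread energy estimates back through those structures.
 */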
void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* MONOTONIC */

#if __arm64__
		old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
		new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
	}
}

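/*
 * Forward a scheduler event affecting 'thread' on the current CPU to the
 * performance controller's state_update callout.
 */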
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* MONOTONIC */

#if __arm64__
	thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

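/*
 * Invoked as 'new_thread' is about to start executing on this core;
 * packages the thread's QoS, urgency, and observed scheduling latencies
 * for the oncore callout.
 */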
void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = new_thread->task == kernel_task;
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

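/*
 * Invoked as 'old_thread' stops executing on this core; 'last_dispatch'
 * is the timestamp that ends the on-core interval.
 */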
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}

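/*
 * Report the maximum runnable (waiting-to-run) latencies per urgency band
 * to the performance controller.
 */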
void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

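/*
 * Forward a work-interval report from 'thread' to the performance
 * controller.
 */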
void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id = thread->thread_id,
		.qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency = kwi_args->urgency,
		.flags = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start = kwi_args->start,
		.finish = kwi_args->finish,
		.deadline = kwi_args->deadline,
		.next_start = kwi_args->next_start,
		.create_flags = kwi_args->create_flags,
	};
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}

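/* Notify the performance controller that a previously set deadline has passed. */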
void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm fires if interrupts stay masked
 * for too long, measured from now.
 *
 * Call ml_get_timebase() directly to avoid the extra overhead that
 * DEVELOPMENT kernel configurations add on newer platforms.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	thread->machine.intmask_timestamp = ml_get_timebase();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms.
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms.
 */
void
ml_spin_debug_clear_self(void)
{
	ml_spin_debug_clear(current_thread());
}

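/*
 * ml_check_interrupts_disabled_duration()
 * Panic (except on KASAN kernels) if this thread has kept interrupts
 * masked for longer than interrupt_masked_timeout, scaled by the
 * platform's performance-degradation factor.
 */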
void
ml_check_interrupts_disabled_duration(thread_t thread)
{
	uint64_t start;
	uint64_t now;

	start = thread->machine.intmask_timestamp;
	if (start != 0) {
		now = ml_get_timebase();

		if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
			mach_timebase_info_data_t timebase;
			clock_timebase_info(&timebase);

#ifndef KASAN
			/*
			 * Disable the actual panic for KASAN due to the overhead of KASAN itself;
			 * leave the rest of the mechanism enabled so that KASAN can catch any bugs
			 * in the mechanism itself.
			 */
			panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
		}
	}

	return;
}
#endif // INTERRUPT_MASKED_DEBUG

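/*
 * ml_set_interrupts_enabled()
 * Enable or mask IRQ/FIQ delivery on the current CPU, returning whether
 * interrupts were enabled on entry. Before re-enabling, any pending
 * urgent AST is handled (when preemption allows), and under
 * INTERRUPT_MASKED_DEBUG the masked-duration watchdog is checked or armed.
 */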
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	thread_t thread;
	uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
	state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
	state = __builtin_arm_rsr("DAIF");
#endif
	if (enable && (state & INTERRUPT_MASK)) {
		assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts are currently masked, we will enable them (after finishing this check)
			thread = current_thread();
			ml_check_interrupts_disabled_duration(thread);
			thread->machine.intmask_timestamp = 0;
		}
#endif // INTERRUPT_MASKED_DEBUG
		if (get_preemption_level() == 0) {
			thread = current_thread();
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
#if __arm__
		__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
	} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
		__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts were enabled, we just masked them
			current_thread()->machine.intmask_timestamp = ml_get_timebase();
		}
#endif
	}
	return (state & INTERRUPT_MASK) == 0;
}

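/* Early-boot variant; by this point it can simply defer to ml_set_interrupts_enabled(). */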
boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Routine:   ml_at_interrupt_context
 * Function:  Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state. We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive. But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

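/*
 * ml_stack_remaining()
 * Return the number of bytes left on the current stack: the interrupt
 * stack if the stack pointer falls within it, the thread's kernel stack
 * otherwise.
 */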
vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}

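/* Quiescing state; asserted below to be read and written only with interrupts disabled. */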
static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(FALSE == ml_get_interrupts_enabled());
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return ml_quiescing;
}
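
/*
 * ml_get_booter_memory_size()
 * Estimate how much memory the booter kept for itself: memSizeActual when
 * reported, otherwise the difference between memSize rounded up to its
 * granule (512 MB, or 256 MB when memSize is under 1 GB) and memSize.
 */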
uint64_t
ml_get_booter_memory_size(void)
{
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
		size = BootArgs->memSize;
		if (size < (2 * roundsize)) {
			roundsize >>= 1;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
		size -= BootArgs->memSize;
	}
	return size;
}

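/*
 * Offsets that relate the hardware timebase to the absolute and continuous
 * clocks; the continuous-time offset additionally accounts for time spent
 * asleep.
 */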
uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
	return rtclock_base_abstime + mach_absolutetime_asleep;
}

uint64_t
ml_get_time_since_reset(void)
{
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
}

uint64_t
ml_get_conttime_wake_time(void)
{
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;

	for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
				return true;
			}
		}
	}

	return false;
}