/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

#if !HAS_CONTINUOUS_HWCLOCK
extern uint64_t mach_absolutetime_asleep;
#else
extern uint64_t wake_abstime;
static uint64_t wake_conttime = UINT64_MAX;
#endif

static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
	__unused struct perfcontrol_thread_data *oncore,
	__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
	__unused void *unused)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {
		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
	}
}

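/*
 * Illustrative sketch (not part of the original file): how a performance
 * controller might register for these callouts. Only fields and constants
 * that this file actually reads are used; the handler functions
 * (my_offcore, my_oncore, my_csw, my_deadline_passed) are hypothetical,
 * and the exact layout of struct sched_perfcontrol_callbacks is assumed
 * from the field accesses above.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version              = SCHED_PERFCONTROL_CALLBACKS_VERSION_7,
 *		.offcore              = my_offcore,
 *		.oncore               = my_oncore,
 *		.context_switch       = NULL,  // keep the default no-op
 *		.max_runnable_latency = NULL,
 *		.work_interval_notify = NULL,
 *		.work_interval_ctl    = NULL,
 *		.deadline_passed      = my_deadline_passed,
 *		.csw                  = my_csw,
 *		.state_update         = NULL,
 *	};
 *
 *	// version must be at least SCHED_PERFCONTROL_CALLBACKS_VERSION_2, and
 *	// size_of_state must not exceed sizeof(struct perfcontrol_state),
 *	// or the registration asserts/panics.
 *	sched_perfcontrol_register_callbacks(&my_callbacks,
 *	    sizeof(struct perfcontrol_state));
 *
 * Passing NULL for callbacks resets every callout to its default no-op,
 * as the else branch above shows.
 */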

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
	os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
	       os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}
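/*
 * Illustrative sketch (not part of the original file): reading the callout
 * cost statistics gathered above. The averages only mean something once
 * perfcontrol_callout_stats_enabled has been set and at least one callout
 * of the given type has completed, since the divisor is the per-type
 * callout count. my_log_csw_callout_cost is a hypothetical caller.
 *
 *	static void
 *	my_log_csw_callout_cost(void)
 *	{
 *		uint64_t cycles = perfcontrol_callout_stat_avg(
 *			PERFCONTROL_CALLOUT_CONTEXT, PERFCONTROL_STAT_CYCLES);
 *		uint64_t instrs = perfcontrol_callout_stat_avg(
 *			PERFCONTROL_CALLOUT_CONTEXT, PERFCONTROL_STAT_INSTRS);
 *		kprintf("csw callout: ~%llu cycles, ~%llu instructions\n",
 *		    cycles, instrs);
 *	}
 */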

void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* MONOTONIC */

#if __arm64__
		old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
		new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
	}
}

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* MONOTONIC */

#if __arm64__
	thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = new_thread->task == kernel_task;
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}

void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id = thread->thread_id,
		.qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency = kwi_args->urgency,
		.flags = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start = kwi_args->start,
		.finish = kwi_args->finish,
		.deadline = kwi_args->deadline,
		.next_start = kwi_args->next_start,
		.create_flags = kwi_args->create_flags,
	};
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}

void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 *
 * Call ml_get_timebase() directly to avoid the extra overhead that newer
 * platforms enable in DEVELOPMENT kernel configurations.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	thread->machine.intmask_timestamp = ml_get_timebase();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms.
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms.
 */
void
ml_spin_debug_clear_self()
{
	ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
	uint64_t start;
	uint64_t now;

	start = thread->machine.intmask_timestamp;
	if (start != 0) {
		now = ml_get_timebase();

		if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
			mach_timebase_info_data_t timebase;
			clock_timebase_info(&timebase);

#ifndef KASAN
			/*
			 * Disable the actual panic for KASAN due to the overhead of KASAN itself; leave the rest of the
			 * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
			 */
			panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
		}
	}

	return;
}
#endif // INTERRUPT_MASKED_DEBUG

boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	thread_t thread;
	uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
	state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
	state = __builtin_arm_rsr("DAIF");
#endif
	if (enable && (state & INTERRUPT_MASK)) {
		assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts are currently masked; we will enable them (after finishing this check)
			thread = current_thread();
			ml_check_interrupts_disabled_duration(thread);
			thread->machine.intmask_timestamp = 0;
		}
#endif // INTERRUPT_MASKED_DEBUG
		if (get_preemption_level() == 0) {
			thread = current_thread();
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
#if __arm__
		__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
	} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
		__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts were enabled; we just masked them
			current_thread()->machine.intmask_timestamp = ml_get_timebase();
		}
#endif
	}
	return (state & INTERRUPT_MASK) == 0;
}
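/*
 * Illustrative sketch (not part of the original file): the usual
 * save/restore pattern around a short critical section. The return value
 * is the previous enable state, so it can be handed back to restore
 * whatever the caller found.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);  // mask, remember old state
 *	// ... work that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(istate);              // restore previous state
 */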

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state. We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive. But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(FALSE == ml_get_interrupts_enabled());
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
		size = BootArgs->memSize;
		if (size < (2 * roundsize)) {
			roundsize >>= 1;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
		size -= BootArgs->memSize;
	}
	return size;
}
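/*
 * Worked example of the rounding above (illustrative numbers only): with
 * memSizeActual == 0 and memSize == 992 MB, memSize is below 2 * 512 MB,
 * so roundsize halves to 256 MB; (size + roundsize - 1) & ~(roundsize - 1)
 * rounds 992 MB up to 1024 MB, and the function returns the 32 MB
 * difference rather than the full size.
 */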

uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	return 0;
#else
	return rtclock_base_abstime + mach_absolutetime_asleep;
#endif
}

uint64_t
ml_get_time_since_reset(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	if (wake_conttime == UINT64_MAX) {
		return UINT64_MAX;
	} else {
		return mach_continuous_time() - wake_conttime;
	}
#else
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
#endif
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
#if HAS_CONTINUOUS_HWCLOCK
	wake_conttime = wake_time;
#endif
}

uint64_t
ml_get_conttime_wake_time(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	/*
	 * For now, we will reconstitute the timebase value from
	 * cpu_timebase_init and use it as the wake time.
	 */
	return wake_abstime - ml_get_abstime_offset();
#else /* HAS_CONTINUOUS_HWCLOCK */
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
#endif /* HAS_CONTINUOUS_HWCLOCK */
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;

	for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
				return true;
			}
		}
	}

	return false;
}