[apple/xnu.git] osfmk/arm/machine_routines_common.c (blob 2cb596872c68aa5c824a1a9f784a6fc360424e9c)
/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
	__unused struct perfcontrol_thread_data *oncore,
	__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
	__unused void *unused)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
	}
}
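
/*
 * Illustrative sketch only (not part of this file): a performance-controller
 * client would fill in a callbacks structure and hand it to
 * sched_perfcontrol_register_callbacks(). The structure tag, the callback
 * function names (my_offcore, my_oncore, ...) and the client state type below
 * are hypothetical; only the field names and the version/size checks above
 * come from this file.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version              = SCHED_PERFCONTROL_CALLBACKS_VERSION_7,
 *		.offcore              = my_offcore,
 *		.oncore               = my_oncore,
 *		.context_switch       = my_context_switch,
 *		.csw                  = my_csw,
 *		.state_update         = my_state_update,
 *		.deadline_passed      = my_deadline_passed,
 *		.work_interval_notify = my_work_interval_notify,
 *		.work_interval_ctl    = my_work_interval_ctl,
 *		.max_runnable_latency = my_max_runnable_latency,
 *	};
 *
 *	// per-thread scratch must fit inside struct perfcontrol_state,
 *	// or the size check above panics
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_state));
 *
 *	// passing NULL restores every callback to its default stub
 *	sched_perfcontrol_register_callbacks(NULL, 0);
 */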

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
	atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
	atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type];
}
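
/*
 * Illustrative sketch only (not part of this file): with the stats gate
 * enabled, the totals accumulated above can be averaged per callout type,
 * e.g. average CPU cycles spent in the context-switch callout. The local
 * variable name is hypothetical.
 *
 *	perfcontrol_callout_stats_enabled = 1;
 *	...
 *	uint64_t avg_csw_cycles =
 *	    perfcontrol_callout_stat_avg(PERFCONTROL_CALLOUT_CONTEXT,
 *	        PERFCONTROL_STAT_CYCLES);
 *
 * Note that the divide above assumes at least one callout of that type has
 * been recorded while stats were enabled.
 */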

void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* MONOTONIC */

#if __arm64__
		old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
		new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
	}
}

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* MONOTONIC */

#if __arm64__
	thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = new_thread->task == kernel_task;
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}


void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id = thread->thread_id,
		.qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency = kwi_args->urgency,
		.flags = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start = kwi_args->start,
		.finish = kwi_args->finish,
		.deadline = kwi_args->deadline,
		.next_start = kwi_args->next_start,
		.create_flags = kwi_args->create_flags,
	};
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}


void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. Alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	thread->machine.intmask_timestamp = mach_absolute_time();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self()
{
	ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
	uint64_t start;
	uint64_t now;

	start = thread->machine.intmask_timestamp;
	if (start != 0) {
		now = mach_absolute_time();

		if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
			mach_timebase_info_data_t timebase;
			clock_timebase_info(&timebase);

#ifndef KASAN
			/*
			 * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
			 * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
			 */
			panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
		}
	}

	return;
}
#endif // INTERRUPT_MASKED_DEBUG
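
/*
 * Illustrative sketch only (not part of this file): per the comments above,
 * a path that deliberately keeps interrupts masked across a reschedule would
 * restart the watchdog on the thread coming back on core so its time
 * off-core is not counted toward the timeout. The variable name below is
 * hypothetical.
 *
 *	if (interrupt_masked_debug) {
 *		ml_spin_debug_reset(thread_on_core);
 *	}
 *
 * A thread that should not be checked at all would instead call
 * ml_spin_debug_clear_self() to zero its own timestamp.
 */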


boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	thread_t thread;
	uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
	state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
	state = __builtin_arm_rsr("DAIF");
#endif
	if (enable && (state & INTERRUPT_MASK)) {
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts are currently masked, we will enable them (after finishing this check)
			thread = current_thread();
			ml_check_interrupts_disabled_duration(thread);
			thread->machine.intmask_timestamp = 0;
		}
#endif // INTERRUPT_MASKED_DEBUG
		if (get_preemption_level() == 0) {
			thread = current_thread();
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
#if __arm__
		__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
	} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
		__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts were enabled, we just masked them
			current_thread()->machine.intmask_timestamp = mach_absolute_time();
		}
#endif
	}
	return (state & INTERRUPT_MASK) == 0;
}
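
/*
 * Illustrative sketch only (not part of this file): the return value is the
 * previous interrupt-enable state, so callers typically bracket a critical
 * section like this:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... code that must run with IRQ/FIQ masked ...
 *	(void) ml_set_interrupts_enabled(istate);
 */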

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state. We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive. But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}
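
/*
 * Illustrative sketch only (not part of this file): the value returned above
 * is the distance from the current stack pointer down to the base of the
 * active stack (interrupt stack or the thread's kernel stack), so a deeply
 * recursive path could guard itself roughly as follows. The threshold name
 * is hypothetical.
 *
 *	if (ml_stack_remaining() < MY_MIN_STACK_BYTES) {
 *		// bail out or fall back to an iterative path instead of recursing
 *	}
 */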

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(FALSE == ml_get_interrupts_enabled());
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
		size = BootArgs->memSize;
		if (size < (2 * roundsize)) {
			roundsize >>= 1;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
		size -= BootArgs->memSize;
	}
	return size;
}
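
/*
 * Worked example of the rounding above (illustrative; the figures are made
 * up): if the booter reports memSize = 0xBC000000 (3008 MB) and memSizeActual
 * is 0, the size is rounded up to the next 512 MB boundary (3072 MB) and the
 * function returns the 64 MB difference between the rounded total and the
 * reported memSize. Boot-args totals smaller than 1 GB use a 256 MB rounding
 * granule instead.
 */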

uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
	return rtclock_base_abstime + mach_absolutetime_asleep;
}

uint64_t
ml_get_time_since_reset(void)
{
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
}

uint64_t
ml_get_conttime_wake_time(void)
{
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;

	for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
				return true;
			}
		}
	}

	return false;
}
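
/*
 * Illustrative sketch only (not part of this file): because the scan above is
 * unsynchronized, callers should treat the answer as a hint. A diagnostic
 * path might branch on it like this, with the variable name being
 * hypothetical:
 *
 *	if (ml_snoop_thread_is_on_core(target)) {
 *		// target was observed on some CPU at the time of the scan;
 *		// it may already have been switched out by the time we act
 *	}
 */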