/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

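/*
 * Default no-op implementations for the performance-control callouts below.
 * Keeping the callback pointers populated with these stubs means call sites
 * never need to NULL-check before invoking a callback, and comparing against
 * the default is a cheap way to detect that no client has registered.
 */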
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
	__unused struct perfcontrol_thread_data *oncore,
	__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
	__unused void *unused)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

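/*
 * Called by the platform performance controller (typically the CLPC kext) to
 * install its callouts.  The callbacks structure is versioned: fields added
 * in later versions are only consulted when the caller advertises a version
 * that includes them.  Passing NULL restores the no-op defaults.
 */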
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
	}
}

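/*
 * Illustrative registration (a sketch only -- my_offcore/my_oncore and
 * struct my_state are hypothetical client symbols, not part of this file):
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version = SCHED_PERFCONTROL_CALLBACKS_VERSION_7,
 *		.offcore = my_offcore,
 *		.oncore  = my_oncore,
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_state));
 *
 * Callbacks left NULL keep their no-op defaults, and size_of_state must not
 * exceed sizeof(struct perfcontrol_state).
 */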

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

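/*
 * Optional accounting of the perfcontrol callouts themselves.  When
 * perfcontrol_callout_stats_enabled is set, the fixed cycle/instruction
 * counters are sampled around each callout and the deltas accumulated per
 * callout type; perfcontrol_callout_stat_avg() reports the per-invocation
 * average.
 */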
int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline
bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline
void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
	os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
	       os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}

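/*
 * Context-switch callout, invoked when the scheduler switches from "old" to
 * "new".  The legacy switch callback receives only the two opaque per-thread
 * perfcontrol states; the newer csw callback additionally receives per-thread
 * scheduling data and the CPU's fixed counter values.
 */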
void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* MONOTONIC */

#if __arm64__
		old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
		new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
	}
}

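/*
 * Per-thread state-update callout for scheduler events that are not full
 * context switches (e.g. quantum expiry), delivered on the current CPU.
 */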
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* MONOTONIC */

#if __arm64__
	thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

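/*
 * Notifies the performance controller that new_thread is about to start
 * running on this core, along with its urgency, QoS class and how long it
 * waited to be scheduled.
 */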
void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = new_thread->task == kernel_task;
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

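/*
 * Notifies the performance controller that old_thread is leaving this core,
 * indicating whether the thread is terminating.
 */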
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}


void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

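/*
 * Forwards the parameters of a kernel work interval (start, finish, deadline,
 * urgency and flags) to the performance controller's work_interval_notify
 * callback on behalf of the given thread.
 */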
void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args* kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id = thread->thread_id,
		.qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency = kwi_args->urgency,
		.flags = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start = kwi_args->start,
		.finish = kwi_args->finish,
		.deadline = kwi_args->deadline,
		.next_start = kwi_args->next_start,
		.create_flags = kwi_args->create_flags,
	};
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}


void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 *
 * Call ml_get_timebase() directly to avoid the extra overhead that is
 * enabled on newer platforms in DEVELOPMENT kernel configurations.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	thread->machine.intmask_timestamp = ml_get_timebase();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self()
{
	ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
	uint64_t start;
	uint64_t now;

	start = thread->machine.intmask_timestamp;
	if (start != 0) {
		now = ml_get_timebase();

		if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
			mach_timebase_info_data_t timebase;
			clock_timebase_info(&timebase);

#ifndef KASAN
			/*
			 * Disable the actual panic for KASAN due to the overhead of KASAN itself; leave the rest of
			 * the mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
			 */
			panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
		}
	}

	return;
}
#endif // INTERRUPT_MASKED_DEBUG


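/*
 * Enable or disable IRQ and FIQ delivery on the current CPU and return
 * whether interrupts were enabled beforehand.  When enabling from a
 * preemptible context, any pending urgent AST is handled first.  This is
 * also where the interrupt-masked-debug timestamp is started and checked.
 */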
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	thread_t thread;
	uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
	state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
	state = __builtin_arm_rsr("DAIF");
#endif
	if (enable && (state & INTERRUPT_MASK)) {
		assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts are currently masked, we will enable them (after finishing this check)
			thread = current_thread();
			ml_check_interrupts_disabled_duration(thread);
			thread->machine.intmask_timestamp = 0;
		}
#endif // INTERRUPT_MASKED_DEBUG
		if (get_preemption_level() == 0) {
			thread = current_thread();
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
#if __arm__
		__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
	} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
		__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts were enabled, we just masked them
			current_thread()->machine.intmask_timestamp = ml_get_timebase();
		}
#endif
	}
	return (state & INTERRUPT_MASK) == 0;
}

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state. We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive. But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(FALSE == ml_get_interrupts_enabled());
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return ml_quiescing;
}

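/*
 * If the boot args supply memSizeActual, return it directly.  Otherwise
 * derive a value from memSize: round it up to the next 512MB boundary (256MB
 * for configurations smaller than 1GB) and return the amount added by the
 * rounding.  Worked example: memSize = 0x5E000000 (1504MB) rounds up to
 * 0x60000000 (1536MB), so the result is 0x02000000 (32MB).
 */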
uint64_t
ml_get_booter_memory_size(void)
{
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
		size = BootArgs->memSize;
		if (size < (2 * roundsize)) {
			roundsize >>= 1;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
		size -= BootArgs->memSize;
	}
	return size;
}

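/*
 * Time offset accessors: absolute time is offset from the raw hardware
 * timebase by rtclock_base_abstime, while continuous time additionally
 * includes the accumulated time spent asleep and therefore keeps advancing
 * across sleep.
 */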
uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
	return rtclock_base_abstime + mach_absolutetime_asleep;
}

uint64_t
ml_get_time_since_reset(void)
{
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
}

uint64_t
ml_get_conttime_wake_time(void)
{
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;

	for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
				return true;
			}
		}
	}

	return false;
}