#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
+#include <machine/atomic.h>
+#include <pexpert/pexpert.h>
#if MONOTONIC
#include <kern/monotonic.h>
{
}
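+/*
+ * Default no-op implementations of the performance-controller callbacks,
+ * used until a real callback is registered via sched_perfcontrol_register_callbacks().
+ */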
-static void
+static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}
static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
- perfcontrol_work_interval_t work_interval __unused)
+ perfcontrol_work_interval_t work_interval __unused)
{
}
static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
- perfcontrol_work_interval_instance_t instance __unused)
+ perfcontrol_work_interval_instance_t instance __unused)
{
}
static void
sched_perfcontrol_csw_default(
- __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
- __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
- __unused struct perfcontrol_thread_data *oncore,
+ __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
+ __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
+ __unused struct perfcontrol_thread_data *oncore,
__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}
if (callbacks) {
-
if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
if (callbacks->work_interval_ctl != NULL) {
sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
} else {
sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
}
-
+
if (callbacks->work_interval_notify != NULL) {
sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
} else {
static void
-machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
- thread_t thread,
- uint64_t same_pri_latency)
+machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
+ thread_t thread,
+ uint64_t same_pri_latency)
{
bzero(data, sizeof(struct perfcontrol_thread_data));
data->perfctl_class = thread_get_perfcontrol_class(thread);
#if MONOTONIC
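+/*
+ * Snapshot the fixed monotonic counters (cycles and, where available,
+ * instructions) before a perfcontrol callout; returns false when stats
+ * collection is disabled.  perfcontrol_callout_counters_end() accumulates
+ * the per-callout deltas.
+ */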
static inline
-bool perfcontrol_callout_counters_begin(uint64_t *counters)
+bool
+perfcontrol_callout_counters_begin(uint64_t *counters)
{
- if (!perfcontrol_callout_stats_enabled)
- return false;
- mt_fixed_counts(counters);
- return true;
+ if (!perfcontrol_callout_stats_enabled) {
+ return false;
+ }
+ mt_fixed_counts(counters);
+ return true;
}
static inline
-void perfcontrol_callout_counters_end(uint64_t *start_counters,
- perfcontrol_callout_type_t type)
+void
+perfcontrol_callout_counters_end(uint64_t *start_counters,
+ perfcontrol_callout_type_t type)
{
- uint64_t end_counters[MT_CORE_NFIXED];
- mt_fixed_counts(end_counters);
- atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
- end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
+ uint64_t end_counters[MT_CORE_NFIXED];
+ mt_fixed_counts(end_counters);
+ os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
+ end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
- atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
- end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
+ os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
+ end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
- atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
+ os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */
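+/*
+ * Average value of the given stat (cycles or instructions) per callout of
+ * the given type; returns 0 when stats collection is disabled.
+ */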
-uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
- perfcontrol_callout_stat_t stat)
+uint64_t
+perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
+ perfcontrol_callout_stat_t stat)
{
- if (!perfcontrol_callout_stats_enabled)
- return 0;
- return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]);
+ if (!perfcontrol_callout_stats_enabled) {
+ return 0;
+ }
+ return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
+ os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}
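+/*
+ * Context-switch notification: if the performance controller registered a
+ * context-switch callback, populate off-core/on-core thread data plus CPU
+ * counters and hand them to the callback.
+ */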
void
machine_switch_perfcontrol_context(perfcontrol_event event,
- uint64_t timestamp,
- uint32_t flags,
- uint64_t new_thread_same_pri_latency,
- thread_t old,
- thread_t new)
+ uint64_t timestamp,
+ uint32_t flags,
+ uint64_t new_thread_same_pri_latency,
+ thread_t old,
+ thread_t new)
{
if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
struct perfcontrol_thread_data offcore, oncore;
machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
machine_switch_populate_perfcontrol_thread_data(&oncore, new,
- new_thread_same_pri_latency);
+ new_thread_same_pri_latency);
machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);
#if MONOTONIC
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
- &offcore, &oncore, &cpu_counters, NULL);
+ &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
- if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
+ if (ctrs_enabled) {
+ perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
+ }
#endif /* MONOTONIC */
#if __arm64__
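+/*
+ * Deliver a per-thread state-update event to the registered state_update
+ * callback, if any.
+ */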
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
- uint64_t timestamp,
- uint32_t flags,
- thread_t thread)
+ uint64_t timestamp,
+ uint32_t flags,
+ thread_t thread)
{
- if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default)
+ if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
return;
+ }
uint32_t cpu_id = (uint32_t)cpu_number();
struct perfcontrol_thread_data data;
machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);
uint64_t counters[MT_CORE_NFIXED];
bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
- sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
- &data, NULL);
+ sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
+ &data, NULL);
#if MONOTONIC
- if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
+ if (ctrs_enabled) {
+ perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
+ }
#endif /* MONOTONIC */
#if __arm64__
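+/*
+ * Notify the performance controller that a thread is going on core, including
+ * its urgency, QoS class, and scheduling latencies.
+ */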
void
machine_thread_going_on_core(thread_t new_thread,
- int urgency,
- uint64_t sched_latency,
- uint64_t same_pri_latency,
- uint64_t timestamp)
+ thread_urgency_t urgency,
+ uint64_t sched_latency,
+ uint64_t same_pri_latency,
+ uint64_t timestamp)
{
-
- if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default)
+ if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
return;
+ }
struct going_on_core on_core;
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);
on_core.energy_estimate_nj = 0;
on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
on_core.urgency = urgency;
- on_core.is_32_bit = thread_is_64bit(new_thread) ? FALSE : TRUE;
+ on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
on_core.is_kernel_thread = new_thread->task == kernel_task;
on_core.scheduling_latency = sched_latency;
on_core.start_time = timestamp;
#endif /* MONOTONIC */
sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
- if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
+ if (ctrs_enabled) {
+ perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
+ }
#endif /* MONOTONIC */
#if __arm64__
}
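+/*
+ * Notify the performance controller that a thread is coming off core, and
+ * whether the thread is terminating.
+ */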
void
-machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch)
+machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
+ uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
- if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default)
+ if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
return;
+ }
struct going_off_core off_core;
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);
#endif /* MONOTONIC */
sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
- if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
+ if (ctrs_enabled) {
+ perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
+ }
#endif /* MONOTONIC */
#if __arm64__
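+/*
+ * Report the maximum runnable latencies for the background, default, and
+ * realtime urgency bands to the performance controller.
+ */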
void
machine_max_runnable_latency(uint64_t bg_max_latency,
- uint64_t default_max_latency,
- uint64_t realtime_max_latency)
+ uint64_t default_max_latency,
+ uint64_t realtime_max_latency)
{
- if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default)
+ if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
return;
+ }
struct perfcontrol_max_runnable_latency latencies = {
.max_scheduling_latencies = {
[THREAD_URGENCY_NONE] = 0,
void
machine_work_interval_notify(thread_t thread,
- struct kern_work_interval_args* kwi_args)
+ struct kern_work_interval_args* kwi_args)
{
- if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default)
+ if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
return;
+ }
perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
struct perfcontrol_work_interval work_interval = {
.thread_id = thread->thread_id,
void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
- if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default)
+ if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
sched_perfcontrol_deadline_passed(deadline);
+ }
}
#if INTERRUPT_MASKED_DEBUG
/*
* ml_spin_debug_reset()
* Reset the timestamp on a thread that has been unscheduled
- * to avoid false alarms. Alarm will go off if interrupts are held
+ * to avoid false alarms. Alarm will go off if interrupts are held
* disabled for too long, starting from now.
+ *
+ * Call ml_get_timebase() directly to avoid the extra mach_absolute_time()
+ * overhead that DEVELOPMENT kernel configurations enable on newer platforms.
*/
void
ml_spin_debug_reset(thread_t thread)
{
- thread->machine.intmask_timestamp = mach_absolute_time();
+ thread->machine.intmask_timestamp = ml_get_timebase();
}
/*
void
ml_spin_debug_clear(thread_t thread)
{
- thread->machine.intmask_timestamp = 0;
+ thread->machine.intmask_timestamp = 0;
}
/*
void
ml_check_interrupts_disabled_duration(thread_t thread)
{
- uint64_t start;
- uint64_t now;
+ uint64_t start;
+ uint64_t now;
- start = thread->machine.intmask_timestamp;
- if (start != 0) {
- now = mach_absolute_time();
+ start = thread->machine.intmask_timestamp;
+ if (start != 0) {
+ now = ml_get_timebase();
- if ((now - start) > interrupt_masked_timeout) {
- mach_timebase_info_data_t timebase;
- clock_timebase_info(&timebase);
+ if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
+ mach_timebase_info_data_t timebase;
+ clock_timebase_info(&timebase);
#ifndef KASAN
- /*
- * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
- * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
- */
- panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer)/timebase.denom));
+ /*
+ * Disable the actual panic under KASAN because of KASAN's own overhead; leave
+ * the rest of the mechanism enabled so that KASAN can still catch bugs in the
+ * mechanism itself.
+ */
+ panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
- }
- }
+ }
+ }
- return;
+ return;
}
#endif // INTERRUPT_MASKED_DEBUG
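+/*
+ * Enable or mask IRQ/FIQ delivery.  When enabling with no preemption held,
+ * urgent ASTs pending on this CPU are handled first; with
+ * INTERRUPT_MASKED_DEBUG the masked duration is checked/recorded.  Returns
+ * whether interrupts were previously enabled.
+ */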
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
- thread_t thread;
- uint64_t state;
+ thread_t thread;
+ uint64_t state;
#if __arm__
#define INTERRUPT_MASK PSR_IRQF
- state = __builtin_arm_rsr("cpsr");
+ state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
- state = __builtin_arm_rsr("DAIF");
+ state = __builtin_arm_rsr("DAIF");
#endif
- if (enable) {
+ if (enable && (state & INTERRUPT_MASK)) {
+ assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
- if (interrupt_masked_debug && (state & INTERRUPT_MASK)) {
- // Interrupts are currently masked, we will enable them (after finishing this check)
- thread = current_thread();
- ml_check_interrupts_disabled_duration(thread);
- thread->machine.intmask_timestamp = 0;
- }
-#endif // INTERRUPT_MASKED_DEBUG
- if (get_preemption_level() == 0) {
- thread = current_thread();
- while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
+ if (interrupt_masked_debug) {
+ // Interrupts are currently masked; we will enable them (after finishing this check)
+ thread = current_thread();
+ ml_check_interrupts_disabled_duration(thread);
+ thread->machine.intmask_timestamp = 0;
+ }
+#endif // INTERRUPT_MASKED_DEBUG
+ if (get_preemption_level() == 0) {
+ thread = current_thread();
+ while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
- uintptr_t up = arm_user_protect_begin(thread);
+ uintptr_t up = arm_user_protect_begin(thread);
#endif
- ast_taken_kernel();
+ ast_taken_kernel();
#if __ARM_USER_PROTECT__
- arm_user_protect_end(thread, up, FALSE);
+ arm_user_protect_end(thread, up, FALSE);
#endif
- }
- }
+ }
+ }
#if __arm__
- __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
+ __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
- __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
+ __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
- } else {
+ } else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
- __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
+ __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
- __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
+ __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
- if (interrupt_masked_debug && ((state & INTERRUPT_MASK) == 0)) {
- // Interrupts were enabled, we just masked them
- current_thread()->machine.intmask_timestamp = mach_absolute_time();
- }
+ if (interrupt_masked_debug) {
+ // Interrupts were enabled; we just masked them
+ current_thread()->machine.intmask_timestamp = ml_get_timebase();
+ }
#endif
- }
- return ((state & INTERRUPT_MASK) == 0);
+ }
+ return (state & INTERRUPT_MASK) == 0;
+}
+
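+/*
+ * Early-boot variant; here it simply forwards to ml_set_interrupts_enabled().
+ */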
+boolean_t
+ml_early_set_interrupts_enabled(boolean_t enable)
+{
+ return ml_set_interrupts_enabled(enable);
+}
+
+/*
+ * Routine: ml_at_interrupt_context
+ * Function: Check if running at interrupt context
+ */
+boolean_t
+ml_at_interrupt_context(void)
+{
+ /* Do not use a stack-based check here, as the top-level exception handler
+ * is free to use some other stack besides the per-CPU interrupt stack.
+ * Interrupts should always be disabled if we're at interrupt context.
+ * Check that first, as we may be in a preemptible non-interrupt context, in
+ * which case we could be migrated to a different CPU between obtaining
+ * the per-cpu data pointer and loading cpu_int_state. We then might end
+ * up checking the interrupt state of a different CPU, resulting in a false
+ * positive. But if interrupts are disabled, we also know we cannot be
+ * preempted. */
+ return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
+}
+
+vm_offset_t
+ml_stack_remaining(void)
+{
+ uintptr_t local = (uintptr_t) &local;
+ vm_offset_t intstack_top_ptr;
+
+ /* Since this is a stack-based check, we don't need to worry about
+ * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
+ * then the sp should never be within any CPU's interrupt stack unless
+ * something has gone horribly wrong. */
+ intstack_top_ptr = getCpuDatap()->intstack_top;
+ if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
+ return local - (intstack_top_ptr - INTSTACK_SIZE);
+ } else {
+ return local - current_thread()->kernel_stack;
+ }
}
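+/*
+ * Track whether the system is quiescing; both accessors must be called with
+ * interrupts disabled, as asserted.
+ */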
static boolean_t ml_quiescing;
-void ml_set_is_quiescing(boolean_t quiescing)
+void
+ml_set_is_quiescing(boolean_t quiescing)
{
- assert(FALSE == ml_get_interrupts_enabled());
- ml_quiescing = quiescing;
+ assert(FALSE == ml_get_interrupts_enabled());
+ ml_quiescing = quiescing;
}
-boolean_t ml_is_quiescing(void)
+boolean_t
+ml_is_quiescing(void)
{
- assert(FALSE == ml_get_interrupts_enabled());
- return (ml_quiescing);
+ assert(FALSE == ml_get_interrupts_enabled());
+ return ml_quiescing;
}
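+/*
+ * If the booter reported memSizeActual, return it; otherwise estimate it by
+ * rounding memSize up to the next 512MB boundary (256MB for configurations
+ * under 1GB) and returning the amount added by the rounding.
+ */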
-uint64_t ml_get_booter_memory_size(void)
+uint64_t
+ml_get_booter_memory_size(void)
{
- enum { kRoundSize = 512*1024*1024ULL };
uint64_t size;
+ uint64_t roundsize = 512 * 1024 * 1024ULL;
size = BootArgs->memSizeActual;
- if (!size)
- {
+ if (!size) {
size = BootArgs->memSize;
- size = (size + kRoundSize - 1) & ~(kRoundSize - 1);
+ if (size < (2 * roundsize)) {
+ roundsize >>= 1;
+ }
+ size = (size + roundsize - 1) & ~(roundsize - 1);
size -= BootArgs->memSize;
- }
- return (size);
+ }
+ return size;
}
uint64_t
uint64_t
ml_get_conttime_offset(void)
{
- return (rtclock_base_abstime + mach_absolutetime_asleep);
+ return rtclock_base_abstime + mach_absolutetime_asleep;
}
uint64_t
return ml_get_hwclock();
}
+void
+ml_set_reset_time(__unused uint64_t wake_time)
+{
+}
+
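+/*
+ * The continuous-time wake time is simply the continuous time offset.
+ */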
uint64_t
ml_get_conttime_wake_time(void)
{
return ml_get_conttime_offset();
}
+/*
+ * ml_snoop_thread_is_on_core(thread_t thread)
+ * Check if the given thread is currently on core. This function does not take
+ * locks, disable preemption, or otherwise guarantee synchronization. The
+ * result should be considered advisory.
+ */
+bool
+ml_snoop_thread_is_on_core(thread_t thread)
+{
+ unsigned int cur_cpu_num = 0;
+
+ for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
+ if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
+ if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}