assert(host_priv == &realhost);
+#if DEVELOPMENT || DEBUG
if (options & HOST_REBOOT_DEBUGGER) {
Debugger("Debugger");
return (KERN_SUCCESS);
}
+#endif
if (options & HOST_REBOOT_UPSDELAY) {
// UPS power cutoff path
/* pset lock dropped */
	/*
	 * Continue processor shutdown in shutdown context.
	 *
	 * We save the current context in machine_processor_shutdown in such a way
	 * that when this thread is next invoked it will return from here instead of
	 * from the machine_switch_context() in thread_invoke like a normal context switch.
	 *
	 * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
	 * thread invoked back to this one. (Usually, it's another processor's idle thread.)
	 *
	 * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
	 * with thread_invoke.
	 */
thread_bind(prev);
old_thread = machine_processor_shutdown(self, processor_offline, processor);
}
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
	processor_t processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	thread_t old_thread = processor->active_thread;
	thread_t new_thread = processor->idle_thread;

	/* Hand the processor to its idle thread and reset scheduling state. */
	processor->active_thread = new_thread;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->starting_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->deadline = UINT64_MAX;
	new_thread->last_processor = processor;

	uint64_t ctime = mach_absolute_time();

	processor->last_dispatch = ctime;
	old_thread->last_run_time = ctime;

	/* Update processor->thread_timer and ->kernel_timer to point to the new thread */
	thread_timer_event(ctime, &new_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;

	/* Stop accumulating into the outgoing run-state timer. */
	timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

	/* Trace the hand-off exactly as thread_invoke() would. */
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
	    old_thread->reason, (uintptr_t)thread_tid(new_thread),
	    old_thread->sched_pri, new_thread->sched_pri, 0);

	machine_set_current_thread(new_thread);
	/* void function: the stray 'return (KERN_SUCCESS);' left by the patch was invalid C. */
}
+
+#if CONFIG_DTRACE
+#include <mach/sdt.h>
+#endif
+
+unsigned long long ml_io_read(uintptr_t vaddr, int size) {
+ unsigned long long result = 0;
+ unsigned char s1;
+ unsigned short s2;
+
+#if defined(__x86_64__)
+ uint64_t sabs, eabs;
+ boolean_t istate, timeread = FALSE;
+#if DEVELOPMENT || DEBUG
+ pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+ if (__improbable(reportphyreaddelayabs != 0)) {
+ istate = ml_set_interrupts_enabled(FALSE);
+ sabs = mach_absolute_time();
+ timeread = TRUE;
+ }
+#endif /* x86_64 */
+
+ switch (size) {
+ case 1:
+ s1 = *(volatile unsigned char *)vaddr;
+ result = s1;
+ break;
+ case 2:
+ s2 = *(volatile unsigned short *)vaddr;
+ result = s2;
+ break;
+ case 4:
+ result = *(volatile unsigned int *)vaddr;
+ break;
+ case 8:
+ result = *(volatile unsigned long long *)vaddr;
+ break;
+ default:
+ panic("Invalid size %d for ml_io_read(%p)\n", size, (void *)vaddr);
+ break;
+ }
+
+#if defined(__x86_64__)
+ if (__improbable(timeread == TRUE)) {
+ eabs = mach_absolute_time();
+ (void)ml_set_interrupts_enabled(istate);
+
+ if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+ if (phyreadpanic) {
+ panic("Read from IO virtual addr 0x%lx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr, (eabs - sabs), result, sabs, eabs, reportphyreaddelayabs);
+ }
+#if CONFIG_DTRACE
+ DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs),
+ uint64_t, vaddr, uint32_t, size);
+#endif /* CONFIG_DTRACE */
+ }
+ }
+#endif /* x86_64 */
+ return result;
+}
+
/* Convenience wrapper: 1-byte MMIO read via ml_io_read(). */
unsigned int
ml_io_read8(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 1);
}
+
/* Convenience wrapper: 2-byte MMIO read via ml_io_read(). */
unsigned int
ml_io_read16(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 2);
}
+
/* Convenience wrapper: 4-byte MMIO read via ml_io_read(). */
unsigned int
ml_io_read32(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 4);
}
+
/* Convenience wrapper: 8-byte MMIO read via ml_io_read(). */
unsigned long long
ml_io_read64(uintptr_t vaddr)
{
	return ml_io_read(vaddr, 8);
}