/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/misc_protos.h>
#include <kern/machine.h>
#include <mach/processor_info.h>
+#include <i386/pmap.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
-#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpu_threads.h>
-#include <i386/rtclock.h>
+#include <i386/rtclock_protos.h>
+#include <i386/cpuid.h>
+#if CONFIG_VMX
+#include <i386/vmx/vmx_cpu.h>
+#endif
#include <vm/vm_kern.h>
-#include "cpuid.h"
+#include <kern/timer_call.h>
struct processor processor_master;
{
cpu_data_t *cdp = current_cpu_datap();
- i386_deactivate_cpu();
-
PE_cpu_machine_quiesce(cdp->cpu_id);
cpu_thread_halt();
{
cpu_data_t *cdp = current_cpu_datap();
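+	/* Set up this CPU's local rtclock timer queue; EndOfAllTime means no deadline is armed */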
+ timer_call_queue_init(&cdp->rtclock_timer.queue);
+ cdp->rtclock_timer.deadline = EndOfAllTime;
+
cdp->cpu_type = cpuid_cputype();
cdp->cpu_subtype = cpuid_cpusubtype();
int cpu)
{
cpu_data_t *cdp = cpu_datap(cpu);
+ boolean_t intrs_enabled;
+ uint64_t tsc_timeout;
- simple_lock(&x86_topo_lock);
+ /*
+ * Wait until the CPU indicates that it has stopped.
+ * Disable interrupts while the topo lock is held -- arguably
+ * this should always be done, but in this instance a long-running
+ * interrupt occurring here could otherwise lead to a timeout.
+ */
+ intrs_enabled = ml_set_interrupts_enabled(FALSE);
+ mp_safe_spin_lock(&x86_topo_lock);
+ /* Set a generous timeout of several seconds (in TSC ticks) */
+ tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
while ((cdp->lcpu.state != LCPU_HALT)
- && (cdp->lcpu.state != LCPU_OFF)) {
+ && (cdp->lcpu.state != LCPU_OFF)
+ && !cdp->lcpu.stopped) {
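+	/* Drop the lock and restore the saved interrupt state between polls */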
simple_unlock(&x86_topo_lock);
+ ml_set_interrupts_enabled(intrs_enabled);
cpu_pause();
- simple_lock(&x86_topo_lock);
+ if (rdtsc64() > tsc_timeout)
+ panic("cpu_exit_wait(%d) timeout", cpu);
+ ml_set_interrupts_enabled(FALSE);
+ mp_safe_spin_lock(&x86_topo_lock);
}
simple_unlock(&x86_topo_lock);
+ ml_set_interrupts_enabled(intrs_enabled);
}
void
PE_cpu_machine_init(cdp->cpu_id, !cdp->cpu_boot_complete);
cdp->cpu_boot_complete = TRUE;
cdp->cpu_running = TRUE;
-#if 0
- if (cpu_datap(cpu)->hibernate)
- {
- cpu_datap(cpu)->hibernate = 0;
- hibernate_machine_init();
- }
-#endif
ml_init_interrupt();
- /* for every CPU, get the VT specs */
- vmx_get_specs();
+#if CONFIG_VMX
+ /* initialize VMX for every CPU */
+ vmx_cpu_init();
+#endif
}
processor_t
if (is_boot_cpu)
return &processor_master;
- ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc));
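+	/* Tag the allocation (VM_KERN_MEMORY_OSFMK) for kernel memory accounting */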
+ ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc), VM_KERN_MEMORY_OSFMK);
if (ret != KERN_SUCCESS)
return NULL;