/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/misc_protos.h>
#include <kern/machine.h>
#include <mach/processor_info.h>
-#include <i386/mp.h>
+#include <i386/pmap.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
-#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpu_threads.h>
+#include <i386/rtclock_protos.h>
+#include <i386/cpuid.h>
+#if CONFIG_VMX
+#include <i386/vmx/vmx_cpu.h>
+#endif
#include <vm/vm_kern.h>
-
+#include <kern/timer_call.h>
struct processor processor_master;
processor_info_t info,
unsigned int count)
{
- printf("cpu_control(%d,0x%x,%d) not implemented\n",
+ printf("cpu_control(%d,%p,%d) not implemented\n",
slot_num, info, count);
return (KERN_FAILURE);
}
processor_info_t info,
unsigned int *count)
{
- printf("cpu_info(%d,%d,0x%x,0x%x) not implemented\n",
+ printf("cpu_info(%d,%d,%p,%p) not implemented\n",
flavor, slot_num, info, count);
return (KERN_FAILURE);
}
void
cpu_sleep(void)
{
+	/* Quiesce this CPU via the platform expert, then halt its thread. */
- cpu_data_t *proc_info = current_cpu_datap();
+ cpu_data_t *cdp = current_cpu_datap();
- PE_cpu_machine_quiesce(proc_info->cpu_id);
+ PE_cpu_machine_quiesce(cdp->cpu_id);
cpu_thread_halt();
}
{
cpu_data_t *cdp = current_cpu_datap();
-#ifdef MACH_BSD
- /* FIXME */
- cdp->cpu_type = CPU_TYPE_I386;
- cdp->cpu_subtype = CPU_SUBTYPE_PENTPRO;
-#else
- cdp->cpu_type = cpuid_cputype(0);
- cdp->cpu_subtype = CPU_SUBTYPE_AT386;
-#endif
- cdp->cpu_running = TRUE;
+ timer_call_queue_init(&cdp->rtclock_timer.queue);
+ cdp->rtclock_timer.deadline = EndOfAllTime;
+
+ cdp->cpu_type = cpuid_cputype();
+ cdp->cpu_subtype = cpuid_cpusubtype();
+
+ i386_activate_cpu();
}
kern_return_t
if (cpu == cpu_number()) {
cpu_machine_init();
return KERN_SUCCESS;
- } else {
+ }
+
+ /*
+ * Try to bring the CPU back online without a reset.
+ * If the fast restart doesn't succeed, fall back to
+ * the slow way.
+ */
+ ret = intel_startCPU_fast(cpu);
+ if (ret != KERN_SUCCESS) {
/*
* Should call out through PE.
* But take the shortcut here.
*/
ret = intel_startCPU(cpu);
- return(ret);
}
+
+ if (ret != KERN_SUCCESS)
+ kprintf("cpu: cpu_start(%d) returning failure!\n", cpu);
+
+ return(ret);
}
void
cpu_exit_wait(
- __unused int cpu)
+ int cpu)
{
+ cpu_data_t *cdp = cpu_datap(cpu);
+ boolean_t intrs_enabled;
+ uint64_t tsc_timeout;
+
+ /*
+ * Wait until the CPU indicates that it has stopped.
+ * Disable interrupts while the topo lock is held -- arguably
+ * this should always be done but in this instance it can lead to
+ * a timeout if a long-running interrupt were to occur here.
+ */
+ intrs_enabled = ml_set_interrupts_enabled(FALSE);
+ mp_safe_spin_lock(&x86_topo_lock);
+ /* Set a generous timeout of several seconds (in TSC ticks) */
+ tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000);
+ while ((cdp->lcpu.state != LCPU_HALT)
+ && (cdp->lcpu.state != LCPU_OFF)
+ && !cdp->lcpu.stopped) {
+	/* Drop the lock and re-enable interrupts while pausing, so the
+	 * departing CPU (and any pending interrupts) can make progress. */
+ simple_unlock(&x86_topo_lock);
+ ml_set_interrupts_enabled(intrs_enabled);
+ cpu_pause();
+ if (rdtsc64() > tsc_timeout)
+ panic("cpu_exit_wait(%d) timeout", cpu);
+ ml_set_interrupts_enabled(FALSE);
+ mp_safe_spin_lock(&x86_topo_lock);
+ }
+ simple_unlock(&x86_topo_lock);
+ ml_set_interrupts_enabled(intrs_enabled);
}
void
cpu_machine_init(
void)
{
- int cpu;
+ cpu_data_t *cdp = current_cpu_datap();
- cpu = get_cpu_number();
- PE_cpu_machine_init(cpu_datap(cpu)->cpu_id, TRUE);
-#if 0
- if (cpu_datap(cpu)->hibernate)
- {
- cpu_datap(cpu)->hibernate = 0;
- hibernate_machine_init();
- }
-#endif
+	/* The boot argument is TRUE only the first time this CPU comes up;
+	 * on subsequent wakes cpu_boot_complete is already set. */
+ PE_cpu_machine_init(cdp->cpu_id, !cdp->cpu_boot_complete);
+ cdp->cpu_boot_complete = TRUE;
+ cdp->cpu_running = TRUE;
ml_init_interrupt();
+
+#if CONFIG_VMX
+ /* initialize VMX for every CPU */
+ vmx_cpu_init();
+#endif
}
processor_t
if (is_boot_cpu)
return &processor_master;
- ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc));
+ ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc), VM_KERN_MEMORY_OSFMK);
if (ret != KERN_SUCCESS)
return NULL;