/*
- * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/*
- * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
- *
- * DRI: Josh de Cesare
- *
- */
extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
+#include <kern/cpu_number.h>
+extern void kperf_kernel_configure(char *);
}
-#include <machine/machine_routines.h>
-
#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
+#include "IOKitKernelInternal.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>
+extern "C" void console_suspend();
+extern "C" void console_resume();
+extern "C" void sched_override_recommended_cores_for_sleep(void);
+extern "C" void sched_restore_recommended_cores_after_sleep(void);
+
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
void * param1, void * param2, void * param3,
const char * name);
const char * name;
void * refcon0;
void * refcon1;
+ boolean_t callout_in_progress;
struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
-queue_head_t *
-iocpu_get_platform_quiesce_queue(void);
-
-queue_head_t *
-iocpu_get_platform_active_queue(void);
-
-void
-iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, queue_head_t * init_queue);
-
-void
-iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry);
-
-void
-iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry);
-
-kern_return_t
-iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
- void * param1, void * param2, void * param3);
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#define kBootCPUNumber 0
-
-static iocpu_platform_action_entry_t * gIOAllActionsQueue;
-static queue_head_t gIOSleepActionQueue;
-static queue_head_t gIOWakeActionQueue;
-
-static queue_head_t iocpu_quiesce_queue;
-static queue_head_t iocpu_active_queue;
-
-static queue_head_t gIOHaltRestartActionQueue;
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+static IOLock *gIOCPUsLock;
+static OSArray *gIOCPUs;
+static const OSSymbol *gIOCPUStateKey;
+static OSString *gIOCPUStateNames[kIOCPUStateCount];
-void
-iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, __unused queue_head_t * init_queue)
+enum
{
-#if 0
- enum { kNumQuiesceActions = 2 };
- static iocpu_platform_action_entry_t quiesce_actions[kNumQuiesceActions] =
- {
- { { NULL, NULL }, (iocpu_platform_action_t) &clean_mmu_dcache, 97000, 0, 0, NULL },
- { { NULL, NULL }, (iocpu_platform_action_t) &arm_sleep, 99000, 0, 0, NULL },
- };
- unsigned int idx;
+ kQueueSleep = 0,
+ kQueueWake = 1,
+ kQueueQuiesce = 2,
+ kQueueActive = 3,
+ kQueueHaltRestart = 4,
+ kQueuePanic = 5,
+ kQueueCount = 6
+};
- for (idx = 0; idx < kNumQuiesceActions; idx++)
- iocpu_add_platform_action(quiesce_queue, &quiesce_actions[idx]);
-#endif
-}
+const OSSymbol * gIOPlatformSleepActionKey;
+const OSSymbol * gIOPlatformWakeActionKey;
+const OSSymbol * gIOPlatformQuiesceActionKey;
+const OSSymbol * gIOPlatformActiveActionKey;
+const OSSymbol * gIOPlatformHaltRestartActionKey;
+const OSSymbol * gIOPlatformPanicActionKey;
-queue_head_t * iocpu_get_platform_quiesce_queue(void)
-{
- if (!iocpu_quiesce_queue.next)
- {
- queue_init(&iocpu_quiesce_queue);
- queue_init(&iocpu_active_queue);
- iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue);
- }
- return (&iocpu_quiesce_queue);
-}
+static queue_head_t gActionQueues[kQueueCount];
+static const OSSymbol * gActionSymbols[kQueueCount];
-queue_head_t * iocpu_get_platform_active_queue(void)
-{
- if (!iocpu_active_queue.next)
- {
- queue_init(&iocpu_quiesce_queue);
- queue_init(&iocpu_active_queue);
- iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue);
- }
- return (&iocpu_active_queue);
-}
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-void iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
+static void
+iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
iocpu_platform_action_entry_t * next;
queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}
-void iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
+static void
+iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
remque(&entry->link);
}
-kern_return_t
+static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
- void * param1, void * param2, void * param3)
+ void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
kern_return_t ret = KERN_SUCCESS;
kern_return_t result = KERN_SUCCESS;
if ((pri >= first_priority) && (pri <= last_priority))
{
//kprintf("[%p]", next->action);
- ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
+ if (!allow_nested_callouts && !next->callout_in_progress)
+ {
+ next->callout_in_progress = TRUE;
+ ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
+ next->callout_in_progress = FALSE;
+ }
+ else if (allow_nested_callouts)
+ {
+ ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
+ }
}
if (KERN_SUCCESS == result)
result = ret;
extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
- return (iocpu_run_platform_actions(iocpu_get_platform_quiesce_queue(), 0, 0U-1,
- NULL, NULL, NULL));
+ return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
+ NULL, NULL, NULL, TRUE));
}
extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
- return (iocpu_run_platform_actions(iocpu_get_platform_active_queue(), 0, 0U-1,
- NULL, NULL, NULL));
+ return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
+ NULL, NULL, NULL, TRUE));
}
+// Run every registered halt/restart platform action across the full
+// priority range, forwarding the halt/restart message to each callback.
+// Nested callouts are permitted (allow_nested_callouts == TRUE).
+extern "C" kern_return_t
+IOCPURunPlatformHaltRestartActions(uint32_t message)
+{
+ // A NULL queue head means the action queues have not been
+ // queue_init()'d yet (see IOCPUInitialize), so report not-ready.
+ if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
+ return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
+ (void *)(uintptr_t) message, NULL, NULL, TRUE));
+}
+
+// Run every registered panic platform action, forwarding the panic
+// message. allow_nested_callouts is FALSE, so an action whose
+// callout_in_progress flag is already set is skipped rather than
+// re-entered.
+extern "C" kern_return_t
+IOCPURunPlatformPanicActions(uint32_t message)
+{
+ // Don't allow nested calls of panic actions
+ if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
+ return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
+ (void *)(uintptr_t) message, NULL, NULL, FALSE));
+}
+
+
+// Deliver a kPEPanicSync message to the panic platform actions, passing a
+// PE_panic_save_context_t that describes the caller-supplied destination
+// buffer (addr/offset/len) into which actions may save panic data.
+extern "C" kern_return_t
+IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
+{
+ PE_panic_save_context_t context = {
+ .psc_buffer = addr,
+ .psc_offset = offset,
+ .psc_length = len
+ };
+
+ // Don't allow nested calls of panic actions
+ if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
+ return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
+ (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE));
+
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
void * param1, void * param2, void * param3,
}
static void
-IOInstallServicePlatformAction(IOService * service,
- const OSSymbol * key, queue_head_t * queue,
- bool reverse)
+IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
- OSNumber * num;
iocpu_platform_action_entry_t * entry;
- uint32_t priority;
+ OSNumber * num;
+ uint32_t priority;
+ const OSSymbol * key = gActionSymbols[qidx];
+ queue_head_t * queue = &gActionQueues[qidx];
+ bool reverse;
+ bool uniq;
num = OSDynamicCast(OSNumber, service->getProperty(key));
- if (!num)
- return;
+ if (!num) return;
+
+ reverse = false;
+ uniq = false;
+ switch (qidx)
+ {
+ case kQueueWake:
+ case kQueueActive:
+ reverse = true;
+ break;
+ case kQueueHaltRestart:
+ case kQueuePanic:
+ uniq = true;
+ break;
+ }
+ if (uniq)
+ {
+ queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
+ {
+ if (service == entry->refcon0) return;
+ }
+ }
entry = IONew(iocpu_platform_action_entry_t, 1);
entry->action = &IOServicePlatformAction;
entry->priority = priority;
entry->refcon0 = service;
entry->refcon1 = (void *) key;
+ entry->callout_in_progress = FALSE;
iocpu_add_platform_action(queue, entry);
- entry->alloc_list = gIOAllActionsQueue;
- gIOAllActionsQueue = entry;
}
-extern "C" kern_return_t
-IOCPURunPlatformHaltRestartActions(uint32_t message)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+void
+IOCPUInitialize(void)
{
- kern_return_t ret;
- IORegistryIterator * iter;
- OSOrderedSet * all;
- IOService * service;
+ gIOCPUsLock = IOLockAlloc();
+ gIOCPUs = OSArray::withCapacity(1);
+
+ for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
+ {
+ queue_init(&gActionQueues[qidx]);
+ }
+
+ gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");
+
+ gIOCPUStateNames[kIOCPUStateUnregistered] =
+ OSString::withCStringNoCopy("Unregistered");
+ gIOCPUStateNames[kIOCPUStateUninitalized] =
+ OSString::withCStringNoCopy("Uninitalized");
+ gIOCPUStateNames[kIOCPUStateStopped] =
+ OSString::withCStringNoCopy("Stopped");
+ gIOCPUStateNames[kIOCPUStateRunning] =
+ OSString::withCStringNoCopy("Running");
+
+ gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
+ = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
+ gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
+ = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
+ gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
+ = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
+ gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
+ = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
+ gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
+ = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
+ gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
+ = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
+}
+
+// Register a service's HaltRestart and Panic platform actions (read from
+// its properties) under gIOCPUsLock. The Sleep/Wake/Quiesce/Active
+// actions are installed separately, from IOCPUSleepKernel().
+IOReturn
+IOInstallServicePlatformActions(IOService * service)
+{
+ IOLockLock(gIOCPUsLock);
- if (!gIOHaltRestartActionQueue.next)
+ IOInstallServicePlatformAction(service, kQueueHaltRestart);
+ IOInstallServicePlatformAction(service, kQueuePanic);
+
+ IOLockUnlock(gIOCPUsLock);
+
+ return (kIOReturnSuccess);
+}
+
+IOReturn
+IORemoveServicePlatformActions(IOService * service)
+{
+ iocpu_platform_action_entry_t * entry;
+ iocpu_platform_action_entry_t * next;
+
+ IOLockLock(gIOCPUsLock);
+
+ for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
{
- queue_init(&gIOHaltRestartActionQueue);
- iter = IORegistryIterator::iterateOver(gIOServicePlane,
- kIORegistryIterateRecursively);
- if (iter)
+ next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
+ while (!queue_end(&gActionQueues[qidx], &next->link))
{
- all = 0;
- do
+ entry = next;
+ next = (typeof(entry)) queue_next(&entry->link);
+ if (service == entry->refcon0)
{
- if (all) all->release();
- all = iter->iterateAll();
+ iocpu_remove_platform_action(entry);
+ IODelete(entry, iocpu_platform_action_entry_t, 1);
}
- while (!iter->isValid());
- iter->release();
- if (all)
- {
- while((service = (IOService *) all->getFirstObject()))
- {
- IOInstallServicePlatformAction(service, gIOPlatformHaltRestartActionKey, &gIOHaltRestartActionQueue, false);
- all->removeObject(service);
- }
- all->release();
- }
}
}
- ret = iocpu_run_platform_actions(&gIOHaltRestartActionQueue, 0, 0U-1,
- (void *)(uintptr_t) message, NULL, NULL);
- return (ret);
+
+ IOLockUnlock(gIOCPUsLock);
+
+ return (kIOReturnSuccess);
}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t PE_cpu_start(cpu_id_t target,
vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
- IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
+ IOCPU *targetCPU = (IOCPU *)target;
- if (targetCPU == 0) return KERN_FAILURE;
- return targetCPU->startCPU(start_paddr, arg_paddr);
+ if (targetCPU == NULL) return KERN_FAILURE;
+ return targetCPU->startCPU(start_paddr, arg_paddr);
}
void PE_cpu_halt(cpu_id_t target)
{
- IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
+ IOCPU *targetCPU = (IOCPU *)target;
- if (targetCPU) targetCPU->haltCPU();
+ targetCPU->haltCPU();
}
void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
- IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
- IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
+ IOCPU *sourceCPU = (IOCPU *)source;
+ IOCPU *targetCPU = (IOCPU *)target;
- if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
+ sourceCPU->signalCPU(targetCPU);
+}
+
+// C entry point: forward a deferred cross-CPU signal request from source
+// to target via IOCPU::signalCPUDeferred. The cpu_id_t handles are cast
+// directly to IOCPU* with no OSDynamicCast or NULL check — callers must
+// pass valid registered CPU objects.
+void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
+{
+ IOCPU *sourceCPU = (IOCPU *)source;
+ IOCPU *targetCPU = (IOCPU *)target;
+
+ sourceCPU->signalCPUDeferred(targetCPU);
+}
+
+// C entry point: ask source to cancel a previously issued deferred signal
+// to target (IOCPU::signalCPUCancel). Like PE_cpu_signal_deferred, the
+// handles are cast without validation.
+void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
+{
+ IOCPU *sourceCPU = (IOCPU *)source;
+ IOCPU *targetCPU = (IOCPU *)target;
+
+ sourceCPU->signalCPUCancel(targetCPU);
+}
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
- IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
-
- if (targetCPU) targetCPU->initCPU(bootb);
+ IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
+
+ if (targetCPU == NULL)
+ panic("%s: invalid target CPU %p", __func__, target);
+
+ targetCPU->initCPU(bootb);
+#if defined(__arm__) || defined(__arm64__)
+ if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
+#endif /* defined(__arm__) || defined(__arm64__) */
}
void PE_cpu_machine_quiesce(cpu_id_t target)
{
- IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
+ IOCPU *targetCPU = (IOCPU*)target;
+#if defined(__arm__) || defined(__arm64__)
+ if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
+#endif /* defined(__arm__) || defined(__arm64__) */
+ targetCPU->quiesceCPU();
+}
+
+#if defined(__arm__) || defined(__arm64__)
+static perfmon_interrupt_handler_func pmi_handler = 0;
- if (targetCPU) targetCPU->quiesceCPU();
+// Record the global performance-monitor interrupt handler. It is wired to
+// a CPU's interrupt source 1 later, by PE_cpu_perfmon_interrupt_enable().
+kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
+{
+ pmi_handler = handler;
+
+ return KERN_SUCCESS;
}
+// Enable or disable performance-monitor interrupt delivery for a CPU.
+// Enabling registers pmi_handler as interrupt source 1 on the CPU's
+// provider nub and enables that source; disabling only disables it
+// (the handler registration is left in place).
+void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
+{
+ IOCPU *targetCPU = (IOCPU*)target;
+
+ if (targetCPU == nullptr) {
+ return;
+ }
+
+ if (enable) {
+ targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
+ targetCPU->getProvider()->enableInterrupt(1);
+ } else {
+ targetCPU->getProvider()->disableInterrupt(1);
+ }
+}
+#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-static OSArray *gIOCPUs;
-static const OSSymbol *gIOCPUStateKey;
-static OSString *gIOCPUStateNames[kIOCPUStateCount];
-
void IOCPUSleepKernel(void)
{
long cnt, numCPUs;
IOPMrootDomain *rootDomain = IOService::getPMRootDomain();
kprintf("IOCPUSleepKernel\n");
+#if defined(__arm64__)
+ sched_override_recommended_cores_for_sleep();
+#endif
IORegistryIterator * iter;
OSOrderedSet * all;
rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
- queue_init(&gIOSleepActionQueue);
- queue_init(&gIOWakeActionQueue);
- queue_init(&gIOHaltRestartActionQueue);
-
iter = IORegistryIterator::iterateOver( gIOServicePlane,
kIORegistryIterateRecursively );
if( iter)
{
while((service = (IOService *) all->getFirstObject()))
{
- IOInstallServicePlatformAction(service, gIOPlatformSleepActionKey, &gIOSleepActionQueue, false);
- IOInstallServicePlatformAction(service, gIOPlatformWakeActionKey, &gIOWakeActionQueue, true);
- IOInstallServicePlatformAction(service, gIOPlatformQuiesceActionKey, iocpu_get_platform_quiesce_queue(), false);
- IOInstallServicePlatformAction(service, gIOPlatformActiveActionKey, iocpu_get_platform_active_queue(), true);
- IOInstallServicePlatformAction(service, gIOPlatformHaltRestartActionKey, &gIOHaltRestartActionQueue, false);
+ for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
+ {
+ IOInstallServicePlatformAction(service, qidx);
+ }
all->removeObject(service);
}
all->release();
}
}
- iocpu_run_platform_actions(&gIOSleepActionQueue, 0, 0U-1,
- NULL, NULL, NULL);
+ iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
+ NULL, NULL, NULL, TRUE);
rootDomain->tracePoint( kIOPMTracePointSleepCPUs );
// We make certain that the bootCPU is the last to sleep
// We'll skip it for now, and halt it after finishing the
// non-boot CPU's.
- if (target->getCPUNumber() == kBootCPUNumber)
+ if (target->getCPUNumber() == (UInt32)master_cpu)
{
bootCPU = target;
- } else if (target->getCPUState() == kIOCPUStateRunning)
+ } else if (target->getCPUState() == kIOCPUStateRunning)
{
- target->haltCPU();
+ target->haltCPU();
}
}
+ assert(bootCPU != NULL);
+ assert(cpu_number() == master_cpu);
+
+ console_suspend();
+
rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
+ rootDomain->stop_watchdog_timer();
// Now sleep the boot CPU.
- if (bootCPU)
- bootCPU->haltCPU();
+ bootCPU->haltCPU();
+ rootDomain->start_watchdog_timer();
rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
- iocpu_run_platform_actions(&gIOWakeActionQueue, 0, 0U-1,
- NULL, NULL, NULL);
+ console_resume();
+
+ iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
+ NULL, NULL, NULL, TRUE);
iocpu_platform_action_entry_t * entry;
- while ((entry = gIOAllActionsQueue))
+ for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
{
- gIOAllActionsQueue = entry->alloc_list;
- iocpu_remove_platform_action(entry);
- IODelete(entry, iocpu_platform_action_entry_t, 1);
+ while (!(queue_empty(&gActionQueues[qidx])))
+ {
+ entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
+ iocpu_remove_platform_action(entry);
+ IODelete(entry, iocpu_platform_action_entry_t, 1);
+ }
}
- if (!queue_empty(&gIOSleepActionQueue)) panic("gIOSleepActionQueue");
- if (!queue_empty(&gIOWakeActionQueue)) panic("gIOWakeActionQueue");
- if (!queue_empty(&gIOHaltRestartActionQueue)) panic("gIOHaltRestartActionQueue");
- gIOHaltRestartActionQueue.next = 0;
-
rootDomain->tracePoint( kIOPMTracePointWakeCPUs );
// Wake the other CPUs.
for (cnt = 0; cnt < numCPUs; cnt++)
{
target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
-
+
// Skip the already-woken boot CPU.
- if ((target->getCPUNumber() != kBootCPUNumber)
- && (target->getCPUState() == kIOCPUStateStopped))
- {
- processor_start(target->getMachProcessor());
+ if (target->getCPUNumber() != (UInt32)master_cpu) {
+ if (target->getCPUState() == kIOCPUStateRunning)
+ panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
+
+ if (target->getCPUState() == kIOCPUStateStopped)
+ processor_start(target->getMachProcessor());
}
}
-}
-void IOCPU::initCPUs(void)
-{
- if (gIOCPUs == 0) {
- gIOCPUs = OSArray::withCapacity(1);
-
- gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");
-
- gIOCPUStateNames[kIOCPUStateUnregistered] =
- OSString::withCStringNoCopy("Unregistered");
- gIOCPUStateNames[kIOCPUStateUninitalized] =
- OSString::withCStringNoCopy("Uninitalized");
- gIOCPUStateNames[kIOCPUStateStopped] =
- OSString::withCStringNoCopy("Stopped");
- gIOCPUStateNames[kIOCPUStateRunning] =
- OSString::withCStringNoCopy("Running");
- }
+#if defined(__arm64__)
+ sched_restore_recommended_cores_after_sleep();
+#endif
}
bool IOCPU::start(IOService *provider)
if (!super::start(provider)) return false;
- initCPUs();
-
_cpuGroup = gIOCPUs;
cpuNub = provider;
+ IOLockLock(gIOCPUsLock);
gIOCPUs->setObject(this);
-
+ IOLockUnlock(gIOCPUsLock);
+
// Correct the bus, cpu and timebase frequencies in the device tree.
if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
- OSString *stateStr;
-
if (aKey == gIOCPUStateKey) {
- stateStr = OSDynamicCast(OSString, anObject);
- if (stateStr == 0) return false;
-
- if (_cpuNumber == 0) return false;
-
- if (stateStr->isEqualTo("running")) {
- if (_cpuState == kIOCPUStateStopped) {
- processor_start(machProcessor);
- } else if (_cpuState != kIOCPUStateRunning) {
- return false;
- }
- } else if (stateStr->isEqualTo("stopped")) {
- if (_cpuState == kIOCPUStateRunning) {
- haltCPU();
- } else if (_cpuState != kIOCPUStateStopped) {
- return false;
- }
- } else return false;
-
- return true;
+ return false;
}
-
+
return super::setProperty(aKey, anObject);
}
{
bool result;
OSDictionary *dict = dictionaryWithProperties();
+ if (!dict) return false;
dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
result = dict->serialize(serialize);
dict->release();
{
}
+// Base-class default for deferred signalling; subclasses with hardware
+// support override this.
+void IOCPU::signalCPUDeferred(IOCPU *target)
+{
+ // Our CPU may not support deferred IPIs,
+ // so send a regular IPI by default
+ signalCPU(target);
+}
+
+// Base-class default: no-op, since the default signalCPUDeferred sends an
+// immediate IPI that cannot be recalled.
+void IOCPU::signalCPUCancel(IOCPU */*target*/)
+{
+ // Meant to cancel signals sent by
+ // signalCPUDeferred; unsupported
+ // by default
+}
+
void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);
-OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
+{
+ // Compatibility wrapper: the legacy single-argument entry point assumes
+ // one interrupt source per CPU (sources == cpus).
+ return initCPUInterruptController(sources, sources);
+}
+
+IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
int cnt;
if (!super::init()) return kIOReturnInvalid;
-
- numCPUs = sources;
-
- cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *));
- if (cpus == 0) return kIOReturnNoMemory;
- bzero(cpus, numCPUs * sizeof(IOCPU *));
-
- vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector));
+
+ numSources = sources;
+ numCPUs = cpus;
+
+ vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
if (vectors == 0) return kIOReturnNoMemory;
- bzero(vectors, numCPUs * sizeof(IOInterruptVector));
-
- // Allocate locks for the
- for (cnt = 0; cnt < numCPUs; cnt++) {
+ bzero(vectors, numSources * sizeof(IOInterruptVector));
+
+ // Allocate a lock for each vector
+ for (cnt = 0; cnt < numSources; cnt++) {
vectors[cnt].interruptLock = IOLockAlloc();
if (vectors[cnt].interruptLock == NULL) {
- for (cnt = 0; cnt < numCPUs; cnt++) {
+ for (cnt = 0; cnt < numSources; cnt++) {
if (vectors[cnt].interruptLock != NULL)
IOLockFree(vectors[cnt].interruptLock);
}
}
}
- ml_init_max_cpus(numCPUs);
+ ml_init_max_cpus(numSources);
+
+#if KPERF
+ /*
+ * kperf allocates based on the number of CPUs and requires them to all be
+ * accounted for.
+ */
+ boolean_t found_kperf = FALSE;
+ char kperf_config_str[64];
+ found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
+ if (found_kperf && kperf_config_str[0] != '\0') {
+ kperf_kernel_configure(kperf_config_str);
+ }
+#endif
return kIOReturnSuccess;
}
return;
// Create the interrupt specifer array.
- specifier = OSArray::withCapacity(numCPUs);
- for (cnt = 0; cnt < numCPUs; cnt++) {
+ specifier = OSArray::withCapacity(numSources);
+ for (cnt = 0; cnt < numSources; cnt++) {
tmpLong = cnt;
tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
specifier->setObject(tmpData);
};
// Create the interrupt controller array.
- controller = OSArray::withCapacity(numCPUs);
- for (cnt = 0; cnt < numCPUs; cnt++) {
+ controller = OSArray::withCapacity(numSources);
+ for (cnt = 0; cnt < numSources; cnt++) {
controller->setObject(gPlatformInterruptControllerName);
}
IOInterruptHandler handler = OSMemberFunctionCast(
IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);
+ assert(numCPUs > 0);
+
ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);
- // Ensure that the increment is seen by all processors
- OSIncrementAtomic(&enabledCPUs);
+ IOTakeLock(vectors[0].interruptLock);
+ ++enabledCPUs;
- if (enabledCPUs == numCPUs) thread_wakeup(this);
+ if (enabledCPUs == numCPUs) {
+ IOService::cpusRunning();
+ thread_wakeup(this);
+ }
+ IOUnlock(vectors[0].interruptLock);
}
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
void *refCon)
{
IOInterruptVector *vector;
-
- if (source >= numCPUs) return kIOReturnNoResources;
-
+
+ if (source >= numSources) return kIOReturnNoResources;
+
vector = &vectors[source];
-
+
// Get the lock for this vector.
IOTakeLock(vector->interruptLock);
-
+
// Make sure the vector is not in use.
if (vector->interruptRegistered) {
IOUnlock(vector->interruptLock);
return kIOReturnNoResources;
}
-
+
// Fill in vector with the client's info.
vector->handler = handler;
vector->nub = nub;
vector->source = source;
vector->target = target;
vector->refCon = refCon;
-
+
// Get the vector ready. It starts hard disabled.
vector->interruptDisabledHard = 1;
vector->interruptDisabledSoft = 1;
vector->interruptRegistered = 1;
-
+
IOUnlock(vector->interruptLock);
-
+
+ IOTakeLock(vectors[0].interruptLock);
if (enabledCPUs != numCPUs) {
assert_wait(this, THREAD_UNINT);
+ IOUnlock(vectors[0].interruptLock);
thread_block(THREAD_CONTINUE_NULL);
- }
-
+ } else
+ IOUnlock(vectors[0].interruptLock);
+
return kIOReturnSuccess;
}