X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..d9a64523371fa019c4575bb400cbbc3a50ac9903:/iokit/Kernel/IOCPU.cpp?ds=sidebyside diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp index 610229ce3..8ad8d76cd 100644 --- a/iokit/Kernel/IOCPU.cpp +++ b/iokit/Kernel/IOCPU.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,109 +25,80 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. - * - * DRI: Josh de Cesare - * - */ extern "C" { #include #include +#include +extern void kperf_kernel_configure(char *); } -#include - #include #include #include +#include #include #include #include +#include "IOKitKernelInternal.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include +extern "C" void console_suspend(); +extern "C" void console_resume(); +extern "C" void sched_override_recommended_cores_for_sleep(void); +extern "C" void sched_restore_recommended_cores_after_sleep(void); + typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3); + void * param1, void * param2, void * param3, + const char * name); struct iocpu_platform_action_entry { queue_chain_t link; iocpu_platform_action_t action; int32_t priority; + const char * name; void * refcon0; void * refcon1; + boolean_t callout_in_progress; struct iocpu_platform_action_entry * alloc_list; }; typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t; -queue_head_t * -iocpu_get_platform_quiesce_queue(void); - -queue_head_t * -iocpu_get_platform_active_queue(void); - -void -iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, queue_head_t * init_queue); - -void -iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry); - -void -iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry); - -kern_return_t -iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3); - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +static IOLock *gIOCPUsLock; +static OSArray *gIOCPUs; +static const OSSymbol *gIOCPUStateKey; +static OSString *gIOCPUStateNames[kIOCPUStateCount]; -static iocpu_platform_action_entry_t * gIOAllActionsQueue; -static queue_head_t gIOSleepActionQueue; -static queue_head_t gIOWakeActionQueue; - -static queue_head_t iocpu_quiesce_queue; -static queue_head_t iocpu_active_queue; - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -void -iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, __unused queue_head_t * init_queue) +enum { -#if 0 - enum { kNumQuiesceActions = 2 }; - static iocpu_platform_action_entry_t quiesce_actions[kNumQuiesceActions] = - { - { { NULL, NULL }, (iocpu_platform_action_t) &clean_mmu_dcache, 97000, 0, 0, NULL }, - { { NULL, NULL }, (iocpu_platform_action_t) &arm_sleep, 99000, 0, 0, NULL }, - }; - unsigned int idx; + kQueueSleep = 0, + kQueueWake = 1, + kQueueQuiesce = 2, + kQueueActive = 3, + kQueueHaltRestart = 4, + kQueuePanic = 5, + kQueueCount = 6 +}; - for (idx = 0; idx < kNumQuiesceActions; idx++) - iocpu_add_platform_action(quiesce_queue, &quiesce_actions[idx]); -#endif -} +const OSSymbol * 
gIOPlatformSleepActionKey; +const OSSymbol * gIOPlatformWakeActionKey; +const OSSymbol * gIOPlatformQuiesceActionKey; +const OSSymbol * gIOPlatformActiveActionKey; +const OSSymbol * gIOPlatformHaltRestartActionKey; +const OSSymbol * gIOPlatformPanicActionKey; -queue_head_t * iocpu_get_platform_quiesce_queue(void) -{ - if (!iocpu_quiesce_queue.next) - { - queue_init(&iocpu_quiesce_queue); - queue_init(&iocpu_active_queue); - iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue); - } - return (&iocpu_quiesce_queue); -} +static queue_head_t gActionQueues[kQueueCount]; +static const OSSymbol * gActionSymbols[kQueueCount]; -queue_head_t * iocpu_get_platform_active_queue(void) -{ - return (&iocpu_active_queue); -} +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) +static void +iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) { iocpu_platform_action_entry_t * next; @@ -142,14 +113,15 @@ void iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail } -void iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) +static void +iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) { remque(&entry->link); } -kern_return_t +static kern_return_t iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3) + void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) { kern_return_t ret = KERN_SUCCESS; kern_return_t result = KERN_SUCCESS; @@ -161,7 +133,16 @@ iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32 if ((pri >= first_priority) && (pri <= last_priority)) { //kprintf("[%p]", next->action); - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3); + if (!allow_nested_callouts && !next->callout_in_progress) + { + next->callout_in_progress = TRUE; + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + next->callout_in_progress = FALSE; + } + else if (allow_nested_callouts) + { + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + } } if (KERN_SUCCESS == result) result = ret; @@ -174,48 +155,108 @@ iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32 extern "C" kern_return_t IOCPURunPlatformQuiesceActions(void) { - return (iocpu_run_platform_actions(iocpu_get_platform_quiesce_queue(), 0, 0UL-1, - NULL, NULL, NULL)); + return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1, + NULL, NULL, NULL, TRUE)); } extern "C" kern_return_t IOCPURunPlatformActiveActions(void) { - return (iocpu_run_platform_actions(iocpu_get_platform_active_queue(), 0, 0UL-1, - NULL, NULL, NULL)); + return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1, + NULL, NULL, NULL, TRUE)); } +extern "C" kern_return_t +IOCPURunPlatformHaltRestartActions(uint32_t message) +{ + if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady); + return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1, + (void *)(uintptr_t) message, NULL, NULL, TRUE)); +} + +extern "C" kern_return_t +IOCPURunPlatformPanicActions(uint32_t message) +{ + // Don't allow nested calls of panic actions + if 
(!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady); + return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1, + (void *)(uintptr_t) message, NULL, NULL, FALSE)); +} + + +extern "C" kern_return_t +IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len) +{ + PE_panic_save_context_t context = { + .psc_buffer = addr, + .psc_offset = offset, + .psc_length = len + }; + + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady); + return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1, + (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE)); + +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + static kern_return_t IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3) + void * param1, void * param2, void * param3, + const char * service_name) { IOReturn ret; IOService * service = (IOService *) refcon0; const OSSymbol * function = (const OSSymbol *) refcon1; - kprintf("%s -> %s\n", function->getCStringNoCopy(), service->getName()); + kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name); ret = service->callPlatformFunction(function, false, - (void *) priority, param1, param2, param3); + (void *)(uintptr_t) priority, param1, param2, param3); return (ret); } static void -IOInstallServicePlatformAction(IOService * service, - const OSSymbol * key, queue_head_t * queue, - bool reverse) +IOInstallServicePlatformAction(IOService * service, uint32_t qidx) { - OSNumber * num; iocpu_platform_action_entry_t * entry; - uint32_t priority; + OSNumber * num; + uint32_t priority; + const OSSymbol * key = gActionSymbols[qidx]; + queue_head_t * queue = &gActionQueues[qidx]; + bool reverse; + bool uniq; num = OSDynamicCast(OSNumber, service->getProperty(key)); - if (!num) - return; + if (!num) return; + + reverse = false; + uniq = false; + switch (qidx) + { + case kQueueWake: + case kQueueActive: + reverse = true; + break; + case kQueueHaltRestart: + case kQueuePanic: + uniq = true; + break; + } + if (uniq) + { + queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) + { + if (service == entry->refcon0) return; + } + } entry = IONew(iocpu_platform_action_entry_t, 1); entry->action = &IOServicePlatformAction; + entry->name = service->getName(); priority = num->unsigned32BitValue(); if (reverse) entry->priority = -priority; @@ -223,52 +264,182 @@ IOInstallServicePlatformAction(IOService * service, entry->priority = priority; entry->refcon0 = service; entry->refcon1 = (void *) key; + entry->callout_in_progress = FALSE; iocpu_add_platform_action(queue, entry); - entry->alloc_list = gIOAllActionsQueue; - gIOAllActionsQueue = entry; } +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void +IOCPUInitialize(void) +{ + gIOCPUsLock = IOLockAlloc(); + gIOCPUs = OSArray::withCapacity(1); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) + { + queue_init(&gActionQueues[qidx]); + } + + gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); + + gIOCPUStateNames[kIOCPUStateUnregistered] = + OSString::withCStringNoCopy("Unregistered"); + gIOCPUStateNames[kIOCPUStateUninitalized] = + OSString::withCStringNoCopy("Uninitalized"); + gIOCPUStateNames[kIOCPUStateStopped] = + OSString::withCStringNoCopy("Stopped"); + gIOCPUStateNames[kIOCPUStateRunning] = + OSString::withCStringNoCopy("Running"); + + 
gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep] + = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); + gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] + = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); + gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] + = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); + gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] + = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); + gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] + = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); + gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] + = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); +} + +IOReturn +IOInstallServicePlatformActions(IOService * service) +{ + IOLockLock(gIOCPUsLock); + + IOInstallServicePlatformAction(service, kQueueHaltRestart); + IOInstallServicePlatformAction(service, kQueuePanic); + + IOLockUnlock(gIOCPUsLock); + + return (kIOReturnSuccess); +} + +IOReturn +IORemoveServicePlatformActions(IOService * service) +{ + iocpu_platform_action_entry_t * entry; + iocpu_platform_action_entry_t * next; + + IOLockLock(gIOCPUsLock); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) + { + next = (typeof(entry)) queue_first(&gActionQueues[qidx]); + while (!queue_end(&gActionQueues[qidx], &next->link)) + { + entry = next; + next = (typeof(entry)) queue_next(&entry->link); + if (service == entry->refcon0) + { + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } + } + } + + IOLockUnlock(gIOCPUsLock); + + return (kIOReturnSuccess); +} + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t PE_cpu_start(cpu_id_t target, vm_offset_t start_paddr, vm_offset_t arg_paddr) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + IOCPU *targetCPU = (IOCPU *)target; - if (targetCPU == 0) return KERN_FAILURE; - return targetCPU->startCPU(start_paddr, arg_paddr); + if (targetCPU == NULL) return KERN_FAILURE; + return targetCPU->startCPU(start_paddr, arg_paddr); } void PE_cpu_halt(cpu_id_t target) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + IOCPU *targetCPU = (IOCPU *)target; - if (targetCPU) targetCPU->haltCPU(); + targetCPU->haltCPU(); } void PE_cpu_signal(cpu_id_t source, cpu_id_t target) { - IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; - if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU); + sourceCPU->signalCPU(targetCPU); +} + +void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target) +{ + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; + + sourceCPU->signalCPUDeferred(targetCPU); +} + +void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target) +{ + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; + + sourceCPU->signalCPUCancel(targetCPU); } void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - - if (targetCPU) targetCPU->initCPU(bootb); + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU == NULL) + panic("%s: invalid target CPU %p", __func__, target); + + targetCPU->initCPU(bootb); +#if defined(__arm__) || defined(__arm64__) + if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false); 
+#endif /* defined(__arm__) || defined(__arm64__) */ } void PE_cpu_machine_quiesce(cpu_id_t target) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + IOCPU *targetCPU = (IOCPU*)target; +#if defined(__arm__) || defined(__arm64__) + if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true); +#endif /* defined(__arm__) || defined(__arm64__) */ + targetCPU->quiesceCPU(); +} - if (targetCPU) targetCPU->quiesceCPU(); +#if defined(__arm__) || defined(__arm64__) +static perfmon_interrupt_handler_func pmi_handler = 0; + +kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler) +{ + pmi_handler = handler; + + return KERN_SUCCESS; } +void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable) +{ + IOCPU *targetCPU = (IOCPU*)target; + + if (targetCPU == nullptr) { + return; + } + + if (enable) { + targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0); + targetCPU->getProvider()->enableInterrupt(1); + } else { + targetCPU->getProvider()->disableInterrupt(1); + } +} +#endif + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define super IOService @@ -285,96 +456,126 @@ OSMetaClassDefineReservedUnused(IOCPU, 7); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static OSArray *gIOCPUs; -static const OSSymbol *gIOCPUStateKey; -static OSString *gIOCPUStateNames[kIOCPUStateCount]; - void IOCPUSleepKernel(void) { long cnt, numCPUs; IOCPU *target; - + IOCPU *bootCPU = NULL; + IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); + kprintf("IOCPUSleepKernel\n"); +#if defined(__arm64__) + sched_override_recommended_cores_for_sleep(); +#endif - OSIterator * iter; - IOService * service; + IORegistryIterator * iter; + OSOrderedSet * all; + IOService * service; - queue_init(&gIOSleepActionQueue); - queue_init(&gIOWakeActionQueue); + rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); iter = IORegistryIterator::iterateOver( gIOServicePlane, kIORegistryIterateRecursively ); if( iter) { - do + all = 0; + do { - iter->reset(); - while((service = (IOService *) iter->getNextObject())) - { - IOInstallServicePlatformAction(service, gIOPlatformSleepActionKey, &gIOSleepActionQueue, false); - IOInstallServicePlatformAction(service, gIOPlatformWakeActionKey, &gIOWakeActionQueue, true); - IOInstallServicePlatformAction(service, gIOPlatformQuiesceActionKey, iocpu_get_platform_quiesce_queue(), false); - IOInstallServicePlatformAction(service, gIOPlatformActiveActionKey, iocpu_get_platform_active_queue(), true); - } + if (all) + all->release(); + all = iter->iterateAll(); } - while( !service && !iter->isValid()); + while (!iter->isValid()); iter->release(); + + if (all) + { + while((service = (IOService *) all->getFirstObject())) + { + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) + { + IOInstallServicePlatformAction(service, qidx); + } + all->removeObject(service); + } + all->release(); + } } - iocpu_run_platform_actions(&gIOSleepActionQueue, 0, 0UL-1, - NULL, NULL, NULL); + iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1, + NULL, NULL, NULL, TRUE); + + rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); numCPUs = gIOCPUs->getCount(); // Sleep the CPUs. 
cnt = numCPUs; - while (cnt--) { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - if (target->getCPUState() == kIOCPUStateRunning) { - target->haltCPU(); - } + while (cnt--) + { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + + // We make certain that the bootCPU is the last to sleep + // We'll skip it for now, and halt it after finishing the + // non-boot CPU's. + if (target->getCPUNumber() == (UInt32)master_cpu) + { + bootCPU = target; + } else if (target->getCPUState() == kIOCPUStateRunning) + { + target->haltCPU(); + } } - iocpu_run_platform_actions(&gIOWakeActionQueue, 0, 0UL-1, - NULL, NULL, NULL); + assert(bootCPU != NULL); + assert(cpu_number() == master_cpu); + + console_suspend(); + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver ); + rootDomain->stop_watchdog_timer(); + + // Now sleep the boot CPU. + bootCPU->haltCPU(); + + rootDomain->start_watchdog_timer(); + rootDomain->tracePoint( kIOPMTracePointWakePlatformActions ); + + console_resume(); + + iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1, + NULL, NULL, NULL, TRUE); iocpu_platform_action_entry_t * entry; - while ((entry = gIOAllActionsQueue)) + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { - gIOAllActionsQueue = entry->alloc_list; - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); + while (!(queue_empty(&gActionQueues[qidx]))) + { + entry = (typeof(entry)) queue_first(&gActionQueues[qidx]); + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } } - if (!queue_empty(&gIOSleepActionQueue)) - IOPanic("gIOSleepActionQueue"); - if (!queue_empty(&gIOWakeActionQueue)) - IOPanic("gIOWakeActionQueue"); - + rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); + // Wake the other CPUs. - for (cnt = 1; cnt < numCPUs; cnt++) { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - if (target->getCPUState() == kIOCPUStateStopped) { - processor_start(target->getMachProcessor()); - } + for (cnt = 0; cnt < numCPUs; cnt++) + { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + + // Skip the already-woken boot CPU. + if (target->getCPUNumber() != (UInt32)master_cpu) { + if (target->getCPUState() == kIOCPUStateRunning) + panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber())); + + if (target->getCPUState() == kIOCPUStateStopped) + processor_start(target->getMachProcessor()); + } } -} -void IOCPU::initCPUs(void) -{ - if (gIOCPUs == 0) { - gIOCPUs = OSArray::withCapacity(1); - - gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); - - gIOCPUStateNames[kIOCPUStateUnregistered] = - OSString::withCStringNoCopy("Unregistered"); - gIOCPUStateNames[kIOCPUStateUninitalized] = - OSString::withCStringNoCopy("Uninitalized"); - gIOCPUStateNames[kIOCPUStateStopped] = - OSString::withCStringNoCopy("Stopped"); - gIOCPUStateNames[kIOCPUStateRunning] = - OSString::withCStringNoCopy("Running"); - } +#if defined(__arm64__) + sched_restore_recommended_cores_after_sleep(); +#endif } bool IOCPU::start(IOService *provider) @@ -383,13 +584,13 @@ bool IOCPU::start(IOService *provider) if (!super::start(provider)) return false; - initCPUs(); - _cpuGroup = gIOCPUs; cpuNub = provider; + IOLockLock(gIOCPUsLock); gIOCPUs->setObject(this); - + IOLockUnlock(gIOCPUsLock); + // Correct the bus, cpu and timebase frequencies in the device tree. 
if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); @@ -411,7 +612,7 @@ bool IOCPU::start(IOService *provider) provider->setProperty("timebase-frequency", timebaseFrequency); timebaseFrequency->release(); - super::setProperty("IOCPUID", (UInt32)this, 32); + super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8); setCPUNumber(0); setCPUState(kIOCPUStateUnregistered); @@ -428,31 +629,10 @@ OSObject *IOCPU::getProperty(const OSSymbol *aKey) const bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject) { - OSString *stateStr; - if (aKey == gIOCPUStateKey) { - stateStr = OSDynamicCast(OSString, anObject); - if (stateStr == 0) return false; - - if (_cpuNumber == 0) return false; - - if (stateStr->isEqualTo("running")) { - if (_cpuState == kIOCPUStateStopped) { - processor_start(machProcessor); - } else if (_cpuState != kIOCPUStateRunning) { - return false; - } - } else if (stateStr->isEqualTo("stopped")) { - if (_cpuState == kIOCPUStateRunning) { - haltCPU(); - } else if (_cpuState != kIOCPUStateStopped) { - return false; - } - } else return false; - - return true; + return false; } - + return super::setProperty(aKey, anObject); } @@ -460,6 +640,7 @@ bool IOCPU::serializeProperties(OSSerialize *serialize) const { bool result; OSDictionary *dict = dictionaryWithProperties(); + if (!dict) return false; dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]); result = dict->serialize(serialize); dict->release(); @@ -491,6 +672,20 @@ void IOCPU::signalCPU(IOCPU */*target*/) { } +void IOCPU::signalCPUDeferred(IOCPU *target) +{ + // Our CPU may not support deferred IPIs, + // so send a regular IPI by default + signalCPU(target); +} + +void IOCPU::signalCPUCancel(IOCPU */*target*/) +{ + // Meant to cancel signals sent by + // signalCPUDeferred; unsupported + // by default +} + void IOCPU::enableCPUTimeBase(bool /*enable*/) { } @@ -541,7 +736,6 @@ processor_t IOCPU::getMachProcessor(void) OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController); -OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3); @@ -552,28 +746,29 @@ OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - IOReturn IOCPUInterruptController::initCPUInterruptController(int sources) +{ + return initCPUInterruptController(sources, sources); +} + +IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus) { int cnt; if (!super::init()) return kIOReturnInvalid; - - numCPUs = sources; - - cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *)); - if (cpus == 0) return kIOReturnNoMemory; - bzero(cpus, numCPUs * sizeof(IOCPU *)); - - vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector)); + + numSources = sources; + numCPUs = cpus; + + vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector)); if (vectors == 0) return kIOReturnNoMemory; - bzero(vectors, numCPUs * sizeof(IOInterruptVector)); - - // Allocate locks for the - for (cnt = 0; cnt < numCPUs; cnt++) { + bzero(vectors, numSources * sizeof(IOInterruptVector)); + + // Allocate a lock for each vector + for (cnt = 0; cnt < numSources; cnt++) { vectors[cnt].interruptLock = IOLockAlloc(); if 
(vectors[cnt].interruptLock == NULL) { - for (cnt = 0; cnt < numCPUs; cnt++) { + for (cnt = 0; cnt < numSources; cnt++) { if (vectors[cnt].interruptLock != NULL) IOLockFree(vectors[cnt].interruptLock); } @@ -581,7 +776,20 @@ IOReturn IOCPUInterruptController::initCPUInterruptController(int sources) } } - ml_init_max_cpus(numCPUs); + ml_init_max_cpus(numSources); + +#if KPERF + /* + * kperf allocates based on the number of CPUs and requires them to all be + * accounted for. + */ + boolean_t found_kperf = FALSE; + char kperf_config_str[64]; + found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str)); + if (found_kperf && kperf_config_str[0] != '\0') { + kperf_kernel_configure(kperf_config_str); + } +#endif return kIOReturnSuccess; } @@ -607,8 +815,8 @@ void IOCPUInterruptController::setCPUInterruptProperties(IOService *service) return; // Create the interrupt specifer array. - specifier = OSArray::withCapacity(numCPUs); - for (cnt = 0; cnt < numCPUs; cnt++) { + specifier = OSArray::withCapacity(numSources); + for (cnt = 0; cnt < numSources; cnt++) { tmpLong = cnt; tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); specifier->setObject(tmpData); @@ -616,8 +824,8 @@ void IOCPUInterruptController::setCPUInterruptProperties(IOService *service) }; // Create the interrupt controller array. - controller = OSArray::withCapacity(numCPUs); - for (cnt = 0; cnt < numCPUs; cnt++) { + controller = OSArray::withCapacity(numSources); + for (cnt = 0; cnt < numSources; cnt++) { controller->setObject(gPlatformInterruptControllerName); } @@ -633,11 +841,18 @@ void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) IOInterruptHandler handler = OSMemberFunctionCast( IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt); + assert(numCPUs > 0); + ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0); - - enabledCPUs++; - - if (enabledCPUs == numCPUs) thread_wakeup(this); + + IOTakeLock(vectors[0].interruptLock); + ++enabledCPUs; + + if (enabledCPUs == numCPUs) { + IOService::cpusRunning(); + thread_wakeup(this); + } + IOUnlock(vectors[0].interruptLock); } IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub, @@ -647,39 +862,42 @@ IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub, void *refCon) { IOInterruptVector *vector; - - if (source >= numCPUs) return kIOReturnNoResources; - + + if (source >= numSources) return kIOReturnNoResources; + vector = &vectors[source]; - + // Get the lock for this vector. IOTakeLock(vector->interruptLock); - + // Make sure the vector is not in use. if (vector->interruptRegistered) { IOUnlock(vector->interruptLock); return kIOReturnNoResources; } - + // Fill in vector with the client's info. vector->handler = handler; vector->nub = nub; vector->source = source; vector->target = target; vector->refCon = refCon; - + // Get the vector ready. It starts hard disabled. vector->interruptDisabledHard = 1; vector->interruptDisabledSoft = 1; vector->interruptRegistered = 1; - + IOUnlock(vector->interruptLock); - + + IOTakeLock(vectors[0].interruptLock); if (enabledCPUs != numCPUs) { assert_wait(this, THREAD_UNINT); + IOUnlock(vectors[0].interruptLock); thread_block(THREAD_CONTINUE_NULL); - } - + } else + IOUnlock(vectors[0].interruptLock); + return kIOReturnSuccess; }
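
For context on how the action queues in this diff are meant to be fed: IOInstallServicePlatformAction() turns an OSNumber property published under one of the gActionSymbols keys into a queue entry whose callout is IOServicePlatformAction(), which simply invokes callPlatformFunction() on the owning service with the key as the function name, the priority as param1, and the run-time parameters shifted into param2..param4. The sketch below shows a minimal, hypothetical driver participating in the panic queue this way; it is an editorial illustration, not part of the commit. The class name, the priority value, and the spelled-out "IOPlatformPanicAction" string are assumptions (the real macro, kIOPlatformPanicActionKey, lives in the private IOKitKeysPrivate.h header), and the hook that calls IOInstallServicePlatformActions() for a newly registered service is outside this diff.

extern "C" {
#include <pexpert/pexpert.h>          /* kprintf(), as used by IOCPU.cpp itself */
}
#include <IOKit/IOService.h>
#include <IOKit/IOLib.h>

/* Assumed key string; the kernel installs it from kIOPlatformPanicActionKey. */
#define kExamplePanicActionKey "IOPlatformPanicAction"

class com_example_PanicAction : public IOService      /* hypothetical driver */
{
    OSDeclareDefaultStructors(com_example_PanicAction);

public:
    virtual bool start(IOService * provider) APPLE_KEXT_OVERRIDE
    {
        if (!IOService::start(provider)) return false;

        /* Publishing an OSNumber under the key is what makes
         * IOInstallServicePlatformAction() queue this service on
         * gActionQueues[kQueuePanic]; the 32-bit value is the priority
         * used to order the callouts (1000 is an arbitrary example). */
        setProperty(kExamplePanicActionKey, 1000ULL, 32);

        registerService();
        return true;
    }

    /* IOServicePlatformAction() delivers the callout through
     * callPlatformFunction(): functionName is the action key, param1 is the
     * entry priority, and param2 is the message word that
     * IOCPURunPlatformPanicActions() passed as its first run parameter. */
    virtual IOReturn callPlatformFunction(const OSSymbol * functionName,
                                          bool waitForFunction,
                                          void * param1, void * param2,
                                          void * param3, void * param4) APPLE_KEXT_OVERRIDE
    {
        if (functionName && functionName->isEqualTo(kExamplePanicActionKey)) {
            uint32_t message = (uint32_t)(uintptr_t) param2;

            /* Panic context: stay lock-free and allocation-free; record state
             * or quiesce hardware and return promptly. */
            kprintf("com_example_PanicAction: panic callout, message 0x%x\n", message);
            return kIOReturnSuccess;
        }
        return IOService::callPlatformFunction(functionName, waitForFunction,
                                               param1, param2, param3, param4);
    }
};

OSDefineMetaClassAndStructors(com_example_PanicAction, IOService);

The sleep, wake, quiesce, and active keys work the same way, except that IOCPUSleepKernel() collects them by walking gIOServicePlane at sleep time rather than at registration, and the wake/active queues negate the published priority so those callouts run in reverse order relative to their sleep/quiesce counterparts.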