X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d1ecb069dfe24481e4a83f44cb5217a2b06746d7..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/iokit/Kernel/IOCPU.cpp diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp index 7646d2a97..84b9cedec 100644 --- a/iokit/Kernel/IOCPU.cpp +++ b/iokit/Kernel/IOCPU.cpp @@ -1,8 +1,8 @@ /* - * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,260 +22,440 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. - * - * DRI: Josh de Cesare * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ extern "C" { #include #include +#include +extern void kperf_kernel_configure(char *); } -#include - #include #include #include +#include #include #include #include +#include "IOKitKernelInternal.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include +#include -typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3); +extern "C" void console_suspend(); +extern "C" void console_resume(); +extern "C" void sched_override_recommended_cores_for_sleep(void); +extern "C" void sched_restore_recommended_cores_after_sleep(void); -struct iocpu_platform_action_entry -{ - queue_chain_t link; - iocpu_platform_action_t action; - int32_t priority; - void * refcon0; - void * refcon1; - struct iocpu_platform_action_entry * alloc_list; +typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, + void * param1, void * param2, void * param3, + const char * name); + +struct iocpu_platform_action_entry { + queue_chain_t link; + iocpu_platform_action_t action; + int32_t priority; + const char * name; + void * refcon0; + void * refcon1; + boolean_t callout_in_progress; + struct iocpu_platform_action_entry * alloc_list; }; typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t; -queue_head_t * -iocpu_get_platform_quiesce_queue(void); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IOLock *gIOCPUsLock; +static OSArray *gIOCPUs; +static const OSSymbol *gIOCPUStateKey; +static OSString *gIOCPUStateNames[kIOCPUStateCount]; -queue_head_t * -iocpu_get_platform_active_queue(void); +enum{ + kQueueSleep = 0, + kQueueWake = 1, + kQueueQuiesce = 2, + kQueueActive = 3, + kQueueHaltRestart = 4, + kQueuePanic = 5, + kQueueCount = 6 
+}; -void -iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, queue_head_t * init_queue); +const OSSymbol * gIOPlatformSleepActionKey; +const OSSymbol * gIOPlatformWakeActionKey; +const OSSymbol * gIOPlatformQuiesceActionKey; +const OSSymbol * gIOPlatformActiveActionKey; +const OSSymbol * gIOPlatformHaltRestartActionKey; +const OSSymbol * gIOPlatformPanicActionKey; -void -iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry); +static queue_head_t gActionQueues[kQueueCount]; +static const OSSymbol * gActionSymbols[kQueueCount]; -void -iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry); +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -kern_return_t -iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3); +static void +iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) +{ + iocpu_platform_action_entry_t * next; -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) + { + if (next->priority > entry->priority) { + queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); + return; + } + } + queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail +} -#define kBootCPUNumber 0 +static void +iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) +{ + remque(&entry->link); +} -static iocpu_platform_action_entry_t * gIOAllActionsQueue; -static queue_head_t gIOSleepActionQueue; -static queue_head_t gIOWakeActionQueue; +static kern_return_t +iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, + void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) +{ + kern_return_t ret = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; + iocpu_platform_action_entry_t * next; -static queue_head_t iocpu_quiesce_queue; -static queue_head_t iocpu_active_queue; + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) + { + uint32_t pri = (next->priority < 0) ? 
-next->priority : next->priority; + if ((pri >= first_priority) && (pri <= last_priority)) { + //kprintf("[%p]", next->action); + if (!allow_nested_callouts && !next->callout_in_progress) { + next->callout_in_progress = TRUE; + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + next->callout_in_progress = FALSE; + } else if (allow_nested_callouts) { + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + } + } + if (KERN_SUCCESS == result) { + result = ret; + } + } + return result; +} /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void -iocpu_platform_cpu_action_init(queue_head_t * quiesce_queue, __unused queue_head_t * init_queue) +extern "C" kern_return_t +IOCPURunPlatformQuiesceActions(void) { -#if 0 - enum { kNumQuiesceActions = 2 }; - static iocpu_platform_action_entry_t quiesce_actions[kNumQuiesceActions] = - { - { { NULL, NULL }, (iocpu_platform_action_t) &clean_mmu_dcache, 97000, 0, 0, NULL }, - { { NULL, NULL }, (iocpu_platform_action_t) &arm_sleep, 99000, 0, 0, NULL }, - }; - unsigned int idx; - - for (idx = 0; idx < kNumQuiesceActions; idx++) - iocpu_add_platform_action(quiesce_queue, &quiesce_actions[idx]); -#endif + assert(preemption_enabled() == false); + return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1, + NULL, NULL, NULL, TRUE); } -queue_head_t * iocpu_get_platform_quiesce_queue(void) +extern "C" kern_return_t +IOCPURunPlatformActiveActions(void) { - if (!iocpu_quiesce_queue.next) - { - queue_init(&iocpu_quiesce_queue); - queue_init(&iocpu_active_queue); - iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue); - } - return (&iocpu_quiesce_queue); + assert(preemption_enabled() == false); + return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1, + NULL, NULL, NULL, TRUE); } -queue_head_t * iocpu_get_platform_active_queue(void) +extern "C" kern_return_t +IOCPURunPlatformHaltRestartActions(uint32_t message) { - if (!iocpu_active_queue.next) - { - queue_init(&iocpu_quiesce_queue); - queue_init(&iocpu_active_queue); - iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue); - } - return (&iocpu_active_queue); + if (!gActionQueues[kQueueHaltRestart].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1, + (void *)(uintptr_t) message, NULL, NULL, TRUE); } -void iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) +extern "C" kern_return_t +IOCPURunPlatformPanicActions(uint32_t message) { - iocpu_platform_action_entry_t * next; + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t) message, NULL, NULL, FALSE); +} - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - if (next->priority > entry->priority) - { - queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); - return; + +extern "C" kern_return_t +IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len) +{ + PE_panic_save_context_t context = { + .psc_buffer = addr, + .psc_offset = offset, + .psc_length = len + }; + + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; } - } - queue_enter(queue, entry, iocpu_platform_action_entry_t *, 
link); // at tail + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE); } -void iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static kern_return_t +IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority, + void * param1, void * param2, void * param3, + const char * service_name) { - remque(&entry->link); + IOReturn ret; + IOService * service = (IOService *) refcon0; + const OSSymbol * function = (const OSSymbol *) refcon1; + + kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name); + + ret = service->callPlatformFunction(function, false, + (void *)(uintptr_t) priority, param1, param2, param3); + + return ret; } -kern_return_t -iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3) +static void +IOInstallServicePlatformAction(IOService * service, uint32_t qidx) { - kern_return_t ret = KERN_SUCCESS; - kern_return_t result = KERN_SUCCESS; - iocpu_platform_action_entry_t * next; + iocpu_platform_action_entry_t * entry; + OSNumber * num; + uint32_t priority; + const OSSymbol * key = gActionSymbols[qidx]; + queue_head_t * queue = &gActionQueues[qidx]; + bool reverse; + bool uniq; + + num = OSDynamicCast(OSNumber, service->getProperty(key)); + if (!num) { + return; + } - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - uint32_t pri = (next->priority < 0) ? -next->priority : next->priority; - if ((pri >= first_priority) && (pri <= last_priority)) - { - //kprintf("[%p]", next->action); - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3); + reverse = false; + uniq = false; + switch (qidx) { + case kQueueWake: + case kQueueActive: + reverse = true; + break; + case kQueueHaltRestart: + case kQueuePanic: + uniq = true; + break; + } + if (uniq) { + queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) + { + if (service == entry->refcon0) { + return; + } + } + } + + entry = IONew(iocpu_platform_action_entry_t, 1); + entry->action = &IOServicePlatformAction; + entry->name = service->getName(); + priority = num->unsigned32BitValue(); + if (reverse) { + entry->priority = -priority; + } else { + entry->priority = priority; } - if (KERN_SUCCESS == result) - result = ret; - } - return (result); + entry->refcon0 = service; + entry->refcon1 = (void *) key; + entry->callout_in_progress = FALSE; + + iocpu_add_platform_action(queue, entry); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -extern "C" kern_return_t -IOCPURunPlatformQuiesceActions(void) +void +IOCPUInitialize(void) { - return (iocpu_run_platform_actions(iocpu_get_platform_quiesce_queue(), 0, 0U-1, - NULL, NULL, NULL)); + gIOCPUsLock = IOLockAlloc(); + gIOCPUs = OSArray::withCapacity(1); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + queue_init(&gActionQueues[qidx]); + } + + gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); + + gIOCPUStateNames[kIOCPUStateUnregistered] = + OSString::withCStringNoCopy("Unregistered"); + gIOCPUStateNames[kIOCPUStateUninitalized] = + OSString::withCStringNoCopy("Uninitalized"); + gIOCPUStateNames[kIOCPUStateStopped] = + OSString::withCStringNoCopy("Stopped"); + gIOCPUStateNames[kIOCPUStateRunning] = + OSString::withCStringNoCopy("Running"); + + gIOPlatformSleepActionKey = 
gActionSymbols[kQueueSleep] + = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); + gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] + = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); + gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] + = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); + gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] + = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); + gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] + = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); + gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] + = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); } -extern "C" kern_return_t -IOCPURunPlatformActiveActions(void) +IOReturn +IOInstallServicePlatformActions(IOService * service) { - return (iocpu_run_platform_actions(iocpu_get_platform_active_queue(), 0, 0U-1, - NULL, NULL, NULL)); + IOLockLock(gIOCPUsLock); + + IOInstallServicePlatformAction(service, kQueueHaltRestart); + IOInstallServicePlatformAction(service, kQueuePanic); + + IOLockUnlock(gIOCPUsLock); + + return kIOReturnSuccess; } -static kern_return_t -IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3) +IOReturn +IORemoveServicePlatformActions(IOService * service) { - IOReturn ret; - IOService * service = (IOService *) refcon0; - const OSSymbol * function = (const OSSymbol *) refcon1; - - kprintf("%s -> %s\n", function->getCStringNoCopy(), service->getName()); + iocpu_platform_action_entry_t * entry; + iocpu_platform_action_entry_t * next; + + IOLockLock(gIOCPUsLock); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + next = (typeof(entry))queue_first(&gActionQueues[qidx]); + while (!queue_end(&gActionQueues[qidx], &next->link)) { + entry = next; + next = (typeof(entry))queue_next(&entry->link); + if (service == entry->refcon0) { + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } + } + } - ret = service->callPlatformFunction(function, false, - (void *) priority, param1, param2, param3); + IOLockUnlock(gIOCPUsLock); - return (ret); + return kIOReturnSuccess; } -static void -IOInstallServicePlatformAction(IOService * service, - const OSSymbol * key, queue_head_t * queue, - bool reverse) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +kern_return_t +PE_cpu_start(cpu_id_t target, + vm_offset_t start_paddr, vm_offset_t arg_paddr) { - OSNumber * num; - iocpu_platform_action_entry_t * entry; - uint32_t priority; + IOCPU *targetCPU = (IOCPU *)target; - num = OSDynamicCast(OSNumber, service->getProperty(key)); - if (!num) - return; + if (targetCPU == NULL) { + return KERN_FAILURE; + } + return targetCPU->startCPU(start_paddr, arg_paddr); +} - entry = IONew(iocpu_platform_action_entry_t, 1); - entry->action = &IOServicePlatformAction; - priority = num->unsigned32BitValue(); - if (reverse) - entry->priority = -priority; - else - entry->priority = priority; - entry->refcon0 = service; - entry->refcon1 = (void *) key; +void +PE_cpu_halt(cpu_id_t target) +{ + IOCPU *targetCPU = (IOCPU *)target; - iocpu_add_platform_action(queue, entry); - entry->alloc_list = gIOAllActionsQueue; - gIOAllActionsQueue = entry; + targetCPU->haltCPU(); } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +void +PE_cpu_signal(cpu_id_t source, cpu_id_t target) +{ + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; 
+ + sourceCPU->signalCPU(targetCPU); +} -kern_return_t PE_cpu_start(cpu_id_t target, - vm_offset_t start_paddr, vm_offset_t arg_paddr) +void +PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - - if (targetCPU == 0) return KERN_FAILURE; - return targetCPU->startCPU(start_paddr, arg_paddr); + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; + + sourceCPU->signalCPUDeferred(targetCPU); } -void PE_cpu_halt(cpu_id_t target) +void +PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - - if (targetCPU) targetCPU->haltCPU(); + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; + + sourceCPU->signalCPUCancel(targetCPU); } -void PE_cpu_signal(cpu_id_t source, cpu_id_t target) +void +PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) { - IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - - if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU); + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU == NULL) { + panic("%s: invalid target CPU %p", __func__, target); + } + + targetCPU->initCPU(bootb); +#if defined(__arm__) || defined(__arm64__) + if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) { + ml_set_is_quiescing(false); + } +#endif /* defined(__arm__) || defined(__arm64__) */ } -void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) +void +PE_cpu_machine_quiesce(cpu_id_t target) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - - if (targetCPU) targetCPU->initCPU(bootb); + IOCPU *targetCPU = (IOCPU*)target; +#if defined(__arm__) || defined(__arm64__) + if (targetCPU->getCPUNumber() == (UInt32)master_cpu) { + ml_set_is_quiescing(true); + } +#endif /* defined(__arm__) || defined(__arm64__) */ + targetCPU->quiesceCPU(); } -void PE_cpu_machine_quiesce(cpu_id_t target) +#if defined(__arm__) || defined(__arm64__) +static perfmon_interrupt_handler_func pmi_handler = NULL; + +kern_return_t +PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + pmi_handler = handler; - if (targetCPU) targetCPU->quiesceCPU(); + return KERN_SUCCESS; } +void +PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable) +{ + IOCPU *targetCPU = (IOCPU*)target; + + if (targetCPU == nullptr) { + return; + } + + if (enable) { + targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, NULL); + targetCPU->getProvider()->enableInterrupt(1); + } else { + targetCPU->getProvider()->disableInterrupt(1); + } +} +#endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -293,271 +473,341 @@ OSMetaClassDefineReservedUnused(IOCPU, 7); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static OSArray *gIOCPUs; -static const OSSymbol *gIOCPUStateKey; -static OSString *gIOCPUStateNames[kIOCPUStateCount]; +void +IOCPUSleepKernel(void) +{ +#if defined(__x86_64__) + extern IOCPU *currentShutdownTarget; +#endif + long cnt, numCPUs; + IOCPU *target; + IOCPU *bootCPU = NULL; + IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); + + kprintf("IOCPUSleepKernel\n"); +#if defined(__arm64__) + sched_override_recommended_cores_for_sleep(); +#endif + + IORegistryIterator * iter; + OSOrderedSet * all; + IOService * service; + + 
rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); + + iter = IORegistryIterator::iterateOver( gIOServicePlane, + kIORegistryIterateRecursively ); + if (iter) { + all = NULL; + do{ + if (all) { + all->release(); + } + all = iter->iterateAll(); + }while (!iter->isValid()); + iter->release(); + + if (all) { + while ((service = (IOService *) all->getFirstObject())) { + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { + IOInstallServicePlatformAction(service, qidx); + } + all->removeObject(service); + } + all->release(); + } + } + + iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1, + NULL, NULL, NULL, TRUE); + + rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); + + numCPUs = gIOCPUs->getCount(); +#if defined(__x86_64__) + currentShutdownTarget = NULL; +#endif + + integer_t old_pri; + thread_t self = current_thread(); + + /* + * We need to boost this thread's priority to the maximum kernel priority to + * ensure we can urgently preempt ANY thread currently executing on the + * target CPU. Note that realtime threads have their own mechanism to eventually + * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long. + */ + old_pri = thread_kern_get_pri(self); + thread_kern_set_pri(self, thread_kern_get_kernel_maxpri()); + + // Sleep the CPUs. + cnt = numCPUs; + while (cnt--) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + + // We make certain that the bootCPU is the last to sleep + // We'll skip it for now, and halt it after finishing the + // non-boot CPU's. + if (target->getCPUNumber() == (UInt32)master_cpu) { + bootCPU = target; + } else if (target->getCPUState() == kIOCPUStateRunning) { +#if defined(__x86_64__) + currentShutdownTarget = target; +#endif + target->haltCPU(); + } + } -void IOCPUSleepKernel(void) + assert(bootCPU != NULL); + assert(cpu_number() == master_cpu); + + console_suspend(); + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver ); + rootDomain->stop_watchdog_timer(); + + /* + * Now sleep the boot CPU, including calling the kQueueQuiesce actions. + * The system sleeps here. + */ + + bootCPU->haltCPU(); + + /* + * The system is now coming back from sleep on the boot CPU. + * The kQueueActive actions have already been called. + */ + + rootDomain->start_watchdog_timer(); + rootDomain->tracePoint( kIOPMTracePointWakePlatformActions ); + + console_resume(); + + iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1, + NULL, NULL, NULL, TRUE); + + iocpu_platform_action_entry_t * entry; + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { + while (!(queue_empty(&gActionQueues[qidx]))) { + entry = (typeof(entry))queue_first(&gActionQueues[qidx]); + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } + } + + rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); + + // Wake the other CPUs. + for (cnt = 0; cnt < numCPUs; cnt++) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + + // Skip the already-woken boot CPU. 
+ if (target->getCPUNumber() != (UInt32)master_cpu) { + if (target->getCPUState() == kIOCPUStateRunning) { + panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber())); + } + + if (target->getCPUState() == kIOCPUStateStopped) { + processor_start(target->getMachProcessor()); + } + } + } + +#if defined(__arm64__) + sched_restore_recommended_cores_after_sleep(); +#endif + + thread_kern_set_pri(self, old_pri); +} + +bool +IOCPU::start(IOService *provider) { - long cnt, numCPUs; - IOCPU *target; - IOCPU *bootCPU = NULL; - - kprintf("IOCPUSleepKernel\n"); + OSData *busFrequency, *cpuFrequency, *timebaseFrequency; - OSIterator * iter; - IOService * service; + if (!super::start(provider)) { + return false; + } - queue_init(&gIOSleepActionQueue); - queue_init(&gIOWakeActionQueue); + _cpuGroup = gIOCPUs; + cpuNub = provider; - iter = IORegistryIterator::iterateOver( gIOServicePlane, - kIORegistryIterateRecursively ); - if( iter) - { - do - { - iter->reset(); - while((service = (IOService *) iter->getNextObject())) - { - IOInstallServicePlatformAction(service, gIOPlatformSleepActionKey, &gIOSleepActionQueue, false); - IOInstallServicePlatformAction(service, gIOPlatformWakeActionKey, &gIOWakeActionQueue, true); - IOInstallServicePlatformAction(service, gIOPlatformQuiesceActionKey, iocpu_get_platform_quiesce_queue(), false); - IOInstallServicePlatformAction(service, gIOPlatformActiveActionKey, iocpu_get_platform_active_queue(), true); - } - } - while( !service && !iter->isValid()); - iter->release(); - } - - iocpu_run_platform_actions(&gIOSleepActionQueue, 0, 0U-1, - NULL, NULL, NULL); - - numCPUs = gIOCPUs->getCount(); - // Sleep the CPUs. - cnt = numCPUs; - while (cnt--) - { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - - // We make certain that the bootCPU is the last to sleep - // We'll skip it for now, and halt it after finishing the - // non-boot CPU's. - if (target->getCPUNumber() == kBootCPUNumber) - { - bootCPU = target; - } else if (target->getCPUState() == kIOCPUStateRunning) - { - target->haltCPU(); - } - } - - // Now sleep the boot CPU. - if (bootCPU) - bootCPU->haltCPU(); - - iocpu_run_platform_actions(&gIOWakeActionQueue, 0, 0U-1, - NULL, NULL, NULL); - - iocpu_platform_action_entry_t * entry; - while ((entry = gIOAllActionsQueue)) - { - gIOAllActionsQueue = entry->alloc_list; - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); - } - - if (!queue_empty(&gIOSleepActionQueue)) - panic("gIOSleepActionQueue"); - if (!queue_empty(&gIOWakeActionQueue)) - panic("gIOWakeActionQueue"); - - // Wake the other CPUs. - for (cnt = 0; cnt < numCPUs; cnt++) - { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - - // Skip the already-woken boot CPU. 
- if ((target->getCPUNumber() != kBootCPUNumber) - && (target->getCPUState() == kIOCPUStateStopped)) - { - processor_start(target->getMachProcessor()); - } - } -} - -void IOCPU::initCPUs(void) -{ - if (gIOCPUs == 0) { - gIOCPUs = OSArray::withCapacity(1); - - gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); - - gIOCPUStateNames[kIOCPUStateUnregistered] = - OSString::withCStringNoCopy("Unregistered"); - gIOCPUStateNames[kIOCPUStateUninitalized] = - OSString::withCStringNoCopy("Uninitalized"); - gIOCPUStateNames[kIOCPUStateStopped] = - OSString::withCStringNoCopy("Stopped"); - gIOCPUStateNames[kIOCPUStateRunning] = - OSString::withCStringNoCopy("Running"); - } -} - -bool IOCPU::start(IOService *provider) -{ - OSData *busFrequency, *cpuFrequency, *timebaseFrequency; - - if (!super::start(provider)) return false; - - initCPUs(); - - _cpuGroup = gIOCPUs; - cpuNub = provider; - - gIOCPUs->setObject(this); - - // Correct the bus, cpu and timebase frequencies in the device tree. - if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); - } else { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); - } - provider->setProperty("bus-frequency", busFrequency); - busFrequency->release(); - - if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); - } else { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); - } - provider->setProperty("clock-frequency", cpuFrequency); - cpuFrequency->release(); - - timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); - provider->setProperty("timebase-frequency", timebaseFrequency); - timebaseFrequency->release(); - - super::setProperty("IOCPUID", (uintptr_t)this, sizeof(uintptr_t)*8); - - setCPUNumber(0); - setCPUState(kIOCPUStateUnregistered); - - return true; -} - -OSObject *IOCPU::getProperty(const OSSymbol *aKey) const -{ - if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState]; - - return super::getProperty(aKey); -} - -bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject) -{ - OSString *stateStr; - - if (aKey == gIOCPUStateKey) { - stateStr = OSDynamicCast(OSString, anObject); - if (stateStr == 0) return false; - - if (_cpuNumber == 0) return false; - - if (stateStr->isEqualTo("running")) { - if (_cpuState == kIOCPUStateStopped) { - processor_start(machProcessor); - } else if (_cpuState != kIOCPUStateRunning) { - return false; - } - } else if (stateStr->isEqualTo("stopped")) { - if (_cpuState == kIOCPUStateRunning) { - haltCPU(); - } else if (_cpuState != kIOCPUStateStopped) { - return false; - } - } else return false; - - return true; - } - - return super::setProperty(aKey, anObject); -} - -bool IOCPU::serializeProperties(OSSerialize *serialize) const + IOLockLock(gIOCPUsLock); + gIOCPUs->setObject(this); + IOLockUnlock(gIOCPUsLock); + + // Correct the bus, cpu and timebase frequencies in the device tree. 
+ if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { + busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + } else { + busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); + } + provider->setProperty("bus-frequency", busFrequency); + busFrequency->release(); + + if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { + cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); + } else { + cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); + } + provider->setProperty("clock-frequency", cpuFrequency); + cpuFrequency->release(); + + timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); + provider->setProperty("timebase-frequency", timebaseFrequency); + timebaseFrequency->release(); + + super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8); + + setCPUNumber(0); + setCPUState(kIOCPUStateUnregistered); + + return true; +} + +void +IOCPU::detach(IOService *provider) +{ + super::detach(provider); + IOLockLock(gIOCPUsLock); + unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0); + if (index != (unsigned int)-1) { + gIOCPUs->removeObject(index); + } + IOLockUnlock(gIOCPUsLock); +} + +OSObject * +IOCPU::getProperty(const OSSymbol *aKey) const +{ + if (aKey == gIOCPUStateKey) { + return gIOCPUStateNames[_cpuState]; + } + + return super::getProperty(aKey); +} + +bool +IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject) +{ + if (aKey == gIOCPUStateKey) { + return false; + } + + return super::setProperty(aKey, anObject); +} + +bool +IOCPU::serializeProperties(OSSerialize *serialize) const { bool result; OSDictionary *dict = dictionaryWithProperties(); + if (!dict) { + return false; + } dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]); result = dict->serialize(serialize); - dict->release(); + dict->release(); return result; } -IOReturn IOCPU::setProperties(OSObject *properties) +IOReturn +IOCPU::setProperties(OSObject *properties) { - OSDictionary *dict = OSDynamicCast(OSDictionary, properties); - OSString *stateStr; - IOReturn result; - - if (dict == 0) return kIOReturnUnsupported; - - stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); - if (stateStr != 0) { - result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); - if (result != kIOReturnSuccess) return result; - - if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess; - - return kIOReturnUnsupported; - } - - return kIOReturnUnsupported; + OSDictionary *dict = OSDynamicCast(OSDictionary, properties); + OSString *stateStr; + IOReturn result; + + if (dict == NULL) { + return kIOReturnUnsupported; + } + + stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); + if (stateStr != NULL) { + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) { + return result; + } + + if (setProperty(gIOCPUStateKey, stateStr)) { + return kIOReturnSuccess; + } + + return kIOReturnUnsupported; + } + + return kIOReturnUnsupported; +} + +void +IOCPU::signalCPU(IOCPU */*target*/) +{ +} + +void +IOCPU::signalCPUDeferred(IOCPU *target) +{ + // Our CPU may not support deferred IPIs, + // so send a regular IPI by default + signalCPU(target); } -void IOCPU::signalCPU(IOCPU */*target*/) +void +IOCPU::signalCPUCancel(IOCPU */*target*/) { + // Meant to cancel signals 
sent by + // signalCPUDeferred; unsupported + // by default } -void IOCPU::enableCPUTimeBase(bool /*enable*/) +void +IOCPU::enableCPUTimeBase(bool /*enable*/) { } -UInt32 IOCPU::getCPUNumber(void) +UInt32 +IOCPU::getCPUNumber(void) { - return _cpuNumber; + return _cpuNumber; } -void IOCPU::setCPUNumber(UInt32 cpuNumber) +void +IOCPU::setCPUNumber(UInt32 cpuNumber) { - _cpuNumber = cpuNumber; - super::setProperty("IOCPUNumber", _cpuNumber, 32); + _cpuNumber = cpuNumber; + super::setProperty("IOCPUNumber", _cpuNumber, 32); } -UInt32 IOCPU::getCPUState(void) +UInt32 +IOCPU::getCPUState(void) { - return _cpuState; + return _cpuState; } -void IOCPU::setCPUState(UInt32 cpuState) +void +IOCPU::setCPUState(UInt32 cpuState) { - if (cpuState < kIOCPUStateCount) { - _cpuState = cpuState; - } + if (cpuState < kIOCPUStateCount) { + _cpuState = cpuState; + } } -OSArray *IOCPU::getCPUGroup(void) +OSArray * +IOCPU::getCPUGroup(void) { - return _cpuGroup; + return _cpuGroup; } -UInt32 IOCPU::getCPUGroupSize(void) +UInt32 +IOCPU::getCPUGroupSize(void) { - return _cpuGroup->getCount(); + return _cpuGroup->getCount(); } -processor_t IOCPU::getMachProcessor(void) +processor_t +IOCPU::getMachProcessor(void) { - return machProcessor; + return machProcessor; } @@ -568,7 +818,6 @@ processor_t IOCPU::getMachProcessor(void) OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController); -OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2); OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3); @@ -579,183 +828,235 @@ OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +IOReturn +IOCPUInterruptController::initCPUInterruptController(int sources) +{ + return initCPUInterruptController(sources, sources); +} + +IOReturn +IOCPUInterruptController::initCPUInterruptController(int sources, int cpus) +{ + int cnt; + + if (!super::init()) { + return kIOReturnInvalid; + } + + numSources = sources; + numCPUs = cpus; + + vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector)); + if (vectors == NULL) { + return kIOReturnNoMemory; + } + bzero(vectors, numSources * sizeof(IOInterruptVector)); + + // Allocate a lock for each vector + for (cnt = 0; cnt < numSources; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numSources; cnt++) { + if (vectors[cnt].interruptLock != NULL) { + IOLockFree(vectors[cnt].interruptLock); + } + } + return kIOReturnNoResources; + } + } + + ml_init_max_cpus(numSources); + +#if KPERF + /* + * kperf allocates based on the number of CPUs and requires them to all be + * accounted for. 
+ */ + boolean_t found_kperf = FALSE; + char kperf_config_str[64]; + found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str)); + if (found_kperf && kperf_config_str[0] != '\0') { + kperf_kernel_configure(kperf_config_str); + } +#endif /* KPERF */ + + return kIOReturnSuccess; +} + +void +IOCPUInterruptController::registerCPUInterruptController(void) +{ + registerService(); + + getPlatform()->registerInterruptController(gPlatformInterruptControllerName, + this); +} + +void +IOCPUInterruptController::setCPUInterruptProperties(IOService *service) +{ + int cnt; + OSArray *controller; + OSArray *specifier; + OSData *tmpData; + long tmpLong; + + if ((service->getProperty(gIOInterruptControllersKey) != NULL) && + (service->getProperty(gIOInterruptSpecifiersKey) != NULL)) { + return; + } + + // Create the interrupt specifer array. + specifier = OSArray::withCapacity(numSources); + for (cnt = 0; cnt < numSources; cnt++) { + tmpLong = cnt; + tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); + specifier->setObject(tmpData); + tmpData->release(); + } + ; + + // Create the interrupt controller array. + controller = OSArray::withCapacity(numSources); + for (cnt = 0; cnt < numSources; cnt++) { + controller->setObject(gPlatformInterruptControllerName); + } + + // Put the two arrays into the property table. + service->setProperty(gIOInterruptControllersKey, controller); + service->setProperty(gIOInterruptSpecifiersKey, specifier); + controller->release(); + specifier->release(); +} -IOReturn IOCPUInterruptController::initCPUInterruptController(int sources) -{ - int cnt; - - if (!super::init()) return kIOReturnInvalid; - - numCPUs = sources; - - cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *)); - if (cpus == 0) return kIOReturnNoMemory; - bzero(cpus, numCPUs * sizeof(IOCPU *)); - - vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector)); - if (vectors == 0) return kIOReturnNoMemory; - bzero(vectors, numCPUs * sizeof(IOInterruptVector)); - - // Allocate locks for the - for (cnt = 0; cnt < numCPUs; cnt++) { - vectors[cnt].interruptLock = IOLockAlloc(); - if (vectors[cnt].interruptLock == NULL) { - for (cnt = 0; cnt < numCPUs; cnt++) { - if (vectors[cnt].interruptLock != NULL) - IOLockFree(vectors[cnt].interruptLock); - } - return kIOReturnNoResources; - } - } - - ml_init_max_cpus(numCPUs); - - return kIOReturnSuccess; -} - -void IOCPUInterruptController::registerCPUInterruptController(void) -{ - registerService(); - - getPlatform()->registerInterruptController(gPlatformInterruptControllerName, - this); -} - -void IOCPUInterruptController::setCPUInterruptProperties(IOService *service) -{ - int cnt; - OSArray *controller; - OSArray *specifier; - OSData *tmpData; - long tmpLong; - - if ((service->getProperty(gIOInterruptControllersKey) != 0) && - (service->getProperty(gIOInterruptSpecifiersKey) != 0)) - return; - - // Create the interrupt specifer array. - specifier = OSArray::withCapacity(numCPUs); - for (cnt = 0; cnt < numCPUs; cnt++) { - tmpLong = cnt; - tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); - specifier->setObject(tmpData); - tmpData->release(); - }; - - // Create the interrupt controller array. - controller = OSArray::withCapacity(numCPUs); - for (cnt = 0; cnt < numCPUs; cnt++) { - controller->setObject(gPlatformInterruptControllerName); - } - - // Put the two arrays into the property table. 
- service->setProperty(gIOInterruptControllersKey, controller); - service->setProperty(gIOInterruptSpecifiersKey, specifier); - controller->release(); - specifier->release(); -} - -void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) +void +IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) { IOInterruptHandler handler = OSMemberFunctionCast( IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt); - ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0); - - enabledCPUs++; - - if (enabledCPUs == numCPUs) thread_wakeup(this); -} - -IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon) -{ - IOInterruptVector *vector; - - if (source >= numCPUs) return kIOReturnNoResources; - - vector = &vectors[source]; - - // Get the lock for this vector. - IOTakeLock(vector->interruptLock); - - // Make sure the vector is not in use. - if (vector->interruptRegistered) { - IOUnlock(vector->interruptLock); - return kIOReturnNoResources; - } - - // Fill in vector with the client's info. - vector->handler = handler; - vector->nub = nub; - vector->source = source; - vector->target = target; - vector->refCon = refCon; - - // Get the vector ready. It starts hard disabled. - vector->interruptDisabledHard = 1; - vector->interruptDisabledSoft = 1; - vector->interruptRegistered = 1; - - IOUnlock(vector->interruptLock); - - if (enabledCPUs != numCPUs) { - assert_wait(this, THREAD_UNINT); - thread_block(THREAD_CONTINUE_NULL); - } - - return kIOReturnSuccess; -} - -IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/, - int /*source*/, - int *interruptType) -{ - if (interruptType == 0) return kIOReturnBadArgument; - - *interruptType = kIOInterruptTypeLevel; - - return kIOReturnSuccess; -} - -IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/, - int /*source*/) + assert(numCPUs > 0); + + ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL); + + IOTakeLock(vectors[0].interruptLock); + ++enabledCPUs; + + if (enabledCPUs == numCPUs) { + IOService::cpusRunning(); + thread_wakeup(this); + } + IOUnlock(vectors[0].interruptLock); +} + +IOReturn +IOCPUInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + IOInterruptVector *vector; + + // Interrupts must be enabled, as this can allocate memory. + assert(ml_get_interrupts_enabled() == TRUE); + + if (source >= numSources) { + return kIOReturnNoResources; + } + + vector = &vectors[source]; + + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); + + // Make sure the vector is not in use. + if (vector->interruptRegistered) { + IOUnlock(vector->interruptLock); + return kIOReturnNoResources; + } + + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; + + // Get the vector ready. It starts hard disabled. 
+ vector->interruptDisabledHard = 1; + vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; + + IOUnlock(vector->interruptLock); + + IOTakeLock(vectors[0].interruptLock); + if (enabledCPUs != numCPUs) { + assert_wait(this, THREAD_UNINT); + IOUnlock(vectors[0].interruptLock); + thread_block(THREAD_CONTINUE_NULL); + } else { + IOUnlock(vectors[0].interruptLock); + } + + return kIOReturnSuccess; +} + +IOReturn +IOCPUInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) +{ + if (interruptType == NULL) { + return kIOReturnBadArgument; + } + + *interruptType = kIOInterruptTypeLevel; + + return kIOReturnSuccess; +} + +IOReturn +IOCPUInterruptController::enableInterrupt(IOService */*nub*/, + int /*source*/) { // ml_set_interrupts_enabled(true); - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/, - int /*source*/) +IOReturn +IOCPUInterruptController::disableInterrupt(IOService */*nub*/, + int /*source*/) { // ml_set_interrupts_enabled(false); - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/, - int /*source*/) +IOReturn +IOCPUInterruptController::causeInterrupt(IOService */*nub*/, + int /*source*/) { - ml_cause_interrupt(); - return kIOReturnSuccess; + ml_cause_interrupt(); + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/, - IOService */*nub*/, - int source) +IOReturn +IOCPUInterruptController::handleInterrupt(void */*refCon*/, + IOService */*nub*/, + int source) { - IOInterruptVector *vector; - - vector = &vectors[source]; - - if (!vector->interruptRegistered) return kIOReturnInvalid; - - vector->handler(vector->target, vector->refCon, - vector->nub, vector->source); - - return kIOReturnSuccess; + IOInterruptVector *vector; + + vector = &vectors[source]; + + if (!vector->interruptRegistered) { + return kIOReturnInvalid; + } + + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
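
The change above replaces the old gIOSleepActionQueue/gIOWakeActionQueue globals with the gActionQueues[] table: IOCPUSleepKernel() now scans the service plane for OSNumber properties named by the kIOPlatform*ActionKey symbols, queues an entry per matching service, and dispatches each entry through IOServicePlatformAction(), which calls the service's callPlatformFunction() with the queue priority in param1. The sketch below shows how a driver could opt into that mechanism; it is illustrative only and not part of this diff — the class name com_example_SleepActionClient, the 50000 priority, and the IOLog bodies are assumptions.

#include <IOKit/IOService.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOLib.h>

// Hypothetical client driver; not part of IOCPU.cpp.
class com_example_SleepActionClient : public IOService
{
    OSDeclareDefaultStructors(com_example_SleepActionClient)

public:
    bool start(IOService * provider) APPLE_KEXT_OVERRIDE
    {
        if (!IOService::start(provider)) {
            return false;
        }
        // Publishing these OSNumber properties is what makes
        // IOInstallServicePlatformAction() add this service to the
        // kQueueSleep/kQueueWake queues walked by IOCPUSleepKernel().
        // 50000 is an arbitrary example priority; wake-queue entries are
        // stored with negated priority so they run in reverse order.
        setProperty(kIOPlatformSleepActionKey, 50000, 32);
        setProperty(kIOPlatformWakeActionKey, 50000, 32);
        return true;
    }

    // IOServicePlatformAction() invokes each queued entry as
    // callPlatformFunction(key, false, (void *)priority, param1, param2, param3);
    // for the sleep and wake queues the trailing parameters are NULL.
    IOReturn callPlatformFunction(const OSSymbol * functionName,
        bool waitForFunction, void * param1, void * param2,
        void * param3, void * param4) APPLE_KEXT_OVERRIDE
    {
        if (functionName && functionName->isEqualTo(kIOPlatformSleepActionKey)) {
            IOLog("example: preparing hardware for system sleep\n");
            return kIOReturnSuccess;
        }
        if (functionName && functionName->isEqualTo(kIOPlatformWakeActionKey)) {
            IOLog("example: restoring hardware state after wake\n");
            return kIOReturnSuccess;
        }
        return IOService::callPlatformFunction(functionName, waitForFunction,
                   param1, param2, param3, param4);
    }
};

OSDefineMetaClassAndStructors(com_example_SleepActionClient, IOService)

Note that IOCPUSleepKernel() drains and IODelete()s the sleep-through-active queue entries after wake, so the property is re-read from the registry on every sleep cycle; halt/restart and panic actions, by contrast, are installed once via IOInstallServicePlatformActions() and persist.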