/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>

extern "C" void console_suspend();
extern "C" void console_resume();
extern "C" void sched_override_recommended_cores_for_sleep(void);
extern "C" void sched_restore_recommended_cores_after_sleep(void);
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
                                                  void * param1, void * param2, void * param3,
                                                  const char * name);
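
/*
 * Each registered platform action is kept as one of these entries on a
 * priority-ordered queue (see gActionQueues below). For actions registered
 * on behalf of an IOService, refcon0 carries the service and refcon1 the
 * property key it was registered under.
 */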
struct iocpu_platform_action_entry
{
    queue_chain_t                        link;
    iocpu_platform_action_t              action;
    int32_t                              priority;
    const char *                         name;
    void *                               refcon0;
    void *                               refcon1;
    boolean_t                            callout_in_progress;
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOLock          *gIOCPUsLock;
static OSArray         *gIOCPUs;
static const OSSymbol  *gIOCPUStateKey;
static OSString        *gIOCPUStateNames[kIOCPUStateCount];
enum
{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};
const OSSymbol *        gIOPlatformSleepActionKey;
const OSSymbol *        gIOPlatformWakeActionKey;
const OSSymbol *        gIOPlatformQuiesceActionKey;
const OSSymbol *        gIOPlatformActiveActionKey;
const OSSymbol *        gIOPlatformHaltRestartActionKey;
const OSSymbol *        gIOPlatformPanicActionKey;

static queue_head_t     gActionQueues[kQueueCount];
static const OSSymbol * gActionSymbols[kQueueCount];

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
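
// Insert an entry into a queue kept in ascending priority order; entries of
// equal priority land after existing ones, so registration order is preserved.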
static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority)
        {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link);    // at tail
}
static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}
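
/*
 * Run every action on a queue whose absolute priority falls within
 * [first_priority, last_priority], in queue order, passing the priority and
 * params through to the callback. callout_in_progress prevents re-entering an
 * action when nested callouts are not allowed; the first failing return code
 * is the one reported to the caller.
 */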
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
                           void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t                   ret = KERN_SUCCESS;
    kern_return_t                   result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority))
        {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress)
            {
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            }
            else if (allow_nested_callouts)
            {
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
            }
        }
        if (KERN_SUCCESS == result)
            result = ret;
    }

    return (result);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
                                       NULL, NULL, NULL, TRUE));
}
extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
                                       NULL, NULL, NULL, TRUE));
}
extern "C" kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message)
{
    if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL, TRUE));
}
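
// The panic queues below are run with allow_nested_callouts == FALSE so that a
// panic raised inside a panic action cannot recurse into the same action.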
extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL, FALSE));
}
extern "C" kern_return_t
IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
{
    PE_panic_save_context_t context = {
        .psc_buffer = addr,
        .psc_offset = offset,
        .psc_length = len
    };

    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
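
/*
 * Generic trampoline used for actions registered by an IOService: refcon0 is
 * the service and refcon1 the OSSymbol naming the platform function to call.
 */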
static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
                        void * param1, void * param2, void * param3,
                        const char * service_name)
{
    IOReturn         ret;
    IOService *      service  = (IOService *)      refcon0;
    const OSSymbol * function = (const OSSymbol *) refcon1;

    kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

    ret = service->callPlatformFunction(function, false,
                                        (void *)(uintptr_t) priority, param1, param2, param3);

    return (ret);
}
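
/*
 * If the service publishes an OSNumber property under the key for this queue
 * (e.g. kIOPlatformSleepActionKey), register a callout for it at that
 * priority. A driver opts in simply by setting the property; roughly
 * (hypothetical driver-side sketch, using the numeric setProperty overload):
 *
 *     setProperty(kIOPlatformSleepActionKey, 50ULL, 32);
 *
 * Wake/active actions are stored with negated priority so they run in the
 * opposite order from sleep actions; halt-restart and panic actions are
 * registered at most once per service.
 */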
static void
IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
    iocpu_platform_action_entry_t * entry;
    OSNumber *       num;
    uint32_t         priority;
    const OSSymbol * key   = gActionSymbols[qidx];
    queue_head_t *   queue = &gActionQueues[qidx];
    bool             reverse;
    bool             uniq;

    num = OSDynamicCast(OSNumber, service->getProperty(key));
    if (!num) return;

    reverse = false;
    uniq    = false;
    switch (qidx)
    {
        case kQueueWake:
        case kQueueActive:
            reverse = true;
            break;
        case kQueueHaltRestart:
        case kQueuePanic:
            uniq = true;
            break;
    }

    if (uniq)
    {
        queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
        {
            if (service == entry->refcon0) return;
        }
    }

    entry = IONew(iocpu_platform_action_entry_t, 1);
    entry->action = &IOServicePlatformAction;
    entry->name = service->getName();
    priority = num->unsigned32BitValue();
    if (reverse)
        entry->priority = -priority;
    else
        entry->priority = priority;
    entry->refcon0 = service;
    entry->refcon1 = (void *) key;
    entry->callout_in_progress = FALSE;

    iocpu_add_platform_action(queue, entry);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
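
// Sets up the global CPU list, the per-queue action heads, and the OSSymbols
// used to look up the platform action properties on services.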
void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs     = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    gIOPlatformSleepActionKey       = gActionSymbols[kQueueSleep]
        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey        = gActionSymbols[kQueueWake]
        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey     = gActionSymbols[kQueueQuiesce]
        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey      = gActionSymbols[kQueueActive]
        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey       = gActionSymbols[kQueuePanic]
        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
IOReturn
IOInstallServicePlatformActions(IOService * service)
{
    IOLockLock(gIOCPUsLock);

    IOInstallServicePlatformAction(service, kQueueHaltRestart);
    IOInstallServicePlatformAction(service, kQueuePanic);

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link))
        {
            entry = next;
            next = (typeof(entry)) queue_next(&entry->link);
            if (service == entry->refcon0)
            {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t PE_cpu_start(cpu_id_t target,
                           vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
    IOCPU *targetCPU = (IOCPU *)target;

    if (targetCPU == NULL) return KERN_FAILURE;
    return targetCPU->startCPU(start_paddr, arg_paddr);
}

void PE_cpu_halt(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU *)target;

    targetCPU->haltCPU();
}

void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPU(targetCPU);
}

void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUDeferred(targetCPU);
}

void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUCancel(targetCPU);
}
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == NULL)
        panic("%s: invalid target CPU %p", __func__, target);

    targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
    if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
#endif /* defined(__arm__) || defined(__arm64__) */
}

void PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU *)target;
#if defined(__arm__) || defined(__arm64__)
    if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
#endif /* defined(__arm__) || defined(__arm64__) */
    targetCPU->quiesceCPU();
}
#if defined(__arm__) || defined(__arm64__)
static perfmon_interrupt_handler_func pmi_handler = 0;

kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = (IOCPU *)target;

    if (targetCPU == nullptr) {
        return;
    }

    if (enable) {
        targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
        targetCPU->getProvider()->enableInterrupt(1);
    } else {
        targetCPU->getProvider()->disableInterrupt(1);
    }
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
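
/*
 * System sleep entry point for the CPU layer: gather the sleep/wake/quiesce/
 * active action properties from the registry, run the sleep actions, halt the
 * secondary CPUs, halt the boot CPU last, then on wake run the wake actions,
 * tear down the per-sleep action queues, and restart the stopped CPUs.
 */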
void IOCPUSleepKernel(void)
{
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain  *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");
#if defined(__arm64__)
    sched_override_recommended_cores_for_sleep();
#endif

    IORegistryIterator * iter;
    OSOrderedSet *       all;
    IOService *          service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
                                            kIORegistryIterateRecursively );
    if (iter)
    {
        all = 0;
        do
        {
            if (all)
                all->release();
            all = iter->iterateAll();
        }
        while (!iter->isValid());
        iter->release();

        if (all)
        {
            while ((service = (IOService *) all->getFirstObject()))
            {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
                {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // We make certain that the bootCPU is the last to sleep
        // We'll skip it for now, and halt it after finishing the
        // non-boot CPUs.
        if (target->getCPUNumber() == (UInt32)master_cpu)
        {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning)
        {
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
    rootDomain->stop_watchdog_timer();

    // Now sleep the boot CPU.
    bootCPU->haltCPU();

    rootDomain->start_watchdog_timer();
    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
    {
        while (!(queue_empty(&gActionQueues[qidx])))
        {
            entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            if (target->getCPUState() == kIOCPUStateRunning)
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));

            if (target->getCPUState() == kIOCPUStateStopped)
                processor_start(target->getMachProcessor());
        }
    }

#if defined(__arm64__)
    sched_restore_recommended_cores_after_sleep();
#endif
}
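
/*
 * Adds the CPU nub to the global gIOCPUs list and publishes corrected
 * bus/cpu/timebase frequency properties on the provider (32-bit values when
 * they fit, 64-bit otherwise).
 */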
bool IOCPU::start(IOService *provider)
{
    OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

    if (!super::start(provider)) return false;

    _cpuGroup = gIOCPUs;
    cpuNub = provider;

    IOLockLock(gIOCPUsLock);
    gIOCPUs->setObject(this);
    IOLockUnlock(gIOCPUsLock);

    // Correct the bus, cpu and timebase frequencies in the device tree.
    if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    } else {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
    }
    provider->setProperty("bus-frequency", busFrequency);
    busFrequency->release();

    if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
    } else {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
    }
    provider->setProperty("clock-frequency", cpuFrequency);
    cpuFrequency->release();

    timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
    provider->setProperty("timebase-frequency", timebaseFrequency);
    timebaseFrequency->release();

    super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);

    setCPUNumber(0);
    setCPUState(kIOCPUStateUnregistered);

    return true;
}
OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
{
    if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];

    return super::getProperty(aKey);
}
bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
    if (aKey == gIOCPUStateKey) {
        return false;
    }

    return super::setProperty(aKey, anObject);
}
bool IOCPU::serializeProperties(OSSerialize *serialize) const
{
    bool result;
    OSDictionary *dict = dictionaryWithProperties();
    if (!dict) return false;
    dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
    result = dict->serialize(serialize);
    dict->release();
    return result;
}
IOReturn IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString     *stateStr;
    IOReturn     result;

    if (dict == 0) return kIOReturnUnsupported;

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) return result;

        if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}
void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}

void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32 IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32 IOCPU::getCPUState(void)
{
    return _cpuState;
}

void IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

OSArray *IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

processor_t IOCPU::getMachProcessor(void)
{
    return machProcessor;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef  super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}
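
/*
 * One interrupt source (and per-vector lock) is allocated per CPU; vector 0's
 * lock also protects the enabledCPUs rendezvous used further below.
 */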
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
    int cnt;

    if (!super::init()) return kIOReturnInvalid;

    numSources = sources;
    numCPUs = cpus;

    vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
    if (vectors == 0) return kIOReturnNoMemory;
    bzero(vectors, numSources * sizeof(IOInterruptVector));

    // Allocate a lock for each vector
    for (cnt = 0; cnt < numSources; cnt++) {
        vectors[cnt].interruptLock = IOLockAlloc();
        if (vectors[cnt].interruptLock == NULL) {
            for (cnt = 0; cnt < numSources; cnt++) {
                if (vectors[cnt].interruptLock != NULL)
                    IOLockFree(vectors[cnt].interruptLock);
            }
            return kIOReturnNoResources;
        }
    }

    ml_init_max_cpus(numSources);

#if KPERF
    /*
     * kperf allocates based on the number of CPUs and requires them to all be
     * accounted for.
     */
    boolean_t found_kperf = FALSE;
    char kperf_config_str[64];
    found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
    if (found_kperf && kperf_config_str[0] != '\0') {
        kperf_kernel_configure(kperf_config_str);
    }
#endif

    return kIOReturnSuccess;
}
void IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                               this);
}
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int        cnt;
    OSArray    *controller;
    OSArray    *specifier;
    OSData     *tmpData;
    long       tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0))
        return;

    // Create the interrupt specifier array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    }

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}
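
/*
 * Called as each CPU comes up: install the low-level handler for that CPU and
 * count it. When the last CPU checks in, any threads parked in
 * registerInterrupt() waiting for all CPUs are woken via thread_wakeup(this).
 */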
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numSources) return kIOReturnNoResources;

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub     = nub;
    vector->source  = source;
    vector->target  = target;
    vector->refCon  = refCon;

    // Get the vector ready. It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered   = 1;

    IOUnlock(vector->interruptLock);

    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else
        IOUnlock(vectors[0].interruptLock);

    return kIOReturnSuccess;
}
IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
                                                    int /*source*/,
                                                    int *interruptType)
{
    if (interruptType == 0) return kIOReturnBadArgument;

    *interruptType = kIOInterruptTypeLevel;

    return kIOReturnSuccess;
}
IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}
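
// Dispatch a CPU interrupt to the handler registered for its source vector.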
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) return kIOReturnInvalid;

    vector->handler(vector->target, vector->refCon,
                    vector->nub, vector->source);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */