/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);
}

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>

extern "C" void console_suspend();
extern "C" void console_resume();

typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
                                                 void * param1, void * param2, void * param3,
                                                 const char * name);

struct iocpu_platform_action_entry
{
    queue_chain_t                        link;
    iocpu_platform_action_t              action;
    int32_t                              priority;
    const char *                         name;
    void *                               refcon0;
    void *                               refcon1;
    boolean_t                            callout_in_progress;
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOLock          *gIOCPUsLock;
static OSArray         *gIOCPUs;
static const OSSymbol  *gIOCPUStateKey;
static OSString        *gIOCPUStateNames[kIOCPUStateCount];

enum
{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};

const OSSymbol *        gIOPlatformSleepActionKey;
const OSSymbol *        gIOPlatformWakeActionKey;
const OSSymbol *        gIOPlatformQuiesceActionKey;
const OSSymbol *        gIOPlatformActiveActionKey;
const OSSymbol *        gIOPlatformHaltRestartActionKey;
const OSSymbol *        gIOPlatformPanicActionKey;

static queue_head_t     gActionQueues[kQueueCount];
static const OSSymbol * gActionSymbols[kQueueCount];
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority)
        {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link);  // at tail
}

static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
                           void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t ret    = KERN_SUCCESS;
    kern_return_t result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority))
        {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress)
            {
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            }
            else if (allow_nested_callouts)
            {
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
            }
        }
        if (KERN_SUCCESS == result)
            result = ret;
    }
    return (result);
}
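/*
 * Usage sketch (illustrative only, not part of the original source): a kernel
 * subsystem could queue a callout directly by filling in an entry and handing
 * it to iocpu_add_platform_action(); entries are kept sorted by ascending
 * priority, and iocpu_run_platform_actions() invokes every entry whose
 * absolute priority falls inside [first_priority, last_priority]. The callback
 * name below is hypothetical.
 *
 *   static kern_return_t
 *   my_quiesce_action(void * refcon0, void * refcon1, uint32_t priority,
 *                     void * p1, void * p2, void * p3, const char * name)
 *   {
 *       // callout body runs with the priority it was registered at
 *       return KERN_SUCCESS;
 *   }
 *
 *   iocpu_platform_action_entry_t * e = IONew(iocpu_platform_action_entry_t, 1);
 *   e->action              = &my_quiesce_action;
 *   e->priority            = 100;
 *   e->name                = "my_quiesce_action";
 *   e->refcon0             = NULL;
 *   e->refcon1             = NULL;
 *   e->callout_in_progress = FALSE;
 *   iocpu_add_platform_action(&gActionQueues[kQueueQuiesce], e);
 */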
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
                                       NULL, NULL, NULL, TRUE));
}

extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
                                       NULL, NULL, NULL, TRUE));
}

extern "C" kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message)
{
    if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL, TRUE));
}

extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL, FALSE));
}

extern "C" kern_return_t
IOCPURunPlatformPanicSyncAction(void *addr, size_t len)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t)(kPEPanicSync), addr, (void *)(uintptr_t)len, FALSE));
}
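/*
 * Note (descriptive sketch, not from the original source): these extern "C"
 * entry points are what the rest of the kernel calls to drive the queues
 * above. A typical sequence around system sleep looks roughly like
 *
 *   IOCPURunPlatformQuiesceActions();   // last thing before the platform sleeps
 *   // ... platform is asleep ...
 *   IOCPURunPlatformActiveActions();    // first thing after the platform wakes
 *
 * while IOCPURunPlatformHaltRestartActions() and IOCPURunPlatformPanicActions()
 * are invoked on the shutdown/restart and panic paths respectively; the panic
 * variants refuse nested callouts (allow_nested_callouts == FALSE).
 */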
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
                        void * param1, void * param2, void * param3,
                        const char * service_name)
{
    IOReturn         ret;
    IOService *      service  = (IOService *)      refcon0;
    const OSSymbol * function = (const OSSymbol *) refcon1;

    kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

    ret = service->callPlatformFunction(function, false,
                                        (void *)(uintptr_t) priority, param1, param2, param3);

    return (ret);
}

static void
IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
    iocpu_platform_action_entry_t * entry;
    OSNumber *       num;
    uint32_t         priority;
    const OSSymbol * key   = gActionSymbols[qidx];
    queue_head_t *   queue = &gActionQueues[qidx];
    bool             reverse;
    bool             uniq;

    num = OSDynamicCast(OSNumber, service->getProperty(key));
    if (!num) return;

    reverse = false;
    uniq    = false;
    switch (qidx)
    {
        case kQueueWake:
        case kQueueActive:
            reverse = true;
            break;
        case kQueueHaltRestart:
        case kQueuePanic:
            uniq = true;
            break;
    }

    if (uniq)
    {
        queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
        {
            if (service == entry->refcon0) return;
        }
    }

    entry = IONew(iocpu_platform_action_entry_t, 1);
    entry->action = &IOServicePlatformAction;
    entry->name   = service->getName();
    priority      = num->unsigned32BitValue();
    if (reverse)
        entry->priority = -priority;
    else
        entry->priority = priority;
    entry->refcon0 = service;
    entry->refcon1 = (void *) key;
    entry->callout_in_progress = FALSE;

    iocpu_add_platform_action(queue, entry);
}
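/*
 * Driver-side sketch (assumed driver name, not part of this file): a service
 * opts into one of these callouts by publishing the matching key as an
 * OSNumber priority and implementing callPlatformFunction() for that key,
 * which is how IOServicePlatformAction() above dispatches into the driver.
 *
 *   // In the driver's start():
 *   setProperty(gIOPlatformQuiesceActionKey, (unsigned long long) 100, 32);
 *
 *   // In the driver:
 *   IOReturn MyDriver::callPlatformFunction(const OSSymbol * functionName,
 *                                           bool waitForFunction,
 *                                           void * param1, void * param2,
 *                                           void * param3, void * param4)
 *   {
 *       if (functionName == gIOPlatformQuiesceActionKey) {
 *           // param1 carries the registered priority; quiesce hardware here
 *           return kIOReturnSuccess;
 *       }
 *       return IOService::callPlatformFunction(functionName, waitForFunction,
 *                                              param1, param2, param3, param4);
 *   }
 */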
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs     = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    gIOPlatformSleepActionKey       = gActionSymbols[kQueueSleep]
        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey        = gActionSymbols[kQueueWake]
        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey     = gActionSymbols[kQueueQuiesce]
        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey      = gActionSymbols[kQueueActive]
        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey       = gActionSymbols[kQueuePanic]
        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
IOReturn
IOInstallServicePlatformActions(IOService * service)
{
    IOLockLock(gIOCPUsLock);

    IOInstallServicePlatformAction(service, kQueueHaltRestart);
    IOInstallServicePlatformAction(service, kQueuePanic);

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}

IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link))
        {
            entry = next;
            next  = (typeof(entry)) queue_next(&entry->link);
            if (service == entry->refcon0)
            {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}
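/*
 * Lifecycle note (descriptive, added for clarity): IOInstallServicePlatformActions()
 * is expected to be called when a service publishing halt-restart or panic keys
 * registers, and IORemoveServicePlatformActions() must be called before that
 * service goes away so its queued entries do not dangle. The sleep, wake,
 * quiesce and active queues, by contrast, are rebuilt from the registry and
 * torn down on every pass through IOCPUSleepKernel() below.
 */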
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t PE_cpu_start(cpu_id_t target,
                           vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == 0) return KERN_FAILURE;
    return targetCPU->startCPU(start_paddr, arg_paddr);
}

void PE_cpu_halt(cpu_id_t target)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) targetCPU->haltCPU();
}

void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
}

void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPUDeferred(targetCPU);
}

void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPUCancel(targetCPU);
}
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == NULL) return;

    targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
    if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
#endif /* defined(__arm__) || defined(__arm64__) */
}

void PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
#if defined(__arm__) || defined(__arm64__)
    if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
#endif /* defined(__arm__) || defined(__arm64__) */
    targetCPU->quiesceCPU();
}
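/*
 * Note (descriptive, added for clarity): the PE_cpu_* functions above are the
 * C bridge used by platform and scheduler code; the opaque cpu_id_t handed in
 * is the IOCPU object the CPU driver registered, which is why every call
 * begins with an OSDynamicCast back to IOCPU before invoking the virtual
 * CPU-control methods.
 */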
#if defined(__arm__) || defined(__arm64__)
static perfmon_interrupt_handler_func pmi_handler = 0;

kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (!targetCPU) return;

    if (enable) {
        targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
        targetCPU->getProvider()->enableInterrupt(1);
    } else {
        targetCPU->getProvider()->disableInterrupt(1);
    }
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOCPUSleepKernel(void)
{
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");

    IORegistryIterator * iter;
    OSOrderedSet *       all;
    IOService *          service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
                                            kIORegistryIterateRecursively );
    if (iter)
    {
        all = 0;
        do
        {
            if (all)
                all->release();
            all = iter->iterateAll();
        }
        while (!iter->isValid());
        iter->release();

        if (all)
        {
            while ((service = (IOService *) all->getFirstObject()))
            {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
                {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // We make certain that the bootCPU is the last to sleep.
        // We'll skip it for now, and halt it after finishing the
        // non-boot CPUs.
        if (target->getCPUNumber() == (UInt32)master_cpu)
        {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning)
        {
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );

    // Now sleep the boot CPU.
    bootCPU->haltCPU();

    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
    {
        while (!(queue_empty(&gActionQueues[qidx])))
        {
            entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            if (target->getCPUState() == kIOCPUStateRunning)
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));

            if (target->getCPUState() == kIOCPUStateStopped)
                processor_start(target->getMachProcessor());
        }
    }
}
bool IOCPU::start(IOService *provider)
{
    OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

    if (!super::start(provider)) return false;

    _cpuGroup = gIOCPUs;
    cpuNub = provider;

    IOLockLock(gIOCPUsLock);
    gIOCPUs->setObject(this);
    IOLockUnlock(gIOCPUsLock);

    // Correct the bus, cpu and timebase frequencies in the device tree.
    if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    } else {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
    }
    provider->setProperty("bus-frequency", busFrequency);
    busFrequency->release();

    if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
    } else {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
    }
    provider->setProperty("clock-frequency", cpuFrequency);
    cpuFrequency->release();

    timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
    provider->setProperty("timebase-frequency", timebaseFrequency);
    timebaseFrequency->release();

    super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);

    setCPUNumber(0);
    setCPUState(kIOCPUStateUnregistered);

    return true;
}
OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
{
    if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];

    return super::getProperty(aKey);
}

bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
    if (aKey == gIOCPUStateKey) {
        return false;
    }

    return super::setProperty(aKey, anObject);
}
bool IOCPU::serializeProperties(OSSerialize *serialize) const
{
    bool result;
    OSDictionary *dict = dictionaryWithProperties();
    if (!dict) return false;
    dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
    result = dict->serialize(serialize);
    dict->release();
    return result;
}
IOReturn IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString     *stateStr;
    IOReturn     result;

    if (dict == 0) return kIOReturnUnsupported;

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) return result;

        if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}
void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}

void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32 IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32 IOCPU::getCPUState(void)
{
    return _cpuState;
}

void IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

OSArray *IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

processor_t IOCPU::getMachProcessor(void)
{
    return machProcessor;
}
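/*
 * Subclassing sketch (assumed names, not from this file): IOCPU is defined as
 * an abstract class above, so a platform CPU driver supplies the concrete
 * behaviour by overriding at least the CPU-control entry points this file
 * calls (startCPU, haltCPU, initCPU, quiesceCPU), e.g.
 *
 *   class MyPlatformCPU : public IOCPU
 *   {
 *       OSDeclareDefaultStructors(MyPlatformCPU)
 *   public:
 *       virtual kern_return_t startCPU(vm_offset_t start_paddr,
 *                                      vm_offset_t arg_paddr) APPLE_KEXT_OVERRIDE;
 *       virtual void          haltCPU(void) APPLE_KEXT_OVERRIDE;
 *       virtual void          initCPU(bool boot) APPLE_KEXT_OVERRIDE;
 *       virtual void          quiesceCPU(void) APPLE_KEXT_OVERRIDE;
 *   };
 */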
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}

IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
    int cnt;

    if (!super::init()) return kIOReturnInvalid;

    numSources = sources;
    numCPUs = cpus;

    vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
    if (vectors == 0) return kIOReturnNoMemory;
    bzero(vectors, numSources * sizeof(IOInterruptVector));

    // Allocate a lock for each vector
    for (cnt = 0; cnt < numSources; cnt++) {
        vectors[cnt].interruptLock = IOLockAlloc();
        if (vectors[cnt].interruptLock == NULL) {
            for (cnt = 0; cnt < numSources; cnt++) {
                if (vectors[cnt].interruptLock != NULL)
                    IOLockFree(vectors[cnt].interruptLock);
            }
            return kIOReturnNoResources;
        }
    }

    ml_init_max_cpus(numSources);

#if KPERF
    /*
     * kperf allocates based on the number of CPUs and requires them to all be
     * accounted for.
     */
    boolean_t found_kperf = FALSE;
    char kperf_config_str[64];
    found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
    if (found_kperf && kperf_config_str[0] != '\0') {
        kperf_kernel_configure(kperf_config_str);
    }
#endif

    return kIOReturnSuccess;
}
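/*
 * Bring-up sketch (illustrative, assumed call sites): platform expert code is
 * expected to create one IOCPUInterruptController, size it by CPU count, and
 * then point each CPU nub at it, roughly
 *
 *   IOCPUInterruptController * ic = new IOCPUInterruptController;
 *   if (ic && (ic->initCPUInterruptController(numCPUs) == kIOReturnSuccess)) {
 *       ic->attach(getPlatform());
 *       ic->registerCPUInterruptController();   // registers under gPlatformInterruptControllerName
 *   }
 *
 *   // later, per CPU nub:
 *   ic->setCPUInterruptProperties(cpuNub);      // adds controller/specifier properties
 *   ic->enableCPUInterrupt(cpu);                // installs the per-CPU handler
 */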
void IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                               this);
}

void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int      cnt;
    OSArray  *controller;
    OSArray  *specifier;
    OSData   *tmpData;
    long     tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0))
        return;

    // Create the interrupt specifier array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    }

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}
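/*
 * Handshake note (descriptive, added for clarity): enableCPUInterrupt() counts
 * CPUs as they come online and wakes waiters once enabledCPUs reaches numCPUs;
 * registerInterrupt() below blocks on the matching assert_wait()/thread_block()
 * pair so that clients cannot complete a vector registration until every CPU
 * has installed its interrupt handler.
 */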
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numSources) return kIOReturnNoResources;

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub     = nub;
    vector->source  = source;
    vector->target  = target;
    vector->refCon  = refCon;

    // Get the vector ready. It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered   = 1;

    IOUnlock(vector->interruptLock);

    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else
        IOUnlock(vectors[0].interruptLock);

    return kIOReturnSuccess;
}
IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
                                                    int /*source*/,
                                                    int *interruptType)
{
    if (interruptType == 0) return kIOReturnBadArgument;

    *interruptType = kIOInterruptTypeLevel;

    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) return kIOReturnInvalid;

    vector->handler(vector->target, vector->refCon,
                    vector->nub, vector->source);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */