/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
30 #include <machine/machine_routines.h>
31 #include <pexpert/pexpert.h>
32 #include <kern/cpu_number.h>
33 extern void kperf_kernel_configure(char *);
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOPlatformExpert.h>
38 #include <IOKit/pwr_mgt/RootDomain.h>
39 #include <IOKit/pwr_mgt/IOPMPrivate.h>
40 #include <IOKit/IOUserClient.h>
41 #include <IOKit/IOKitKeysPrivate.h>
42 #include <IOKit/IOCPU.h>
43 #include "IOKitKernelInternal.h"
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46 #include <kern/queue.h>
48 extern "C" void console_suspend();
49 extern "C" void console_resume();
51 typedef kern_return_t (*iocpu_platform_action_t
)(void * refcon0
, void * refcon1
, uint32_t priority
,
52 void * param1
, void * param2
, void * param3
,
55 struct iocpu_platform_action_entry
58 iocpu_platform_action_t action
;
63 boolean_t callout_in_progress
;
64 struct iocpu_platform_action_entry
* alloc_list
;
66 typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t
;
68 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
70 static IOLock
*gIOCPUsLock
;
71 static OSArray
*gIOCPUs
;
72 static const OSSymbol
*gIOCPUStateKey
;
73 static OSString
*gIOCPUStateNames
[kIOCPUStateCount
];
81 kQueueHaltRestart
= 4,
86 const OSSymbol
* gIOPlatformSleepActionKey
;
87 const OSSymbol
* gIOPlatformWakeActionKey
;
88 const OSSymbol
* gIOPlatformQuiesceActionKey
;
89 const OSSymbol
* gIOPlatformActiveActionKey
;
90 const OSSymbol
* gIOPlatformHaltRestartActionKey
;
91 const OSSymbol
* gIOPlatformPanicActionKey
;
93 static queue_head_t gActionQueues
[kQueueCount
];
94 static const OSSymbol
* gActionSymbols
[kQueueCount
];
96 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
99 iocpu_add_platform_action(queue_head_t
* queue
, iocpu_platform_action_entry_t
* entry
)
101 iocpu_platform_action_entry_t
* next
;
103 queue_iterate(queue
, next
, iocpu_platform_action_entry_t
*, link
)
105 if (next
->priority
> entry
->priority
)
107 queue_insert_before(queue
, entry
, next
, iocpu_platform_action_entry_t
*, link
);
111 queue_enter(queue
, entry
, iocpu_platform_action_entry_t
*, link
); // at tail
115 iocpu_remove_platform_action(iocpu_platform_action_entry_t
* entry
)
117 remque(&entry
->link
);
121 iocpu_run_platform_actions(queue_head_t
* queue
, uint32_t first_priority
, uint32_t last_priority
,
122 void * param1
, void * param2
, void * param3
, boolean_t allow_nested_callouts
)
124 kern_return_t ret
= KERN_SUCCESS
;
125 kern_return_t result
= KERN_SUCCESS
;
126 iocpu_platform_action_entry_t
* next
;
128 queue_iterate(queue
, next
, iocpu_platform_action_entry_t
*, link
)
130 uint32_t pri
= (next
->priority
< 0) ? -next
->priority
: next
->priority
;
131 if ((pri
>= first_priority
) && (pri
<= last_priority
))
133 //kprintf("[%p]", next->action);
134 if (!allow_nested_callouts
&& !next
->callout_in_progress
)
136 next
->callout_in_progress
= TRUE
;
137 ret
= (*next
->action
)(next
->refcon0
, next
->refcon1
, pri
, param1
, param2
, param3
, next
->name
);
138 next
->callout_in_progress
= FALSE
;
140 else if (allow_nested_callouts
)
142 ret
= (*next
->action
)(next
->refcon0
, next
->refcon1
, pri
, param1
, param2
, param3
, next
->name
);
145 if (KERN_SUCCESS
== result
)
151 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
153 extern "C" kern_return_t
154 IOCPURunPlatformQuiesceActions(void)
156 return (iocpu_run_platform_actions(&gActionQueues
[kQueueQuiesce
], 0, 0U-1,
157 NULL
, NULL
, NULL
, TRUE
));
160 extern "C" kern_return_t
161 IOCPURunPlatformActiveActions(void)
163 return (iocpu_run_platform_actions(&gActionQueues
[kQueueActive
], 0, 0U-1,
164 NULL
, NULL
, NULL
, TRUE
));
167 extern "C" kern_return_t
168 IOCPURunPlatformHaltRestartActions(uint32_t message
)
170 if (!gActionQueues
[kQueueHaltRestart
].next
) return (kIOReturnNotReady
);
171 return (iocpu_run_platform_actions(&gActionQueues
[kQueueHaltRestart
], 0, 0U-1,
172 (void *)(uintptr_t) message
, NULL
, NULL
, TRUE
));
175 extern "C" kern_return_t
176 IOCPURunPlatformPanicActions(uint32_t message
)
178 // Don't allow nested calls of panic actions
179 if (!gActionQueues
[kQueuePanic
].next
) return (kIOReturnNotReady
);
180 return (iocpu_run_platform_actions(&gActionQueues
[kQueuePanic
], 0, 0U-1,
181 (void *)(uintptr_t) message
, NULL
, NULL
, FALSE
));
185 extern "C" kern_return_t
186 IOCPURunPlatformPanicSyncAction(void *addr
, uint32_t offset
, uint32_t len
)
188 PE_panic_save_context_t context
= {
190 .psc_offset
= offset
,
194 // Don't allow nested calls of panic actions
195 if (!gActionQueues
[kQueuePanic
].next
) return (kIOReturnNotReady
);
196 return (iocpu_run_platform_actions(&gActionQueues
[kQueuePanic
], 0, 0U-1,
197 (void *)(uintptr_t)(kPEPanicSync
), &context
, NULL
, FALSE
));
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
204 IOServicePlatformAction(void * refcon0
, void * refcon1
, uint32_t priority
,
205 void * param1
, void * param2
, void * param3
,
206 const char * service_name
)
209 IOService
* service
= (IOService
*) refcon0
;
210 const OSSymbol
* function
= (const OSSymbol
*) refcon1
;
212 kprintf("%s -> %s\n", function
->getCStringNoCopy(), service_name
);
214 ret
= service
->callPlatformFunction(function
, false,
215 (void *)(uintptr_t) priority
, param1
, param2
, param3
);
221 IOInstallServicePlatformAction(IOService
* service
, uint32_t qidx
)
223 iocpu_platform_action_entry_t
* entry
;
226 const OSSymbol
* key
= gActionSymbols
[qidx
];
227 queue_head_t
* queue
= &gActionQueues
[qidx
];
231 num
= OSDynamicCast(OSNumber
, service
->getProperty(key
));
242 case kQueueHaltRestart
:
249 queue_iterate(queue
, entry
, iocpu_platform_action_entry_t
*, link
)
251 if (service
== entry
->refcon0
) return;
255 entry
= IONew(iocpu_platform_action_entry_t
, 1);
256 entry
->action
= &IOServicePlatformAction
;
257 entry
->name
= service
->getName();
258 priority
= num
->unsigned32BitValue();
260 entry
->priority
= -priority
;
262 entry
->priority
= priority
;
263 entry
->refcon0
= service
;
264 entry
->refcon1
= (void *) key
;
265 entry
->callout_in_progress
= FALSE
;
267 iocpu_add_platform_action(queue
, entry
);
270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
273 IOCPUInitialize(void)
275 gIOCPUsLock
= IOLockAlloc();
276 gIOCPUs
= OSArray::withCapacity(1);
278 for (uint32_t qidx
= kQueueSleep
; qidx
< kQueueCount
; qidx
++)
280 queue_init(&gActionQueues
[qidx
]);
283 gIOCPUStateKey
= OSSymbol::withCStringNoCopy("IOCPUState");
285 gIOCPUStateNames
[kIOCPUStateUnregistered
] =
286 OSString::withCStringNoCopy("Unregistered");
287 gIOCPUStateNames
[kIOCPUStateUninitalized
] =
288 OSString::withCStringNoCopy("Uninitalized");
289 gIOCPUStateNames
[kIOCPUStateStopped
] =
290 OSString::withCStringNoCopy("Stopped");
291 gIOCPUStateNames
[kIOCPUStateRunning
] =
292 OSString::withCStringNoCopy("Running");
294 gIOPlatformSleepActionKey
= gActionSymbols
[kQueueSleep
]
295 = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey
);
296 gIOPlatformWakeActionKey
= gActionSymbols
[kQueueWake
]
297 = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey
);
298 gIOPlatformQuiesceActionKey
= gActionSymbols
[kQueueQuiesce
]
299 = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey
);
300 gIOPlatformActiveActionKey
= gActionSymbols
[kQueueActive
]
301 = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey
);
302 gIOPlatformHaltRestartActionKey
= gActionSymbols
[kQueueHaltRestart
]
303 = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey
);
304 gIOPlatformPanicActionKey
= gActionSymbols
[kQueuePanic
]
305 = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey
);
309 IOInstallServicePlatformActions(IOService
* service
)
311 IOLockLock(gIOCPUsLock
);
313 IOInstallServicePlatformAction(service
, kQueueHaltRestart
);
314 IOInstallServicePlatformAction(service
, kQueuePanic
);
316 IOLockUnlock(gIOCPUsLock
);
318 return (kIOReturnSuccess
);
322 IORemoveServicePlatformActions(IOService
* service
)
324 iocpu_platform_action_entry_t
* entry
;
325 iocpu_platform_action_entry_t
* next
;
327 IOLockLock(gIOCPUsLock
);
329 for (uint32_t qidx
= kQueueSleep
; qidx
< kQueueCount
; qidx
++)
331 next
= (typeof(entry
)) queue_first(&gActionQueues
[qidx
]);
332 while (!queue_end(&gActionQueues
[qidx
], &next
->link
))
335 next
= (typeof(entry
)) queue_next(&entry
->link
);
336 if (service
== entry
->refcon0
)
338 iocpu_remove_platform_action(entry
);
339 IODelete(entry
, iocpu_platform_action_entry_t
, 1);
344 IOLockUnlock(gIOCPUsLock
);
346 return (kIOReturnSuccess
);
350 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
352 kern_return_t
PE_cpu_start(cpu_id_t target
,
353 vm_offset_t start_paddr
, vm_offset_t arg_paddr
)
355 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
357 if (targetCPU
== 0) return KERN_FAILURE
;
358 return targetCPU
->startCPU(start_paddr
, arg_paddr
);
361 void PE_cpu_halt(cpu_id_t target
)
363 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
365 if (targetCPU
) targetCPU
->haltCPU();
368 void PE_cpu_signal(cpu_id_t source
, cpu_id_t target
)
370 IOCPU
*sourceCPU
= OSDynamicCast(IOCPU
, (OSObject
*)source
);
371 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
373 if (sourceCPU
&& targetCPU
) sourceCPU
->signalCPU(targetCPU
);
376 void PE_cpu_signal_deferred(cpu_id_t source
, cpu_id_t target
)
378 IOCPU
*sourceCPU
= OSDynamicCast(IOCPU
, (OSObject
*)source
);
379 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
381 if (sourceCPU
&& targetCPU
) sourceCPU
->signalCPUDeferred(targetCPU
);
384 void PE_cpu_signal_cancel(cpu_id_t source
, cpu_id_t target
)
386 IOCPU
*sourceCPU
= OSDynamicCast(IOCPU
, (OSObject
*)source
);
387 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
389 if (sourceCPU
&& targetCPU
) sourceCPU
->signalCPUCancel(targetCPU
);
392 void PE_cpu_machine_init(cpu_id_t target
, boolean_t bootb
)
394 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
397 targetCPU
->initCPU(bootb
);
398 #if defined(__arm__) || defined(__arm64__)
399 if (!bootb
&& (targetCPU
->getCPUNumber() == (UInt32
)master_cpu
)) ml_set_is_quiescing(false);
400 #endif /* defined(__arm__) || defined(__arm64__) */
404 void PE_cpu_machine_quiesce(cpu_id_t target
)
406 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
408 #if defined(__arm__) || defined(__arm64__)
409 if (targetCPU
->getCPUNumber() == (UInt32
)master_cpu
) ml_set_is_quiescing(true);
410 #endif /* defined(__arm__) || defined(__arm64__) */
411 targetCPU
->quiesceCPU();
415 #if defined(__arm__) || defined(__arm64__)
416 static perfmon_interrupt_handler_func pmi_handler
= 0;
418 kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler
)
420 pmi_handler
= handler
;
425 void PE_cpu_perfmon_interrupt_enable(cpu_id_t target
, boolean_t enable
)
427 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
431 targetCPU
->getProvider()->registerInterrupt(1, targetCPU
, (IOInterruptAction
)pmi_handler
, 0);
432 targetCPU
->getProvider()->enableInterrupt(1);
434 targetCPU
->getProvider()->disableInterrupt(1);
440 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
442 #define super IOService
444 OSDefineMetaClassAndAbstractStructors(IOCPU
, IOService
);
445 OSMetaClassDefineReservedUnused(IOCPU
, 0);
446 OSMetaClassDefineReservedUnused(IOCPU
, 1);
447 OSMetaClassDefineReservedUnused(IOCPU
, 2);
448 OSMetaClassDefineReservedUnused(IOCPU
, 3);
449 OSMetaClassDefineReservedUnused(IOCPU
, 4);
450 OSMetaClassDefineReservedUnused(IOCPU
, 5);
451 OSMetaClassDefineReservedUnused(IOCPU
, 6);
452 OSMetaClassDefineReservedUnused(IOCPU
, 7);
454 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
456 void IOCPUSleepKernel(void)
460 IOCPU
*bootCPU
= NULL
;
461 IOPMrootDomain
*rootDomain
= IOService::getPMRootDomain();
463 kprintf("IOCPUSleepKernel\n");
465 IORegistryIterator
* iter
;
469 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformActions
);
471 iter
= IORegistryIterator::iterateOver( gIOServicePlane
,
472 kIORegistryIterateRecursively
);
480 all
= iter
->iterateAll();
482 while (!iter
->isValid());
487 while((service
= (IOService
*) all
->getFirstObject()))
489 for (uint32_t qidx
= kQueueSleep
; qidx
<= kQueueActive
; qidx
++)
491 IOInstallServicePlatformAction(service
, qidx
);
493 all
->removeObject(service
);
499 iocpu_run_platform_actions(&gActionQueues
[kQueueSleep
], 0, 0U-1,
500 NULL
, NULL
, NULL
, TRUE
);
502 rootDomain
->tracePoint( kIOPMTracePointSleepCPUs
);
504 numCPUs
= gIOCPUs
->getCount();
509 target
= OSDynamicCast(IOCPU
, gIOCPUs
->getObject(cnt
));
511 // We make certain that the bootCPU is the last to sleep
512 // We'll skip it for now, and halt it after finishing the
514 if (target
->getCPUNumber() == (UInt32
)master_cpu
)
517 } else if (target
->getCPUState() == kIOCPUStateRunning
)
523 assert(bootCPU
!= NULL
);
524 assert(cpu_number() == master_cpu
);
528 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformDriver
);
530 // Now sleep the boot CPU.
533 rootDomain
->tracePoint( kIOPMTracePointWakePlatformActions
);
537 iocpu_run_platform_actions(&gActionQueues
[kQueueWake
], 0, 0U-1,
538 NULL
, NULL
, NULL
, TRUE
);
540 iocpu_platform_action_entry_t
* entry
;
541 for (uint32_t qidx
= kQueueSleep
; qidx
<= kQueueActive
; qidx
++)
543 while (!(queue_empty(&gActionQueues
[qidx
])))
545 entry
= (typeof(entry
)) queue_first(&gActionQueues
[qidx
]);
546 iocpu_remove_platform_action(entry
);
547 IODelete(entry
, iocpu_platform_action_entry_t
, 1);
551 rootDomain
->tracePoint( kIOPMTracePointWakeCPUs
);
553 // Wake the other CPUs.
554 for (cnt
= 0; cnt
< numCPUs
; cnt
++)
556 target
= OSDynamicCast(IOCPU
, gIOCPUs
->getObject(cnt
));
558 // Skip the already-woken boot CPU.
559 if (target
->getCPUNumber() != (UInt32
)master_cpu
) {
560 if (target
->getCPUState() == kIOCPUStateRunning
)
561 panic("Spurious wakeup of cpu %u", (unsigned int)(target
->getCPUNumber()));
563 if (target
->getCPUState() == kIOCPUStateStopped
)
564 processor_start(target
->getMachProcessor());
569 bool IOCPU::start(IOService
*provider
)
571 OSData
*busFrequency
, *cpuFrequency
, *timebaseFrequency
;
573 if (!super::start(provider
)) return false;
578 IOLockLock(gIOCPUsLock
);
579 gIOCPUs
->setObject(this);
580 IOLockUnlock(gIOCPUsLock
);
582 // Correct the bus, cpu and timebase frequencies in the device tree.
583 if (gPEClockFrequencyInfo
.bus_frequency_hz
< 0x100000000ULL
) {
584 busFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.bus_clock_rate_hz
, 4);
586 busFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.bus_frequency_hz
, 8);
588 provider
->setProperty("bus-frequency", busFrequency
);
589 busFrequency
->release();
591 if (gPEClockFrequencyInfo
.cpu_frequency_hz
< 0x100000000ULL
) {
592 cpuFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.cpu_clock_rate_hz
, 4);
594 cpuFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.cpu_frequency_hz
, 8);
596 provider
->setProperty("clock-frequency", cpuFrequency
);
597 cpuFrequency
->release();
599 timebaseFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.timebase_frequency_hz
, 4);
600 provider
->setProperty("timebase-frequency", timebaseFrequency
);
601 timebaseFrequency
->release();
603 super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);
606 setCPUState(kIOCPUStateUnregistered
);
611 OSObject
*IOCPU::getProperty(const OSSymbol
*aKey
) const
613 if (aKey
== gIOCPUStateKey
) return gIOCPUStateNames
[_cpuState
];
615 return super::getProperty(aKey
);
618 bool IOCPU::setProperty(const OSSymbol
*aKey
, OSObject
*anObject
)
620 if (aKey
== gIOCPUStateKey
) {
624 return super::setProperty(aKey
, anObject
);
627 bool IOCPU::serializeProperties(OSSerialize
*serialize
) const
630 OSDictionary
*dict
= dictionaryWithProperties();
631 if (!dict
) return false;
632 dict
->setObject(gIOCPUStateKey
, gIOCPUStateNames
[_cpuState
]);
633 result
= dict
->serialize(serialize
);
638 IOReturn
IOCPU::setProperties(OSObject
*properties
)
640 OSDictionary
*dict
= OSDynamicCast(OSDictionary
, properties
);
644 if (dict
== 0) return kIOReturnUnsupported
;
646 stateStr
= OSDynamicCast(OSString
, dict
->getObject(gIOCPUStateKey
));
648 result
= IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator
);
649 if (result
!= kIOReturnSuccess
) return result
;
651 if (setProperty(gIOCPUStateKey
, stateStr
)) return kIOReturnSuccess
;
653 return kIOReturnUnsupported
;
656 return kIOReturnUnsupported
;
659 void IOCPU::signalCPU(IOCPU */
*target*/
)
663 void IOCPU::signalCPUDeferred(IOCPU
*target
)
665 // Our CPU may not support deferred IPIs,
666 // so send a regular IPI by default
670 void IOCPU::signalCPUCancel(IOCPU */
*target*/
)
672 // Meant to cancel signals sent by
673 // signalCPUDeferred; unsupported
677 void IOCPU::enableCPUTimeBase(bool /*enable*/)
681 UInt32
IOCPU::getCPUNumber(void)
686 void IOCPU::setCPUNumber(UInt32 cpuNumber
)
688 _cpuNumber
= cpuNumber
;
689 super::setProperty("IOCPUNumber", _cpuNumber
, 32);
692 UInt32
IOCPU::getCPUState(void)
697 void IOCPU::setCPUState(UInt32 cpuState
)
699 if (cpuState
< kIOCPUStateCount
) {
700 _cpuState
= cpuState
;
704 OSArray
*IOCPU::getCPUGroup(void)
709 UInt32
IOCPU::getCPUGroupSize(void)
711 return _cpuGroup
->getCount();
714 processor_t
IOCPU::getMachProcessor(void)
716 return machProcessor
;
720 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
723 #define super IOInterruptController
725 OSDefineMetaClassAndStructors(IOCPUInterruptController
, IOInterruptController
);
727 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 1);
728 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 2);
729 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 3);
730 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 4);
731 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 5);
735 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
737 IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources
)
739 return initCPUInterruptController(sources
, sources
);
742 IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources
, int cpus
)
746 if (!super::init()) return kIOReturnInvalid
;
748 numSources
= sources
;
751 vectors
= (IOInterruptVector
*)IOMalloc(numSources
* sizeof(IOInterruptVector
));
752 if (vectors
== 0) return kIOReturnNoMemory
;
753 bzero(vectors
, numSources
* sizeof(IOInterruptVector
));
755 // Allocate a lock for each vector
756 for (cnt
= 0; cnt
< numSources
; cnt
++) {
757 vectors
[cnt
].interruptLock
= IOLockAlloc();
758 if (vectors
[cnt
].interruptLock
== NULL
) {
759 for (cnt
= 0; cnt
< numSources
; cnt
++) {
760 if (vectors
[cnt
].interruptLock
!= NULL
)
761 IOLockFree(vectors
[cnt
].interruptLock
);
763 return kIOReturnNoResources
;
767 ml_init_max_cpus(numSources
);
771 * kperf allocates based on the number of CPUs and requires them to all be
774 boolean_t found_kperf
= FALSE
;
775 char kperf_config_str
[64];
776 found_kperf
= PE_parse_boot_arg_str("kperf", kperf_config_str
, sizeof(kperf_config_str
));
777 if (found_kperf
&& kperf_config_str
[0] != '\0') {
778 kperf_kernel_configure(kperf_config_str
);
782 return kIOReturnSuccess
;
785 void IOCPUInterruptController::registerCPUInterruptController(void)
789 getPlatform()->registerInterruptController(gPlatformInterruptControllerName
,
793 void IOCPUInterruptController::setCPUInterruptProperties(IOService
*service
)
801 if ((service
->getProperty(gIOInterruptControllersKey
) != 0) &&
802 (service
->getProperty(gIOInterruptSpecifiersKey
) != 0))
805 // Create the interrupt specifer array.
806 specifier
= OSArray::withCapacity(numSources
);
807 for (cnt
= 0; cnt
< numSources
; cnt
++) {
809 tmpData
= OSData::withBytes(&tmpLong
, sizeof(tmpLong
));
810 specifier
->setObject(tmpData
);
814 // Create the interrupt controller array.
815 controller
= OSArray::withCapacity(numSources
);
816 for (cnt
= 0; cnt
< numSources
; cnt
++) {
817 controller
->setObject(gPlatformInterruptControllerName
);
820 // Put the two arrays into the property table.
821 service
->setProperty(gIOInterruptControllersKey
, controller
);
822 service
->setProperty(gIOInterruptSpecifiersKey
, specifier
);
823 controller
->release();
824 specifier
->release();
827 void IOCPUInterruptController::enableCPUInterrupt(IOCPU
*cpu
)
829 IOInterruptHandler handler
= OSMemberFunctionCast(
830 IOInterruptHandler
, this, &IOCPUInterruptController::handleInterrupt
);
834 ml_install_interrupt_handler(cpu
, cpu
->getCPUNumber(), this, handler
, 0);
836 IOTakeLock(vectors
[0].interruptLock
);
839 if (enabledCPUs
== numCPUs
) {
840 IOService::cpusRunning();
843 IOUnlock(vectors
[0].interruptLock
);
846 IOReturn
IOCPUInterruptController::registerInterrupt(IOService
*nub
,
849 IOInterruptHandler handler
,
852 IOInterruptVector
*vector
;
854 if (source
>= numSources
) return kIOReturnNoResources
;
856 vector
= &vectors
[source
];
858 // Get the lock for this vector.
859 IOTakeLock(vector
->interruptLock
);
861 // Make sure the vector is not in use.
862 if (vector
->interruptRegistered
) {
863 IOUnlock(vector
->interruptLock
);
864 return kIOReturnNoResources
;
867 // Fill in vector with the client's info.
868 vector
->handler
= handler
;
870 vector
->source
= source
;
871 vector
->target
= target
;
872 vector
->refCon
= refCon
;
874 // Get the vector ready. It starts hard disabled.
875 vector
->interruptDisabledHard
= 1;
876 vector
->interruptDisabledSoft
= 1;
877 vector
->interruptRegistered
= 1;
879 IOUnlock(vector
->interruptLock
);
881 IOTakeLock(vectors
[0].interruptLock
);
882 if (enabledCPUs
!= numCPUs
) {
883 assert_wait(this, THREAD_UNINT
);
884 IOUnlock(vectors
[0].interruptLock
);
885 thread_block(THREAD_CONTINUE_NULL
);
887 IOUnlock(vectors
[0].interruptLock
);
889 return kIOReturnSuccess
;
892 IOReturn
IOCPUInterruptController::getInterruptType(IOService */
*nub*/
,
896 if (interruptType
== 0) return kIOReturnBadArgument
;
898 *interruptType
= kIOInterruptTypeLevel
;
900 return kIOReturnSuccess
;
903 IOReturn
IOCPUInterruptController::enableInterrupt(IOService */
*nub*/
,
906 // ml_set_interrupts_enabled(true);
907 return kIOReturnSuccess
;
910 IOReturn
IOCPUInterruptController::disableInterrupt(IOService */
*nub*/
,
913 // ml_set_interrupts_enabled(false);
914 return kIOReturnSuccess
;
917 IOReturn
IOCPUInterruptController::causeInterrupt(IOService */
*nub*/
,
920 ml_cause_interrupt();
921 return kIOReturnSuccess
;
924 IOReturn
IOCPUInterruptController::handleInterrupt(void */
*refCon*/
,
928 IOInterruptVector
*vector
;
930 vector
= &vectors
[source
];
932 if (!vector
->interruptRegistered
) return kIOReturnInvalid
;
934 vector
->handler(vector
->target
, vector
->refCon
,
935 vector
->nub
, vector
->source
);
937 return kIOReturnSuccess
;
940 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */