/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);
}

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>
#include <kern/sched_prim.h>

extern "C" void console_suspend();
extern "C" void console_resume();
extern "C" void sched_override_recommended_cores_for_sleep(void);
extern "C" void sched_restore_recommended_cores_after_sleep(void);

typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
    void * param1, void * param2, void * param3, const char * name);

struct iocpu_platform_action_entry {
    queue_chain_t                        link;
    iocpu_platform_action_t              action;
    int32_t                              priority;
    const char *                         name;
    void *                               refcon0;
    void *                               refcon1;
    boolean_t                            callout_in_progress;
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
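
/*
 * Platform action entries are kept on the gActionQueues below, one queue per
 * action type, sorted by ascending priority.  Entries installed for the wake
 * and active queues store a negated priority so that those queues run in the
 * reverse order of the sleep queue; the dispatcher hands the absolute value
 * to the callback.
 */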

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOLock         *gIOCPUsLock;
static OSArray        *gIOCPUs;
static const OSSymbol *gIOCPUStateKey;
static OSString       *gIOCPUStateNames[kIOCPUStateCount];

enum {
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};

const OSSymbol * gIOPlatformSleepActionKey;
const OSSymbol * gIOPlatformWakeActionKey;
const OSSymbol * gIOPlatformQuiesceActionKey;
const OSSymbol * gIOPlatformActiveActionKey;
const OSSymbol * gIOPlatformHaltRestartActionKey;
const OSSymbol * gIOPlatformPanicActionKey;

static queue_head_t gActionQueues[kQueueCount];
static const OSSymbol * gActionSymbols[kQueueCount];

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority) {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link);   // at tail
}
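
/*
 * Example: inserting priorities 10, 5, 5 yields the queue order 5, 5, 10;
 * equal priorities stay in insertion order because the scan above only
 * inserts before a strictly greater priority.
 */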

static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}

static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
    void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t                   ret = KERN_SUCCESS;
    kern_return_t                   result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority)) {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress) {
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri,
                    param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            } else if (allow_nested_callouts) {
                ret = (*next->action)(next->refcon0, next->refcon1, pri,
                    param1, param2, param3, next->name);
            }
            if (KERN_SUCCESS == result) {
                result = ret;
            }
        }
    }
    return result;
}
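
/*
 * Note on the nesting guard: with allow_nested_callouts == FALSE, an entry
 * whose callout is already in progress is silently skipped, which lets the
 * panic path call back into this dispatcher without recursing into the same
 * action.  The first error returned by any action is preserved in `result`;
 * later successes do not overwrite it.
 */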

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
    assert(preemption_enabled() == false);
    return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1,
               NULL, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
    assert(preemption_enabled() == false);
    return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1,
               NULL, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message)
{
    if (!gActionQueues[kQueueHaltRestart].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1,
               (void *)(uintptr_t) message, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
               (void *)(uintptr_t) message, NULL, NULL, FALSE);
}

extern "C" kern_return_t
IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
{
    PE_panic_save_context_t context = {
        .psc_buffer = addr,
        .psc_offset = offset,
        .psc_length = len
    };

    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
               (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE);
}
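
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a service
 * opts in to one of these action queues by publishing the matching key as an
 * OSNumber priority property and handling the symbol in its
 * callPlatformFunction() override.  For the quiesce queue, for example:
 *
 *     OSNumber * prio = OSNumber::withNumber(100ULL, 32);
 *     setProperty(gIOPlatformQuiesceActionKey, prio);
 *     prio->release();
 *
 *     IOReturn MyDriver::callPlatformFunction(const OSSymbol * functionName,
 *         bool waitForFunction, void * param1, void * param2,
 *         void * param3, void * param4)
 *     {
 *         if (functionName == gIOPlatformQuiesceActionKey) {
 *             // param1 carries the (absolute) priority; quiesce hardware here.
 *             return kIOReturnSuccess;
 *         }
 *         return IOService::callPlatformFunction(functionName,
 *             waitForFunction, param1, param2, param3, param4);
 *     }
 */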

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
    void * param1, void * param2, void * param3,
    const char * service_name)
{
    IOReturn         ret;
    IOService *      service  = (IOService *)      refcon0;
    const OSSymbol * function = (const OSSymbol *) refcon1;

    kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

    ret = service->callPlatformFunction(function, false,
        (void *)(uintptr_t) priority, param1, param2, param3);

    return ret;
}

static void
IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
    iocpu_platform_action_entry_t * entry;
    OSNumber *       num;
    uint32_t         priority;
    const OSSymbol * key   = gActionSymbols[qidx];
    queue_head_t *   queue = &gActionQueues[qidx];
    boolean_t        reverse;
    boolean_t        uniq;

    num = OSDynamicCast(OSNumber, service->getProperty(key));
    if (!num) {
        return;
    }

    reverse = false;
    uniq    = false;
    switch (qidx) {
    case kQueueWake:
    case kQueueActive:
        reverse = true;
        break;
    case kQueueHaltRestart:
    case kQueuePanic:
        uniq = true;
        break;
    }
    if (uniq) {
        // Only one entry per service on these queues.
        queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
        {
            if (service == entry->refcon0) {
                return;
            }
        }
    }

    entry = IONew(iocpu_platform_action_entry_t, 1);
    entry->action = &IOServicePlatformAction;
    entry->name = service->getName();
    priority = num->unsigned32BitValue();
    if (reverse) {
        entry->priority = -priority;
    } else {
        entry->priority = priority;
    }
    entry->refcon0 = service;
    entry->refcon1 = (void *) key;
    entry->callout_in_progress = FALSE;

    iocpu_add_platform_action(queue, entry);
}
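
/*
 * Wake and active actions are installed with a negated priority so that they
 * run in the opposite order of the sleep actions; halt/restart and panic
 * actions are installed at most once per service (the uniq check above).
 */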

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs     = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    // Note: "Uninitalized" matches the (misspelled) kIOCPUStateUninitalized
    // enumerator from IOCPU.h, so the string is kept consistent with it.
    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
            = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
            = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
            = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
            = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
            = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
            = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}

IOReturn
IOInstallServicePlatformActions(IOService * service)
{
    IOLockLock(gIOCPUsLock);

    IOInstallServicePlatformAction(service, kQueueHaltRestart);
    IOInstallServicePlatformAction(service, kQueuePanic);

    IOLockUnlock(gIOCPUsLock);

    return kIOReturnSuccess;
}

IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
        next = (typeof(entry))queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link)) {
            entry = next;
            // Advance before a possible unlink/free of the current entry.
            next = (typeof(entry))queue_next(&entry->link);
            if (service == entry->refcon0) {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
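
/*
 * The PE_cpu_* entry points below are called from machine-dependent code.
 * The opaque cpu_id_t handle is an IOCPU pointer in disguise, as the casts
 * assume; PE_cpu_machine_init() is the one path that verifies the cast with
 * OSDynamicCast before using it.
 */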

kern_return_t
PE_cpu_start(cpu_id_t target,
    vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
    IOCPU *targetCPU = (IOCPU *)target;

    if (targetCPU == NULL) {
        return KERN_FAILURE;
    }
    return targetCPU->startCPU(start_paddr, arg_paddr);
}

void
PE_cpu_halt(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU *)target;

    targetCPU->haltCPU();
}

void
PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPU(targetCPU);
}

void
PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUDeferred(targetCPU);
}

void
PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUCancel(targetCPU);
}

void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == NULL) {
        panic("%s: invalid target CPU %p", __func__, target);
    }

    targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
    if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
        ml_set_is_quiescing(false);
    }
#endif /* defined(__arm__) || defined(__arm64__) */
}

void
PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU *)target;
#if defined(__arm__) || defined(__arm64__)
    if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
        ml_set_is_quiescing(true);
    }
#endif /* defined(__arm__) || defined(__arm64__) */
    targetCPU->quiesceCPU();
}

#if defined(__arm__) || defined(__arm64__)
static perfmon_interrupt_handler_func pmi_handler = NULL;

kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = (IOCPU *)target;

    if (targetCPU == nullptr) {
        return;
    }

    if (enable) {
        targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, NULL);
        targetCPU->getProvider()->enableInterrupt(1);
    } else {
        targetCPU->getProvider()->disableInterrupt(1);
    }
}
#endif /* defined(__arm__) || defined(__arm64__) */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
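
/*
 * Overall sleep sequence: harvest sleep/wake/quiesce/active actions from the
 * registry, run the sleep actions, halt every secondary CPU, then halt the
 * boot CPU (the quiesce actions run and the system sleeps inside that call).
 * On wake the boot CPU resumes, the wake actions run, the per-sleep action
 * entries are freed, and the secondary CPUs are restarted.
 */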

void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
    extern IOCPU *currentShutdownTarget;
#endif
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");
#if defined(__arm64__)
    sched_override_recommended_cores_for_sleep();
#endif

    IORegistryIterator * iter;
    OSOrderedSet *       all;
    IOService *          service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
        kIORegistryIterateRecursively );
    if (iter) {
        all = 0;
        do {
            if (all) {
                all->release();
            }
            all = iter->iterateAll();
        } while (!iter->isValid());
        iter->release();

        if (all) {
            while ((service = (IOService *) all->getFirstObject())) {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1,
        NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
    currentShutdownTarget = NULL;
#endif

    integer_t old_pri;
    thread_t self = current_thread();

    /*
     * We need to boost this thread's priority to the maximum kernel priority to
     * ensure we can urgently preempt ANY thread currently executing on the
     * target CPU.  Note that realtime threads have their own mechanism to eventually
     * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
     */
    old_pri = thread_kern_get_pri(self);
    thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--) {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // We make certain that the bootCPU is the last to sleep
        // We'll skip it for now, and halt it after finishing the
        // non-boot CPUs.
        if (target->getCPUNumber() == (UInt32)master_cpu) {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
            currentShutdownTarget = target;
#endif
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
    rootDomain->stop_watchdog_timer();

    /*
     * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
     * The system sleeps here.
     */
    bootCPU->haltCPU();

    /*
     * The system is now coming back from sleep on the boot CPU.
     * The kQueueActive actions have already been called.
     */
    rootDomain->start_watchdog_timer();
    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1,
        NULL, NULL, NULL, TRUE);

    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
        while (!(queue_empty(&gActionQueues[qidx]))) {
            entry = (typeof(entry))queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++) {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            if (target->getCPUState() == kIOCPUStateRunning) {
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
            }

            if (target->getCPUState() == kIOCPUStateStopped) {
                processor_start(target->getMachProcessor());
            }
        }
    }

#if defined(__arm64__)
    sched_restore_recommended_cores_after_sleep();
#endif

    thread_kern_set_pri(self, old_pri);
}

bool
IOCPU::start(IOService *provider)
{
    OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

    if (!super::start(provider)) {
        return false;
    }

    _cpuGroup = gIOCPUs;
    cpuNub = provider;

    IOLockLock(gIOCPUsLock);
    gIOCPUs->setObject(this);
    IOLockUnlock(gIOCPUsLock);

    // Correct the bus, cpu and timebase frequencies in the device tree.
    // Publish a 4-byte value when it fits in 32 bits, otherwise all 8 bytes.
    if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    } else {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
    }
    provider->setProperty("bus-frequency", busFrequency);
    busFrequency->release();

    if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
    } else {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
    }
    provider->setProperty("clock-frequency", cpuFrequency);
    cpuFrequency->release();

    timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
    provider->setProperty("timebase-frequency", timebaseFrequency);
    timebaseFrequency->release();

    super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

    setCPUNumber(0);
    setCPUState(kIOCPUStateUnregistered);

    return true;
}

void
IOCPU::detach(IOService *provider)
{
    super::detach(provider);
    IOLockLock(gIOCPUsLock);
    unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
    if (index != (unsigned int)-1) {
        gIOCPUs->removeObject(index);
    }
    IOLockUnlock(gIOCPUsLock);
}

OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
    if (aKey == gIOCPUStateKey) {
        return gIOCPUStateNames[_cpuState];
    }

    return super::getProperty(aKey);
}

bool
IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
    if (aKey == gIOCPUStateKey) {
        return false;
    }

    return super::setProperty(aKey, anObject);
}

bool
IOCPU::serializeProperties(OSSerialize *serialize) const
{
    bool result;
    OSDictionary *dict = dictionaryWithProperties();
    if (!dict) {
        return false;
    }
    dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
    result = dict->serialize(serialize);
    dict->release();
    return result;
}

IOReturn
IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString     *stateStr;
    IOReturn     result;

    if (dict == NULL) {
        return kIOReturnUnsupported;
    }

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != NULL) {
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) {
            return result;
        }

        if (setProperty(gIOCPUStateKey, stateStr)) {
            return kIOReturnSuccess;
        }

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}

void
IOCPU::signalCPU(IOCPU */*target*/)
{
}

void
IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
}

void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32
IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32
IOCPU::getCPUState(void)
{
    return _cpuState;
}

void
IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

OSArray *
IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32
IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

processor_t
IOCPU::getMachProcessor(void)
{
    return machProcessor;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef  super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}

IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
    int cnt;

    if (!super::init()) {
        return kIOReturnInvalid;
    }

    numSources = sources;
    numCPUs = cpus;

    vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
    if (vectors == NULL) {
        return kIOReturnNoMemory;
    }
    bzero(vectors, numSources * sizeof(IOInterruptVector));

    // Allocate a lock for each vector
    for (cnt = 0; cnt < numSources; cnt++) {
        vectors[cnt].interruptLock = IOLockAlloc();
        if (vectors[cnt].interruptLock == NULL) {
            for (cnt = 0; cnt < numSources; cnt++) {
                if (vectors[cnt].interruptLock != NULL) {
                    IOLockFree(vectors[cnt].interruptLock);
                }
            }
            return kIOReturnNoResources;
        }
    }

    ml_init_max_cpus(numSources);

#if KPERF
    /*
     * kperf allocates based on the number of CPUs and requires them to all be
     * accounted for.
     */
    boolean_t found_kperf = FALSE;
    char kperf_config_str[64];
    found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
    if (found_kperf && kperf_config_str[0] != '\0') {
        kperf_kernel_configure(kperf_config_str);
    }
#endif

    return kIOReturnSuccess;
}

void
IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
        this);
}

void
IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int          cnt;
    OSArray      *controller;
    OSArray      *specifier;
    OSData       *tmpData;
    long         tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != NULL) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != NULL)) {
        return;
    }

    // Create the interrupt specifer array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    }

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}

void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}
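
/*
 * enableCPUInterrupt() and registerInterrupt() rendezvous through
 * vectors[0].interruptLock: registering threads block (assert_wait on
 * `this`) until every CPU has enabled its interrupt, and the final
 * enableCPUInterrupt() call issues the thread_wakeup that releases them.
 */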

IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    IOInterruptVector *vector;

    // Interrupts must be enabled, as this can allocate memory.
    assert(ml_get_interrupts_enabled() == TRUE);

    if (source >= numSources) {
        return kIOReturnNoResources;
    }

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub     = nub;
    vector->source  = source;
    vector->target  = target;
    vector->refCon  = refCon;

    // Get the vector ready.  It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered   = 1;

    IOUnlock(vector->interruptLock);

    // Block until all CPUs have enabled their interrupt.
    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else {
        IOUnlock(vectors[0].interruptLock);
    }

    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::getInterruptType(IOService */*nub*/,
    int /*source*/,
    int *interruptType)
{
    if (interruptType == NULL) {
        return kIOReturnBadArgument;
    }

    *interruptType = kIOInterruptTypeLevel;

    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::handleInterrupt(void */*refCon*/,
    IOService */*nub*/,
    int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) {
        return kIOReturnInvalid;
    }

    vector->handler(vector->target, vector->refCon,
        vector->nub, vector->source);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */