/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <libkern/c++/OSSharedPtr.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"
48 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
50 #include <kern/queue.h>
51 #include <kern/sched_prim.h>
53 extern "C" void console_suspend();
54 extern "C" void console_resume();
55 extern "C" void sched_override_recommended_cores_for_sleep(void);
56 extern "C" void sched_restore_recommended_cores_after_sleep(void);
58 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
60 static IOLock
*gIOCPUsLock
;
61 static OSSharedPtr
<OSArray
> gIOCPUs
;
62 static OSSharedPtr
<const OSSymbol
> gIOCPUStateKey
;
63 static OSSharedPtr
<OSString
> gIOCPUStateNames
[kIOCPUStateCount
];
65 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
72 gIOCPUsLock
= IOLockAlloc();
73 gIOCPUs
= OSArray::withCapacity(1);
75 gIOCPUStateKey
= OSSymbol::withCStringNoCopy("IOCPUState");
77 gIOCPUStateNames
[kIOCPUStateUnregistered
] =
78 OSString::withCStringNoCopy("Unregistered");
79 gIOCPUStateNames
[kIOCPUStateUninitalized
] =
80 OSString::withCStringNoCopy("Uninitalized");
81 gIOCPUStateNames
[kIOCPUStateStopped
] =
82 OSString::withCStringNoCopy("Stopped");
83 gIOCPUStateNames
[kIOCPUStateRunning
] =
84 OSString::withCStringNoCopy("Running");
87 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
90 PE_cpu_start(cpu_id_t target
,
91 vm_offset_t start_paddr
, vm_offset_t arg_paddr
)
93 IOCPU
*targetCPU
= (IOCPU
*)target
;
95 if (targetCPU
== NULL
) {
98 return targetCPU
->startCPU(start_paddr
, arg_paddr
);
102 PE_cpu_halt(cpu_id_t target
)
104 IOCPU
*targetCPU
= (IOCPU
*)target
;
106 targetCPU
->haltCPU();
110 PE_cpu_signal(cpu_id_t source
, cpu_id_t target
)
112 IOCPU
*sourceCPU
= (IOCPU
*)source
;
113 IOCPU
*targetCPU
= (IOCPU
*)target
;
115 sourceCPU
->signalCPU(targetCPU
);
119 PE_cpu_signal_deferred(cpu_id_t source
, cpu_id_t target
)
121 IOCPU
*sourceCPU
= (IOCPU
*)source
;
122 IOCPU
*targetCPU
= (IOCPU
*)target
;
124 sourceCPU
->signalCPUDeferred(targetCPU
);
128 PE_cpu_signal_cancel(cpu_id_t source
, cpu_id_t target
)
130 IOCPU
*sourceCPU
= (IOCPU
*)source
;
131 IOCPU
*targetCPU
= (IOCPU
*)target
;
133 sourceCPU
->signalCPUCancel(targetCPU
);
137 PE_cpu_machine_init(cpu_id_t target
, boolean_t bootb
)
139 IOCPU
*targetCPU
= OSDynamicCast(IOCPU
, (OSObject
*)target
);
141 if (targetCPU
== NULL
) {
142 panic("%s: invalid target CPU %p", __func__
, target
);
145 targetCPU
->initCPU(bootb
);
146 #if defined(__arm__) || defined(__arm64__)
147 if (!bootb
&& (targetCPU
->getCPUNumber() == (UInt32
)master_cpu
)) {
148 ml_set_is_quiescing(false);
150 #endif /* defined(__arm__) || defined(__arm64__) */
154 PE_cpu_machine_quiesce(cpu_id_t target
)
156 IOCPU
*targetCPU
= (IOCPU
*)target
;
157 #if defined(__arm__) || defined(__arm64__)
158 if (targetCPU
->getCPUNumber() == (UInt32
)master_cpu
) {
159 ml_set_is_quiescing(true);
161 #endif /* defined(__arm__) || defined(__arm64__) */
162 targetCPU
->quiesceCPU();
165 #if defined(__arm__) || defined(__arm64__)
166 static perfmon_interrupt_handler_func pmi_handler
= NULL
;
169 PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler
)
171 pmi_handler
= handler
;
177 PE_cpu_perfmon_interrupt_enable(cpu_id_t target
, boolean_t enable
)
179 IOCPU
*targetCPU
= (IOCPU
*)target
;
181 if (targetCPU
== nullptr) {
186 targetCPU
->getProvider()->registerInterrupt(1, targetCPU
, (IOInterruptAction
)pmi_handler
, NULL
);
187 targetCPU
->getProvider()->enableInterrupt(1);
189 targetCPU
->getProvider()->disableInterrupt(1);
194 #endif /* !USE_APPLEARMSMP */
196 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
198 #define super IOService
200 OSDefineMetaClassAndAbstractStructors(IOCPU
, IOService
);
201 OSMetaClassDefineReservedUnused(IOCPU
, 0);
202 OSMetaClassDefineReservedUnused(IOCPU
, 1);
203 OSMetaClassDefineReservedUnused(IOCPU
, 2);
204 OSMetaClassDefineReservedUnused(IOCPU
, 3);
205 OSMetaClassDefineReservedUnused(IOCPU
, 4);
206 OSMetaClassDefineReservedUnused(IOCPU
, 5);
207 OSMetaClassDefineReservedUnused(IOCPU
, 6);
208 OSMetaClassDefineReservedUnused(IOCPU
, 7);
210 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
214 IOCPUSleepKernel(void)
216 #if defined(__x86_64__)
217 extern IOCPU
*currentShutdownTarget
;
219 unsigned int cnt
, numCPUs
;
221 IOCPU
*bootCPU
= NULL
;
222 IOPMrootDomain
*rootDomain
= IOService::getPMRootDomain();
224 printf("IOCPUSleepKernel enter\n");
225 #if defined(__arm64__)
226 sched_override_recommended_cores_for_sleep();
229 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformActions
);
230 IOPlatformActionsPreSleep();
231 rootDomain
->tracePoint( kIOPMTracePointSleepCPUs
);
233 numCPUs
= gIOCPUs
->getCount();
234 #if defined(__x86_64__)
235 currentShutdownTarget
= NULL
;
239 thread_t self
= current_thread();
242 * We need to boost this thread's priority to the maximum kernel priority to
243 * ensure we can urgently preempt ANY thread currently executing on the
244 * target CPU. Note that realtime threads have their own mechanism to eventually
245 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
247 old_pri
= thread_kern_get_pri(self
);
248 thread_kern_set_pri(self
, thread_kern_get_kernel_maxpri());
251 ml_set_is_quiescing(true);
254 target
= OSDynamicCast(IOCPU
, gIOCPUs
->getObject(cnt
));
256 // We make certain that the bootCPU is the last to sleep
257 // We'll skip it for now, and halt it after finishing the
259 if (target
->getCPUNumber() == (UInt32
)master_cpu
) {
261 } else if (target
->getCPUState() == kIOCPUStateRunning
) {
262 #if defined(__x86_64__)
263 currentShutdownTarget
= target
;
269 assert(bootCPU
!= NULL
);
270 assert(cpu_number() == master_cpu
);
274 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformDriver
);
275 rootDomain
->stop_watchdog_timer();
278 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
279 * The system sleeps here.
283 ml_set_is_quiescing(false);
286 * The system is now coming back from sleep on the boot CPU.
287 * The kQueueActive actions have already been called.
290 rootDomain
->start_watchdog_timer();
291 rootDomain
->tracePoint( kIOPMTracePointWakePlatformActions
);
295 IOPlatformActionsPostResume();
296 rootDomain
->tracePoint( kIOPMTracePointWakeCPUs
);
298 // Wake the other CPUs.
299 for (cnt
= 0; cnt
< numCPUs
; cnt
++) {
300 target
= OSDynamicCast(IOCPU
, gIOCPUs
->getObject(cnt
));
302 // Skip the already-woken boot CPU.
303 if (target
->getCPUNumber() != (UInt32
)master_cpu
) {
304 if (target
->getCPUState() == kIOCPUStateRunning
) {
305 panic("Spurious wakeup of cpu %u", (unsigned int)(target
->getCPUNumber()));
308 if (target
->getCPUState() == kIOCPUStateStopped
) {
309 processor_start(target
->getMachProcessor());
314 #if defined(__arm64__)
315 sched_restore_recommended_cores_after_sleep();
318 thread_kern_set_pri(self
, old_pri
);
319 printf("IOCPUSleepKernel exit\n");
/* On USE_APPLEARMSMP platforms the legacy IOCPU path is disabled; the
 * IOCPU::start/detach overrides below consult this to become no-ops.
 * NOTE(review): the opening #if line was lost in this extraction and is
 * reconstructed from the visible #else/#endif labels. */
#if USE_APPLEARMSMP
static bool
is_IOCPU_disabled(void)
{
	return true;
}
#else /* !USE_APPLEARMSMP */
static bool
is_IOCPU_disabled(void)
{
	return false;
}
#endif /* !USE_APPLEARMSMP */
336 IOCPU::start(IOService
*provider
)
338 if (is_IOCPU_disabled()) {
342 if (!super::start(provider
)) {
349 IOLockLock(gIOCPUsLock
);
350 gIOCPUs
->setObject(this);
351 IOLockUnlock(gIOCPUsLock
);
353 // Correct the bus, cpu and timebase frequencies in the device tree.
354 if (gPEClockFrequencyInfo
.bus_frequency_hz
< 0x100000000ULL
) {
355 OSSharedPtr
<OSData
> busFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.bus_clock_rate_hz
, 4);
356 provider
->setProperty("bus-frequency", busFrequency
.get());
358 OSSharedPtr
<OSData
> busFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.bus_frequency_hz
, 8);
359 provider
->setProperty("bus-frequency", busFrequency
.get());
362 if (gPEClockFrequencyInfo
.cpu_frequency_hz
< 0x100000000ULL
) {
363 OSSharedPtr
<OSData
> cpuFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.cpu_clock_rate_hz
, 4);
364 provider
->setProperty("clock-frequency", cpuFrequency
.get());
366 OSSharedPtr
<OSData
> cpuFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.cpu_frequency_hz
, 8);
367 provider
->setProperty("clock-frequency", cpuFrequency
.get());
370 OSSharedPtr
<OSData
> timebaseFrequency
= OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo
.timebase_frequency_hz
, 4);
371 provider
->setProperty("timebase-frequency", timebaseFrequency
.get());
373 super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);
376 setCPUState(kIOCPUStateUnregistered
);
382 IOCPU::detach(IOService
*provider
)
384 if (is_IOCPU_disabled()) {
388 super::detach(provider
);
389 IOLockLock(gIOCPUsLock
);
390 unsigned int index
= gIOCPUs
->getNextIndexOfObject(this, 0);
391 if (index
!= (unsigned int)-1) {
392 gIOCPUs
->removeObject(index
);
394 IOLockUnlock(gIOCPUsLock
);
398 IOCPU::getProperty(const OSSymbol
*aKey
) const
400 if (aKey
== gIOCPUStateKey
) {
401 return gIOCPUStateNames
[_cpuState
].get();
403 #pragma clang diagnostic push
404 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
405 return super::getProperty(aKey
);
406 #pragma clang diagnostic pop
410 IOCPU::setProperty(const OSSymbol
*aKey
, OSObject
*anObject
)
412 if (aKey
== gIOCPUStateKey
) {
416 return super::setProperty(aKey
, anObject
);
420 IOCPU::serializeProperties(OSSerialize
*serialize
) const
423 OSSharedPtr
<OSDictionary
> dict
= dictionaryWithProperties();
427 dict
->setObject(gIOCPUStateKey
.get(), gIOCPUStateNames
[_cpuState
].get());
428 result
= dict
->serialize(serialize
);
433 IOCPU::setProperties(OSObject
*properties
)
435 OSDictionary
*dict
= OSDynamicCast(OSDictionary
, properties
);
440 return kIOReturnUnsupported
;
443 stateStr
= OSDynamicCast(OSString
, dict
->getObject(gIOCPUStateKey
.get()));
444 if (stateStr
!= NULL
) {
445 result
= IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator
);
446 if (result
!= kIOReturnSuccess
) {
450 if (setProperty(gIOCPUStateKey
.get(), stateStr
)) {
451 return kIOReturnSuccess
;
454 return kIOReturnUnsupported
;
457 return kIOReturnUnsupported
;
461 IOCPU::signalCPU(IOCPU */
*target*/
)
466 IOCPU::signalCPUDeferred(IOCPU
*target
)
468 // Our CPU may not support deferred IPIs,
469 // so send a regular IPI by default
474 IOCPU::signalCPUCancel(IOCPU */
*target*/
)
476 // Meant to cancel signals sent by
477 // signalCPUDeferred; unsupported
482 IOCPU::enableCPUTimeBase(bool /*enable*/)
487 IOCPU::getCPUNumber(void)
493 IOCPU::setCPUNumber(UInt32 cpuNumber
)
495 _cpuNumber
= cpuNumber
;
496 super::setProperty("IOCPUNumber", _cpuNumber
, 32);
500 IOCPU::getCPUState(void)
506 IOCPU::setCPUState(UInt32 cpuState
)
508 if (cpuState
< kIOCPUStateCount
) {
509 _cpuState
= cpuState
;
514 IOCPU::getCPUGroup(void)
516 return _cpuGroup
.get();
520 IOCPU::getCPUGroupSize(void)
522 return _cpuGroup
->getCount();
526 IOCPU::getMachProcessor(void)
528 return machProcessor
;
532 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
535 #define super IOInterruptController
537 OSDefineMetaClassAndStructors(IOCPUInterruptController
, IOInterruptController
);
539 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 1);
540 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 2);
541 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 3);
542 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 4);
543 OSMetaClassDefineReservedUnused(IOCPUInterruptController
, 5);
547 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
550 IOCPUInterruptController::initCPUInterruptController(int sources
)
552 return initCPUInterruptController(sources
, sources
);
556 IOCPUInterruptController::initCPUInterruptController(int sources
, int cpus
)
560 if (!super::init()) {
561 return kIOReturnInvalid
;
564 numSources
= sources
;
567 vectors
= (IOInterruptVector
*)IOMalloc(numSources
* sizeof(IOInterruptVector
));
568 if (vectors
== NULL
) {
569 return kIOReturnNoMemory
;
571 bzero(vectors
, numSources
* sizeof(IOInterruptVector
));
573 // Allocate a lock for each vector
574 for (cnt
= 0; cnt
< numSources
; cnt
++) {
575 vectors
[cnt
].interruptLock
= IOLockAlloc();
576 if (vectors
[cnt
].interruptLock
== NULL
) {
577 for (cnt
= 0; cnt
< numSources
; cnt
++) {
578 if (vectors
[cnt
].interruptLock
!= NULL
) {
579 IOLockFree(vectors
[cnt
].interruptLock
);
582 return kIOReturnNoResources
;
586 ml_set_max_cpus(numSources
);
587 return kIOReturnSuccess
;
591 IOCPUInterruptController::registerCPUInterruptController(void)
593 setProperty(gPlatformInterruptControllerName
, kOSBooleanTrue
);
596 getPlatform()->registerInterruptController(gPlatformInterruptControllerName
,
601 IOCPUInterruptController::setCPUInterruptProperties(IOService
*service
)
604 OSSharedPtr
<OSArray
> specifier
;
605 OSSharedPtr
<OSArray
> controller
;
608 if ((service
->propertyExists(gIOInterruptControllersKey
)) &&
609 (service
->propertyExists(gIOInterruptSpecifiersKey
))) {
613 // Create the interrupt specifer array.
614 specifier
= OSArray::withCapacity(numSources
);
615 for (cnt
= 0; cnt
< numSources
; cnt
++) {
617 OSSharedPtr
<OSData
> tmpData
= OSData::withBytes(&tmpLong
, sizeof(tmpLong
));
618 specifier
->setObject(tmpData
.get());
621 // Create the interrupt controller array.
622 controller
= OSArray::withCapacity(numSources
);
623 for (cnt
= 0; cnt
< numSources
; cnt
++) {
624 controller
->setObject(gPlatformInterruptControllerName
);
627 // Put the two arrays into the property table.
628 service
->setProperty(gIOInterruptControllersKey
, controller
.get());
629 service
->setProperty(gIOInterruptSpecifiersKey
, specifier
.get());
633 IOCPUInterruptController::enableCPUInterrupt(IOCPU
*cpu
)
635 IOInterruptHandler handler
= OSMemberFunctionCast(
636 IOInterruptHandler
, this, &IOCPUInterruptController::handleInterrupt
);
640 ml_install_interrupt_handler(cpu
, cpu
->getCPUNumber(), this, handler
, NULL
);
642 IOTakeLock(vectors
[0].interruptLock
);
645 if (enabledCPUs
== numCPUs
) {
646 IOService::cpusRunning();
649 IOUnlock(vectors
[0].interruptLock
);
653 IOCPUInterruptController::registerInterrupt(IOService
*nub
,
656 IOInterruptHandler handler
,
659 IOInterruptVector
*vector
;
661 // Interrupts must be enabled, as this can allocate memory.
662 assert(ml_get_interrupts_enabled() == TRUE
);
664 if (source
>= numSources
) {
665 return kIOReturnNoResources
;
668 vector
= &vectors
[source
];
670 // Get the lock for this vector.
671 IOTakeLock(vector
->interruptLock
);
673 // Make sure the vector is not in use.
674 if (vector
->interruptRegistered
) {
675 IOUnlock(vector
->interruptLock
);
676 return kIOReturnNoResources
;
679 // Fill in vector with the client's info.
680 vector
->handler
= handler
;
682 vector
->source
= source
;
683 vector
->target
= target
;
684 vector
->refCon
= refCon
;
686 // Get the vector ready. It starts hard disabled.
687 vector
->interruptDisabledHard
= 1;
688 vector
->interruptDisabledSoft
= 1;
689 vector
->interruptRegistered
= 1;
691 IOUnlock(vector
->interruptLock
);
693 IOTakeLock(vectors
[0].interruptLock
);
694 if (enabledCPUs
!= numCPUs
) {
695 assert_wait(this, THREAD_UNINT
);
696 IOUnlock(vectors
[0].interruptLock
);
697 thread_block(THREAD_CONTINUE_NULL
);
699 IOUnlock(vectors
[0].interruptLock
);
702 return kIOReturnSuccess
;
706 IOCPUInterruptController::getInterruptType(IOService */
*nub*/
,
710 if (interruptType
== NULL
) {
711 return kIOReturnBadArgument
;
714 *interruptType
= kIOInterruptTypeLevel
;
716 return kIOReturnSuccess
;
720 IOCPUInterruptController::enableInterrupt(IOService */
*nub*/
,
723 // ml_set_interrupts_enabled(true);
724 return kIOReturnSuccess
;
728 IOCPUInterruptController::disableInterrupt(IOService */
*nub*/
,
731 // ml_set_interrupts_enabled(false);
732 return kIOReturnSuccess
;
736 IOCPUInterruptController::causeInterrupt(IOService */
*nub*/
,
739 ml_cause_interrupt();
740 return kIOReturnSuccess
;
744 IOCPUInterruptController::handleInterrupt(void */
*refCon*/
,
748 IOInterruptVector
*vector
;
750 vector
= &vectors
[source
];
752 if (!vector
->interruptRegistered
) {
753 return kIOReturnInvalid
;
756 vector
->handler(vector
->target
, vector
->refCon
,
757 vector
->nub
, vector
->source
);
759 return kIOReturnSuccess
;
762 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */