2 * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <kern/debug.h>
31 #include <kern/processor.h>
32 #include <kern/thread.h>
33 #include <kperf/kperf.h>
34 #include <pexpert/pexpert.h>
35 #include <machine/machine_routines.h>
38 #include <libkern/OSAtomic.h>
39 #include <libkern/c++/OSCollection.h>
40 #include <IOKit/IODeviceTreeSupport.h>
41 #include <IOKit/IOLib.h>
42 #include <IOKit/IOPlatformActions.h>
43 #include <IOKit/IOPMGR.h>
44 #include <IOKit/IOReturn.h>
45 #include <IOKit/IOService.h>
46 #include <IOKit/PassthruInterruptController.h>
47 #include <IOKit/pwr_mgt/RootDomain.h>
48 #include <IOKit/pwr_mgt/IOPMPrivate.h>
49 #include <Kernel/IOKitKernelInternal.h>
53 // FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
54 extern "C" void console_suspend();
55 extern "C" void console_resume();
// Pass-through interrupt controller that owns the external interrupt line
// until the platform's AIC driver registers as its child; external
// interrupts are forwarded via gCPUIC->externalInterrupt().
static PassthruInterruptController *gCPUIC;

// Child interrupt controller obtained from gCPUIC->waitForChildController();
// used to send/cancel IPIs through the AIC.
static IOInterruptController *gAIC;

// Presumably selects AIC-driven IPIs over the ml_cpu_signal*() fast path in
// the PE_cpu_signal* functions — TODO(review): confirm against original.
static bool aic_ipis = false;

// CPU topology snapshot, captured once in IOCPUInitialize().
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;

// NOTE(review): gPMGR (IOPMGR *) is assigned in cpu_boot_thread() and used
// throughout this file, but its definition is not visible in this chunk —
// verify it is defined elsewhere in the file or project.
70 processor_idle_wrapper(cpu_id_t
/*cpu_id*/, boolean_t enter
, uint64_t *new_timeout_ticks
)
73 gPMGR
->enterCPUIdle(new_timeout_ticks
);
75 gPMGR
->exitCPUIdle(new_timeout_ticks
);
80 idle_timer_wrapper(void */
*refCon*/
, uint64_t *new_timeout_ticks
)
82 gPMGR
->updateCPUIdle(new_timeout_ticks
);
86 matching_dict_for_cpu_id(unsigned int cpu_id
)
88 // The cpu-id property in EDT doesn't necessarily match the dynamically
89 // assigned logical ID in XNU, so look up the cpu node by the physical
90 // (cluster/core) ID instead.
91 OSSymbolConstPtr cpuTypeSymbol
= OSSymbol::withCString("cpu");
92 OSSymbolConstPtr cpuIdSymbol
= OSSymbol::withCString("reg");
93 OSDataPtr cpuId
= OSData::withBytes(&(topology_info
->cpus
[cpu_id
].phys_id
), sizeof(uint32_t));
95 OSDictionary
*propMatch
= OSDictionary::withCapacity(4);
96 propMatch
->setObject(gIODTTypeKey
, cpuTypeSymbol
);
97 propMatch
->setObject(cpuIdSymbol
, cpuId
);
99 OSDictionary
*matching
= IOService::serviceMatching("IOPlatformDevice");
100 matching
->setObject(gIOPropertyMatchKey
, propMatch
);
102 propMatch
->release();
103 cpuTypeSymbol
->release();
104 cpuIdSymbol
->release();
111 register_aic_handlers(const ml_topology_cpu
*cpu_info
,
112 ipi_handler_t ipi_handler
,
113 perfmon_interrupt_handler_func pmi_handler
)
115 OSDictionary
*matching
= matching_dict_for_cpu_id(cpu_info
->cpu_id
);
116 IOService
*cpu
= IOService::waitForMatchingService(matching
, UINT64_MAX
);
119 OSArray
*irqs
= (OSArray
*) cpu
->getProperty(gIOInterruptSpecifiersKey
);
121 panic("Error finding interrupts for CPU %d", cpu_info
->cpu_id
);
124 unsigned int irqcount
= irqs
->getCount();
127 // Legacy configuration, for !HAS_IPI chips (pre-Skye).
128 if (cpu
->registerInterrupt(0, NULL
, (IOInterruptAction
)ipi_handler
, NULL
) != kIOReturnSuccess
||
129 cpu
->enableInterrupt(0) != kIOReturnSuccess
||
130 cpu
->registerInterrupt(2, NULL
, (IOInterruptAction
)ipi_handler
, NULL
) != kIOReturnSuccess
||
131 cpu
->enableInterrupt(2) != kIOReturnSuccess
) {
132 panic("Error registering IPIs");
134 #if !defined(HAS_IPI)
135 // Ideally this should be decided by EDT, but first we need to update EDT
136 // to default to fast IPIs on modern platforms.
141 // Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
142 if (pmi_handler
&& irqcount
== 1) {
143 if (cpu
->registerInterrupt(1, NULL
, (IOInterruptAction
)pmi_handler
, NULL
) != kIOReturnSuccess
||
144 cpu
->enableInterrupt(1) != kIOReturnSuccess
) {
145 panic("Error registering PMI");
151 cpu_boot_thread(void */
*unused0*/
, wait_result_t
/*unused1*/)
153 OSDictionary
*matching
= IOService::serviceMatching("IOPlatformExpert");
154 IOService::waitForMatchingService(matching
, UINT64_MAX
);
157 gCPUIC
= new PassthruInterruptController
;
158 if (!gCPUIC
|| !gCPUIC
->init()) {
159 panic("Can't initialize PassthruInterruptController");
161 gAIC
= static_cast<IOInterruptController
*>(gCPUIC
->waitForChildController());
163 ml_set_max_cpus(topology_info
->max_cpu_id
+ 1);
165 matching
= IOService::serviceMatching("IOPMGR");
166 gPMGR
= OSDynamicCast(IOPMGR
,
167 IOService::waitForMatchingService(matching
, UINT64_MAX
));
170 const size_t array_size
= (topology_info
->max_cpu_id
+ 1) * sizeof(*machProcessors
);
171 machProcessors
= static_cast<processor_t
*>(IOMalloc(array_size
));
172 if (!machProcessors
) {
173 panic("Can't allocate machProcessors array");
175 memset(machProcessors
, 0, array_size
);
177 for (unsigned int cpu
= 0; cpu
< topology_info
->num_cpus
; cpu
++) {
178 const ml_topology_cpu
*cpu_info
= &topology_info
->cpus
[cpu
];
179 const unsigned int cpu_id
= cpu_info
->cpu_id
;
180 ml_processor_info_t this_processor_info
;
181 ipi_handler_t ipi_handler
;
182 perfmon_interrupt_handler_func pmi_handler
;
184 memset(&this_processor_info
, 0, sizeof(this_processor_info
));
185 this_processor_info
.cpu_id
= reinterpret_cast<cpu_id_t
>(cpu_id
);
186 this_processor_info
.phys_id
= cpu_info
->phys_id
;
187 this_processor_info
.log_id
= cpu_id
;
188 this_processor_info
.cluster_id
= cpu_info
->cluster_id
;
189 this_processor_info
.cluster_type
= cpu_info
->cluster_type
;
190 this_processor_info
.l2_cache_size
= cpu_info
->l2_cache_size
;
191 this_processor_info
.l2_cache_id
= cpu_info
->l2_cache_id
;
192 this_processor_info
.l3_cache_size
= cpu_info
->l3_cache_size
;
193 this_processor_info
.l3_cache_id
= cpu_info
->l3_cache_id
;
195 gPMGR
->initCPUIdle(&this_processor_info
);
196 this_processor_info
.processor_idle
= &processor_idle_wrapper
;
197 this_processor_info
.idle_timer
= &idle_timer_wrapper
;
199 kern_return_t result
= ml_processor_register(&this_processor_info
,
200 &machProcessors
[cpu_id
], &ipi_handler
, &pmi_handler
);
201 if (result
== KERN_FAILURE
) {
202 panic("ml_processor_register failed: %d", result
);
204 register_aic_handlers(cpu_info
, ipi_handler
, pmi_handler
);
206 if (processor_start(machProcessors
[cpu_id
]) != KERN_SUCCESS
) {
207 panic("processor_start failed");
210 ml_cpu_init_completed();
211 IOService::publishResource(gIOAllCPUInitializedKey
, kOSBooleanTrue
);
215 IOCPUInitialize(void)
217 topology_info
= ml_get_topology_info();
218 boot_cpu
= topology_info
->boot_cpu
->cpu_id
;
221 kernel_thread_start(&cpu_boot_thread
, NULL
, &thread
);
222 thread_set_thread_name(thread
, "cpu_boot_thread");
223 thread_deallocate(thread
);
227 target_to_cpu_id(cpu_id_t in
)
229 return (unsigned int)(uintptr_t)in
;
232 // Release a secondary CPU from reset. Runs from a different CPU (obviously).
234 PE_cpu_start(cpu_id_t target
,
235 vm_offset_t
/*start_paddr*/, vm_offset_t
/*arg_paddr*/)
237 unsigned int cpu_id
= target_to_cpu_id(target
);
239 if (cpu_id
!= boot_cpu
) {
240 extern unsigned int LowResetVectorBase
;
241 gPMGR
->enableCPUCore(cpu_id
, ml_vtophys((vm_offset_t
)&LowResetVectorBase
));
246 // Initialize a CPU when it first comes up. Runs on the target CPU.
247 // |bootb| is true on the initial boot, false on S2R resume.
249 PE_cpu_machine_init(cpu_id_t target
, boolean_t bootb
)
251 unsigned int cpu_id
= target_to_cpu_id(target
);
253 if (!bootb
&& cpu_id
== boot_cpu
&& ml_is_quiescing()) {
254 IOCPURunPlatformActiveActions();
257 ml_broadcast_cpu_event(CPU_BOOTED
, cpu_id
);
259 // Send myself an IPI to clear SIGPdisabled. Hang here if IPIs are broken.
260 // (Probably only works on the boot CPU.)
261 PE_cpu_signal(target
, target
);
262 while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) {
268 PE_cpu_halt(cpu_id_t target
)
270 unsigned int cpu_id
= target_to_cpu_id(target
);
271 processor_exit(machProcessors
[cpu_id
]);
275 PE_cpu_signal(cpu_id_t
/*source*/, cpu_id_t target
)
277 struct ml_topology_cpu
*cpu
= &topology_info
->cpus
[target_to_cpu_id(target
)];
279 gAIC
->sendIPI(cpu
->cpu_id
, false);
281 ml_cpu_signal(cpu
->phys_id
);
286 PE_cpu_signal_deferred(cpu_id_t
/*source*/, cpu_id_t target
)
288 struct ml_topology_cpu
*cpu
= &topology_info
->cpus
[target_to_cpu_id(target
)];
290 gAIC
->sendIPI(cpu
->cpu_id
, true);
292 ml_cpu_signal_deferred(cpu
->phys_id
);
297 PE_cpu_signal_cancel(cpu_id_t
/*source*/, cpu_id_t target
)
299 struct ml_topology_cpu
*cpu
= &topology_info
->cpus
[target_to_cpu_id(target
)];
301 gAIC
->cancelDeferredIPI(cpu
->cpu_id
);
303 ml_cpu_signal_retract(cpu
->phys_id
);
307 // Brings down one CPU core for S2R. Runs on the target CPU.
309 PE_cpu_machine_quiesce(cpu_id_t target
)
311 unsigned int cpu_id
= target_to_cpu_id(target
);
313 if (cpu_id
== boot_cpu
) {
314 IOCPURunPlatformQuiesceActions();
316 gPMGR
->disableCPUCore(cpu_id
);
319 ml_broadcast_cpu_event(CPU_DOWN
, cpu_id
);
323 // Takes one secondary CPU core offline at runtime. Runs on the target CPU.
324 // Returns true if the platform code should go into deep sleep WFI, false otherwise.
326 PE_cpu_down(cpu_id_t target
)
328 unsigned int cpu_id
= target_to_cpu_id(target
);
329 assert(cpu_id
!= boot_cpu
);
330 gPMGR
->disableCPUCore(cpu_id
);
335 PE_handle_ext_interrupt(void)
337 gCPUIC
->externalInterrupt();
341 IOCPUSleepKernel(void)
343 IOPMrootDomain
*rootDomain
= IOService::getPMRootDomain();
346 printf("IOCPUSleepKernel enter\n");
347 #if defined(__arm64__)
348 sched_override_recommended_cores_for_sleep();
351 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformActions
);
352 IOPlatformActionsPreSleep();
353 rootDomain
->tracePoint( kIOPMTracePointSleepCPUs
);
356 thread_t self
= current_thread();
359 * We need to boost this thread's priority to the maximum kernel priority to
360 * ensure we can urgently preempt ANY thread currently executing on the
361 * target CPU. Note that realtime threads have their own mechanism to eventually
362 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
364 old_pri
= thread_kern_get_pri(self
);
365 thread_kern_set_pri(self
, thread_kern_get_kernel_maxpri());
367 // Sleep the non-boot CPUs.
368 ml_set_is_quiescing(true);
369 for (i
= 0; i
< topology_info
->num_cpus
; i
++) {
370 unsigned int cpu_id
= topology_info
->cpus
[i
].cpu_id
;
371 if (cpu_id
!= boot_cpu
) {
372 processor_exit(machProcessors
[cpu_id
]);
378 rootDomain
->tracePoint( kIOPMTracePointSleepPlatformDriver
);
379 rootDomain
->stop_watchdog_timer();
382 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
383 * The system sleeps here.
385 processor_exit(machProcessors
[boot_cpu
]);
388 * The system is now coming back from sleep on the boot CPU.
389 * The kQueueActive actions have already been called.
392 ml_set_is_quiescing(false);
393 rootDomain
->start_watchdog_timer();
394 rootDomain
->tracePoint( kIOPMTracePointWakePlatformActions
);
398 IOPlatformActionsPostResume();
399 rootDomain
->tracePoint( kIOPMTracePointWakeCPUs
);
401 for (i
= 0; i
< topology_info
->num_cpus
; i
++) {
402 unsigned int cpu_id
= topology_info
->cpus
[i
].cpu_id
;
403 if (cpu_id
!= boot_cpu
) {
404 processor_start(machProcessors
[cpu_id
]);
408 #if defined(__arm64__)
409 sched_restore_recommended_cores_after_sleep();
412 thread_kern_set_pri(self
, old_pri
);
413 printf("IOCPUSleepKernel exit\n");
416 #endif /* USE_APPLEARMSMP */