/*
 * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/debug.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kperf/kperf.h>
#include <pexpert/pexpert.h>
#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>
#include <libkern/c++/OSCollection.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformActions.h>
#include <IOKit/IOPMGR.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOService.h>
#include <IOKit/PassthruInterruptController.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <Kernel/IOKitKernelInternal.h>

#if USE_APPLEARMSMP

// FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
extern "C" void console_suspend();
extern "C" void console_resume();

static PassthruInterruptController *gCPUIC;
static IOPMGR *gPMGR;
static IOInterruptController *gAIC;
static bool aic_ipis = false;
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;

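// Thin wrappers that adapt XNU's C idle callbacks to the IOPMGR instance
// discovered in cpu_boot_thread below.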
static void
processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks)
{
	if (enter) {
		gPMGR->enterCPUIdle(new_timeout_ticks);
	} else {
		gPMGR->exitCPUIdle(new_timeout_ticks);
	}
}

static void
idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks)
{
	gPMGR->updateCPUIdle(new_timeout_ticks);
}

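// Registers one CPU's IPI and PMI vectors with the AIC. The interrupt
// controller APIs are nub-based, so a throwaway IOService is created to
// carry the vector data.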
static void
register_aic_handlers(const ml_topology_cpu *cpu_info,
    ipi_handler_t ipi_handler,
    perfmon_interrupt_handler_func pmi_handler)
{
	const int n_irqs = 3;
	int i;
	IOInterruptVectorNumber irqlist[n_irqs] = {
		cpu_info->self_ipi_irq,
		cpu_info->other_ipi_irq,
		cpu_info->pmi_irq };

	IOService *fakeCPU = new IOService();
	if (!fakeCPU || !fakeCPU->init()) {
		panic("Can't initialize fakeCPU");
	}

	IOInterruptSource source[n_irqs];
	for (i = 0; i < n_irqs; i++) {
		source[i].vectorData = OSData::withBytes(&irqlist[i], sizeof(irqlist[0]));
	}
	fakeCPU->_interruptSources = source;

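	// Two IPI schemes exist: pre-HAS_IPI chips signal each other through AIC
	// IRQs (self_ipi_irq/other_ipi_irq), while HAS_IPI chips use fast IPI
	// registers reached via ml_cpu_signal() and take an FIQ instead.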
	if (cpu_info->self_ipi_irq && cpu_info->other_ipi_irq) {
		// Legacy configuration, for !HAS_IPI chips (pre-Skye).
		if (gAIC->registerInterrupt(fakeCPU, 0, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 0) != kIOReturnSuccess ||
		    gAIC->registerInterrupt(fakeCPU, 1, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 1) != kIOReturnSuccess) {
			panic("Error registering IPIs");
		}
#if !defined(HAS_IPI)
		// Ideally this should be decided by EDT, but first we need to update EDT
		// to default to fast IPIs on modern platforms.
		aic_ipis = true;
#endif
	}

	// Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
	if (pmi_handler && cpu_info->pmi_irq) {
		if (gAIC->registerInterrupt(fakeCPU, 2, NULL, (IOInterruptHandler)pmi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 2) != kIOReturnSuccess) {
			panic("Error registering PMI");
		}
	}

	for (i = 0; i < n_irqs; i++) {
		source[i].vectorData->release();
	}
}

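// One-shot kernel thread that discovers the interrupt controller and PMGR,
// registers every CPU with the scheduler, and starts the secondary cores.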
static void
cpu_boot_thread(void */*unused0*/, wait_result_t /*unused1*/)
{
	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
	IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	gCPUIC = new PassthruInterruptController;
	if (!gCPUIC || !gCPUIC->init()) {
		panic("Can't initialize PassthruInterruptController");
	}
	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());

	ml_set_max_cpus(topology_info->max_cpu_id + 1);

	matching = IOService::serviceMatching("IOPMGR");
	gPMGR = OSDynamicCast(IOPMGR,
	    IOService::waitForMatchingService(matching, UINT64_MAX));
	matching->release();

	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
	machProcessors = static_cast<processor_t *>(IOMalloc(array_size));
	if (!machProcessors) {
		panic("Can't allocate machProcessors array");
	}
	memset(machProcessors, 0, array_size);

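	// Describe each CPU to the scheduler and bring it online.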
	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		ml_processor_info_t this_processor_info;
		ipi_handler_t ipi_handler;
		perfmon_interrupt_handler_func pmi_handler;

		memset(&this_processor_info, 0, sizeof(this_processor_info));
		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
		this_processor_info.phys_id = cpu_info->phys_id;
		this_processor_info.log_id = cpu_id;
		this_processor_info.cluster_id = cpu_info->cluster_id;
		this_processor_info.cluster_type = cpu_info->cluster_type;
		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;

		gPMGR->initCPUIdle(&this_processor_info);
		this_processor_info.processor_idle = &processor_idle_wrapper;
		this_processor_info.idle_timer = &idle_timer_wrapper;

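		// ml_processor_register hands back the IPI and PMI handlers that
		// the platform interrupt glue must deliver to this CPU.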
		kern_return_t result = ml_processor_register(&this_processor_info,
		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
		if (result == KERN_FAILURE) {
			panic("ml_processor_register failed: %d", result);
		}
		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);

		if (processor_start(machProcessors[cpu_id]) != KERN_SUCCESS) {
			panic("processor_start failed");
		}
	}

	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
}

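// Early boot entry point: captures the CPU topology, then defers the rest of
// bring-up to cpu_boot_thread so it can block on IOKit matching.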
void
IOCPUInitialize(void)
{
	topology_info = ml_get_topology_info();
	boot_cpu = topology_info->boot_cpu->cpu_id;

	thread_t thread;
	kernel_thread_start(&cpu_boot_thread, NULL, &thread);
	thread_set_thread_name(thread, "cpu_boot_thread");
	thread_deallocate(thread);
}

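// The cpu_id_t values handed to the PE_cpu_* callbacks are the pointer-sized
// cpu_ids registered above, so converting back is a simple cast.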
static unsigned int
target_to_cpu_id(cpu_id_t in)
{
	return (unsigned int)(uintptr_t)in;
}

// Release a secondary CPU from reset. Runs from a different CPU (obviously).
kern_return_t
PE_cpu_start(cpu_id_t target,
    vm_offset_t /*start_paddr*/, vm_offset_t /*arg_paddr*/)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id != boot_cpu) {
		gPMGR->enableCPUCore(cpu_id);
	}
	return KERN_SUCCESS;
}

// Initialize a CPU when it first comes up. Runs on the target CPU.
// |bootb| is true on the initial boot, false on S2R resume.
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) {
		IOCPURunPlatformActiveActions();
	}

	ml_broadcast_cpu_event(CPU_BOOTED, cpu_id);

	// Send myself an IPI to clear SIGPdisabled. Hang here if IPIs are broken.
	// (Probably only works on the boot CPU.)
	PE_cpu_signal(target, target);
	while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) {
		OSMemoryBarrier();
	}
}

void
PE_cpu_halt(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);
	processor_exit(machProcessors[cpu_id]);
}

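// IPI entry points. aic_ipis (set during handler registration) selects
// between the legacy AIC vectors and the fast ml_cpu_signal*() path.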
void
PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, false);
	} else {
		ml_cpu_signal(cpu->phys_id);
	}
}

void
PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, true);
	} else {
		ml_cpu_signal_deferred(cpu->phys_id);
	}
}

void
PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->cancelDeferredIPI(cpu->cpu_id);
	} else {
		ml_cpu_signal_retract(cpu->phys_id);
	}
}

// Brings down one CPU core for S2R. Runs on the target CPU.
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id == boot_cpu) {
		IOCPURunPlatformQuiesceActions();
	} else {
		gPMGR->disableCPUCore(cpu_id);
	}

	ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
	ml_arm_sleep();
}

// Takes one secondary CPU core offline at runtime. Runs on the target CPU.
// Returns true if the platform code should go into deep sleep WFI, false otherwise.
bool
PE_cpu_down(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);
	assert(cpu_id != boot_cpu);
	gPMGR->disableCPUCore(cpu_id);
	return false;
}

void
PE_handle_ext_interrupt(void)
{
	gCPUIC->externalInterrupt();
}

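// Sleeps the whole machine: takes every secondary CPU offline, then sleeps
// the boot CPU (the system suspends inside the final processor_exit call),
// and restarts everything in reverse order on wake.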
void
IOCPUSleepKernel(void)
{
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();
	unsigned int i;

	printf("IOCPUSleepKernel enter\n");
#if defined(__arm64__)
	sched_override_recommended_cores_for_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the non-boot CPUs.
	ml_set_is_quiescing(true);
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_exit(machProcessors[cpu_id]);
		}
	}

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */
	processor_exit(machProcessors[boot_cpu]);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */
	ml_set_is_quiescing(false);
	rootDomain->start_watchdog_timer();
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	IOPlatformActionsPostResume();
	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_start(machProcessors[cpu_id]);
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}

#endif /* USE_APPLEARMSMP */