// xnu-7195.101.1: iokit/Kernel/arm/AppleARMSMP.cpp
/*
 * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

extern "C" {
#include <kern/debug.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kperf/kperf.h>
#include <pexpert/pexpert.h>
#include <machine/machine_routines.h>
};

#include <libkern/OSAtomic.h>
#include <libkern/c++/OSCollection.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformActions.h>
#include <IOKit/IOPMGR.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOService.h>
#include <IOKit/PassthruInterruptController.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <Kernel/IOKitKernelInternal.h>

#if USE_APPLEARMSMP

// FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
extern "C" void console_suspend();
extern "C" void console_resume();

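// Module-level state, populated during cpu_boot_thread():
//  - gCPUIC forwards the platform's external interrupt to a child controller.
//  - gPMGR is the power manager driver that owns CPU idle and power transitions.
//  - gAIC is the child interrupt controller (the AIC driver) behind gCPUIC.
//  - aic_ipis selects AIC-delivered IPIs (legacy chips) over the fast
//    ml_cpu_signal() path.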
static PassthruInterruptController *gCPUIC;
static IOPMGR *gPMGR;
static IOInterruptController *gAIC;
static bool aic_ipis = false;
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;

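// Trampolines that adapt the scheduler's C idle callbacks to the C++ IOPMGR
// interface.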
static void
processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks)
{
	if (enter) {
		gPMGR->enterCPUIdle(new_timeout_ticks);
	} else {
		gPMGR->exitCPUIdle(new_timeout_ticks);
	}
}

static void
idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks)
{
	gPMGR->updateCPUIdle(new_timeout_ticks);
}

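// Builds a matching dictionary roughly equivalent to:
//   { "IOProviderClass": "IOPlatformDevice",
//     "IOPropertyMatch": { "device_type": "cpu", "reg": <phys_id> } }
// The caller owns the returned reference.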
static OSDictionary *
matching_dict_for_cpu_id(unsigned int cpu_id)
{
	// The cpu-id property in EDT doesn't necessarily match the dynamically
	// assigned logical ID in XNU, so look up the cpu node by the physical
	// (cluster/core) ID instead.
	OSSymbolConstPtr cpuTypeSymbol = OSSymbol::withCString("cpu");
	OSSymbolConstPtr cpuIdSymbol = OSSymbol::withCString("reg");
	OSDataPtr cpuId = OSData::withBytes(&(topology_info->cpus[cpu_id].phys_id), sizeof(uint32_t));

	OSDictionary *propMatch = OSDictionary::withCapacity(4);
	propMatch->setObject(gIODTTypeKey, cpuTypeSymbol);
	propMatch->setObject(cpuIdSymbol, cpuId);

	OSDictionary *matching = IOService::serviceMatching("IOPlatformDevice");
	matching->setObject(gIOPropertyMatchKey, propMatch);

	propMatch->release();
	cpuTypeSymbol->release();
	cpuIdSymbol->release();
	cpuId->release();

	return matching;
}

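// Wires up a CPU's IPI and PMI handlers via its EDT node. Pre-Skye (!HAS_IPI)
// chips expose three interrupt specifiers: IPIs at indices 0 and 2 and the PMI
// at index 1. On newer chips these are delivered as FIQs instead, so there is
// nothing to register with the AIC.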
static void
register_aic_handlers(const ml_topology_cpu *cpu_info,
    ipi_handler_t ipi_handler,
    perfmon_interrupt_handler_func pmi_handler)
{
	OSDictionary *matching = matching_dict_for_cpu_id(cpu_info->cpu_id);
	IOService *cpu = IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	OSArray *irqs = (OSArray *) cpu->getProperty(gIOInterruptSpecifiersKey);
	if (!irqs) {
		panic("Error finding interrupts for CPU %d", cpu_info->cpu_id);
	}

	unsigned int irqcount = irqs->getCount();

	if (irqcount == 3) {
		// Legacy configuration, for !HAS_IPI chips (pre-Skye).
		if (cpu->registerInterrupt(0, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(0) != kIOReturnSuccess ||
		    cpu->registerInterrupt(2, NULL, (IOInterruptAction)ipi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(2) != kIOReturnSuccess) {
			panic("Error registering IPIs");
		}
#if !defined(HAS_IPI)
		// Ideally this should be decided by EDT, but first we need to update EDT
		// to default to fast IPIs on modern platforms.
		aic_ipis = true;
#endif
	}

	// Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
	if (pmi_handler && irqcount == 3) {
		if (cpu->registerInterrupt(1, NULL, (IOInterruptAction)pmi_handler, NULL) != kIOReturnSuccess ||
		    cpu->enableInterrupt(1) != kIOReturnSuccess) {
			panic("Error registering PMI");
		}
	}
}

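// One-shot kernel thread that brings up SMP: waits for the platform expert,
// the interrupt controller, and IOPMGR to match, then registers and starts
// every CPU described by the EDT topology.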
static void
cpu_boot_thread(void */*unused0*/, wait_result_t /*unused1*/)
{
	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
	IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	gCPUIC = new PassthruInterruptController;
	if (!gCPUIC || !gCPUIC->init()) {
		panic("Can't initialize PassthruInterruptController");
	}
	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());

	ml_set_max_cpus(topology_info->max_cpu_id + 1);

	matching = IOService::serviceMatching("IOPMGR");
	gPMGR = OSDynamicCast(IOPMGR,
	    IOService::waitForMatchingService(matching, UINT64_MAX));
	matching->release();

	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
	machProcessors = static_cast<processor_t *>(IOMalloc(array_size));
	if (!machProcessors) {
		panic("Can't allocate machProcessors array");
	}
	memset(machProcessors, 0, array_size);

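	// Describe each CPU to the scheduler, wire up its idle callbacks and AIC
	// handlers, then bring it online.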
	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		ml_processor_info_t this_processor_info;
		ipi_handler_t ipi_handler;
		perfmon_interrupt_handler_func pmi_handler;

		memset(&this_processor_info, 0, sizeof(this_processor_info));
		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
		this_processor_info.phys_id = cpu_info->phys_id;
		this_processor_info.log_id = cpu_id;
		this_processor_info.cluster_id = cpu_info->cluster_id;
		this_processor_info.cluster_type = cpu_info->cluster_type;
		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;

		gPMGR->initCPUIdle(&this_processor_info);
		this_processor_info.processor_idle = &processor_idle_wrapper;
		this_processor_info.idle_timer = &idle_timer_wrapper;

		kern_return_t result = ml_processor_register(&this_processor_info,
		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
		if (result == KERN_FAILURE) {
			panic("ml_processor_register failed: %d", result);
		}
		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);

		if (processor_start(machProcessors[cpu_id]) != KERN_SUCCESS) {
			panic("processor_start failed");
		}
	}
	ml_cpu_init_completed();
	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
}

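// Kickoff entry point for SMP bring-up. Service matching can block, so the
// real work happens on a separate kernel thread.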
void
IOCPUInitialize(void)
{
	topology_info = ml_get_topology_info();
	boot_cpu = topology_info->boot_cpu->cpu_id;

	thread_t thread;
	kernel_thread_start(&cpu_boot_thread, NULL, &thread);
	thread_set_thread_name(thread, "cpu_boot_thread");
	thread_deallocate(thread);
}

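// The cpu_id_t cookie registered with ml_processor_register() is just the
// logical CPU ID cast to a pointer; undo the cast.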
static unsigned int
target_to_cpu_id(cpu_id_t in)
{
	return (unsigned int)(uintptr_t)in;
}

// Release a secondary CPU from reset. Runs from a different CPU (obviously).
kern_return_t
PE_cpu_start(cpu_id_t target,
    vm_offset_t /*start_paddr*/, vm_offset_t /*arg_paddr*/)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id != boot_cpu) {
		extern unsigned int LowResetVectorBase;
		gPMGR->enableCPUCore(cpu_id, ml_vtophys((vm_offset_t)&LowResetVectorBase));
	}
	return KERN_SUCCESS;
}

// Initialize a CPU when it first comes up. Runs on the target CPU.
// |bootb| is true on the initial boot, false on S2R resume.
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) {
		IOCPURunPlatformActiveActions();
	}

	ml_broadcast_cpu_event(CPU_BOOTED, cpu_id);

	// Send myself an IPI to clear SIGPdisabled. Hang here if IPIs are broken.
	// (Probably only works on the boot CPU.)
	PE_cpu_signal(target, target);
	while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) {
		OSMemoryBarrier();
	}
}

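// Halts the target CPU by taking its scheduler processor offline.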
void
PE_cpu_halt(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);
	processor_exit(machProcessors[cpu_id]);
}

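// IPI entry points. On legacy chips the IPI is posted through the AIC driver;
// otherwise it takes the architectural fast path (ml_cpu_signal and friends).
// A deferred IPI may be coalesced and delivered later; cancel retracts a
// deferred IPI that has not yet been delivered.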
void
PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, false);
	} else {
		ml_cpu_signal(cpu->phys_id);
	}
}

void
PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->sendIPI(cpu->cpu_id, true);
	} else {
		ml_cpu_signal_deferred(cpu->phys_id);
	}
}

void
PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target)
{
	struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
	if (aic_ipis) {
		gAIC->cancelDeferredIPI(cpu->cpu_id);
	} else {
		ml_cpu_signal_retract(cpu->phys_id);
	}
}

// Brings down one CPU core for S2R. Runs on the target CPU.
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	if (cpu_id == boot_cpu) {
		IOCPURunPlatformQuiesceActions();
	} else {
		gPMGR->disableCPUCore(cpu_id);
	}

	ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
	ml_arm_sleep();
}

// Takes one secondary CPU core offline at runtime. Runs on the target CPU.
// Returns true if the platform code should go into deep sleep WFI, false otherwise.
bool
PE_cpu_down(cpu_id_t target)
{
	unsigned int cpu_id = target_to_cpu_id(target);
	assert(cpu_id != boot_cpu);
	gPMGR->disableCPUCore(cpu_id);
	return false;
}

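// Entry point for external interrupts trapped by the platform layer: hand them
// to the passthru controller, which dispatches to the child AIC driver.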
void
PE_handle_ext_interrupt(void)
{
	gCPUIC->externalInterrupt();
}

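// Full-system sleep (S2R). Quiesces the non-boot CPUs, then sleeps the boot
// CPU; the wake path resumes in the middle of this function, after the boot
// CPU's processor_exit() call returns.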
void
IOCPUSleepKernel(void)
{
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();
	unsigned int i;

	printf("IOCPUSleepKernel enter\n");
#if defined(__arm64__)
	sched_override_recommended_cores_for_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU. Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the non-boot CPUs.
	ml_set_is_quiescing(true);
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_exit(machProcessors[cpu_id]);
		}
	}

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */
	processor_exit(machProcessors[boot_cpu]);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	ml_set_is_quiescing(false);
	rootDomain->start_watchdog_timer();
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	IOPlatformActionsPostResume();
	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_start(machProcessors[cpu_id]);
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}

#endif /* USE_APPLEARMSMP */