// Source: iokit/Kernel/arm/AppleARMSMP.cpp (Apple XNU, xnu-7195.50.7.100.1)
1 /*
2 * Copyright (c) 2019 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 extern "C" {
30 #include <kern/debug.h>
31 #include <kern/processor.h>
32 #include <kern/thread.h>
33 #include <kperf/kperf.h>
34 #include <pexpert/pexpert.h>
35 #include <machine/machine_routines.h>
36 };
37
38 #include <libkern/OSAtomic.h>
39 #include <libkern/c++/OSCollection.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOPlatformActions.h>
42 #include <IOKit/IOPMGR.h>
43 #include <IOKit/IOReturn.h>
44 #include <IOKit/IOService.h>
45 #include <IOKit/PassthruInterruptController.h>
46 #include <IOKit/pwr_mgt/RootDomain.h>
47 #include <IOKit/pwr_mgt/IOPMPrivate.h>
48 #include <Kernel/IOKitKernelInternal.h>
49
#if USE_APPLEARMSMP

// FIXME: These are in <kern/misc_protos.h> but that file has other deps that aren't being resolved
extern "C" void console_suspend();
extern "C" void console_resume();

// Passthru controller interposed in front of the real interrupt controller;
// external interrupts are routed through it (see PE_handle_ext_interrupt).
static PassthruInterruptController *gCPUIC;
// Platform power-manager driver (matched as "IOPMGR" in cpu_boot_thread);
// owns per-core power and CPU-idle handling.
static IOPMGR *gPMGR;
// Child interrupt controller obtained from gCPUIC; used to register and
// deliver IPIs/PMIs.
static IOInterruptController *gAIC;
// True when IPIs go through gAIC->sendIPI() (legacy, !HAS_IPI chips) rather
// than the ml_cpu_signal* fast path.
static bool aic_ipis = false;
// CPU topology from the platform expert; set once in IOCPUInitialize().
static const ml_topology_info *topology_info;

// cpu_id of the boot processor
static unsigned int boot_cpu;

// array index is a cpu_id (so some elements may be NULL)
static processor_t *machProcessors;
67
68 static void
69 processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks)
70 {
71 if (enter) {
72 gPMGR->enterCPUIdle(new_timeout_ticks);
73 } else {
74 gPMGR->exitCPUIdle(new_timeout_ticks);
75 }
76 }
77
// Scheduler idle-timer hook: forwards the timer update to the power-manager
// driver, which may adjust |new_timeout_ticks|.
static void
idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks)
{
	gPMGR->updateCPUIdle(new_timeout_ticks);
}
83
// Register a CPU's IPI and PMI handlers with the AIC interrupt controller.
// |cpu_info| supplies the IRQ numbers; a zero IRQ means that source does not
// exist on this chip (e.g. fast-IPI or FIQ-PMI platforms).
static void
register_aic_handlers(const ml_topology_cpu *cpu_info,
    ipi_handler_t ipi_handler,
    perfmon_interrupt_handler_func pmi_handler)
{
	const int n_irqs = 3;
	int i;
	// Order fixes the vector/source indices used below:
	// 0 = self-IPI, 1 = other-IPI, 2 = PMI.
	IOInterruptVectorNumber irqlist[n_irqs] = {
		cpu_info->self_ipi_irq,
		cpu_info->other_ipi_irq,
		cpu_info->pmi_irq };

	// registerInterrupt() needs an IOService nub, so fabricate a bare one.
	// NOTE(review): fakeCPU is never released, and _interruptSources is
	// pointed at the stack-allocated |source| array below, which goes out
	// of scope when this function returns — this relies on the controller
	// not touching the nub's sources after registration; confirm.
	IOService *fakeCPU = new IOService();
	if (!fakeCPU || !fakeCPU->init()) {
		panic("Can't initialize fakeCPU");
	}

	IOInterruptSource source[n_irqs];
	for (i = 0; i < n_irqs; i++) {
		source[i].vectorData = OSData::withBytes(&irqlist[i], sizeof(irqlist[0]));
	}
	fakeCPU->_interruptSources = source;

	if (cpu_info->self_ipi_irq && cpu_info->other_ipi_irq) {
		// Legacy configuration, for !HAS_IPI chips (pre-Skye).
		if (gAIC->registerInterrupt(fakeCPU, 0, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 0) != kIOReturnSuccess ||
		    gAIC->registerInterrupt(fakeCPU, 1, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 1) != kIOReturnSuccess) {
			panic("Error registering IPIs");
		}
#if !defined(HAS_IPI)
		// Ideally this should be decided by EDT, but first we need to update EDT
		// to default to fast IPIs on modern platforms.
		aic_ipis = true;
#endif
	}
	// Conditional, because on Skye and later, we use an FIQ instead of an external IRQ.
	if (pmi_handler && cpu_info->pmi_irq) {
		if (gAIC->registerInterrupt(fakeCPU, 2, NULL, (IOInterruptHandler)pmi_handler, NULL) != kIOReturnSuccess ||
		    gAIC->enableInterrupt(fakeCPU, 2) != kIOReturnSuccess) {
			panic("Error registering PMI");
		}
	}

	// Drop our references to the per-vector OSData objects.
	// NOTE(review): assumes registration retained/copied what it needs.
	for (i = 0; i < n_irqs; i++) {
		source[i].vectorData->release();
	}
}
133
// Kernel thread that performs SMP bring-up: waits for the required drivers,
// then registers and starts every CPU with the scheduler.  Runs on its own
// thread because it blocks in IOKit matching.
static void
cpu_boot_thread(void */*unused0*/, wait_result_t /*unused1*/)
{
	// Wait until the platform expert has published, so driver matching
	// below can succeed.
	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
	IOService::waitForMatchingService(matching, UINT64_MAX);
	matching->release();

	// Interpose the passthru controller and wait for the real interrupt
	// controller driver to attach beneath it.
	gCPUIC = new PassthruInterruptController;
	if (!gCPUIC || !gCPUIC->init()) {
		panic("Can't initialize PassthruInterruptController");
	}
	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());

	ml_set_max_cpus(topology_info->max_cpu_id + 1);

	// Wait for the power-manager driver; it is needed for idle and core
	// power control during per-CPU registration.
	matching = IOService::serviceMatching("IOPMGR");
	gPMGR = OSDynamicCast(IOPMGR,
	    IOService::waitForMatchingService(matching, UINT64_MAX));
	matching->release();

	// Indexed by cpu_id, hence sized by max_cpu_id (not num_cpus); unused
	// slots stay NULL.
	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
	machProcessors = static_cast<processor_t *>(IOMalloc(array_size));
	if (!machProcessors) {
		panic("Can't allocate machProcessors array");
	}
	memset(machProcessors, 0, array_size);

	ml_cpu_init_state();
	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
		const unsigned int cpu_id = cpu_info->cpu_id;
		ml_processor_info_t this_processor_info;
		ipi_handler_t ipi_handler;
		perfmon_interrupt_handler_func pmi_handler;

		memset(&this_processor_info, 0, sizeof(this_processor_info));
		// cpu_id_t is pointer-sized; pack the integer id into it.  It is
		// unpacked again by target_to_cpu_id().
		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
		this_processor_info.phys_id = cpu_info->phys_id;
		this_processor_info.log_id = cpu_id;
		this_processor_info.cluster_id = cpu_info->cluster_id;
		this_processor_info.cluster_type = cpu_info->cluster_type;
		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;

		// Let the PM driver fill in its idle state before registration,
		// then install the local idle trampolines.
		gPMGR->initCPUIdle(&this_processor_info);
		this_processor_info.processor_idle = &processor_idle_wrapper;
		this_processor_info.idle_timer = &idle_timer_wrapper;

		kern_return_t result = ml_processor_register(&this_processor_info,
		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
		if (result == KERN_FAILURE) {
			panic("ml_processor_register failed: %d", result);
		}
		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);

		if (processor_start(machProcessors[cpu_id]) != KERN_SUCCESS) {
			panic("processor_start failed");
		}
	}
	// Announce that every CPU has been initialized and started.
	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
}
197
198 void
199 IOCPUInitialize(void)
200 {
201 topology_info = ml_get_topology_info();
202 boot_cpu = topology_info->boot_cpu->cpu_id;
203
204 thread_t thread;
205 kernel_thread_start(&cpu_boot_thread, NULL, &thread);
206 thread_set_thread_name(thread, "cpu_boot_thread");
207 thread_deallocate(thread);
208 }
209
210 static unsigned int
211 target_to_cpu_id(cpu_id_t in)
212 {
213 return (unsigned int)(uintptr_t)in;
214 }
215
216 // Release a secondary CPU from reset. Runs from a different CPU (obviously).
217 kern_return_t
218 PE_cpu_start(cpu_id_t target,
219 vm_offset_t /*start_paddr*/, vm_offset_t /*arg_paddr*/)
220 {
221 unsigned int cpu_id = target_to_cpu_id(target);
222
223 if (cpu_id != boot_cpu) {
224 gPMGR->enableCPUCore(cpu_id);
225 }
226 return KERN_SUCCESS;
227 }
228
// Initialize a CPU when it first comes up. Runs on the target CPU.
// |bootb| is true on the initial boot, false on S2R resume.
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	unsigned int cpu_id = target_to_cpu_id(target);

	// On resume (not first boot) of the boot CPU while the system is
	// quiescing, replay the platform "active" actions.
	if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) {
		IOCPURunPlatformActiveActions();
	}

	ml_broadcast_cpu_event(CPU_BOOTED, cpu_id);

	// Send myself an IPI to clear SIGPdisabled. Hang here if IPIs are broken.
	// (Probably only works on the boot CPU.)
	PE_cpu_signal(target, target);
	while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) {
		OSMemoryBarrier();
	}
}
249
250 void
251 PE_cpu_halt(cpu_id_t target)
252 {
253 unsigned int cpu_id = target_to_cpu_id(target);
254 processor_exit(machProcessors[cpu_id]);
255 }
256
257 void
258 PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target)
259 {
260 struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
261 if (aic_ipis) {
262 gAIC->sendIPI(cpu->cpu_id, false);
263 } else {
264 ml_cpu_signal(cpu->phys_id);
265 }
266 }
267
268 void
269 PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target)
270 {
271 struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
272 if (aic_ipis) {
273 gAIC->sendIPI(cpu->cpu_id, true);
274 } else {
275 ml_cpu_signal_deferred(cpu->phys_id);
276 }
277 }
278
279 void
280 PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target)
281 {
282 struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)];
283 if (aic_ipis) {
284 gAIC->cancelDeferredIPI(cpu->cpu_id);
285 } else {
286 ml_cpu_signal_retract(cpu->phys_id);
287 }
288 }
289
290 // Brings down one CPU core for S2R. Runs on the target CPU.
291 void
292 PE_cpu_machine_quiesce(cpu_id_t target)
293 {
294 unsigned int cpu_id = target_to_cpu_id(target);
295
296 if (cpu_id == boot_cpu) {
297 IOCPURunPlatformQuiesceActions();
298 } else {
299 gPMGR->disableCPUCore(cpu_id);
300 }
301
302 ml_broadcast_cpu_event(CPU_DOWN, cpu_id);
303 ml_arm_sleep();
304 }
305
306 // Takes one secondary CPU core offline at runtime. Runs on the target CPU.
307 // Returns true if the platform code should go into deep sleep WFI, false otherwise.
308 bool
309 PE_cpu_down(cpu_id_t target)
310 {
311 unsigned int cpu_id = target_to_cpu_id(target);
312 assert(cpu_id != boot_cpu);
313 gPMGR->disableCPUCore(cpu_id);
314 return false;
315 }
316
// Route an incoming external interrupt through the passthru controller,
// which hands it on to its child (AIC) driver.
void
PE_handle_ext_interrupt(void)
{
	gCPUIC->externalInterrupt();
}
322
// Full-system sleep sequence: runs platform pre-sleep actions, takes every
// secondary CPU offline, then sleeps the boot CPU (the system suspends
// inside processor_exit on the boot CPU) and unwinds everything on wake.
// Ordering here is deliberate; do not reorder steps.
void
IOCPUSleepKernel(void)
{
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();
	unsigned int i;

	printf("IOCPUSleepKernel enter\n");
#if defined(__arm64__)
	sched_override_recommended_cores_for_sleep();
#endif

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU. Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the non-boot CPUs.
	ml_set_is_quiescing(true);
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_exit(machProcessors[cpu_id]);
		}
	}

	// Console must be quiesced before the boot CPU goes down.
	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */
	processor_exit(machProcessors[boot_cpu]);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	ml_set_is_quiescing(false);
	rootDomain->start_watchdog_timer();
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	IOPlatformActionsPostResume();
	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Bring the secondaries back online.
	for (i = 0; i < topology_info->num_cpus; i++) {
		unsigned int cpu_id = topology_info->cpus[i].cpu_id;
		if (cpu_id != boot_cpu) {
			processor_start(machProcessors[cpu_id]);
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

	// Restore the priority we boosted above.
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
398
399 #endif /* USE_APPLEARMSMP */