/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
extern void kperf_kernel_configure(char *);
}

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>

extern "C" void console_suspend();
extern "C" void console_resume();
extern "C" void sched_override_recommended_cores_for_sleep(void);
extern "C" void sched_restore_recommended_cores_after_sleep(void);

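/*
 * Platform action bookkeeping. Each entry names a callback plus a signed
 * priority and is linked onto one of the per-event queues below (sleep, wake,
 * quiesce, active, halt/restart, panic). Queues are kept sorted by ascending
 * priority; wake and active entries are stored with a negated priority so
 * they run in the reverse order of their sleep counterparts. The
 * callout_in_progress flag suppresses re-entry when nested callouts are not
 * allowed (the panic path).
 */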
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
    void * param1, void * param2, void * param3,
    const char * name);

struct iocpu_platform_action_entry {
    queue_chain_t link;
    iocpu_platform_action_t action;
    int32_t priority;
    const char * name;
    void * refcon0;
    void * refcon1;
    boolean_t callout_in_progress;
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOLock *gIOCPUsLock;
static OSArray *gIOCPUs;
static const OSSymbol *gIOCPUStateKey;
static OSString *gIOCPUStateNames[kIOCPUStateCount];

enum{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};

const OSSymbol * gIOPlatformSleepActionKey;
const OSSymbol * gIOPlatformWakeActionKey;
const OSSymbol * gIOPlatformQuiesceActionKey;
const OSSymbol * gIOPlatformActiveActionKey;
const OSSymbol * gIOPlatformHaltRestartActionKey;
const OSSymbol * gIOPlatformPanicActionKey;

static queue_head_t gActionQueues[kQueueCount];
static const OSSymbol * gActionSymbols[kQueueCount];

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority) {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}

static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}

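/*
 * Run every action on the given queue whose absolute priority falls within
 * [first_priority, last_priority]. The first non-KERN_SUCCESS return value is
 * remembered and handed back after the whole queue has been walked. When
 * nested callouts are disallowed, an entry that is already in progress is
 * skipped rather than re-entered.
 */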
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
    void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t ret = KERN_SUCCESS;
    kern_return_t result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority)) {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress) {
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            } else if (allow_nested_callouts) {
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
            }
        }
        if (KERN_SUCCESS == result) {
            result = ret;
        }
    }
    return result;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

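/*
 * C entry points used by the sleep/wake, shutdown and panic paths. Each one
 * runs its queue across the full priority range (0 through UINT32_MAX,
 * spelled 0U - 1 below). The halt/restart and panic variants bail out with
 * kIOReturnNotReady if IOCPUInitialize() has not yet set the queues up, and
 * the panic variants run with nested callouts disallowed.
 */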
extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
    return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1,
               NULL, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
    return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1,
               NULL, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message)
{
    if (!gActionQueues[kQueueHaltRestart].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1,
               (void *)(uintptr_t) message, NULL, NULL, TRUE);
}

extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
               (void *)(uintptr_t) message, NULL, NULL, FALSE);
}


extern "C" kern_return_t
IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
{
    PE_panic_save_context_t context = {
        .psc_buffer = addr,
        .psc_offset = offset,
        .psc_length = len
    };

    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) {
        return kIOReturnNotReady;
    }
    return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
               (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
    void * param1, void * param2, void * param3,
    const char * service_name)
{
    IOReturn ret;
    IOService * service = (IOService *) refcon0;
    const OSSymbol * function = (const OSSymbol *) refcon1;

    kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

    ret = service->callPlatformFunction(function, false,
        (void *)(uintptr_t) priority, param1, param2, param3);

    return ret;
}

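/*
 * How a service opts in (illustrative sketch only; the driver class and its
 * prepareForShutdown() helper are hypothetical, not part of this file): the
 * driver publishes one of the platform action keys with an OSNumber priority,
 * e.g.
 *
 *     setProperty(kIOPlatformHaltRestartActionKey, (unsigned long long) 100, 32);
 *
 * and then handles the callout that IOServicePlatformAction() above forwards:
 *
 *     IOReturn
 *     HypotheticalPlatformDriver::callPlatformFunction(
 *         const OSSymbol * functionName, bool waitForFunction,
 *         void * param1, void * param2, void * param3, void * param4)
 *     {
 *         if (functionName->isEqualTo(kIOPlatformHaltRestartActionKey)) {
 *             // param1 is the priority, param2 the halt/restart message
 *             return prepareForShutdown();
 *         }
 *         return super::callPlatformFunction(functionName, waitForFunction,
 *                    param1, param2, param3, param4);
 *     }
 *
 * IOInstallServicePlatformAction() below picks the property up and links an
 * entry onto the matching action queue.
 */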
static void
IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
    iocpu_platform_action_entry_t * entry;
    OSNumber * num;
    uint32_t priority;
    const OSSymbol * key = gActionSymbols[qidx];
    queue_head_t * queue = &gActionQueues[qidx];
    bool reverse;
    bool uniq;

    num = OSDynamicCast(OSNumber, service->getProperty(key));
    if (!num) {
        return;
    }

    reverse = false;
    uniq = false;
    switch (qidx) {
    case kQueueWake:
    case kQueueActive:
        reverse = true;
        break;
    case kQueueHaltRestart:
    case kQueuePanic:
        uniq = true;
        break;
    }
    if (uniq) {
        queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
        {
            if (service == entry->refcon0) {
                return;
            }
        }
    }

    entry = IONew(iocpu_platform_action_entry_t, 1);
    entry->action = &IOServicePlatformAction;
    entry->name = service->getName();
    priority = num->unsigned32BitValue();
    if (reverse) {
        entry->priority = -priority;
    } else {
        entry->priority = priority;
    }
    entry->refcon0 = service;
    entry->refcon1 = (void *) key;
    entry->callout_in_progress = FALSE;

    iocpu_add_platform_action(queue, entry);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitialized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
            = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
            = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
            = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
            = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
            = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
            = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}

IOReturn
IOInstallServicePlatformActions(IOService * service)
{
    IOLockLock(gIOCPUsLock);

    IOInstallServicePlatformAction(service, kQueueHaltRestart);
    IOInstallServicePlatformAction(service, kQueuePanic);

    IOLockUnlock(gIOCPUsLock);

    return kIOReturnSuccess;
}

IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
        next = (typeof(entry))queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link)) {
            entry = next;
            next = (typeof(entry))queue_next(&entry->link);
            if (service == entry->refcon0) {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return kIOReturnSuccess;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

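/*
 * PE_cpu_* glue. The machine layer refers to CPUs by an opaque cpu_id_t,
 * which for these calls is really a pointer to the corresponding IOCPU
 * object; each function simply forwards to the matching IOCPU method.
 */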
kern_return_t
PE_cpu_start(cpu_id_t target,
    vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
    IOCPU *targetCPU = (IOCPU *)target;

    if (targetCPU == NULL) {
        return KERN_FAILURE;
    }
    return targetCPU->startCPU(start_paddr, arg_paddr);
}

void
PE_cpu_halt(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU *)target;

    targetCPU->haltCPU();
}

void
PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPU(targetCPU);
}

void
PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUDeferred(targetCPU);
}

void
PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUCancel(targetCPU);
}

void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == NULL) {
        panic("%s: invalid target CPU %p", __func__, target);
    }

    targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
    if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
        ml_set_is_quiescing(false);
    }
#endif /* defined(__arm__) || defined(__arm64__) */
}

void
PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm__) || defined(__arm64__)
    if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
        ml_set_is_quiescing(true);
    }
#endif /* defined(__arm__) || defined(__arm64__) */
    targetCPU->quiesceCPU();
}

#if defined(__arm__) || defined(__arm64__)
static perfmon_interrupt_handler_func pmi_handler = 0;

kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = (IOCPU*)target;

    if (targetCPU == nullptr) {
        return;
    }

    if (enable) {
        targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
        targetCPU->getProvider()->enableInterrupt(1);
    } else {
        targetCPU->getProvider()->disableInterrupt(1);
    }
}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

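/*
 * System sleep driver. The sequence below is: gather sleep/wake/quiesce/
 * active actions from every service in the registry, run the sleep actions,
 * halt every running non-boot CPU, suspend the console, then halt the boot
 * CPU (the system sleeps inside that call and returns from it on wake).
 * After wake the wake actions run, the per-sleep action entries are torn
 * down, and the stopped secondary CPUs are restarted.
 */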
void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
    extern IOCPU *currentShutdownTarget;
#endif
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");
#if defined(__arm64__)
    sched_override_recommended_cores_for_sleep();
#endif

    IORegistryIterator * iter;
    OSOrderedSet * all;
    IOService * service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
        kIORegistryIterateRecursively );
    if (iter) {
        all = 0;
        do{
            if (all) {
                all->release();
            }
            all = iter->iterateAll();
        }while (!iter->isValid());
        iter->release();

        if (all) {
            while ((service = (IOService *) all->getFirstObject())) {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1,
        NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
    currentShutdownTarget = NULL;
#endif

    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--) {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Make sure the boot CPU is the last to sleep: skip it here
        // and halt it only after the non-boot CPUs are done.
        if (target->getCPUNumber() == (UInt32)master_cpu) {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
            currentShutdownTarget = target;
#endif
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
    rootDomain->stop_watchdog_timer();

    // Now sleep the boot CPU.
    bootCPU->haltCPU();

    rootDomain->start_watchdog_timer();
    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1,
        NULL, NULL, NULL, TRUE);

    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
        while (!(queue_empty(&gActionQueues[qidx]))) {
            entry = (typeof(entry))queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++) {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            if (target->getCPUState() == kIOCPUStateRunning) {
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
            }

            if (target->getCPUState() == kIOCPUStateStopped) {
                processor_start(target->getMachProcessor());
            }
        }
    }

#if defined(__arm64__)
    sched_restore_recommended_cores_after_sleep();
#endif
}

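/*
 * IOCPU lifecycle. start() registers the CPU in the global gIOCPUs array and
 * publishes bus, CPU clock and timebase frequency properties on its provider
 * nub, using a 32-bit encoding when the bus or CPU frequency fits in 32 bits
 * and a 64-bit encoding otherwise (the timebase is always published as 32
 * bits).
 */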
bool
IOCPU::start(IOService *provider)
{
    OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

    if (!super::start(provider)) {
        return false;
    }

    _cpuGroup = gIOCPUs;
    cpuNub = provider;

    IOLockLock(gIOCPUsLock);
    gIOCPUs->setObject(this);
    IOLockUnlock(gIOCPUsLock);

    // Correct the bus, cpu and timebase frequencies in the device tree.
    if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    } else {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
    }
    provider->setProperty("bus-frequency", busFrequency);
    busFrequency->release();

    if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
    } else {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
    }
    provider->setProperty("clock-frequency", cpuFrequency);
    cpuFrequency->release();

    timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
    provider->setProperty("timebase-frequency", timebaseFrequency);
    timebaseFrequency->release();

    super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);

    setCPUNumber(0);
    setCPUState(kIOCPUStateUnregistered);

    return true;
}

OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
    if (aKey == gIOCPUStateKey) {
        return gIOCPUStateNames[_cpuState];
    }

    return super::getProperty(aKey);
}

bool
IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
    if (aKey == gIOCPUStateKey) {
        return false;
    }

    return super::setProperty(aKey, anObject);
}

bool
IOCPU::serializeProperties(OSSerialize *serialize) const
{
    bool result;
    OSDictionary *dict = dictionaryWithProperties();
    if (!dict) {
        return false;
    }
    dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
    result = dict->serialize(serialize);
    dict->release();
    return result;
}

IOReturn
IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString *stateStr;
    IOReturn result;

    if (dict == 0) {
        return kIOReturnUnsupported;
    }

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) {
            return result;
        }

        if (setProperty(gIOCPUStateKey, stateStr)) {
            return kIOReturnSuccess;
        }

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}

void
IOCPU::signalCPU(IOCPU */*target*/)
{
}

void
IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}

void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32
IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32
IOCPU::getCPUState(void)
{
    return _cpuState;
}

void
IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

OSArray *
IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32
IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

processor_t
IOCPU::getMachProcessor(void)
{
    return machProcessor;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);



/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}

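/*
 * Allocates one IOInterruptVector (plus its lock) per source, hands the
 * source count to ml_init_max_cpus(), and, on KPERF kernels, applies any
 * "kperf" boot-arg configuration once the CPU count is known. If a vector
 * lock cannot be allocated, the locks allocated so far are released and
 * kIOReturnNoResources is returned.
 */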
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
{
    int cnt;

    if (!super::init()) {
        return kIOReturnInvalid;
    }

    numSources = sources;
    numCPUs = cpus;

    vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
    if (vectors == 0) {
        return kIOReturnNoMemory;
    }
    bzero(vectors, numSources * sizeof(IOInterruptVector));

    // Allocate a lock for each vector
    for (cnt = 0; cnt < numSources; cnt++) {
        vectors[cnt].interruptLock = IOLockAlloc();
        if (vectors[cnt].interruptLock == NULL) {
            for (cnt = 0; cnt < numSources; cnt++) {
                if (vectors[cnt].interruptLock != NULL) {
                    IOLockFree(vectors[cnt].interruptLock);
                }
            }
            return kIOReturnNoResources;
        }
    }

    ml_init_max_cpus(numSources);

#if KPERF
    /*
     * kperf allocates based on the number of CPUs and requires them to all be
     * accounted for.
     */
    boolean_t found_kperf = FALSE;
    char kperf_config_str[64];
    found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
    if (found_kperf && kperf_config_str[0] != '\0') {
        kperf_kernel_configure(kperf_config_str);
    }
#endif /* KPERF */

    return kIOReturnSuccess;
}

void
IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
        this);
}

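/*
 * Publishes an interrupt specifier and a controller reference for each
 * source into the service's property table, so the generic IOInterrupt code
 * can route that service's interrupts through this controller. Existing
 * properties are left untouched.
 */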
void
IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int cnt;
    OSArray *controller;
    OSArray *specifier;
    OSData *tmpData;
    long tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0)) {
        return;
    }

    // Create the interrupt specifier array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    }

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}

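/*
 * Per-CPU bring-up rendezvous. enableCPUInterrupt() is called once per CPU;
 * it installs this controller's handleInterrupt() as that CPU's interrupt
 * handler and bumps enabledCPUs under vectors[0].interruptLock. When the
 * last CPU checks in, threads parked in registerInterrupt() below are woken
 * via thread_wakeup(this).
 */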
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}

IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numSources) {
        return kIOReturnNoResources;
    }

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub = nub;
    vector->source = source;
    vector->target = target;
    vector->refCon = refCon;

    // Get the vector ready. It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered = 1;

    IOUnlock(vector->interruptLock);

    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else {
        IOUnlock(vectors[0].interruptLock);
    }

    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::getInterruptType(IOService */*nub*/,
    int /*source*/,
    int *interruptType)
{
    if (interruptType == 0) {
        return kIOReturnBadArgument;
    }

    *interruptType = kIOInterruptTypeLevel;

    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
    // ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
    // ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}

IOReturn
IOCPUInterruptController::handleInterrupt(void */*refCon*/,
    IOService */*nub*/,
    int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) {
        return kIOReturnInvalid;
    }

    vector->handler(vector->target, vector->refCon,
        vector->nub, vector->source);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */