/*
 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_number.h>
}

#include <machine/machine_routines.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/pwr_mgt/RootDomain.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOCPU.h>
#include "IOKitKernelInternal.h"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <kern/queue.h>

extern "C" void console_suspend();
extern "C" void console_resume();

typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
                                                 void * param1, void * param2, void * param3,
                                                 const char * name);

struct iocpu_platform_action_entry
{
    queue_chain_t                        link;
    iocpu_platform_action_t              action;
    int32_t                              priority;
    const char *                         name;
    void *                               refcon0;
    void *                               refcon1;
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define kBootCPUNumber 0

enum
{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};

const OSSymbol * gIOPlatformSleepActionKey;
const OSSymbol * gIOPlatformWakeActionKey;
const OSSymbol * gIOPlatformQuiesceActionKey;
const OSSymbol * gIOPlatformActiveActionKey;
const OSSymbol * gIOPlatformHaltRestartActionKey;
const OSSymbol * gIOPlatformPanicActionKey;

static queue_head_t     gActionQueues[kQueueCount];
static const OSSymbol * gActionSymbols[kQueueCount];

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

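/*
 * Insert an action entry into a queue, keeping the queue sorted by
 * ascending priority: the entry goes before the first existing entry
 * with a strictly greater priority, or at the tail otherwise, so
 * equal-priority entries keep their registration order.
 */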
static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority)
        {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}

static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}

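/*
 * Walk a queue and invoke every action whose absolute priority falls in
 * [first_priority, last_priority]. The first non-KERN_SUCCESS return value
 * is remembered and returned, but the walk always continues to the end.
 */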
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
                           void * param1, void * param2, void * param3)
{
    kern_return_t                   ret = KERN_SUCCESS;
    kern_return_t                   result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority))
        {
            //kprintf("[%p]", next->action);
            ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
        }
        if (KERN_SUCCESS == result)
            result = ret;
    }
    return (result);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" kern_return_t
IOCPURunPlatformQuiesceActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
                                       NULL, NULL, NULL));
}

extern "C" kern_return_t
IOCPURunPlatformActiveActions(void)
{
    return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
                                       NULL, NULL, NULL));
}

extern "C" kern_return_t
IOCPURunPlatformHaltRestartActions(uint32_t message)
{
    if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL));
}

extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

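/*
 * Generic action callback: refcon0 is the IOService that published the
 * action property and refcon1 is the platform function name (an OSSymbol);
 * the call is forwarded via IOService::callPlatformFunction().
 */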
static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
                        void * param1, void * param2, void * param3,
                        const char * service_name)
{
    IOReturn         ret;
    IOService *      service  = (IOService *)      refcon0;
    const OSSymbol * function = (const OSSymbol *) refcon1;

    kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

    ret = service->callPlatformFunction(function, false,
                                        (void *)(uintptr_t) priority, param1, param2, param3);

    return (ret);
}

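/*
 * If the service publishes the property matching queue qidx, create an
 * action entry for it. Wake and active actions get negated priorities so
 * they run in reverse order; halt/restart and panic actions are installed
 * at most once per service.
 */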
static void
IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
{
    iocpu_platform_action_entry_t * entry;
    OSNumber *                      num;
    uint32_t                        priority;
    const OSSymbol *                key = gActionSymbols[qidx];
    queue_head_t *                  queue = &gActionQueues[qidx];
    bool                            reverse;
    bool                            uniq;

    num = OSDynamicCast(OSNumber, service->getProperty(key));
    if (!num) return;

    reverse = false;
    uniq    = false;
    switch (qidx)
    {
        case kQueueWake:
        case kQueueActive:
            reverse = true;
            break;
        case kQueueHaltRestart:
        case kQueuePanic:
            uniq = true;
            break;
    }
    if (uniq)
    {
        queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
        {
            if (service == entry->refcon0) return;
        }
    }

    entry = IONew(iocpu_platform_action_entry_t, 1);
    entry->action = &IOServicePlatformAction;
    entry->name = service->getName();
    priority = num->unsigned32BitValue();
    if (reverse)
        entry->priority = -priority;
    else
        entry->priority = priority;
    entry->refcon0 = service;
    entry->refcon1 = (void *) key;

    iocpu_add_platform_action(queue, entry);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

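/*
 * Called once at IOKit startup: initialize the action queues and create
 * the OSSymbols used to look up the per-queue action properties.
 */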
void
IOCPUInitialize(void)
{
    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        queue_init(&gActionQueues[qidx]);
    }

    gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}

IOReturn
IOInstallServicePlatformActions(IOService * service)
{
    IOInstallServicePlatformAction(service, kQueueHaltRestart);
    IOInstallServicePlatformAction(service, kQueuePanic);

    return (kIOReturnSuccess);
}

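/*
 * Remove and free every action entry registered for this service, across
 * all queues. The list is walked with an explicit next pointer so entries
 * can be deleted safely during iteration.
 */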
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link))
        {
            entry = next;
            next = (typeof(entry)) queue_next(&entry->link);
            if (service == entry->refcon0)
            {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    return (kIOReturnSuccess);
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

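/*
 * C entry points used by pexpert/machine code: each one casts the opaque
 * cpu_id_t back to an IOCPU object and forwards to the matching virtual
 * method, doing nothing (or failing) if the cast does not succeed.
 */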
kern_return_t PE_cpu_start(cpu_id_t target,
                           vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == 0) return KERN_FAILURE;
    return targetCPU->startCPU(start_paddr, arg_paddr);
}

void PE_cpu_halt(cpu_id_t target)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) targetCPU->haltCPU();
}

void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
}

void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPUDeferred(targetCPU);
}

void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (sourceCPU && targetCPU) sourceCPU->signalCPUCancel(targetCPU);
}

void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) targetCPU->initCPU(bootb);
}

void PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) targetCPU->quiesceCPU();
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static OSArray *        gIOCPUs;
static const OSSymbol * gIOCPUStateKey;
static OSString *       gIOCPUStateNames[kIOCPUStateCount];

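/*
 * System sleep entry point. Gathers the sleep/wake/quiesce/active actions
 * from the service registry, runs the sleep actions, halts all non-boot
 * CPUs and then the boot CPU; on wake it runs the wake actions, tears the
 * per-sleep action queues down again, and restarts the stopped CPUs.
 */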
void IOCPUSleepKernel(void)
{
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");

    IORegistryIterator * iter;
    OSOrderedSet *       all;
    IOService *          service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
                                            kIORegistryIterateRecursively );
    if( iter)
    {
        all = 0;
        do
        {
            if (all)
                all->release();
            all = iter->iterateAll();
        }
        while (!iter->isValid());
        iter->release();

        if (all)
        {
            while((service = (IOService *) all->getFirstObject()))
            {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
                {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
                               NULL, NULL, NULL);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Make certain that the boot CPU is the last to sleep: skip it
        // here and halt it only after all the non-boot CPUs have halted.
        if (target->getCPUNumber() == kBootCPUNumber)
        {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning)
        {
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == 0);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );

    // Now sleep the boot CPU.
    bootCPU->haltCPU();

    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
                               NULL, NULL, NULL);

    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
    {
        while (!(queue_empty(&gActionQueues[qidx])))
        {
            entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if ((target->getCPUNumber() != kBootCPUNumber)
            && (target->getCPUState() == kIOCPUStateStopped))
        {
            processor_start(target->getMachProcessor());
        }
    }
}

void IOCPU::initCPUs(void)
{
    if (gIOCPUs == 0) {
        gIOCPUs = OSArray::withCapacity(1);

        gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

        gIOCPUStateNames[kIOCPUStateUnregistered] =
            OSString::withCStringNoCopy("Unregistered");
        gIOCPUStateNames[kIOCPUStateUninitalized] =
            OSString::withCStringNoCopy("Uninitalized");
        gIOCPUStateNames[kIOCPUStateStopped] =
            OSString::withCStringNoCopy("Stopped");
        gIOCPUStateNames[kIOCPUStateRunning] =
            OSString::withCStringNoCopy("Running");
    }
}

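/*
 * IOService lifecycle: add this CPU to the global gIOCPUs array and
 * publish bus, clock and timebase frequencies on the provider nub
 * (32-bit values when they fit, 64-bit otherwise), then default to CPU
 * number 0 in the unregistered state.
 */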
bool IOCPU::start(IOService *provider)
{
    OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

    if (!super::start(provider)) return false;

    initCPUs();

    _cpuGroup = gIOCPUs;
    cpuNub = provider;

    gIOCPUs->setObject(this);

    // Correct the bus, cpu and timebase frequencies in the device tree.
    if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
    } else {
        busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
    }
    provider->setProperty("bus-frequency", busFrequency);
    busFrequency->release();

    if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
    } else {
        cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
    }
    provider->setProperty("clock-frequency", cpuFrequency);
    cpuFrequency->release();

    timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
    provider->setProperty("timebase-frequency", timebaseFrequency);
    timebaseFrequency->release();

    super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);

    setCPUNumber(0);
    setCPUState(kIOCPUStateUnregistered);

    return true;
}

OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
{
    if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];

    return super::getProperty(aKey);
}

bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
    if (aKey == gIOCPUStateKey) {
        return false;
    }

    return super::setProperty(aKey, anObject);
}

bool IOCPU::serializeProperties(OSSerialize *serialize) const
{
    bool result;
    OSDictionary *dict = dictionaryWithProperties();
    if (!dict) return false;
    dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
    result = dict->serialize(serialize);
    dict->release();
    return result;
}

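/*
 * User-space requests to set IOCPUState require administrator privilege,
 * and since this class's setProperty() rejects the key, the request still
 * falls through to kIOReturnUnsupported.
 */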
IOReturn IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString *stateStr;
    IOReturn result;

    if (dict == 0) return kIOReturnUnsupported;

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) return result;

        if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}

void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}

void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32 IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32 IOCPU::getCPUState(void)
{
    return _cpuState;
}

void IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

OSArray *IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

processor_t IOCPU::getMachProcessor(void)
{
    return machProcessor;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

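/*
 * Set up one interrupt vector per CPU: allocate the cpus and vectors
 * arrays, give every vector its own lock, and report the maximum CPU
 * count to the platform layer via ml_init_max_cpus().
 */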
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
    int cnt;

    if (!super::init()) return kIOReturnInvalid;

    numCPUs = sources;

    cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *));
    if (cpus == 0) return kIOReturnNoMemory;
    bzero(cpus, numCPUs * sizeof(IOCPU *));

    vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector));
    if (vectors == 0) return kIOReturnNoMemory;
    bzero(vectors, numCPUs * sizeof(IOInterruptVector));

    // Allocate locks for the vectors; on failure, free any already allocated.
    for (cnt = 0; cnt < numCPUs; cnt++) {
        vectors[cnt].interruptLock = IOLockAlloc();
        if (vectors[cnt].interruptLock == NULL) {
            for (cnt = 0; cnt < numCPUs; cnt++) {
                if (vectors[cnt].interruptLock != NULL)
                    IOLockFree(vectors[cnt].interruptLock);
            }
            return kIOReturnNoResources;
        }
    }

    ml_init_max_cpus(numCPUs);

    return kIOReturnSuccess;
}

void IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                               this);
}

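/*
 * Publish the IOInterruptControllers / IOInterruptSpecifiers properties on
 * a nub: one specifier per CPU (its index), each referencing this platform
 * CPU interrupt controller. Nubs that already carry both properties are
 * left untouched.
 */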
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int cnt;
    OSArray *controller;
    OSArray *specifier;
    OSData *tmpData;
    long tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0))
        return;

    // Create the interrupt specifier array.
    specifier = OSArray::withCapacity(numCPUs);
    for (cnt = 0; cnt < numCPUs; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    }

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numCPUs);
    for (cnt = 0; cnt < numCPUs; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}

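/*
 * Hook this controller's handleInterrupt() up as the low-level interrupt
 * handler for the given CPU and count it as enabled; once every CPU has
 * been enabled, notify IOKit and wake any thread blocked in
 * registerInterrupt() waiting for the CPUs to come up.
 */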
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    // Ensure that the increment is seen by all processors
    OSIncrementAtomic(&enabledCPUs);

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
}

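/*
 * Register a client handler on the per-CPU vector identified by source.
 * The vector starts out hard- and soft-disabled, and the caller blocks
 * here until every CPU has enabled its interrupt.
 */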
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numCPUs) return kIOReturnNoResources;

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub     = nub;
    vector->source  = source;
    vector->target  = target;
    vector->refCon  = refCon;

    // Get the vector ready. It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered   = 1;

    IOUnlock(vector->interruptLock);

    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        thread_block(THREAD_CONTINUE_NULL);
    }

    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
                                                    int /*source*/,
                                                    int *interruptType)
{
    if (interruptType == 0) return kIOReturnBadArgument;

    *interruptType = kIOInterruptTypeLevel;

    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
    // ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
    // ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}

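/*
 * Dispatch an incoming CPU interrupt to the handler registered on the
 * vector for this source, or fail if nothing has been registered yet.
 */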
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) return kIOReturnInvalid;

    vector->handler(vector->target, vector->refCon,
                    vector->nub, vector->source);

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */