]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOCPU.cpp
xnu-4570.1.46.tar.gz
[apple/xnu.git] / iokit / Kernel / IOCPU.cpp
1 /*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 extern "C" {
30 #include <machine/machine_routines.h>
31 #include <pexpert/pexpert.h>
32 #include <kern/cpu_number.h>
33 extern void kperf_kernel_configure(char *);
34 }
35
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOPlatformExpert.h>
38 #include <IOKit/pwr_mgt/RootDomain.h>
39 #include <IOKit/pwr_mgt/IOPMPrivate.h>
40 #include <IOKit/IOUserClient.h>
41 #include <IOKit/IOKitKeysPrivate.h>
42 #include <IOKit/IOCPU.h>
43 #include "IOKitKernelInternal.h"
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46 #include <kern/queue.h>
47
extern "C" void console_suspend();
extern "C" void console_resume();

/*
 * Callback signature for a registered platform action (sleep, wake,
 * quiesce, active, halt/restart, panic).
 */
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
						 void * param1, void * param2, void * param3,
						 const char * name);

/* One registered platform action, linked into one of gActionQueues[]. */
struct iocpu_platform_action_entry
{
	queue_chain_t link;                    /* linkage within the per-event queue */
	iocpu_platform_action_t action;        /* callback to invoke */
	int32_t priority;                      /* run order; negated for reverse-order queues */
	const char * name;                     /* service name, used for logging */
	void * refcon0;                        /* passed through; typically the IOService */
	void * refcon1;                        /* passed through; typically the function OSSymbol */
	boolean_t callout_in_progress;         /* guards against nested invocation (panic path) */
	struct iocpu_platform_action_entry * alloc_list;  /* NOTE(review): not referenced in this file */
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
67
68 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
69
static IOLock *gIOCPUsLock;                          /* serializes gIOCPUs and the action queues */
static OSArray *gIOCPUs;                             /* all registered IOCPU objects */
static const OSSymbol *gIOCPUStateKey;               /* synthesized "IOCPUState" property key */
static OSString *gIOCPUStateNames[kIOCPUStateCount]; /* human-readable names per CPU state */

/* Indices into gActionQueues[]/gActionSymbols[], one per platform event. */
enum
{
	kQueueSleep       = 0,
	kQueueWake        = 1,
	kQueueQuiesce     = 2,
	kQueueActive      = 3,
	kQueueHaltRestart = 4,
	kQueuePanic       = 5,
	kQueueCount       = 6
};

/* Property keys a service publishes to register for each event. */
const OSSymbol * gIOPlatformSleepActionKey;
const OSSymbol * gIOPlatformWakeActionKey;
const OSSymbol * gIOPlatformQuiesceActionKey;
const OSSymbol * gIOPlatformActiveActionKey;
const OSSymbol * gIOPlatformHaltRestartActionKey;
const OSSymbol * gIOPlatformPanicActionKey;

static queue_head_t gActionQueues[kQueueCount];      /* priority-sorted action lists */
static const OSSymbol * gActionSymbols[kQueueCount]; /* key for each queue, set in IOCPUInitialize */
95
96 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97
98 static void
99 iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
100 {
101 iocpu_platform_action_entry_t * next;
102
103 queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
104 {
105 if (next->priority > entry->priority)
106 {
107 queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
108 return;
109 }
110 }
111 queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
112 }
113
/* Unlink 'entry' from whichever action queue it is on (does not free it). */
static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
	remque(&entry->link);
}
119
/*
 * Invoke every action on 'queue' whose absolute priority lies within
 * [first_priority, last_priority], passing param1..3 through to each
 * callback.
 *
 * When allow_nested_callouts is FALSE, each entry is marked
 * callout_in_progress for the duration of its callback and entries
 * already marked are skipped — this prevents re-entry on the panic path.
 * All matching actions are run even after a failure; the returned value
 * is the first non-KERN_SUCCESS result seen (KERN_SUCCESS otherwise).
 */
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
			   void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
	kern_return_t ret = KERN_SUCCESS;
	kern_return_t result = KERN_SUCCESS;
	iocpu_platform_action_entry_t * next;

	queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
	{
		/* Negative priorities encode reverse-ordered queues; compare magnitude. */
		uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
		if ((pri >= first_priority) && (pri <= last_priority))
		{
			//kprintf("[%p]", next->action);
			if (!allow_nested_callouts && !next->callout_in_progress)
			{
				next->callout_in_progress = TRUE;
				ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
				next->callout_in_progress = FALSE;
			}
			else if (allow_nested_callouts)
			{
				ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
			}
		}
		/* Latch the first failure but keep running remaining actions. */
		if (KERN_SUCCESS == result)
			result = ret;
	}
	return (result);
}
150
151 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
152
153 extern "C" kern_return_t
154 IOCPURunPlatformQuiesceActions(void)
155 {
156 return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
157 NULL, NULL, NULL, TRUE));
158 }
159
160 extern "C" kern_return_t
161 IOCPURunPlatformActiveActions(void)
162 {
163 return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
164 NULL, NULL, NULL, TRUE));
165 }
166
167 extern "C" kern_return_t
168 IOCPURunPlatformHaltRestartActions(uint32_t message)
169 {
170 if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
171 return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
172 (void *)(uintptr_t) message, NULL, NULL, TRUE));
173 }
174
175 extern "C" kern_return_t
176 IOCPURunPlatformPanicActions(uint32_t message)
177 {
178 // Don't allow nested calls of panic actions
179 if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
180 return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
181 (void *)(uintptr_t) message, NULL, NULL, FALSE));
182 }
183
184
185 extern "C" kern_return_t
186 IOCPURunPlatformPanicSyncAction(void *addr, size_t len)
187 {
188 // Don't allow nested calls of panic actions
189 if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
190 return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
191 (void *)(uintptr_t)(kPEPanicSync), addr, (void *)(uintptr_t)len, FALSE));
192
193 }
194
195 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
196
/*
 * Generic action callback: forwards a platform event to a service's
 * callPlatformFunction().  refcon0 is the IOService, refcon1 the
 * function-name OSSymbol (both set by IOInstallServicePlatformAction).
 */
static kern_return_t
IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
			void * param1, void * param2, void * param3,
			const char * service_name)
{
	IOReturn ret;
	IOService * service  = (IOService *) refcon0;
	const OSSymbol * function = (const OSSymbol *) refcon1;

	kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);

	ret = service->callPlatformFunction(function, false,
					    (void *)(uintptr_t) priority, param1, param2, param3);

	return (ret);
}
213
214 static void
215 IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
216 {
217 iocpu_platform_action_entry_t * entry;
218 OSNumber * num;
219 uint32_t priority;
220 const OSSymbol * key = gActionSymbols[qidx];
221 queue_head_t * queue = &gActionQueues[qidx];
222 bool reverse;
223 bool uniq;
224
225 num = OSDynamicCast(OSNumber, service->getProperty(key));
226 if (!num) return;
227
228 reverse = false;
229 uniq = false;
230 switch (qidx)
231 {
232 case kQueueWake:
233 case kQueueActive:
234 reverse = true;
235 break;
236 case kQueueHaltRestart:
237 case kQueuePanic:
238 uniq = true;
239 break;
240 }
241 if (uniq)
242 {
243 queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
244 {
245 if (service == entry->refcon0) return;
246 }
247 }
248
249 entry = IONew(iocpu_platform_action_entry_t, 1);
250 entry->action = &IOServicePlatformAction;
251 entry->name = service->getName();
252 priority = num->unsigned32BitValue();
253 if (reverse)
254 entry->priority = -priority;
255 else
256 entry->priority = priority;
257 entry->refcon0 = service;
258 entry->refcon1 = (void *) key;
259 entry->callout_in_progress = FALSE;
260
261 iocpu_add_platform_action(queue, entry);
262 }
263
264 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
265
/*
 * One-time module initialization: create the CPU list and its lock,
 * initialize the per-event action queues, and intern the property-key
 * symbols and CPU-state name strings used elsewhere in this file.
 */
void
IOCPUInitialize(void)
{
	gIOCPUsLock = IOLockAlloc();
	gIOCPUs = OSArray::withCapacity(1);

	for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
	{
		queue_init(&gActionQueues[qidx]);
	}

	gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

	/* Display strings for each kIOCPUState* value. */
	gIOCPUStateNames[kIOCPUStateUnregistered] =
		OSString::withCStringNoCopy("Unregistered");
	gIOCPUStateNames[kIOCPUStateUninitalized] =
		OSString::withCStringNoCopy("Uninitalized");
	gIOCPUStateNames[kIOCPUStateStopped] =
		OSString::withCStringNoCopy("Stopped");
	gIOCPUStateNames[kIOCPUStateRunning] =
		OSString::withCStringNoCopy("Running");

	/* Each queue's lookup key doubles as the exported global symbol. */
	gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
		= OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
	gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
		= OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
	gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
		= OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
	gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
		= OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
	gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
		= OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
	gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
		= OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
301
302 IOReturn
303 IOInstallServicePlatformActions(IOService * service)
304 {
305 IOLockLock(gIOCPUsLock);
306
307 IOInstallServicePlatformAction(service, kQueueHaltRestart);
308 IOInstallServicePlatformAction(service, kQueuePanic);
309
310 IOLockUnlock(gIOCPUsLock);
311
312 return (kIOReturnSuccess);
313 }
314
/*
 * Remove and free every action entry registered by 'service' from all
 * action queues.  Uses the fetch-next-before-unlink idiom so removal is
 * safe during iteration.
 */
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
	iocpu_platform_action_entry_t * entry;
	iocpu_platform_action_entry_t * next;

	IOLockLock(gIOCPUsLock);

	for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
	{
		next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
		while (!queue_end(&gActionQueues[qidx], &next->link))
		{
			/* Advance before any unlink so 'next' stays valid. */
			entry = next;
			next = (typeof(entry)) queue_next(&entry->link);
			if (service == entry->refcon0)
			{
				iocpu_remove_platform_action(entry);
				IODelete(entry, iocpu_platform_action_entry_t, 1);
			}
		}
	}

	IOLockUnlock(gIOCPUsLock);

	return (kIOReturnSuccess);
}
342
343
344 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
345
346 kern_return_t PE_cpu_start(cpu_id_t target,
347 vm_offset_t start_paddr, vm_offset_t arg_paddr)
348 {
349 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
350
351 if (targetCPU == 0) return KERN_FAILURE;
352 return targetCPU->startCPU(start_paddr, arg_paddr);
353 }
354
355 void PE_cpu_halt(cpu_id_t target)
356 {
357 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
358
359 if (targetCPU) targetCPU->haltCPU();
360 }
361
362 void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
363 {
364 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
365 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
366
367 if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
368 }
369
370 void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
371 {
372 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
373 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
374
375 if (sourceCPU && targetCPU) sourceCPU->signalCPUDeferred(targetCPU);
376 }
377
378 void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
379 {
380 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
381 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
382
383 if (sourceCPU && targetCPU) sourceCPU->signalCPUCancel(targetCPU);
384 }
385
/*
 * Platform-expert bridge: per-CPU machine init.  'bootb' indicates the
 * cold-boot path.  On ARM, waking the master CPU (non-boot path) clears
 * the quiescing flag.
 */
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU) {
		targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
		if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
#endif /* defined(__arm__) || defined(__arm64__) */
	}
}
397
/*
 * Platform-expert bridge: quiesce a CPU for sleep.  On ARM the master
 * CPU sets the quiescing flag before actually quiescing.
 */
void PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
	if (targetCPU) {
#if defined(__arm__) || defined(__arm64__)
		if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
#endif /* defined(__arm__) || defined(__arm64__) */
		targetCPU->quiesceCPU();
	}
}
408
#if defined(__arm__) || defined(__arm64__)
/* Most recently installed PMI handler; shared by all CPUs. */
static perfmon_interrupt_handler_func pmi_handler = 0;

/* Record the handler to be registered when PMIs are enabled per CPU. */
kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	pmi_handler = handler;

	return KERN_SUCCESS;
}

/*
 * Enable or disable the performance-monitor interrupt on 'target'.
 * NOTE(review): enabling registers the handler on interrupt index 1 of
 * the CPU's provider each time — assumes index 1 is the PMI source and
 * that re-registration is acceptable; confirm against the CPU nub.
 */
void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU) {
		if (enable) {
			targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
			targetCPU->getProvider()->enableInterrupt(1);
		} else {
			targetCPU->getProvider()->disableInterrupt(1);
		}
	}
}
#endif
433
434 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
435
#define super IOService

/* IOCPU is abstract; platform drivers subclass it per CPU type. */
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
/* Reserved vtable slots for future binary-compatible expansion. */
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
447
448 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
449
/*
 * Put the whole machine to sleep and handle the wake path.
 *
 * Sequence: gather per-service platform actions from the registry, run
 * the sleep actions, halt all non-boot CPUs, suspend the console, halt
 * the boot CPU last (execution resumes past that call on wake), run the
 * wake actions, tear down the per-sleep action queues, and restart the
 * stopped CPUs.
 */
void IOCPUSleepKernel(void)
{
	long cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	kprintf("IOCPUSleepKernel\n");

	IORegistryIterator * iter;
	OSOrderedSet * all;
	IOService * service;

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

	/* Snapshot every service in the plane; retry until the iterator
	 * observes a consistent registry. */
	iter = IORegistryIterator::iterateOver( gIOServicePlane,
						kIORegistryIterateRecursively );
	if( iter)
	{
		all = 0;
		do
		{
			if (all)
				all->release();
			all = iter->iterateAll();
		}
		while (!iter->isValid());
		iter->release();

		if (all)
		{
			/* Register each service's sleep/wake/quiesce/active actions. */
			while((service = (IOService *) all->getFirstObject()))
			{
				for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
				{
					IOInstallServicePlatformAction(service, qidx);
				}
				all->removeObject(service);
			}
			all->release();
		}
	}

	iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
				NULL, NULL, NULL, TRUE);

	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
	// Sleep the CPUs.
	cnt = numCPUs;
	while (cnt--)
	{
		/* NOTE(review): target is dereferenced without a NULL check;
		 * assumes gIOCPUs holds only IOCPU objects. */
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu)
		{
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning)
		{
			target->haltCPU();
		}
	}

	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );

	// Now sleep the boot CPU.
	bootCPU->haltCPU();

	/* --- execution resumes here on wake --- */
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
					NULL, NULL, NULL, TRUE);

	/* Free the per-sleep action entries (sleep..active queues). */
	iocpu_platform_action_entry_t * entry;
	for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
	{
		while (!(queue_empty(&gActionQueues[qidx])))
		{
			entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
			iocpu_remove_platform_action(entry);
			IODelete(entry, iocpu_platform_action_entry_t, 1);
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++)
	{
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			if (target->getCPUState() == kIOCPUStateRunning)
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));

			if (target->getCPUState() == kIOCPUStateStopped)
				processor_start(target->getMachProcessor());
		}
	}
}
562
/*
 * IOService lifecycle: register this CPU in the global list and publish
 * bus/cpu/timebase frequency properties on the provider (32-bit values
 * when they fit, 64-bit otherwise).  Starts in the Unregistered state.
 */
bool IOCPU::start(IOService *provider)
{
	OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

	if (!super::start(provider)) return false;

	_cpuGroup = gIOCPUs;
	cpuNub = provider;

	IOLockLock(gIOCPUsLock);
	gIOCPUs->setObject(this);
	IOLockUnlock(gIOCPUsLock);

	// Correct the bus, cpu and timebase frequencies in the device tree.
	/* NOTE(review): withBytesNoCopy results are used and released without
	 * NULL checks — assumes these small allocations cannot fail here. */
	if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
		busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
	} else {
		busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
	}
	provider->setProperty("bus-frequency", busFrequency);
	busFrequency->release();

	if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
		cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
	} else {
		cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
	}
	provider->setProperty("clock-frequency", cpuFrequency);
	cpuFrequency->release();

	timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
	provider->setProperty("timebase-frequency", timebaseFrequency);
	timebaseFrequency->release();

	super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);

	setCPUNumber(0);
	setCPUState(kIOCPUStateUnregistered);

	return true;
}
604
605 OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
606 {
607 if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];
608
609 return super::getProperty(aKey);
610 }
611
612 bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
613 {
614 if (aKey == gIOCPUStateKey) {
615 return false;
616 }
617
618 return super::setProperty(aKey, anObject);
619 }
620
621 bool IOCPU::serializeProperties(OSSerialize *serialize) const
622 {
623 bool result;
624 OSDictionary *dict = dictionaryWithProperties();
625 if (!dict) return false;
626 dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
627 result = dict->serialize(serialize);
628 dict->release();
629 return result;
630 }
631
632 IOReturn IOCPU::setProperties(OSObject *properties)
633 {
634 OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
635 OSString *stateStr;
636 IOReturn result;
637
638 if (dict == 0) return kIOReturnUnsupported;
639
640 stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
641 if (stateStr != 0) {
642 result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
643 if (result != kIOReturnSuccess) return result;
644
645 if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;
646
647 return kIOReturnUnsupported;
648 }
649
650 return kIOReturnUnsupported;
651 }
652
/* Default IPI send: no-op; concrete subclasses override. */
void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Our CPU may not support deferred IPIs,
	// so send a regular IPI by default
	signalCPU(target);
}

void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by
	// signalCPUDeferred; unsupported
	// by default
}

/* Default timebase enable/disable: no-op; subclasses override. */
void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
674
/* CPU number assigned via setCPUNumber(). */
UInt32 IOCPU::getCPUNumber(void)
{
	return _cpuNumber;
}

/* Set the CPU number and mirror it as the IOCPUNumber property. */
void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	_cpuNumber = cpuNumber;
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

/* Current kIOCPUState* value. */
UInt32 IOCPU::getCPUState(void)
{
	return _cpuState;
}

/* Update the CPU state; out-of-range values are silently ignored. */
void IOCPU::setCPUState(UInt32 cpuState)
{
	if (cpuState < kIOCPUStateCount) {
		_cpuState = cpuState;
	}
}

/* The CPU group this CPU was registered into (gIOCPUs). */
OSArray *IOCPU::getCPUGroup(void)
{
	return _cpuGroup;
}

/* Number of CPUs in this CPU's group. */
UInt32 IOCPU::getCPUGroupSize(void)
{
	return _cpuGroup->getCount();
}

/* The Mach processor_t backing this CPU. */
processor_t IOCPU::getMachProcessor(void)
{
	return machProcessor;
}
712
713
714 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
715
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

/* Reserved vtable slots for future binary-compatible expansion. */
/* NOTE(review): slots start at 1, not 0 — confirm against the header's
 * reserved-slot declarations. */
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
726
727
728
729 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
730
/* Convenience overload: one interrupt source per CPU. */
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
	return initCPUInterruptController(sources, sources);
}
735
736 IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
737 {
738 int cnt;
739
740 if (!super::init()) return kIOReturnInvalid;
741
742 numSources = sources;
743 numCPUs = cpus;
744
745 vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
746 if (vectors == 0) return kIOReturnNoMemory;
747 bzero(vectors, numSources * sizeof(IOInterruptVector));
748
749 // Allocate a lock for each vector
750 for (cnt = 0; cnt < numSources; cnt++) {
751 vectors[cnt].interruptLock = IOLockAlloc();
752 if (vectors[cnt].interruptLock == NULL) {
753 for (cnt = 0; cnt < numSources; cnt++) {
754 if (vectors[cnt].interruptLock != NULL)
755 IOLockFree(vectors[cnt].interruptLock);
756 }
757 return kIOReturnNoResources;
758 }
759 }
760
761 ml_init_max_cpus(numSources);
762
763 #if KPERF
764 /*
765 * kperf allocates based on the number of CPUs and requires them to all be
766 * accounted for.
767 */
768 boolean_t found_kperf = FALSE;
769 char kperf_config_str[64];
770 found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
771 if (found_kperf && kperf_config_str[0] != '\0') {
772 kperf_kernel_configure(kperf_config_str);
773 }
774 #endif
775
776 return kIOReturnSuccess;
777 }
778
/*
 * Publish this controller: register the service and record it with the
 * platform under the well-known CPU interrupt controller name.
 */
void IOCPUInterruptController::registerCPUInterruptController(void)
{
	registerService();

	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
						   this);
}
786
/*
 * Attach interrupt-controller and interrupt-specifier properties to
 * 'service', one entry per interrupt source, all pointing at this
 * platform CPU interrupt controller.  No-op if the service already has
 * both properties.
 */
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
	int cnt;
	OSArray *controller;
	OSArray *specifier;
	OSData *tmpData;
	long tmpLong;

	if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
	    (service->getProperty(gIOInterruptSpecifiersKey) != 0))
		return;

	// Create the interrupt specifer array.
	/* Each specifier is the source index encoded as a long. */
	specifier = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		tmpLong = cnt;
		tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
		specifier->setObject(tmpData);
		tmpData->release();
	};

	// Create the interrupt controller array.
	controller = OSArray::withCapacity(numSources);
	for (cnt = 0; cnt < numSources; cnt++) {
		controller->setObject(gPlatformInterruptControllerName);
	}

	// Put the two arrays into the property table.
	service->setProperty(gIOInterruptControllersKey, controller);
	service->setProperty(gIOInterruptSpecifiersKey, specifier);
	controller->release();
	specifier->release();
}
820
/*
 * Called as each CPU comes up: install this controller's handleInterrupt
 * as the CPU's interrupt handler and count it as enabled.  When the last
 * CPU arrives, mark CPUs running and wake any threads blocked in
 * registerInterrupt().  vectors[0].interruptLock doubles as the lock for
 * the enabledCPUs counter.
 */
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}
839
840 IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
841 int source,
842 void *target,
843 IOInterruptHandler handler,
844 void *refCon)
845 {
846 IOInterruptVector *vector;
847
848 if (source >= numSources) return kIOReturnNoResources;
849
850 vector = &vectors[source];
851
852 // Get the lock for this vector.
853 IOTakeLock(vector->interruptLock);
854
855 // Make sure the vector is not in use.
856 if (vector->interruptRegistered) {
857 IOUnlock(vector->interruptLock);
858 return kIOReturnNoResources;
859 }
860
861 // Fill in vector with the client's info.
862 vector->handler = handler;
863 vector->nub = nub;
864 vector->source = source;
865 vector->target = target;
866 vector->refCon = refCon;
867
868 // Get the vector ready. It starts hard disabled.
869 vector->interruptDisabledHard = 1;
870 vector->interruptDisabledSoft = 1;
871 vector->interruptRegistered = 1;
872
873 IOUnlock(vector->interruptLock);
874
875 IOTakeLock(vectors[0].interruptLock);
876 if (enabledCPUs != numCPUs) {
877 assert_wait(this, THREAD_UNINT);
878 IOUnlock(vectors[0].interruptLock);
879 thread_block(THREAD_CONTINUE_NULL);
880 } else
881 IOUnlock(vectors[0].interruptLock);
882
883 return kIOReturnSuccess;
884 }
885
886 IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
887 int /*source*/,
888 int *interruptType)
889 {
890 if (interruptType == 0) return kIOReturnBadArgument;
891
892 *interruptType = kIOInterruptTypeLevel;
893
894 return kIOReturnSuccess;
895 }
896
/* Per-source enable is a no-op for CPU interrupts; always succeeds. */
IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
						   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}

/* Per-source disable is a no-op for CPU interrupts; always succeeds. */
IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
						    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}
910
/* Trigger a software interrupt via the machine layer. */
IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
						  int /*source*/)
{
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
917
/*
 * Interrupt dispatch: forward 'source' to the registered client handler.
 * NOTE(review): 'source' is not bounds-checked here — assumes callers
 * (installed via enableCPUInterrupt) only pass valid CPU numbers.
 */
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
						   IOService */*nub*/,
						   int source)
{
	IOInterruptVector *vector;

	vector = &vectors[source];

	/* Spurious interrupt on an unclaimed vector. */
	if (!vector->interruptRegistered) return kIOReturnInvalid;

	vector->handler(vector->target, vector->refCon,
			vector->nub, vector->source);

	return kIOReturnSuccess;
}
933
934 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */