]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOCPU.cpp
xnu-4570.51.1.tar.gz
[apple/xnu.git] / iokit / Kernel / IOCPU.cpp
1 /*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 extern "C" {
30 #include <machine/machine_routines.h>
31 #include <pexpert/pexpert.h>
32 #include <kern/cpu_number.h>
33 extern void kperf_kernel_configure(char *);
34 }
35
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOPlatformExpert.h>
38 #include <IOKit/pwr_mgt/RootDomain.h>
39 #include <IOKit/pwr_mgt/IOPMPrivate.h>
40 #include <IOKit/IOUserClient.h>
41 #include <IOKit/IOKitKeysPrivate.h>
42 #include <IOKit/IOCPU.h>
43 #include "IOKitKernelInternal.h"
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46 #include <kern/queue.h>
47
48 extern "C" void console_suspend();
49 extern "C" void console_resume();
50
51 typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
52 void * param1, void * param2, void * param3,
53 const char * name);
54
55 struct iocpu_platform_action_entry
56 {
57 queue_chain_t link;
58 iocpu_platform_action_t action;
59 int32_t priority;
60 const char * name;
61 void * refcon0;
62 void * refcon1;
63 boolean_t callout_in_progress;
64 struct iocpu_platform_action_entry * alloc_list;
65 };
66 typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
67
68 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
69
static IOLock *gIOCPUsLock;                            // serializes gIOCPUs and the platform action queues
static OSArray *gIOCPUs;                               // all registered IOCPU objects, in registration order
static const OSSymbol *gIOCPUStateKey;                 // synthetic "IOCPUState" property key
static OSString *gIOCPUStateNames[kIOCPUStateCount];   // display string for each CPU state

/* Index of each platform-action queue within gActionQueues[]. */
enum
{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};

/* Published symbols naming each action property (matched against service properties). */
const OSSymbol * gIOPlatformSleepActionKey;
const OSSymbol * gIOPlatformWakeActionKey;
const OSSymbol * gIOPlatformQuiesceActionKey;
const OSSymbol * gIOPlatformActiveActionKey;
const OSSymbol * gIOPlatformHaltRestartActionKey;
const OSSymbol * gIOPlatformPanicActionKey;

static queue_head_t gActionQueues[kQueueCount];        // priority-sorted action lists, one per queue index
static const OSSymbol * gActionSymbols[kQueueCount];   // property key associated with each queue
95
96 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
97
/*
 * Insert 'entry' into 'queue' keeping it sorted by ascending priority.
 * Equal-priority entries keep registration order (the new entry lands
 * after existing entries with the same priority).
 */
static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority)
        {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}
113
/* Unlink 'entry' from whichever action queue it is on; does not free it. */
static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}
119
/*
 * Run every action on 'queue' whose absolute priority falls inside
 * [first_priority, last_priority].  Negative priorities (reverse-order
 * queues) are compared by magnitude.  When allow_nested_callouts is FALSE,
 * an entry already marked in-progress is skipped entirely — this prevents
 * reentry, e.g. a panic action that itself panics.
 * Returns the first non-KERN_SUCCESS result seen, else KERN_SUCCESS.
 */
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
                           void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t ret = KERN_SUCCESS;
    kern_return_t result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority))
        {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress)
            {
                // Mark in-progress around the call so a nested attempt is skipped.
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            }
            else if (allow_nested_callouts)
            {
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
            }
        }
        // Latch the first failure; later successes do not overwrite it.
        if (KERN_SUCCESS == result)
            result = ret;
    }
    return (result);
}
150
151 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
152
153 extern "C" kern_return_t
154 IOCPURunPlatformQuiesceActions(void)
155 {
156 return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
157 NULL, NULL, NULL, TRUE));
158 }
159
160 extern "C" kern_return_t
161 IOCPURunPlatformActiveActions(void)
162 {
163 return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
164 NULL, NULL, NULL, TRUE));
165 }
166
167 extern "C" kern_return_t
168 IOCPURunPlatformHaltRestartActions(uint32_t message)
169 {
170 if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
171 return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
172 (void *)(uintptr_t) message, NULL, NULL, TRUE));
173 }
174
175 extern "C" kern_return_t
176 IOCPURunPlatformPanicActions(uint32_t message)
177 {
178 // Don't allow nested calls of panic actions
179 if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
180 return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
181 (void *)(uintptr_t) message, NULL, NULL, FALSE));
182 }
183
184
185 extern "C" kern_return_t
186 IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
187 {
188 PE_panic_save_context_t context = {
189 .psc_buffer = addr,
190 .psc_offset = offset,
191 .psc_length = len
192 };
193
194 // Don't allow nested calls of panic actions
195 if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
196 return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
197 (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE));
198
199 }
200
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
202
203 static kern_return_t
204 IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
205 void * param1, void * param2, void * param3,
206 const char * service_name)
207 {
208 IOReturn ret;
209 IOService * service = (IOService *) refcon0;
210 const OSSymbol * function = (const OSSymbol *) refcon1;
211
212 kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);
213
214 ret = service->callPlatformFunction(function, false,
215 (void *)(uintptr_t) priority, param1, param2, param3);
216
217 return (ret);
218 }
219
220 static void
221 IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
222 {
223 iocpu_platform_action_entry_t * entry;
224 OSNumber * num;
225 uint32_t priority;
226 const OSSymbol * key = gActionSymbols[qidx];
227 queue_head_t * queue = &gActionQueues[qidx];
228 bool reverse;
229 bool uniq;
230
231 num = OSDynamicCast(OSNumber, service->getProperty(key));
232 if (!num) return;
233
234 reverse = false;
235 uniq = false;
236 switch (qidx)
237 {
238 case kQueueWake:
239 case kQueueActive:
240 reverse = true;
241 break;
242 case kQueueHaltRestart:
243 case kQueuePanic:
244 uniq = true;
245 break;
246 }
247 if (uniq)
248 {
249 queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
250 {
251 if (service == entry->refcon0) return;
252 }
253 }
254
255 entry = IONew(iocpu_platform_action_entry_t, 1);
256 entry->action = &IOServicePlatformAction;
257 entry->name = service->getName();
258 priority = num->unsigned32BitValue();
259 if (reverse)
260 entry->priority = -priority;
261 else
262 entry->priority = priority;
263 entry->refcon0 = service;
264 entry->refcon1 = (void *) key;
265 entry->callout_in_progress = FALSE;
266
267 iocpu_add_platform_action(queue, entry);
268 }
269
270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
271
/*
 * One-time module initialization: allocates the CPU list and its lock,
 * initializes the platform-action queues, and interns the property-key
 * symbols and state-name strings used by the rest of this file.
 * Must run before any IOCPU registers or any action queue is used.
 */
void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    // Display strings indexed by kIOCPUState* values.
    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    // Each queue's symbol doubles as the service property key it matches.
    gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
307
308 IOReturn
309 IOInstallServicePlatformActions(IOService * service)
310 {
311 IOLockLock(gIOCPUsLock);
312
313 IOInstallServicePlatformAction(service, kQueueHaltRestart);
314 IOInstallServicePlatformAction(service, kQueuePanic);
315
316 IOLockUnlock(gIOCPUsLock);
317
318 return (kIOReturnSuccess);
319 }
320
/*
 * Remove and free every action entry registered for 'service', scanning
 * all queues.  The next pointer is fetched before a potential removal so
 * the walk stays valid while entries are deleted.
 */
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link))
        {
            entry = next;
            // Advance first: 'entry' may be unlinked and freed below.
            next = (typeof(entry)) queue_next(&entry->link);
            if (service == entry->refcon0)
            {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}
348
349
350 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
351
352 kern_return_t PE_cpu_start(cpu_id_t target,
353 vm_offset_t start_paddr, vm_offset_t arg_paddr)
354 {
355 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
356
357 if (targetCPU == 0) return KERN_FAILURE;
358 return targetCPU->startCPU(start_paddr, arg_paddr);
359 }
360
361 void PE_cpu_halt(cpu_id_t target)
362 {
363 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
364
365 if (targetCPU) targetCPU->haltCPU();
366 }
367
368 void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
369 {
370 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
371 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
372
373 if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
374 }
375
376 void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
377 {
378 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
379 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
380
381 if (sourceCPU && targetCPU) sourceCPU->signalCPUDeferred(targetCPU);
382 }
383
384 void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
385 {
386 IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
387 IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
388
389 if (sourceCPU && targetCPU) sourceCPU->signalCPUCancel(targetCPU);
390 }
391
/*
 * Platform-expert bridge: initialize 'target' after start/wake.  bootb is
 * TRUE on first boot.  On ARM, waking the master CPU (non-boot path)
 * clears the machine-layer quiescing flag.
 */
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) {
        targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
        if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
#endif /* defined(__arm__) || defined(__arm64__) */
    }
}
403
/*
 * Platform-expert bridge: quiesce 'target' for sleep.  On ARM, quiescing
 * the master CPU sets the machine-layer quiescing flag before the CPU's
 * own quiesce routine runs.
 */
void PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);
    if (targetCPU) {
#if defined(__arm__) || defined(__arm64__)
        if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
#endif /* defined(__arm__) || defined(__arm64__) */
        targetCPU->quiesceCPU();
    }
}
414
#if defined(__arm__) || defined(__arm64__)
/* Performance-monitor interrupt handler shared by all CPUs (ARM only). */
static perfmon_interrupt_handler_func pmi_handler = 0;

/* Record the PMI handler; installed per-CPU by ..._interrupt_enable(). */
kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

/*
 * Enable or disable the PMI on 'target' by registering/ disabling interrupt
 * source 1 on the CPU's provider nub.  Enabling registers the previously
 * installed pmi_handler.
 */
void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU) {
        if (enable) {
            targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
            targetCPU->getProvider()->enableInterrupt(1);
        } else {
            targetCPU->getProvider()->disableInterrupt(1);
        }
    }
}
#endif
439
440 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
441
#define super IOService

/*
 * IOCPU is abstract: per-architecture subclasses implement the CPU control
 * methods.  The reserved slots pad the vtable for binary compatibility.
 */
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
453
454 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
455
/*
 * System sleep entry point, executed on the boot processor.  Collects the
 * sleep/wake/quiesce/active platform actions from the service registry,
 * runs the sleep actions, halts every CPU (boot CPU last), and — once the
 * machine wakes — runs the wake actions, tears the collected queues down,
 * and restarts the secondary CPUs.
 */
void IOCPUSleepKernel(void)
{
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");

    IORegistryIterator * iter;
    OSOrderedSet * all;
    IOService * service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    // Walk the whole service plane; retry until the iterator completes a
    // pass without the registry changing underneath it.
    iter = IORegistryIterator::iterateOver( gIOServicePlane,
                                            kIORegistryIterateRecursively );
    if( iter)
    {
        all = 0;
        do
        {
            if (all)
                all->release();
            all = iter->iterateAll();
        }
        while (!iter->isValid());
        iter->release();

        if (all)
        {
            // Register each service's sleep/wake/quiesce/active actions.
            while((service = (IOService *) all->getFirstObject()))
            {
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
                {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--)
    {
        // NOTE(review): cast result is not NULL-checked; gIOCPUs is expected
        // to contain only IOCPU objects (see IOCPU::start) — confirm.
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // We make certain that the bootCPU is the last to sleep
        // We'll skip it for now, and halt it after finishing the
        // non-boot CPU's.
        if (target->getCPUNumber() == (UInt32)master_cpu)
        {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning)
        {
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );

    // Now sleep the boot CPU.  Execution resumes here on wake.
    bootCPU->haltCPU();

    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    // Drain and free the per-sleep action entries collected above.
    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
    {
        while (!(queue_empty(&gActionQueues[qidx])))
        {
            entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            if (target->getCPUState() == kIOCPUStateRunning)
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));

            if (target->getCPUState() == kIOCPUStateStopped)
                processor_start(target->getMachProcessor());
        }
    }
}
568
569 bool IOCPU::start(IOService *provider)
570 {
571 OSData *busFrequency, *cpuFrequency, *timebaseFrequency;
572
573 if (!super::start(provider)) return false;
574
575 _cpuGroup = gIOCPUs;
576 cpuNub = provider;
577
578 IOLockLock(gIOCPUsLock);
579 gIOCPUs->setObject(this);
580 IOLockUnlock(gIOCPUsLock);
581
582 // Correct the bus, cpu and timebase frequencies in the device tree.
583 if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
584 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
585 } else {
586 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
587 }
588 provider->setProperty("bus-frequency", busFrequency);
589 busFrequency->release();
590
591 if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
592 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
593 } else {
594 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
595 }
596 provider->setProperty("clock-frequency", cpuFrequency);
597 cpuFrequency->release();
598
599 timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
600 provider->setProperty("timebase-frequency", timebaseFrequency);
601 timebaseFrequency->release();
602
603 super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);
604
605 setCPUNumber(0);
606 setCPUState(kIOCPUStateUnregistered);
607
608 return true;
609 }
610
611 OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
612 {
613 if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];
614
615 return super::getProperty(aKey);
616 }
617
618 bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
619 {
620 if (aKey == gIOCPUStateKey) {
621 return false;
622 }
623
624 return super::setProperty(aKey, anObject);
625 }
626
627 bool IOCPU::serializeProperties(OSSerialize *serialize) const
628 {
629 bool result;
630 OSDictionary *dict = dictionaryWithProperties();
631 if (!dict) return false;
632 dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
633 result = dict->serialize(serialize);
634 dict->release();
635 return result;
636 }
637
/*
 * User-client property setter.  Only the CPU-state key is recognized, and
 * only for administrators.  Note: IOCPU::setProperty above always rejects
 * gIOCPUStateKey, so the success path here is reachable only through a
 * subclass that overrides setProperty.
 */
IOReturn IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString *stateStr;
    IOReturn result;

    if (dict == 0) return kIOReturnUnsupported;

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        // Changing CPU state requires administrator privilege.
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) return result;

        if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}
658
/* Default IPI implementation: no-op; architecture subclasses override. */
void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}

void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}

/* Default timebase enable/disable: no-op; subclasses override. */
void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
680
/* Logical CPU number assigned via setCPUNumber(). */
UInt32 IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}

/* Set the logical CPU number and mirror it as the IOCPUNumber property. */
void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

/* Current kIOCPUState* value for this CPU. */
UInt32 IOCPU::getCPUState(void)
{
    return _cpuState;
}

/* Update the CPU state; out-of-range values are silently ignored. */
void IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}

/* The CPU group this CPU belongs to (currently always gIOCPUs). */
OSArray *IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}

/* The Mach processor_t backing this CPU (set by the subclass). */
processor_t IOCPU::getMachProcessor(void)
{
    return machProcessor;
}
718
719
720 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
721
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

/* Vtable padding for binary compatibility.
 * NOTE(review): slots start at 1 here, not 0 — confirm slot 0 is reserved
 * elsewhere (e.g. in the class header). */
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
733
734
735 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
736
/* Convenience overload: one interrupt source per CPU. */
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}
741
742 IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
743 {
744 int cnt;
745
746 if (!super::init()) return kIOReturnInvalid;
747
748 numSources = sources;
749 numCPUs = cpus;
750
751 vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
752 if (vectors == 0) return kIOReturnNoMemory;
753 bzero(vectors, numSources * sizeof(IOInterruptVector));
754
755 // Allocate a lock for each vector
756 for (cnt = 0; cnt < numSources; cnt++) {
757 vectors[cnt].interruptLock = IOLockAlloc();
758 if (vectors[cnt].interruptLock == NULL) {
759 for (cnt = 0; cnt < numSources; cnt++) {
760 if (vectors[cnt].interruptLock != NULL)
761 IOLockFree(vectors[cnt].interruptLock);
762 }
763 return kIOReturnNoResources;
764 }
765 }
766
767 ml_init_max_cpus(numSources);
768
769 #if KPERF
770 /*
771 * kperf allocates based on the number of CPUs and requires them to all be
772 * accounted for.
773 */
774 boolean_t found_kperf = FALSE;
775 char kperf_config_str[64];
776 found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
777 if (found_kperf && kperf_config_str[0] != '\0') {
778 kperf_kernel_configure(kperf_config_str);
779 }
780 #endif
781
782 return kIOReturnSuccess;
783 }
784
/*
 * Publish this controller in the registry and register it with the
 * platform under the shared platform-interrupt-controller name so nubs
 * can reference it in their interrupt properties.
 */
void IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                               this);
}
792
/*
 * Populate 'service' with interrupt controller/specifier arrays covering
 * every source on this controller, unless the service already has both.
 * Specifier N is simply the integer N encoded as OSData.
 */
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int cnt;
    OSArray *controller;
    OSArray *specifier;
    OSData *tmpData;
    long tmpLong;  // NOTE(review): 'long' is 8 bytes on LP64, so each specifier is 8 bytes there — confirm consumers expect this width

    // Already populated: nothing to do.
    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0))
        return;

    // Create the interrupt specifer array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    };

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}
826
/*
 * Called once per CPU as it comes up: install this controller's
 * handleInterrupt as the CPU's machine-level interrupt handler and count
 * the CPU as enabled.  vectors[0].interruptLock doubles as the lock for
 * enabledCPUs; when the last CPU checks in, waiters in registerInterrupt()
 * are woken (thread_wakeup pairs with the assert_wait there).
 */
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}
845
/*
 * Claim vector 'source' for the given handler.  Fails if the source index
 * is out of range or the vector is already taken.  After registration the
 * caller blocks until every CPU has enabled its interrupt (woken by
 * enableCPUInterrupt), so registration completes only once all CPUs run.
 */
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numSources) return kIOReturnNoResources;

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub = nub;
    vector->source = source;
    vector->target = target;
    vector->refCon = refCon;

    // Get the vector ready. It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered = 1;

    IOUnlock(vector->interruptLock);

    // vectors[0].interruptLock guards enabledCPUs (see enableCPUInterrupt).
    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        // Wait for the remaining CPUs to come online.
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else
        IOUnlock(vectors[0].interruptLock);

    return kIOReturnSuccess;
}
891
892 IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
893 int /*source*/,
894 int *interruptType)
895 {
896 if (interruptType == 0) return kIOReturnBadArgument;
897
898 *interruptType = kIOInterruptTypeLevel;
899
900 return kIOReturnSuccess;
901 }
902
/* Intentionally a no-op here (per-CPU masking is handled elsewhere; see
 * the commented-out ml_set_interrupts_enabled call). */
IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}

/* Intentionally a no-op, mirroring enableInterrupt above. */
IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}

/* Trigger an interrupt on the current CPU via the machine layer. */
IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}
923
/*
 * Interrupt dispatch: 'source' is the CPU number (installed as the machine
 * handler in enableCPUInterrupt); forward to the handler registered on
 * that vector.  Returns kIOReturnInvalid if no handler is registered.
 */
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) return kIOReturnInvalid;

    vector->handler(vector->target, vector->refCon,
                    vector->nub, vector->source);

    return kIOReturnSuccess;
}
939
940 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */