/* iokit/Kernel/IOCPU.cpp — from the xnu-6153.41.3 source distribution */
1 /*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 extern "C" {
30 #include <machine/machine_routines.h>
31 #include <pexpert/pexpert.h>
32 #include <kern/cpu_number.h>
33 extern void kperf_kernel_configure(char *);
34 }
35
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOPlatformExpert.h>
38 #include <IOKit/pwr_mgt/RootDomain.h>
39 #include <IOKit/pwr_mgt/IOPMPrivate.h>
40 #include <IOKit/IOUserClient.h>
41 #include <IOKit/IOKitKeysPrivate.h>
42 #include <IOKit/IOCPU.h>
43 #include "IOKitKernelInternal.h"
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46 #include <kern/queue.h>
47 #include <kern/sched_prim.h>
48
49 extern "C" void console_suspend();
50 extern "C" void console_resume();
51 extern "C" void sched_override_recommended_cores_for_sleep(void);
52 extern "C" void sched_restore_recommended_cores_after_sleep(void);
53
/*
 * Signature of a platform action callout.  refcon0/refcon1 are the values
 * captured at registration time; priority is the absolute priority the
 * entry matched at; param1..param3 are per-invocation arguments.
 */
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
    void * param1, void * param2, void * param3,
    const char * name);

/*
 * One registered platform action, linked into one of the gActionQueues
 * in ascending priority order (see iocpu_add_platform_action).
 */
struct iocpu_platform_action_entry {
	queue_chain_t link;                     // linkage within the owning action queue
	iocpu_platform_action_t action;         // callout to invoke
	int32_t priority;                       // signed: negated on reverse-ordered queues (wake/active)
	const char * name;                      // service name, used for logging
	void * refcon0;                         // typically the registering IOService
	void * refcon1;                         // typically the platform-function OSSymbol
	boolean_t callout_in_progress;          // re-entry guard when nesting is disallowed
	struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
69
70 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
71
72 static IOLock *gIOCPUsLock;
73 static OSArray *gIOCPUs;
74 static const OSSymbol *gIOCPUStateKey;
75 static OSString *gIOCPUStateNames[kIOCPUStateCount];
76
77 enum{
78 kQueueSleep = 0,
79 kQueueWake = 1,
80 kQueueQuiesce = 2,
81 kQueueActive = 3,
82 kQueueHaltRestart = 4,
83 kQueuePanic = 5,
84 kQueueCount = 6
85 };
86
87 const OSSymbol * gIOPlatformSleepActionKey;
88 const OSSymbol * gIOPlatformWakeActionKey;
89 const OSSymbol * gIOPlatformQuiesceActionKey;
90 const OSSymbol * gIOPlatformActiveActionKey;
91 const OSSymbol * gIOPlatformHaltRestartActionKey;
92 const OSSymbol * gIOPlatformPanicActionKey;
93
94 static queue_head_t gActionQueues[kQueueCount];
95 static const OSSymbol * gActionSymbols[kQueueCount];
96
97 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98
/*
 * Insert an action entry into a queue, keeping the queue sorted by
 * ascending priority.  Equal-priority entries go after existing ones,
 * so insertion is stable within a priority band.
 */
static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
	iocpu_platform_action_entry_t * next;

	queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
	{
		if (next->priority > entry->priority) {
			queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
			return;
		}
	}
	queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}
113
/*
 * Unlink an entry from whichever action queue it is on.  The caller is
 * responsible for freeing the entry (see IORemoveServicePlatformActions
 * and IOCPUSleepKernel).
 */
static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
	remque(&entry->link);
}
119
/*
 * Invoke every action on `queue` whose absolute priority falls within
 * [first_priority, last_priority], in queue (ascending priority) order.
 *
 * When allow_nested_callouts is FALSE, an entry already marked
 * callout_in_progress is skipped (re-entry protection, used for the
 * panic callouts); when TRUE the flag is neither checked nor set.
 *
 * The whole queue is always walked; the returned value is the first
 * non-KERN_SUCCESS result produced by any action.
 */
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
    void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
	kern_return_t ret = KERN_SUCCESS;
	kern_return_t result = KERN_SUCCESS;
	iocpu_platform_action_entry_t * next;

	queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
	{
		// Entries on reverse-ordered queues carry a negated priority;
		// compare against the absolute value.
		uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
		if ((pri >= first_priority) && (pri <= last_priority)) {
			//kprintf("[%p]", next->action);
			if (!allow_nested_callouts && !next->callout_in_progress) {
				next->callout_in_progress = TRUE;
				ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
				next->callout_in_progress = FALSE;
			} else if (allow_nested_callouts) {
				ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
			}
		}
		// Latch the first failure; later successes do not mask it.
		if (KERN_SUCCESS == result) {
			result = ret;
		}
	}
	return result;
}
147
148 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
149
150 extern "C" kern_return_t
151 IOCPURunPlatformQuiesceActions(void)
152 {
153 assert(preemption_enabled() == false);
154 return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1,
155 NULL, NULL, NULL, TRUE);
156 }
157
158 extern "C" kern_return_t
159 IOCPURunPlatformActiveActions(void)
160 {
161 assert(preemption_enabled() == false);
162 return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1,
163 NULL, NULL, NULL, TRUE);
164 }
165
166 extern "C" kern_return_t
167 IOCPURunPlatformHaltRestartActions(uint32_t message)
168 {
169 if (!gActionQueues[kQueueHaltRestart].next) {
170 return kIOReturnNotReady;
171 }
172 return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1,
173 (void *)(uintptr_t) message, NULL, NULL, TRUE);
174 }
175
176 extern "C" kern_return_t
177 IOCPURunPlatformPanicActions(uint32_t message)
178 {
179 // Don't allow nested calls of panic actions
180 if (!gActionQueues[kQueuePanic].next) {
181 return kIOReturnNotReady;
182 }
183 return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
184 (void *)(uintptr_t) message, NULL, NULL, FALSE);
185 }
186
187
188 extern "C" kern_return_t
189 IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
190 {
191 PE_panic_save_context_t context = {
192 .psc_buffer = addr,
193 .psc_offset = offset,
194 .psc_length = len
195 };
196
197 // Don't allow nested calls of panic actions
198 if (!gActionQueues[kQueuePanic].next) {
199 return kIOReturnNotReady;
200 }
201 return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1,
202 (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE);
203 }
204
205 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
206
207 static kern_return_t
208 IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
209 void * param1, void * param2, void * param3,
210 const char * service_name)
211 {
212 IOReturn ret;
213 IOService * service = (IOService *) refcon0;
214 const OSSymbol * function = (const OSSymbol *) refcon1;
215
216 kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);
217
218 ret = service->callPlatformFunction(function, false,
219 (void *)(uintptr_t) priority, param1, param2, param3);
220
221 return ret;
222 }
223
224 static void
225 IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
226 {
227 iocpu_platform_action_entry_t * entry;
228 OSNumber * num;
229 uint32_t priority;
230 const OSSymbol * key = gActionSymbols[qidx];
231 queue_head_t * queue = &gActionQueues[qidx];
232 bool reverse;
233 bool uniq;
234
235 num = OSDynamicCast(OSNumber, service->getProperty(key));
236 if (!num) {
237 return;
238 }
239
240 reverse = false;
241 uniq = false;
242 switch (qidx) {
243 case kQueueWake:
244 case kQueueActive:
245 reverse = true;
246 break;
247 case kQueueHaltRestart:
248 case kQueuePanic:
249 uniq = true;
250 break;
251 }
252 if (uniq) {
253 queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
254 {
255 if (service == entry->refcon0) {
256 return;
257 }
258 }
259 }
260
261 entry = IONew(iocpu_platform_action_entry_t, 1);
262 entry->action = &IOServicePlatformAction;
263 entry->name = service->getName();
264 priority = num->unsigned32BitValue();
265 if (reverse) {
266 entry->priority = -priority;
267 } else {
268 entry->priority = priority;
269 }
270 entry->refcon0 = service;
271 entry->refcon1 = (void *) key;
272 entry->callout_in_progress = FALSE;
273
274 iocpu_add_platform_action(queue, entry);
275 }
276
277 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
278
/*
 * One-time setup of the IOCPU globals: the CPU registry and its lock,
 * the action queue heads, and the OSSymbol/OSString constants used as
 * property keys.  Must run before any IOCPU registers or any platform
 * action is installed.
 */
void
IOCPUInitialize(void)
{
	gIOCPUsLock = IOLockAlloc();
	gIOCPUs = OSArray::withCapacity(1);

	for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
		queue_init(&gActionQueues[qidx]);
	}

	gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

	// Human-readable state names published via the IOCPUState property.
	gIOCPUStateNames[kIOCPUStateUnregistered] =
	    OSString::withCStringNoCopy("Unregistered");
	gIOCPUStateNames[kIOCPUStateUninitalized] =
	    OSString::withCStringNoCopy("Uninitalized");
	gIOCPUStateNames[kIOCPUStateStopped] =
	    OSString::withCStringNoCopy("Stopped");
	gIOCPUStateNames[kIOCPUStateRunning] =
	    OSString::withCStringNoCopy("Running");

	// Each action queue is keyed by a platform-action property symbol;
	// the exported gIOPlatform*ActionKey globals alias the same symbols.
	gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
	        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
	gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
	        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
	gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
	        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
	gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
	        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
	gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
	        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
	gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
	        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
313
314 IOReturn
315 IOInstallServicePlatformActions(IOService * service)
316 {
317 IOLockLock(gIOCPUsLock);
318
319 IOInstallServicePlatformAction(service, kQueueHaltRestart);
320 IOInstallServicePlatformAction(service, kQueuePanic);
321
322 IOLockUnlock(gIOCPUsLock);
323
324 return kIOReturnSuccess;
325 }
326
/*
 * Remove and free every platform action entry registered by `service`,
 * across all action queues.  A lookahead pointer is advanced before the
 * current entry is unlinked, so deletion is safe mid-walk.
 */
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
	iocpu_platform_action_entry_t * entry;
	iocpu_platform_action_entry_t * next;

	IOLockLock(gIOCPUsLock);

	for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) {
		next = (typeof(entry))queue_first(&gActionQueues[qidx]);
		while (!queue_end(&gActionQueues[qidx], &next->link)) {
			entry = next;
			next = (typeof(entry))queue_next(&entry->link);
			if (service == entry->refcon0) {
				iocpu_remove_platform_action(entry);
				IODelete(entry, iocpu_platform_action_entry_t, 1);
			}
		}
	}

	IOLockUnlock(gIOCPUsLock);

	return kIOReturnSuccess;
}
351
352
353 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
354
355 kern_return_t
356 PE_cpu_start(cpu_id_t target,
357 vm_offset_t start_paddr, vm_offset_t arg_paddr)
358 {
359 IOCPU *targetCPU = (IOCPU *)target;
360
361 if (targetCPU == NULL) {
362 return KERN_FAILURE;
363 }
364 return targetCPU->startCPU(start_paddr, arg_paddr);
365 }
366
367 void
368 PE_cpu_halt(cpu_id_t target)
369 {
370 IOCPU *targetCPU = (IOCPU *)target;
371
372 targetCPU->haltCPU();
373 }
374
/*
 * pexpert callback: send an inter-processor interrupt from `source`
 * to `target`.
 */
void
PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPU(targetCPU);
}

/*
 * pexpert callback: send a deferrable IPI.  The base IOCPU falls back to
 * a regular IPI when the platform has no deferred variant.
 */
void
PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPUDeferred(targetCPU);
}

/*
 * pexpert callback: cancel a previously sent deferred IPI, where the
 * platform supports cancellation.
 */
void
PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
{
	IOCPU *sourceCPU = (IOCPU *)source;
	IOCPU *targetCPU = (IOCPU *)target;

	sourceCPU->signalCPUCancel(targetCPU);
}
401
/*
 * pexpert callback: (re)initialize a CPU, either at boot (bootb == TRUE)
 * or on wake from sleep.  Panics on an invalid target rather than
 * continuing with a NULL CPU.
 */
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

	targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
	// Re-initializing the boot CPU on wake ends the quiescing window
	// opened in PE_cpu_machine_quiesce.
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		ml_set_is_quiescing(false);
	}
#endif /* defined(__arm__) || defined(__arm64__) */
}
418
/*
 * pexpert callback: quiesce a CPU on the way into system sleep.  When
 * the boot CPU quiesces, the ARM quiescing window is opened (closed
 * again in PE_cpu_machine_init on wake).
 */
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm__) || defined(__arm64__)
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		ml_set_is_quiescing(true);
	}
#endif /* defined(__arm__) || defined(__arm64__) */
	targetCPU->quiesceCPU();
}
430
#if defined(__arm__) || defined(__arm64__)
// Single shared PMI handler used for all CPUs; the last install wins.
static perfmon_interrupt_handler_func pmi_handler = NULL;

/*
 * Record the handler that PE_cpu_perfmon_interrupt_enable will attach
 * to each CPU's PMI interrupt line.
 */
kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	pmi_handler = handler;

	return KERN_SUCCESS;
}
441
442 void
443 PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
444 {
445 IOCPU *targetCPU = (IOCPU*)target;
446
447 if (targetCPU == nullptr) {
448 return;
449 }
450
451 if (enable) {
452 targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, NULL);
453 targetCPU->getProvider()->enableInterrupt(1);
454 } else {
455 targetCPU->getProvider()->disableInterrupt(1);
456 }
457 }
458 #endif
459
460 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
461
#define super IOService

// IOCPU is abstract; concrete platform CPU drivers subclass it.  The
// reserved slots pad the vtable for future binary-compatible expansion.
OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
473
474 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
475
/*
 * Orchestrates the CPU side of full system sleep:
 *  1. Harvest sleep/wake/quiesce/active platform actions from every
 *     service in the registry.
 *  2. Run the sleep actions, then halt every running non-boot CPU.
 *  3. Halt the boot CPU — the system sleeps inside that call (quiesce
 *     actions run on the way down, active actions on the way back up).
 *  4. On wake, run the wake actions, free the per-sleep action queues,
 *     and restart the stopped CPUs.
 */
void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	long cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	kprintf("IOCPUSleepKernel\n");
#if defined(__arm64__)
	sched_override_recommended_cores_for_sleep();
#endif

	IORegistryIterator * iter;
	OSOrderedSet * all;
	IOService * service;

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

	// Walk the whole service plane, retrying until the iterator reports
	// a consistent (un-invalidated) snapshot, then install each service's
	// sleep..active platform actions.
	iter = IORegistryIterator::iterateOver( gIOServicePlane,
	    kIORegistryIterateRecursively );
	if (iter) {
		all = NULL;
		do{
			if (all) {
				all->release();
			}
			all = iter->iterateAll();
		}while (!iter->isValid());
		iter->release();

		if (all) {
			while ((service = (IOService *) all->getFirstObject())) {
				for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
					IOInstallServicePlatformAction(service, qidx);
				}
				all->removeObject(service);
			}
			all->release();
		}
	}

	iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1,
	    NULL, NULL, NULL, TRUE);

	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
		}
	}

	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */

	bootCPU->haltCPU();

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();
	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

	console_resume();

	iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1,
	    NULL, NULL, NULL, TRUE);

	// The sleep..active queues are rebuilt on every sleep cycle; free
	// their entries now.  Halt-restart/panic queues persist.
	iocpu_platform_action_entry_t * entry;
	for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) {
		while (!(queue_empty(&gActionQueues[qidx]))) {
			entry = (typeof(entry))queue_first(&gActionQueues[qidx]);
			iocpu_remove_platform_action(entry);
			IODelete(entry, iocpu_platform_action_entry_t, 1);
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_start(target->getMachProcessor());
			}
		}
	}

#if defined(__arm64__)
	sched_restore_recommended_cores_after_sleep();
#endif

	thread_kern_set_pri(self, old_pri);
}
622
623 bool
624 IOCPU::start(IOService *provider)
625 {
626 OSData *busFrequency, *cpuFrequency, *timebaseFrequency;
627
628 if (!super::start(provider)) {
629 return false;
630 }
631
632 _cpuGroup = gIOCPUs;
633 cpuNub = provider;
634
635 IOLockLock(gIOCPUsLock);
636 gIOCPUs->setObject(this);
637 IOLockUnlock(gIOCPUsLock);
638
639 // Correct the bus, cpu and timebase frequencies in the device tree.
640 if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
641 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
642 } else {
643 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
644 }
645 provider->setProperty("bus-frequency", busFrequency);
646 busFrequency->release();
647
648 if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
649 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
650 } else {
651 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
652 }
653 provider->setProperty("clock-frequency", cpuFrequency);
654 cpuFrequency->release();
655
656 timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
657 provider->setProperty("timebase-frequency", timebaseFrequency);
658 timebaseFrequency->release();
659
660 super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8);
661
662 setCPUNumber(0);
663 setCPUState(kIOCPUStateUnregistered);
664
665 return true;
666 }
667
668 void
669 IOCPU::detach(IOService *provider)
670 {
671 super::detach(provider);
672 IOLockLock(gIOCPUsLock);
673 unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0);
674 if (index != (unsigned int)-1) {
675 gIOCPUs->removeObject(index);
676 }
677 IOLockUnlock(gIOCPUsLock);
678 }
679
680 OSObject *
681 IOCPU::getProperty(const OSSymbol *aKey) const
682 {
683 if (aKey == gIOCPUStateKey) {
684 return gIOCPUStateNames[_cpuState];
685 }
686
687 return super::getProperty(aKey);
688 }
689
690 bool
691 IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
692 {
693 if (aKey == gIOCPUStateKey) {
694 return false;
695 }
696
697 return super::setProperty(aKey, anObject);
698 }
699
700 bool
701 IOCPU::serializeProperties(OSSerialize *serialize) const
702 {
703 bool result;
704 OSDictionary *dict = dictionaryWithProperties();
705 if (!dict) {
706 return false;
707 }
708 dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
709 result = dict->serialize(serialize);
710 dict->release();
711 return result;
712 }
713
714 IOReturn
715 IOCPU::setProperties(OSObject *properties)
716 {
717 OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
718 OSString *stateStr;
719 IOReturn result;
720
721 if (dict == NULL) {
722 return kIOReturnUnsupported;
723 }
724
725 stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
726 if (stateStr != NULL) {
727 result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
728 if (result != kIOReturnSuccess) {
729 return result;
730 }
731
732 if (setProperty(gIOCPUStateKey, stateStr)) {
733 return kIOReturnSuccess;
734 }
735
736 return kIOReturnUnsupported;
737 }
738
739 return kIOReturnUnsupported;
740 }
741
/*
 * Send an IPI to `target`.  No-op in the abstract base class; platform
 * subclasses provide the implementation.
 */
void
IOCPU::signalCPU(IOCPU */*target*/)
{
}

void
IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Our CPU may not support deferred IPIs,
	// so send a regular IPI by default
	signalCPU(target);
}

void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by
	// signalCPUDeferred; unsupported
	// by default
}

/*
 * Enable/disable the CPU timebase.  No-op by default; subclasses may
 * override.
 */
void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
767
// Logical CPU number (set via setCPUNumber by the platform driver).
UInt32
IOCPU::getCPUNumber(void)
{
	return _cpuNumber;
}

// Record the CPU number and publish it as the IOCPUNumber property.
void
IOCPU::setCPUNumber(UInt32 cpuNumber)
{
	_cpuNumber = cpuNumber;
	super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

// Current run state (one of the kIOCPUState* values).
UInt32
IOCPU::getCPUState(void)
{
	return _cpuState;
}

// Update the run state; out-of-range values are silently ignored.
void
IOCPU::setCPUState(UInt32 cpuState)
{
	if (cpuState < kIOCPUStateCount) {
		_cpuState = cpuState;
	}
}

// The CPU group this CPU belongs to (assigned from gIOCPUs in start()).
OSArray *
IOCPU::getCPUGroup(void)
{
	return _cpuGroup;
}

// Number of CPUs in this CPU's group.
UInt32
IOCPU::getCPUGroupSize(void)
{
	return _cpuGroup->getCount();
}

// The Mach processor_t backing this CPU.
processor_t
IOCPU::getMachProcessor(void)
{
	return machProcessor;
}
812
813
814 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
815
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

// Reserved vtable padding slots for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
826
827
828
829 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
830
/*
 * Convenience initializer: one interrupt source per CPU.
 */
IOReturn
IOCPUInterruptController::initCPUInterruptController(int sources)
{
	return initCPUInterruptController(sources, sources);
}
836
837 IOReturn
838 IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
839 {
840 int cnt;
841
842 if (!super::init()) {
843 return kIOReturnInvalid;
844 }
845
846 numSources = sources;
847 numCPUs = cpus;
848
849 vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
850 if (vectors == NULL) {
851 return kIOReturnNoMemory;
852 }
853 bzero(vectors, numSources * sizeof(IOInterruptVector));
854
855 // Allocate a lock for each vector
856 for (cnt = 0; cnt < numSources; cnt++) {
857 vectors[cnt].interruptLock = IOLockAlloc();
858 if (vectors[cnt].interruptLock == NULL) {
859 for (cnt = 0; cnt < numSources; cnt++) {
860 if (vectors[cnt].interruptLock != NULL) {
861 IOLockFree(vectors[cnt].interruptLock);
862 }
863 }
864 return kIOReturnNoResources;
865 }
866 }
867
868 ml_init_max_cpus(numSources);
869
870 #if KPERF
871 /*
872 * kperf allocates based on the number of CPUs and requires them to all be
873 * accounted for.
874 */
875 boolean_t found_kperf = FALSE;
876 char kperf_config_str[64];
877 found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
878 if (found_kperf && kperf_config_str[0] != '\0') {
879 kperf_kernel_configure(kperf_config_str);
880 }
881 #endif /* KPERF */
882
883 return kIOReturnSuccess;
884 }
885
886 void
887 IOCPUInterruptController::registerCPUInterruptController(void)
888 {
889 registerService();
890
891 getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
892 this);
893 }
894
895 void
896 IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
897 {
898 int cnt;
899 OSArray *controller;
900 OSArray *specifier;
901 OSData *tmpData;
902 long tmpLong;
903
904 if ((service->getProperty(gIOInterruptControllersKey) != NULL) &&
905 (service->getProperty(gIOInterruptSpecifiersKey) != NULL)) {
906 return;
907 }
908
909 // Create the interrupt specifer array.
910 specifier = OSArray::withCapacity(numSources);
911 for (cnt = 0; cnt < numSources; cnt++) {
912 tmpLong = cnt;
913 tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
914 specifier->setObject(tmpData);
915 tmpData->release();
916 }
917 ;
918
919 // Create the interrupt controller array.
920 controller = OSArray::withCapacity(numSources);
921 for (cnt = 0; cnt < numSources; cnt++) {
922 controller->setObject(gPlatformInterruptControllerName);
923 }
924
925 // Put the two arrays into the property table.
926 service->setProperty(gIOInterruptControllersKey, controller);
927 service->setProperty(gIOInterruptSpecifiersKey, specifier);
928 controller->release();
929 specifier->release();
930 }
931
/*
 * Install the low-level interrupt handler for `cpu` and count it as
 * enabled.  vectors[0].interruptLock doubles as the lock protecting
 * enabledCPUs; once every CPU has checked in, threads blocked in
 * registerInterrupt are woken and cpusRunning() is signalled.
 */
void
IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
	IOInterruptHandler handler = OSMemberFunctionCast(
		IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

	assert(numCPUs > 0);

	ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, NULL);

	IOTakeLock(vectors[0].interruptLock);
	++enabledCPUs;

	if (enabledCPUs == numCPUs) {
		IOService::cpusRunning();
		thread_wakeup(this);
	}
	IOUnlock(vectors[0].interruptLock);
}
951
/*
 * Register a client handler on `source`.  After filling in the vector,
 * the caller is blocked until every CPU has checked in through
 * enableCPUInterrupt, so clients never observe a partially-started
 * CPU set.
 */
IOReturn
IOCPUInterruptController::registerInterrupt(IOService *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
	IOInterruptVector *vector;

	// Interrupts must be enabled, as this can allocate memory.
	assert(ml_get_interrupts_enabled() == TRUE);

	if (source >= numSources) {
		return kIOReturnNoResources;
	}

	vector = &vectors[source];

	// Get the lock for this vector.
	IOTakeLock(vector->interruptLock);

	// Make sure the vector is not in use.
	if (vector->interruptRegistered) {
		IOUnlock(vector->interruptLock);
		return kIOReturnNoResources;
	}

	// Fill in vector with the client's info.
	vector->handler = handler;
	vector->nub = nub;
	vector->source = source;
	vector->target = target;
	vector->refCon = refCon;

	// Get the vector ready.  It starts hard disabled.
	vector->interruptDisabledHard = 1;
	vector->interruptDisabledSoft = 1;
	vector->interruptRegistered = 1;

	IOUnlock(vector->interruptLock);

	// vectors[0].interruptLock protects enabledCPUs (see
	// enableCPUInterrupt).  assert_wait is issued before the lock is
	// dropped so the wakeup cannot be missed.
	IOTakeLock(vectors[0].interruptLock);
	if (enabledCPUs != numCPUs) {
		assert_wait(this, THREAD_UNINT);
		IOUnlock(vectors[0].interruptLock);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		IOUnlock(vectors[0].interruptLock);
	}

	return kIOReturnSuccess;
}
1004
1005 IOReturn
1006 IOCPUInterruptController::getInterruptType(IOService */*nub*/,
1007 int /*source*/,
1008 int *interruptType)
1009 {
1010 if (interruptType == NULL) {
1011 return kIOReturnBadArgument;
1012 }
1013
1014 *interruptType = kIOInterruptTypeLevel;
1015
1016 return kIOReturnSuccess;
1017 }
1018
/*
 * Per-source enable: a no-op for CPU interrupts; always succeeds.
 */
IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}

/*
 * Per-source disable: likewise a no-op; always succeeds.
 */
IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
	// ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}

/*
 * Ask the machine layer to raise an interrupt (ml_cause_interrupt).
 */
IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
1042
1043 IOReturn
1044 IOCPUInterruptController::handleInterrupt(void */*refCon*/,
1045 IOService */*nub*/,
1046 int source)
1047 {
1048 IOInterruptVector *vector;
1049
1050 vector = &vectors[source];
1051
1052 if (!vector->interruptRegistered) {
1053 return kIOReturnInvalid;
1054 }
1055
1056 vector->handler(vector->target, vector->refCon,
1057 vector->nub, vector->source);
1058
1059 return kIOReturnSuccess;
1060 }
1061
1062 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */