/*
 * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
 *
 * DRI: Josh de Cesare
 *
 */

extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
}

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOCPU.h>


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

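// The PE_cpu_* routines below are the C entry points used by the platform
// expert and machine-dependent code.  Each one treats the opaque cpu_id_t as
// the IOCPU object it was registered as, recovers it with OSDynamicCast, and
// forwards the call to the corresponding IOCPU virtual method.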
kern_return_t PE_cpu_start(cpu_id_t target,
                           vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU == 0) return KERN_FAILURE;
  return targetCPU->startCPU(start_paddr, arg_paddr);
}

void PE_cpu_halt(cpu_id_t target)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->haltCPU();
}

void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
  IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
}

void PE_cpu_machine_init(cpu_id_t target, boolean_t boot)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->initCPU(boot);
}

void PE_cpu_machine_quiesce(cpu_id_t target)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->quiesceCPU();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static OSArray *gIOCPUs;
static const OSSymbol *gIOCPUStateKey;
static OSString *gIOCPUStateNames[kIOCPUStateCount];

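// IOCPUSleepKernel drives system sleep from the CPU side: it walks the global
// CPU list from the highest index down to 0, halting every CPU that is still
// running (the boot CPU, at index 0, is halted last), and on the way back out
// of sleep it restarts all of the non-boot CPUs that were left stopped.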
void IOCPUSleepKernel(void)
{
  long cnt, numCPUs;
  IOCPU *target;

  numCPUs = gIOCPUs->getCount();

  // Sleep the CPUs.
  cnt = numCPUs;
  while (cnt--) {
    target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
    if (target->getCPUState() == kIOCPUStateRunning) {
      target->haltCPU();
    }
  }

  // Wake the other CPUs.
  for (cnt = 1; cnt < numCPUs; cnt++) {
    target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
    if (target->getCPUState() == kIOCPUStateStopped) {
      processor_start(target->getMachProcessor());
    }
  }
}

void IOCPU::initCPUs(void)
{
  if (gIOCPUs == 0) {
    gIOCPUs = OSArray::withCapacity(1);

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    gIOCPUStateNames[kIOCPUStateUnregistered] =
      OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
      OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
      OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
      OSString::withCStringNoCopy("Running");
  }
}

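// IOCPU is an abstract class (note OSDefineMetaClassAndAbstractStructors
// above); a platform-specific subclass supplies the startCPU(), haltCPU(),
// initCPU() and quiesceCPU() implementations that the PE_cpu_* glue and
// IOCPUSleepKernel() end up calling.  Purely as an illustrative sketch (the
// class name and exact method list here are assumptions, not part of this
// file), such a subclass would look roughly like:
//
//   class MyPlatformCPU : public IOCPU
//   {
//     OSDeclareDefaultStructors(MyPlatformCPU)
//
//   public:
//     virtual kern_return_t startCPU(vm_offset_t start_paddr,
//                                    vm_offset_t arg_paddr);
//     virtual void          haltCPU(void);
//     virtual void          initCPU(bool boot);
//     virtual void          quiesceCPU(void);
//     virtual const OSSymbol *getCPUName(void);
//   };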
bool IOCPU::start(IOService *provider)
{
  OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

  if (!super::start(provider)) return false;

  initCPUs();

  _cpuGroup = gIOCPUs;
  cpuNub = provider;

  gIOCPUs->setObject(this);

  // Correct the bus, cpu and timebase frequencies in the device tree.
  if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
    busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
  } else {
    busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
  }
  provider->setProperty("bus-frequency", busFrequency);
  busFrequency->release();

  if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
    cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
  } else {
    cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
  }
  provider->setProperty("clock-frequency", cpuFrequency);
  cpuFrequency->release();

  timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
  provider->setProperty("timebase-frequency", timebaseFrequency);
  timebaseFrequency->release();

  super::setProperty("IOCPUID", (UInt32)this, 32);

  setCPUNumber(0);
  setCPUState(kIOCPUStateUnregistered);

  return true;
}

OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
{
  if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];

  return super::getProperty(aKey);
}

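// Writing the "IOCPUState" property is how a CPU is started or stopped from
// software: "running" restarts a stopped processor via processor_start(),
// "stopped" halts a running one via haltCPU().  The boot CPU (_cpuNumber 0)
// is never allowed to change state this way.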
bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
  OSString *stateStr;

  if (aKey == gIOCPUStateKey) {
    stateStr = OSDynamicCast(OSString, anObject);
    if (stateStr == 0) return false;

    if (_cpuNumber == 0) return false;

    if (stateStr->isEqualTo("running")) {
      if (_cpuState == kIOCPUStateStopped) {
        processor_start(machProcessor);
      } else if (_cpuState != kIOCPUStateRunning) {
        return false;
      }
    } else if (stateStr->isEqualTo("stopped")) {
      if (_cpuState == kIOCPUStateRunning) {
        haltCPU();
      } else if (_cpuState != kIOCPUStateStopped) {
        return false;
      }
    } else return false;

    return true;
  }

  return super::setProperty(aKey, anObject);
}

bool IOCPU::serializeProperties(OSSerialize *serialize) const
{
  super::setProperty(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);

  return super::serializeProperties(serialize);
}

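// setProperties() is the path taken by user-space property requests; only the
// "IOCPUState" key is accepted, and only from callers holding the
// administrator privilege.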
IOReturn IOCPU::setProperties(OSObject *properties)
{
  OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
  OSString *stateStr;
  IOReturn result;

  if (dict == 0) return kIOReturnUnsupported;

  stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
  if (stateStr != 0) {
    result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
    if (result != kIOReturnSuccess) return result;

    if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

    return kIOReturnUnsupported;
  }

  return kIOReturnUnsupported;
}

void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32 IOCPU::getCPUNumber(void)
{
  return _cpuNumber;
}

void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
  _cpuNumber = cpuNumber;
  super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32 IOCPU::getCPUState(void)
{
  return _cpuState;
}

void IOCPU::setCPUState(UInt32 cpuState)
{
  if (cpuState < kIOCPUStateCount) {
    _cpuState = cpuState;
  }
}

OSArray *IOCPU::getCPUGroup(void)
{
  return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
  return _cpuGroup->getCount();
}

processor_t IOCPU::getMachProcessor(void)
{
  return machProcessor;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

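// One interrupt "vector" is kept per CPU: initCPUInterruptController()
// allocates the per-CPU IOCPU pointer and IOInterruptVector arrays, gives
// each vector its own IOLock, and tells the machine layer how many CPUs to
// expect via ml_init_max_cpus().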
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
  int cnt;

  if (!super::init()) return kIOReturnInvalid;

  numCPUs = sources;

  cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *));
  if (cpus == 0) return kIOReturnNoMemory;
  bzero(cpus, numCPUs * sizeof(IOCPU *));

  vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector));
  if (vectors == 0) return kIOReturnNoMemory;
  bzero(vectors, numCPUs * sizeof(IOInterruptVector));

  // Allocate a lock for each vector; on failure, free any locks already
  // allocated and bail out.
  for (cnt = 0; cnt < numCPUs; cnt++) {
    vectors[cnt].interruptLock = IOLockAlloc();
    if (vectors[cnt].interruptLock == NULL) {
      for (cnt = 0; cnt < numCPUs; cnt++) {
        if (vectors[cnt].interruptLock != NULL)
          IOLockFree(vectors[cnt].interruptLock);
      }
      return kIOReturnNoResources;
    }
  }

  ml_init_max_cpus(numCPUs);

  return kIOReturnSuccess;
}

void IOCPUInterruptController::registerCPUInterruptController(void)
{
  registerService();

  getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                             this);
}

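// setCPUInterruptProperties() publishes matching interrupt-controller and
// interrupt-specifier arrays on the given nub, one entry per CPU, so that
// each CPU's interrupt can later be looked up and registered by index.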
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
  int cnt;
  OSArray *controller;
  OSArray *specifier;
  OSData *tmpData;
  long tmpLong;

  // Create the interrupt specifier array.
  specifier = OSArray::withCapacity(numCPUs);
  for (cnt = 0; cnt < numCPUs; cnt++) {
    tmpLong = cnt;
    tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
    specifier->setObject(tmpData);
    tmpData->release();
  }

  // Create the interrupt controller array.
  controller = OSArray::withCapacity(numCPUs);
  for (cnt = 0; cnt < numCPUs; cnt++) {
    controller->setObject(gPlatformInterruptControllerName);
  }

  // Put the two arrays into the property table.
  service->setProperty(gIOInterruptControllersKey, controller);
  service->setProperty(gIOInterruptSpecifiersKey, specifier);
  controller->release();
  specifier->release();
}

void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
  ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this,
                               (IOInterruptHandler)&IOCPUInterruptController::handleInterrupt, 0);

  enabledCPUs++;

  if (enabledCPUs == numCPUs) thread_wakeup(this);
}

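// registerInterrupt() fills in the per-CPU vector with the client's handler.
// If not every CPU has called enableCPUInterrupt() yet, the caller is blocked
// on this object until the thread_wakeup() above releases it.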
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
  IOInterruptVector *vector;

  if (source >= numCPUs) return kIOReturnNoResources;

  vector = &vectors[source];

  // Get the lock for this vector.
  IOTakeLock(vector->interruptLock);

  // Make sure the vector is not in use.
  if (vector->interruptRegistered) {
    IOUnlock(vector->interruptLock);
    return kIOReturnNoResources;
  }

  // Fill in vector with the client's info.
  vector->handler = handler;
  vector->nub = nub;
  vector->source = source;
  vector->target = target;
  vector->refCon = refCon;

  // Get the vector ready.  It starts hard disabled.
  vector->interruptDisabledHard = 1;
  vector->interruptDisabledSoft = 1;
  vector->interruptRegistered = 1;

  IOUnlock(vector->interruptLock);

  if (enabledCPUs != numCPUs) {
    assert_wait(this, THREAD_UNINT);
    thread_block(THREAD_CONTINUE_NULL);
  }

  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
                                                    int /*source*/,
                                                    int *interruptType)
{
  if (interruptType == 0) return kIOReturnBadArgument;

  *interruptType = kIOInterruptTypeLevel;

  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
  ml_cause_interrupt();
  return kIOReturnSuccess;
}

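// handleInterrupt() is the handler installed on each CPU by
// enableCPUInterrupt(); the incoming source is the CPU number, and the call
// is forwarded to whatever handler was registered for that vector.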
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
  IOInterruptVector *vector;

  vector = &vectors[source];

  if (!vector->interruptRegistered) return kIOReturnInvalid;

  vector->handler(vector->target, vector->refCon,
                  vector->nub, vector->source);

  return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */