/*
 * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved.
 *
 * DRI: Josh de Cesare
 *
 */

extern "C" {
#include <machine/machine_routines.h>
#include <pexpert/pexpert.h>
}

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOCPU.h>


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

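/*
 * C entry points used by the machine-dependent CPU code.  Each routine
 * treats the opaque cpu_id_t as the registered IOCPU object for that
 * processor and, after an OSDynamicCast type check, forwards the call to
 * the matching virtual method.
 */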
kern_return_t PE_cpu_start(cpu_id_t target,
                           vm_offset_t start_paddr, vm_offset_t arg_paddr)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU == 0) return KERN_FAILURE;
  return targetCPU->startCPU(start_paddr, arg_paddr);
}

void PE_cpu_halt(cpu_id_t target)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->haltCPU();
}

void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
{
  IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source);
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU);
}

void PE_cpu_machine_init(cpu_id_t target, boolean_t boot)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->initCPU(boot);
}

void PE_cpu_machine_quiesce(cpu_id_t target)
{
  IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

  if (targetCPU) targetCPU->quiesceCPU();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static OSArray *gIOCPUs;
static const OSSymbol *gIOCPUStateKey;
static OSString *gIOCPUStateNames[kIOCPUStateCount];

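/*
 * Sleep/wake helper: halts every running CPU, walking gIOCPUs in reverse
 * so the boot processor (index 0) goes down last, then restarts any
 * stopped CPU other than the boot processor once execution resumes.
 */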
void IOCPUSleepKernel(void)
{
  long cnt, numCPUs;
  IOCPU *target;

  numCPUs = gIOCPUs->getCount();

  // Sleep the CPUs.
  cnt = numCPUs;
  while (cnt--) {
    target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
    if (target->getCPUState() == kIOCPUStateRunning) {
      target->haltCPU();
    }
  }

  // Wake the other CPUs.
  for (cnt = 1; cnt < numCPUs; cnt++) {
    target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
    if (target->getCPUState() == kIOCPUStateStopped) {
      processor_start(target->getMachProcessor());
    }
  }
}

void IOCPU::initCPUs(void)
{
  if (gIOCPUs == 0) {
    gIOCPUs = OSArray::withCapacity(1);

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    gIOCPUStateNames[kIOCPUStateUnregistered] =
      OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
      OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
      OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
      OSString::withCStringNoCopy("Running");
  }
}

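/*
 * Adds this CPU to the global gIOCPUs group and publishes the bus, clock
 * and timebase frequencies on the provider nub; the bus and cpu values
 * are stored as 32-bit data when they fit and as 64-bit data otherwise.
 */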
bool IOCPU::start(IOService *provider)
{
  OSData *busFrequency, *cpuFrequency, *timebaseFrequency;

  if (!super::start(provider)) return false;

  initCPUs();

  _cpuGroup = gIOCPUs;
  cpuNub = provider;

  gIOCPUs->setObject(this);

  // Correct the bus, cpu and timebase frequencies in the device tree.
  if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
    busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
  } else {
    busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
  }
  provider->setProperty("bus-frequency", busFrequency);
  busFrequency->release();

  if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
    cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
  } else {
    cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
  }
  provider->setProperty("clock-frequency", cpuFrequency);
  cpuFrequency->release();

  timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
  provider->setProperty("timebase-frequency", timebaseFrequency);
  timebaseFrequency->release();

  super::setProperty("IOCPUID", (UInt32)this, 32);

  setCPUNumber(0);
  setCPUState(kIOCPUStateUnregistered);

  return true;
}

OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
{
  if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];

  return super::getProperty(aKey);
}

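/*
 * Writes to the IOCPUState property accept only "running" or "stopped"
 * and start or halt the processor accordingly.  CPU 0 (the boot
 * processor) is never allowed to change state this way.
 */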
bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
{
  OSString *stateStr;

  if (aKey == gIOCPUStateKey) {
    stateStr = OSDynamicCast(OSString, anObject);
    if (stateStr == 0) return false;

    if (_cpuNumber == 0) return false;

    if (stateStr->isEqualTo("running")) {
      if (_cpuState == kIOCPUStateStopped) {
        processor_start(machProcessor);
      } else if (_cpuState != kIOCPUStateRunning) {
        return false;
      }
    } else if (stateStr->isEqualTo("stopped")) {
      if (_cpuState == kIOCPUStateRunning) {
        haltCPU();
      } else if (_cpuState != kIOCPUStateStopped) {
        return false;
      }
    } else return false;

    return true;
  }

  return super::setProperty(aKey, anObject);
}

bool IOCPU::serializeProperties(OSSerialize *serialize) const
{
  super::setProperty(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);

  return super::serializeProperties(serialize);
}

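/*
 * User-level entry point for state changes: requires administrator
 * privilege, then forwards the requested IOCPUState value to setProperty().
 */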
IOReturn IOCPU::setProperties(OSObject *properties)
{
  OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
  OSString *stateStr;
  IOReturn result;

  if (dict == 0) return kIOReturnUnsupported;

  stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
  if (stateStr != 0) {
    result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
    if (result != kIOReturnSuccess) return result;

    if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

    return kIOReturnUnsupported;
  }

  return kIOReturnUnsupported;
}

void IOCPU::signalCPU(IOCPU */*target*/)
{
}

void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}

UInt32 IOCPU::getCPUNumber(void)
{
  return _cpuNumber;
}

void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
  _cpuNumber = cpuNumber;
  super::setProperty("IOCPUNumber", _cpuNumber, 32);
}

UInt32 IOCPU::getCPUState(void)
{
  return _cpuState;
}

void IOCPU::setCPUState(UInt32 cpuState)
{
  if (cpuState < kIOCPUStateCount) {
    _cpuState = cpuState;
  }
}

OSArray *IOCPU::getCPUGroup(void)
{
  return _cpuGroup;
}

UInt32 IOCPU::getCPUGroupSize(void)
{
  return _cpuGroup->getCount();
}

processor_t IOCPU::getMachProcessor(void)
{
  return machProcessor;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

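/*
 * Allocates one interrupt vector (and lock) per CPU and reports the
 * maximum CPU count to the machine layer via ml_init_max_cpus().
 */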
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
  int cnt;

  if (!super::init()) return kIOReturnInvalid;

  numCPUs = sources;

  cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *));
  if (cpus == 0) return kIOReturnNoMemory;
  bzero(cpus, numCPUs * sizeof(IOCPU *));

  vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector));
  if (vectors == 0) return kIOReturnNoMemory;
  bzero(vectors, numCPUs * sizeof(IOInterruptVector));

  // Allocate locks for the vectors.
  for (cnt = 0; cnt < numCPUs; cnt++) {
    vectors[cnt].interruptLock = IOLockAlloc();
    if (vectors[cnt].interruptLock == NULL) {
      // On failure, free any locks that were successfully allocated.
      for (cnt = 0; cnt < numCPUs; cnt++) {
        if (vectors[cnt].interruptLock != NULL)
          IOLockFree(vectors[cnt].interruptLock);
      }
      return kIOReturnNoResources;
    }
  }

  ml_init_max_cpus(numCPUs);

  return kIOReturnSuccess;
}

void IOCPUInterruptController::registerCPUInterruptController(void)
{
  registerService();

  getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                             this);
}

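/*
 * Publishes matching IOInterruptControllers / IOInterruptSpecifiers arrays
 * on the given nub, so that interrupt source N on that nub maps to vector
 * N of this controller.
 */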
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
  int cnt;
  OSArray *controller;
  OSArray *specifier;
  OSData *tmpData;
  long tmpLong;

  // Create the interrupt specifier array.
  specifier = OSArray::withCapacity(numCPUs);
  for (cnt = 0; cnt < numCPUs; cnt++) {
    tmpLong = cnt;
    tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
    specifier->setObject(tmpData);
    tmpData->release();
  }

  // Create the interrupt controller array.
  controller = OSArray::withCapacity(numCPUs);
  for (cnt = 0; cnt < numCPUs; cnt++) {
    controller->setObject(gPlatformInterruptControllerName);
  }

  // Put the two arrays into the property table.
  service->setProperty(gIOInterruptControllersKey, controller);
  service->setProperty(gIOInterruptSpecifiersKey, specifier);
  controller->release();
  specifier->release();
}

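/*
 * Installs the low-level handler for one CPU and, once every CPU has
 * checked in, wakes any thread blocked in registerInterrupt().
 */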
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
  ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this,
                               (IOInterruptHandler)&IOCPUInterruptController::handleInterrupt, 0);

  enabledCPUs++;

  if (enabledCPUs == numCPUs) thread_wakeup(this);
}

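/*
 * Registers a per-CPU interrupt handler on the vector named by 'source'.
 * If some CPUs have not yet enabled their interrupts, the caller blocks
 * here until enableCPUInterrupt() wakes it.
 */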
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
  IOInterruptVector *vector;

  if (source >= numCPUs) return kIOReturnNoResources;

  vector = &vectors[source];

  // Get the lock for this vector.
  IOTakeLock(vector->interruptLock);

  // Make sure the vector is not in use.
  if (vector->interruptRegistered) {
    IOUnlock(vector->interruptLock);
    return kIOReturnNoResources;
  }

  // Fill in vector with the client's info.
  vector->handler = handler;
  vector->nub = nub;
  vector->source = source;
  vector->target = target;
  vector->refCon = refCon;

  // Get the vector ready. It starts hard disabled.
  vector->interruptDisabledHard = 1;
  vector->interruptDisabledSoft = 1;
  vector->interruptRegistered = 1;

  IOUnlock(vector->interruptLock);

  if (enabledCPUs != numCPUs) {
    assert_wait(this, THREAD_UNINT);
    thread_block(THREAD_CONTINUE_NULL);
  }

  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
                                                    int /*source*/,
                                                    int *interruptType)
{
  if (interruptType == 0) return kIOReturnBadArgument;

  *interruptType = kIOInterruptTypeLevel;

  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
  // ml_set_interrupts_enabled(true);
  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
  // ml_set_interrupts_enabled(false);
  return kIOReturnSuccess;
}

IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
  ml_cause_interrupt();
  return kIOReturnSuccess;
}

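/*
 * Dispatches an incoming interrupt to the handler registered for the
 * corresponding per-CPU vector, if any.
 */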
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
  IOInterruptVector *vector;

  vector = &vectors[source];

  if (!vector->interruptRegistered) return kIOReturnInvalid;

  vector->handler(vector->target, vector->refCon,
                  vector->nub, vector->source);

  return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */