/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <machine/machine_routines.h>
#include <machine/machine_cpu.h>
#ifdef __ppc__
# include <ppc/exception.h>
# include <ppc/misc_protos.h>
#else
# include <i386/cpu_data.h>
# include <i386/misc_protos.h>
#endif
#include <machine/pmap.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <vm/vm_protos.h>

static uint32_t pmsSyncrolator = 0;			/* Only one control operation at a time please */
uint32_t pmsBroadcastWait = 0;				/* Number of outstanding broadcasts */

int pmsInstalled = 0;					/* Power Management Stepper can run and has table installed */
int pmsExperimental = 0;				/* Power Management Stepper in experimental mode */
decl_simple_lock_data(,pmsBuildLock)			/* Make sure only one guy can replace the table at a time */

static pmsDef *altDpmsTab = 0;				/* Alternate step definition table */
static uint32_t altDpmsTabSize = 0;			/* Size of alternate step definition table */

pmsDef pmsDummy = {					/* This is the dummy step for initialization. All it does is park */
	.pmsLimit = 0,					/* Time doesn't matter for a park */
	.pmsStepID = pmsMaxStates - 1,			/* Use the very last ID number for the dummy */
	.pmsSetCmd = pmsParkIt,				/* Force us to be parked */
	.sf.pmsSetFuncInd = 0,				/* No platform call for this one */
	.pmsDown = pmsPrepSleep,			/* We always park */
	.pmsNext = pmsPrepSleep				/* We always park */
};

pmsStat pmsStatsd[4][pmsMaxStates];			/* Generate enough statistics blocks for 4 processors */

pmsCtl pmsCtls = {					/* Power Management Stepper control */
	.pmsStats = &pmsStatsd
};

pmsSetFunc_t pmsFuncTab[pmsSetFuncMax] = {0};		/* This is the function index table */
pmsQueryFunc_t pmsQueryFunc = 0;			/* Pointer to pmsQuery function */
uint32_t pmsPlatformData = 0;				/* Data provided by and passed to platform functions */

#ifdef __ppc__
# define PER_PROC_INFO struct per_proc_info
# define GET_PER_PROC_INFO() getPerProc()
#else
# define PER_PROC_INFO cpu_data_t
# define GET_PER_PROC_INFO() current_cpu_datap()
#endif



/*
 * Do any initialization needed
 */

void pmsInit(void) {

	int i;

	simple_lock_init(&pmsBuildLock, 0);		/* Initialize the build lock */
	for(i = 0; i < pmsMaxStates; i++) pmsCtls.pmsDefs[i] = &pmsDummy;	/* Initialize the table to dummy steps */

	pmsCPUMachineInit();

	return;
}


/*
 * Start the power management stepper on all processors
 *
 * All processors must be parked. This should be called when the hardware
 * is ready to step. Probably only at boot and after wake from sleep.
 *
 */
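/*
 * Hypothetical usage sketch: a boot or wake path would bring the stepper up
 * much like the experimental path in pmsControl() below does, i.e.
 *
 *	pmsCPUConf();		configure the stepper for this machine
 *	pmsStart();		then start everyone stepping
 */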

void pmsStart(void) {

	boolean_t intr;

	if(!pmsInstalled) return;			/* We can't do this if no table installed */

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */
	pmsRun(pmsStartUp);				/* Start running the stepper everywhere */
	(void)ml_set_interrupts_enabled(intr);		/* Restore interruptions */

	return;

}


/*
 * Park the stepper execution. This will force the stepper on this
 * processor to abandon its current step and stop. No changes to the
 * hardware state are made and any previous step is lost.
 *
 * This is used as the initial state at startup and when the step table
 * is being changed.
 *
 */

void pmsPark(void) {

	boolean_t intr;

	if(!pmsInstalled) return;			/* We can't do this if no table installed */

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */
	pmsSetStep(pmsParked, 0);			/* Park the stepper */
	(void)ml_set_interrupts_enabled(intr);		/* Restore interruptions */

	return;

}


/*
 * Steps down to a lower power.
 * Interrupts must be off...
 */

void pmsDown(void) {

	PER_PROC_INFO *pp;
	uint32_t nstate;

	pp = GET_PER_PROC_INFO();			/* Get our per_proc */

	if(!pmsInstalled || pp->pms.pmsState == pmsParked) return;	/* No stepping if parked or not installed */

	nstate = pmsCtls.pmsDefs[pp->pms.pmsState]->pmsDown;	/* Get the downward step */
	pmsSetStep(nstate, 0);				/* Step to it */
	return;
}


/*
 * Steps up to a higher power. The "timer" parameter is true if the
 * step was driven by the pms timer expiring.
 *
 * Interrupts must be off...
 */

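/*
 * Instrumentation for the idle shortcut below: pmsStepIdleTries counts how
 * often the forward search runs, and pmsStepIdleSneaks counts how often it
 * finds a step matching the package state and jumps straight to it.
 */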
int pmsStepIdleSneaks;
int pmsStepIdleTries;

void pmsStep(int timer) {

	PER_PROC_INFO *pp;
	uint32_t nstate;
	uint32_t tstate;
	uint32_t pkgstate;
	int dir;
	int i;

	pp = GET_PER_PROC_INFO();			/* Get our per_proc */

	if(!pmsInstalled || pp->pms.pmsState == pmsParked)
		return;					/* No stepping if parked or not installed */

	/*
	 * Assume a normal step.
	 */
	nstate = pmsCtls.pmsDefs[pp->pms.pmsState]->pmsNext;

	/*
	 * If we are idling and being asked to step up, check to see whether
	 * the package we're in is already at a non-idle power state. If so,
	 * attempt to work out what state that is, and go there directly to
	 * avoid wasting time ramping up.
	 */
	if ((pp->pms.pmsState == pmsIdle)
	    && ((pkgstate = pmsCPUPackageQuery()) != ~(uint32_t)0)) {
		/*
		 * Search forward through the stepper program, but avoid
		 * looping for too long.
		 */
		tstate = nstate;
		pmsStepIdleTries++;
		for (i = 0; i < 32; i++) {
			/*
			 * Compare command with current package state
			 */
			if ((pmsCtls.pmsDefs[tstate]->pmsSetCmd & pmsCPU) == pkgstate) {
				nstate = tstate;
				pmsStepIdleSneaks++;
				break;
			}

			/*
			 * Advance to the next step in the program.
			 */
			if (pmsCtls.pmsDefs[tstate]->pmsNext == tstate)
				break;			/* infinite loop */
			tstate = pmsCtls.pmsDefs[tstate]->pmsNext;
		}
	}

	/*
	 * Default to a step up.
	 */
	dir = 1;

	/*
	 * If we are stepping as a consequence of timer expiry, select the
	 * alternate exit path and note this as a downward step for accounting
	 * purposes.
	 */
	if (timer
	    && (pmsCtls.pmsDefs[pp->pms.pmsState]->pmsSetCmd == pmsDelay)) {
		nstate = pmsCtls.pmsDefs[pp->pms.pmsState]->pmsTDelay;

		/*
		 * Delayed steps are a step down for accounting purposes.
		 */
		dir = 0;
	}

	pmsSetStep(nstate, dir);
	return;
}


/*
 * Set a specific step
 *
 * We do not do statistics if exiting park
 * Interrupts must be off...
 *
 */

void pmsSetStep(uint32_t nstep, int dir) {

	PER_PROC_INFO *pp;
	uint32_t pstate, nCSetCmd, mCSetCmd;
	pmsDef *pnstate, *pcstate;
	uint64_t tb, dur;
	int cpu;

	pp = GET_PER_PROC_INFO();			/* Get our per_proc */
	cpu = cpu_number();				/* Get our processor */

	while(1) {					/* Keep stepping until we get a delay */

		if(pp->pms.pmsCSetCmd & pmsMustCmp) {	/* Do we have to finish the delay before changing? */
			while(mach_absolute_time() < pp->pms.pmsPop);	/* Yes, spin here... */
		}

		if((nstep == pmsParked) || ((uint32_t)pmsCtls.pmsDefs[nstep]->pmsSetCmd == pmsParkIt)) {	/* Are we parking? */

			tb = mach_absolute_time();	/* What time is it? */
			pp->pms.pmsStamp = tb;		/* Show transition now */
			pp->pms.pmsPop = HalfwayToForever;	/* Set the pop way into the future */
			pp->pms.pmsState = pmsParked;	/* Make sure we are parked */
			etimer_resync_deadlines();	/* Cancel our timer if going */
			return;
		}

		pnstate = pmsCtls.pmsDefs[nstep];	/* Point to the state definition */
		pstate = pp->pms.pmsState;		/* Save the current step */
		pp->pms.pmsState = nstep;		/* Set the current to the next step */

		if(pnstate->pmsSetCmd != pmsDelay) {	/* If this is not a delayed state, change the actual hardware now */
			if(pnstate->pmsSetCmd & pmsCngCPU) pmsCPUSet(pnstate->pmsSetCmd);	/* We have some CPU work to do... */
			if((uint32_t)pnstate->sf.pmsSetFunc) pnstate->sf.pmsSetFunc(pnstate->pmsSetCmd, cpu, pmsPlatformData);	/* Tell the platform to set power mode */

			mCSetCmd = pnstate->pmsSetCmd & (pmsCngXClk | pmsCngCPU | pmsCngVolt);	/* Isolate just the change flags */
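			/*
			 * Note: subtracting (mCSetCmd >> 7) turns each change flag that is set into
			 * a mask covering the 7 bits directly below it, which (assuming the pms.h
			 * layout keeps each selector field just beneath its change flag) selects the
			 * whole field that the new command is changing.
			 */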
			mCSetCmd = (mCSetCmd - (mCSetCmd >> 7)) | pmsSync | pmsMustCmp | pmsPowerID;	/* Form mask of bits that come from new command */
			nCSetCmd = pp->pms.pmsCSetCmd & ~mCSetCmd;	/* Clear changing bits */
			nCSetCmd = nCSetCmd | (pnstate->pmsSetCmd & mCSetCmd);	/* Flip on the changing bits and the always copy bits */

			pp->pms.pmsCSetCmd = nCSetCmd;	/* Set it for real */
		}

		tb = mach_absolute_time();		/* What time is it? */
		pp->pms.pmsPop = tb + pnstate->pmsLimit;	/* Set the next pop */

		if((pnstate->pmsSetCmd != pmsDelay) && (pp->pms.pmsCSetCmd & pmsSync) && (pnstate->pmsLimit != 0)) {	/* Is this a synchronous command with a delay? */
			while(mach_absolute_time() < pp->pms.pmsPop);	/* Yes, spin here and wait it out... */
		}

		/*
		 * Gather some statistics
		 */

		dur = tb - pp->pms.pmsStamp;		/* Get the amount of time we were in the old step */
		pp->pms.pmsStamp = tb;			/* Set the new timestamp */
		if(!(pstate == pmsParked)) {		/* Only take stats if we were not parked */
			pcstate = pmsCtls.pmsDefs[pstate];	/* Get the previous step */
			pmsCtls.pmsStats[cpu][pcstate->pmsStepID].stTime[dir] += dur;	/* Accumulate the total time in the old step */
			pmsCtls.pmsStats[cpu][pcstate->pmsStepID].stCnt[dir] += 1;	/* Count transitions */
		}

		/*
		 * See if we are done chaining steps
		 */

		if((pnstate->pmsSetCmd == pmsDelay)
		    || (!(pp->pms.pmsCSetCmd & pmsSync) && (pnstate->pmsLimit != 0))) {	/* Is this a delayed step, or a non-synchronous step with a non-zero delay? */
			etimer_resync_deadlines();	/* Start the timers ticking */
			break;				/* We've stepped as far as we're going to... */
		}

		nstep = pnstate->pmsNext;		/* Chain on to the next */
	}

	return;

}

/*
 * Either park the stepper or force the step on a parked stepper for the local processor only
 *
 */

void pmsRunLocal(uint32_t nstep) {

	PER_PROC_INFO *pp;
	uint32_t lastState;
	int cpu, i;
	boolean_t intr;

	if(!pmsInstalled) return;			/* Ignore this if no step programs installed... */

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	pp = GET_PER_PROC_INFO();			/* Get our per_proc */

	if(nstep == pmsStartUp) {			/* Should we start up? */
		pmsCPUInit();				/* Get us up to full with high voltage and park */
		nstep = pmsNormHigh;			/* Change request to transition to normal high */
	}

	lastState = pp->pms.pmsState;			/* Remember if we are parked now */

	pmsSetStep(nstep, 1);				/* Step to the new state */

	if((lastState == pmsParked) && (pp->pms.pmsState != pmsParked)) {	/* Did we just unpark? */
		cpu = cpu_number();			/* Get our processor */
		for(i = 0; i < pmsMaxStates; i++) {	/* Step through the steps and clear the statistics since we were parked */
			pmsCtls.pmsStats[cpu][i].stTime[0] = 0;	/* Clear accumulated time - downward */
			pmsCtls.pmsStats[cpu][i].stTime[1] = 0;	/* Clear accumulated time - forward */
			pmsCtls.pmsStats[cpu][i].stCnt[0] = 0;	/* Clear transition count - downward */
			pmsCtls.pmsStats[cpu][i].stCnt[1] = 0;	/* Clear transition count - forward */
		}
	}

	(void)ml_set_interrupts_enabled(intr);		/* Restore interruptions */

	return;

}

/*
 * Control the Power Management Stepper.
 * Called from user state by the superuser.
 * Interruptions disabled.
 *
 */
kern_return_t pmsControl(uint32_t request, user_addr_t reqaddr, uint32_t reqsize) {

	uint32_t nstep, result, presult;
	int ret, cpu;
	kern_return_t kret;
	pmsDef *ndefs;
	PER_PROC_INFO *pp;

	pp = GET_PER_PROC_INFO();			/* Get our per_proc */
	cpu = cpu_number();				/* Get our processor */

	if(!is_suser()) {				/* We are better than most, */
		return KERN_FAILURE;			/* so we will only talk to the superuser. */
	}

	if(request >= pmsCFree) {			/* Can we understand the request? */
		return KERN_INVALID_ARGUMENT;		/* What language are these guys talking in, anyway? */
	}

	if(request == pmsCQuery) {			/* Are we just checking? */
		result = pmsCPUQuery() & pmsCPU;	/* Get the processor data and make sure there is no slop */
		presult = 0;				/* Assume nothing */
		if((uint32_t)pmsQueryFunc) presult = pmsQueryFunc(cpu, pmsPlatformData);	/* Go get the platform state */
		result = result | (presult & (pmsXClk | pmsVoltage | pmsPowerID));	/* Merge the platform state with no slop */
		return result;				/* Tell 'em... */
	}

	if(request == pmsCExperimental) {		/* Enter experimental mode? */

		if(pmsInstalled || (pmsExperimental & 1)) {	/* Are we already running or in experimental? */
			return KERN_FAILURE;		/* Fail, since we are already running */
		}

		pmsExperimental |= 1;			/* Flip us into experimental but don't change other flags */

		pmsCPUConf();				/* Configure for this machine */
		pmsStart();				/* Start stepping */
		return KERN_SUCCESS;			/* We are victorious... */

	}

	if(request == pmsCCnfg) {			/* Do some up-front checking before we commit to doing this */
		if((reqsize > (pmsMaxStates * sizeof(pmsDef))) || (reqsize < (pmsFree * sizeof(pmsDef)))) {	/* Check that the size is reasonable */
			return KERN_NO_SPACE;		/* Tell them that they messed up */
		}
	}

	if (request == pmsGCtls) {
		if (reqsize != sizeof(pmsCtls))
			return KERN_FAILURE;
		ret = copyout(&pmsCtls, reqaddr, reqsize);
		return (ret ? KERN_FAILURE : KERN_SUCCESS);
	}

	if (request == pmsGStats) {
		if (reqsize != sizeof(pmsStatsd))	/* request size is fixed */
			return KERN_FAILURE;
		ret = copyout(&pmsStatsd, reqaddr, reqsize);
		return (ret ? KERN_FAILURE : KERN_SUCCESS);	/* All done... */
	}

	/*
	 * We are committed after here. If there are any errors detected, we shouldn't die, but we
	 * will be stuck in park.
	 *
	 * Also, we can possibly end up on another processor after the broadcast.
	 *
	 */

	if(!hw_compare_and_store(0, 1, &pmsSyncrolator)) {	/* Are we already doing this? */
		return KERN_RESOURCE_SHORTAGE;		/* Tell them that we are already busy and to try again */
	}

	// NOTE: We will block in the following code until everyone has finished the prepare

	pmsRun(pmsPrepCng);				/* Get everyone parked and in a proper state for step table changes, including me */

	if(request == pmsCPark) {			/* Is all we're supposed to do park? */
		pmsSyncrolator = 0;			/* Free us up */
		return KERN_SUCCESS;			/* Well, then we're done... */
	}

	switch(request) {				/* Select the routine */

		case pmsCStart:				/* Starts normal stepping */
			nstep = pmsNormHigh;		/* Set the request */
			break;

		case pmsCFLow:				/* Forces low power */
			nstep = pmsLow;			/* Set request */
			break;

		case pmsCFHigh:				/* Forces high power */
			nstep = pmsHigh;		/* Set request */
			break;

		case pmsCCnfg:				/* Loads new stepper program */

			if(!(ndefs = (pmsDef *)kalloc(reqsize))) {	/* Get memory for the whole thing */
				pmsSyncrolator = 0;	/* Free us up */
				return KERN_INVALID_ADDRESS;	/* All done... */
			}

			ret = copyin(reqaddr, (void *)ndefs, reqsize);	/* Get the new config table */
			if(ret) {			/* Hmmm, something went wrong with the copyin */
				kfree((vm_offset_t)ndefs, reqsize);	/* Free up the copied in data */
				pmsSyncrolator = 0;	/* Free us up */
				return KERN_INVALID_ADDRESS;	/* All done... */
			}

			kret = pmsBuild(ndefs, reqsize, 0, 0, 0);	/* Go build and replace the tables. Make sure we keep the old platform stuff */
			if(kret) {			/* Hmmm, something went wrong with the compilation */
				kfree((vm_offset_t)ndefs, reqsize);	/* Free up the copied in data */
				pmsSyncrolator = 0;	/* Free us up */
				return kret;		/* All done... */
			}

			nstep = pmsNormHigh;		/* Set the request */
			break;

		default:
			panic("pmsCntrl: stepper control is so very, very confused = %08X\n", request);

	}

	pmsRun(nstep);					/* Get everyone into step */
	pmsSyncrolator = 0;				/* Free us up */
	return KERN_SUCCESS;				/* All done... */

}

/*
 * Broadcast a change to all processors including ourselves.
 *
 * Interruptions are disabled.
 */

void pmsRun(uint32_t nstep) {

	pmsCPURun(nstep);
}


/*
 * Build the tables needed for the stepper. This includes both the step definitions and the step control table.
 *
 * We most absolutely need to be parked before this happens because we're gonna change the table.
 * We're going to have to be pretty complete about checking for errors.
 * Also, a copy is always made because we don't want to be crippled by not being able to change
 * the table or description formats.
 *
 * We pass in a table of external functions and the new stepper def uses the corresponding
 * indexes rather than actual function addresses. This is done so that a proper table can be
 * built with the control syscall. It can't supply addresses, so the index has to do. We
 * internalize the table so our caller does not need to keep it. Note that passing in a 0
 * will use the current function table. Also note that entry 0 is reserved and must be 0;
 * we check for this and fail the build if it is not.
 *
 * The platformData parameter is a 32-bit word of data that is passed unaltered to the set function.
 *
 * The queryFunc parameter is the address of a function that will return the current state of the platform.
 * The format of the data returned is the same as the platform specific portions of pmsSetCmd, i.e., pmsXClk,
 * pmsVoltage, and any part of pmsPowerID that is maintained by the platform hardware (an example would be
 * the values of the gpios that correspond to pmsPowerID). The value should be constructed by querying
 * hardware rather than returning a value cached by software. One of the intents of this function is to
 * help recover lost power states or to determine initial ones.
 *
 */
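/*
 * Hypothetical usage sketch (not compiled): how a platform component might
 * install its own step program through pmsBuild. The table contents, the
 * function names, and the index assignment below are illustrative assumptions
 * only; the authoritative pmsSetFunc_t and pmsQueryFunc_t prototypes are the
 * ones in pms.h.
 */
#if 0
static void examplePlatformSet(uint32_t setCmd, uint32_t cpu, uint32_t platData) {
	/* Drive the actual frequency/voltage hardware named by setCmd here */
}

static uint32_t examplePlatformQuery(uint32_t cpu, uint32_t platData) {
	/* Read the real hardware state; never return a cached value */
	return 0;
}

static kern_return_t examplePlatformInstall(pmsDef *exampleTab, uint32_t exampleTabSize) {
	pmsSetFunc_t functab[pmsSetFuncMax] = {0};	/* Entry 0 is reserved and must stay 0 */

	functab[1] = examplePlatformSet;		/* Steps would reference this as sf.pmsSetFuncInd = 1 */

	return pmsBuild(exampleTab, exampleTabSize,	/* The definitions are copied, so the caller need not keep them */
	    &functab[0], 0, examplePlatformQuery);	/* platformData of 0 is passed through to every set call */
}
#endif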

kern_return_t pmsBuild(pmsDef *pd, uint32_t pdsize, pmsSetFunc_t *functab, uint32_t platformData, pmsQueryFunc_t queryFunc) {

	int steps, newsize, i, cstp, nstps, oldAltSize, xdsply;
	uint32_t setf;
	uint64_t nlimit;
	pmsDef *newpd, *oldAlt;
	boolean_t intr;

	xdsply = (pmsExperimental & 3) != 0;		/* Turn on kprintfs if requested or in experimental mode */

	if(pdsize % sizeof(pmsDef)) return KERN_INVALID_ARGUMENT;	/* Length not multiple of definition size */

	steps = pdsize / sizeof(pmsDef);		/* Get the number of steps supplied */

	if((steps >= pmsMaxStates) || (steps < pmsFree))	/* Complain if too big or too small */
		return KERN_INVALID_ARGUMENT;		/* Squeak loudly!!! */

	if((uint32_t)functab && (uint32_t)functab[0])	/* Verify that if they supplied a new function table, entry 0 is 0 */
		return KERN_INVALID_ARGUMENT;		/* Fail because they didn't reserve entry 0 */

	if(xdsply) kprintf("\n StepID Down Next HWSel HWfun Limit\n");

	for(i = 0; i < steps; i++) {			/* Step through and verify the definitions */

		if(xdsply) kprintf(" %6d %6d %6d %08X %6d %20lld\n", pd[i].pmsStepID, pd[i].pmsDown,
		    pd[i].pmsNext, pd[i].pmsSetCmd,
		    pd[i].sf.pmsSetFuncInd, pd[i].pmsLimit);

		if((pd[i].pmsLimit != 0) && (pd[i].pmsLimit < 100ULL)) {
			if(xdsply) kprintf("error step %3d: pmsLimit too small\n", i);