/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 | ||
/*
 * CPU-specific power management support.
 *
 * Implements the "wrappers" to the KEXT.
 */
#include <i386/machine_routines.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/pmap.h>
#include <i386/asm.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <kern/pms.h>
#include <kern/processor.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
#include <i386/rtclock.h>
47 | |
48 | extern int disableConsoleOutput; | |
49 | ||
50 | decl_simple_lock_data(,pm_init_lock); | |
51 | ||
52 | /* | |
53 | * The following is set when the KEXT loads and initializes. | |
54 | */ | |
55 | pmDispatch_t *pmDispatch = NULL; | |
56 | ||
57 | /* | |
58 | * Current power management states (for use until KEXT is loaded). | |
59 | */ | |
60 | static pmInitState_t pmInitState; | |
61 | ||
2d21ac55 A |
62 | static uint32_t pmInitDone = 0; |
63 | ||
0c530ab8 A |
64 | /* |
65 | * Nap control variables: | |
66 | */ | |
0c530ab8 | 67 | uint32_t forcenap = 0; /* Force nap (fn) boot-arg controls */ |
0c530ab8 A |
68 | |
69 | /* | |
2d21ac55 | 70 | * Do any initialization needed |
0c530ab8 A |
71 | */ |
72 | void | |
2d21ac55 | 73 | pmsInit(void) |
0c530ab8 | 74 | { |
2d21ac55 | 75 | static int initialized = 0; |
0c530ab8 | 76 | |
0c530ab8 A |
77 | /* |
78 | * Initialize some of the initial state to "uninitialized" until | |
79 | * it gets set with something more useful. This allows the KEXT | |
80 | * to determine if the initial value was actually set to something. | |
81 | */ | |
2d21ac55 A |
82 | if (!initialized) { |
83 | pmInitState.PState = -1; | |
84 | pmInitState.PLimit = -1; | |
85 | pmInitState.maxBusDelay = -1; | |
86 | initialized = 1; | |
87 | } | |
0c530ab8 | 88 | |
2d21ac55 A |
89 | if (pmDispatch != NULL && pmDispatch->pmsInit != NULL) |
90 | (*pmDispatch->pmsInit)(); | |
0c530ab8 A |
91 | } |
92 | ||
93 | /* | |
2d21ac55 A |
94 | * Start the power management stepper on all processors |
95 | * | |
96 | * All processors must be parked. This should be called when the hardware | |
97 | * is ready to step. Probably only at boot and after wake from sleep. | |
98 | * | |
0c530ab8 A |
99 | */ |
100 | void | |
2d21ac55 | 101 | pmsStart(void) |
0c530ab8 | 102 | { |
2d21ac55 A |
103 | if (pmDispatch != NULL && pmDispatch->pmsStart != NULL) |
104 | (*pmDispatch->pmsStart)(); | |
0c530ab8 A |
105 | } |
106 | ||
107 | /* | |
2d21ac55 A |
108 | * Park the stepper execution. This will force the stepper on this |
109 | * processor to abandon its current step and stop. No changes to the | |
110 | * hardware state is made and any previous step is lost. | |
111 | * | |
112 | * This is used as the initial state at startup and when the step table | |
113 | * is being changed. | |
114 | * | |
0c530ab8 A |
115 | */ |
116 | void | |
2d21ac55 | 117 | pmsPark(void) |
0c530ab8 | 118 | { |
2d21ac55 A |
119 | if (pmDispatch != NULL && pmDispatch->pmsPark != NULL) |
120 | (*pmDispatch->pmsPark)(); | |
0c530ab8 A |
121 | } |
122 | ||
123 | /* | |
2d21ac55 A |
124 | * Control the Power Management Stepper. |
125 | * Called from user state by the superuser. | |
126 | * Interrupts disabled. | |
127 | * | |
128 | * This interface is deprecated and is now a no-op. | |
0c530ab8 | 129 | */ |
2d21ac55 A |
130 | kern_return_t |
131 | pmsControl(__unused uint32_t request, __unused user_addr_t reqaddr, | |
132 | __unused uint32_t reqsize) | |
0c530ab8 | 133 | { |
2d21ac55 | 134 | return(KERN_SUCCESS); |
0c530ab8 A |
135 | } |
136 | ||
137 | /* | |
2d21ac55 | 138 | * Broadcast a change to all processors including ourselves. |
0c530ab8 | 139 | * |
2d21ac55 | 140 | * Interrupts disabled. |
0c530ab8 A |
141 | */ |
142 | void | |
2d21ac55 | 143 | pmsRun(uint32_t nstep) |
0c530ab8 | 144 | { |
2d21ac55 A |
145 | if (pmDispatch != NULL && pmDispatch->pmsRun != NULL) |
146 | (*pmDispatch->pmsRun)(nstep); | |
0c530ab8 A |
147 | } |
148 | ||
149 | /* | |
2d21ac55 A |
150 | * Build the tables needed for the stepper. This includes both the step |
151 | * definitions and the step control table. | |
152 | * | |
153 | * We most absolutely need to be parked before this happens because we're | |
154 | * going to change the table. We also have to be complte about checking | |
155 | * for errors. A copy is always made because we don't want to be crippled | |
156 | * by not being able to change the table or description formats. | |
157 | * | |
158 | * We pass in a table of external functions and the new stepper def uses | |
159 | * the corresponding indexes rather than actual function addresses. This | |
160 | * is done so that a proper table can be built with the control syscall. | |
161 | * It can't supply addresses, so the index has to do. We internalize the | |
162 | * table so our caller does not need to keep it. Note that passing in a 0 | |
163 | * will use the current function table. Also note that entry 0 is reserved | |
164 | * and must be 0, we will check and fail the build. | |
165 | * | |
166 | * The platformData parameter is a 32-bit word of data that is passed unaltered | |
167 | * to the set function. | |
168 | * | |
169 | * The queryFunc parameter is the address of a function that will return the | |
170 | * current state of the platform. The format of the data returned is the same | |
171 | * as the platform specific portions of pmsSetCmd, i.e., pmsXClk, pmsVoltage, | |
172 | * and any part of pmsPowerID that is maintained by the platform hardware | |
173 | * (an example would be the values of the gpios that correspond to pmsPowerID). | |
174 | * The value should be constructed by querying hardware rather than returning | |
175 | * a value cached by software. One of the intents of this function is to help | |
176 | * recover lost or determine initial power states. | |
177 | * | |
0c530ab8 | 178 | */ |
2d21ac55 A |
179 | kern_return_t |
180 | pmsBuild(pmsDef *pd, uint32_t pdsize, pmsSetFunc_t *functab, | |
181 | uint32_t platformData, pmsQueryFunc_t queryFunc) | |
0c530ab8 | 182 | { |
2d21ac55 A |
183 | kern_return_t rc = 0; |
184 | ||
185 | if (pmDispatch != NULL && pmDispatch->pmsBuild != NULL) | |
186 | rc = (*pmDispatch->pmsBuild)(pd, pdsize, functab, | |
187 | platformData, queryFunc); | |
188 | ||
189 | return(rc); | |
0c530ab8 A |
190 | } |
191 | ||
2d21ac55 | 192 | |
0c530ab8 A |
193 | /* |
194 | * Load a new ratio/VID table. | |
195 | * | |
196 | * Note that this interface is specific to the Intel SpeedStep implementation. | |
197 | * It is expected that this will only be called once to override the default | |
198 | * ratio/VID table when the platform starts. | |
199 | * | |
200 | * Normally, the table will need to be replaced at the same time that the | |
201 | * stepper program proper is replaced, as the PState indices from an old | |
202 | * program may no longer be valid. When replacing the default program this | |
203 | * should not be a problem as any new table will have at least two PState | |
204 | * entries and the default program only references P0 and P1. | |
205 | */ | |
206 | kern_return_t | |
207 | pmsCPULoadVIDTable(uint16_t *tablep, int nstates) | |
208 | { | |
209 | if (pmDispatch != NULL && pmDispatch->pmsCPULoadVIDTable != NULL) | |
210 | return((*pmDispatch->pmsCPULoadVIDTable)(tablep, nstates)); | |
211 | else { | |
212 | int i; | |
213 | ||
214 | if (nstates > MAX_PSTATES) | |
215 | return(KERN_FAILURE); | |
216 | ||
217 | for (i = 0; i < nstates; i += 1) | |
218 | pmInitState.VIDTable[i] = tablep[i]; | |
219 | } | |
220 | return(KERN_SUCCESS); | |
221 | } | |
222 | ||
223 | /* | |
224 | * Set the (global) PState limit. CPUs will not be permitted to run at | |
225 | * a lower (more performant) PState than this. | |
226 | */ | |
227 | kern_return_t | |
228 | pmsCPUSetPStateLimit(uint32_t limit) | |
229 | { | |
230 | if (pmDispatch != NULL && pmDispatch->pmsCPUSetPStateLimit != NULL) | |
231 | return((*pmDispatch->pmsCPUSetPStateLimit)(limit)); | |
232 | ||
233 | pmInitState.PLimit = limit; | |
234 | return(KERN_SUCCESS); | |
235 | } | |
236 | ||
237 | /* | |
238 | * Initialize the Cstate change code. | |
239 | */ | |
240 | void | |
241 | power_management_init(void) | |
242 | { | |
2d21ac55 | 243 | static boolean_t initialized = FALSE; |
0c530ab8 A |
244 | |
245 | /* | |
246 | * Initialize the lock for the KEXT initialization. | |
247 | */ | |
2d21ac55 A |
248 | if (!initialized) { |
249 | simple_lock_init(&pm_init_lock, 0); | |
250 | initialized = TRUE; | |
251 | } | |
0c530ab8 A |
252 | |
253 | if (pmDispatch != NULL && pmDispatch->cstateInit != NULL) | |
254 | (*pmDispatch->cstateInit)(); | |
255 | } | |
256 | ||
0c530ab8 A |
257 | /* |
258 | * ACPI calls the following routine to set/update mwait hints. A table | |
259 | * (possibly null) specifies the available Cstates and their hints, all | |
260 | * other states are assumed to be invalid. ACPI may update available | |
261 | * states to change the nap policy (for example, while AC power is | |
262 | * available). | |
263 | */ | |
264 | kern_return_t | |
265 | Cstate_table_set(Cstate_hint_t *tablep, unsigned int nstates) | |
266 | { | |
267 | if (forcenap) | |
268 | return(KERN_SUCCESS); | |
269 | ||
270 | if (pmDispatch != NULL && pmDispatch->cstateTableSet != NULL) | |
271 | return((*pmDispatch->cstateTableSet)(tablep, nstates)); | |
272 | else { | |
273 | unsigned int i; | |
274 | ||
275 | for (i = 0; i < nstates; i += 1) { | |
276 | pmInitState.CStates[i].number = tablep[i].number; | |
277 | pmInitState.CStates[i].hint = tablep[i].hint; | |
278 | } | |
279 | ||
280 | pmInitState.CStatesCount = nstates; | |
281 | } | |
282 | return(KERN_SUCCESS); | |
283 | } | |
284 | ||
0c530ab8 A |
285 | /* |
286 | * Called when the CPU is idle. It will choose the best C state to | |
287 | * be in. | |
288 | */ | |
289 | void | |
2d21ac55 | 290 | machine_idle_cstate(boolean_t halted) |
0c530ab8 | 291 | { |
2d21ac55 A |
292 | if (pmInitDone |
293 | && pmDispatch != NULL | |
294 | && pmDispatch->cstateMachineIdle != NULL) | |
295 | (*pmDispatch->cstateMachineIdle)(!halted ? | |
296 | 0x7FFFFFFFFFFFFFFFULL : 0ULL); | |
297 | else if (halted) { | |
298 | /* | |
299 | * If no power managment and a processor is taken off-line, | |
300 | * then invalidate the cache and halt it (it will not be able | |
301 | * to be brought back on-line without resetting the CPU). | |
302 | */ | |
303 | __asm__ volatile ( "wbinvd; hlt" ); | |
304 | } else { | |
305 | /* | |
306 | * If no power management, re-enable interrupts and halt. | |
307 | * This will keep the CPU from spinning through the scheduler | |
308 | * and will allow at least some minimal power savings (but it | |
309 | * may cause problems in some MP configurations w.r.t to the | |
310 | * APIC stopping during a P-State transition). | |
311 | */ | |
312 | __asm__ volatile ( "sti; hlt" ); | |
313 | } | |
314 | } | |
315 | ||
316 | /* | |
317 | * Called when the CPU is to be halted. It will choose the best C-State | |
318 | * to be in. | |
319 | */ | |
320 | void | |
321 | pmCPUHalt(uint32_t reason) | |
322 | { | |
323 | ||
324 | switch (reason) { | |
325 | case PM_HALT_DEBUG: | |
326 | __asm__ volatile ("wbinvd; hlt"); | |
327 | break; | |
328 | ||
329 | case PM_HALT_PANIC: | |
330 | __asm__ volatile ("cli; wbinvd; hlt"); | |
331 | break; | |
332 | ||
333 | case PM_HALT_NORMAL: | |
334 | default: | |
335 | __asm__ volatile ("cli"); | |
336 | ||
337 | if (pmInitDone | |
338 | && pmDispatch != NULL | |
339 | && pmDispatch->pmCPUHalt != NULL) { | |
340 | (*pmDispatch->pmCPUHalt)(); | |
341 | } else { | |
342 | cpu_data_t *cpup = current_cpu_datap(); | |
343 | ||
344 | /* | |
345 | * If no power managment and a processor is taken off-line, | |
346 | * then invalidate the cache and halt it (it will not be able | |
347 | * to be brought back on-line without resetting the CPU). | |
348 | */ | |
349 | __asm__ volatile ("wbinvd"); | |
350 | cpup->lcpu.halted = TRUE; | |
351 | __asm__ volatile ( "wbinvd; hlt" ); | |
352 | } | |
353 | break; | |
0c530ab8 A |
354 | } |
355 | } | |
356 | ||
2d21ac55 A |
357 | /* |
358 | * Called to initialize the power management structures for the CPUs. | |
359 | */ | |
360 | void | |
361 | pmCPUStateInit(void) | |
362 | { | |
363 | if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL) | |
364 | (*pmDispatch->pmCPUStateInit)(); | |
365 | } | |
366 | ||
367 | static void | |
368 | pmInitComplete(void) | |
369 | { | |
370 | pmInitDone = 1; | |
371 | } | |
372 | ||
373 | static x86_lcpu_t * | |
374 | pmGetLogicalCPU(int cpu) | |
0c530ab8 | 375 | { |
2d21ac55 A |
376 | return(cpu_to_lcpu(cpu)); |
377 | } | |
378 | ||
379 | static x86_lcpu_t * | |
380 | pmGetMyLogicalCPU(void) | |
381 | { | |
382 | cpu_data_t *cpup = current_cpu_datap(); | |
0c530ab8 | 383 | |
2d21ac55 A |
384 | return(&cpup->lcpu); |
385 | } | |
386 | ||
387 | static x86_core_t * | |
388 | pmGetCore(int cpu) | |
389 | { | |
390 | return(cpu_to_core(cpu)); | |
0c530ab8 A |
391 | } |
392 | ||
2d21ac55 A |
393 | static x86_core_t * |
394 | pmGetMyCore(void) | |
0c530ab8 | 395 | { |
2d21ac55 | 396 | cpu_data_t *cpup = current_cpu_datap(); |
0c530ab8 | 397 | |
2d21ac55 | 398 | return(cpup->lcpu.core); |
0c530ab8 A |
399 | } |
400 | ||
2d21ac55 A |
401 | static x86_pkg_t * |
402 | pmGetPackage(int cpu) | |
0c530ab8 | 403 | { |
2d21ac55 A |
404 | return(cpu_to_package(cpu)); |
405 | } | |
406 | ||
407 | static x86_pkg_t * | |
408 | pmGetMyPackage(void) | |
409 | { | |
410 | cpu_data_t *cpup = current_cpu_datap(); | |
411 | ||
412 | return(cpup->lcpu.core->package); | |
413 | } | |
414 | ||
415 | static void | |
416 | pmLockCPUTopology(int lock) | |
417 | { | |
418 | if (lock) { | |
419 | simple_lock(&x86_topo_lock); | |
420 | } else { | |
421 | simple_unlock(&x86_topo_lock); | |
422 | } | |
0c530ab8 A |
423 | } |
424 | ||
425 | /* | |
2d21ac55 A |
426 | * Called to get the next deadline that has been set by the |
427 | * power management code. | |
0c530ab8 | 428 | */ |
2d21ac55 A |
429 | uint64_t |
430 | pmCPUGetDeadline(cpu_data_t *cpu) | |
431 | { | |
432 | uint64_t deadline = EndOfAllTime; | |
0c530ab8 | 433 | |
2d21ac55 A |
434 | if (pmInitDone |
435 | && pmDispatch != NULL | |
436 | && pmDispatch->GetDeadline != NULL) | |
437 | deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu); | |
438 | ||
439 | return(deadline); | |
0c530ab8 A |
440 | } |
441 | ||
442 | /* | |
2d21ac55 A |
443 | * Called to determine if the supplied deadline or the power management |
444 | * deadline is sooner. Returns which ever one is first. | |
0c530ab8 | 445 | */ |
2d21ac55 A |
446 | uint64_t |
447 | pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline) | |
0c530ab8 | 448 | { |
2d21ac55 A |
449 | if (pmInitDone |
450 | && pmDispatch != NULL | |
451 | && pmDispatch->SetDeadline != NULL) | |
452 | deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline); | |
453 | ||
454 | return(deadline); | |
0c530ab8 A |
455 | } |
456 | ||
0c530ab8 | 457 | /* |
2d21ac55 | 458 | * Called when a power management deadline expires. |
0c530ab8 A |
459 | */ |
460 | void | |
2d21ac55 | 461 | pmCPUDeadline(cpu_data_t *cpu) |
0c530ab8 | 462 | { |
2d21ac55 A |
463 | if (pmInitDone |
464 | && pmDispatch != NULL | |
465 | && pmDispatch->Deadline != NULL) | |
466 | (*pmDispatch->Deadline)(&cpu->lcpu); | |
467 | } | |
468 | ||
469 | /* | |
470 | * Called to get a CPU out of idle. | |
471 | */ | |
472 | boolean_t | |
473 | pmCPUExitIdle(cpu_data_t *cpu) | |
474 | { | |
475 | boolean_t do_ipi; | |
476 | ||
477 | if (pmInitDone | |
478 | && pmDispatch != NULL | |
479 | && pmDispatch->exitIdle != NULL) | |
480 | do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu); | |
481 | else | |
482 | do_ipi = TRUE; | |
483 | ||
484 | return(do_ipi); | |
0c530ab8 A |
485 | } |
486 | ||
2d21ac55 A |
487 | /* |
488 | * Called when a CPU is being restarted after being powered off (as in S3). | |
489 | */ | |
0c530ab8 | 490 | void |
2d21ac55 | 491 | pmCPUMarkRunning(cpu_data_t *cpu) |
0c530ab8 | 492 | { |
2d21ac55 A |
493 | if (pmInitDone |
494 | && pmDispatch != NULL | |
495 | && pmDispatch->markCPURunning != NULL) | |
496 | (*pmDispatch->markCPURunning)(&cpu->lcpu); | |
0c530ab8 A |
497 | } |
498 | ||
2d21ac55 A |
499 | /* |
500 | * Called from the HPET interrupt handler to perform the | |
501 | * necessary power management work. | |
502 | */ | |
0c530ab8 | 503 | void |
2d21ac55 | 504 | pmHPETInterrupt(void) |
0c530ab8 | 505 | { |
2d21ac55 A |
506 | if (pmInitDone |
507 | && pmDispatch != NULL | |
508 | && pmDispatch->HPETInterrupt != NULL) | |
509 | (*pmDispatch->HPETInterrupt)(); | |
510 | } | |
511 | ||
512 | /* | |
513 | * Called to get/set CPU power management state. | |
514 | */ | |
515 | int | |
516 | pmCPUControl(uint32_t cmd, void *datap) | |
517 | { | |
518 | int rc = -1; | |
519 | ||
520 | if (pmDispatch != NULL | |
521 | && pmDispatch->pmCPUControl != NULL) | |
522 | rc = (*pmDispatch->pmCPUControl)(cmd, datap); | |
523 | ||
524 | return(rc); | |
0c530ab8 A |
525 | } |
526 | ||
2d21ac55 A |
527 | /* |
528 | * Set the worst-case time for the C4 to C2 transition. | |
529 | * No longer does anything. | |
530 | */ | |
0c530ab8 | 531 | void |
2d21ac55 | 532 | ml_set_maxsnoop(__unused uint32_t maxdelay) |
0c530ab8 | 533 | { |
0c530ab8 A |
534 | } |
535 | ||
2d21ac55 A |
536 | |
537 | /* | |
538 | * Get the worst-case time for the C4 to C2 transition. Returns nanoseconds. | |
539 | */ | |
540 | unsigned | |
541 | ml_get_maxsnoop(void) | |
542 | { | |
543 | uint64_t max_snoop = 0; | |
544 | ||
545 | if (pmDispatch != NULL | |
546 | && pmDispatch->getMaxSnoop != NULL) | |
547 | max_snoop = pmDispatch->getMaxSnoop(); | |
548 | ||
549 | return((unsigned)(max_snoop & 0xffffffff)); | |
550 | } | |
551 | ||
552 | ||
553 | uint32_t | |
554 | ml_get_maxbusdelay(void) | |
555 | { | |
556 | uint64_t max_delay = 0; | |
557 | ||
558 | if (pmDispatch != NULL | |
559 | && pmDispatch->getMaxBusDelay != NULL) | |
560 | max_delay = pmDispatch->getMaxBusDelay(); | |
561 | ||
562 | return((uint32_t)(max_delay & 0xffffffff)); | |
563 | } | |
564 | ||
565 | /* | |
566 | * Set the maximum delay time allowed for snoop on the bus. | |
567 | * | |
568 | * Note that this value will be compared to the amount of time that it takes | |
569 | * to transition from a non-snooping power state (C4) to a snooping state (C2). | |
570 | * If maxBusDelay is less than C4C2SnoopDelay, | |
571 | * we will not enter the lowest power state. | |
572 | */ | |
0c530ab8 | 573 | void |
2d21ac55 | 574 | ml_set_maxbusdelay(uint32_t mdelay) |
0c530ab8 | 575 | { |
2d21ac55 A |
576 | uint64_t maxdelay = mdelay; |
577 | ||
578 | if (pmDispatch != NULL | |
579 | && pmDispatch->setMaxBusDelay != NULL) | |
580 | pmDispatch->setMaxBusDelay(maxdelay); | |
581 | else | |
582 | pmInitState.maxBusDelay = maxdelay; | |
0c530ab8 A |
583 | } |
584 | ||
2d21ac55 A |
585 | /* |
586 | * Put a CPU into "safe" mode with respect to power. | |
587 | * | |
588 | * Some systems cannot operate at a continuous "normal" speed without | |
589 | * exceeding the thermal design. This is called per-CPU to place the | |
590 | * CPUs into a "safe" operating mode. | |
591 | */ | |
0c530ab8 | 592 | void |
2d21ac55 A |
593 | pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags) |
594 | { | |
595 | if (pmDispatch != NULL | |
596 | && pmDispatch->pmCPUSafeMode != NULL) | |
597 | pmDispatch->pmCPUSafeMode(lcpu, flags); | |
598 | else { | |
599 | /* | |
600 | * Do something reasonable if the KEXT isn't present. | |
601 | * | |
602 | * We only look at the PAUSE and RESUME flags. The other flag(s) | |
603 | * will not make any sense without the KEXT, so just ignore them. | |
604 | * | |
605 | * We set the halted flag in the LCPU structure to indicate | |
606 | * that this CPU isn't to do anything. If it's the CPU we're | |
607 | * currently running on, then spin until the halted flag is | |
608 | * reset. | |
609 | */ | |
610 | if (flags & PM_SAFE_FL_PAUSE) { | |
611 | lcpu->halted = TRUE; | |
612 | if (lcpu == x86_lcpu()) { | |
613 | while (lcpu->halted) | |
614 | cpu_pause(); | |
615 | } | |
616 | } | |
617 | ||
618 | /* | |
619 | * Clear the halted flag for the specified CPU, that will | |
620 | * get it out of it's spin loop. | |
621 | */ | |
622 | if (flags & PM_SAFE_FL_RESUME) { | |
623 | lcpu->halted = FALSE; | |
624 | } | |
625 | } | |
626 | } | |
627 | ||
628 | /* | |
629 | * Returns the root of the package tree. | |
630 | */ | |
631 | static x86_pkg_t * | |
632 | pmGetPkgRoot(void) | |
633 | { | |
634 | return(x86_pkgs); | |
635 | } | |
636 | ||
637 | static boolean_t | |
638 | pmCPUGetHibernate(int cpu) | |
0c530ab8 | 639 | { |
2d21ac55 | 640 | return(cpu_datap(cpu)->cpu_hibernate); |
0c530ab8 A |
641 | } |
642 | ||
2d21ac55 A |
643 | static processor_t |
644 | pmLCPUtoProcessor(int lcpu) | |
645 | { | |
646 | return(cpu_datap(lcpu)->cpu_processor); | |
647 | } | |
648 | ||
649 | /* | |
650 | * Called by the power management kext to register itself and to get the | |
651 | * callbacks it might need into other kernel functions. This interface | |
652 | * is versioned to allow for slight mis-matches between the kext and the | |
653 | * kernel. | |
654 | */ | |
0c530ab8 | 655 | void |
2d21ac55 A |
656 | pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs, |
657 | pmCallBacks_t *callbacks) | |
0c530ab8 | 658 | { |
2d21ac55 A |
659 | if (callbacks != NULL && version == PM_DISPATCH_VERSION) { |
660 | callbacks->InitState = &pmInitState; | |
661 | callbacks->setRTCPop = setPop; | |
662 | callbacks->resyncDeadlines = etimer_resync_deadlines; | |
663 | callbacks->initComplete= pmInitComplete; | |
664 | callbacks->GetLCPU = pmGetLogicalCPU; | |
665 | callbacks->GetCore = pmGetCore; | |
666 | callbacks->GetPackage = pmGetPackage; | |
667 | callbacks->GetMyLCPU = pmGetMyLogicalCPU; | |
668 | callbacks->GetMyCore = pmGetMyCore; | |
669 | callbacks->GetMyPackage= pmGetMyPackage; | |
670 | callbacks->CoresPerPkg = cpuid_info()->cpuid_cores_per_package; | |
671 | callbacks->GetPkgRoot = pmGetPkgRoot; | |
672 | callbacks->LockCPUTopology = pmLockCPUTopology; | |
673 | callbacks->GetHibernate = pmCPUGetHibernate; | |
674 | callbacks->LCPUtoProcessor = pmLCPUtoProcessor; | |
675 | } | |
676 | ||
677 | if (cpuFuncs != NULL) { | |
678 | pmDispatch = cpuFuncs; | |
679 | } | |
0c530ab8 A |
680 | } |
681 | ||
2d21ac55 A |
682 | /* |
683 | * Unregisters the power management functions from the kext. | |
684 | */ | |
0c530ab8 | 685 | void |
2d21ac55 | 686 | pmUnRegister(pmDispatch_t *cpuFuncs) |
0c530ab8 | 687 | { |
2d21ac55 A |
688 | if (cpuFuncs != NULL && pmDispatch == cpuFuncs) { |
689 | pmDispatch = NULL; | |
690 | } | |
0c530ab8 | 691 | } |
2d21ac55 | 692 |