/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <cpus.h>
#include <assym.s>
#include <debug.h>
#include <mach/ppc/vm_param.h>
#include <ppc/exception.h>

/* PCI config cycle probing
 *
 * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
 *
 * Read the memory location at physical address paddr.
 * This is part of a device probe, so there is a good chance we will
 * take a machine check here, and we have to be able to handle that.
 * We assume that machine checks are enabled in both the MSR and the HIDs.
 */
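
; Illustrative usage sketch (not part of the original source): how a C caller
; might probe a possible device register. "probe_addr" is a hypothetical
; physical address chosen by the caller; ml_probe_read returns TRUE and stores
; the word through "val" only if the access neither took a machine check nor
; started within 4 bytes of the end of its page.
;
;        unsigned int val;
;
;        if (ml_probe_read(probe_addr, &val)) {
;                /* something answered; val holds the 32-bit value read */
;        } else {
;                /* no device there, or probe_addr too close to the page end */
;        }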

; Force a line boundary here
        .align 5
        .globl EXT(ml_probe_read)

LEXT(ml_probe_read)

        mfsprg r9,2 ; Get feature flags
        mfmsr r0 ; Save the current MSR
        neg r10,r3 ; Number of bytes to end of page
        rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
        rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page boundary
        mr r12,r3 ; Save the load address
        mtcrf 0x04,r9 ; Set the features
        cmplwi cr1,r10,4 ; At least 4 bytes left in page?
        rlwinm r2,r2,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Clear translation
        beq- mprdoit ; We are right on the boundary...
        li r3,0
        bltlr- cr1 ; No, just return failure...

mprdoit:

        bt pfNoMSRirb,mprNoMSR ; No MSR...

        mtmsr r2 ; Translation and all off
        isync ; Toss prefetch
        b mprNoMSRx

mprNoMSR:
        mr r5,r0
        li r0,loadMSR ; Get the MSR setter SC
        mr r3,r2 ; Get new MSR
        sc ; Set it
        mr r0,r5
        li r3,0
mprNoMSRx:

;
; We need to ensure that no more than one BAT register can get a hit.
; There could be repercussions beyond the ken of mortal man.
; It is best not to tempt fate.
;
        li r10,0 ; Clear a register
        mfdbatu r5,0 ; Save DBAT 0 high
        mfdbatl r6,0 ; Save DBAT 0 low
        mfdbatu r7,1 ; Save DBAT 1 high
        mfdbatu r8,2 ; Save DBAT 2 high
        mfdbatu r9,3 ; Save DBAT 3 high

        sync ; Make sure all is well

        mtdbatu 1,r10 ; Invalidate DBAT 1
        mtdbatu 2,r10 ; Invalidate DBAT 2
        mtdbatu 3,r10 ; Invalidate DBAT 3

        rlwinm r10,r12,0,0,14 ; Round down to a 128k boundary
        ori r11,r10,0x32 ; Set uncached, coherent, R/W
        ori r10,r10,2 ; Make the upper half (128k, valid supervisor)
        mtdbatl 0,r11 ; Set lower BAT first
        mtdbatu 0,r10 ; Now the upper
        sync ; Just make sure

        ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation
        mtmsr r11 ; Do it for real
        isync ; Make sure of it

        eieio ; Make sure of all previous accesses
        sync ; Make sure it is all caught up

        lwz r11,0(r12) ; Get it and maybe machine check here

        eieio ; Make sure of ordering again
        sync ; Get caught up yet again
        isync ; Do not go further till we are here

        mtdbatu 0,r5 ; Restore DBAT 0 high
        mtdbatl 0,r6 ; Restore DBAT 0 low
        mtdbatu 1,r7 ; Restore DBAT 1 high
        mtdbatu 2,r8 ; Restore DBAT 2 high
        mtdbatu 3,r9 ; Restore DBAT 3 high
        sync

        li r3,1 ; We made it

        mtmsr r0 ; Restore translation and exceptions
        isync ; Toss speculations

        stw r11,0(r4) ; Save the loaded value
        blr ; Return...

; Force a line boundary here. This means we will be able to check addresses better.
        .align 5
        .globl EXT(ml_probe_read_mck)
LEXT(ml_probe_read_mck)

/* Read physical address
 *
 * unsigned int ml_phys_read_byte(vm_offset_t paddr)
 *
 * Read the byte at physical address paddr. Memory should not be cache inhibited.
 */

; Force a line boundary here
        .align 5
        .globl EXT(ml_phys_read_byte)

LEXT(ml_phys_read_byte)

        mfmsr r10 ; Save the current MSR
        rlwinm r4,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
        rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation

        mtmsr r4 ; Translation and all off
        isync ; Toss prefetch

        lbz r3,0(r3) ; Get the byte
        sync

        mtmsr r10 ; Restore translation and rupts
        isync
        blr

/* Read physical address
 *
 * unsigned int ml_phys_read(vm_offset_t paddr)
 *
 * Read the word at physical address paddr. Memory should not be cache inhibited.
 */

; Force a line boundary here
        .align 5
        .globl EXT(ml_phys_read)

LEXT(ml_phys_read)

        mfmsr r0 ; Save the current MSR
        rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
        rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation

        mtmsr r4 ; Translation and all off
        isync ; Toss prefetch

        lwz r3,0(r3) ; Get the word
        sync

        mtmsr r0 ; Restore translation and rupts
        isync
        blr

/* Write physical address byte
 *
 * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
 *
 * Write the byte at physical address paddr. Memory should not be cache inhibited.
 */

; Force a line boundary here
        .align 5
        .globl EXT(ml_phys_write_byte)

LEXT(ml_phys_write_byte)

        mfmsr r0 ; Save the current MSR
        rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
        rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation

        mtmsr r5 ; Translation and all off
        isync ; Toss prefetch

        stb r4,0(r3) ; Set the byte
        sync

        mtmsr r0 ; Restore translation and rupts
        isync
        blr

/* Write physical address
 *
 * void ml_phys_write(vm_offset_t paddr, unsigned int data)
 *
 * Write the word at physical address paddr. Memory should not be cache inhibited.
 */
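
; Illustrative usage sketch for the ml_phys_read*/ml_phys_write* routines (not
; part of the original source). "reg_pa" is a hypothetical physical address of
; normal, cacheable memory, per the "should not be cache inhibited" note:
;
;        unsigned int w = ml_phys_read(reg_pa);       /* 32-bit read */
;        unsigned int b = ml_phys_read_byte(reg_pa);  /* 8-bit read */
;        ml_phys_write(reg_pa, w | 1);                /* 32-bit write */
;        ml_phys_write_byte(reg_pa, b);               /* 8-bit write */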

; Force a line boundary here
        .align 5
        .globl EXT(ml_phys_write)

LEXT(ml_phys_write)

        mfmsr r0 ; Save the current MSR
        rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions
        rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation

        mtmsr r5 ; Translation and all off
        isync ; Toss prefetch

        stw r4,0(r3) ; Set the word
        sync

        mtmsr r0 ; Restore translation and rupts
        isync
        blr


/* Set interrupts enabled or disabled
 *
 * boolean_t ml_set_interrupts_enabled(boolean_t enable)
 *
 * Set the EE bit to "enable" and return the old value as a boolean.
 */
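
; Illustrative usage sketch (not part of the original source): the usual
; save/disable/restore pattern a C caller would use around a critical section.
;
;        boolean_t old = ml_set_interrupts_enabled(FALSE);  /* disable, remember old state */
;        /* ... critical section ... */
;        (void) ml_set_interrupts_enabled(old);             /* restore the previous state */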

; Force a line boundary here
        .align 5
        .globl EXT(ml_set_interrupts_enabled)

LEXT(ml_set_interrupts_enabled)

        mfsprg r7,0
        lwz r4,PP_INTS_ENABLED(r7)
        mr. r4,r4
        beq- EXT(fake_set_interrupts_enabled)
        mfmsr r5 ; Get the current MSR
        mr r4,r3 ; Save the old value
        rlwinm r3,r5,17,31,31 ; Set return value
        rlwimi r5,r4,15,16,16 ; Insert new EE bit
        andi. r8,r5,lo16(MASK(MSR_EE)) ; Interruptions
        bne CheckPreemption
NoPreemption:
        mtmsr r5 ; Slam enablement
        blr

CheckPreemption:
        lwz r8,PP_NEED_AST(r7)
        lwz r7,PP_CPU_DATA(r7)
        li r6,AST_URGENT
        lwz r8,0(r8)
        lwz r7,CPU_PREEMPTION_LEVEL(r7)
        lis r0,HIGH_ADDR(DoPreemptCall)
        and. r8,r8,r6
        ori r0,r0,LOW_ADDR(DoPreemptCall)
        beq+ NoPreemption
        cmpi cr0, r7, 0
        bne+ NoPreemption
        sc
        mtmsr r5
        blr


/* Emulate a decrementer exception
 *
 * void machine_clock_assist(void)
 *
 */

; Force a line boundary here
        .align 5
        .globl EXT(machine_clock_assist)

LEXT(machine_clock_assist)

        mfsprg r7,0
        lwz r4,PP_INTS_ENABLED(r7)
        mr. r4,r4
        beq- EXT(CreateFakeDEC)
        blr

/* Set machine into idle power-saving mode.
 *
 * void machine_idle_ppc(void)
 *
 * We will use the PPC NAP or DOZE for this.
 * This call always returns. Must be called with spllo (i.e., interruptions
 * enabled).
 *
 */
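
; Illustrative caller sketch (not part of the original source): a hypothetical
; idle loop. Per the comment above, interruptions must be enabled (spllo)
; before each call; the routine naps or dozes and returns after a wakeup.
;
;        for (;;) {
;                (void) ml_set_interrupts_enabled(TRUE);  /* spllo */
;                machine_idle_ppc();                      /* nap/doze until something happens */
;        }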


; Force a line boundary here
        .align 5
        .globl EXT(machine_idle_ppc)

LEXT(machine_idle_ppc)

        mfmsr r3 ; Get the current MSR
        rlwinm r5,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
        mtmsr r5 ; Hold up interruptions for now
        mfsprg r12,0 ; Get the per_proc_info
        mfspr r6,hid0 ; Get the current power-saving mode
        mfsprg r11,2 ; Get CPU specific features
        rlwinm r6,r6,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
        mtcrf 0xC7,r11 ; Get the facility flags

        lis r4,hi16(napm) ; Assume we can nap
        bt pfWillNapb,yesnap ; Yeah, nap is ok...

        lis r4,hi16(dozem) ; Assume we can doze
        bt pfCanDozeb,yesnap ; We can sleep or doze on this machine...

        ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE
        mtmsr r3 ; Turn interruptions back on
        blr ; Leave...

yesnap: mftbu r9 ; Get the upper timebase
        mftb r7 ; Get the lower timebase
        mftbu r8 ; Get the upper one again
        cmplw r9,r8 ; Did the top tick?
        bne- yesnap ; Yeah, need to get it again...
        stw r8,napStamp(r12) ; Set high order time stamp
        stw r7,napStamp+4(r12) ; Set low order nap stamp

;
; We have to open up interruptions here because book 4 says that we should
; turn on only the POW bit and that we should have interrupts enabled.
; The interrupt handler will detect that nap or doze is set if an interrupt
; is taken and set everything up to return directly to machine_idle_ret.
; So, make sure everything we need there is already set up...
;
        ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE
        or r6,r6,r4 ; Set nap or doze
        oris r5,r7,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
        mtspr hid0,r6 ; Set up the HID for nap/doze
        isync ; Make sure it is set
        mtmsr r7 ; Enable for interrupts
        rlwinm. r11,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec?
        beq- minovec ; No...
        dssall ; Stop the streams before we nap/doze

minovec: sync ; Make sure queues are clear
        mtmsr r5 ; Nap or doze
        isync ; Make sure this takes before we proceed
        b minovec ; Loop if POW does not take
;
; Note that the interrupt handler will turn off the nap/doze bits in the hid.
; Also remember that the interrupt handler will force return to here whenever
; the nap/doze bits are set.
;
        .globl EXT(machine_idle_ret)
LEXT(machine_idle_ret)
        mtmsr r7 ; Make sure the MSR is what we want
        isync ; In case we turn on translation

        blr ; Return...

/* Put machine to sleep.
 * This call never returns. We always exit sleep via a soft reset.
 * All external interruptions must be drained at this point and disabled.
 *
 * void ml_ppc_sleep(void)
 *
 * We will use the PPC SLEEP for this.
 *
 * There is one bit of hackery in here: we need to enable for
 * interruptions when we go to sleep and there may be a pending
 * decrementer rupt. So we make the decrementer 0x7FFFFFFF and enable for
 * interruptions. The decrementer rupt vector recognizes this and returns
 * directly back here.
 *
 */

; Force a line boundary here
        .align 5
        .globl EXT(ml_ppc_sleep)

LEXT(ml_ppc_sleep)

#if 0
        mfmsr r5 ; Hack to spin instead of sleep
        rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
        rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
        mtmsr r5 ; No talking
        isync

; No interrupts allowed after we get the savearea

        mfsprg r6,0 ; Get the per_proc
        mfsprg r7,1 ; Get the pending savearea
        stw r7,savedSave(r6) ; Save the savearea for when we wake up

deadsleep: addi r3,r3,1 ; Make analyzer happy
        addi r3,r3,1
        addi r3,r3,1
        b deadsleep ; Die the death of 1000 joys...
#endif

        mfsprg r12,0 ; Get the per_proc_info
        mfspr r4,hid0 ; Get the current power-saving mode
        eqv r10,r10,r10 ; Get all foxes
        mfsprg r11,2 ; Get CPU specific features
        mfmsr r5 ; Get the current MSR
        rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
        rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
        mtdec r10 ; Load decrementer with 0x7FFFFFFF
        isync ; and make sure,
        mfdec r9 ; really sure, it gets there

        mtcrf 0x07,r11 ; Get the cache flags, etc

        oris r4,r4,hi16(sleepm) ; Set sleep
        rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
;
; Note that we need translation off before we set the HID to sleep. Otherwise
; we will ignore any PTE misses that occur and cause an infinite loop.
;
        bt pfNoMSRirb,mpsNoMSR ; No MSR...

        mtmsr r5 ; Translation off
        isync ; Toss prefetch
        b mpsNoMSRx

mpsNoMSR:
        li r0,loadMSR ; Get the MSR setter SC
        mr r3,r5 ; Get new MSR
        sc ; Set it
mpsNoMSRx:

        ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE
        sync
        mtspr hid0,r4 ; Set up the HID to sleep

        mtmsr r3 ; Enable for interrupts to drain decrementer

        add r6,r4,r5 ; Just waste time
        add r6,r6,r4 ; A bit more
        add r6,r6,r5 ; A bit more

        mtmsr r5 ; Interruptions back off
        isync ; Toss prefetch

        mfsprg r7,1 ; Get the pending savearea
        stw r7,savedSave(r12) ; Save the savearea for when we wake up

;
; We are here with translation off, interrupts off, all possible
; interruptions drained off, and a decrementer that will not pop.
;

        bl EXT(cacheInit) ; Clear out the caches. This will leave them on
        bl EXT(cacheDisable) ; Turn off all caches

        mfmsr r5 ; Get the current MSR
        oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
        ; Leave EE off because power goes off shortly

slSleepNow: sync ; Sync it all up
        mtmsr r5 ; Do sleep with interruptions enabled
        isync ; Take a pill
        b slSleepNow ; Go back to sleep if we wake up...


/* Initialize all caches including the TLBs
 *
 * void cacheInit(void)
 *
 * This is used to force the caches to an initial clean state. First, we
 * check if the cache is on; if so, we need to flush the contents to memory.
 * Then we invalidate the L1. Next, we configure and invalidate the L2, etc.
 * Finally we turn on all of the caches.
 *
 * Note that if translation is not disabled when this is called, the TLB will not
 * be completely clear after return.
 *
 */

; Force a line boundary here
        .align 5
        .globl EXT(cacheInit)

LEXT(cacheInit)

        mfsprg r12,0 ; Get the per_proc_info
        mfspr r9,hid0 ; Get the current power-saving mode

        mfsprg r11,2 ; Get CPU specific features
        mfmsr r7 ; Get the current MSR
        rlwinm r4,r9,0,dpm+1,doze-1 ; Clear all possible power-saving modes (also disable DPM)
        rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs)
        rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
        rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
        mtcrf 0x87,r11 ; Get the feature flags
        mtspr hid0,r4 ; Set up the HID

        bt pfNoMSRirb,ciNoMSR ; No MSR...

        mtmsr r5 ; Translation and all off
        isync ; Toss prefetch
        b ciNoMSRx

ciNoMSR:
        li r0,loadMSR ; Get the MSR setter SC
        mr r3,r5 ; Get new MSR
        sc ; Set it
ciNoMSRx:

        bf pfAltivecb,cinoDSS ; No Altivec here...

        dssall ; Stop streams
        sync

cinoDSS: lis r5,hi16(EXT(tlb_system_lock)) ; Get the TLBIE lock
        li r0,128 ; Get number of TLB entries
        ori r5,r5,lo16(EXT(tlb_system_lock)) ; Grab up the bottom part

        li r6,0 ; Start at 0

citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock
        mr. r2,r2 ; Is it locked?
        bne- citlbhang ; It is locked, go wait...
        stwcx. r0,0,r5 ; Try to get it
        bne- citlbhang ; We was beat...

        mtctr r0 ; Set the CTR

cipurgeTLB: tlbie r6 ; Purge this entry
        addi r6,r6,4096 ; Next page
        bdnz cipurgeTLB ; Do them all...

        mtcrf 0x80,r11 ; Set SMP capability
        sync ; Make sure all TLB purges are done
        eieio ; Order, order in the court

        bf pfSMPcapb,cinoSMP ; SMP incapable...

        tlbsync ; Sync all TLBs
        sync
        isync

cinoSMP: stw r2,0(r5) ; Unlock TLBIE lock

        cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache
        bf- cr0_eq,cinoL1 ; No level 1 to flush...
        rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on?
        beq- cinoL1 ; No, no need to flush...

        bf pfL1fab,ciswdl1 ; If no hw flush assist, go do by software...

        mfspr r8,msscr0 ; Get the memory system control register
        oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request

        mtspr msscr0,r8 ; Start the flush operation

ciwdl1f: mfspr r8,msscr0 ; Get the control register again

        rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet?
        bne ciwdl1f ; No, flush is still in progress...
        b ciinvdl1 ; Go invalidate l1...

;
; We need to either make this very complicated or to use ROM for
; the flush. The problem is that if during the following sequence a
; snoop occurs that invalidates one of the lines in the cache, the
; PLRU sequence will be altered making it possible to miss lines
; during the flush. So, we either need to dedicate an area of RAM
; to each processor, lock use of a RAM area, or use ROM. ROM is
; by far the easiest. Note that this is not an issue for machines
; that have hardware flush assists.
;

ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size

        bf 31,cisnlck ; Skip if pfLClck not set...

        mfspr r4,msscr0 ; ?
        rlwinm r6,r4,0,0,l2pfes-1 ; ?
        mtspr msscr0,r6 ; Set it
        sync
        isync

        mfspr r8,ldstcr ; Save the LDSTCR
        li r2,1 ; Get a mask of 0x01
        lis r3,0xFFF0 ; Point to ROM
        rlwinm r11,r0,29,3,31 ; Get the amount of memory to handle all indexes

        li r6,0 ; Start here

cisiniflsh: dcbf r6,r3 ; Flush each line of the range we use
        addi r6,r6,32 ; Bump to the next
        cmplw r6,r0 ; Have we reached the end?
        blt+ cisiniflsh ; Nope, continue initial flush...

        sync ; Make sure it is done

        addi r11,r11,-1 ; Get mask for index wrap
        li r6,0 ; Get starting offset

cislckit: not r5,r2 ; Lock all but 1 way
        rlwimi r5,r8,0,0,23 ; Build LDSTCR
        mtspr ldstcr,r5 ; Lock a way
        sync ; Clear out memory accesses
        isync ; Wait for all


cistouch: lwzx r10,r3,r6 ; Pick up some trash
        addi r6,r6,32 ; Go to the next index
        and. r0,r6,r11 ; See if we are about to do next index
        bne+ cistouch ; Nope, do more...

        sync ; Make sure it is all done
        isync

        sub r6,r6,r11 ; Back up to start + 1
        addi r6,r6,-1 ; Get it right

cisflush: dcbf r3,r6 ; Flush everything out
        addi r6,r6,32 ; Go to the next index
        and. r0,r6,r11 ; See if we are about to do next index
        bne+ cisflush ; Nope, do more...

        sync ; Make sure it is all done
        isync


        rlwinm. r2,r2,1,24,31 ; Shift to next way
        bne+ cislckit ; Do this for all ways...

        mtspr ldstcr,r8 ; Slam back to original
        sync
        isync

        mtspr msscr0,r4 ; ?
        sync
        isync

        b cinoL1 ; Go on to level 2...


cisnlck: rlwinm r2,r0,0,1,30 ; Double cache size
        add r0,r0,r2 ; Get 3 times cache size
        rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines
        lis r3,0xFFF0 ; Dead recon ROM address for now
        mtctr r0 ; Number of lines to flush

ciswfldl1a: lwz r2,0(r3) ; Flush anything else
        addi r3,r3,32 ; Next line
        bdnz ciswfldl1a ; Flush the lot...

ciinvdl1: sync ; Make sure all flushes have been committed

        mfspr r8,hid0 ; Get the HID0 bits
        rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables
        mtspr hid0,r8 ; and turn off L1 cache
        sync ; Make sure all is done
        isync

        ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
        sync
        isync

        mtspr hid0,r8 ; Start the invalidate and turn on cache
        rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
        mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines)
        sync


cinoL1:
;
; Flush and disable the level 2
;
        bf pfL2b,cinol2 ; No level 2 cache to flush

        mfspr r8,l2cr ; Get the L2CR
        lwz r3,pfl2cr(r12) ; Get the L2CR value
        lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get configuration bits
        xor r2,r8,r3 ; Get changing bits?
        ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits
        and. r0,r0,r2 ; Did any change?
        bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...

        bf pfL2fab,ciswfl2 ; Flush not in hardware...

        mr r10,r3 ; Take a copy now

        bf 31,cinol2lck ; Skip if pfLClck not set...

        oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only
        sync
        mtspr l2cr,r10 ; Lock out the cache
        sync
        isync

cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush
        sync ; Make sure everything is done

        mtspr l2cr,r10 ; Request flush

cihwfl2: mfspr r10,l2cr ; Get back the L2CR
        rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over?
        bne+ cihwfl2 ; Nope, keep going...
        b ciinvdl2 ; Flush done, go invalidate L2...

ciswfl2:
        lwz r0,pfl2Size(r12) ; Get the L2 size
        oris r2,r3,hi16(l2dom) ; Set L2 to data only mode

        b ciswfl2doa ; Branch to next line...

        .align 5
ciswfl2doc:
        mtspr l2cr,r2 ; Disable L2
        sync
        isync
        b ciswfl2dod ; It is off, go invalidate it...

ciswfl2doa:
        b ciswfl2dob ; Branch to next...

ciswfl2dob:
        sync ; Finish memory stuff
        isync ; Stop speculation
        b ciswfl2doc ; Jump back up and turn on data only...
ciswfl2dod:
        rlwinm r0,r0,27,5,31 ; Get the number of lines
        lis r10,0xFFF0 ; Dead recon ROM for now
        mtctr r0 ; Set the number of lines

ciswfldl2a: lwz r0,0(r10) ; Load something to flush something
        addi r10,r10,32 ; Next line
        bdnz ciswfldl2a ; Do the lot...

ciinvdl2: rlwinm r3,r3,0,l2e+1,31 ; Clear the enable bit
        b cinla ; Branch to next line...

        .align 5
cinlc: mtspr l2cr,r3 ; Disable L2
        sync
        isync
        b ciinvl2 ; It is off, go invalidate it...

cinla: b cinlb ; Branch to next...

cinlb: sync ; Finish memory stuff
        isync ; Stop speculation
        b cinlc ; Jump back up and turn off cache...

ciinvl2: sync
        isync
        oris r2,r3,hi16(l2im) ; Get the invalidate flag set

        mtspr l2cr,r2 ; Start the invalidate
        sync
        isync
ciinvdl2a: mfspr r2,l2cr ; Get the L2CR
        bf pfL2ib,ciinvdl2b ; Flush not in hardware...
        rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going?
        bne+ ciinvdl2a ; Assume so, this will take a looong time...
        sync
        b cinol2 ; No level 2 cache to flush
ciinvdl2b:
        rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going?
        bne+ ciinvdl2a ; Assume so, this will take a looong time...
        sync
        mtspr l2cr,r3 ; Turn off the invalidate request

cinol2:

;
; Flush and enable the level 3
;
        bf pfL3b,cinol3 ; No level 3 cache to flush

        mfspr r8,l3cr ; Get the L3CR
        lwz r3,pfl3cr(r12) ; Get the L3CR value
        lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits
        xor r2,r8,r3 ; Get changing bits?
        ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits
        and. r0,r0,r2 ; Did any change?
        bne- ciinvdl3 ; Yes, just invalidate and get PLL synced...

        sync ; 7450 book says do this even though not needed
        mr r10,r3 ; Take a copy now

        bf 31,cinol3lck ; Skip if pfL23lck not set...

        oris r10,r10,hi16(l3iom) ; Set instruction-only
        ori r10,r10,lo16(l3donlym) ; Set data-only
        sync
        mtspr l3cr,r10 ; Lock out the cache
        sync
        isync

cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush
        sync ; Make sure everything is done

        mtspr l3cr,r10 ; Request flush

cihwfl3: mfspr r10,l3cr ; Get back the L3CR
        rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
        bne+ cihwfl3 ; Nope, keep going...

ciinvdl3: rlwinm r3,r3,0,l3e+1,31 ; Clear the enable bit
        sync ; Make sure of life, liberty, and justice
        mtspr l3cr,r3 ; Disable L3
        sync

        ori r3,r3,lo16(l3im) ; Get the invalidate flag set

        mtspr l3cr,r3 ; Start the invalidate

ciinvdl3b: mfspr r3,l3cr ; Get the L3CR
        rlwinm. r3,r3,0,l3i,l3i ; Is the invalidate still going?
        bne+ ciinvdl3b ; Assume so...
        sync

        bf pfL3pdetb, ciinvdl3nopdet
        mfspr r3,l3pdet ; ?
        rlwimi r3,r3,28,0,23 ; ?
        oris r3,r3,0xF000 ; ?
        ori r3,r3,0x0080 ; ?
        mtspr l3pdet,r3 ; ?
        isync

ciinvdl3nopdet:
        mfspr r3,l3cr ; Get the L3CR
        rlwinm r3,r3,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
        mtspr l3cr,r3 ; Disable the clock

        li r2,128 ; ?
ciinvdl3c: addi r2,r2,-1 ; ?
        cmplwi r2,0 ; ?
        bne+ ciinvdl3c

        mfspr r10,msssr0 ; ?
        rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ?
        mtspr msssr0,r10 ; ?
        sync

        oris r3,r3,hi16(l3em|l3clkenm) ; Turn on enable bit
        mtspr l3cr,r3 ; Enable it
        sync
cinol3:
        bf pfL2b,cinol2a ; No level 2 cache to enable

        lwz r3,pfl2cr(r12) ; Get the L2CR value
        oris r3,r3,hi16(l2em) ; Turn on enable bit
        mtspr l2cr,r3 ; Enable it
        sync

;
; Invalidate and turn on L1s
;

cinol2a:
        bt 31,cinoexit ; Skip if pfLClck set...

        rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables
        mtspr hid0,r8 ; Turn off dem caches
        sync

        ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
        rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
        sync
        isync

        mtspr hid0,r8 ; Start the invalidate and turn on L1 cache

cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) and restore entry conditions
        sync
        mtmsr r7 ; Restore MSR to entry
        isync
        blr ; Return...


/* Disables all caches
 *
 * void cacheDisable(void)
 *
 * Turns off all caches on the processor. They are not flushed.
 *
 */

; Force a line boundary here
        .align 5
        .globl EXT(cacheDisable)

LEXT(cacheDisable)

        mfsprg r11,2 ; Get CPU specific features
        mtcrf 0x83,r11 ; Set feature flags

        bf pfAltivecb,cdNoAlt ; No vectors...

        dssall ; Stop streams

cdNoAlt: sync

        mfspr r5,hid0 ; Get the hid
        rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables
        mtspr hid0,r5 ; Turn off dem caches
        sync

        bf pfL2b,cdNoL2 ; Skip if no L2...

        mfspr r5,l2cr ; Get the L2
        rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit

        b cinlaa ; Branch to next line...

        .align 5
cinlcc: mtspr l2cr,r5 ; Disable L2
        sync
        isync
        b cdNoL2 ; It is off, we are done...

cinlaa: b cinlbb ; Branch to next...

cinlbb: sync ; Finish memory stuff
        isync ; Stop speculation
        b cinlcc ; Jump back up and turn off cache...

cdNoL2:

        bf pfL3b,cdNoL3 ; Skip down if no L3...

        mfspr r5,l3cr ; Get the L3
        rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit
        rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off clock enable bit
        mtspr l3cr,r5 ; Disable the caches
        sync

cdNoL3:
        blr ; Leave...


/* Initialize processor thermal monitoring
 * void ml_thrm_init(void)
 *
 * Build initial TAU registers and start them all going.
 * We cannot do this at initial startup because we need to have the processor frequency first.
 * And just why is this in assembler when it does not have to be?? Cause I am just too
 * lazy to open up a "C" file, that's why.
 */
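
; Worked example (not part of the original source) of the THRM3 sample-interval
; computation done below, assuming a hypothetical 100MHz bus rate:
;
;        unsigned int cycles_per_us = 100000000 / 1000000 + 1;  /* = 101 */
;        unsigned int sitv = cycles_per_us * 20;                /* = 2020, roughly 20 microseconds */
;        if (sitv > (thrmsitvm >> 1)) sitv = thrmsitvm >> 1;    /* saturate at the field maximum */
;
; The result is shifted into the SITV field of THRM3 and the enable bit (thrmem) is set.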

; Force a line boundary here
        .align 5
        .globl EXT(ml_thrm_init)

LEXT(ml_thrm_init)

        mfsprg r12,0 ; Get the per_proc block
        lis r11,hi16(EXT(gPEClockFrequencyInfo)) ; Get top of processor information
        mfsprg r10,2 ; Get CPU specific features
        ori r11,r11,lo16(EXT(gPEClockFrequencyInfo)) ; Get bottom of processor information
        mtcrf 0x40,r10 ; Get the installed features

        li r3,lo16(thrmtidm|thrmvm) ; Set for lower-than thermal event at 0 degrees
        bflr pfThermalb ; No thermal monitoring on this cpu
        mtspr thrm1,r3 ; Do it

        lwz r3,thrmthrottleTemp(r12) ; Get our throttle temperature
        rlwinm r3,r3,31-thrmthre,thrmthrs,thrmthre ; Position it
        ori r3,r3,lo16(thrmvm) ; Set for higher-than event
        mtspr thrm2,r3 ; Set it

        lis r4,hi16(1000000) ; Top of million
;
; Note: some CPU manuals say this is processor clocks, some say bus rate. The latter
; makes more sense because otherwise we cannot get over about 400MHz.
#if 0
        lwz r3,PECFIcpurate(r11) ; Get the processor speed
#else
        lwz r3,PECFIbusrate(r11) ; Get the bus speed
#endif
        ori r4,r4,lo16(1000000) ; Bottom of million
        lis r7,hi16(thrmsitvm>>1) ; Get top of highest possible value
        divwu r3,r3,r4 ; Get number of cycles per microsecond
        ori r7,r7,lo16(thrmsitvm>>1) ; Get the bottom of the highest possible value
        addi r3,r3,1 ; Ensure we have enough
        mulli r3,r3,20 ; Get 20 microseconds worth of cycles
        cmplw r3,r7 ; Check against max
        ble+ smallenuf ; It is ok...
        mr r3,r7 ; Saturate

smallenuf: rlwinm r3,r3,31-thrmsitve,thrmsitvs,thrmsitve ; Position
        ori r3,r3,lo16(thrmem) ; Enable with at least a 20 microsecond sample
        stw r3,thrm3val(r12) ; Save this in case we need it later
        mtspr thrm3,r3 ; Do it
        blr


/* Set thermal monitor bounds
 * void ml_thrm_set(unsigned int low, unsigned int high)
 *
 * Set TAU to interrupt below low and above high. A value of
 * zero disables interruptions in that direction.
 */
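
; Illustrative usage sketch (not part of the original source); the 70-degree
; threshold is hypothetical:
;
;        ml_thrm_set(0, 70);  /* interrupt above 70 degrees; low side disabled */
;        ml_thrm_set(0, 0);   /* interruptions off in both directions */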

; Force a line boundary here
        .align 5
        .globl EXT(ml_thrm_set)

LEXT(ml_thrm_set)

        mfmsr r0 ; Get the MSR
        rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear EE bit
        mtmsr r6

        mfsprg r12,0 ; Get the per_proc block

        rlwinm. r6,r3,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled
        mfsprg r9,2 ; Get CPU specific features
        stw r3,thrmlowTemp(r12) ; Set the low temperature
        mtcrf 0x40,r9 ; See if we can thermal this machine
        rlwinm r9,r9,(((31-thrmtie)+(pfThermIntb+1))&31),thrmtie,thrmtie ; Set interrupt enable if this machine can handle it
        bf pfThermalb,tsetcant ; No can do...
        beq tsetlowo ; We are setting the low off...
        ori r6,r6,lo16(thrmtidm|thrmvm) ; Set the lower-than and valid bit
        or r6,r6,r9 ; Set interruption request if supported

tsetlowo: mtspr thrm1,r6 ; Cram the register

        rlwinm. r6,r4,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled
        stw r4,thrmhighTemp(r12) ; Set the high temperature
        beq tsethigho ; We are setting the high off...
        ori r6,r6,lo16(thrmvm) ; Set valid bit
        or r6,r6,r9 ; Set interruption request if supported

tsethigho: mtspr thrm2,r6 ; Cram the register

tsetcant: mtmsr r0 ; Reenable interruptions
        blr ; Leave...

/* Read processor temperature
 * unsigned int ml_read_temp(void)
 *
 */
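
; Illustrative usage sketch (not part of the original source). The routine
; below returns all ones when the processor has no thermal assist unit, so a
; caller could check for that:
;
;        unsigned int temp = ml_read_temp();
;        if (temp != 0xFFFFFFFF) {
;                /* temp is the measured die temperature in degrees */
;        }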

; Force a line boundary here
        .align 5
        .globl EXT(ml_read_temp)

LEXT(ml_read_temp)

        mfmsr r9 ; Save the MSR
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
        li r5,15 ; Starting point for ranging (start at 15 so we do not overflow)
        mfsprg r7,2 ; Get CPU specific features
        mtmsr r8 ; Do not allow interruptions
        mtcrf 0x40,r7 ; See if we can thermal this machine
        bf pfThermalb,thrmcant ; No can do...

        mfspr r11,thrm1 ; Save thrm1

thrmrange: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it
        ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than

        mtspr thrm1,r4 ; Set the test value

thrmreada: mfspr r3,thrm1 ; Get the thermal register back
        rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet?
        beq+ thrmreada ; Nope...

        rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold?
        bne thrmsearch ; No, we went over...

        addi r5,r5,16 ; Start by trying every 16 degrees
        cmplwi r5,127 ; Have we hit the max?
        blt- thrmrange ; Got some more to do...

thrmsearch: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it
        ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than

        mtspr thrm1,r4 ; Set the test value

thrmread: mfspr r3,thrm1 ; Get the thermal register back
        rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet?
        beq+ thrmread ; Nope...

        rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold?
        beq thrmdone ; No, we hit it...
        addic. r5,r5,-1 ; Go down a degree
        bge+ thrmsearch ; Try again (until we are below freezing)...

thrmdone: addi r3,r5,1 ; Return the temperature (bump it up to make it correct)
        mtspr thrm1,r11 ; Restore the thermal register
        mtmsr r9 ; Re-enable interruptions
        blr ; Leave...

thrmcant: eqv r3,r3,r3 ; Return bogus temperature because we cannot read it
        mtmsr r9 ; Re-enable interruptions
        blr ; Leave...

/* Throttle processor speed up or down
 * unsigned int ml_throttle(unsigned int step)
 *
 * Returns the old setting and sets the new one. Both step and the return value
 * range from 0 to 255 throttle steps, 0 being off; "ictcfim" is max * 2.
 *
 */
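
; Illustrative usage sketch (not part of the original source); the step value
; 128 is hypothetical:
;
;        unsigned int old_step = ml_throttle(128);  /* throttle to step 128 of 255 */
;        (void) ml_throttle(old_step);              /* restore the previous setting */
;        (void) ml_throttle(0);                     /* or turn throttling off */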

; Force a line boundary here
        .align 5
        .globl EXT(ml_throttle)

LEXT(ml_throttle)

        mfmsr r9 ; Save the MSR
        rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
        cmplwi r3,lo16(ictcfim>>1) ; See if we are going too far
        mtmsr r8 ; Do not allow interruptions
        ble+ throtok ; Throttle value is ok...
        li r3,lo16(ictcfim>>1) ; Set max

throtok: rlwinm. r4,r3,1,ictcfib,ictcfie ; Set the throttle
        beq throtoff ; Skip if we are turning it off...
        ori r4,r4,lo16(thrmvm) ; Turn on the valid bit

throtoff: mfspr r3,ictc ; Get the old throttle
        mtspr ictc,r4 ; Set the new
        rlwinm r3,r3,31,1,31 ; Shift throttle value over
        mtmsr r9 ; Restore interruptions
        blr ; Return...

/*
** ml_get_timebase()
**
** Entry - R3 contains pointer to 64 bit structure.
**
** Exit - 64 bit structure filled in.
**
*/
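
; The loop below reads the 64-bit timebase consistently: read TBU, read TBL,
; then read TBU again, and retry if the upper half ticked in between. A hedged
; C equivalent (illustrative only; "read_tbu"/"read_tbl" stand in for the
; mftbu/mftb instructions and "p" for the caller's structure):
;
;        unsigned int hi, lo, hi2;
;        do {
;                hi  = read_tbu();   /* upper 32 bits */
;                lo  = read_tbl();   /* lower 32 bits */
;                hi2 = read_tbu();   /* upper half again */
;        } while (hi != hi2);        /* retry if TBL carried into TBU */
;        p[0] = hi; p[1] = lo;       /* fill in the 64-bit structure */
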
; Force a line boundary here
        .align 5
        .globl EXT(ml_get_timebase)

LEXT(ml_get_timebase)

loop:
        mftbu r4
        mftb r5
        mftbu r6
        cmpw r6, r4
        bne- loop

        stw r4, 0(r3)
        stw r5, 4(r3)

        blr

/*
** ml_sense_nmi()
**
*/
; Force a line boundary here
        .align 5
        .globl EXT(ml_sense_nmi)

LEXT(ml_sense_nmi)

        blr ; Leave...