1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | #include <ppc/asm.h> | |
23 | #include <ppc/proc_reg.h> | |
24 | #include <cpus.h> | |
25 | #include <assym.s> | |
26 | #include <debug.h> | |
27 | #include <mach/ppc/vm_param.h> | |
28 | #include <ppc/exception.h> | |
29 | ||
30 | /* PCI config cycle probing | |
31 | * | |
32 | * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val) | |
33 | * | |
34 | * Read the memory location at physical address paddr. | |
35 | * This is a part of a device probe, so there is a good chance we will | |
36 | * have a machine check here. So we have to be able to handle that. | |
37 | * We assume that machine checks are enabled both in MSR and HIDs | |
38 | */ | |
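/* A brief caller sketch in C, assuming only the prototype above; paddr and the
 * branch bodies are illustrative, not part of this file.
 *
 *	unsigned int val;
 *
 *	if (ml_probe_read(paddr, &val)) {
 *		// the location responded; val holds the word that was read
 *	} else {
 *		// the access machine checked, or fewer than 4 bytes remained
 *		// in the page, so treat it as "no device here"
 *	}
 */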
39 | ||
40 | ; Force a line boundary here |
41 | .align 5 | |
42 | .globl EXT(ml_probe_read) | |
43 | ||
44 | LEXT(ml_probe_read) | |
45 | ||
46 | mfsprg r9,2 ; Get feature flags | |
47 | mfmsr r0 ; Save the current MSR | |
48 | neg r10,r3 ; Number of bytes to end of page | |
49 | rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions | |
50 | rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page boundary |
51 | mr r12,r3 ; Save the load address | |
52 | mtcrf 0x04,r9 ; Set the features | |
53 | cmplwi cr1,r10,4 ; At least 4 bytes left in page? | |
54 | rlwinm r2,r2,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Clear translation | |
55 | beq- mprdoit ; We are right on the boundary... | |
56 | li r3,0 | |
57 | bltlr- cr1 ; No, just return failure... | |
58 | ||
59 | mprdoit: | |
60 | ||
61 | bt pfNoMSRirb,mprNoMSR ; No MSR... | |
62 | ||
63 | mtmsr r2 ; Translation and all off | |
64 | isync ; Toss prefetch | |
65 | b mprNoMSRx | |
66 | ||
67 | mprNoMSR: | |
68 | mr r5,r0 | |
69 | li r0,loadMSR ; Get the MSR setter SC | |
70 | mr r3,r2 ; Get new MSR | |
71 | sc ; Set it | |
72 | mr r0,r5 | |
73 | li r3,0 | |
74 | mprNoMSRx: | |
75 | ||
76 | ; | |
77 | ; We need to ensure that there is no more than 1 BAT register that |
78 | ; can get a hit. There could be repercussions beyond the ken | |
79 | ; of mortal man. It is best not to tempt fate. | |
80 | ; | |
81 | li r10,0 ; Clear a register | |
82 | mfdbatu r5,0 ; Save DBAT 0 high | |
83 | mfdbatl r6,0 ; Save DBAT 0 low | |
84 | mfdbatu r7,1 ; Save DBAT 1 high | |
85 | mfdbatu r8,2 ; Save DBAT 2 high | |
86 | mfdbatu r9,3 ; Save DBAT 3 high | |
87 | ||
88 | sync ; Make sure all is well | |
89 | ||
90 | mtdbatu 1,r10 ; Invalidate DBAT 1 | |
91 | mtdbatu 2,r10 ; Invalidate DBAT 2 | |
92 | mtdbatu 3,r10 ; Invalidate DBAT 3 | |
93 | ||
94 | rlwinm r10,r12,0,0,14 ; Round down to a 128k boundary | |
95 | ori r11,r10,0x32 ; Set uncached, coherent, R/W | |
96 | ori r10,r10,2 ; Make the upper half (128k, valid supervisor) | |
97 | mtdbatl 0,r11 ; Set lower BAT first | |
98 | mtdbatu 0,r10 ; Now the upper | |
99 | sync ; Just make sure | |
100 | ||
101 | ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation | |
102 | mtmsr r11 ; Do it for real | |
103 | isync ; Make sure of it | |
104 | ||
105 | eieio ; Make sure of all previous accesses | |
106 | sync ; Make sure it is all caught up | |
107 | ||
108 | lwz r11,0(r12) ; Get it and maybe machine check here | |
109 | ||
110 | eieio ; Make sure of ordering again | |
111 | sync ; Get caught up yet again | |
112 | isync ; Do not go further till we are here | |
113 | ||
114 | mtdbatu 0,r5 ; Restore DBAT 0 high | |
115 | mtdbatl 0,r6 ; Restore DBAT 0 low | |
116 | mtdbatu 1,r7 ; Restore DBAT 1 high | |
117 | mtdbatu 2,r8 ; Restore DBAT 2 high | |
118 | mtdbatu 3,r9 ; Restore DBAT 3 high | |
119 | sync | |
120 | ||
121 | li r3,1 ; We made it | |
122 | ||
123 | mtmsr r0 ; Restore translation and exceptions | |
124 | isync ; Toss speculations | |
125 | ||
126 | stw r11,0(r4) ; Save the loaded value | |
127 | blr ; Return... | |
128 | ||
129 | ; Force a line boundary here. This means we will be able to check addresses better |
130 | .align 5 | |
131 | .globl EXT(ml_probe_read_mck) | |
132 | LEXT(ml_probe_read_mck) | |
133 | ||
134 | /* Read physical address | |
135 | * | |
136 | * unsigned int ml_phys_read_byte(vm_offset_t paddr) | |
137 | * | |
138 | * Read the byte at physical address paddr. Memory should not be cache inhibited. | |
139 | */ | |
140 | ||
141 | ; Force a line boundary here |
142 | .align 5 | |
143 | .globl EXT(ml_phys_read_byte) | |
144 | ||
145 | LEXT(ml_phys_read_byte) | |
146 | ||
147 | mfmsr r10 ; Save the current MSR | |
148 | rlwinm r4,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions |
149 | rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation | |
150 | ||
151 | mtmsr r4 ; Translation and all off | |
152 | isync ; Toss prefetch | |
153 | ||
154 | lbz r3,0(r3) ; Get the byte | |
155 | sync | |
156 | ||
157 | mtmsr r10 ; Restore translation and rupts | |
158 | isync | |
159 | blr | |
160 | ||
161 | /* Read physical address | |
162 | * | |
163 | * unsigned int ml_phys_read(vm_offset_t paddr) | |
164 | * | |
165 | * Read the word at physical address paddr. Memory should not be cache inhibited. | |
166 | */ | |
167 | ||
168 | ; Force a line boundary here |
169 | .align 5 | |
170 | .globl EXT(ml_phys_read) | |
171 | ||
172 | LEXT(ml_phys_read) | |
173 | ||
174 | mfmsr r0 ; Save the current MSR | |
175 | rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions | |
176 | rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation | |
177 | ||
178 | mtmsr r4 ; Translation and all off | |
179 | isync ; Toss prefetch | |
180 | ||
181 | lwz r3,0(r3) ; Get the word | |
182 | sync | |
183 | ||
184 | mtmsr r0 ; Restore translation and rupts | |
185 | isync | |
186 | blr | |
187 | ||
188 | /* Write physical address byte | |
189 | * | |
190 | * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) | |
191 | * | |
192 | * Write the byte at physical address paddr. Memory should not be cache inhibited. | |
193 | */ | |
194 | ||
195 | ; Force a line boundary here |
196 | .align 5 | |
197 | .globl EXT(ml_phys_write_byte) | |
198 | ||
199 | LEXT(ml_phys_write_byte) | |
200 | ||
201 | mfmsr r0 ; Save the current MSR | |
202 | rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions | |
203 | rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation | |
204 | ||
205 | mtmsr r5 ; Translation and all off | |
206 | isync ; Toss prefetch | |
207 | ||
208 | stb r4,0(r3) ; Set the byte | |
209 | sync | |
210 | ||
211 | mtmsr r0 ; Restore translation and rupts | |
212 | isync | |
213 | blr | |
214 | ||
215 | /* Write physical address | |
216 | * | |
217 | * void ml_phys_write(vm_offset_t paddr, unsigned int data) | |
218 | * | |
219 | * Write the word at physical address paddr. Memory should not be cache inhibited. | |
220 | */ | |
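/* The four routines above form a small physical-memory accessor family. A
 * hedged C sketch of a caller; copy_phys_word, src_pa, and dst_pa are
 * illustrative names only.
 *
 *	static void copy_phys_word(vm_offset_t src_pa, vm_offset_t dst_pa)
 *	{
 *		// neither location may be cache inhibited
 *		ml_phys_write(dst_pa, ml_phys_read(src_pa));
 *	}
 */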
221 | ||
222 | ; Force a line boundary here |
223 | .align 5 | |
224 | .globl EXT(ml_phys_write) | |
225 | ||
226 | LEXT(ml_phys_write) | |
227 | ||
228 | mfmsr r0 ; Save the current MSR | |
229 | rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions | |
230 | rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation | |
231 | ||
232 | mtmsr r5 ; Translation and all off | |
233 | isync ; Toss prefetch | |
234 | ||
235 | stw r4,0(r3) ; Set the word | |
236 | sync | |
237 | ||
238 | mtmsr r0 ; Restore translation and rupts | |
239 | isync | |
240 | blr | |
241 | ||
242 | ||
243 | /* set interrupts enabled or disabled | |
244 | * | |
245 | * boolean_t set_interrupts_enabled(boolean_t enable) | |
246 | * | |
247 | * Set EE bit to "enable" and return old value as boolean | |
248 | */ | |
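/* A usage sketch in C, assuming only the interface above; the usual pattern
 * saves the previous state and restores it on the way out.
 *
 *	boolean_t prev = ml_set_interrupts_enabled(FALSE);	// disable interruptions
 *	// ... work that must not be interrupted ...
 *	(void) ml_set_interrupts_enabled(prev);			// put the old state back
 */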
249 | ||
250 | ; Force a line boundary here |
251 | .align 5 |
252 | .globl EXT(ml_set_interrupts_enabled) | |
253 | ||
254 | LEXT(ml_set_interrupts_enabled) | |
255 ||
256 | mfsprg r7,0 |
257 | lwz r4,PP_INTS_ENABLED(r7) | |
258 | mr. r4,r4 | |
259 | beq- EXT(fake_set_interrupts_enabled) | |
260 | mfmsr r5 ; Get the current MSR |
261 | mr r4,r3 ; Save the old value | |
262 | rlwinm r3,r5,17,31,31 ; Set return value | |
263 | rlwimi r5,r4,15,16,16 ; Insert new EE bit | |
264 | andi. r8,r5,lo16(MASK(MSR_EE)) ; Interruptions |
265 | bne CheckPreemption |
266 | NoPreemption: | |
267 | mtmsr r5 ; Slam enablement | |
268 | blr | |
269 | ||
270 | CheckPreemption: | |
271 | lwz r8,PP_NEED_AST(r7) |
272 | lwz r7,PP_CPU_DATA(r7) | |
273 | li r6,AST_URGENT | |
274 | lwz r8,0(r8) | |
275 | lwz r7,CPU_PREEMPTION_LEVEL(r7) | |
276 | lis r0,HIGH_ADDR(DoPreemptCall) | |
277 | and. r8,r8,r6 | |
278 | ori r0,r0,LOW_ADDR(DoPreemptCall) | |
279 | beq+ NoPreemption | |
280 | cmpi cr0, r7, 0 | |
281 | bne+ NoPreemption | |
282 | sc | |
283 | mtmsr r5 | |
284 | blr | |
285 | ||
286 | ||
287 | /* Emulate a decrementer exception |
288 | * | |
289 | * void machine_clock_assist(void) | |
290 | * | |
291 | */ | |
292 | ||
293 | ; Force a line boundary here |
294 | .align 5 | |
295 | .globl EXT(machine_clock_assist) | |
296 | ||
297 | LEXT(machine_clock_assist) | |
298 | ||
299 | mfsprg r7,0 | |
300 | lwz r4,PP_INTS_ENABLED(r7) | |
301 | mr. r4,r4 | |
302 | beq- EXT(CreateFakeDEC) | |
303 | blr | |
304 | ||
305 | /* Set machine into idle power-saving mode. |
306 | * | |
307 | * void machine_idle_ppc(void) | |
308 | * | |
309 | * We will use the PPC NAP or DOZE for this. | |
310 | * This call always returns. Must be called with spllo (i.e., interruptions | |
311 | * enabled). | |
312 | * | |
313 | */ | |
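/* A hedged sketch of a caller; nothing_to_run is an illustrative predicate,
 * not part of this file. Interruptions must already be enabled (spllo), and
 * every call returns.
 *
 *	for (;;) {
 *		if (nothing_to_run())
 *			machine_idle_ppc();	// naps/dozes until an interrupt lands
 *	}
 */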
314 | ||
315 | ||
316 | ; Force a line boundary here |
317 | .align 5 | |
318 | .globl EXT(machine_idle_ppc) | |
319 | ||
320 | LEXT(machine_idle_ppc) | |
321 | ||
322 | mfmsr r3 ; Get the current MSR | |
323 | rlwinm r5,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions | |
324 | mtmsr r5 ; Hold up interruptions for now | |
325 | mfsprg r12,0 ; Get the per_proc_info | |
326 | mfspr r6,hid0 ; Get the current power-saving mode | |
327 | mfsprg r11,2 ; Get CPU specific features | |
328 | rlwinm r6,r6,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though) | |
329 | mtcrf 0xC7,r11 ; Get the facility flags | |
330 | ||
331 | lis r4,hi16(napm) ; Assume we can nap | |
332 | bt pfWillNapb,yesnap ; Yeah, nap is ok... | |
333 | ||
334 | lis r4,hi16(dozem) ; Assume we can doze | |
335 | bt pfCanDozeb,yesnap ; We can sleep or doze on this machine... |
336 | ||
337 | ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE | |
338 | mtmsr r3 ; Turn interruptions back on | |
339 | blr ; Leave... | |
340 | ||
341 | yesnap: mftbu r9 ; Get the upper timebase | |
342 | mftb r7 ; Get the lower timebase | |
343 | mftbu r8 ; Get the upper one again | |
344 | cmplw r9,r8 ; Did the top tick? | |
345 | bne- yesnap ; Yeah, need to get it again... | |
346 | stw r8,napStamp(r12) ; Set high order time stamp | |
347 | stw r7,napStamp+4(r12) ; Set low order nap stamp | |
348 | ||
349 | bf pfL1nncb,minoflushl1 ; The L1 is coherent in nap/doze... | |
350 | ; | |
351 | ; The 7450 does not keep the L1 cache coherent across nap/sleep; it must always flush. |
352 | ; It does not have an L1 flush assist, so we do not test for it here. |
353 | ; | |
354 | ; Note that the time stamp taken above is not completely accurate for the 7450 |
355 | ; because we are about to flush the L1 cache and that takes a bit of time. | |
356 | ; | |
357 | cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache | |
358 | bf- cr0_eq,minoflushl1 ; No level 1 to flush... | |
359 | rlwinm. r0,r4,0,ice,dce ; Were either of the level 1s on? | |
360 | beq- minoflushl1 ; No, no need to flush... | |
361 | ||
362 | miswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size | |
363 | rlwinm r2,r0,0,1,30 ; Double it | |
364 | add r0,r0,r2 ; Get 3 times cache size | |
365 | rlwinm r2,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Turn off data translation | |
366 | rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines | |
367 | lis r3,0xFFF0 ; Dead recon ROM address for now | |
368 | mtctr r0 ; Number of lines to flush | |
369 | mtmsr r2 ; Do it | |
370 | isync | |
371 | ||
372 | miswfldl1a: lwz r2,0(r3) ; Flush anything else | |
373 | addi r3,r3,32 ; Next line | |
374 | bdnz miswfldl1a ; Flush the lot... | |
375 | ||
376 | miinvdl1: sync ; Make sure all flushes have been committed | |
377 | mtmsr r5 ; Put back data translation | |
378 | isync | |
379 | ||
380 | mfspr r8,hid0 ; Get the HID0 bits | |
381 | li r7,lo16(icem|dcem) ; Get the cache enable bits | |
382 | andc r8,r8,r7 ; Clear cache enables | |
383 | mtspr hid0,r8 ; and turn off L1 cache | |
384 | sync ; Make sure all is done | |
385 | ||
386 | ori r8,r8,lo16(icfim|dcfim) ; Set the HID0 bits for invalidate | |
387 | sync | |
388 | isync | |
389 | ||
390 | mtspr hid0,r8 ; Start the invalidate | |
391 | sync | |
392 | ||
393 | minoflushl1: | |
394 | ||
395 | ; | |
396 | ; We have to open up interruptions here because book 4 says that we should | |
397 | ; turn on only the POW bit and that we should have interrupts enabled | |
398 | ; The interrupt handler will detect that nap or doze is set if an interrupt | |
399 | ; is taken and set everything up to return directly to machine_idle_ret. | |
400 | ; So, make sure everything we need there is already set up... | |
401 | ; | |
402 | ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE | |
403 | or r6,r6,r4 ; Set nap or doze | |
404 | oris r5,r7,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR | |
405 | mtspr hid0,r6 ; Set up the HID for nap/doze | |
406 | isync ; Make sure it is set | |
407 | mtmsr r7 ; Enable for interrupts | |
408 | rlwinm. r11,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec? | |
409 | beq- minovec ; No... | |
410 | dssall ; Stop the streams before we nap/doze | |
411 | ||
412 | minovec: sync ; Make sure queues are clear | |
413 | mtmsr r5 ; Nap or doze | |
414 | isync ; Make sure this takes before we proceed | |
415 | b minovec ; loop if POW does not take | |
416 | ; | |
417 | ; Note that the interrupt handler will turn off the nap/doze bits in the hid. | |
418 | ; Also remember that the interrupt handler will force return to here whenever | |
419 | ; the nap/doze bits are set. | |
420 | ; | |
421 | .globl EXT(machine_idle_ret) | |
422 | LEXT(machine_idle_ret) | |
423 | mtmsr r7 ; Make sure the MSR is what we want | |
424 | isync ; In case we turn on translation | |
425 | ||
426 | blr ; Return... | |
427 | ||
428 | /* Put machine to sleep. | |
429 | * This call never returns. We always exit sleep via a soft reset. | |
430 | * All external interruptions must be drained at this point and disabled. | |
431 | * | |
432 | * void ml_ppc_sleep(void) | |
433 | * | |
434 | * We will use the PPC SLEEP for this. | |
435 | * | |
436 | * There is one bit of hackery in here: we need to enable for | |
437 | * interruptions when we go to sleep and there may be a pending | |
438 | * decrementer rupt. So we make the decrementer 0x7FFFFFFF and enable for |
439 | * interruptions. The decrementer rupt vector recognizes this and returns |
440 | * directly back here. | |
441 | * | |
442 | */ | |
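/* For scale: the decrementer counts down at the timebase rate, so with an
 * illustrative 25MHz timebase the value 0x7FFFFFFF (2,147,483,647) gives
 * roughly 2147483647 / 25000000, i.e. about 86 seconds before it could pop,
 * which is far longer than the window between enabling interruptions here
 * and actually entering sleep.
 */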
443 | ||
444 | ; Force a line boundary here |
445 | .align 5 | |
446 | .globl EXT(ml_ppc_sleep) | |
447 | ||
448 | LEXT(ml_ppc_sleep) | |
449 | ||
450 | #if 0 | |
451 | mfmsr r5 ; Hack to spin instead of sleep | |
452 | rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation | |
453 | rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions | |
454 | mtmsr r5 ; No talking | |
455 | isync | |
456 | ||
457 | ; No interrupts allowed after we get the savearea | |
458 | ||
459 | mfsprg r6,0 ; Get the per_proc | |
460 | mfsprg r7,1 ; Get the pending savearea | |
461 | stw r7,savedSave(r6) ; Save the savearea for when we wake up | |
462 | ||
463 | deadsleep: addi r3,r3,1 ; Make analyzer happy | |
464 | addi r3,r3,1 | |
465 | addi r3,r3,1 | |
466 | b deadsleep ; Die the death of 1000 joys... | |
467 | #endif | |
468 | ||
469 | mfsprg r12,0 ; Get the per_proc_info | |
470 | mfspr r4,hid0 ; Get the current power-saving mode | |
471 | eqv r10,r10,r10 ; Get all foxes | |
472 | mfsprg r11,2 ; Get CPU specific features | |
473 | mfmsr r5 ; Get the current MSR | |
474 | rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF | |
475 | rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though) | |
476 | mtdec r10 ; Load decrementer with 0x7FFFFFFF |
477 | isync ; and make sure, | |
478 | mfdec r9 ; really sure, it gets there | |
479 | ||
480 | mtcrf 0x07,r11 ; Get the cache flags, etc | |
481 | ||
482 | oris r4,r4,hi16(sleepm) ; Set sleep | |
483 | rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation | |
484 | ; | |
485 | ; Note that we need translation off before we set the HID to sleep. Otherwise | |
486 | ; we will ignore any PTE misses that occur and cause an infinite loop. | |
487 | ; | |
488 | bt pfNoMSRirb,mpsNoMSR ; No MSR... | |
489 | ||
490 | mtmsr r5 ; Translation off | |
491 | isync ; Toss prefetch | |
492 | b mpsNoMSRx | |
493 | ||
494 | mpsNoMSR: | |
495 | li r0,loadMSR ; Get the MSR setter SC | |
496 | mr r3,r5 ; Get new MSR | |
497 | sc ; Set it | |
498 | mpsNoMSRx: | |
499 | ||
500 | ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE | |
501 | sync | |
502 | mtspr hid0,r4 ; Set up the HID to sleep | |
503 | ||
504 | mtmsr r3 ; Enable for interrupts to drain decrementer |
505 | ||
506 | add r6,r4,r5 ; Just waste time | |
507 | add r6,r6,r4 ; A bit more | |
508 | add r6,r6,r5 ; A bit more | |
509 | ||
510 | mtmsr r5 ; Interruptions back off | |
511 | isync ; Toss prefetch | |
512 | ||
513 | mfsprg r7,1 ; Get the pending savearea | |
514 | stw r7,savedSave(r12) ; Save the savearea for when we wake up | |
515 | ||
516 | ; | |
517 | ; We are here with translation off, interrupts off, all possible | |
518 | ; interruptions drained off, and a decrementer that will not pop. |
519 | ; | |
520 | ||
521 | bl EXT(cacheInit) ; Clear out the caches. This will leave them on | |
522 | bl EXT(cacheDisable) ; Turn off all caches | |
523 | ||
524 | mfmsr r5 ; Get the current MSR | |
525 | oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR | |
526 | ; Leave EE off because power goes off shortly | |
527 | ||
528 | slSleepNow: sync ; Sync it all up | |
529 | mtmsr r5 ; Do sleep with interruptions enabled | |
530 | isync ; Take a pill | |
531 | b slSleepNow ; Go back to sleep if we wake up... | |
532 | ||
533 | ||
534 | ||
535 | /* Initialize all caches including the TLBs | |
536 | * | |
537 | * void cacheInit(void) | |
538 | * | |
539 | * This is used to force the caches to an initial clean state. First, we | |
540 | * check if the cache is on; if so, we need to flush the contents to memory. |
541 | * Then we invalidate the L1. Next, we configure and invalidate the L2 etc. | |
542 | * Finally we turn on all of the caches | |
543 | * | |
544 | * Note that if translation is not disabled when this is called, the TLB will not | |
545 | * be completely clear after return. | |
546 | * | |
547 | */ | |
548 | ||
549 | ; Force a line boundary here |
550 | .align 5 | |
551 | .globl EXT(cacheInit) | |
552 | ||
553 | LEXT(cacheInit) | |
554 | ||
555 | mfsprg r12,0 ; Get the per_proc_info | |
556 | mfspr r9,hid0 ; Get the current power-saving mode | |
557 | ||
558 | mfsprg r11,2 ; Get CPU specific features | |
559 | mfmsr r7 ; Get the current MSR | |
560 | rlwinm r4,r9,0,dpm+1,doze-1 ; Clear all possible power-saving modes (also disable DPM) | |
561 | rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs) |
562 | rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation |
563 | rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions | |
564 | mtcrf 0x87,r11 ; Get the feature flags | |
565 | mtspr hid0,r4 ; Set up the HID | |
566 | ||
567 | bt pfNoMSRirb,ciNoMSR ; No MSR... | |
568 | ||
569 | mtmsr r5 ; Translation and all off | |
570 | isync ; Toss prefetch | |
571 | b ciNoMSRx | |
572 | ||
573 | ciNoMSR: | |
574 | li r0,loadMSR ; Get the MSR setter SC | |
575 | mr r3,r5 ; Get new MSR | |
576 | sc ; Set it | |
577 | ciNoMSRx: | |
578 | ||
579 | bf pfAltivecb,cinoDSS ; No Altivec here... | |
580 | ||
581 | dssall ; Stop streams | |
582 | sync | |
583 | ||
584 | cinoDSS: lis r5,hi16(EXT(tlb_system_lock)) ; Get the TLBIE lock | |
585 | li r0,128 ; Get number of TLB entries | |
586 | ori r5,r5,lo16(EXT(tlb_system_lock)) ; Grab up the bottom part | |
587 | ||
588 | li r6,0 ; Start at 0 | |
589 | |
590 | citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock | |
591 | mr. r2,r2 ; Is it locked? | |
592 | bne- citlbhang ; It is locked, go wait... | |
593 | stwcx. r0,0,r5 ; Try to get it | |
594 | bne- citlbhang ; We was beat... | |
595 | ||
596 | mtctr r0 ; Set the CTR | |
597 | ||
598 | cipurgeTLB: tlbie r6 ; Purge this entry | |
599 | addi r6,r6,4096 ; Next page | |
600 | bdnz cipurgeTLB ; Do them all... | |
601 | ||
602 | mtcrf 0x80,r11 ; Set SMP capability | |
603 | sync ; Make sure all TLB purges are done | |
604 | eieio ; Order, order in the court | |
605 | ||
606 | bf pfSMPcapb,cinoSMP ; SMP incapable... | |
607 | ||
608 | tlbsync ; Sync all TLBs | |
609 | sync | |
610 | isync |
611 | |
612 | cinoSMP: stw r2,0(r5) ; Unlock TLBIE lock | |
613 | ||
614 | cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache | |
615 | bf- cr0_eq,cinoL1 ; No level 1 to flush... | |
616 | rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on? | |
617 | beq- cinoL1 ; No, no need to flush... | |
618 | ||
619 | bf pfL1fab,ciswdl1 ; If no hw flush assist, go do by software... | |
620 | ||
621 | mfspr r8,msscr0 ; Get the memory system control register | |
622 | oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request | |
623 | ||
624 | mtspr msscr0,r8 ; Start the flush operation | |
625 | ||
626 | ciwdl1f: mfspr r8,msscr0 ; Get the control register again | |
627 | ||
628 | rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet? | |
629 | bne ciwdl1f ; No, flush is still in progress... | |
630 | b ciinvdl1 ; Go invalidate l1... | |
631 | ||
632 | ; | |
633 | ; We need to either make this very complicated or to use ROM for | |
634 | ; the flush. The problem is that if during the following sequence a | |
635 | ; snoop occurs that invalidates one of the lines in the cache, the | |
636 | ; PLRU sequence will be altered making it possible to miss lines | |
637 | ; during the flush. So, we either need to dedicate an area of RAM | |
638 | ; to each processor, lock use of a RAM area, or use ROM. ROM is | |
639 | ; by far the easiest. Note that this is not an issue for machines | |
640 | ; that have hardware flush assists. |
641 | ; | |
642 | ||
643 | ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size | |
644 | |
645 | bf 31,cisnlck ; Skip if pfLClck not set... | |
646 | ||
647 | mfspr r4,msscr0 ; ? | |
648 | rlwinm r6,r4,0,0,l2pfes-1 ; ? | |
649 | mtspr msscr0,r6 ; Set it | |
650 | sync | |
651 | isync | |
652 | ||
653 | mfspr r8,ldstcr ; Save the LDSTCR | |
654 | li r2,1 ; Get a mask of 0x01 | |
655 | lis r3,0xFFF0 ; Point to ROM | |
656 | rlwinm r11,r0,29,3,31 ; Get the amount of memory to handle all indexes | |
657 | ||
658 | li r6,0 ; Start here | |
659 | ||
660 | cisiniflsh: dcbf r6,r3 ; Flush each line of the range we use | |
661 | addi r6,r6,32 ; Bump to the next | |
662 | cmplw r6,r0 ; Have we reached the end? | |
663 | blt+ cisiniflsh ; Nope, continue initial flush... | |
664 | ||
665 | sync ; Make sure it is done | |
666 | ||
667 | addi r11,r11,-1 ; Get mask for index wrap | |
668 | li r6,0 ; Get starting offset | |
669 | ||
670 | cislckit: not r5,r2 ; Lock all but 1 way | |
671 | rlwimi r5,r8,0,0,23 ; Build LDSTCR | |
672 | mtspr ldstcr,r5 ; Lock a way | |
673 | sync ; Clear out memory accesses | |
674 | isync ; Wait for all | |
675 | ||
676 | ||
677 | cistouch: lwzx r10,r3,r6 ; Pick up some trash | |
678 | addi r6,r6,32 ; Go to the next index | |
679 | and. r0,r6,r11 ; See if we are about to do next index | |
680 | bne+ cistouch ; Nope, do more... | |
681 | ||
682 | sync ; Make sure it is all done | |
683 | isync | |
684 | ||
685 | sub r6,r6,r11 ; Back up to start + 1 | |
686 | addi r6,r6,-1 ; Get it right | |
687 | ||
688 | cisflush: dcbf r3,r6 ; Flush everything out | |
689 | addi r6,r6,32 ; Go to the next index | |
690 | and. r0,r6,r11 ; See if we are about to do next index | |
691 | bne+ cisflush ; Nope, do more... | |
692 | ||
693 | sync ; Make sure it is all done | |
694 | isync | |
695 | ||
696 | ||
697 | rlwinm. r2,r2,1,24,31 ; Shift to next way | |
698 | bne+ cislckit ; Do this for all ways... | |
699 | ||
700 | mtspr ldstcr,r8 ; Slam back to original | |
701 | sync | |
702 | isync | |
703 | ||
704 | mtspr msscr0,r4 ; ? | |
705 | sync | |
706 | isync | |
707 | ||
708 | b cinoL1 ; Go on to level 2... | |
709 | ||
710 | ||
711 | cisnlck: rlwinm r2,r0,0,1,30 ; Double cache size | |
712 | add r0,r0,r2 ; Get 3 times cache size |
713 | rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines | |
714 | lis r3,0xFFF0 ; Dead recon ROM address for now | |
715 | mtctr r0 ; Number of lines to flush | |
716 | ||
717 | ciswfldl1a: lwz r2,0(r3) ; Flush anything else | |
718 | addi r3,r3,32 ; Next line | |
719 | bdnz ciswfldl1a ; Flush the lot... | |
720 | ||
721 | ciinvdl1: sync ; Make sure all flushes have been committed | |
722 | ||
723 | mfspr r8,hid0 ; Get the HID0 bits | |
724 | rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables | |
725 | mtspr hid0,r8 ; and turn off L1 cache | |
726 | sync ; Make sure all is done | |
727 | isync |
728 | ||
729 | ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate |
730 | sync | |
731 | isync | |
732 | ||
733 | mtspr hid0,r8 ; Start the invalidate and turn on cache | |
734 | rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits | |
735 | mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines) | |
736 | sync | |
737 ||
738 | |
739 | cinoL1: | |
740 | ; | |
741 | ; Flush and disable the level 2 | |
742 | ; | |
743 | bf pfL2b,cinol2 ; No level 2 cache to flush | |
744 | ||
745 | mfspr r8,l2cr ; Get the L2CR | |
746 | lwz r3,pfl2cr(r12) ; Get the L2CR value | |
747 | lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get configuration bits |
748 | xor r2,r8,r3 ; Get changing bits? | |
749 | ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits | |
750 | and. r0,r0,r2 ; Did any change? | |
751 | bne- ciinvdl2 ; Yes, just invalidate and get PLL synced... | |
752 | ||
753 | bf pfL2fab,ciswfl2 ; Flush not in hardware... | |
754 | ||
755 | mr r10,r3 ; Take a copy now | |
756 | ||
757 | bf 31,cinol2lck ; Skip if pfLClck not set... |
758 | |
759 | oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only | |
760 | sync | |
761 | mtspr l2cr,r10 ; Lock out the cache | |
762 | sync | |
763 | isync | |
764 | ||
765 | cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush | |
766 | sync ; Make sure everything is done | |
767 | ||
768 | mtspr l2cr,r10 ; Request flush | |
769 | ||
770 | cihwfl2: mfspr r10,l2cr ; Get back the L2CR | |
771 | rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over? | |
772 | bne+ cihwfl2 ; Nope, keep going... | |
773 | b ciinvdl2 ; Flush done, go invalidate L2... | |
774 | ||
775 | ciswfl2: | |
776 | lwz r0,pfl2Size(r12) ; Get the L2 size | |
777 | oris r2,r3,hi16(l2dom) ; Set L2 to data only mode | |
778 | |
779 | b ciswfl2doa ; Branch to next line... | |
780 | ||
781 | .align 5 | |
782 | ciswfl2doc: | |
783 | mtspr l2cr,r2 ; Disable L2 | |
784 | sync | |
785 | isync | |
786 | b ciswfl2dod ; It is off, go invalidate it... | |
787 | ||
788 | ciswfl2doa: | |
789 | b ciswfl2dob ; Branch to next... | |
790 | ||
791 | ciswfl2dob: | |
792 | sync ; Finish memory stuff | |
793 | isync ; Stop speculation | |
794 | b ciswfl2doc ; Jump back up and turn on data only... | |
795 | ciswfl2dod: | |
796 | rlwinm r0,r0,27,5,31 ; Get the number of lines |
797 | lis r10,0xFFF0 ; Dead recon ROM for now | |
798 | mtctr r0 ; Set the number of lines | |
799 | ||
800 | ciswfldl2a: lwz r0,0(r10) ; Load something to flush something | |
801 | addi r10,r10,32 ; Next line | |
802 | bdnz ciswfldl2a ; Do the lot... | |
803 | ||
804 | ciinvdl2: rlwinm r3,r3,0,l2e+1,31 ; Clear the enable bit | |
805 | b cinla ; Branch to next line... | |
806 | ||
807 | .align 5 | |
808 | cinlc: mtspr l2cr,r3 ; Disable L2 | |
809 | sync | |
810 | isync | |
811 | b ciinvl2 ; It is off, go invalidate it... | |
812 | ||
813 | cinla: b cinlb ; Branch to next... | |
814 | ||
815 | cinlb: sync ; Finish memory stuff | |
816 | isync ; Stop speculation | |
817 | b cinlc ; Jump back up and turn off cache... | |
818 | ||
819 | ciinvl2: sync | |
820 | isync | |
821 | oris r2,r3,hi16(l2im) ; Get the invalidate flag set | |
822 | ||
823 | mtspr l2cr,r2 ; Start the invalidate | |
824 | sync | |
825 | isync | |
826 | ciinvdl2a: mfspr r2,l2cr ; Get the L2CR | |
827 | bf pfL2ib,ciinvdl2b ; Flush not in hardware... | |
828 | rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going? | |
829 | bne+ ciinvdl2a ; Assume so, this will take a looong time... | |
830 | sync | |
831 | b cinol2 ; No level 2 cache to flush | |
832 | ciinvdl2b: | |
833 | rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going? | |
834 | bne+ ciinvdl2a ; Assume so, this will take a looong time... | |
835 | sync | |
836 | mtspr l2cr,r3 ; Turn off the invalidate request | |
837 | ||
838 | cinol2: | |
839 | ||
840 | ; | |
841 | ; Flush and enable the level 3 | |
842 | ; | |
843 | bf pfL3b,cinol3 ; No level 3 cache to flush | |
844 | ||
845 | mfspr r8,l3cr ; Get the L3CR | |
846 | lwz r3,pfl3cr(r12) ; Get the L3CR value | |
847 | lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits | |
848 | xor r2,r8,r3 ; Get changing bits? | |
849 | ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits | |
850 | and. r0,r0,r2 ; Did any change? | |
851 | bne- ciinvdl3 ; Yes, just invalidate and get PLL synced... | |
852 | ||
853 | sync ; 7450 book says do this even though not needed | |
854 | mr r10,r3 ; Take a copy now | |
855 | ||
856 | bf 31,cinol3lck ; Skip if pfL23lck not set... | |
857 | ||
858 | oris r10,r10,hi16(l3iom) ; Set instruction-only | |
859 | ori r10,r10,lo16(l3donlym) ; Set data-only | |
860 | sync | |
861 | mtspr l3cr,r10 ; Lock out the cache | |
862 | sync | |
863 | isync | |
864 | ||
865 | cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush | |
866 | sync ; Make sure everything is done | |
867 | ||
868 | mtspr l3cr,r10 ; Request flush | |
869 | ||
870 | cihwfl3: mfspr r10,l3cr ; Get back the L3CR | |
871 | rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over? | |
872 | bne+ cihwfl3 ; Nope, keep going... | |
873 | ||
874 | ciinvdl3: rlwinm r3,r3,0,l3e+1,31 ; Clear the enable bit | |
875 | sync ; Make sure of life, liberty, and justice | |
876 | mtspr l3cr,r3 ; Disable L3 | |
877 | sync | |
878 | ||
879 | ori r3,r3,lo16(l3im) ; Get the invalidate flag set | |
880 | ||
881 | mtspr l3cr,r3 ; Start the invalidate | |
882 | ||
883 | ciinvdl3b: mfspr r3,l3cr ; Get the L3CR | |
884 | rlwinm. r3,r3,0,l3i,l3i ; Is the invalidate still going? | |
885 | bne+ ciinvdl3b ; Assume so... | |
886 | sync | |
887 | ||
888 | mfspr r3,l3pdet ; ? | |
889 | rlwimi r3,r3,28,0,23 ; ? | |
890 | oris r3,r3,0xF000 ; ? | |
891 | ori r3,r3,0x0080 ; ? | |
892 | mtspr l3pdet,r3 ; ? | |
893 | isync | |
894 | ||
895 | mfspr r3,l3cr ; Get the L3CR | |
896 | rlwinm r3,r3,0,l3clken+1,l3clken-1 ; Clear the clock enable bit | |
897 | mtspr l3cr,r3 ; Disable the clock | |
898 | ||
899 | li r2,128 ; ? | |
900 | ciinvdl3c: addi r2,r2,-1 ; ? | |
901 | cmplwi r2,0 ; ? | |
902 | bne+ ciinvdl3c | |
903 | ||
904 | mfspr r10,msssr0 ; ? | |
905 | rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ? | |
906 | mtspr msssr0,r10 ; ? | |
907 | sync | |
908 | ||
909 | oris r3,r3,hi16(l3em|l3clkenm) ; Turn on enable bit | |
910 | mtspr l3cr,r3 ; Enable it | |
911 | sync | |
912 | cinol3: | |
913 | bf pfL2b,cinol2a ; No level 2 cache to enable | |
914 | ||
915 | lwz r3,pfl2cr(r12) ; Get the L2CR value | |
916 | oris r3,r3,hi16(l2em) ; Turn on enable bit | |
917 | mtspr l2cr,r3 ; Enable it | |
918 | sync | |
919 | ||
920 | ; | |
921 | ; Invalidate and turn on L1s | |
922 | ; | |
923 | ||
924 | cinol2a: |
925 | bt 31,cinoexit ; Skip if pfLClck set... | |
926 | ||
927 | rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables | |
928 | mtspr hid0,r8 ; Turn off dem caches |
929 | sync | |
930 | ||
931 | ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate | |
932 | rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits | |
933 | sync | |
934 | isync | |
935 | ||
936 | mtspr hid0,r8 ; Start the invalidate and turn on L1 cache | |
937 | |
938 | cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) and restore entry conditions | |
939 | sync |
940 | mtmsr r7 ; Restore MSR to entry | |
941 | isync | |
942 | blr ; Return... | |
943 | ||
944 | ||
945 | /* Disables all caches | |
946 | * | |
947 | * void cacheDisable(void) | |
948 | * | |
949 | * Turns off all caches on the processor. They are not flushed. | |
950 | * | |
951 | */ | |
952 | ||
953 | ; Force a line boundary here |
954 | .align 5 | |
955 | .globl EXT(cacheDisable) | |
956 | ||
957 | LEXT(cacheDisable) | |
958 | ||
959 | mfsprg r11,2 ; Get CPU specific features | |
960 | mtcrf 0x83,r11 ; Set feature flags | |
961 | ||
962 | bf pfAltivecb,cdNoAlt ; No vectors... | |
963 | ||
964 | dssall ; Stop streams | |
965 | ||
966 | cdNoAlt: sync | |
967 | ||
968 | mfspr r5,hid0 ; Get the hid | |
969 | rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables | |
970 | mtspr hid0,r5 ; Turn off dem caches | |
971 | sync | |
972 | ||
973 | bf pfL2b,cdNoL2 ; Skip if no L2... | |
974 | ||
975 | mfspr r5,l2cr ; Get the L2 | |
976 | rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit | |
977 | ||
978 | b cinlaa ; Branch to next line... | |
979 | ||
980 | .align 5 | |
981 | cinlcc: mtspr l2cr,r5 ; Disable L2 | |
982 | sync | |
983 | isync | |
984 | b cdNoL2 ; It is off, we are done... | |
985 | ||
986 | cinlaa: b cinlbb ; Branch to next... | |
987 | ||
988 | cinlbb: sync ; Finish memory stuff | |
989 | isync ; Stop speculation | |
990 | b cinlcc ; Jump back up and turn off cache... | |
991 | ||
992 | cdNoL2: | |
993 | ||
994 | bf pfL3b,cdNoL3 ; Skip down if no L3... | |
995 | ||
996 | mfspr r5,l3cr ; Get the L3 | |
997 | rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit | |
998 | rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off clock enable bit |
999 | mtspr l3cr,r5 ; Disable the caches | |
1000 | sync | |
1001 | ||
1002 | cdNoL3: | |
1003 | blr ; Leave... | |
1004 | ||
1005 | ||
1006 | /* Initialize processor thermal monitoring | |
1007 | * void ml_thrm_init(void) | |
1008 | * | |
1009 | * Build initial TAU registers and start them all going. | |
1010 | * We cannot do this at initial start up because we need to have the processor frequency first. |
1011 | * And just why is this in assembler when it does not have to be?  Because I am just too |
1012 | * lazy to open up a "C" file, that's why. |
1013 | */ | |
1014 | ||
1015 | ; Force a line boundary here |
1016 | .align 5 | |
1017 | .globl EXT(ml_thrm_init) | |
1018 | ||
1019 | LEXT(ml_thrm_init) | |
1020 | ||
1021 | mfsprg r12,0 ; Get the per_proc block |
1022 | lis r11,hi16(EXT(gPEClockFrequencyInfo)) ; Get top of processor information | |
1023 | mfsprg r10,2 ; Get CPU specific features | |
1024 | ori r11,r11,lo16(EXT(gPEClockFrequencyInfo)) ; Get bottom of processor information | |
1025 | mtcrf 0x40,r10 ; Get the installed features | |
1026 | ||
1027 | li r3,lo16(thrmtidm|thrmvm) ; Set for lower-than thermal event at 0 degrees | |
1028 | bflr pfThermalb ; No thermal monitoring on this cpu | |
1029 | mtspr thrm1,r3 ; Do it | |
1030 | ||
1031 | lwz r3,thrmthrottleTemp(r12) ; Get our throttle temperature |
1032 | rlwinm r3,r3,31-thrmthre,thrmthrs,thrmthre ; Position it | |
1033 | ori r3,r3,lo16(thrmvm) ; Set for higher-than event | |
1034 | mtspr thrm2,r3 ; Set it | |
1035 | ||
1036 | lis r4,hi16(1000000) ; Top of million | |
1037 | ; | |
1038 | ; Note: some CPU manuals say this is processor clocks, some say bus rate. The latter | |
1039 | ; makes more sense because otherwise we cannot get over about 400MHz. |
1040 | #if 0 | |
1041 | lwz r3,PECFIcpurate(r11) ; Get the processor speed | |
1042 | #else | |
1043 | lwz r3,PECFIbusrate(r11) ; Get the bus speed | |
1044 | #endif | |
1045 | ori r4,r4,lo16(1000000) ; Bottom of million | |
1046 | lis r7,hi16(thrmsitvm>>1) ; Get top of highest possible value | |
1047 | divwu r3,r3,r4 ; Get number of cycles per microsecond |
1048 | ori r7,r7,lo16(thrmsitvm>>1) ; Get the bottom of the highest possible value | |
1049 | addi r3,r3,1 ; Ensure we have enough |
1050 | mulli r3,r3,20 ; Get 20 microseconds worth of cycles | |
1051 | cmplw r3,r7 ; Check against max | |
1052 | ble+ smallenuf ; It is ok... | |
1053 | mr r3,r7 ; Saturate | |
1054 | ||
1055 | smallenuf: rlwinm r3,r3,31-thrmsitve,thrmsitvs,thrmsitve ; Position | |
1056 | ori r3,r3,lo16(thrmem) ; Enable with at least a 20 microsecond sample |
1057 | stw r3,thrm3val(r12) ; Save this in case we need it later | |
1058 | mtspr thrm3,r3 ; Do it | |
1059 | blr | |
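/* Restated in C for clarity, the sample-interval computation above does
 * roughly the following; the 50MHz bus figure is illustrative, and thrmsitvm
 * is the assembler constant used above.
 *
 *	unsigned int bus_hz   = 50000000;
 *	unsigned int per_usec = bus_hz / 1000000 + 1;	// 51 cycles per microsecond
 *	unsigned int interval = per_usec * 20;		// 1020 cycles, about 20 microseconds
 *	if (interval > (thrmsitvm >> 1))
 *		interval = thrmsitvm >> 1;		// saturate at the field maximum
 */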
1060 | ||
1061 | ||
1062 | /* Set thermal monitor bounds | |
1063 | * void ml_thrm_set(unsigned int low, unsigned int high) | |
1064 | * | |
1065 | * Set TAU to interrupt below low and above high. A value of | |
1066 | * zero disables interruptions in that direction. | |
1067 | */ | |
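/* A brief usage sketch; the temperature values are made up for illustration.
 *
 *	ml_thrm_set(10, 70);	// interrupt below 10 degrees or above 70 degrees
 *	ml_thrm_set(0, 70);	// zero disables the low side; high side only
 */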
1068 | ||
1069 | ; Force a line boundary here |
1070 | .align 5 | |
1071 | .globl EXT(ml_thrm_set) | |
1072 | ||
1073 | LEXT(ml_thrm_set) | |
1074 | ||
1075 | mfmsr r0 ; Get the MSR | |
1076 | rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear EE bit | |
1077 | mtmsr r6 | |
1078 | ||
1079 | mfsprg r12,0 ; Get the per_proc block |
1080 | ||
1081 | rlwinm. r6,r3,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled | |
1082 | mfsprg r9,2 ; Get CPU specific features | |
1083 | stw r3,thrmlowTemp(r12) ; Set the low temperature |
1084 | mtcrf 0x40,r9 ; See if we can thermal this machine | |
1085 | rlwinm r9,r9,(((31-thrmtie)+(pfThermIntb+1))&31),thrmtie,thrmtie ; Set interrupt enable if this machine can handle it | |
1086 | bf pfThermalb,tsetcant ; No can do... | |
1087 | beq tsetlowo ; We are setting the low off... | |
1088 | ori r6,r6,lo16(thrmtidm|thrmvm) ; Set the lower-than and valid bit | |
1089 | or r6,r6,r9 ; Set interruption request if supported | |
1090 | ||
1091 | tsetlowo: mtspr thrm1,r6 ; Cram the register | |
1092 | ||
1093 | rlwinm. r6,r4,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled | |
1094 | stw r4,thrmhighTemp(r12) ; Set the high temperature |
1095 | beq tsethigho ; We are setting the high off... | |
1096 | ori r6,r6,lo16(thrmvm) ; Set valid bit | |
1097 | or r6,r6,r9 ; Set interruption request if supported | |
1098 | ||
1099 | tsethigho: mtspr thrm2,r6 ; Cram the register | |
1100 | ||
1101 | tsetcant: mtmsr r0 ; Reenable interruptions | |
1102 | blr ; Leave... | |
1103 | ||
1104 | /* Read processor temperature |
1105 | * unsigned int ml_read_temp(void) | |
1106 | * | |
1107 | */ | |
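/* A caller sketch; throttle_point is an illustrative value. A result of all
 * ones (0xFFFFFFFF) means the processor has no thermal assist to read.
 *
 *	unsigned int temp = ml_read_temp();
 *	if (temp != 0xFFFFFFFF && temp >= throttle_point)
 *		(void) ml_throttle(128);	// arbitrary step; see ml_throttle below
 */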
1108 | ||
1109 | ; Force a line boundary here |
1110 | .align 5 | |
1111 | .globl EXT(ml_read_temp) | |
1112 | ||
1113 | LEXT(ml_read_temp) | |
1114 | ||
1115 | mfmsr r9 ; Save the MSR | |
1116 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions | |
1117 | li r5,15 ; Starting point for ranging (start at 15 so we do not overflow) | |
1118 | mfsprg r7,2 ; Get CPU specific features | |
1119 | mtmsr r8 ; Do not allow interruptions | |
1120 | mtcrf 0x40,r7 ; See if we can thermal this machine | |
1121 | bf pfThermalb,thrmcant ; No can do... | |
1122 | ||
1123 | mfspr r11,thrm1 ; Save thrm1 | |
1124 | ||
1125 | thrmrange: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it | |
1126 | ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than |
1127 | ||
1128 | mtspr thrm1,r4 ; Set the test value | |
1129 | ||
1130 | thrmreada: mfspr r3,thrm1 ; Get the thermal register back | |
1131 | rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet? | |
1132 | beq+ thrmreada ; Nope... | |
1133 | ||
1134 | rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold? | |
1135 | bne thrmsearch ; No, we went over... | |
1136 | ||
1137 | addi r5,r5,16 ; Start by trying every 16 degrees | |
1138 | cmplwi r5,127 ; Have we hit the max? | |
1139 | blt- thrmrange ; Got some more to do... | |
1140 | ||
1141 | thrmsearch: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it | |
1142 | ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparison for less than |
1143 | ||
1144 | mtspr thrm1,r4 ; Set the test value | |
1145 | ||
1146 | thrmread: mfspr r3,thrm1 ; Get the thermal register back | |
1147 | rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet? | |
1148 | beq+ thrmread ; Nope... | |
1149 | ||
1150 | rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold? | |
1151 | beq thrmdone ; No, we hit it... | |
1152 | addic. r5,r5,-1 ; Go down a degree | |
1153 | bge+ thrmsearch ; Try again (until we are below freezing)... | |
1154 | ||
1155 | thrmdone: addi r3,r5,1 ; Return the temperature (bump it up to make it correct) |
1156 | mtspr thrm1,r11 ; Restore the thermal register | |
1157 | mtmsr r9 ; Re-enable interruptions | |
1158 | blr ; Leave... | |
1159 | ||
1160 | thrmcant: eqv r3,r3,r3 ; Return bogus temperature because we cannot read it |
1161 | mtmsr r9 ; Re-enable interruptions | |
1162 | blr ; Leave... | |
1163 | ||
1164 | /* Throttle processor speed up or down | |
1165 | * unsigned int ml_throttle(unsigned int step) | |
1166 | * | |
1167 | * Returns old speed and sets new. Both step and return are values from 0 to | |
1168 | * 255 that define the number of throttle steps; 0 is off and the maximum is "ictcfim" divided by 2. |
1169 | * | |
1170 | */ | |
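/* A save-and-restore sketch based on the description above; the step value
 * 128 is arbitrary.
 *
 *	unsigned int old = ml_throttle(128);	// throttle to a mid-range step
 *	// ... run cooler for a while ...
 *	(void) ml_throttle(old);		// hand the previous step back
 */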
1171 | ||
1172 | ; Force a line boundary here |
1173 | .align 5 | |
1174 | .globl EXT(ml_throttle) | |
1175 | ||
1176 | LEXT(ml_throttle) | |
1177 | ||
1178 | mfmsr r9 ; Save the MSR | |
1179 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions | |
1180 | cmplwi r3,lo16(ictcfim>>1) ; See if we are going too far | |
1181 | mtmsr r8 ; Do not allow interruptions | |
1182 | ble+ throtok ; Throttle value is ok... | |
1183 | li r3,lo16(ictcfim>>1) ; Set max | |
1184 | ||
1185 | throtok: rlwinm. r4,r3,1,ictcfib,ictcfie ; Set the throttle | |
1186 | beq throtoff ; Skip if we are turning it off... | |
1187 | ori r4,r4,lo16(thrmvm) ; Turn on the valid bit | |
1188 | ||
1189 | throtoff: mfspr r3,ictc ; Get the old throttle | |
1190 | mtspr ictc,r4 ; Set the new | |
1191 | rlwinm r3,r3,31,1,31 ; Shift throttle value over | |
1192 | mtmsr r9 ; Restore interruptions | |
1193 | blr ; Return... | |
1194 | ||
1195 | /* | |
1196 | ** ml_get_timebase() | |
1197 | ** | |
1198 | ** Entry - R3 contains pointer to 64 bit structure. | |
1199 | ** | |
1200 | ** Exit - 64 bit structure filled in. | |
1201 | ** | |
1202 | */ | |
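/* The loop below re-reads the upper half until it is stable, which is the
 * standard way to get a consistent 64-bit timebase on 32-bit PowerPC. A caller
 * sketch; the structure layout (high word first) follows the stores below, but
 * the field names are illustrative.
 *
 *	struct { unsigned int hi, lo; } tb;
 *
 *	ml_get_timebase(&tb);
 *	unsigned long long ticks = ((unsigned long long)tb.hi << 32) | tb.lo;
 */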
1203 | ; Force a line boundary here |
1204 | .align 5 | |
1205 | .globl EXT(ml_get_timebase) | |
1206 | ||
1207 | LEXT(ml_get_timebase) | |
1208 | ||
1209 | loop: | |
1210 | mftbu r4 | |
1211 | mftb r5 | |
1212 | mftbu r6 | |
1213 | cmpw r6, r4 | |
1214 | bne- loop | |
1215 | ||
1216 | stw r4, 0(r3) | |
1217 | stw r5, 4(r3) | |
1218 | ||
1219 | blr | |
1220 | ||
1221 | /* | |
1222 | ** ml_sense_nmi() | |
1223 | ** | |
1224 | */ | |
1225 | ; Force a line boundary here |
1226 | .align 5 | |
1227 | .globl EXT(ml_sense_nmi) | |
1228 | ||
1229 | LEXT(ml_sense_nmi) | |
1230 | ||
1231 | blr ; Leave... | |
1232 |