2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <ppc/proc_reg.h>
32 #include <mach/ppc/vm_param.h>
33 #include <ppc/exception.h>
37 * ml_set_physical() -- turn off DR and (if 64-bit) turn SF on
38 * it is assumed that pf64Bit is already in cr6
39 * ml_set_physical_get_ffs() -- turn DR off, SF on, and get feature flags
40 * ml_set_physical_disabled() -- turn DR and EE off, SF on, get feature flags
41 * ml_set_translation_off() -- turn DR, IR, and EE off, SF on, get feature flags
43 * Callable only from assembler, these return:
44 * r2 -- new MSR
45 * r11 -- old MSR
46 * r10 -- feature flags (pf64Bit etc, ie SPRG 2)
47 * cr6 -- feature flags 24-27, ie pf64Bit, pf128Byte, and pf32Byte
49 * Uses r0 and r2. ml_set_translation_off also uses r3 and cr5.
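 *
 * A minimal usage sketch (hypothetical caller shown for illustration only;
 * the choice of r9 and the sample load are assumptions, not part of this file):
 *
 *		mflr	r9						// save our return; the bl below clobbers LR
 *		bl		EXT(ml_set_physical)	// DR off (SF on when 64-bit); old MSR -> r11
 *		lwz		r5,0(r3)				// touch memory with translation off
 *		bl		EXT(ml_restore)			// put back the MSR saved in r11
 *		mtlr	r9						// recover our return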
53 .globl EXT(ml_set_translation_off)
54 LEXT(ml_set_translation_off)
55 mfsprg r10,2 // get feature flags
57 mtcrf 0x02,r10 // move pf64Bit etc to cr6
58 ori r0,r0,lo16(MASK(MSR_EE)+MASK(MSR_FP)+MASK(MSR_IR)+MASK(MSR_DR)) // turn off all 4
60 oris r0,r0,hi16(MASK(MSR_VEC)) // Turn off vector too
61 mtcrf 0x04,r10 // move pfNoMSRir etc to cr5
62 andc r2,r11,r0 // turn off EE, FP, VEC, IR, and DR
63 bt++ pf64Bitb,ml_set_physical_64 // skip if 64-bit (only they take the hint)
64 bf pfNoMSRirb,ml_set_physical_32 // skip if we can load MSR directly
65 li r0,loadMSR // Get the MSR setter SC
66 mr r3,r2 // copy new MSR to r3
71 .globl EXT(ml_set_physical_disabled)
73 LEXT(ml_set_physical_disabled)
75 mfsprg r10,2 // get feature flags
76 ori r0,r0,lo16(MASK(MSR_EE)) // turn EE off (FP/VEC/DR are masked at the join point below)
77 mtcrf 0x02,r10 // move pf64Bit etc to cr6
78 b ml_set_physical_join
81 .globl EXT(ml_set_physical_get_ffs)
83 LEXT(ml_set_physical_get_ffs)
84 mfsprg r10,2 // get feature flags
85 mtcrf 0x02,r10 // move pf64Bit etc to cr6
87 .globl EXT(ml_set_physical)
90 li r0,0 // do not turn off interrupts
93 oris r0,r0,hi16(MASK(MSR_VEC)) // Always gonna turn off vectors
95 ori r0,r0,lo16(MASK(MSR_DR)+MASK(MSR_FP)) // always turn off DR and FP bit
96 andc r2,r11,r0 // turn off DR and maybe EE
97 bt++ pf64Bitb,ml_set_physical_64 // skip if 64-bit (only they take the hint)
99 mtmsr r2 // turn off translation
104 li r0,1 // get a 1 to slam into SF
105 rldimi r2,r0,63,MSR_SF_BIT // set SF bit (bit 0)
106 mtmsrd r2 // set 64-bit mode, turn off data relocation
112 * ml_restore(old_MSR)
114 * Callable only from assembler, restores the MSR in r11 saved by ml_set_physical.
115 * We assume cr6 and r11 are as set by ml_set_physical, ie:
116 * cr6 - pf64Bit flag (feature flags 24-27)
117 * r11 - old MSR to restore
121 .globl EXT(ml_restore)
124 bt++ pf64Bitb,ml_restore_64 // handle 64-bit cpus (only they take the hint)
125 mtmsr r11 // restore a 32-bit MSR
130 mtmsrd r11 // restore a 64-bit MSR
135 /* PCI config cycle probing
137 * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val)
139 * Read the memory location at physical address paddr.
140 * This is a part of a device probe, so there is a good chance we will
141 * have a machine check here. So we have to be able to handle that.
142 * We assume that machine checks are enabled both in MSR and HIDs
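 *
 * A minimal caller sketch (hypothetical; probe_pa and noDevice are
 * illustrative names, not symbols in this file):
 *
 *		lis		r3,hi16(probe_pa)		; physical address to probe
 *		ori		r3,r3,lo16(probe_pa)
 *		addi	r4,r1,FM_ARG0			; r4 -> word to receive the value read
 *		bl		EXT(ml_probe_read)		; r3 = 1 on success, 0 on machine check
 *		mr.		r3,r3					; test the boolean result
 *		beq		noDevice				; nothing out there...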
145 ; Force a line boundary here
147 .globl EXT(ml_probe_read)
151 mfsprg r9,2 ; Get feature flags
153 rlwinm. r0,r9,0,pf64Bitb,pf64Bitb ; Are we on a 64-bit machine?
154 rlwinm r3,r3,0,0,31 ; Clean up for 64-bit machines
155 bne++ mpr64bit ; Go do this the 64-bit way...
157 mpr32bit: lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag
158 mfmsr r0 ; Save the current MSR
159 ori r8,r8,lo16(MASK(MSR_FP)) ; Add the FP flag
161 neg r10,r3 ; Number of bytes to end of page
162 andc r0,r0,r8 ; Clear VEC and FP
163 rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page boundary
164 ori r8,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR)) ; Drop EE, IR, and DR
165 mr r12,r3 ; Save the load address
166 andc r2,r0,r8 ; Clear VEC, FP, EE, IR, and DR
167 mtcrf 0x04,r9 ; Set the features
168 cmplwi cr1,r10,4 ; At least 4 bytes left in page?
169 beq- mprdoit ; We are right on the boundary...
171 bltlr- cr1 ; No, just return failure...
175 bt pfNoMSRirb,mprNoMSR ; No MSR...
177 mtmsr r2 ; Translation and all off
178 isync ; Toss prefetch
183 li r0,loadMSR ; Get the MSR setter SC
184 mr r3,r2 ; Get new MSR
190 mfspr r6, hid0 ; Get a copy of hid0
192 rlwinm. r5, r9, 0, pfNoMuMMCKb, pfNoMuMMCKb ; Check for NoMuMMCK
195 rlwinm r5, r6, 0, ice+1, ice-1 ; Turn off L1 I-Cache
197 isync ; Wait for I-Cache off
198 rlwinm r5, r6, 0, mum+1, mum-1 ; Turn off MuM w/ I-Cache on
203 ; We need to ensure that there is no more than 1 BAT register that
204 ; can get a hit. There could be repercussions beyond the ken
205 ; of mortal man. It is best not to tempt fate.
208 ; Note: we will reload these from the shadow BATs later
210 li r10,0 ; Clear a register
212 sync ; Make sure all is well
214 mtdbatu 1,r10 ; Invalidate DBAT 1
215 mtdbatu 2,r10 ; Invalidate DBAT 2
216 mtdbatu 3,r10 ; Invalidate DBAT 3
218 rlwinm r10,r12,0,0,14 ; Round down to a 128k boundary
219 ori r11,r10,0x32 ; Set uncached, coherent, R/W
220 ori r10,r10,2 ; Make the upper half (128k, valid supervisor)
221 mtdbatl 0,r11 ; Set lower BAT first
222 mtdbatu 0,r10 ; Now the upper
223 sync ; Just make sure
225 dcbf 0,r12 ; Make sure we kill the cache to avoid paradoxes
228 ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation
229 mtmsr r11 ; Do it for real
230 isync ; Make sure of it
232 eieio ; Make sure of all previous accesses
233 sync ; Make sure it is all caught up
235 lwz r11,0(r12) ; Get it and maybe machine check here
237 eieio ; Make sure of ordering again
238 sync ; Get caught up yet again
239 isync ; Do not go further till we are here
241 mtmsr r2 ; Turn translation back off
244 lis r10,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
245 ori r10,r10,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address
247 lwz r5,0(r10) ; Pick up DBAT 0 high
248 lwz r6,4(r10) ; Pick up DBAT 0 low
249 lwz r7,8(r10) ; Pick up DBAT 1 high
250 lwz r8,16(r10) ; Pick up DBAT 2 high
251 lwz r9,24(r10) ; Pick up DBAT 3 high
253 mtdbatu 0,r5 ; Restore DBAT 0 high
254 mtdbatl 0,r6 ; Restore DBAT 0 low
255 mtdbatu 1,r7 ; Restore DBAT 1 high
256 mtdbatu 2,r8 ; Restore DBAT 2 high
257 mtdbatu 3,r9 ; Restore DBAT 3 high
262 mtmsr r0 ; Restore translation and exceptions
263 isync ; Toss speculations
265 stw r11,0(r4) ; Save the loaded value
268 ; Force a line boundary here. This means we will be able to check addresses better
270 .globl EXT(ml_probe_read_mck)
271 LEXT(ml_probe_read_mck)
274 /* PCI config cycle probing - 64-bit
276 * boolean_t ml_probe_read_64(addr64_t paddr, unsigned int *val)
278 * Read the memory location at physical address paddr.
279 * This is a part of a device probe, so there is a good chance we will
280 * have a machine check here. So we have to be able to handle that.
281 * We assume that machine checks are enabled both in MSR and HIDs
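 *
 * Note: under the 32-bit PowerPC calling convention the addr64_t arrives as
 * two 32-bit halves, high in r3 and low in r4; the code below merges them
 * into a single 64-bit address in r3 and moves the val pointer up from r5.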
284 ; Force a line boundary here
286 .globl EXT(ml_probe_read_64)
288 LEXT(ml_probe_read_64)
290 mfsprg r9,2 ; Get feature flags
291 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
292 rlwinm. r0,r9,0,pf64Bitb,pf64Bitb ; Are we on a 64-bit machine?
293 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
295 mr r4,r5 ; Move result to common register
296 beq-- mpr32bit ; Go do this the 32-bit way...
298 mpr64bit: andi. r0,r3,3 ; Check if we are on a word boundary
299 li r0,0 ; Clear the EE bit (and everything else for that matter)
300 bne-- mprFail ; Boundary not good...
301 mfmsr r11 ; Get the MSR
302 mtmsrd r0,1 ; Clear EE (the L=1 form alters only EE and RI)
303 rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
304 mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed)
305 or r12,r10,r11 ; Turn on EE if on before we turned it off
306 ori r0,r0,lo16(MASK(MSR_IR)|MASK(MSR_DR)) ; Get the IR and DR bits
308 sldi r2,r2,63 ; Get the 64-bit bit
309 andc r10,r10,r0 ; Clear IR and DR
310 or r10,r10,r2 ; Set 64-bit
313 mtmsrd r10 ; Translation and EE off, 64-bit on
316 sldi r0,r0,32+8 ; Get the right bit to inhibit caching
318 mfspr r8,hid4 ; Get HID4
319 or r2,r8,r0 ; Set bit to make real accesses cache-inhibited
321 mtspr hid4,r2 ; Make real accesses cache-inhibited
322 isync ; Toss prefetches
324 lis r7,0xE000 ; Get the unlikeliest ESID possible
325 srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000
326 slbie r7 ; Make sure the ERAT is cleared
331 eieio ; Make sure of all previous accesses
333 lwz r11,0(r3) ; Get it and maybe machine check here
335 eieio ; Make sure of ordering again
336 sync ; Get caught up yet again
337 isync ; Do not go further till we are here
340 mtspr hid4,r8 ; Make real accesses not cache-inhibited
341 isync ; Toss prefetches
343 lis r7,0xE000 ; Get the unlikeliest ESID possible
344 srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000
345 slbie r7 ; Make sure the ERAT is cleared
347 mtmsrd r12 ; Restore entry MSR
350 stw r11,0(r4) ; Pass back the result
351 li r3,1 ; Indicate success
354 mprFail: li r3,0 ; Set failure
357 ; Force a line boundary here. This means we will be able to check addresses better
359 .globl EXT(ml_probe_read_mck_64)
360 LEXT(ml_probe_read_mck_64)
363 /* Read physical address byte
365 * unsigned int ml_phys_read_byte(vm_offset_t paddr)
366 * unsigned int ml_phys_read_byte_64(addr64_t paddr)
368 * Read the byte at physical address paddr. Memory should not be cache inhibited.
371 ; Force a line boundary here
374 .globl EXT(ml_phys_read_byte_64)
376 LEXT(ml_phys_read_byte_64)
378 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
379 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
380 b ml_phys_read_byte_join
382 .globl EXT(ml_phys_read_byte)
384 LEXT(ml_phys_read_byte)
385 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
386 ml_phys_read_byte_join: ; r3 = address to read (reg64_t)
387 mflr r11 ; Save the return
388 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
390 lbz r3,0(r3) ; Get the byte
391 b rdwrpost ; Clean up and leave...
394 /* Read physical address half word
396 * unsigned int ml_phys_read_half(vm_offset_t paddr)
397 * unsigned int ml_phys_read_half_64(addr64_t paddr)
399 * Read the half word at physical address paddr. Memory should not be cache inhibited.
402 ; Force a line boundary here
405 .globl EXT(ml_phys_read_half_64)
407 LEXT(ml_phys_read_half_64)
409 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
410 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
411 b ml_phys_read_half_join
413 .globl EXT(ml_phys_read_half)
415 LEXT(ml_phys_read_half)
416 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
417 ml_phys_read_half_join: ; r3 = address to read (reg64_t)
418 mflr r11 ; Save the return
419 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
421 lhz r3,0(r3) ; Get the half word
422 b rdwrpost ; Clean up and leave...
425 /* Read physical address word
427 * unsigned int ml_phys_read(vm_offset_t paddr)
428 * unsigned int ml_phys_read_64(addr64_t paddr)
429 * unsigned int ml_phys_read_word(vm_offset_t paddr)
430 * unsigned int ml_phys_read_word_64(addr64_t paddr)
432 * Read the word at physical address paddr. Memory should not be cache inhibited.
435 ; Force a line boundary here
438 .globl EXT(ml_phys_read_64)
439 .globl EXT(ml_phys_read_word_64)
441 LEXT(ml_phys_read_64)
442 LEXT(ml_phys_read_word_64)
444 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
445 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
446 b ml_phys_read_word_join
448 .globl EXT(ml_phys_read)
449 .globl EXT(ml_phys_read_word)
452 LEXT(ml_phys_read_word)
453 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
454 ml_phys_read_word_join: ; r3 = address to read (reg64_t)
455 mflr r11 ; Save the return
456 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
458 lwz r3,0(r3) ; Get the word
459 b rdwrpost ; Clean up and leave...
462 /* Read physical address double word
464 * unsigned long long ml_phys_read_double(vm_offset_t paddr)
465 * unsigned long long ml_phys_read_double_64(addr64_t paddr)
467 * Read the double word at physical address paddr. Memory should not be cache inhibited.
470 ; Force a line boundary here
473 .globl EXT(ml_phys_read_double_64)
475 LEXT(ml_phys_read_double_64)
477 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
478 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
479 b ml_phys_read_double_join
481 .globl EXT(ml_phys_read_double)
483 LEXT(ml_phys_read_double)
484 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
485 ml_phys_read_double_join: ; r3 = address to read (reg64_t)
486 mflr r11 ; Save the return
487 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
489 lwz r4,4(r3) ; Get the low word
490 lwz r3,0(r3) ; Get the high word
491 b rdwrpost ; Clean up and leave...
494 /* Write physical address byte
496 * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
497 * void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
499 * Write the byte at physical address paddr. Memory should not be cache inhibited.
503 .globl EXT(ml_phys_write_byte_64)
505 LEXT(ml_phys_write_byte_64)
507 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
508 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
509 mr r4,r5 ; Copy over the data
510 b ml_phys_write_byte_join
512 .globl EXT(ml_phys_write_byte)
514 LEXT(ml_phys_write_byte)
515 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
516 ml_phys_write_byte_join: ; r3 = address to write (reg64_t), r4 = data
517 mflr r11 ; Save the return
518 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
520 stb r4,0(r3) ; Set the byte
521 b rdwrpost ; Clean up and leave...
524 /* Write physical address half word
526 * void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
527 * void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
529 * Write the half word at physical address paddr. Memory should not be cache inhibited.
533 .globl EXT(ml_phys_write_half_64)
535 LEXT(ml_phys_write_half_64)
537 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
538 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
539 mr r4,r5 ; Copy over the data
540 b ml_phys_write_half_join
542 .globl EXT(ml_phys_write_half)
544 LEXT(ml_phys_write_half)
545 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
546 ml_phys_write_half_join: ; r3 = address to write (reg64_t), r4 = data
547 mflr r11 ; Save the return
548 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
550 sth r4,0(r3) ; Set the half word
551 b rdwrpost ; Clean up and leave...
554 /* Write physical address word
556 * void ml_phys_write(vm_offset_t paddr, unsigned int data)
557 * void ml_phys_write_64(addr64_t paddr, unsigned int data)
558 * void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
559 * void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
561 * Write the word at physical address paddr. Memory should not be cache inhibited.
565 .globl EXT(ml_phys_write_64)
566 .globl EXT(ml_phys_write_word_64)
568 LEXT(ml_phys_write_64)
569 LEXT(ml_phys_write_word_64)
571 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
572 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
573 mr r4,r5 ; Copy over the data
574 b ml_phys_write_word_join
576 .globl EXT(ml_phys_write)
577 .globl EXT(ml_phys_write_word)
580 LEXT(ml_phys_write_word)
581 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
582 ml_phys_write_word_join: ; r3 = address to write (reg64_t), r4 = data
583 mflr r11 ; Save the return
584 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
586 stw r4,0(r3) ; Set the word
587 b rdwrpost ; Clean up and leave...
590 /* Write physical address double word
592 * void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
593 * void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
595 * Write the double word at physical address paddr. Memory should not be cache inhibited.
599 .globl EXT(ml_phys_write_double_64)
601 LEXT(ml_phys_write_double_64)
603 rlwinm r3,r3,0,1,0 ; Copy low 32 bits to top 32
604 rlwimi r3,r4,0,0,31 ; Insert low part of 64-bit address in bottom 32 bits
605 mr r4,r5 ; Copy over the high data
606 mr r5,r6 ; Copy over the low data
607 b ml_phys_write_double_join
609 .globl EXT(ml_phys_write_double)
611 LEXT(ml_phys_write_double)
612 rlwinm r3,r3,0,0,31 ; truncate address to 32-bits
613 ml_phys_write_double_join: ; r3 = address to write (reg64_t), r4,r5 = data (long long)
614 mflr r11 ; Save the return
615 bl rdwrpre ; Get set up, translation/interrupts off, 64-bit on, etc.
617 stw r4,0(r3) ; Set the high word
618 stw r5,4(r3) ; Set the low word
619 b rdwrpost ; Clean up and leave...
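;
;	rdwrpre/rdwrpost -- common bracketing code for the ml_phys_* family above.
;	rdwrpre expects the 64-bit physical address in r3 and the caller's return
;	address in r11 (each entry point saves LR there); it returns with VEC, FP,
;	and EE off and translation off, and on 64-bit machines also turns SF on
;	and, for I/O-space addresses, makes real-mode accesses cache-inhibited
;	through HID4. rdwrpost reverses all of that and returns through r11. The
;	entry MSR rides in r10, the saved HID4 in r8, and cr7 remembers whether
;	HID4 was changed.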
624 rdwrpre: mfsprg r12,2 ; Get feature flags
625 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector flag
626 mfmsr r10 ; Save the MSR
627 ori r8,r8,lo16(MASK(MSR_FP)) ; Add the FP flag
628 mtcrf 0x02,r12 ; move pf64Bit
629 andc r10,r10,r8 ; Clear VEC and FP
630 ori r9,r8,lo16(MASK(MSR_EE)|MASK(MSR_IR)|MASK(MSR_DR)) ; Drop EE, DR, and IR
631 li r2,1 ; Prepare for 64 bit
632 andc r9,r10,r9 ; Clear VEC, FP, EE, IR, and DR
633 bf-- pf64Bitb,rdwrpre32 ; Join 32-bit code...
635 srdi r7,r3,31 ; Get a 1 if address is in I/O memory
636 rldimi r9,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
637 cmpldi cr7,r7,1 ; Is source in I/O memory?
638 mtmsrd r9 ; set 64-bit mode, turn off EE, DR, and IR
641 sldi r0,r2,32+8 ; Get the right bit to turn off caching
643 bnelr++ cr7 ; We are not in the I/O area, all ready...
645 mfspr r8,hid4 ; Get HID4
646 or r2,r8,r0 ; Set bit to make real accesses cache-inhibited
648 mtspr hid4,r2 ; Make real accesses cache-inhibited
649 isync ; Toss prefetches
651 lis r7,0xE000 ; Get the unlikeliest ESID possible
652 srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000
653 slbie r7 ; Make sure the ERAT is cleared
657 blr ; Finally, all ready...
661 rdwrpre32: rlwimi r9,r10,0,MSR_IR_BIT,MSR_IR_BIT ; Leave the IR bit unchanged
662 mtmsr r9 ; Drop EE, DR, and leave IR unchanged
664 blr ; All set up, leave...
668 rdwrpost: mtlr r11 ; Restore the return
669 bt++ pf64Bitb,rdwrpost64 ; Join 64-bit code...
671 mtmsr r10 ; Restore entry MSR (sans FP and VEC)
675 rdwrpost64: bne++ cr7,rdwrpcok ; Skip enabling real mode caching if we did not change it...
678 mtspr hid4,r8 ; Make real accesses not cache-inhibited
679 isync ; Toss prefetches
681 lis r7,0xE000 ; Get the unlikeliest ESID possible
682 srdi r7,r7,1 ; Make 0x7FFFFFFFF0000000
683 slbie r7 ; Make sure the ERAT is cleared
685 rdwrpcok: mtmsrd r10 ; Restore entry MSR (sans FP and VEC)
690 /* set interrupts enabled or disabled
692 * boolean_t ml_set_interrupts_enabled(boolean_t enable)
694 * Set EE bit to "enable" and return old value as boolean
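 *
 * A minimal sketch of the usual save/restore pattern (hypothetical caller;
 * r31 is assumed to be a register the caller has preserved):
 *
 *		li		r3,0							; disable interruptions
 *		bl		EXT(ml_set_interrupts_enabled)	; previous state comes back in r3
 *		mr		r31,r3							; hang on to it
 *		...										; critical section
 *		mr		r3,r31							; put back whatever was there
 *		bl		EXT(ml_set_interrupts_enabled)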
697 ; Force a line boundary here
699 .globl EXT(ml_set_interrupts_enabled)
701 LEXT(ml_set_interrupts_enabled)
703 andi. r4,r3,1 ; Are we turning interruptions on?
704 lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
705 mfmsr r5 ; Get the current MSR
706 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_FP)) ; Get float enable and EE enable
707 rlwinm r3,r5,17,31,31 ; Set return value
708 andc r5,r5,r0 ; Force VEC and FP off
709 bne CheckPreemption ; Interrupts going on, check ASTs...
711 mtmsr r5 ; Slam disable (always going disabled here)
712 isync ; Need this because FP/Vec might go off
718 mfsprg r9,1 ; Get current activation
719 lwz r7,ACT_PER_PROC(r9) ; Get the per_proc block
720 ori r5,r5,lo16(MASK(MSR_EE)) ; Turn on the enable
721 lwz r8,PP_PENDING_AST(r7) ; Get pending AST mask
722 li r6,AST_URGENT ; Get the type we will preempt for
723 lwz r7,ACT_PREEMPT_CNT(r9) ; Get preemption count
724 lis r0,hi16(DoPreemptCall) ; High part of Preempt FW call
725 cmpwi cr1,r7,0 ; Are preemptions masked off?
726 and. r8,r8,r6 ; Are we urgent?
727 crorc cr1_eq,cr0_eq,cr1_eq ; Remember if preemptions are masked or not urgent
728 ori r0,r0,lo16(DoPreemptCall) ; Bottom of FW call
730 mtmsr r5 ; Restore the MSR now, before we can preempt
731 isync ; Need this because FP/Vec might go off
733 beqlr++ cr1 ; Return if no preemption...
737 ; Force a line boundary here
739 .globl EXT(timer_update)
742 stw r4,TIMER_HIGHCHK(r3)
746 stw r4,TIMER_HIGH(r3)
749 ; Force a line boundary here
751 .globl EXT(timer_grab)
754 0: lwz r11,TIMER_HIGH(r3)
757 lwz r9,TIMER_HIGHCHK(r3)
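;
;		The TIMER_HIGHCHK/TIMER_HIGH pair gives a lock-free 64-bit timer:
;		timer_update stores the new high word to TIMER_HIGHCHK first and to
;		TIMER_HIGH last (the low word goes in between), while timer_grab keeps
;		re-reading until the two high words match, so it always sees a
;		consistent 64-bit value without taking a lock.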
763 ; Force a line boundary here
765 .globl EXT(thread_timer_event)
767 LEXT(thread_timer_event)
768 mfsprg r10,1 ; Get the current activation
769 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
770 addi r10,r10,PP_PROCESSOR
771 lwz r11,THREAD_TIMER(r10)
773 lwz r9,TIMER_LOW(r11)
774 lwz r7,TIMER_TSTAMP(r11)
775 lwz r8,TIMER_TSTAMP+4(r11)
782 lwz r6,TIMER_HIGH(r11)
784 stw r7,TIMER_HIGHCHK(r11)
786 stw r8,TIMER_LOW(r11)
788 stw r7,TIMER_HIGH(r11)
791 0: stw r8,TIMER_LOW(r11)
793 1: stw r5,THREAD_TIMER(r10)
794 stw r3,TIMER_TSTAMP(r5)
795 stw r4,TIMER_TSTAMP+4(r5)
798 ; Force a line boundary here
800 .globl EXT(state_event)
803 mfsprg r10,1 ; Get the current activation
804 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
805 addi r10,r10,PP_PROCESSOR
806 lwz r11,CURRENT_STATE(r10)
808 lwz r9,TIMER_LOW(r11)
809 lwz r7,TIMER_TSTAMP(r11)
810 lwz r8,TIMER_TSTAMP+4(r11)
817 lwz r6,TIMER_HIGH(r11)
819 stw r7,TIMER_HIGHCHK(r11)
821 stw r8,TIMER_LOW(r11)
823 stw r7,TIMER_HIGH(r11)
826 0: stw r8,TIMER_LOW(r11)
828 1: stw r5,CURRENT_STATE(r10)
829 stw r3,TIMER_TSTAMP(r5)
830 stw r4,TIMER_TSTAMP+4(r5)
833 /* Set machine into idle power-saving mode.
835 * void machine_idle(void)
837 * We will use the PPC NAP or DOZE for this.
838 * This call always returns. Must be called with spllo (i.e., interruptions enabled).
843 ; Force a line boundary here
845 .globl EXT(machine_idle)
849 mfsprg r12,1 ; Get the current activation
850 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
851 lhz r10,PP_CPU_FLAGS(r12) ; Get the flags
852 lwz r11,PP_INTS_ENABLED(r12) ; Get interrupt enabled state
853 andi. r10,r10,SignalReady ; Is the SignalReady flag set?
854 cmpwi cr1,r11,0 ; Are interrupts disabled?
855 cror cr0_eq, cr1_eq, cr0_eq ; Interrupts disabled or signal not ready?
856 mfmsr r3 ; Save the MSR
858 beq-- nonap ; Yes, return after re-enabling interrupts
859 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector flag
860 ori r0,r0,lo16(MASK(MSR_FP)) ; Add the FP flag
861 andc r3,r3,r0 ; Clear VEC and FP
862 ori r0,r0,lo16(MASK(MSR_EE)) ; Drop EE also
863 andc r5,r3,r0 ; Clear VEC, FP, and EE
865 mtmsr r5 ; Hold up interruptions for now
866 isync ; May have messed with fp/vec
867 mfsprg r11,2 ; Get CPU specific features
868 mfspr r6,hid0 ; Get the current power-saving mode
869 mtcrf 0xC7,r11 ; Get the facility flags
871 lis r4,hi16(napm) ; Assume we can nap
872 bt pfWillNapb,yesnap ; Yeah, nap is ok...
874 lis r4,hi16(dozem) ; Assume we can doze
875 bt pfCanDozeb,yesnap ; We can sleep or doze on this machine...
877 nonap: ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE
879 mtmsr r3 ; Turn interruptions back on
882 yesnap: mftbu r9 ; Get the upper timebase
883 mftb r7 ; Get the lower timebase
884 mftbu r8 ; Get the upper one again
885 cmplw r9,r8 ; Did the top tick?
886 bne-- yesnap ; Yeah, need to get it again...
887 stw r8,napStamp(r12) ; Set high order time stamp
888 stw r7,napStamp+4(r12) ; Set low order nap stamp
890 rlwinm. r0,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec?
891 beq-- minovec ; No...
892 dssall ; Stop the streams before we nap/doze
894 lwz r8,napStamp(r12) ; Reload high order time stamp
900 minovec: rlwinm. r7,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before nap?
903 mfspr r7,msscr0 ; Get current MSSCR0 value
904 rlwinm r7,r7,0,0,l2pfes-1 ; Disable L2 Prefetch
905 mtspr msscr0,r7 ; Update MSSCR0 value
910 rlwinm. r7,r11,0,pfSlowNapb,pfSlowNapb ; Should nap at slow speed?
913 mfspr r7,hid1 ; Get current HID1 value
914 oris r7,r7,hi16(hid1psm) ; Select PLL1
915 mtspr hid1,r7 ; Update HID1 value
919 ; We have to open up interruptions here because book 4 says that we should
920 ; turn on only the POW bit and that we should have interrupts enabled.
921 ; The interrupt handler will detect that nap or doze is set if an interrupt
922 ; is taken and set everything up to return directly to machine_idle_ret.
923 ; So, make sure everything we need there is already set up...
927 lis r10,hi16(dozem|napm|sleepm) ; Mask of power management bits
929 bf-- pf64Bitb,mipNSF1 ; skip if 32-bit...
931 sldi r4,r4,32 ; Position the flags
932 sldi r10,r10,32 ; Position the masks
934 mipNSF1: li r2,lo16(MASK(MSR_DR)|MASK(MSR_IR)) ; Get the translation mask
935 andc r6,r6,r10 ; Clean up the old power bits
936 ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE to make exit msr
937 andc r5,r5,r2 ; Clear IR and DR from current MSR
938 or r6,r6,r4 ; Set nap or doze
939 ori r5,r5,lo16(MASK(MSR_EE)) ; Flip on EE to make nap msr
940 oris r2,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
943 mtspr hid0,r6 ; Set up the HID for nap/doze
944 mfspr r6,hid0 ; Yes, this is silly, keep it here
945 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
946 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
947 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
948 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
949 mfspr r6,hid0 ; Yes, this is a duplicate, keep it here
950 isync ; Make sure it is set
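;			(The run of back-to-back mfspr's of hid0 above is deliberate;
;			presumably it is the HID0 access sequence the processor manual
;			requires so the power-management bits settle before the isync.)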
954 ; Turn translation off to nap
957 bt pfNoMSRirb,miNoMSR ; Jump if we need to use SC for this...
958 mtmsr r5 ; Turn translation off, interrupts on
960 b miNoMSRx ; Jump back in line...
962 miNoMSR: mr r3,r5 ; Pass in the new MSR value
963 li r0,loadMSR ; MSR setter ultrafast
964 sc ; Do it to it like you never done before...
966 miNoMSRx: bf-- pf64Bitb,mipowloop ; skip if 32-bit...
968 li r3,0x10 ; Fancy nap threshold is 0x10 ticks
969 mftb r8 ; Get the low half of the time base
970 mfdec r4 ; Get the decrementer ticks
971 cmplw r4,r3 ; Less than threshold?
974 mtdec r3 ; Load decrementer with threshold
975 isync ; and make sure,
976 mfdec r3 ; really sure, it gets there
978 rlwinm r6,r2,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear out the EE bit
979 sync ; Make sure queues are clear
980 mtmsr r6 ; Set MSR with EE off but POW on
981 isync ; Make sure this takes before we proceed
983 mftb r9 ; Get the low half of the time base
984 sub r9,r9,r8 ; Get the number of ticks spent waiting
985 sub r4,r4,r9 ; Adjust the decrementer value
987 mtdec r4 ; Load decrementer with the rest of the timeout
988 isync ; and make sure,
989 mfdec r4 ; really sure, it gets there
992 sync ; Make sure queues are clear
993 mtmsr r2 ; Nap or doze, MSR with POW, EE set, translation off
994 isync ; Make sure this takes before we proceed
995 b mipowloop ; loop if POW does not take
998 ; Note that the interrupt handler will turn off the nap/doze bits in the hid.
999 ; Also remember that the interrupt handler will force return to here whenever
1000 ; the nap/doze bits are set.
1002 .globl EXT(machine_idle_ret)
1003 LEXT(machine_idle_ret)
1004 mtmsr r7 ; Make sure the MSR is what we want
1005 isync ; In case we turn on translation
1007 ; Protect against a lost decrementer trap if the current decrementer value is negative
1008 ; by more than 10 ticks, re-arm it since it is unlikely to fire at this point...
1009 ; A hardware interrupt got us out of machine_idle and may also be contributing to this state
1011 mfdec r6 ; Get decrementer
1012 cmpwi cr0,r6,-10 ; Compare decrementer with -10
1013 bgelr++ ; Return if greater
1015 mtdec r0 ; Set decrementer to 1
1018 /* Put machine to sleep.
1019 * This call never returns. We always exit sleep via a soft reset.
1020 * All external interruptions must be drained at this point and disabled.
1022 * void ml_ppc_do_sleep(void)
1024 * We will use the PPC SLEEP for this.
1026 * There is one bit of hackery in here: we need to enable for
1027 * interruptions when we go to sleep and there may be a pending
1028 * decrementer interrupt. So we make the decrementer 0x7FFFFFFF and enable for
1029 * interruptions. The decrementer interrupt vector recognizes this and returns
1030 * directly back here.
1034 ; Force a line boundary here
1036 .globl EXT(ml_ppc_do_sleep)
1038 LEXT(ml_ppc_do_sleep)
1041 mfmsr r5 ; Hack to spin instead of sleep
1042 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1043 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1044 mtmsr r5 ; No talking
1047 deadsleep: addi r3,r3,1 ; Make analyzer happy
1050 b deadsleep ; Die the death of 1000 joys...
1053 mfsprg r12,1 ; Get the current activation
1054 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
1055 mfsprg r11,2 ; Get CPU specific features
1056 eqv r10,r10,r10 ; Get all foxes
1057 mtcrf 0x04,r11 ; move pfNoMSRirb to cr5
1058 mfspr r4,hid0 ; Get the current power-saving mode
1059 mtcrf 0x02,r11 ; move pf64Bit to cr6
1061 rlwinm. r5,r11,0,pfNoL2PFNapb,pfNoL2PFNapb ; Turn off L2 Prefetch before sleep?
1064 mfspr r5,msscr0 ; Get current MSSCR0 value
1065 rlwinm r5,r5,0,0,l2pfes-1 ; Disable L2 Prefetch
1066 mtspr msscr0,r5 ; Update MSSCR0 value
1071 bt++ pf64Bitb,mpsPF64bit ; PM bits are shifted on 64-bit systems.
1073 rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though)
1074 oris r4,r4,hi16(sleepm) ; Set sleep
1078 lis r5, hi16(dozem|napm|sleepm) ; Clear all possible power-saving modes (not DPM though)
1081 lis r5, hi16(napm) ; Set nap (sleep is entered via nap here)
1086 mfmsr r5 ; Get the current MSR
1087 rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
1088 mtdec r10 ; Load decrementer with 0x7FFFFFFF
1089 isync ; and make sure,
1090 mfdec r9 ; really sure, it gets there
1092 li r2,1 ; Prepare for 64 bit
1093 rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1095 ; Note that we need translation off before we set the HID to sleep. Otherwise
1096 ; we will ignore any PTE misses that occur and cause an infinite loop.
1098 bf++ pf64Bitb,mpsCheckMSR ; check 64-bit processor
1099 rldimi r5,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
1100 mtmsrd r5 ; set 64-bit mode, turn off EE, DR, and IR
1101 isync ; Toss prefetch
1105 bt pfNoMSRirb,mpsNoMSR ; No MSR...
1107 mtmsr r5 ; Translation off
1108 isync ; Toss prefetch
1112 li r0,loadMSR ; Get the MSR setter SC
1113 mr r3,r5 ; Get new MSR
1117 ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE
1119 mtspr hid0,r4 ; Set up the HID to sleep
1120 mfspr r4,hid0 ; Yes, this is silly, keep it here
1121 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1122 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1123 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1124 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1125 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1127 mtmsr r3 ; Enable for interrupts to drain decrementer
1129 add r6,r4,r5 ; Just waste time
1130 add r6,r6,r4 ; A bit more
1131 add r6,r6,r5 ; A bit more
1133 mtmsr r5 ; Interruptions back off
1134 isync ; Toss prefetch
1137 ; We are here with translation off, interrupts off, all possible
1138 ; interruptions drained off, and a decrementer that will not pop.
1141 bl EXT(cacheInit) ; Clear out the caches. This will leave them on
1142 bl EXT(cacheDisable) ; Turn off all caches
1144 mfmsr r5 ; Get the current MSR
1145 oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR
1146 ; Leave EE off because power goes off shortly
1147 mfsprg r12,0 ; Get the per_proc_info
1149 lhz r11,PP_CPU_FLAGS(r12) ; Get the flags
1150 ori r11,r11,SleepState ; Mark SleepState
1151 sth r11,PP_CPU_FLAGS(r12) ; Set the flags
1154 mfsprg r11,2 ; Get CPU specific features
1155 rlwinm. r0,r11,0,pf64Bitb,pf64Bitb ; Test for 64 bit processor
1156 eqv r4,r4,r4 ; Get all foxes
1157 rlwinm r4,r4,0,1,31 ; Make 0x7FFFFFFF
1158 beq slSleepNow ; skip if 32-bit...
1159 li r3, 0x4000 ; Cause decrementer to roll over soon
1160 mtdec r3 ; Load decrementer with 0x00004000
1161 isync ; and make sure,
1162 mfdec r3 ; really sure, it gets there
1165 sync ; Sync it all up
1166 mtmsr r5 ; Do sleep with interruptions enabled
1168 mtdec r4 ; Load decrementer with 0x7FFFFFFF
1169 isync ; and make sure,
1170 mfdec r3 ; really sure, it gets there
1171 b slSleepNow ; Go back to sleep if we wake up...
1175 /* Initialize all caches including the TLBs
1177 * void cacheInit(void)
1179 * This is used to force the caches to an initial clean state. First, we
1180 * check if the cache is on; if so, we need to flush the contents to memory.
1181 * Then we invalidate the L1. Next, we configure and invalidate the L2 etc.
1182 * Finally, we turn on all of the caches.
1184 * Note that if translation is not disabled when this is called, the TLB will not
1185 * be completely clear after return.
1191 ; Force a line boundary here
1191 .globl EXT(cacheInit)
1195 mfsprg r12,0 ; Get the per_proc_info
1196 mfspr r9,hid0 ; Get the current power-saving mode
1198 mfsprg r11,2 ; Get CPU specific features
1199 mfmsr r7 ; Get the current MSR
1200 rlwinm r7,r7,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
1201 rlwinm r7,r7,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
1202 rlwimi r11,r11,pfLClckb+1,31,31 ; Move pfLClck to another position (to keep from using non-volatile CRs)
1203 rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation
1204 rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions
1205 mtcrf 0x87,r11 ; Get the feature flags
1206 lis r10,hi16(dozem|napm|sleepm|dpmm) ; Mask of power management bits
1207 bf-- pf64Bitb,cIniNSF1 ; Skip if 32-bit...
1209 sldi r10,r10,32 ; Position the masks
1211 cIniNSF1: andc r4,r9,r10 ; Clean up the old power bits
1212 mtspr hid0,r4 ; Set up the HID
1213 mfspr r4,hid0 ; Yes, this is silly, keep it here
1214 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1215 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1216 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1217 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1218 mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
1220 bt pfNoMSRirb,ciNoMSR ; No MSR...
1222 mtmsr r5 ; Translation and all off
1223 isync ; Toss prefetch
1227 li r0,loadMSR ; Get the MSR setter SC
1228 mr r3,r5 ; Get new MSR
1232 bf pfAltivecb,cinoDSS ; No Altivec here...
1234 dssall ; Stop streams
1237 cinoDSS: li r5,tlbieLock ; Get the TLBIE lock
1238 li r0,128 ; Get number of TLB entries
1240 li r6,0 ; Start at 0
1241 bf-- pf64Bitb,citlbhang ; Skip if 32-bit...
1242 li r0,1024 ; Get the number of TLB entries
1244 citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock
1245 mr. r2,r2 ; Is it locked?
1246 bne- citlbhang ; It is locked, go wait...
1247 stwcx. r0,0,r5 ; Try to get it
1248 bne- citlbhang ; We was beat...
1250 mtctr r0 ; Set the CTR
1252 cipurgeTLB: tlbie r6 ; Purge this entry
1253 addi r6,r6,4096 ; Next page
1254 bdnz cipurgeTLB ; Do them all...
1256 mtcrf 0x80,r11 ; Set SMP capability
1257 sync ; Make sure all TLB purges are done
1258 eieio ; Order, order in the court
1260 bf pfSMPcapb,cinoSMP ; SMP incapable...
1262 tlbsync ; Sync all TLBs
1266 bf-- pf64Bitb,cinoSMP ; Skip if 32-bit...
1267 ptesync ; Wait for quiet again
1270 cinoSMP: stw r2,tlbieLock(0) ; Unlock TLBIE lock
1272 bt++ pf64Bitb,cin64 ; Skip if 64-bit...
1274 rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on?
1275 beq- cinoL1 ; No, no need to flush...
1277 rlwinm. r0,r11,0,pfL1fab,pfL1fab ; do we have L1 flush assist?
1278 beq ciswdl1 ; If no hw flush assist, go do by software...
1280 mfspr r8,msscr0 ; Get the memory system control register
1281 oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request
1283 mtspr msscr0,r8 ; Start the flush operation
1285 ciwdl1f: mfspr r8,msscr0 ; Get the control register again
1287 rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet?
1288 bne ciwdl1f ; No, flush is still in progress...
1289 b ciinvdl1 ; Go invalidate l1...
1292 ; We need to either make this very complicated or to use ROM for
1293 ; the flush. The problem is that if during the following sequence a
1294 ; snoop occurs that invalidates one of the lines in the cache, the
1295 ; PLRU sequence will be altered making it possible to miss lines
1296 ; during the flush. So, we either need to dedicate an area of RAM
1297 ; to each processor, lock use of a RAM area, or use ROM. ROM is
1298 ; by far the easiest. Note that this is not an issue for machines
1299 ; that have hardware flush assists.
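;
;			So the software flush below works like this: first flush the ROM
;			window it is about to use, then, for each way of the data cache,
;			lock all other ways through LDSTCR, touch enough ROM lines to fill
;			the one unlocked way, flush those lines back out, and rotate on to
;			the next way.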
1302 ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size
1304 bf 31,cisnlck ; Skip if pfLClck not set...
1307 rlwinm r6,r4,0,0,l2pfes-1 ; Disable L2 Prefetch
1308 mtspr msscr0,r6 ; Set it
1312 mfspr r8,ldstcr ; Save the LDSTCR
1313 li r2,1 ; Get a mask of 0x01
1314 lis r3,0xFFF0 ; Point to ROM
1315 rlwinm r11,r0,29,3,31 ; Get the amount of memory to handle all indexes
1317 li r6,0 ; Start here
1319 cisiniflsh: dcbf r6,r3 ; Flush each line of the range we use
1320 addi r6,r6,32 ; Bump to the next
1321 cmplw r6,r0 ; Have we reached the end?
1322 blt+ cisiniflsh ; Nope, continue initial flush...
1324 sync ; Make sure it is done
1326 addi r11,r11,-1 ; Get mask for index wrap
1327 li r6,0 ; Get starting offset
1329 cislckit: not r5,r2 ; Lock all but 1 way
1330 rlwimi r5,r8,0,0,23 ; Build LDSTCR
1331 mtspr ldstcr,r5 ; Lock a way
1332 sync ; Clear out memory accesses
1333 isync ; Wait for all
1336 cistouch: lwzx r10,r3,r6 ; Pick up some trash
1337 addi r6,r6,32 ; Go to the next index
1338 and. r0,r6,r11 ; See if we are about to do next index
1339 bne+ cistouch ; Nope, do more...
1341 sync ; Make sure it is all done
1344 sub r6,r6,r11 ; Back up to start + 1
1345 addi r6,r6,-1 ; Get it right
1347 cisflush: dcbf r3,r6 ; Flush everything out
1348 addi r6,r6,32 ; Go to the next index
1349 and. r0,r6,r11 ; See if we are about to do next index
1350 bne+ cisflush ; Nope, do more...
1352 sync ; Make sure it is all done
1356 rlwinm. r2,r2,1,24,31 ; Shift to next way
1357 bne+ cislckit ; Do this for all ways...
1359 mtspr ldstcr,r8 ; Slam back to original
1367 b cinoL1 ; Go on to level 2...
1370 cisnlck: rlwinm r2,r0,1,0,30 ; Double cache size
1371 add r0,r0,r2 ; Get 3 times cache size
1372 rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines
1373 lis r3,0xFFF0 ; Dead reckon ROM address for now
1374 mtctr r0 ; Number of lines to flush
1376 ciswfldl1a: lwz r2,0(r3) ; Flush anything else
1377 addi r3,r3,32 ; Next line
1378 bdnz ciswfldl1a ; Flush the lot...
1380 ciinvdl1: sync ; Make sure all flushes have been committed
1382 mfspr r8,hid0 ; Get the HID0 bits
1383 rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables
1384 mtspr hid0,r8 ; and turn off L1 cache
1385 sync ; Make sure all is done
1388 ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
1392 mtspr hid0,r8 ; Start the invalidate and turn on cache
1393 rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
1394 mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines)
1400 ; Flush and disable the level 2
1402 mfsprg r10,2 ; need to check 2 features we did not put in CR
1403 rlwinm. r0,r10,0,pfL2b,pfL2b ; do we have L2?
1404 beq cinol2 ; No level 2 cache to flush
1406 mfspr r8,l2cr ; Get the L2CR
1407 lwz r3,pfl2cr(r12) ; Get the L2CR value
1408 rlwinm. r0,r8,0,l2e,l2e ; Was the L2 enabled?
1409 bne ciflushl2 ; Yes, force flush
1410 cmplwi r8, 0 ; Was the L2 all the way off?
1411 beq ciinvdl2 ; Yes, force invalidate
1412 lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get configuration bits
1413 xor r2,r8,r3 ; Get changing bits?
1414 ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits
1415 and. r0,r0,r2 ; Did any change?
1416 bne- ciinvdl2 ; Yes, just invalidate and get PLL synced...
1419 rlwinm. r0,r10,0,pfL2fab,pfL2fab ; hardware-assisted L2 flush?
1420 beq ciswfl2 ; Flush not in hardware...
1422 mr r10,r8 ; Take a copy now
1424 bf 31,cinol2lck ; Skip if pfLClck not set...
1426 oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only
1428 mtspr l2cr,r10 ; Lock out the cache
1432 cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush
1433 sync ; Make sure everything is done
1435 mtspr l2cr,r10 ; Request flush
1437 cihwfl2: mfspr r10,l2cr ; Get back the L2CR
1438 rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over?
1439 bne+ cihwfl2 ; Nope, keep going...
1440 b ciinvdl2 ; Flush done, go invalidate L2...
1443 lwz r0,pfl2Size(r12) ; Get the L2 size
1444 oris r2,r8,hi16(l2dom) ; Set L2 to data only mode
1446 b ciswfl2doa ; Branch to next line...
1450 mtspr l2cr,r2 ; Disable L2
1453 b ciswfl2dod ; It is off, go invalidate it...
1456 b ciswfl2dob ; Branch to next...
1459 sync ; Finish memory stuff
1460 isync ; Stop speculation
1461 b ciswfl2doc ; Jump back up and turn on data only...
1463 rlwinm r0,r0,27,5,31 ; Get the number of lines
1464 lis r10,0xFFF0 ; Dead reckon ROM for now
1465 mtctr r0 ; Set the number of lines
1467 ciswfldl2a: lwz r0,0(r10) ; Load something to flush something
1468 addi r10,r10,32 ; Next line
1469 bdnz ciswfldl2a ; Do the lot...
1471 ciinvdl2: rlwinm r8,r3,0,l2e+1,31 ; Clear the enable bit
1472 b cinla ; Branch to next line...
1475 cinlc: mtspr l2cr,r8 ; Disable L2
1478 b ciinvl2 ; It is off, go invalidate it...
1480 cinla: b cinlb ; Branch to next...
1482 cinlb: sync ; Finish memory stuff
1483 isync ; Stop speculation
1484 b cinlc ; Jump back up and turn off cache...
1489 cmplwi r3, 0 ; Should the L2 be all the way off?
1490 beq cinol2 ; Yes, done with L2
1492 oris r2,r8,hi16(l2im) ; Get the invalidate flag set
1494 mtspr l2cr,r2 ; Start the invalidate
1497 ciinvdl2a: mfspr r2,l2cr ; Get the L2CR
1498 mfsprg r0,2 ; need to check a feature in "non-volatile" set
1499 rlwinm. r0,r0,0,pfL2ib,pfL2ib ; flush in HW?
1500 beq ciinvdl2b ; Flush not in hardware...
1501 rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going?
1502 bne+ ciinvdl2a ; Assume so, this will take a looong time...
1504 b cinol2 ; No level 2 cache to flush
1506 rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going?
1507 bne+ ciinvdl2a ; Assume so, this will take a looong time...
1509 mtspr l2cr,r8 ; Turn off the invalidate request
1514 ; Flush and enable the level 3
1516 bf pfL3b,cinol3 ; No level 3 cache to flush
1518 mfspr r8,l3cr ; Get the L3CR
1519 lwz r3,pfl3cr(r12) ; Get the L3CR value
1520 rlwinm. r0,r8,0,l3e,l3e ; Was the L3 enabled?
1521 bne ciflushl3 ; Yes, force flush
1522 cmplwi r8, 0 ; Was the L3 all the way off?
1523 beq ciinvdl3 ; Yes, force invalidate
1524 lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits
1525 xor r2,r8,r3 ; Get changing bits?
1526 ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits
1527 and. r0,r0,r2 ; Did any change?
1528 bne- ciinvdl3 ; Yes, just invalidate and get PLL synced...
1531 sync ; 7450 book says do this even though not needed
1532 mr r10,r8 ; Take a copy now
1534 bf 31,cinol3lck ; Skip if pfLClck not set...
1536 oris r10,r10,hi16(l3iom) ; Set instruction-only
1537 ori r10,r10,lo16(l3donlym) ; Set data-only
1539 mtspr l3cr,r10 ; Lock out the cache
1543 cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush
1544 sync ; Make sure everything is done
1546 mtspr l3cr,r10 ; Request flush
1548 cihwfl3: mfspr r10,l3cr ; Get back the L3CR
1549 rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over?
1550 bne+ cihwfl3 ; Nope, keep going...
1552 ciinvdl3: rlwinm r8,r3,0,l3e+1,31 ; Clear the enable bit
1553 sync ; Make sure of life, liberty, and justice
1554 mtspr l3cr,r8 ; Disable L3
1557 cmplwi r3, 0 ; Should the L3 be all the way off?
1558 beq cinol3 ; Yes, done with L3
1560 ori r8,r8,lo16(l3im) ; Get the invalidate flag set
1562 mtspr l3cr,r8 ; Start the invalidate
1564 ciinvdl3b: mfspr r8,l3cr ; Get the L3CR
1565 rlwinm. r8,r8,0,l3i,l3i ; Is the invalidate still going?
1566 bne+ ciinvdl3b ; Assume so...
1569 lwz r10, pfBootConfig(r12) ; Pick up the boot configuration word
1570 rlwinm. r10, r10, 24, 28, 31 ; Isolate the L3 PDET field (if any)
1571 beq ciinvdl3nopdet ; No L3 PDET value, skip the adjustment...
1575 rlwimi r2, r8, 0, 24, 31 ; ?
1576 subfic r10, r10, 32 ; ?
1578 ori r2, r2, 0x0080 ; ?
1581 mtspr l3pdet, r8 ; Set the adjusted L3 PDET value
1585 mfspr r8,l3cr ; Get the L3CR
1586 rlwinm r8,r8,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
1587 mtspr l3cr,r8 ; Disable the clock
1590 ciinvdl3c: addi r2,r2,-1 ; ?
1594 mfspr r10,msssr0 ; ?
1595 rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ?
1596 mtspr msssr0,r10 ; ?
1599 mtspr l3cr,r3 ; Enable it as desired
1602 mfsprg r0,2 ; need to check a feature in "non-volatile" set
1603 rlwinm. r0,r0,0,pfL2b,pfL2b ; is there an L2 cache?
1604 beq cinol2a ; No level 2 cache to enable
1606 lwz r3,pfl2cr(r12) ; Get the L2CR value
1607 cmplwi r3, 0 ; Should the L2 be all the way off?
1608 beq cinol2a ; Yes, done with L2
1609 mtspr l2cr,r3 ; Enable it as desired
1613 ; Invalidate and turn on L1s
1617 bt 31,cinoexit ; Skip if pfLClck set...
1619 rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables
1620 mtspr hid0,r8 ; Turn off dem caches
1623 ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate
1624 rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits
1628 mtspr hid0,r8 ; Start the invalidate and turn on L1 cache
1630 cinoexit: mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) and restore entry conditions
1632 mtmsr r7 ; Restore MSR to entry
1638 ; Handle 64-bit architecture
1639 ; This processor cannot run without caches, so we just push everything out
1640 ; and flush. It will be relatively clean afterwards
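;			The plan: kill I- and D-side prefetching through HID1/HID4, flash
;			invalidate and disable the L1 D-cache via HID4, switch the GUS mode
;			register (over SCOM) into direct L2 addressing so a load loop can
;			displace the whole L2, dcbf it all back out, run an icbi loop with
;			force-match mode set in HID1 to clear the I-cache, and finally
;			restore the entry HID values.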
1646 mfspr r10,hid1 ; Save hid1
1647 mfspr r4,hid4 ; Save hid4
1648 mr r12,r10 ; Really save hid1
1649 mr r11,r4 ; Get a working copy of hid4
1652 eqv r2,r2,r2 ; Get all foxes
1654 rldimi r10,r0,55,7 ; Clear I$ prefetch bits (7:8)
1657 mtspr hid1,r10 ; Stick it
1658 mtspr hid1,r10 ; Stick it again
1661 rldimi r11,r2,38,25 ; Disable D$ prefetch (25:25)
1664 mtspr hid4,r11 ; Stick it
1667 li r3,8 ; Set bit 28+32
1668 sldi r3,r3,32 ; Make it bit 28
1669 or r3,r3,r11 ; Turn on the flash invalidate L1D$
1671 oris r5,r11,0x0600 ; Set disable L1D$ bits
1673 mtspr hid4,r3 ; Invalidate
1676 mtspr hid4,r5 ; Un-invalidate and disable L1D$
1679 lis r8,GUSModeReg ; Get the GUS mode ring address
1680 mfsprg r0,2 ; Get the feature flags
1681 ori r8,r8,0x8000 ; Set to read data
1682 rlwinm. r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
1686 mtspr scomc,r8 ; Request the GUS mode
1687 mfspr r11,scomd ; Get the GUS mode
1688 mfspr r8,scomc ; Get back the status (we just ignore it)
1692 sld r11,r11,r0 ; Fix up if needed
1694 ori r6,r11,lo16(GUSMdmapen) ; Set the bit that means direct L2 cache address
1695 lis r8,GUSModeReg ; Get GUS mode register address
1699 mtspr scomd,r6 ; Set that we want direct L2 mode
1700 mtspr scomc,r8 ; Tell GUS we want direct L2 mode
1701 mfspr r3,scomc ; Get back the status
1705 li r3,0 ; Clear start point
1707 cflushlp: lis r6,0x0040 ; Pick 4MB line as our target
1708 or r6,r6,r3 ; Put in the line offset
1709 lwz r5,0(r6) ; Load a line
1710 addis r6,r6,8 ; Roll bit 42:44
1711 lwz r5,0(r6) ; Load a line
1712 addis r6,r6,8 ; Roll bit 42:44
1713 lwz r5,0(r6) ; Load a line
1714 addis r6,r6,8 ; Roll bit 42:44
1715 lwz r5,0(r6) ; Load a line
1716 addis r6,r6,8 ; Roll bit 42:44
1717 lwz r5,0(r6) ; Load a line
1718 addis r6,r6,8 ; Roll bit 42:44
1719 lwz r5,0(r6) ; Load a line
1720 addis r6,r6,8 ; Roll bit 42:44
1721 lwz r5,0(r6) ; Load a line
1722 addis r6,r6,8 ; Roll bit 42:44
1723 lwz r5,0(r6) ; Load a line
1725 addi r3,r3,128 ; Next line
1726 andis. r5,r3,8 ; Have we done enough?
1727 beq++ cflushlp ; Not yet...
1731 lis r6,0x0040 ; Pick 4MB line as our target
1733 cflushx: dcbf 0,r6 ; Flush line and invalidate
1734 addi r6,r6,128 ; Next line
1735 andis. r5,r6,0x0080 ; Have we done enough?
1736 beq++ cflushx ; Keep on flushing...
1738 mr r3,r10 ; Copy current hid1
1739 rldimi r3,r2,54,9 ; Set force icbi match mode
1741 li r6,0 ; Set start of ICBI range
1743 mtspr hid1,r3 ; Stick it
1744 mtspr hid1,r3 ; Stick it again
1747 cflicbi: icbi 0,r6 ; Kill I$
1748 addi r6,r6,128 ; Next line
1749 andis. r5,r6,1 ; Have we done them all?
1750 beq++ cflicbi ; Not yet...
1752 lis r8,GUSModeReg ; Get GUS mode register address
1756 mtspr scomd,r11 ; Set that we do not want direct mode
1757 mtspr scomc,r8 ; Tell GUS we do not want direct mode
1758 mfspr r3,scomc ; Get back the status
1763 mtspr hid0,r9 ; Restore entry hid0
1764 mfspr r9,hid0 ; Yes, this is silly, keep it here
1765 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1766 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1767 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1768 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1769 mfspr r9,hid0 ; Yes, this is a duplicate, keep it here
1773 mtspr hid1,r12 ; Restore entry hid1
1774 mtspr hid1,r12 ; Stick it again
1778 mtspr hid4,r4 ; Restore entry hid4
1782 mtmsr r7 ; Restore MSR to entry
1788 /* Disables all caches
1790 * void cacheDisable(void)
1792 * Turns off all caches on the processor. They are not flushed.
1796 ; Force a line boundary here
1798 .globl EXT(cacheDisable)
1802 mfsprg r11,2 ; Get CPU specific features
1803 mtcrf 0x83,r11 ; Set feature flags
1805 bf pfAltivecb,cdNoAlt ; No vectors...
1807 dssall ; Stop streams
1811 btlr pf64Bitb ; No way to disable a 64-bit machine...
1813 mfspr r5,hid0 ; Get the hid
1814 rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables
1815 mtspr hid0,r5 ; Turn off dem caches
1818 rlwinm. r0,r11,0,pfL2b,pfL2b ; is there an L2?
1819 beq cdNoL2 ; Skip if no L2...
1821 mfspr r5,l2cr ; Get the L2
1822 rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit
1824 b cinlaa ; Branch to next line...
1827 cinlcc: mtspr l2cr,r5 ; Disable L2
1830 b cdNoL2 ; It is off, we are done...
1832 cinlaa: b cinlbb ; Branch to next...
1834 cinlbb: sync ; Finish memory stuff
1835 isync ; Stop speculation
1836 b cinlcc ; Jump back up and turn off cache...
1840 bf pfL3b,cdNoL3 ; Skip down if no L3...
1842 mfspr r5,l3cr ; Get the L3
1843 rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit
1844 rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off cache enable bit
1845 mtspr l3cr,r5 ; Disable the caches
1852 /* Initialize processor thermal monitoring
1853 * void ml_thrm_init(void)
1855 * Obsolete, deprecated and will be removed.
1858 ; Force a line boundary here
1860 .globl EXT(ml_thrm_init)
1865 /* Set thermal monitor bounds
1866 * void ml_thrm_set(unsigned int low, unsigned int high)
1868 * Obsolete, deprecated and will be removed.
1871 ; Force a line boundary here
1873 .globl EXT(ml_thrm_set)
1878 /* Read processor temperature
1879 * unsigned int ml_read_temp(void)
1881 * Obsolete, deprecated and will be removed.
1884 ; Force a line boundary here
1886 .globl EXT(ml_read_temp)
1892 /* Throttle processor speed up or down
1893 * unsigned int ml_throttle(unsigned int step)
1895 * Returns the old speed and sets the new one. Both step and the return value
1896 * are numbers from 0 to 255 that define the number of throttle steps; 0 is off and "ictcfim" is max * 2.
1898 * Obsolete, deprecated and will be removed.
1901 ; Force a line boundary here
1903 .globl EXT(ml_throttle)
1910 ** ml_get_timebase()
1912 ** Entry - R3 contains pointer to 64 bit structure.
1914 ** Exit - 64 bit structure filled in.
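**
** The timebase is read with the canonical mftbu/mftb/mftbu retry
** pattern (the same one used at yesnap above) so the two halves
** are guaranteed to be a consistent snapshot.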
1917 ; Force a line boundary here
1919 .globl EXT(ml_get_timebase)
1921 LEXT(ml_get_timebase)
1936 * unsigned int cpu_number(void)
1938 * Returns the current cpu number.
1942 .globl EXT(cpu_number)
1945 mfsprg r4,1 ; Get the current activation
1946 lwz r4,ACT_PER_PROC(r4) ; Get the per_proc block
1947 lhz r3,PP_CPU_NUMBER(r4) ; Get CPU number
1951 * processor_t current_processor(void)
1953 * Returns the current processor.
1957 .globl EXT(current_processor)
1959 LEXT(current_processor)
1960 mfsprg r3,1 ; Get the current activation
1961 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1962 addi r3,r3,PP_PROCESSOR
1965 #if PROCESSOR_SIZE > PP_PROCESSOR_SIZE
1966 #error processor overflows per_proc
1970 * ast_t *ast_pending(void)
1972 * Returns the address of the pending AST mask for the current processor.
1976 .globl EXT(ast_pending)
1979 mfsprg r3,1 ; Get the current activation
1980 lwz r3,ACT_PER_PROC(r3) ; Get the per_proc block
1981 addi r3,r3,PP_PENDING_AST
1985 * void machine_set_current_thread(thread_t)
1987 * Set the current thread
1990 .globl EXT(machine_set_current_thread)
1992 LEXT(machine_set_current_thread)
1994 mfsprg r4,1 ; Get spr1
1995 lwz r5,ACT_PER_PROC(r4) ; Get the PerProc from the previous active thread
1996 stw r5,ACT_PER_PROC(r3) ; Set the PerProc in the active thread
1997 mtsprg 1,r3 ; Set spr1 with the active thread
2001 * thread_t current_thread(void)
2002 * thread_t current_act(void)
2005 * Return the current thread for outside components.
2008 .globl EXT(current_thread)
2009 .globl EXT(current_act)
2011 LEXT(current_thread)
2018 .globl EXT(mach_absolute_time)
2019 LEXT(mach_absolute_time)
2031 ; Force a line boundary here
2033 .globl EXT(ml_sense_nmi)
2040 ** ml_set_processor_speed_powertune()
2043 ; Force a line boundary here
2045 .globl EXT(ml_set_processor_speed_powertune)
2047 LEXT(ml_set_processor_speed_powertune)
2048 mflr r0 ; Save the link register
2049 stwu r1, -(FM_ALIGN(4*4)+FM_SIZE)(r1) ; Make some space on the stack
2050 stw r28, FM_ARG0+0x00(r1) ; Save a register
2051 stw r29, FM_ARG0+0x04(r1) ; Save a register
2052 stw r30, FM_ARG0+0x08(r1) ; Save a register
2053 stw r31, FM_ARG0+0x0C(r1) ; Save a register
2054 stw r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2056 mfsprg r31,1 ; Get the current activation
2057 lwz r31,ACT_PER_PROC(r31) ; Get the per_proc block
2059 rlwinm r28, r3, 31-dnap, dnap, dnap ; Shift the 1 bit to the dnap+32 bit
2060 rlwinm r3, r3, 2, 29, 29 ; Shift the 1 to a 4 and mask
2061 addi r3, r3, pfPowerTune0 ; Add in the pfPowerTune0 offset
2062 lwzx r29, r31, r3 ; Load the PowerTune number 0 or 1
2064 sldi r28, r28, 32 ; Shift to the top half
2065 ld r3, pfHID0(r31) ; Load the saved hid0 value
2066 and r28, r28, r3 ; Save the dnap bit
2067 lis r4, hi16(dnapm) ; Make a mask for the dnap bit
2068 sldi r4, r4, 32 ; Shift to the top half
2069 andc r3, r3, r4 ; Clear the dnap bit
2070 or r28, r28, r3 ; Insert the dnap bit as needed for later
2073 mtspr hid0, r3 ; Turn off dnap in hid0
2074 mfspr r3, hid0 ; Yes, this is silly, keep it here
2075 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
2076 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
2077 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
2078 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
2079 mfspr r3, hid0 ; Yes, this is a duplicate, keep it here
2080 isync ; Make sure it is set
2082 lis r3, hi16(PowerTuneControlReg) ; Write zero to the PCR
2083 ori r3, r3, lo16(PowerTuneControlReg)
2088 lis r3, hi16(PowerTuneControlReg) ; Write the PowerTune value to the PCR
2089 ori r3, r3, lo16(PowerTuneControlReg)
2094 rlwinm r29, r29, 13-6, 6, 7 ; Move to PSR speed location and isolate the requested speed
2096 lis r3, hi16(PowerTuneStatusReg) ; Read the status from the PSR
2097 ori r3, r3, lo16(PowerTuneStatusReg)
2101 rlwinm r0, r5, 0, 6, 7 ; Isolate the current speed
2102 rlwimi r0, r5, 0, 2, 2 ; Copy in the change in progress bit
2103 cmpw r0, r29 ; Compare the requested and current speeds
2104 beq spsPowerTuneDone
2105 rlwinm. r0, r5, 0, 3, 3
2106 beq spsPowerTuneLoop
2110 mtspr hid0, r28 ; Turn on dnap in hid0 if needed
2111 mfspr r28, hid0 ; Yes, this is silly, keep it here
2112 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
2113 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
2114 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
2115 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
2116 mfspr r28, hid0 ; Yes, this is a duplicate, keep it here
2117 isync ; Make sure it is set
2119 lwz r0, (FM_ALIGN(4*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
2120 lwz r28, FM_ARG0+0x00(r1) ; Restore a register
2121 lwz r29, FM_ARG0+0x04(r1) ; Restore a register
2122 lwz r30, FM_ARG0+0x08(r1) ; Restore a register
2123 lwz r31, FM_ARG0+0x0C(r1) ; Restore a register
2124 lwz r1, FM_BACKPTR(r1) ; Pop the stack
2129 ** ml_set_processor_speed_dpll()
2132 ; Force a line boundary here
2134 .globl EXT(ml_set_processor_speed_dpll)
2136 LEXT(ml_set_processor_speed_dpll)
2137 mfsprg r5,1 ; Get the current activation
2138 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
2140 cmplwi r3, 0 ; Turn off BTIC before low speed
2142 mfspr r4, hid0 ; Get the current hid0 value
2143 rlwinm r4, r4, 0, btic+1, btic-1 ; Clear the BTIC bit
2145 mtspr hid0, r4 ; Set the new hid0 value
2150 mfspr r4, hid1 ; Get the current PLL settings
2151 rlwimi r4, r3, 31-hid1ps, hid1ps, hid1ps ; Copy the PLL Select bit
2152 stw r4, pfHID1(r5) ; Save the new hid1 value
2153 mtspr hid1, r4 ; Select desired PLL
2155 cmplwi r3, 0 ; Restore BTIC after high speed
2157 lwz r4, pfHID0(r5) ; Load the hid0 value
2159 mtspr hid0, r4 ; Set the hid0 value
2167 ** ml_set_processor_speed_dfs(divideby)
2168 ** divideby == 0 then divide by 1 (full speed)
2169 ** divideby == 1 then divide by 2 (half speed)
2170 ** divideby == 2 then divide by 4 (quarter speed)
2171 ** divideby == 3 then divide by 4 (quarter speed) - preferred
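**
** The low two bits of divideby are inserted directly into the HID1
** DFS field (hid1dfs0:hid1dfs1) below, which is why 2 and 3 both
** encode divide-by-4.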
2174 ; Force a line boundary here
2176 .globl EXT(ml_set_processor_speed_dfs)
2178 LEXT(ml_set_processor_speed_dfs)
2180 mfspr r4,hid1 ; Get the current HID1
2181 mfsprg r5,0 ; Get the per_proc_info
2182 rlwimi r4,r3,31-hid1dfs1,hid1dfs0,hid1dfs1 ; Stick the new divider bits in
2183 stw r4,pfHID1(r5) ; Save the new hid1 value
2185 mtspr hid1,r4 ; Set the new HID1
2192 ** ml_set_processor_voltage()
2195 ; Force a line boundary here
2197 .globl EXT(ml_set_processor_voltage)
2199 LEXT(ml_set_processor_voltage)
2200 mfsprg r5,1 ; Get the current activation
2201 lwz r5,ACT_PER_PROC(r5) ; Get the per_proc block
2203 lwz r6, pfPowerModes(r5) ; Get the supported power modes
2205 rlwinm. r0, r6, 0, pmDPLLVminb, pmDPLLVminb ; Is DPLL Vmin supported
2208 mfspr r4, hid2 ; Get HID2 value
2209 rlwimi r4, r3, 31-hid2vmin, hid2vmin, hid2vmin ; Insert the voltage mode bit
2210 mtspr hid2, r4 ; Set the voltage mode
2211 sync ; Make sure it is done
2218 ; unsigned int ml_scom_write(unsigned int reg, unsigned long long data)
2219 ; 64-bit machines only
2224 .globl EXT(ml_scom_write)
2228 rldicr r3,r3,8,47 ; Align the register number correctly
2229 rldimi r5,r4,32,0 ; Merge the high part of data
2230 sync ; Clean up everything
2232 mtspr scomd,r5 ; Stick in the data
2233 mtspr scomc,r3 ; Set write to register
2237 mfspr r3,scomc ; Read back status
2241 ; unsigned int ml_scom_read(unsigned int reg, unsigned long long *data)
2242 ; 64-bit machines only
2244 ; ASM Callers: data (r4) can be zero and the 64 bit data will be returned in r5
2248 .globl EXT(ml_scom_read)
2252 mfsprg r0,2 ; Get the feature flags
2253 rldicr r3,r3,8,47 ; Align the register number correctly
2254 rlwinm r0,r0,pfSCOMFixUpb+1,31,31 ; Set shift if we need a fix me up
2256 ori r3,r3,0x8000 ; Set to read data
2259 mtspr scomc,r3 ; Request the register
2260 mfspr r5,scomd ; Get the register contents
2261 mfspr r3,scomc ; Get back the status
2265 sld r5,r5,r0 ; Fix up if needed
2267 cmplwi r4, 0 ; If data pointer is null, just return
2268 beqlr ; the received data in r5
2269 std r5,0(r4) ; Pass back the received data
2273 ; Calculates the hdec to dec ratio
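; The ratio is measured rather than looked up: spin until 10000 timebase
; ticks have gone by, then return in r3 how far HDEC moved over the same
; interval.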
2277 .globl EXT(ml_hdec_ratio)
2281 li r0,0 ; Clear the EE bit (and everything else for that matter)
2282 mfmsr r11 ; Get the MSR
2283 mtmsrd r0,1 ; Set the EE bit only (do not care about RI)
2284 rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
2285 mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed)
2286 or r12,r10,r11 ; Turn on EE if on before we turned it off
2288 mftb r9 ; Get time now
2289 mfspr r2,hdec ; Save hdec
2291 mhrcalc: mftb r8 ; Get time now
2292 sub r8,r8,r9 ; How many ticks?
2293 cmplwi r8,10000 ; 10000 yet?
2294 blt mhrcalc ; Nope...
2296 mfspr r9,hdec ; Get hdec now
2297 sub r3,r2,r9 ; How many ticks?
2298 mtmsrd r12,1 ; Flip EE on if needed
2305 ; Calculates the number of ticks to the supplied event and
2306 ; sets the decrementer. Never set the time for less than the
2307 ; minimum, which is 10, nor for more than maxDec, which is usually
2308 ; 0x7FFFFFFF but can be changed by root.
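;
; The computation below is branch-free: sign-propagated masks (r0, r2) pin
; the requested duration at the minimum and clamp it at maxDec, and the
; bne-- at the bottom retries the whole calculation if the timebase upper
; word ticked while it was being sampled.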
2319 spOver: mftbu r8 ; Get upper time
2320 addic r2,r4,-kMin ; Subtract minimum from target
2322 addme r11,r3 ; Do you have any bits I could borrow?
2323 mftbu r10 ; Get upper again
2324 subfe r0,r0,r0 ; Get -1 if we went negative 0 otherwise
2325 subc r7,r2,r9 ; Subtract bottom and get carry
2326 cmplw r8,r10 ; Did timebase upper tick?
2327 subfe r6,r8,r11 ; Get the upper difference accounting for borrow
2328 lwz r12,maxDec(0) ; Get the maximum decrementer size
2329 addme r0,r0 ; Get -1 or -2 if anything negative, 0 otherwise
2330 addic r2,r6,-1 ; Set carry if diff < 2**32
2331 srawi r0,r0,1 ; Make all foxes
2332 subi r10,r12,kMin ; Adjust maximum for minimum adjust
2333 andc r7,r7,r0 ; Pin time at 0 if under minimum
2334 subfe r2,r2,r2 ; 0 if diff > 2**32, -1 otherwise
2335 sub r7,r7,r10 ; Negative if duration is less than (max - min)
2336 or r2,r2,r0 ; If the duration is negative, it is not too big
2337 srawi r0,r7,31 ; -1 if duration is too small
2338 and r7,r7,r2 ; Clear duration if high part too big
2339 and r7,r7,r0 ; Clear duration if low part too big
2340 bne-- spOver ; Timer ticked...
2341 add r3,r7,r12 ; Add back the max for total
2342 mtdec r3 ; Set the decrementer